| repo_id | size (bytes) | file_path |
|---|---|---|
| aenu1/aps3e | 525,563 | app/src/main/cpp/rpcs3/3rdparty/wolfssl/wolfssl/wolfcrypt/src/aes_gcm_asm.S |

The `content` column of this record — the assembly source file — follows below.
/* aes_gcm_asm.S */
/*
* Copyright (C) 2006-2023 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
#ifdef WOLFSSL_USER_SETTINGS
#ifdef WOLFSSL_USER_SETTINGS_ASM
/*
* user_settings_asm.h is a file generated by the script user_settings_asm.sh.
* The script takes in a user_settings.h and produces user_settings_asm.h, which
* is a stripped down version of user_settings.h containing only preprocessor
* directives. This makes the header safe to include in assembly (.S) files.
*/
#include "user_settings_asm.h"
#else
/*
* Note: if user_settings.h contains any C code (e.g. a typedef or function
* prototype), including it here in an assembly (.S) file will cause an
* assembler failure. See user_settings_asm.h above.
*/
#include "user_settings.h"
#endif /* WOLFSSL_USER_SETTINGS_ASM */
#endif /* WOLFSSL_USER_SETTINGS */
/* Default the AVX feature macros on for this build unless the user's
 * configuration explicitly disabled them. */
#ifndef HAVE_INTEL_AVX1
#define HAVE_INTEL_AVX1
#endif /* HAVE_INTEL_AVX1 */
#ifndef NO_AVX2_SUPPORT
#define HAVE_INTEL_AVX2
#endif /* NO_AVX2_SUPPORT */
#ifdef WOLFSSL_X86_64_BUILD
/* 16-byte-aligned constant pool used by the AES-GCM routines below.
 * Each constant repeats the same section/alignment preamble because this
 * file is machine generated; ELF uses .data/.align, Mach-O uses
 * __DATA,__data/.p2align. */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
/* Counter-increment constants L_aes_gcm_one .. L_aes_gcm_eight:
 * the value N sits in the upper 64-bit lane (second .quad) so that, after
 * the counter block has been byte-swapped with L_aes_gcm_bswap_epi64,
 * `paddd` adds N to the 32-bit block counter. */
L_aes_gcm_one:
.quad 0x0, 0x1
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_aes_gcm_two:
.quad 0x0, 0x2
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_aes_gcm_three:
.quad 0x0, 0x3
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_aes_gcm_four:
.quad 0x0, 0x4
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_aes_gcm_five:
.quad 0x0, 0x5
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_aes_gcm_six:
.quad 0x0, 0x6
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_aes_gcm_seven:
.quad 0x0, 0x7
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_aes_gcm_eight:
.quad 0x0, 0x8
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
/* pshufb mask that reverses the byte order within each 64-bit half of an
 * XMM register (used on the 128-bit counter block, where only byte order
 * inside each qword must flip). */
L_aes_gcm_bswap_epi64:
.quad 0x1020304050607, 0x8090a0b0c0d0e0f
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
/* pshufb mask that reverses all 16 bytes of an XMM register: converts a
 * big-endian GHASH block to/from the little-endian lane order used by
 * pclmulqdq. */
L_aes_gcm_bswap_mask:
.quad 0x8090a0b0c0d0e0f, 0x1020304050607
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
/* GHASH reduction constant for GF(2^128): the upper qword 0xc2... encodes
 * the reduction polynomial x^128 + x^7 + x^2 + x + 1 in reflected form;
 * used with pand during the Montgomery-style reduction. */
L_aes_gcm_mod2_128:
.quad 0x1, 0xc200000000000000
#ifndef __APPLE__
.text
.globl AES_GCM_encrypt_aesni
.type AES_GCM_encrypt_aesni,@function
.align 16
AES_GCM_encrypt_aesni:
#else
.section __TEXT,__text
.globl _AES_GCM_encrypt_aesni
.p2align 4
_AES_GCM_encrypt_aesni:
#endif /* __APPLE__ */
pushq %r13
pushq %r12
pushq %rbx
pushq %r14
pushq %r15
movq %rdx, %r12
movq %rcx, %rax
movl 48(%rsp), %r11d
movl 56(%rsp), %ebx
movl 64(%rsp), %r14d
movq 72(%rsp), %r15
movl 80(%rsp), %r10d
subq $0xa0, %rsp
pxor %xmm4, %xmm4
pxor %xmm6, %xmm6
cmpl $12, %ebx
movl %ebx, %edx
jne L_AES_GCM_encrypt_aesni_iv_not_12
# # Calculate values when IV is 12 bytes
# Set counter based on IV
movl $0x1000000, %ecx
pinsrq $0x00, (%rax), %xmm4
pinsrd $2, 8(%rax), %xmm4
pinsrd $3, %ecx, %xmm4
# H = Encrypt X(=0) and T = Encrypt counter
movdqa %xmm4, %xmm1
movdqa (%r15), %xmm5
pxor %xmm5, %xmm1
movdqa 16(%r15), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 32(%r15), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 48(%r15), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 64(%r15), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 80(%r15), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 96(%r15), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 112(%r15), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 128(%r15), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 144(%r15), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
cmpl $11, %r10d
movdqa 160(%r15), %xmm7
jl L_AES_GCM_encrypt_aesni_calc_iv_12_last
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 176(%r15), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
cmpl $13, %r10d
movdqa 192(%r15), %xmm7
jl L_AES_GCM_encrypt_aesni_calc_iv_12_last
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 208(%r15), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 224(%r15), %xmm7
L_AES_GCM_encrypt_aesni_calc_iv_12_last:
aesenclast %xmm7, %xmm5
aesenclast %xmm7, %xmm1
pshufb L_aes_gcm_bswap_mask(%rip), %xmm5
movdqu %xmm1, 144(%rsp)
jmp L_AES_GCM_encrypt_aesni_iv_done
L_AES_GCM_encrypt_aesni_iv_not_12:
# Calculate values when IV is not 12 bytes
# H = Encrypt X(=0)
movdqa (%r15), %xmm5
aesenc 16(%r15), %xmm5
aesenc 32(%r15), %xmm5
aesenc 48(%r15), %xmm5
aesenc 64(%r15), %xmm5
aesenc 80(%r15), %xmm5
aesenc 96(%r15), %xmm5
aesenc 112(%r15), %xmm5
aesenc 128(%r15), %xmm5
aesenc 144(%r15), %xmm5
cmpl $11, %r10d
movdqa 160(%r15), %xmm9
jl L_AES_GCM_encrypt_aesni_calc_iv_1_aesenc_avx_last
aesenc %xmm9, %xmm5
aesenc 176(%r15), %xmm5
cmpl $13, %r10d
movdqa 192(%r15), %xmm9
jl L_AES_GCM_encrypt_aesni_calc_iv_1_aesenc_avx_last
aesenc %xmm9, %xmm5
aesenc 208(%r15), %xmm5
movdqa 224(%r15), %xmm9
L_AES_GCM_encrypt_aesni_calc_iv_1_aesenc_avx_last:
aesenclast %xmm9, %xmm5
pshufb L_aes_gcm_bswap_mask(%rip), %xmm5
# Calc counter
# Initialization vector
cmpl $0x00, %edx
movq $0x00, %rcx
je L_AES_GCM_encrypt_aesni_calc_iv_done
cmpl $16, %edx
jl L_AES_GCM_encrypt_aesni_calc_iv_lt16
andl $0xfffffff0, %edx
L_AES_GCM_encrypt_aesni_calc_iv_16_loop:
movdqu (%rax,%rcx,1), %xmm8
pshufb L_aes_gcm_bswap_mask(%rip), %xmm8
pxor %xmm8, %xmm4
pshufd $0x4e, %xmm4, %xmm1
pshufd $0x4e, %xmm5, %xmm2
movdqa %xmm5, %xmm3
movdqa %xmm5, %xmm0
pclmulqdq $0x11, %xmm4, %xmm3
pclmulqdq $0x00, %xmm4, %xmm0
pxor %xmm4, %xmm1
pxor %xmm5, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm0, %xmm7
movdqa %xmm3, %xmm4
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm7
pxor %xmm1, %xmm4
movdqa %xmm7, %xmm0
movdqa %xmm4, %xmm1
psrld $31, %xmm0
psrld $31, %xmm1
pslld $0x01, %xmm7
pslld $0x01, %xmm4
movdqa %xmm0, %xmm2
pslldq $4, %xmm0
psrldq $12, %xmm2
pslldq $4, %xmm1
por %xmm2, %xmm4
por %xmm0, %xmm7
por %xmm1, %xmm4
movdqa %xmm7, %xmm0
movdqa %xmm7, %xmm1
movdqa %xmm7, %xmm2
pslld $31, %xmm0
pslld $30, %xmm1
pslld $25, %xmm2
pxor %xmm1, %xmm0
pxor %xmm2, %xmm0
movdqa %xmm0, %xmm1
psrldq $4, %xmm1
pslldq $12, %xmm0
pxor %xmm0, %xmm7
movdqa %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
psrld $0x01, %xmm2
psrld $2, %xmm3
psrld $7, %xmm0
pxor %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm2
pxor %xmm7, %xmm2
pxor %xmm2, %xmm4
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_aesni_calc_iv_16_loop
movl %ebx, %edx
cmpl %edx, %ecx
je L_AES_GCM_encrypt_aesni_calc_iv_done
L_AES_GCM_encrypt_aesni_calc_iv_lt16:
subq $16, %rsp
pxor %xmm8, %xmm8
xorl %ebx, %ebx
movdqu %xmm8, (%rsp)
L_AES_GCM_encrypt_aesni_calc_iv_loop:
movzbl (%rax,%rcx,1), %r13d
movb %r13b, (%rsp,%rbx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_aesni_calc_iv_loop
movdqu (%rsp), %xmm8
addq $16, %rsp
pshufb L_aes_gcm_bswap_mask(%rip), %xmm8
pxor %xmm8, %xmm4
pshufd $0x4e, %xmm4, %xmm1
pshufd $0x4e, %xmm5, %xmm2
movdqa %xmm5, %xmm3
movdqa %xmm5, %xmm0
pclmulqdq $0x11, %xmm4, %xmm3
pclmulqdq $0x00, %xmm4, %xmm0
pxor %xmm4, %xmm1
pxor %xmm5, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm0, %xmm7
movdqa %xmm3, %xmm4
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm7
pxor %xmm1, %xmm4
movdqa %xmm7, %xmm0
movdqa %xmm4, %xmm1
psrld $31, %xmm0
psrld $31, %xmm1
pslld $0x01, %xmm7
pslld $0x01, %xmm4
movdqa %xmm0, %xmm2
pslldq $4, %xmm0
psrldq $12, %xmm2
pslldq $4, %xmm1
por %xmm2, %xmm4
por %xmm0, %xmm7
por %xmm1, %xmm4
movdqa %xmm7, %xmm0
movdqa %xmm7, %xmm1
movdqa %xmm7, %xmm2
pslld $31, %xmm0
pslld $30, %xmm1
pslld $25, %xmm2
pxor %xmm1, %xmm0
pxor %xmm2, %xmm0
movdqa %xmm0, %xmm1
psrldq $4, %xmm1
pslldq $12, %xmm0
pxor %xmm0, %xmm7
movdqa %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
psrld $0x01, %xmm2
psrld $2, %xmm3
psrld $7, %xmm0
pxor %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm2
pxor %xmm7, %xmm2
pxor %xmm2, %xmm4
L_AES_GCM_encrypt_aesni_calc_iv_done:
# T = Encrypt counter
pxor %xmm0, %xmm0
shll $3, %edx
pinsrq $0x00, %rdx, %xmm0
pxor %xmm0, %xmm4
pshufd $0x4e, %xmm4, %xmm1
pshufd $0x4e, %xmm5, %xmm2
movdqa %xmm5, %xmm3
movdqa %xmm5, %xmm0
pclmulqdq $0x11, %xmm4, %xmm3
pclmulqdq $0x00, %xmm4, %xmm0
pxor %xmm4, %xmm1
pxor %xmm5, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm0, %xmm7
movdqa %xmm3, %xmm4
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm7
pxor %xmm1, %xmm4
movdqa %xmm7, %xmm0
movdqa %xmm4, %xmm1
psrld $31, %xmm0
psrld $31, %xmm1
pslld $0x01, %xmm7
pslld $0x01, %xmm4
movdqa %xmm0, %xmm2
pslldq $4, %xmm0
psrldq $12, %xmm2
pslldq $4, %xmm1
por %xmm2, %xmm4
por %xmm0, %xmm7
por %xmm1, %xmm4
movdqa %xmm7, %xmm0
movdqa %xmm7, %xmm1
movdqa %xmm7, %xmm2
pslld $31, %xmm0
pslld $30, %xmm1
pslld $25, %xmm2
pxor %xmm1, %xmm0
pxor %xmm2, %xmm0
movdqa %xmm0, %xmm1
psrldq $4, %xmm1
pslldq $12, %xmm0
pxor %xmm0, %xmm7
movdqa %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
psrld $0x01, %xmm2
psrld $2, %xmm3
psrld $7, %xmm0
pxor %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm2
pxor %xmm7, %xmm2
pxor %xmm2, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm4
# Encrypt counter
movdqa (%r15), %xmm8
pxor %xmm4, %xmm8
aesenc 16(%r15), %xmm8
aesenc 32(%r15), %xmm8
aesenc 48(%r15), %xmm8
aesenc 64(%r15), %xmm8
aesenc 80(%r15), %xmm8
aesenc 96(%r15), %xmm8
aesenc 112(%r15), %xmm8
aesenc 128(%r15), %xmm8
aesenc 144(%r15), %xmm8
cmpl $11, %r10d
movdqa 160(%r15), %xmm9
jl L_AES_GCM_encrypt_aesni_calc_iv_2_aesenc_avx_last
aesenc %xmm9, %xmm8
aesenc 176(%r15), %xmm8
cmpl $13, %r10d
movdqa 192(%r15), %xmm9
jl L_AES_GCM_encrypt_aesni_calc_iv_2_aesenc_avx_last
aesenc %xmm9, %xmm8
aesenc 208(%r15), %xmm8
movdqa 224(%r15), %xmm9
L_AES_GCM_encrypt_aesni_calc_iv_2_aesenc_avx_last:
aesenclast %xmm9, %xmm8
movdqu %xmm8, 144(%rsp)
L_AES_GCM_encrypt_aesni_iv_done:
# Additional authentication data
movl %r11d, %edx
cmpl $0x00, %edx
je L_AES_GCM_encrypt_aesni_calc_aad_done
xorl %ecx, %ecx
cmpl $16, %edx
jl L_AES_GCM_encrypt_aesni_calc_aad_lt16
andl $0xfffffff0, %edx
L_AES_GCM_encrypt_aesni_calc_aad_16_loop:
movdqu (%r12,%rcx,1), %xmm8
pshufb L_aes_gcm_bswap_mask(%rip), %xmm8
pxor %xmm8, %xmm6
pshufd $0x4e, %xmm6, %xmm1
pshufd $0x4e, %xmm5, %xmm2
movdqa %xmm5, %xmm3
movdqa %xmm5, %xmm0
pclmulqdq $0x11, %xmm6, %xmm3
pclmulqdq $0x00, %xmm6, %xmm0
pxor %xmm6, %xmm1
pxor %xmm5, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm0, %xmm7
movdqa %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm7
pxor %xmm1, %xmm6
movdqa %xmm7, %xmm0
movdqa %xmm6, %xmm1
psrld $31, %xmm0
psrld $31, %xmm1
pslld $0x01, %xmm7
pslld $0x01, %xmm6
movdqa %xmm0, %xmm2
pslldq $4, %xmm0
psrldq $12, %xmm2
pslldq $4, %xmm1
por %xmm2, %xmm6
por %xmm0, %xmm7
por %xmm1, %xmm6
movdqa %xmm7, %xmm0
movdqa %xmm7, %xmm1
movdqa %xmm7, %xmm2
pslld $31, %xmm0
pslld $30, %xmm1
pslld $25, %xmm2
pxor %xmm1, %xmm0
pxor %xmm2, %xmm0
movdqa %xmm0, %xmm1
psrldq $4, %xmm1
pslldq $12, %xmm0
pxor %xmm0, %xmm7
movdqa %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
psrld $0x01, %xmm2
psrld $2, %xmm3
psrld $7, %xmm0
pxor %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm2
pxor %xmm7, %xmm2
pxor %xmm2, %xmm6
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_aesni_calc_aad_16_loop
movl %r11d, %edx
cmpl %edx, %ecx
je L_AES_GCM_encrypt_aesni_calc_aad_done
L_AES_GCM_encrypt_aesni_calc_aad_lt16:
subq $16, %rsp
pxor %xmm8, %xmm8
xorl %ebx, %ebx
movdqu %xmm8, (%rsp)
L_AES_GCM_encrypt_aesni_calc_aad_loop:
movzbl (%r12,%rcx,1), %r13d
movb %r13b, (%rsp,%rbx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_aesni_calc_aad_loop
movdqu (%rsp), %xmm8
addq $16, %rsp
pshufb L_aes_gcm_bswap_mask(%rip), %xmm8
pxor %xmm8, %xmm6
pshufd $0x4e, %xmm6, %xmm1
pshufd $0x4e, %xmm5, %xmm2
movdqa %xmm5, %xmm3
movdqa %xmm5, %xmm0
pclmulqdq $0x11, %xmm6, %xmm3
pclmulqdq $0x00, %xmm6, %xmm0
pxor %xmm6, %xmm1
pxor %xmm5, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm0, %xmm7
movdqa %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm7
pxor %xmm1, %xmm6
movdqa %xmm7, %xmm0
movdqa %xmm6, %xmm1
psrld $31, %xmm0
psrld $31, %xmm1
pslld $0x01, %xmm7
pslld $0x01, %xmm6
movdqa %xmm0, %xmm2
pslldq $4, %xmm0
psrldq $12, %xmm2
pslldq $4, %xmm1
por %xmm2, %xmm6
por %xmm0, %xmm7
por %xmm1, %xmm6
movdqa %xmm7, %xmm0
movdqa %xmm7, %xmm1
movdqa %xmm7, %xmm2
pslld $31, %xmm0
pslld $30, %xmm1
pslld $25, %xmm2
pxor %xmm1, %xmm0
pxor %xmm2, %xmm0
movdqa %xmm0, %xmm1
psrldq $4, %xmm1
pslldq $12, %xmm0
pxor %xmm0, %xmm7
movdqa %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
psrld $0x01, %xmm2
psrld $2, %xmm3
psrld $7, %xmm0
pxor %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm2
pxor %xmm7, %xmm2
pxor %xmm2, %xmm6
L_AES_GCM_encrypt_aesni_calc_aad_done:
# Calculate counter and H
pshufb L_aes_gcm_bswap_epi64(%rip), %xmm4
movdqa %xmm5, %xmm9
paddd L_aes_gcm_one(%rip), %xmm4
movdqa %xmm5, %xmm8
movdqu %xmm4, 128(%rsp)
psrlq $63, %xmm9
psllq $0x01, %xmm8
pslldq $8, %xmm9
por %xmm9, %xmm8
pshufd $0xff, %xmm5, %xmm5
psrad $31, %xmm5
pand L_aes_gcm_mod2_128(%rip), %xmm5
pxor %xmm8, %xmm5
xorq %rbx, %rbx
cmpl $0x80, %r9d
movl %r9d, %r13d
jl L_AES_GCM_encrypt_aesni_done_128
andl $0xffffff80, %r13d
movdqa %xmm6, %xmm2
# H ^ 1
movdqu %xmm5, (%rsp)
# H ^ 2
pshufd $0x4e, %xmm5, %xmm9
pshufd $0x4e, %xmm5, %xmm10
movdqa %xmm5, %xmm11
movdqa %xmm5, %xmm8
pclmulqdq $0x11, %xmm5, %xmm11
pclmulqdq $0x00, %xmm5, %xmm8
pxor %xmm5, %xmm9
pxor %xmm5, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm0
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm0
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm0
movdqu %xmm0, 16(%rsp)
# H ^ 3
pshufd $0x4e, %xmm5, %xmm9
pshufd $0x4e, %xmm0, %xmm10
movdqa %xmm0, %xmm11
movdqa %xmm0, %xmm8
pclmulqdq $0x11, %xmm5, %xmm11
pclmulqdq $0x00, %xmm5, %xmm8
pxor %xmm5, %xmm9
pxor %xmm0, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm1
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm1
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm1
movdqu %xmm1, 32(%rsp)
# H ^ 4
pshufd $0x4e, %xmm0, %xmm9
pshufd $0x4e, %xmm0, %xmm10
movdqa %xmm0, %xmm11
movdqa %xmm0, %xmm8
pclmulqdq $0x11, %xmm0, %xmm11
pclmulqdq $0x00, %xmm0, %xmm8
pxor %xmm0, %xmm9
pxor %xmm0, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm3
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm3
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm3
movdqu %xmm3, 48(%rsp)
# H ^ 5
pshufd $0x4e, %xmm0, %xmm9
pshufd $0x4e, %xmm1, %xmm10
movdqa %xmm1, %xmm11
movdqa %xmm1, %xmm8
pclmulqdq $0x11, %xmm0, %xmm11
pclmulqdq $0x00, %xmm0, %xmm8
pxor %xmm0, %xmm9
pxor %xmm1, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm7
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm7
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm7
movdqu %xmm7, 64(%rsp)
# H ^ 6
pshufd $0x4e, %xmm1, %xmm9
pshufd $0x4e, %xmm1, %xmm10
movdqa %xmm1, %xmm11
movdqa %xmm1, %xmm8
pclmulqdq $0x11, %xmm1, %xmm11
pclmulqdq $0x00, %xmm1, %xmm8
pxor %xmm1, %xmm9
pxor %xmm1, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm7
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm7
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm7
movdqu %xmm7, 80(%rsp)
# H ^ 7
pshufd $0x4e, %xmm1, %xmm9
pshufd $0x4e, %xmm3, %xmm10
movdqa %xmm3, %xmm11
movdqa %xmm3, %xmm8
pclmulqdq $0x11, %xmm1, %xmm11
pclmulqdq $0x00, %xmm1, %xmm8
pxor %xmm1, %xmm9
pxor %xmm3, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm7
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm7
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm7
movdqu %xmm7, 96(%rsp)
# H ^ 8
pshufd $0x4e, %xmm3, %xmm9
pshufd $0x4e, %xmm3, %xmm10
movdqa %xmm3, %xmm11
movdqa %xmm3, %xmm8
pclmulqdq $0x11, %xmm3, %xmm11
pclmulqdq $0x00, %xmm3, %xmm8
pxor %xmm3, %xmm9
pxor %xmm3, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm7
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm7
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm7
movdqu %xmm7, 112(%rsp)
# First 128 bytes of input
movdqu 128(%rsp), %xmm8
movdqa L_aes_gcm_bswap_epi64(%rip), %xmm1
movdqa %xmm8, %xmm0
pshufb %xmm1, %xmm8
movdqa %xmm0, %xmm9
paddd L_aes_gcm_one(%rip), %xmm9
pshufb %xmm1, %xmm9
movdqa %xmm0, %xmm10
paddd L_aes_gcm_two(%rip), %xmm10
pshufb %xmm1, %xmm10
movdqa %xmm0, %xmm11
paddd L_aes_gcm_three(%rip), %xmm11
pshufb %xmm1, %xmm11
movdqa %xmm0, %xmm12
paddd L_aes_gcm_four(%rip), %xmm12
pshufb %xmm1, %xmm12
movdqa %xmm0, %xmm13
paddd L_aes_gcm_five(%rip), %xmm13
pshufb %xmm1, %xmm13
movdqa %xmm0, %xmm14
paddd L_aes_gcm_six(%rip), %xmm14
pshufb %xmm1, %xmm14
movdqa %xmm0, %xmm15
paddd L_aes_gcm_seven(%rip), %xmm15
pshufb %xmm1, %xmm15
paddd L_aes_gcm_eight(%rip), %xmm0
movdqa (%r15), %xmm7
movdqu %xmm0, 128(%rsp)
pxor %xmm7, %xmm8
pxor %xmm7, %xmm9
pxor %xmm7, %xmm10
pxor %xmm7, %xmm11
pxor %xmm7, %xmm12
pxor %xmm7, %xmm13
pxor %xmm7, %xmm14
pxor %xmm7, %xmm15
movdqa 16(%r15), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 32(%r15), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 48(%r15), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 64(%r15), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 80(%r15), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 96(%r15), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 112(%r15), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 128(%r15), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 144(%r15), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
cmpl $11, %r10d
movdqa 160(%r15), %xmm7
jl L_AES_GCM_encrypt_aesni_enc_done
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 176(%r15), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
cmpl $13, %r10d
movdqa 192(%r15), %xmm7
jl L_AES_GCM_encrypt_aesni_enc_done
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 208(%r15), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 224(%r15), %xmm7
L_AES_GCM_encrypt_aesni_enc_done:
aesenclast %xmm7, %xmm8
aesenclast %xmm7, %xmm9
movdqu (%rdi), %xmm0
movdqu 16(%rdi), %xmm1
pxor %xmm0, %xmm8
pxor %xmm1, %xmm9
movdqu %xmm8, (%rsi)
movdqu %xmm9, 16(%rsi)
aesenclast %xmm7, %xmm10
aesenclast %xmm7, %xmm11
movdqu 32(%rdi), %xmm0
movdqu 48(%rdi), %xmm1
pxor %xmm0, %xmm10
pxor %xmm1, %xmm11
movdqu %xmm10, 32(%rsi)
movdqu %xmm11, 48(%rsi)
aesenclast %xmm7, %xmm12
aesenclast %xmm7, %xmm13
movdqu 64(%rdi), %xmm0
movdqu 80(%rdi), %xmm1
pxor %xmm0, %xmm12
pxor %xmm1, %xmm13
movdqu %xmm12, 64(%rsi)
movdqu %xmm13, 80(%rsi)
aesenclast %xmm7, %xmm14
aesenclast %xmm7, %xmm15
movdqu 96(%rdi), %xmm0
movdqu 112(%rdi), %xmm1
pxor %xmm0, %xmm14
pxor %xmm1, %xmm15
movdqu %xmm14, 96(%rsi)
movdqu %xmm15, 112(%rsi)
cmpl $0x80, %r13d
movl $0x80, %ebx
jle L_AES_GCM_encrypt_aesni_end_128
# More 128 bytes of input
L_AES_GCM_encrypt_aesni_ghash_128:
leaq (%rdi,%rbx,1), %rcx
leaq (%rsi,%rbx,1), %rdx
movdqu 128(%rsp), %xmm8
movdqa L_aes_gcm_bswap_epi64(%rip), %xmm1
movdqa %xmm8, %xmm0
pshufb %xmm1, %xmm8
movdqa %xmm0, %xmm9
paddd L_aes_gcm_one(%rip), %xmm9
pshufb %xmm1, %xmm9
movdqa %xmm0, %xmm10
paddd L_aes_gcm_two(%rip), %xmm10
pshufb %xmm1, %xmm10
movdqa %xmm0, %xmm11
paddd L_aes_gcm_three(%rip), %xmm11
pshufb %xmm1, %xmm11
movdqa %xmm0, %xmm12
paddd L_aes_gcm_four(%rip), %xmm12
pshufb %xmm1, %xmm12
movdqa %xmm0, %xmm13
paddd L_aes_gcm_five(%rip), %xmm13
pshufb %xmm1, %xmm13
movdqa %xmm0, %xmm14
paddd L_aes_gcm_six(%rip), %xmm14
pshufb %xmm1, %xmm14
movdqa %xmm0, %xmm15
paddd L_aes_gcm_seven(%rip), %xmm15
pshufb %xmm1, %xmm15
paddd L_aes_gcm_eight(%rip), %xmm0
movdqa (%r15), %xmm7
movdqu %xmm0, 128(%rsp)
pxor %xmm7, %xmm8
pxor %xmm7, %xmm9
pxor %xmm7, %xmm10
pxor %xmm7, %xmm11
pxor %xmm7, %xmm12
pxor %xmm7, %xmm13
pxor %xmm7, %xmm14
pxor %xmm7, %xmm15
movdqu 112(%rsp), %xmm7
movdqu -128(%rdx), %xmm0
aesenc 16(%r15), %xmm8
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
pxor %xmm2, %xmm0
pshufd $0x4e, %xmm7, %xmm1
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm7, %xmm1
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm3
pclmulqdq $0x11, %xmm7, %xmm3
aesenc 16(%r15), %xmm9
aesenc 16(%r15), %xmm10
movdqa %xmm0, %xmm2
pclmulqdq $0x00, %xmm7, %xmm2
aesenc 16(%r15), %xmm11
aesenc 16(%r15), %xmm12
pclmulqdq $0x00, %xmm5, %xmm1
aesenc 16(%r15), %xmm13
aesenc 16(%r15), %xmm14
aesenc 16(%r15), %xmm15
pxor %xmm2, %xmm1
pxor %xmm3, %xmm1
movdqu 96(%rsp), %xmm7
movdqu -112(%rdx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 32(%r15), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 32(%r15), %xmm9
aesenc 32(%r15), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 32(%r15), %xmm11
aesenc 32(%r15), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 32(%r15), %xmm13
aesenc 32(%r15), %xmm14
aesenc 32(%r15), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu 80(%rsp), %xmm7
movdqu -96(%rdx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 48(%r15), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 48(%r15), %xmm9
aesenc 48(%r15), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 48(%r15), %xmm11
aesenc 48(%r15), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 48(%r15), %xmm13
aesenc 48(%r15), %xmm14
aesenc 48(%r15), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu 64(%rsp), %xmm7
movdqu -80(%rdx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 64(%r15), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 64(%r15), %xmm9
aesenc 64(%r15), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 64(%r15), %xmm11
aesenc 64(%r15), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 64(%r15), %xmm13
aesenc 64(%r15), %xmm14
aesenc 64(%r15), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu 48(%rsp), %xmm7
movdqu -64(%rdx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 80(%r15), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 80(%r15), %xmm9
aesenc 80(%r15), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 80(%r15), %xmm11
aesenc 80(%r15), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 80(%r15), %xmm13
aesenc 80(%r15), %xmm14
aesenc 80(%r15), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu 32(%rsp), %xmm7
movdqu -48(%rdx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 96(%r15), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 96(%r15), %xmm9
aesenc 96(%r15), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 96(%r15), %xmm11
aesenc 96(%r15), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 96(%r15), %xmm13
aesenc 96(%r15), %xmm14
aesenc 96(%r15), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu 16(%rsp), %xmm7
movdqu -32(%rdx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 112(%r15), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 112(%r15), %xmm9
aesenc 112(%r15), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 112(%r15), %xmm11
aesenc 112(%r15), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 112(%r15), %xmm13
aesenc 112(%r15), %xmm14
aesenc 112(%r15), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu (%rsp), %xmm7
movdqu -16(%rdx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 128(%r15), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 128(%r15), %xmm9
aesenc 128(%r15), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 128(%r15), %xmm11
aesenc 128(%r15), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 128(%r15), %xmm13
aesenc 128(%r15), %xmm14
aesenc 128(%r15), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqa %xmm1, %xmm5
psrldq $8, %xmm1
pslldq $8, %xmm5
aesenc 144(%r15), %xmm8
pxor %xmm5, %xmm2
pxor %xmm1, %xmm3
movdqa %xmm2, %xmm7
movdqa %xmm2, %xmm4
movdqa %xmm2, %xmm5
aesenc 144(%r15), %xmm9
pslld $31, %xmm7
pslld $30, %xmm4
pslld $25, %xmm5
aesenc 144(%r15), %xmm10
pxor %xmm4, %xmm7
pxor %xmm5, %xmm7
aesenc 144(%r15), %xmm11
movdqa %xmm7, %xmm4
pslldq $12, %xmm7
psrldq $4, %xmm4
aesenc 144(%r15), %xmm12
pxor %xmm7, %xmm2
movdqa %xmm2, %xmm5
movdqa %xmm2, %xmm1
movdqa %xmm2, %xmm0
aesenc 144(%r15), %xmm13
psrld $0x01, %xmm5
psrld $2, %xmm1
psrld $7, %xmm0
aesenc 144(%r15), %xmm14
pxor %xmm1, %xmm5
pxor %xmm0, %xmm5
aesenc 144(%r15), %xmm15
pxor %xmm4, %xmm5
pxor %xmm5, %xmm2
pxor %xmm3, %xmm2
cmpl $11, %r10d
movdqa 160(%r15), %xmm7
jl L_AES_GCM_encrypt_aesni_aesenc_128_ghash_avx_done
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 176(%r15), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
cmpl $13, %r10d
movdqa 192(%r15), %xmm7
jl L_AES_GCM_encrypt_aesni_aesenc_128_ghash_avx_done
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 208(%r15), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 224(%r15), %xmm7
L_AES_GCM_encrypt_aesni_aesenc_128_ghash_avx_done:
aesenclast %xmm7, %xmm8
aesenclast %xmm7, %xmm9
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
pxor %xmm0, %xmm8
pxor %xmm1, %xmm9
movdqu %xmm8, (%rdx)
movdqu %xmm9, 16(%rdx)
aesenclast %xmm7, %xmm10
aesenclast %xmm7, %xmm11
movdqu 32(%rcx), %xmm0
movdqu 48(%rcx), %xmm1
pxor %xmm0, %xmm10
pxor %xmm1, %xmm11
movdqu %xmm10, 32(%rdx)
movdqu %xmm11, 48(%rdx)
aesenclast %xmm7, %xmm12
aesenclast %xmm7, %xmm13
movdqu 64(%rcx), %xmm0
movdqu 80(%rcx), %xmm1
pxor %xmm0, %xmm12
pxor %xmm1, %xmm13
movdqu %xmm12, 64(%rdx)
movdqu %xmm13, 80(%rdx)
aesenclast %xmm7, %xmm14
aesenclast %xmm7, %xmm15
movdqu 96(%rcx), %xmm0
movdqu 112(%rcx), %xmm1
pxor %xmm0, %xmm14
pxor %xmm1, %xmm15
movdqu %xmm14, 96(%rdx)
movdqu %xmm15, 112(%rdx)
addl $0x80, %ebx
cmpl %r13d, %ebx
jl L_AES_GCM_encrypt_aesni_ghash_128
L_AES_GCM_encrypt_aesni_end_128:
movdqa L_aes_gcm_bswap_mask(%rip), %xmm4
pshufb %xmm4, %xmm8
pshufb %xmm4, %xmm9
pshufb %xmm4, %xmm10
pshufb %xmm4, %xmm11
pxor %xmm2, %xmm8
pshufb %xmm4, %xmm12
pshufb %xmm4, %xmm13
pshufb %xmm4, %xmm14
pshufb %xmm4, %xmm15
movdqu 112(%rsp), %xmm7
pshufd $0x4e, %xmm8, %xmm1
pshufd $0x4e, %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
pclmulqdq $0x11, %xmm8, %xmm3
pclmulqdq $0x00, %xmm8, %xmm0
pxor %xmm8, %xmm1
pxor %xmm7, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm0, %xmm4
movdqa %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm4
pxor %xmm1, %xmm6
movdqu 96(%rsp), %xmm7
pshufd $0x4e, %xmm9, %xmm1
pshufd $0x4e, %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
pclmulqdq $0x11, %xmm9, %xmm3
pclmulqdq $0x00, %xmm9, %xmm0
pxor %xmm9, %xmm1
pxor %xmm7, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
pxor %xmm0, %xmm4
pxor %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm4
pxor %xmm1, %xmm6
movdqu 80(%rsp), %xmm7
pshufd $0x4e, %xmm10, %xmm1
pshufd $0x4e, %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
pclmulqdq $0x11, %xmm10, %xmm3
pclmulqdq $0x00, %xmm10, %xmm0
pxor %xmm10, %xmm1
pxor %xmm7, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
pxor %xmm0, %xmm4
pxor %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm4
pxor %xmm1, %xmm6
movdqu 64(%rsp), %xmm7
pshufd $0x4e, %xmm11, %xmm1
pshufd $0x4e, %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
pclmulqdq $0x11, %xmm11, %xmm3
pclmulqdq $0x00, %xmm11, %xmm0
pxor %xmm11, %xmm1
pxor %xmm7, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
pxor %xmm0, %xmm4
pxor %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm4
pxor %xmm1, %xmm6
movdqu 48(%rsp), %xmm7
pshufd $0x4e, %xmm12, %xmm1
pshufd $0x4e, %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
pclmulqdq $0x11, %xmm12, %xmm3
pclmulqdq $0x00, %xmm12, %xmm0
pxor %xmm12, %xmm1
pxor %xmm7, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
pxor %xmm0, %xmm4
pxor %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm4
pxor %xmm1, %xmm6
movdqu 32(%rsp), %xmm7
pshufd $0x4e, %xmm13, %xmm1
pshufd $0x4e, %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
pclmulqdq $0x11, %xmm13, %xmm3
pclmulqdq $0x00, %xmm13, %xmm0
pxor %xmm13, %xmm1
pxor %xmm7, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
pxor %xmm0, %xmm4
pxor %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm4
pxor %xmm1, %xmm6
movdqu 16(%rsp), %xmm7
pshufd $0x4e, %xmm14, %xmm1
pshufd $0x4e, %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
pclmulqdq $0x11, %xmm14, %xmm3
pclmulqdq $0x00, %xmm14, %xmm0
pxor %xmm14, %xmm1
pxor %xmm7, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
pxor %xmm0, %xmm4
pxor %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm4
pxor %xmm1, %xmm6
movdqu (%rsp), %xmm7
pshufd $0x4e, %xmm15, %xmm1
pshufd $0x4e, %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
pclmulqdq $0x11, %xmm15, %xmm3
pclmulqdq $0x00, %xmm15, %xmm0
pxor %xmm15, %xmm1
pxor %xmm7, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
pxor %xmm0, %xmm4
pxor %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm4
pxor %xmm1, %xmm6
movdqa %xmm4, %xmm0
movdqa %xmm4, %xmm1
movdqa %xmm4, %xmm2
pslld $31, %xmm0
pslld $30, %xmm1
pslld $25, %xmm2
pxor %xmm1, %xmm0
pxor %xmm2, %xmm0
movdqa %xmm0, %xmm1
psrldq $4, %xmm1
pslldq $12, %xmm0
pxor %xmm0, %xmm4
movdqa %xmm4, %xmm2
movdqa %xmm4, %xmm3
movdqa %xmm4, %xmm0
psrld $0x01, %xmm2
psrld $2, %xmm3
psrld $7, %xmm0
pxor %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm2
pxor %xmm4, %xmm2
pxor %xmm2, %xmm6
movdqu (%rsp), %xmm5
L_AES_GCM_encrypt_aesni_done_128:
movl %r9d, %edx
cmpl %edx, %ebx
jge L_AES_GCM_encrypt_aesni_done_enc
movl %r9d, %r13d
andl $0xfffffff0, %r13d
cmpl %r13d, %ebx
jge L_AES_GCM_encrypt_aesni_last_block_done
leaq (%rdi,%rbx,1), %rcx
leaq (%rsi,%rbx,1), %rdx
movdqu 128(%rsp), %xmm8
movdqa %xmm8, %xmm9
pshufb L_aes_gcm_bswap_epi64(%rip), %xmm8
paddd L_aes_gcm_one(%rip), %xmm9
pxor (%r15), %xmm8
movdqu %xmm9, 128(%rsp)
aesenc 16(%r15), %xmm8
aesenc 32(%r15), %xmm8
aesenc 48(%r15), %xmm8
aesenc 64(%r15), %xmm8
aesenc 80(%r15), %xmm8
aesenc 96(%r15), %xmm8
aesenc 112(%r15), %xmm8
aesenc 128(%r15), %xmm8
aesenc 144(%r15), %xmm8
cmpl $11, %r10d
movdqa 160(%r15), %xmm9
jl L_AES_GCM_encrypt_aesni_aesenc_block_aesenc_avx_last
aesenc %xmm9, %xmm8
aesenc 176(%r15), %xmm8
cmpl $13, %r10d
movdqa 192(%r15), %xmm9
jl L_AES_GCM_encrypt_aesni_aesenc_block_aesenc_avx_last
aesenc %xmm9, %xmm8
aesenc 208(%r15), %xmm8
movdqa 224(%r15), %xmm9
L_AES_GCM_encrypt_aesni_aesenc_block_aesenc_avx_last:
aesenclast %xmm9, %xmm8
movdqu (%rcx), %xmm9
pxor %xmm9, %xmm8
movdqu %xmm8, (%rdx)
pshufb L_aes_gcm_bswap_mask(%rip), %xmm8
pxor %xmm8, %xmm6
addl $16, %ebx
cmpl %r13d, %ebx
jge L_AES_GCM_encrypt_aesni_last_block_ghash
L_AES_GCM_encrypt_aesni_last_block_start:
leaq (%rdi,%rbx,1), %rcx
leaq (%rsi,%rbx,1), %rdx
movdqu 128(%rsp), %xmm8
movdqa %xmm8, %xmm9
pshufb L_aes_gcm_bswap_epi64(%rip), %xmm8
paddd L_aes_gcm_one(%rip), %xmm9
pxor (%r15), %xmm8
movdqu %xmm9, 128(%rsp)
movdqa %xmm6, %xmm10
pclmulqdq $16, %xmm5, %xmm10
aesenc 16(%r15), %xmm8
aesenc 32(%r15), %xmm8
movdqa %xmm6, %xmm11
pclmulqdq $0x01, %xmm5, %xmm11
aesenc 48(%r15), %xmm8
aesenc 64(%r15), %xmm8
movdqa %xmm6, %xmm12
pclmulqdq $0x00, %xmm5, %xmm12
aesenc 80(%r15), %xmm8
movdqa %xmm6, %xmm1
pclmulqdq $0x11, %xmm5, %xmm1
aesenc 96(%r15), %xmm8
pxor %xmm11, %xmm10
movdqa %xmm10, %xmm2
psrldq $8, %xmm10
pslldq $8, %xmm2
aesenc 112(%r15), %xmm8
movdqa %xmm1, %xmm3
pxor %xmm12, %xmm2
pxor %xmm10, %xmm3
movdqa L_aes_gcm_mod2_128(%rip), %xmm0
movdqa %xmm2, %xmm11
pclmulqdq $16, %xmm0, %xmm11
aesenc 128(%r15), %xmm8
pshufd $0x4e, %xmm2, %xmm10
pxor %xmm11, %xmm10
movdqa %xmm10, %xmm11
pclmulqdq $16, %xmm0, %xmm11
aesenc 144(%r15), %xmm8
pshufd $0x4e, %xmm10, %xmm6
pxor %xmm11, %xmm6
pxor %xmm3, %xmm6
cmpl $11, %r10d
movdqa 160(%r15), %xmm9
jl L_AES_GCM_encrypt_aesni_aesenc_gfmul_last
aesenc %xmm9, %xmm8
aesenc 176(%r15), %xmm8
cmpl $13, %r10d
movdqa 192(%r15), %xmm9
jl L_AES_GCM_encrypt_aesni_aesenc_gfmul_last
aesenc %xmm9, %xmm8
aesenc 208(%r15), %xmm8
movdqa 224(%r15), %xmm9
L_AES_GCM_encrypt_aesni_aesenc_gfmul_last:
aesenclast %xmm9, %xmm8
movdqu (%rcx), %xmm9
pxor %xmm9, %xmm8
movdqu %xmm8, (%rdx)
pshufb L_aes_gcm_bswap_mask(%rip), %xmm8
pxor %xmm8, %xmm6
addl $16, %ebx
cmpl %r13d, %ebx
jl L_AES_GCM_encrypt_aesni_last_block_start
L_AES_GCM_encrypt_aesni_last_block_ghash:
pshufd $0x4e, %xmm5, %xmm9
pshufd $0x4e, %xmm6, %xmm10
movdqa %xmm6, %xmm11
movdqa %xmm6, %xmm8
pclmulqdq $0x11, %xmm5, %xmm11
pclmulqdq $0x00, %xmm5, %xmm8
pxor %xmm5, %xmm9
pxor %xmm6, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm6
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm6
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm6
L_AES_GCM_encrypt_aesni_last_block_done:
movl %r9d, %ecx
movl %ecx, %edx
andl $15, %ecx
jz L_AES_GCM_encrypt_aesni_aesenc_last15_enc_avx_done
movdqu 128(%rsp), %xmm4
pshufb L_aes_gcm_bswap_epi64(%rip), %xmm4
pxor (%r15), %xmm4
aesenc 16(%r15), %xmm4
aesenc 32(%r15), %xmm4
aesenc 48(%r15), %xmm4
aesenc 64(%r15), %xmm4
aesenc 80(%r15), %xmm4
aesenc 96(%r15), %xmm4
aesenc 112(%r15), %xmm4
aesenc 128(%r15), %xmm4
aesenc 144(%r15), %xmm4
cmpl $11, %r10d
movdqa 160(%r15), %xmm9
jl L_AES_GCM_encrypt_aesni_aesenc_last15_enc_avx_aesenc_avx_last
aesenc %xmm9, %xmm4
aesenc 176(%r15), %xmm4
cmpl $13, %r10d
movdqa 192(%r15), %xmm9
jl L_AES_GCM_encrypt_aesni_aesenc_last15_enc_avx_aesenc_avx_last
aesenc %xmm9, %xmm4
aesenc 208(%r15), %xmm4
movdqa 224(%r15), %xmm9
L_AES_GCM_encrypt_aesni_aesenc_last15_enc_avx_aesenc_avx_last:
aesenclast %xmm9, %xmm4
subq $16, %rsp
xorl %ecx, %ecx
movdqu %xmm4, (%rsp)
L_AES_GCM_encrypt_aesni_aesenc_last15_enc_avx_loop:
movzbl (%rdi,%rbx,1), %r13d
xorb (%rsp,%rcx,1), %r13b
movb %r13b, (%rsi,%rbx,1)
movb %r13b, (%rsp,%rcx,1)
incl %ebx
incl %ecx
cmpl %edx, %ebx
jl L_AES_GCM_encrypt_aesni_aesenc_last15_enc_avx_loop
xorq %r13, %r13
cmpl $16, %ecx
je L_AES_GCM_encrypt_aesni_aesenc_last15_enc_avx_finish_enc
L_AES_GCM_encrypt_aesni_aesenc_last15_enc_avx_byte_loop:
movb %r13b, (%rsp,%rcx,1)
incl %ecx
cmpl $16, %ecx
jl L_AES_GCM_encrypt_aesni_aesenc_last15_enc_avx_byte_loop
L_AES_GCM_encrypt_aesni_aesenc_last15_enc_avx_finish_enc:
movdqu (%rsp), %xmm4
addq $16, %rsp
pshufb L_aes_gcm_bswap_mask(%rip), %xmm4
pxor %xmm4, %xmm6
pshufd $0x4e, %xmm5, %xmm9
pshufd $0x4e, %xmm6, %xmm10
movdqa %xmm6, %xmm11
movdqa %xmm6, %xmm8
pclmulqdq $0x11, %xmm5, %xmm11
pclmulqdq $0x00, %xmm5, %xmm8
pxor %xmm5, %xmm9
pxor %xmm6, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm6
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm6
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm6
L_AES_GCM_encrypt_aesni_aesenc_last15_enc_avx_done:
L_AES_GCM_encrypt_aesni_done_enc:
movl %r9d, %edx
movl %r11d, %ecx
shlq $3, %rdx
shlq $3, %rcx
pinsrq $0x00, %rdx, %xmm0
pinsrq $0x01, %rcx, %xmm0
pxor %xmm0, %xmm6
pshufd $0x4e, %xmm5, %xmm9
pshufd $0x4e, %xmm6, %xmm10
movdqa %xmm6, %xmm11
movdqa %xmm6, %xmm8
pclmulqdq $0x11, %xmm5, %xmm11
pclmulqdq $0x00, %xmm5, %xmm8
pxor %xmm5, %xmm9
pxor %xmm6, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm6
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm6
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm6
pshufb L_aes_gcm_bswap_mask(%rip), %xmm6
movdqu 144(%rsp), %xmm0
pxor %xmm6, %xmm0
cmpl $16, %r14d
je L_AES_GCM_encrypt_aesni_store_tag_16
xorq %rcx, %rcx
movdqu %xmm0, (%rsp)
L_AES_GCM_encrypt_aesni_store_tag_loop:
movzbl (%rsp,%rcx,1), %r13d
movb %r13b, (%r8,%rcx,1)
incl %ecx
cmpl %r14d, %ecx
jne L_AES_GCM_encrypt_aesni_store_tag_loop
jmp L_AES_GCM_encrypt_aesni_store_tag_done
L_AES_GCM_encrypt_aesni_store_tag_16:
movdqu %xmm0, (%r8)
L_AES_GCM_encrypt_aesni_store_tag_done:
addq $0xa0, %rsp
popq %r15
popq %r14
popq %rbx
popq %r12
popq %r13
repz retq
#ifndef __APPLE__
.size AES_GCM_encrypt_aesni,.-AES_GCM_encrypt_aesni
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl AES_GCM_decrypt_aesni
.type AES_GCM_decrypt_aesni,@function
.align 16
AES_GCM_decrypt_aesni:
#else
.section __TEXT,__text
.globl _AES_GCM_decrypt_aesni
.p2align 4
_AES_GCM_decrypt_aesni:
#endif /* __APPLE__ */
pushq %r13
pushq %r12
pushq %rbx
pushq %r14
pushq %r15
pushq %rbp
movq %rdx, %r12
movq %rcx, %rax
movl 56(%rsp), %r11d
movl 64(%rsp), %ebx
movl 72(%rsp), %r14d
movq 80(%rsp), %r15
movl 88(%rsp), %r10d
movq 96(%rsp), %rbp
subq $0xa8, %rsp
pxor %xmm4, %xmm4
pxor %xmm6, %xmm6
cmpl $12, %ebx
movl %ebx, %edx
jne L_AES_GCM_decrypt_aesni_iv_not_12
        # Calculate values when IV is 12 bytes
# Set counter based on IV
movl $0x1000000, %ecx
pinsrq $0x00, (%rax), %xmm4
pinsrd $2, 8(%rax), %xmm4
pinsrd $3, %ecx, %xmm4
# H = Encrypt X(=0) and T = Encrypt counter
movdqa %xmm4, %xmm1
movdqa (%r15), %xmm5
pxor %xmm5, %xmm1
movdqa 16(%r15), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 32(%r15), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 48(%r15), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 64(%r15), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 80(%r15), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 96(%r15), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 112(%r15), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 128(%r15), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 144(%r15), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
cmpl $11, %r10d
movdqa 160(%r15), %xmm7
jl L_AES_GCM_decrypt_aesni_calc_iv_12_last
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 176(%r15), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
cmpl $13, %r10d
movdqa 192(%r15), %xmm7
jl L_AES_GCM_decrypt_aesni_calc_iv_12_last
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 208(%r15), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 224(%r15), %xmm7
L_AES_GCM_decrypt_aesni_calc_iv_12_last:
aesenclast %xmm7, %xmm5
aesenclast %xmm7, %xmm1
pshufb L_aes_gcm_bswap_mask(%rip), %xmm5
movdqu %xmm1, 144(%rsp)
jmp L_AES_GCM_decrypt_aesni_iv_done
L_AES_GCM_decrypt_aesni_iv_not_12:
# Calculate values when IV is not 12 bytes
# H = Encrypt X(=0)
movdqa (%r15), %xmm5
aesenc 16(%r15), %xmm5
aesenc 32(%r15), %xmm5
aesenc 48(%r15), %xmm5
aesenc 64(%r15), %xmm5
aesenc 80(%r15), %xmm5
aesenc 96(%r15), %xmm5
aesenc 112(%r15), %xmm5
aesenc 128(%r15), %xmm5
aesenc 144(%r15), %xmm5
cmpl $11, %r10d
movdqa 160(%r15), %xmm9
jl L_AES_GCM_decrypt_aesni_calc_iv_1_aesenc_avx_last
aesenc %xmm9, %xmm5
aesenc 176(%r15), %xmm5
cmpl $13, %r10d
movdqa 192(%r15), %xmm9
jl L_AES_GCM_decrypt_aesni_calc_iv_1_aesenc_avx_last
aesenc %xmm9, %xmm5
aesenc 208(%r15), %xmm5
movdqa 224(%r15), %xmm9
L_AES_GCM_decrypt_aesni_calc_iv_1_aesenc_avx_last:
aesenclast %xmm9, %xmm5
pshufb L_aes_gcm_bswap_mask(%rip), %xmm5
# Calc counter
# Initialization vector
cmpl $0x00, %edx
movq $0x00, %rcx
je L_AES_GCM_decrypt_aesni_calc_iv_done
cmpl $16, %edx
jl L_AES_GCM_decrypt_aesni_calc_iv_lt16
andl $0xfffffff0, %edx
L_AES_GCM_decrypt_aesni_calc_iv_16_loop:
movdqu (%rax,%rcx,1), %xmm8
pshufb L_aes_gcm_bswap_mask(%rip), %xmm8
pxor %xmm8, %xmm4
pshufd $0x4e, %xmm4, %xmm1
pshufd $0x4e, %xmm5, %xmm2
movdqa %xmm5, %xmm3
movdqa %xmm5, %xmm0
pclmulqdq $0x11, %xmm4, %xmm3
pclmulqdq $0x00, %xmm4, %xmm0
pxor %xmm4, %xmm1
pxor %xmm5, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm0, %xmm7
movdqa %xmm3, %xmm4
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm7
pxor %xmm1, %xmm4
movdqa %xmm7, %xmm0
movdqa %xmm4, %xmm1
psrld $31, %xmm0
psrld $31, %xmm1
pslld $0x01, %xmm7
pslld $0x01, %xmm4
movdqa %xmm0, %xmm2
pslldq $4, %xmm0
psrldq $12, %xmm2
pslldq $4, %xmm1
por %xmm2, %xmm4
por %xmm0, %xmm7
por %xmm1, %xmm4
movdqa %xmm7, %xmm0
movdqa %xmm7, %xmm1
movdqa %xmm7, %xmm2
pslld $31, %xmm0
pslld $30, %xmm1
pslld $25, %xmm2
pxor %xmm1, %xmm0
pxor %xmm2, %xmm0
movdqa %xmm0, %xmm1
psrldq $4, %xmm1
pslldq $12, %xmm0
pxor %xmm0, %xmm7
movdqa %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
psrld $0x01, %xmm2
psrld $2, %xmm3
psrld $7, %xmm0
pxor %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm2
pxor %xmm7, %xmm2
pxor %xmm2, %xmm4
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_aesni_calc_iv_16_loop
movl %ebx, %edx
cmpl %edx, %ecx
je L_AES_GCM_decrypt_aesni_calc_iv_done
L_AES_GCM_decrypt_aesni_calc_iv_lt16:
subq $16, %rsp
pxor %xmm8, %xmm8
xorl %ebx, %ebx
movdqu %xmm8, (%rsp)
L_AES_GCM_decrypt_aesni_calc_iv_loop:
movzbl (%rax,%rcx,1), %r13d
movb %r13b, (%rsp,%rbx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_aesni_calc_iv_loop
movdqu (%rsp), %xmm8
addq $16, %rsp
pshufb L_aes_gcm_bswap_mask(%rip), %xmm8
pxor %xmm8, %xmm4
pshufd $0x4e, %xmm4, %xmm1
pshufd $0x4e, %xmm5, %xmm2
movdqa %xmm5, %xmm3
movdqa %xmm5, %xmm0
pclmulqdq $0x11, %xmm4, %xmm3
pclmulqdq $0x00, %xmm4, %xmm0
pxor %xmm4, %xmm1
pxor %xmm5, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm0, %xmm7
movdqa %xmm3, %xmm4
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm7
pxor %xmm1, %xmm4
movdqa %xmm7, %xmm0
movdqa %xmm4, %xmm1
psrld $31, %xmm0
psrld $31, %xmm1
pslld $0x01, %xmm7
pslld $0x01, %xmm4
movdqa %xmm0, %xmm2
pslldq $4, %xmm0
psrldq $12, %xmm2
pslldq $4, %xmm1
por %xmm2, %xmm4
por %xmm0, %xmm7
por %xmm1, %xmm4
movdqa %xmm7, %xmm0
movdqa %xmm7, %xmm1
movdqa %xmm7, %xmm2
pslld $31, %xmm0
pslld $30, %xmm1
pslld $25, %xmm2
pxor %xmm1, %xmm0
pxor %xmm2, %xmm0
movdqa %xmm0, %xmm1
psrldq $4, %xmm1
pslldq $12, %xmm0
pxor %xmm0, %xmm7
movdqa %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
psrld $0x01, %xmm2
psrld $2, %xmm3
psrld $7, %xmm0
pxor %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm2
pxor %xmm7, %xmm2
pxor %xmm2, %xmm4
L_AES_GCM_decrypt_aesni_calc_iv_done:
# T = Encrypt counter
pxor %xmm0, %xmm0
shll $3, %edx
pinsrq $0x00, %rdx, %xmm0
pxor %xmm0, %xmm4
pshufd $0x4e, %xmm4, %xmm1
pshufd $0x4e, %xmm5, %xmm2
movdqa %xmm5, %xmm3
movdqa %xmm5, %xmm0
pclmulqdq $0x11, %xmm4, %xmm3
pclmulqdq $0x00, %xmm4, %xmm0
pxor %xmm4, %xmm1
pxor %xmm5, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm0, %xmm7
movdqa %xmm3, %xmm4
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm7
pxor %xmm1, %xmm4
movdqa %xmm7, %xmm0
movdqa %xmm4, %xmm1
psrld $31, %xmm0
psrld $31, %xmm1
pslld $0x01, %xmm7
pslld $0x01, %xmm4
movdqa %xmm0, %xmm2
pslldq $4, %xmm0
psrldq $12, %xmm2
pslldq $4, %xmm1
por %xmm2, %xmm4
por %xmm0, %xmm7
por %xmm1, %xmm4
movdqa %xmm7, %xmm0
movdqa %xmm7, %xmm1
movdqa %xmm7, %xmm2
pslld $31, %xmm0
pslld $30, %xmm1
pslld $25, %xmm2
pxor %xmm1, %xmm0
pxor %xmm2, %xmm0
movdqa %xmm0, %xmm1
psrldq $4, %xmm1
pslldq $12, %xmm0
pxor %xmm0, %xmm7
movdqa %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
psrld $0x01, %xmm2
psrld $2, %xmm3
psrld $7, %xmm0
pxor %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm2
pxor %xmm7, %xmm2
pxor %xmm2, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm4
# Encrypt counter
movdqa (%r15), %xmm8
pxor %xmm4, %xmm8
aesenc 16(%r15), %xmm8
aesenc 32(%r15), %xmm8
aesenc 48(%r15), %xmm8
aesenc 64(%r15), %xmm8
aesenc 80(%r15), %xmm8
aesenc 96(%r15), %xmm8
aesenc 112(%r15), %xmm8
aesenc 128(%r15), %xmm8
aesenc 144(%r15), %xmm8
cmpl $11, %r10d
movdqa 160(%r15), %xmm9
jl L_AES_GCM_decrypt_aesni_calc_iv_2_aesenc_avx_last
aesenc %xmm9, %xmm8
aesenc 176(%r15), %xmm8
cmpl $13, %r10d
movdqa 192(%r15), %xmm9
jl L_AES_GCM_decrypt_aesni_calc_iv_2_aesenc_avx_last
aesenc %xmm9, %xmm8
aesenc 208(%r15), %xmm8
movdqa 224(%r15), %xmm9
L_AES_GCM_decrypt_aesni_calc_iv_2_aesenc_avx_last:
aesenclast %xmm9, %xmm8
movdqu %xmm8, 144(%rsp)
L_AES_GCM_decrypt_aesni_iv_done:
# Additional authentication data
movl %r11d, %edx
cmpl $0x00, %edx
je L_AES_GCM_decrypt_aesni_calc_aad_done
xorl %ecx, %ecx
cmpl $16, %edx
jl L_AES_GCM_decrypt_aesni_calc_aad_lt16
andl $0xfffffff0, %edx
L_AES_GCM_decrypt_aesni_calc_aad_16_loop:
movdqu (%r12,%rcx,1), %xmm8
pshufb L_aes_gcm_bswap_mask(%rip), %xmm8
pxor %xmm8, %xmm6
pshufd $0x4e, %xmm6, %xmm1
pshufd $0x4e, %xmm5, %xmm2
movdqa %xmm5, %xmm3
movdqa %xmm5, %xmm0
pclmulqdq $0x11, %xmm6, %xmm3
pclmulqdq $0x00, %xmm6, %xmm0
pxor %xmm6, %xmm1
pxor %xmm5, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm0, %xmm7
movdqa %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm7
pxor %xmm1, %xmm6
movdqa %xmm7, %xmm0
movdqa %xmm6, %xmm1
psrld $31, %xmm0
psrld $31, %xmm1
pslld $0x01, %xmm7
pslld $0x01, %xmm6
movdqa %xmm0, %xmm2
pslldq $4, %xmm0
psrldq $12, %xmm2
pslldq $4, %xmm1
por %xmm2, %xmm6
por %xmm0, %xmm7
por %xmm1, %xmm6
movdqa %xmm7, %xmm0
movdqa %xmm7, %xmm1
movdqa %xmm7, %xmm2
pslld $31, %xmm0
pslld $30, %xmm1
pslld $25, %xmm2
pxor %xmm1, %xmm0
pxor %xmm2, %xmm0
movdqa %xmm0, %xmm1
psrldq $4, %xmm1
pslldq $12, %xmm0
pxor %xmm0, %xmm7
movdqa %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
psrld $0x01, %xmm2
psrld $2, %xmm3
psrld $7, %xmm0
pxor %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm2
pxor %xmm7, %xmm2
pxor %xmm2, %xmm6
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_aesni_calc_aad_16_loop
movl %r11d, %edx
cmpl %edx, %ecx
je L_AES_GCM_decrypt_aesni_calc_aad_done
L_AES_GCM_decrypt_aesni_calc_aad_lt16:
subq $16, %rsp
pxor %xmm8, %xmm8
xorl %ebx, %ebx
movdqu %xmm8, (%rsp)
L_AES_GCM_decrypt_aesni_calc_aad_loop:
movzbl (%r12,%rcx,1), %r13d
movb %r13b, (%rsp,%rbx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_aesni_calc_aad_loop
movdqu (%rsp), %xmm8
addq $16, %rsp
pshufb L_aes_gcm_bswap_mask(%rip), %xmm8
pxor %xmm8, %xmm6
pshufd $0x4e, %xmm6, %xmm1
pshufd $0x4e, %xmm5, %xmm2
movdqa %xmm5, %xmm3
movdqa %xmm5, %xmm0
pclmulqdq $0x11, %xmm6, %xmm3
pclmulqdq $0x00, %xmm6, %xmm0
pxor %xmm6, %xmm1
pxor %xmm5, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm0, %xmm7
movdqa %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm7
pxor %xmm1, %xmm6
movdqa %xmm7, %xmm0
movdqa %xmm6, %xmm1
psrld $31, %xmm0
psrld $31, %xmm1
pslld $0x01, %xmm7
pslld $0x01, %xmm6
movdqa %xmm0, %xmm2
pslldq $4, %xmm0
psrldq $12, %xmm2
pslldq $4, %xmm1
por %xmm2, %xmm6
por %xmm0, %xmm7
por %xmm1, %xmm6
movdqa %xmm7, %xmm0
movdqa %xmm7, %xmm1
movdqa %xmm7, %xmm2
pslld $31, %xmm0
pslld $30, %xmm1
pslld $25, %xmm2
pxor %xmm1, %xmm0
pxor %xmm2, %xmm0
movdqa %xmm0, %xmm1
psrldq $4, %xmm1
pslldq $12, %xmm0
pxor %xmm0, %xmm7
movdqa %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
psrld $0x01, %xmm2
psrld $2, %xmm3
psrld $7, %xmm0
pxor %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm2
pxor %xmm7, %xmm2
pxor %xmm2, %xmm6
L_AES_GCM_decrypt_aesni_calc_aad_done:
# Calculate counter and H
pshufb L_aes_gcm_bswap_epi64(%rip), %xmm4
movdqa %xmm5, %xmm9
paddd L_aes_gcm_one(%rip), %xmm4
movdqa %xmm5, %xmm8
movdqu %xmm4, 128(%rsp)
psrlq $63, %xmm9
psllq $0x01, %xmm8
pslldq $8, %xmm9
por %xmm9, %xmm8
pshufd $0xff, %xmm5, %xmm5
psrad $31, %xmm5
pand L_aes_gcm_mod2_128(%rip), %xmm5
pxor %xmm8, %xmm5
xorl %ebx, %ebx
cmpl $0x80, %r9d
movl %r9d, %r13d
jl L_AES_GCM_decrypt_aesni_done_128
andl $0xffffff80, %r13d
movdqa %xmm6, %xmm2
# H ^ 1
movdqu %xmm5, (%rsp)
# H ^ 2
pshufd $0x4e, %xmm5, %xmm9
pshufd $0x4e, %xmm5, %xmm10
movdqa %xmm5, %xmm11
movdqa %xmm5, %xmm8
pclmulqdq $0x11, %xmm5, %xmm11
pclmulqdq $0x00, %xmm5, %xmm8
pxor %xmm5, %xmm9
pxor %xmm5, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm0
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm0
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm0
movdqu %xmm0, 16(%rsp)
# H ^ 3
pshufd $0x4e, %xmm5, %xmm9
pshufd $0x4e, %xmm0, %xmm10
movdqa %xmm0, %xmm11
movdqa %xmm0, %xmm8
pclmulqdq $0x11, %xmm5, %xmm11
pclmulqdq $0x00, %xmm5, %xmm8
pxor %xmm5, %xmm9
pxor %xmm0, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm1
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm1
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm1
movdqu %xmm1, 32(%rsp)
# H ^ 4
pshufd $0x4e, %xmm0, %xmm9
pshufd $0x4e, %xmm0, %xmm10
movdqa %xmm0, %xmm11
movdqa %xmm0, %xmm8
pclmulqdq $0x11, %xmm0, %xmm11
pclmulqdq $0x00, %xmm0, %xmm8
pxor %xmm0, %xmm9
pxor %xmm0, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm3
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm3
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm3
movdqu %xmm3, 48(%rsp)
# H ^ 5
pshufd $0x4e, %xmm0, %xmm9
pshufd $0x4e, %xmm1, %xmm10
movdqa %xmm1, %xmm11
movdqa %xmm1, %xmm8
pclmulqdq $0x11, %xmm0, %xmm11
pclmulqdq $0x00, %xmm0, %xmm8
pxor %xmm0, %xmm9
pxor %xmm1, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm7
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm7
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm7
movdqu %xmm7, 64(%rsp)
# H ^ 6
pshufd $0x4e, %xmm1, %xmm9
pshufd $0x4e, %xmm1, %xmm10
movdqa %xmm1, %xmm11
movdqa %xmm1, %xmm8
pclmulqdq $0x11, %xmm1, %xmm11
pclmulqdq $0x00, %xmm1, %xmm8
pxor %xmm1, %xmm9
pxor %xmm1, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm7
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm7
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm7
movdqu %xmm7, 80(%rsp)
# H ^ 7
pshufd $0x4e, %xmm1, %xmm9
pshufd $0x4e, %xmm3, %xmm10
movdqa %xmm3, %xmm11
movdqa %xmm3, %xmm8
pclmulqdq $0x11, %xmm1, %xmm11
pclmulqdq $0x00, %xmm1, %xmm8
pxor %xmm1, %xmm9
pxor %xmm3, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm7
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm7
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm7
movdqu %xmm7, 96(%rsp)
# H ^ 8
pshufd $0x4e, %xmm3, %xmm9
pshufd $0x4e, %xmm3, %xmm10
movdqa %xmm3, %xmm11
movdqa %xmm3, %xmm8
pclmulqdq $0x11, %xmm3, %xmm11
pclmulqdq $0x00, %xmm3, %xmm8
pxor %xmm3, %xmm9
pxor %xmm3, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm7
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm7
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm7
movdqu %xmm7, 112(%rsp)
L_AES_GCM_decrypt_aesni_ghash_128:
leaq (%rdi,%rbx,1), %rcx
leaq (%rsi,%rbx,1), %rdx
movdqu 128(%rsp), %xmm8
movdqa L_aes_gcm_bswap_epi64(%rip), %xmm1
movdqa %xmm8, %xmm0
pshufb %xmm1, %xmm8
movdqa %xmm0, %xmm9
paddd L_aes_gcm_one(%rip), %xmm9
pshufb %xmm1, %xmm9
movdqa %xmm0, %xmm10
paddd L_aes_gcm_two(%rip), %xmm10
pshufb %xmm1, %xmm10
movdqa %xmm0, %xmm11
paddd L_aes_gcm_three(%rip), %xmm11
pshufb %xmm1, %xmm11
movdqa %xmm0, %xmm12
paddd L_aes_gcm_four(%rip), %xmm12
pshufb %xmm1, %xmm12
movdqa %xmm0, %xmm13
paddd L_aes_gcm_five(%rip), %xmm13
pshufb %xmm1, %xmm13
movdqa %xmm0, %xmm14
paddd L_aes_gcm_six(%rip), %xmm14
pshufb %xmm1, %xmm14
movdqa %xmm0, %xmm15
paddd L_aes_gcm_seven(%rip), %xmm15
pshufb %xmm1, %xmm15
paddd L_aes_gcm_eight(%rip), %xmm0
movdqa (%r15), %xmm7
movdqu %xmm0, 128(%rsp)
pxor %xmm7, %xmm8
pxor %xmm7, %xmm9
pxor %xmm7, %xmm10
pxor %xmm7, %xmm11
pxor %xmm7, %xmm12
pxor %xmm7, %xmm13
pxor %xmm7, %xmm14
pxor %xmm7, %xmm15
movdqu 112(%rsp), %xmm7
movdqu (%rcx), %xmm0
aesenc 16(%r15), %xmm8
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
pxor %xmm2, %xmm0
pshufd $0x4e, %xmm7, %xmm1
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm7, %xmm1
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm3
pclmulqdq $0x11, %xmm7, %xmm3
aesenc 16(%r15), %xmm9
aesenc 16(%r15), %xmm10
movdqa %xmm0, %xmm2
pclmulqdq $0x00, %xmm7, %xmm2
aesenc 16(%r15), %xmm11
aesenc 16(%r15), %xmm12
pclmulqdq $0x00, %xmm5, %xmm1
aesenc 16(%r15), %xmm13
aesenc 16(%r15), %xmm14
aesenc 16(%r15), %xmm15
pxor %xmm2, %xmm1
pxor %xmm3, %xmm1
movdqu 96(%rsp), %xmm7
movdqu 16(%rcx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 32(%r15), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 32(%r15), %xmm9
aesenc 32(%r15), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 32(%r15), %xmm11
aesenc 32(%r15), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 32(%r15), %xmm13
aesenc 32(%r15), %xmm14
aesenc 32(%r15), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu 80(%rsp), %xmm7
movdqu 32(%rcx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 48(%r15), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 48(%r15), %xmm9
aesenc 48(%r15), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 48(%r15), %xmm11
aesenc 48(%r15), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 48(%r15), %xmm13
aesenc 48(%r15), %xmm14
aesenc 48(%r15), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu 64(%rsp), %xmm7
movdqu 48(%rcx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 64(%r15), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 64(%r15), %xmm9
aesenc 64(%r15), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 64(%r15), %xmm11
aesenc 64(%r15), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 64(%r15), %xmm13
aesenc 64(%r15), %xmm14
aesenc 64(%r15), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu 48(%rsp), %xmm7
movdqu 64(%rcx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 80(%r15), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 80(%r15), %xmm9
aesenc 80(%r15), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 80(%r15), %xmm11
aesenc 80(%r15), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 80(%r15), %xmm13
aesenc 80(%r15), %xmm14
aesenc 80(%r15), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu 32(%rsp), %xmm7
movdqu 80(%rcx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 96(%r15), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 96(%r15), %xmm9
aesenc 96(%r15), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 96(%r15), %xmm11
aesenc 96(%r15), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 96(%r15), %xmm13
aesenc 96(%r15), %xmm14
aesenc 96(%r15), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu 16(%rsp), %xmm7
movdqu 96(%rcx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 112(%r15), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 112(%r15), %xmm9
aesenc 112(%r15), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 112(%r15), %xmm11
aesenc 112(%r15), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 112(%r15), %xmm13
aesenc 112(%r15), %xmm14
aesenc 112(%r15), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu (%rsp), %xmm7
movdqu 112(%rcx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 128(%r15), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 128(%r15), %xmm9
aesenc 128(%r15), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 128(%r15), %xmm11
aesenc 128(%r15), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 128(%r15), %xmm13
aesenc 128(%r15), %xmm14
aesenc 128(%r15), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqa %xmm1, %xmm5
psrldq $8, %xmm1
pslldq $8, %xmm5
aesenc 144(%r15), %xmm8
pxor %xmm5, %xmm2
pxor %xmm1, %xmm3
movdqa %xmm2, %xmm7
movdqa %xmm2, %xmm4
movdqa %xmm2, %xmm5
aesenc 144(%r15), %xmm9
pslld $31, %xmm7
pslld $30, %xmm4
pslld $25, %xmm5
aesenc 144(%r15), %xmm10
pxor %xmm4, %xmm7
pxor %xmm5, %xmm7
aesenc 144(%r15), %xmm11
movdqa %xmm7, %xmm4
pslldq $12, %xmm7
psrldq $4, %xmm4
aesenc 144(%r15), %xmm12
pxor %xmm7, %xmm2
movdqa %xmm2, %xmm5
movdqa %xmm2, %xmm1
movdqa %xmm2, %xmm0
aesenc 144(%r15), %xmm13
psrld $0x01, %xmm5
psrld $2, %xmm1
psrld $7, %xmm0
aesenc 144(%r15), %xmm14
pxor %xmm1, %xmm5
pxor %xmm0, %xmm5
aesenc 144(%r15), %xmm15
pxor %xmm4, %xmm5
pxor %xmm5, %xmm2
pxor %xmm3, %xmm2
cmpl $11, %r10d
movdqa 160(%r15), %xmm7
jl L_AES_GCM_decrypt_aesni_aesenc_128_ghash_avx_done
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 176(%r15), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
cmpl $13, %r10d
movdqa 192(%r15), %xmm7
jl L_AES_GCM_decrypt_aesni_aesenc_128_ghash_avx_done
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 208(%r15), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 224(%r15), %xmm7
L_AES_GCM_decrypt_aesni_aesenc_128_ghash_avx_done:
aesenclast %xmm7, %xmm8
aesenclast %xmm7, %xmm9
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
pxor %xmm0, %xmm8
pxor %xmm1, %xmm9
movdqu %xmm8, (%rdx)
movdqu %xmm9, 16(%rdx)
aesenclast %xmm7, %xmm10
aesenclast %xmm7, %xmm11
movdqu 32(%rcx), %xmm0
movdqu 48(%rcx), %xmm1
pxor %xmm0, %xmm10
pxor %xmm1, %xmm11
movdqu %xmm10, 32(%rdx)
movdqu %xmm11, 48(%rdx)
aesenclast %xmm7, %xmm12
aesenclast %xmm7, %xmm13
movdqu 64(%rcx), %xmm0
movdqu 80(%rcx), %xmm1
pxor %xmm0, %xmm12
pxor %xmm1, %xmm13
movdqu %xmm12, 64(%rdx)
movdqu %xmm13, 80(%rdx)
aesenclast %xmm7, %xmm14
aesenclast %xmm7, %xmm15
movdqu 96(%rcx), %xmm0
movdqu 112(%rcx), %xmm1
pxor %xmm0, %xmm14
pxor %xmm1, %xmm15
movdqu %xmm14, 96(%rdx)
movdqu %xmm15, 112(%rdx)
addl $0x80, %ebx
cmpl %r13d, %ebx
jl L_AES_GCM_decrypt_aesni_ghash_128
movdqa %xmm2, %xmm6
movdqu (%rsp), %xmm5
L_AES_GCM_decrypt_aesni_done_128:
movl %r9d, %edx
cmpl %edx, %ebx
jge L_AES_GCM_decrypt_aesni_done_dec
movl %r9d, %r13d
andl $0xfffffff0, %r13d
cmpl %r13d, %ebx
jge L_AES_GCM_decrypt_aesni_last_block_done
L_AES_GCM_decrypt_aesni_last_block_start:
leaq (%rdi,%rbx,1), %rcx
leaq (%rsi,%rbx,1), %rdx
movdqu (%rcx), %xmm1
movdqa %xmm5, %xmm0
pshufb L_aes_gcm_bswap_mask(%rip), %xmm1
pxor %xmm6, %xmm1
movdqu 128(%rsp), %xmm8
movdqa %xmm8, %xmm9
pshufb L_aes_gcm_bswap_epi64(%rip), %xmm8
paddd L_aes_gcm_one(%rip), %xmm9
pxor (%r15), %xmm8
movdqu %xmm9, 128(%rsp)
movdqa %xmm1, %xmm10
pclmulqdq $16, %xmm0, %xmm10
aesenc 16(%r15), %xmm8
aesenc 32(%r15), %xmm8
movdqa %xmm1, %xmm11
pclmulqdq $0x01, %xmm0, %xmm11
aesenc 48(%r15), %xmm8
aesenc 64(%r15), %xmm8
movdqa %xmm1, %xmm12
pclmulqdq $0x00, %xmm0, %xmm12
aesenc 80(%r15), %xmm8
movdqa %xmm1, %xmm1
pclmulqdq $0x11, %xmm0, %xmm1
aesenc 96(%r15), %xmm8
pxor %xmm11, %xmm10
movdqa %xmm10, %xmm2
psrldq $8, %xmm10
pslldq $8, %xmm2
aesenc 112(%r15), %xmm8
movdqa %xmm1, %xmm3
pxor %xmm12, %xmm2
pxor %xmm10, %xmm3
movdqa L_aes_gcm_mod2_128(%rip), %xmm0
movdqa %xmm2, %xmm11
pclmulqdq $16, %xmm0, %xmm11
aesenc 128(%r15), %xmm8
pshufd $0x4e, %xmm2, %xmm10
pxor %xmm11, %xmm10
movdqa %xmm10, %xmm11
pclmulqdq $16, %xmm0, %xmm11
aesenc 144(%r15), %xmm8
pshufd $0x4e, %xmm10, %xmm6
pxor %xmm11, %xmm6
pxor %xmm3, %xmm6
cmpl $11, %r10d
movdqa 160(%r15), %xmm9
jl L_AES_GCM_decrypt_aesni_aesenc_gfmul_last
aesenc %xmm9, %xmm8
aesenc 176(%r15), %xmm8
cmpl $13, %r10d
movdqa 192(%r15), %xmm9
jl L_AES_GCM_decrypt_aesni_aesenc_gfmul_last
aesenc %xmm9, %xmm8
aesenc 208(%r15), %xmm8
movdqa 224(%r15), %xmm9
L_AES_GCM_decrypt_aesni_aesenc_gfmul_last:
aesenclast %xmm9, %xmm8
movdqu (%rcx), %xmm9
pxor %xmm9, %xmm8
movdqu %xmm8, (%rdx)
addl $16, %ebx
cmpl %r13d, %ebx
jl L_AES_GCM_decrypt_aesni_last_block_start
L_AES_GCM_decrypt_aesni_last_block_done:
movl %r9d, %ecx
movl %ecx, %edx
andl $15, %ecx
jz L_AES_GCM_decrypt_aesni_aesenc_last15_dec_avx_done
movdqu 128(%rsp), %xmm4
pshufb L_aes_gcm_bswap_epi64(%rip), %xmm4
pxor (%r15), %xmm4
aesenc 16(%r15), %xmm4
aesenc 32(%r15), %xmm4
aesenc 48(%r15), %xmm4
aesenc 64(%r15), %xmm4
aesenc 80(%r15), %xmm4
aesenc 96(%r15), %xmm4
aesenc 112(%r15), %xmm4
aesenc 128(%r15), %xmm4
aesenc 144(%r15), %xmm4
cmpl $11, %r10d
movdqa 160(%r15), %xmm9
jl L_AES_GCM_decrypt_aesni_aesenc_last15_dec_avx_aesenc_avx_last
aesenc %xmm9, %xmm4
aesenc 176(%r15), %xmm4
cmpl $13, %r10d
movdqa 192(%r15), %xmm9
jl L_AES_GCM_decrypt_aesni_aesenc_last15_dec_avx_aesenc_avx_last
aesenc %xmm9, %xmm4
aesenc 208(%r15), %xmm4
movdqa 224(%r15), %xmm9
L_AES_GCM_decrypt_aesni_aesenc_last15_dec_avx_aesenc_avx_last:
aesenclast %xmm9, %xmm4
subq $32, %rsp
xorl %ecx, %ecx
movdqu %xmm4, (%rsp)
pxor %xmm0, %xmm0
movdqu %xmm0, 16(%rsp)
L_AES_GCM_decrypt_aesni_aesenc_last15_dec_avx_loop:
movzbl (%rdi,%rbx,1), %r13d
movb %r13b, 16(%rsp,%rcx,1)
xorb (%rsp,%rcx,1), %r13b
movb %r13b, (%rsi,%rbx,1)
incl %ebx
incl %ecx
cmpl %edx, %ebx
jl L_AES_GCM_decrypt_aesni_aesenc_last15_dec_avx_loop
movdqu 16(%rsp), %xmm4
addq $32, %rsp
pshufb L_aes_gcm_bswap_mask(%rip), %xmm4
pxor %xmm4, %xmm6
pshufd $0x4e, %xmm5, %xmm9
pshufd $0x4e, %xmm6, %xmm10
movdqa %xmm6, %xmm11
movdqa %xmm6, %xmm8
pclmulqdq $0x11, %xmm5, %xmm11
pclmulqdq $0x00, %xmm5, %xmm8
pxor %xmm5, %xmm9
pxor %xmm6, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm6
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm6
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm6
L_AES_GCM_decrypt_aesni_aesenc_last15_dec_avx_done:
L_AES_GCM_decrypt_aesni_done_dec:
movl %r9d, %edx
movl %r11d, %ecx
shlq $3, %rdx
shlq $3, %rcx
pinsrq $0x00, %rdx, %xmm0
pinsrq $0x01, %rcx, %xmm0
pxor %xmm0, %xmm6
pshufd $0x4e, %xmm5, %xmm9
pshufd $0x4e, %xmm6, %xmm10
movdqa %xmm6, %xmm11
movdqa %xmm6, %xmm8
pclmulqdq $0x11, %xmm5, %xmm11
pclmulqdq $0x00, %xmm5, %xmm8
pxor %xmm5, %xmm9
pxor %xmm6, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm6
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm6
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm6
pshufb L_aes_gcm_bswap_mask(%rip), %xmm6
movdqu 144(%rsp), %xmm0
pxor %xmm6, %xmm0
cmpl $16, %r14d
je L_AES_GCM_decrypt_aesni_cmp_tag_16
subq $16, %rsp
xorq %rcx, %rcx
xorq %rbx, %rbx
movdqu %xmm0, (%rsp)
L_AES_GCM_decrypt_aesni_cmp_tag_loop:
movzbl (%rsp,%rcx,1), %r13d
xorb (%r8,%rcx,1), %r13b
orb %r13b, %bl
incl %ecx
cmpl %r14d, %ecx
jne L_AES_GCM_decrypt_aesni_cmp_tag_loop
cmpb $0x00, %bl
sete %bl
addq $16, %rsp
xorq %rcx, %rcx
jmp L_AES_GCM_decrypt_aesni_cmp_tag_done
L_AES_GCM_decrypt_aesni_cmp_tag_16:
movdqu (%r8), %xmm1
pcmpeqb %xmm1, %xmm0
pmovmskb %xmm0, %rdx
# If %edx == 0xFFFF (all 16 bytes of the tag compared equal) return 1, else return 0.
xorl %ebx, %ebx
cmpl $0xffff, %edx
sete %bl
L_AES_GCM_decrypt_aesni_cmp_tag_done:
movl %ebx, (%rbp)
addq $0xa8, %rsp
popq %rbp
popq %r15
popq %r14
popq %rbx
popq %r12
popq %r13
repz retq
#ifndef __APPLE__
.size AES_GCM_decrypt_aesni,.-AES_GCM_decrypt_aesni
#endif /* __APPLE__ */
#ifdef WOLFSSL_AESGCM_STREAM
#ifndef __APPLE__
.text
.globl AES_GCM_init_aesni
.type AES_GCM_init_aesni,@function
.align 16
AES_GCM_init_aesni:
#else
.section __TEXT,__text
.globl _AES_GCM_init_aesni
.p2align 4
_AES_GCM_init_aesni:
#endif /* __APPLE__ */
# AES_GCM_init_aesni: derive the GCM hash subkey H and the initial counter
# block from an expanded AES key and an IV (System V AMD64 ABI).
#   rdi        = AES round-key schedule (16-byte keys at offsets 0,16,...,224)
#   esi        = number of AES rounds (10/12/14 -> AES-128/192/256; the
#                cmpl $11 / cmpl $13 dispatch below selects the extra rounds)
#   rdx (r10)  = IV pointer
#   ecx (r11d) = IV length in bytes
#   r8         = out: hash subkey H = E(K, 0^128), byte-reflected, 16 bytes
#   r9         = out: next counter block (Y0 + 1, epi64-byte-swapped form)
#   32(%rsp)   = out pointer (rax): E(K, Y0), the block later XORed into the
#                tag. NOTE(review): out-parameter roles inferred from the
#                stores at L_AES_GCM_init_aesni_iv_done; confirm against the
#                C prototype in aes.c.
# Uses r12-r14 as scratch (saved/restored); clobbers rax,rcx,rdx,r10,r11,
# xmm0-xmm8, xmm15 and flags.
pushq %r12
pushq %r13
pushq %r14
movq %rdx, %r10
movl %ecx, %r11d
# First stack argument: 8 (return addr) + 3*8 (pushes) = 32(%rsp).
movq 32(%rsp), %rax
subq $16, %rsp
# xmm4 = 0: doubles as the GHASH accumulator and the counter being built.
pxor %xmm4, %xmm4
movl %r11d, %edx
cmpl $12, %edx
jne L_AES_GCM_init_aesni_iv_not_12
# Calculate values when IV is 12 bytes (the GCM fast path)
# Set counter based on IV: Y0 = IV || 0x00000001 (0x01000000 is the
# 32-bit value 1 stored big-endian in the top lane).
movl $0x1000000, %ecx
pinsrq $0x00, (%r10), %xmm4
pinsrd $2, 8(%r10), %xmm4
pinsrd $3, %ecx, %xmm4
# H = Encrypt X(=0) and T = Encrypt counter -- the two AES encryptions are
# interleaved round by round: xmm5 starts as round key 0 (i.e. 0 ^ rk0, the
# zero block), xmm1 = Y0 ^ rk0.
movdqa %xmm4, %xmm1
movdqa (%rdi), %xmm5
pxor %xmm5, %xmm1
movdqa 16(%rdi), %xmm6
aesenc %xmm6, %xmm5
aesenc %xmm6, %xmm1
movdqa 32(%rdi), %xmm6
aesenc %xmm6, %xmm5
aesenc %xmm6, %xmm1
movdqa 48(%rdi), %xmm6
aesenc %xmm6, %xmm5
aesenc %xmm6, %xmm1
movdqa 64(%rdi), %xmm6
aesenc %xmm6, %xmm5
aesenc %xmm6, %xmm1
movdqa 80(%rdi), %xmm6
aesenc %xmm6, %xmm5
aesenc %xmm6, %xmm1
movdqa 96(%rdi), %xmm6
aesenc %xmm6, %xmm5
aesenc %xmm6, %xmm1
movdqa 112(%rdi), %xmm6
aesenc %xmm6, %xmm5
aesenc %xmm6, %xmm1
movdqa 128(%rdi), %xmm6
aesenc %xmm6, %xmm5
aesenc %xmm6, %xmm1
movdqa 144(%rdi), %xmm6
aesenc %xmm6, %xmm5
aesenc %xmm6, %xmm1
# Extra rounds for AES-192 (11+) and AES-256 (13+).
cmpl $11, %esi
movdqa 160(%rdi), %xmm6
jl L_AES_GCM_init_aesni_calc_iv_12_last
aesenc %xmm6, %xmm5
aesenc %xmm6, %xmm1
movdqa 176(%rdi), %xmm6
aesenc %xmm6, %xmm5
aesenc %xmm6, %xmm1
cmpl $13, %esi
movdqa 192(%rdi), %xmm6
jl L_AES_GCM_init_aesni_calc_iv_12_last
aesenc %xmm6, %xmm5
aesenc %xmm6, %xmm1
movdqa 208(%rdi), %xmm6
aesenc %xmm6, %xmm5
aesenc %xmm6, %xmm1
movdqa 224(%rdi), %xmm6
L_AES_GCM_init_aesni_calc_iv_12_last:
aesenclast %xmm6, %xmm5
aesenclast %xmm6, %xmm1
# Byte-reflect H for the bit-reflected GHASH math; keep T in xmm15.
pshufb L_aes_gcm_bswap_mask(%rip), %xmm5
movdqu %xmm1, %xmm15
jmp L_AES_GCM_init_aesni_iv_done
L_AES_GCM_init_aesni_iv_not_12:
# Calculate values when IV is not 12 bytes:
# Y0 = GHASH(IV padded to a block boundary, then the IV bit-length).
# H = Encrypt X(=0)
movdqa (%rdi), %xmm5
aesenc 16(%rdi), %xmm5
aesenc 32(%rdi), %xmm5
aesenc 48(%rdi), %xmm5
aesenc 64(%rdi), %xmm5
aesenc 80(%rdi), %xmm5
aesenc 96(%rdi), %xmm5
aesenc 112(%rdi), %xmm5
aesenc 128(%rdi), %xmm5
aesenc 144(%rdi), %xmm5
cmpl $11, %esi
movdqa 160(%rdi), %xmm8
jl L_AES_GCM_init_aesni_calc_iv_1_aesenc_avx_last
aesenc %xmm8, %xmm5
aesenc 176(%rdi), %xmm5
cmpl $13, %esi
movdqa 192(%rdi), %xmm8
jl L_AES_GCM_init_aesni_calc_iv_1_aesenc_avx_last
aesenc %xmm8, %xmm5
aesenc 208(%rdi), %xmm5
movdqa 224(%rdi), %xmm8
L_AES_GCM_init_aesni_calc_iv_1_aesenc_avx_last:
aesenclast %xmm8, %xmm5
pshufb L_aes_gcm_bswap_mask(%rip), %xmm5
# Calc counter
# Initialization vector: rcx = byte offset into the IV.
cmpl $0x00, %edx
movq $0x00, %rcx
je L_AES_GCM_init_aesni_calc_iv_done
cmpl $16, %edx
jl L_AES_GCM_init_aesni_calc_iv_lt16
andl $0xfffffff0, %edx
L_AES_GCM_init_aesni_calc_iv_16_loop:
# GHASH one full 16-byte IV block: xmm4 = (xmm4 ^ block) * H.
movdqu (%r10,%rcx,1), %xmm7
pshufb L_aes_gcm_bswap_mask(%rip), %xmm7
pxor %xmm7, %xmm4
# 128x128 carry-less multiply, Karatsuba style: hi (0x11), lo (0x00) and
# the middle term from (a_hi^a_lo)*(b_hi^b_lo).
pshufd $0x4e, %xmm4, %xmm1
pshufd $0x4e, %xmm5, %xmm2
movdqa %xmm5, %xmm3
movdqa %xmm5, %xmm0
pclmulqdq $0x11, %xmm4, %xmm3
pclmulqdq $0x00, %xmm4, %xmm0
pxor %xmm4, %xmm1
pxor %xmm5, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm0, %xmm6
movdqa %xmm3, %xmm4
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm6
pxor %xmm1, %xmm4
# Shift the 256-bit product left one bit (bit-reflection fixup).
movdqa %xmm6, %xmm0
movdqa %xmm4, %xmm1
psrld $31, %xmm0
psrld $31, %xmm1
pslld $0x01, %xmm6
pslld $0x01, %xmm4
movdqa %xmm0, %xmm2
pslldq $4, %xmm0
psrldq $12, %xmm2
pslldq $4, %xmm1
por %xmm2, %xmm4
por %xmm0, %xmm6
por %xmm1, %xmm4
# Reduce modulo the GHASH polynomial (x^128 + x^7 + x^2 + x + 1).
movdqa %xmm6, %xmm0
movdqa %xmm6, %xmm1
movdqa %xmm6, %xmm2
pslld $31, %xmm0
pslld $30, %xmm1
pslld $25, %xmm2
pxor %xmm1, %xmm0
pxor %xmm2, %xmm0
movdqa %xmm0, %xmm1
psrldq $4, %xmm1
pslldq $12, %xmm0
pxor %xmm0, %xmm6
movdqa %xmm6, %xmm2
movdqa %xmm6, %xmm3
movdqa %xmm6, %xmm0
psrld $0x01, %xmm2
psrld $2, %xmm3
psrld $7, %xmm0
pxor %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm2
pxor %xmm6, %xmm2
pxor %xmm2, %xmm4
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_init_aesni_calc_iv_16_loop
movl %r11d, %edx
cmpl %edx, %ecx
je L_AES_GCM_init_aesni_calc_iv_done
L_AES_GCM_init_aesni_calc_iv_lt16:
# Final partial IV block: copy the remaining bytes into a zeroed 16-byte
# stack buffer, then GHASH it like a full block.
subq $16, %rsp
pxor %xmm7, %xmm7
xorl %r13d, %r13d
movdqu %xmm7, (%rsp)
L_AES_GCM_init_aesni_calc_iv_loop:
movzbl (%r10,%rcx,1), %r12d
movb %r12b, (%rsp,%r13,1)
incl %ecx
incl %r13d
cmpl %edx, %ecx
jl L_AES_GCM_init_aesni_calc_iv_loop
movdqu (%rsp), %xmm7
addq $16, %rsp
pshufb L_aes_gcm_bswap_mask(%rip), %xmm7
pxor %xmm7, %xmm4
# Same carry-less multiply / shift / reduce sequence as the 16-byte loop.
pshufd $0x4e, %xmm4, %xmm1
pshufd $0x4e, %xmm5, %xmm2
movdqa %xmm5, %xmm3
movdqa %xmm5, %xmm0
pclmulqdq $0x11, %xmm4, %xmm3
pclmulqdq $0x00, %xmm4, %xmm0
pxor %xmm4, %xmm1
pxor %xmm5, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm0, %xmm6
movdqa %xmm3, %xmm4
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm6
pxor %xmm1, %xmm4
movdqa %xmm6, %xmm0
movdqa %xmm4, %xmm1
psrld $31, %xmm0
psrld $31, %xmm1
pslld $0x01, %xmm6
pslld $0x01, %xmm4
movdqa %xmm0, %xmm2
pslldq $4, %xmm0
psrldq $12, %xmm2
pslldq $4, %xmm1
por %xmm2, %xmm4
por %xmm0, %xmm6
por %xmm1, %xmm4
movdqa %xmm6, %xmm0
movdqa %xmm6, %xmm1
movdqa %xmm6, %xmm2
pslld $31, %xmm0
pslld $30, %xmm1
pslld $25, %xmm2
pxor %xmm1, %xmm0
pxor %xmm2, %xmm0
movdqa %xmm0, %xmm1
psrldq $4, %xmm1
pslldq $12, %xmm0
pxor %xmm0, %xmm6
movdqa %xmm6, %xmm2
movdqa %xmm6, %xmm3
movdqa %xmm6, %xmm0
psrld $0x01, %xmm2
psrld $2, %xmm3
psrld $7, %xmm0
pxor %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm2
pxor %xmm6, %xmm2
pxor %xmm2, %xmm4
L_AES_GCM_init_aesni_calc_iv_done:
# T = Encrypt counter
# Fold in the IV length in bits (edx << 3) as the final GHASH block.
pxor %xmm0, %xmm0
shll $3, %edx
pinsrq $0x00, %rdx, %xmm0
pxor %xmm0, %xmm4
pshufd $0x4e, %xmm4, %xmm1
pshufd $0x4e, %xmm5, %xmm2
movdqa %xmm5, %xmm3
movdqa %xmm5, %xmm0
pclmulqdq $0x11, %xmm4, %xmm3
pclmulqdq $0x00, %xmm4, %xmm0
pxor %xmm4, %xmm1
pxor %xmm5, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm0, %xmm6
movdqa %xmm3, %xmm4
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm6
pxor %xmm1, %xmm4
movdqa %xmm6, %xmm0
movdqa %xmm4, %xmm1
psrld $31, %xmm0
psrld $31, %xmm1
pslld $0x01, %xmm6
pslld $0x01, %xmm4
movdqa %xmm0, %xmm2
pslldq $4, %xmm0
psrldq $12, %xmm2
pslldq $4, %xmm1
por %xmm2, %xmm4
por %xmm0, %xmm6
por %xmm1, %xmm4
movdqa %xmm6, %xmm0
movdqa %xmm6, %xmm1
movdqa %xmm6, %xmm2
pslld $31, %xmm0
pslld $30, %xmm1
pslld $25, %xmm2
pxor %xmm1, %xmm0
pxor %xmm2, %xmm0
movdqa %xmm0, %xmm1
psrldq $4, %xmm1
pslldq $12, %xmm0
pxor %xmm0, %xmm6
movdqa %xmm6, %xmm2
movdqa %xmm6, %xmm3
movdqa %xmm6, %xmm0
psrld $0x01, %xmm2
psrld $2, %xmm3
psrld $7, %xmm0
pxor %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm2
pxor %xmm6, %xmm2
pxor %xmm2, %xmm4
# xmm4 = Y0 (bit-reflected); swap back to memory byte order for AES.
pshufb L_aes_gcm_bswap_mask(%rip), %xmm4
# Encrypt counter: xmm15 = T = E(K, Y0).
movdqa (%rdi), %xmm7
pxor %xmm4, %xmm7
aesenc 16(%rdi), %xmm7
aesenc 32(%rdi), %xmm7
aesenc 48(%rdi), %xmm7
aesenc 64(%rdi), %xmm7
aesenc 80(%rdi), %xmm7
aesenc 96(%rdi), %xmm7
aesenc 112(%rdi), %xmm7
aesenc 128(%rdi), %xmm7
aesenc 144(%rdi), %xmm7
cmpl $11, %esi
movdqa 160(%rdi), %xmm8
jl L_AES_GCM_init_aesni_calc_iv_2_aesenc_avx_last
aesenc %xmm8, %xmm7
aesenc 176(%rdi), %xmm7
cmpl $13, %esi
movdqa 192(%rdi), %xmm8
jl L_AES_GCM_init_aesni_calc_iv_2_aesenc_avx_last
aesenc %xmm8, %xmm7
aesenc 208(%rdi), %xmm7
movdqa 224(%rdi), %xmm8
L_AES_GCM_init_aesni_calc_iv_2_aesenc_avx_last:
aesenclast %xmm8, %xmm7
movdqu %xmm7, %xmm15
L_AES_GCM_init_aesni_iv_done:
# Store outputs: (%rax) = E(K, Y0); (%r8) = H; (%r9) = Y0 + 1 kept in the
# epi64-swapped form used by the encrypt/decrypt counter loops.
movdqa %xmm15, (%rax)
pshufb L_aes_gcm_bswap_epi64(%rip), %xmm4
paddd L_aes_gcm_one(%rip), %xmm4
movdqa %xmm5, (%r8)
movdqa %xmm4, (%r9)
addq $16, %rsp
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size AES_GCM_init_aesni,.-AES_GCM_init_aesni
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl AES_GCM_aad_update_aesni
.type AES_GCM_aad_update_aesni,@function
.align 16
AES_GCM_aad_update_aesni:
#else
.section __TEXT,__text
.globl _AES_GCM_aad_update_aesni
.p2align 4
_AES_GCM_aad_update_aesni:
#endif /* __APPLE__ */
# AES_GCM_aad_update_aesni: fold additional authenticated data into the
# GHASH state, one 16-byte block per iteration (System V AMD64 ABI).
#   rdi = AAD data pointer (read in whole 16-byte blocks)
#   esi = AAD length in bytes -- NOTE(review): the loop only advances in
#         16-byte steps, so the caller presumably passes a multiple of 16.
#   rdx = GHASH state X, 16 bytes, read and written in place
#   rcx = hash subkey H (byte-reflected), 16 bytes
# Clobbers: rax, rcx (loop counter), xmm0-xmm7, flags.
movq %rcx, %rax
movdqa (%rdx), %xmm5
movdqa (%rax), %xmm6
xorl %ecx, %ecx
L_AES_GCM_aad_update_aesni_16_loop:
# X = (X ^ AAD_block) * H -- byte-reflect the block first.
movdqu (%rdi,%rcx,1), %xmm7
pshufb L_aes_gcm_bswap_mask(%rip), %xmm7
pxor %xmm7, %xmm5
# Carry-less Karatsuba multiply: hi (0x11), lo (0x00), middle term.
pshufd $0x4e, %xmm5, %xmm1
pshufd $0x4e, %xmm6, %xmm2
movdqa %xmm6, %xmm3
movdqa %xmm6, %xmm0
pclmulqdq $0x11, %xmm5, %xmm3
pclmulqdq $0x00, %xmm5, %xmm0
pxor %xmm5, %xmm1
pxor %xmm6, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm0, %xmm4
movdqa %xmm3, %xmm5
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm4
pxor %xmm1, %xmm5
# Shift the 256-bit product left one bit (bit-reflection fixup).
movdqa %xmm4, %xmm0
movdqa %xmm5, %xmm1
psrld $31, %xmm0
psrld $31, %xmm1
pslld $0x01, %xmm4
pslld $0x01, %xmm5
movdqa %xmm0, %xmm2
pslldq $4, %xmm0
psrldq $12, %xmm2
pslldq $4, %xmm1
por %xmm2, %xmm5
por %xmm0, %xmm4
por %xmm1, %xmm5
# Reduce modulo the GHASH polynomial (x^128 + x^7 + x^2 + x + 1).
movdqa %xmm4, %xmm0
movdqa %xmm4, %xmm1
movdqa %xmm4, %xmm2
pslld $31, %xmm0
pslld $30, %xmm1
pslld $25, %xmm2
pxor %xmm1, %xmm0
pxor %xmm2, %xmm0
movdqa %xmm0, %xmm1
psrldq $4, %xmm1
pslldq $12, %xmm0
pxor %xmm0, %xmm4
movdqa %xmm4, %xmm2
movdqa %xmm4, %xmm3
movdqa %xmm4, %xmm0
psrld $0x01, %xmm2
psrld $2, %xmm3
psrld $7, %xmm0
pxor %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm2
pxor %xmm4, %xmm2
pxor %xmm2, %xmm5
addl $16, %ecx
cmpl %esi, %ecx
jl L_AES_GCM_aad_update_aesni_16_loop
# Write the updated GHASH state back.
movdqa %xmm5, (%rdx)
repz retq
#ifndef __APPLE__
.size AES_GCM_aad_update_aesni,.-AES_GCM_aad_update_aesni
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl AES_GCM_encrypt_block_aesni
.type AES_GCM_encrypt_block_aesni,@function
.align 16
AES_GCM_encrypt_block_aesni:
#else
.section __TEXT,__text
.globl _AES_GCM_encrypt_block_aesni
.p2align 4
_AES_GCM_encrypt_block_aesni:
#endif /* __APPLE__ */
# AES_GCM_encrypt_block_aesni: encrypt a single 16-byte block in CTR mode
# and advance the counter (System V AMD64 ABI).
#   rdi       = AES round-key schedule (keys at offsets 0,16,...,224)
#   esi       = number of AES rounds (10/12/14; cmpl $11 / $13 dispatch)
#   rdx (r10) = output ciphertext block pointer
#   rcx (r11) = input plaintext block pointer
#   r8        = counter block (epi64-swapped form), incremented in place
# Clobbers: r10, r11, xmm0, xmm1, flags.
movq %rdx, %r10
movq %rcx, %r11
# Load counter, store counter+1 back, and byte-swap the working copy
# into AES input order.
movdqu (%r8), %xmm0
movdqa %xmm0, %xmm1
pshufb L_aes_gcm_bswap_epi64(%rip), %xmm0
paddd L_aes_gcm_one(%rip), %xmm1
pxor (%rdi), %xmm0
movdqu %xmm1, (%r8)
aesenc 16(%rdi), %xmm0
aesenc 32(%rdi), %xmm0
aesenc 48(%rdi), %xmm0
aesenc 64(%rdi), %xmm0
aesenc 80(%rdi), %xmm0
aesenc 96(%rdi), %xmm0
aesenc 112(%rdi), %xmm0
aesenc 128(%rdi), %xmm0
aesenc 144(%rdi), %xmm0
cmpl $11, %esi
movdqa 160(%rdi), %xmm1
jl L_AES_GCM_encrypt_block_aesni_aesenc_block_aesenc_avx_last
aesenc %xmm1, %xmm0
aesenc 176(%rdi), %xmm0
cmpl $13, %esi
movdqa 192(%rdi), %xmm1
jl L_AES_GCM_encrypt_block_aesni_aesenc_block_aesenc_avx_last
aesenc %xmm1, %xmm0
aesenc 208(%rdi), %xmm0
movdqa 224(%rdi), %xmm1
L_AES_GCM_encrypt_block_aesni_aesenc_block_aesenc_avx_last:
aesenclast %xmm1, %xmm0
# ciphertext = keystream ^ plaintext
movdqu (%r11), %xmm1
pxor %xmm1, %xmm0
movdqu %xmm0, (%r10)
# Byte-reflected ciphertext is left in xmm0 but never stored --
# NOTE(review): presumably vestigial from a variant that GHASHed here.
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
repz retq
#ifndef __APPLE__
.size AES_GCM_encrypt_block_aesni,.-AES_GCM_encrypt_block_aesni
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl AES_GCM_ghash_block_aesni
.type AES_GCM_ghash_block_aesni,@function
.align 16
AES_GCM_ghash_block_aesni:
#else
.section __TEXT,__text
.globl _AES_GCM_ghash_block_aesni
.p2align 4
_AES_GCM_ghash_block_aesni:
#endif /* __APPLE__ */
# AES_GCM_ghash_block_aesni: fold one 16-byte block into the GHASH state:
# X = (X ^ block) * H (System V AMD64 ABI).
#   rdi = 16-byte input block
#   rsi = GHASH state X, read and written in place
#   rdx = hash subkey H (byte-reflected)
# Clobbers: xmm0-xmm7, flags.
movdqa (%rsi), %xmm4
movdqa (%rdx), %xmm5
# Byte-reflect the input block and XOR into the state.
movdqu (%rdi), %xmm7
pshufb L_aes_gcm_bswap_mask(%rip), %xmm7
pxor %xmm7, %xmm4
# Carry-less Karatsuba multiply: hi (0x11), lo (0x00), middle term.
pshufd $0x4e, %xmm4, %xmm1
pshufd $0x4e, %xmm5, %xmm2
movdqa %xmm5, %xmm3
movdqa %xmm5, %xmm0
pclmulqdq $0x11, %xmm4, %xmm3
pclmulqdq $0x00, %xmm4, %xmm0
pxor %xmm4, %xmm1
pxor %xmm5, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm0, %xmm6
movdqa %xmm3, %xmm4
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm6
pxor %xmm1, %xmm4
# Shift the 256-bit product left one bit (bit-reflection fixup).
movdqa %xmm6, %xmm0
movdqa %xmm4, %xmm1
psrld $31, %xmm0
psrld $31, %xmm1
pslld $0x01, %xmm6
pslld $0x01, %xmm4
movdqa %xmm0, %xmm2
pslldq $4, %xmm0
psrldq $12, %xmm2
pslldq $4, %xmm1
por %xmm2, %xmm4
por %xmm0, %xmm6
por %xmm1, %xmm4
# Reduce modulo the GHASH polynomial (x^128 + x^7 + x^2 + x + 1).
movdqa %xmm6, %xmm0
movdqa %xmm6, %xmm1
movdqa %xmm6, %xmm2
pslld $31, %xmm0
pslld $30, %xmm1
pslld $25, %xmm2
pxor %xmm1, %xmm0
pxor %xmm2, %xmm0
movdqa %xmm0, %xmm1
psrldq $4, %xmm1
pslldq $12, %xmm0
pxor %xmm0, %xmm6
movdqa %xmm6, %xmm2
movdqa %xmm6, %xmm3
movdqa %xmm6, %xmm0
psrld $0x01, %xmm2
psrld $2, %xmm3
psrld $7, %xmm0
pxor %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm2
pxor %xmm6, %xmm2
pxor %xmm2, %xmm4
# Write the updated GHASH state back.
movdqa %xmm4, (%rsi)
repz retq
#ifndef __APPLE__
.size AES_GCM_ghash_block_aesni,.-AES_GCM_ghash_block_aesni
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl AES_GCM_encrypt_update_aesni
.type AES_GCM_encrypt_update_aesni,@function
.align 16
AES_GCM_encrypt_update_aesni:
#else
.section __TEXT,__text
.globl _AES_GCM_encrypt_update_aesni
.p2align 4
_AES_GCM_encrypt_update_aesni:
#endif /* __APPLE__ */
pushq %r13
pushq %r12
pushq %r14
movq %rdx, %r10
movq %rcx, %r11
movq 32(%rsp), %rax
movq 40(%rsp), %r12
subq $0xa0, %rsp
movdqa (%r9), %xmm6
movdqa (%rax), %xmm5
movdqa %xmm5, %xmm9
movdqa %xmm5, %xmm8
psrlq $63, %xmm9
psllq $0x01, %xmm8
pslldq $8, %xmm9
por %xmm9, %xmm8
pshufd $0xff, %xmm5, %xmm5
psrad $31, %xmm5
pand L_aes_gcm_mod2_128(%rip), %xmm5
pxor %xmm8, %xmm5
xorq %r14, %r14
cmpl $0x80, %r8d
movl %r8d, %r13d
jl L_AES_GCM_encrypt_update_aesni_done_128
andl $0xffffff80, %r13d
movdqa %xmm6, %xmm2
# H ^ 1
movdqu %xmm5, (%rsp)
# H ^ 2
pshufd $0x4e, %xmm5, %xmm9
pshufd $0x4e, %xmm5, %xmm10
movdqa %xmm5, %xmm11
movdqa %xmm5, %xmm8
pclmulqdq $0x11, %xmm5, %xmm11
pclmulqdq $0x00, %xmm5, %xmm8
pxor %xmm5, %xmm9
pxor %xmm5, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm0
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm0
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm0
movdqu %xmm0, 16(%rsp)
# H ^ 3
pshufd $0x4e, %xmm5, %xmm9
pshufd $0x4e, %xmm0, %xmm10
movdqa %xmm0, %xmm11
movdqa %xmm0, %xmm8
pclmulqdq $0x11, %xmm5, %xmm11
pclmulqdq $0x00, %xmm5, %xmm8
pxor %xmm5, %xmm9
pxor %xmm0, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm1
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm1
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm1
movdqu %xmm1, 32(%rsp)
# H ^ 4
pshufd $0x4e, %xmm0, %xmm9
pshufd $0x4e, %xmm0, %xmm10
movdqa %xmm0, %xmm11
movdqa %xmm0, %xmm8
pclmulqdq $0x11, %xmm0, %xmm11
pclmulqdq $0x00, %xmm0, %xmm8
pxor %xmm0, %xmm9
pxor %xmm0, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm3
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm3
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm3
movdqu %xmm3, 48(%rsp)
# H ^ 5
pshufd $0x4e, %xmm0, %xmm9
pshufd $0x4e, %xmm1, %xmm10
movdqa %xmm1, %xmm11
movdqa %xmm1, %xmm8
pclmulqdq $0x11, %xmm0, %xmm11
pclmulqdq $0x00, %xmm0, %xmm8
pxor %xmm0, %xmm9
pxor %xmm1, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm7
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm7
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm7
movdqu %xmm7, 64(%rsp)
# H ^ 6
pshufd $0x4e, %xmm1, %xmm9
pshufd $0x4e, %xmm1, %xmm10
movdqa %xmm1, %xmm11
movdqa %xmm1, %xmm8
pclmulqdq $0x11, %xmm1, %xmm11
pclmulqdq $0x00, %xmm1, %xmm8
pxor %xmm1, %xmm9
pxor %xmm1, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm7
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm7
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm7
movdqu %xmm7, 80(%rsp)
# H ^ 7
pshufd $0x4e, %xmm1, %xmm9
pshufd $0x4e, %xmm3, %xmm10
movdqa %xmm3, %xmm11
movdqa %xmm3, %xmm8
pclmulqdq $0x11, %xmm1, %xmm11
pclmulqdq $0x00, %xmm1, %xmm8
pxor %xmm1, %xmm9
pxor %xmm3, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm7
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm7
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm7
movdqu %xmm7, 96(%rsp)
# H ^ 8
pshufd $0x4e, %xmm3, %xmm9
pshufd $0x4e, %xmm3, %xmm10
movdqa %xmm3, %xmm11
movdqa %xmm3, %xmm8
pclmulqdq $0x11, %xmm3, %xmm11
pclmulqdq $0x00, %xmm3, %xmm8
pxor %xmm3, %xmm9
pxor %xmm3, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm7
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm7
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm7
movdqu %xmm7, 112(%rsp)
# First 128 bytes of input
movdqu (%r12), %xmm8
movdqa L_aes_gcm_bswap_epi64(%rip), %xmm1
movdqa %xmm8, %xmm0
pshufb %xmm1, %xmm8
movdqa %xmm0, %xmm9
paddd L_aes_gcm_one(%rip), %xmm9
pshufb %xmm1, %xmm9
movdqa %xmm0, %xmm10
paddd L_aes_gcm_two(%rip), %xmm10
pshufb %xmm1, %xmm10
movdqa %xmm0, %xmm11
paddd L_aes_gcm_three(%rip), %xmm11
pshufb %xmm1, %xmm11
movdqa %xmm0, %xmm12
paddd L_aes_gcm_four(%rip), %xmm12
pshufb %xmm1, %xmm12
movdqa %xmm0, %xmm13
paddd L_aes_gcm_five(%rip), %xmm13
pshufb %xmm1, %xmm13
movdqa %xmm0, %xmm14
paddd L_aes_gcm_six(%rip), %xmm14
pshufb %xmm1, %xmm14
movdqa %xmm0, %xmm15
paddd L_aes_gcm_seven(%rip), %xmm15
pshufb %xmm1, %xmm15
paddd L_aes_gcm_eight(%rip), %xmm0
movdqa (%rdi), %xmm7
movdqu %xmm0, (%r12)
pxor %xmm7, %xmm8
pxor %xmm7, %xmm9
pxor %xmm7, %xmm10
pxor %xmm7, %xmm11
pxor %xmm7, %xmm12
pxor %xmm7, %xmm13
pxor %xmm7, %xmm14
pxor %xmm7, %xmm15
movdqa 16(%rdi), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 32(%rdi), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 48(%rdi), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 64(%rdi), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 80(%rdi), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 96(%rdi), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 112(%rdi), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 128(%rdi), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 144(%rdi), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
cmpl $11, %esi
movdqa 160(%rdi), %xmm7
jl L_AES_GCM_encrypt_update_aesni_enc_done
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 176(%rdi), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
cmpl $13, %esi
movdqa 192(%rdi), %xmm7
jl L_AES_GCM_encrypt_update_aesni_enc_done
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 208(%rdi), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 224(%rdi), %xmm7
L_AES_GCM_encrypt_update_aesni_enc_done:
aesenclast %xmm7, %xmm8
aesenclast %xmm7, %xmm9
movdqu (%r11), %xmm0
movdqu 16(%r11), %xmm1
pxor %xmm0, %xmm8
pxor %xmm1, %xmm9
movdqu %xmm8, (%r10)
movdqu %xmm9, 16(%r10)
aesenclast %xmm7, %xmm10
aesenclast %xmm7, %xmm11
movdqu 32(%r11), %xmm0
movdqu 48(%r11), %xmm1
pxor %xmm0, %xmm10
pxor %xmm1, %xmm11
movdqu %xmm10, 32(%r10)
movdqu %xmm11, 48(%r10)
aesenclast %xmm7, %xmm12
aesenclast %xmm7, %xmm13
movdqu 64(%r11), %xmm0
movdqu 80(%r11), %xmm1
pxor %xmm0, %xmm12
pxor %xmm1, %xmm13
movdqu %xmm12, 64(%r10)
movdqu %xmm13, 80(%r10)
aesenclast %xmm7, %xmm14
aesenclast %xmm7, %xmm15
movdqu 96(%r11), %xmm0
movdqu 112(%r11), %xmm1
pxor %xmm0, %xmm14
pxor %xmm1, %xmm15
movdqu %xmm14, 96(%r10)
movdqu %xmm15, 112(%r10)
cmpl $0x80, %r13d
movl $0x80, %r14d
jle L_AES_GCM_encrypt_update_aesni_end_128
# More 128 bytes of input
L_AES_GCM_encrypt_update_aesni_ghash_128:
leaq (%r11,%r14,1), %rcx
leaq (%r10,%r14,1), %rdx
movdqu (%r12), %xmm8
movdqa L_aes_gcm_bswap_epi64(%rip), %xmm1
movdqa %xmm8, %xmm0
pshufb %xmm1, %xmm8
movdqa %xmm0, %xmm9
paddd L_aes_gcm_one(%rip), %xmm9
pshufb %xmm1, %xmm9
movdqa %xmm0, %xmm10
paddd L_aes_gcm_two(%rip), %xmm10
pshufb %xmm1, %xmm10
movdqa %xmm0, %xmm11
paddd L_aes_gcm_three(%rip), %xmm11
pshufb %xmm1, %xmm11
movdqa %xmm0, %xmm12
paddd L_aes_gcm_four(%rip), %xmm12
pshufb %xmm1, %xmm12
movdqa %xmm0, %xmm13
paddd L_aes_gcm_five(%rip), %xmm13
pshufb %xmm1, %xmm13
movdqa %xmm0, %xmm14
paddd L_aes_gcm_six(%rip), %xmm14
pshufb %xmm1, %xmm14
movdqa %xmm0, %xmm15
paddd L_aes_gcm_seven(%rip), %xmm15
pshufb %xmm1, %xmm15
paddd L_aes_gcm_eight(%rip), %xmm0
movdqa (%rdi), %xmm7
movdqu %xmm0, (%r12)
pxor %xmm7, %xmm8
pxor %xmm7, %xmm9
pxor %xmm7, %xmm10
pxor %xmm7, %xmm11
pxor %xmm7, %xmm12
pxor %xmm7, %xmm13
pxor %xmm7, %xmm14
pxor %xmm7, %xmm15
movdqu 112(%rsp), %xmm7
movdqu -128(%rdx), %xmm0
aesenc 16(%rdi), %xmm8
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
pxor %xmm2, %xmm0
pshufd $0x4e, %xmm7, %xmm1
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm7, %xmm1
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm3
pclmulqdq $0x11, %xmm7, %xmm3
aesenc 16(%rdi), %xmm9
aesenc 16(%rdi), %xmm10
movdqa %xmm0, %xmm2
pclmulqdq $0x00, %xmm7, %xmm2
aesenc 16(%rdi), %xmm11
aesenc 16(%rdi), %xmm12
pclmulqdq $0x00, %xmm5, %xmm1
aesenc 16(%rdi), %xmm13
aesenc 16(%rdi), %xmm14
aesenc 16(%rdi), %xmm15
pxor %xmm2, %xmm1
pxor %xmm3, %xmm1
movdqu 96(%rsp), %xmm7
movdqu -112(%rdx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 32(%rdi), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 32(%rdi), %xmm9
aesenc 32(%rdi), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 32(%rdi), %xmm11
aesenc 32(%rdi), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 32(%rdi), %xmm13
aesenc 32(%rdi), %xmm14
aesenc 32(%rdi), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu 80(%rsp), %xmm7
movdqu -96(%rdx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 48(%rdi), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 48(%rdi), %xmm9
aesenc 48(%rdi), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 48(%rdi), %xmm11
aesenc 48(%rdi), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 48(%rdi), %xmm13
aesenc 48(%rdi), %xmm14
aesenc 48(%rdi), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu 64(%rsp), %xmm7
movdqu -80(%rdx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 64(%rdi), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 64(%rdi), %xmm9
aesenc 64(%rdi), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 64(%rdi), %xmm11
aesenc 64(%rdi), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 64(%rdi), %xmm13
aesenc 64(%rdi), %xmm14
aesenc 64(%rdi), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu 48(%rsp), %xmm7
movdqu -64(%rdx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 80(%rdi), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 80(%rdi), %xmm9
aesenc 80(%rdi), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 80(%rdi), %xmm11
aesenc 80(%rdi), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 80(%rdi), %xmm13
aesenc 80(%rdi), %xmm14
aesenc 80(%rdi), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu 32(%rsp), %xmm7
movdqu -48(%rdx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 96(%rdi), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 96(%rdi), %xmm9
aesenc 96(%rdi), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 96(%rdi), %xmm11
aesenc 96(%rdi), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 96(%rdi), %xmm13
aesenc 96(%rdi), %xmm14
aesenc 96(%rdi), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu 16(%rsp), %xmm7
movdqu -32(%rdx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 112(%rdi), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 112(%rdi), %xmm9
aesenc 112(%rdi), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 112(%rdi), %xmm11
aesenc 112(%rdi), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 112(%rdi), %xmm13
aesenc 112(%rdi), %xmm14
aesenc 112(%rdi), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu (%rsp), %xmm7
movdqu -16(%rdx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 128(%rdi), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 128(%rdi), %xmm9
aesenc 128(%rdi), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 128(%rdi), %xmm11
aesenc 128(%rdi), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 128(%rdi), %xmm13
aesenc 128(%rdi), %xmm14
aesenc 128(%rdi), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqa %xmm1, %xmm5
psrldq $8, %xmm1
pslldq $8, %xmm5
aesenc 144(%rdi), %xmm8
pxor %xmm5, %xmm2
pxor %xmm1, %xmm3
movdqa %xmm2, %xmm7
movdqa %xmm2, %xmm4
movdqa %xmm2, %xmm5
aesenc 144(%rdi), %xmm9
pslld $31, %xmm7
pslld $30, %xmm4
pslld $25, %xmm5
aesenc 144(%rdi), %xmm10
pxor %xmm4, %xmm7
pxor %xmm5, %xmm7
aesenc 144(%rdi), %xmm11
movdqa %xmm7, %xmm4
pslldq $12, %xmm7
psrldq $4, %xmm4
aesenc 144(%rdi), %xmm12
pxor %xmm7, %xmm2
movdqa %xmm2, %xmm5
movdqa %xmm2, %xmm1
movdqa %xmm2, %xmm0
aesenc 144(%rdi), %xmm13
psrld $0x01, %xmm5
psrld $2, %xmm1
psrld $7, %xmm0
aesenc 144(%rdi), %xmm14
pxor %xmm1, %xmm5
pxor %xmm0, %xmm5
aesenc 144(%rdi), %xmm15
pxor %xmm4, %xmm5
pxor %xmm5, %xmm2
pxor %xmm3, %xmm2
cmpl $11, %esi
movdqa 160(%rdi), %xmm7
jl L_AES_GCM_encrypt_update_aesni_aesenc_128_ghash_avx_done
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 176(%rdi), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
cmpl $13, %esi
movdqa 192(%rdi), %xmm7
jl L_AES_GCM_encrypt_update_aesni_aesenc_128_ghash_avx_done
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 208(%rdi), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 224(%rdi), %xmm7
L_AES_GCM_encrypt_update_aesni_aesenc_128_ghash_avx_done:
aesenclast %xmm7, %xmm8
aesenclast %xmm7, %xmm9
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
pxor %xmm0, %xmm8
pxor %xmm1, %xmm9
movdqu %xmm8, (%rdx)
movdqu %xmm9, 16(%rdx)
aesenclast %xmm7, %xmm10
aesenclast %xmm7, %xmm11
movdqu 32(%rcx), %xmm0
movdqu 48(%rcx), %xmm1
pxor %xmm0, %xmm10
pxor %xmm1, %xmm11
movdqu %xmm10, 32(%rdx)
movdqu %xmm11, 48(%rdx)
aesenclast %xmm7, %xmm12
aesenclast %xmm7, %xmm13
movdqu 64(%rcx), %xmm0
movdqu 80(%rcx), %xmm1
pxor %xmm0, %xmm12
pxor %xmm1, %xmm13
movdqu %xmm12, 64(%rdx)
movdqu %xmm13, 80(%rdx)
aesenclast %xmm7, %xmm14
aesenclast %xmm7, %xmm15
movdqu 96(%rcx), %xmm0
movdqu 112(%rcx), %xmm1
pxor %xmm0, %xmm14
pxor %xmm1, %xmm15
movdqu %xmm14, 96(%rdx)
movdqu %xmm15, 112(%rdx)
addl $0x80, %r14d
cmpl %r13d, %r14d
jl L_AES_GCM_encrypt_update_aesni_ghash_128
L_AES_GCM_encrypt_update_aesni_end_128:
movdqa L_aes_gcm_bswap_mask(%rip), %xmm4
pshufb %xmm4, %xmm8
pshufb %xmm4, %xmm9
pshufb %xmm4, %xmm10
pshufb %xmm4, %xmm11
pxor %xmm2, %xmm8
pshufb %xmm4, %xmm12
pshufb %xmm4, %xmm13
pshufb %xmm4, %xmm14
pshufb %xmm4, %xmm15
movdqu 112(%rsp), %xmm7
pshufd $0x4e, %xmm8, %xmm1
pshufd $0x4e, %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
pclmulqdq $0x11, %xmm8, %xmm3
pclmulqdq $0x00, %xmm8, %xmm0
pxor %xmm8, %xmm1
pxor %xmm7, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm0, %xmm4
movdqa %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm4
pxor %xmm1, %xmm6
movdqu 96(%rsp), %xmm7
pshufd $0x4e, %xmm9, %xmm1
pshufd $0x4e, %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
pclmulqdq $0x11, %xmm9, %xmm3
pclmulqdq $0x00, %xmm9, %xmm0
pxor %xmm9, %xmm1
pxor %xmm7, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
pxor %xmm0, %xmm4
pxor %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm4
pxor %xmm1, %xmm6
movdqu 80(%rsp), %xmm7
pshufd $0x4e, %xmm10, %xmm1
pshufd $0x4e, %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
pclmulqdq $0x11, %xmm10, %xmm3
pclmulqdq $0x00, %xmm10, %xmm0
pxor %xmm10, %xmm1
pxor %xmm7, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
pxor %xmm0, %xmm4
pxor %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm4
pxor %xmm1, %xmm6
movdqu 64(%rsp), %xmm7
pshufd $0x4e, %xmm11, %xmm1
pshufd $0x4e, %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
pclmulqdq $0x11, %xmm11, %xmm3
pclmulqdq $0x00, %xmm11, %xmm0
pxor %xmm11, %xmm1
pxor %xmm7, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
pxor %xmm0, %xmm4
pxor %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm4
pxor %xmm1, %xmm6
movdqu 48(%rsp), %xmm7
pshufd $0x4e, %xmm12, %xmm1
pshufd $0x4e, %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
pclmulqdq $0x11, %xmm12, %xmm3
pclmulqdq $0x00, %xmm12, %xmm0
pxor %xmm12, %xmm1
pxor %xmm7, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
pxor %xmm0, %xmm4
pxor %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm4
pxor %xmm1, %xmm6
movdqu 32(%rsp), %xmm7
pshufd $0x4e, %xmm13, %xmm1
pshufd $0x4e, %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
pclmulqdq $0x11, %xmm13, %xmm3
pclmulqdq $0x00, %xmm13, %xmm0
pxor %xmm13, %xmm1
pxor %xmm7, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
pxor %xmm0, %xmm4
pxor %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm4
pxor %xmm1, %xmm6
movdqu 16(%rsp), %xmm7
pshufd $0x4e, %xmm14, %xmm1
pshufd $0x4e, %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
pclmulqdq $0x11, %xmm14, %xmm3
pclmulqdq $0x00, %xmm14, %xmm0
pxor %xmm14, %xmm1
pxor %xmm7, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
pxor %xmm0, %xmm4
pxor %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm4
pxor %xmm1, %xmm6
movdqu (%rsp), %xmm7
pshufd $0x4e, %xmm15, %xmm1
pshufd $0x4e, %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
pclmulqdq $0x11, %xmm15, %xmm3
pclmulqdq $0x00, %xmm15, %xmm0
pxor %xmm15, %xmm1
pxor %xmm7, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
pxor %xmm0, %xmm4
pxor %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm4
pxor %xmm1, %xmm6
movdqa %xmm4, %xmm0
movdqa %xmm4, %xmm1
movdqa %xmm4, %xmm2
pslld $31, %xmm0
pslld $30, %xmm1
pslld $25, %xmm2
pxor %xmm1, %xmm0
pxor %xmm2, %xmm0
movdqa %xmm0, %xmm1
psrldq $4, %xmm1
pslldq $12, %xmm0
pxor %xmm0, %xmm4
movdqa %xmm4, %xmm2
movdqa %xmm4, %xmm3
movdqa %xmm4, %xmm0
psrld $0x01, %xmm2
psrld $2, %xmm3
psrld $7, %xmm0
pxor %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm2
pxor %xmm4, %xmm2
pxor %xmm2, %xmm6
movdqu (%rsp), %xmm5
L_AES_GCM_encrypt_update_aesni_done_128:
movl %r8d, %edx
cmpl %edx, %r14d
jge L_AES_GCM_encrypt_update_aesni_done_enc
movl %r8d, %r13d
andl $0xfffffff0, %r13d
cmpl %r13d, %r14d
jge L_AES_GCM_encrypt_update_aesni_last_block_done
leaq (%r11,%r14,1), %rcx
leaq (%r10,%r14,1), %rdx
movdqu (%r12), %xmm8
movdqa %xmm8, %xmm9
pshufb L_aes_gcm_bswap_epi64(%rip), %xmm8
paddd L_aes_gcm_one(%rip), %xmm9
pxor (%rdi), %xmm8
movdqu %xmm9, (%r12)
aesenc 16(%rdi), %xmm8
aesenc 32(%rdi), %xmm8
aesenc 48(%rdi), %xmm8
aesenc 64(%rdi), %xmm8
aesenc 80(%rdi), %xmm8
aesenc 96(%rdi), %xmm8
aesenc 112(%rdi), %xmm8
aesenc 128(%rdi), %xmm8
aesenc 144(%rdi), %xmm8
cmpl $11, %esi
movdqa 160(%rdi), %xmm9
jl L_AES_GCM_encrypt_update_aesni_aesenc_block_aesenc_avx_last
aesenc %xmm9, %xmm8
aesenc 176(%rdi), %xmm8
cmpl $13, %esi
movdqa 192(%rdi), %xmm9
jl L_AES_GCM_encrypt_update_aesni_aesenc_block_aesenc_avx_last
aesenc %xmm9, %xmm8
aesenc 208(%rdi), %xmm8
movdqa 224(%rdi), %xmm9
L_AES_GCM_encrypt_update_aesni_aesenc_block_aesenc_avx_last:
aesenclast %xmm9, %xmm8
movdqu (%rcx), %xmm9
pxor %xmm9, %xmm8
movdqu %xmm8, (%rdx)
pshufb L_aes_gcm_bswap_mask(%rip), %xmm8
pxor %xmm8, %xmm6
addl $16, %r14d
cmpl %r13d, %r14d
jge L_AES_GCM_encrypt_update_aesni_last_block_ghash
L_AES_GCM_encrypt_update_aesni_last_block_start:
leaq (%r11,%r14,1), %rcx
leaq (%r10,%r14,1), %rdx
movdqu (%r12), %xmm8
movdqa %xmm8, %xmm9
pshufb L_aes_gcm_bswap_epi64(%rip), %xmm8
paddd L_aes_gcm_one(%rip), %xmm9
pxor (%rdi), %xmm8
movdqu %xmm9, (%r12)
movdqa %xmm6, %xmm10
pclmulqdq $16, %xmm5, %xmm10
aesenc 16(%rdi), %xmm8
aesenc 32(%rdi), %xmm8
movdqa %xmm6, %xmm11
pclmulqdq $0x01, %xmm5, %xmm11
aesenc 48(%rdi), %xmm8
aesenc 64(%rdi), %xmm8
movdqa %xmm6, %xmm12
pclmulqdq $0x00, %xmm5, %xmm12
aesenc 80(%rdi), %xmm8
movdqa %xmm6, %xmm1
pclmulqdq $0x11, %xmm5, %xmm1
aesenc 96(%rdi), %xmm8
pxor %xmm11, %xmm10
movdqa %xmm10, %xmm2
psrldq $8, %xmm10
pslldq $8, %xmm2
aesenc 112(%rdi), %xmm8
movdqa %xmm1, %xmm3
pxor %xmm12, %xmm2
pxor %xmm10, %xmm3
movdqa L_aes_gcm_mod2_128(%rip), %xmm0
movdqa %xmm2, %xmm11
pclmulqdq $16, %xmm0, %xmm11
aesenc 128(%rdi), %xmm8
pshufd $0x4e, %xmm2, %xmm10
pxor %xmm11, %xmm10
movdqa %xmm10, %xmm11
pclmulqdq $16, %xmm0, %xmm11
aesenc 144(%rdi), %xmm8
pshufd $0x4e, %xmm10, %xmm6
pxor %xmm11, %xmm6
pxor %xmm3, %xmm6
cmpl $11, %esi
movdqa 160(%rdi), %xmm9
jl L_AES_GCM_encrypt_update_aesni_aesenc_gfmul_last
aesenc %xmm9, %xmm8
aesenc 176(%rdi), %xmm8
cmpl $13, %esi
movdqa 192(%rdi), %xmm9
jl L_AES_GCM_encrypt_update_aesni_aesenc_gfmul_last
aesenc %xmm9, %xmm8
aesenc 208(%rdi), %xmm8
movdqa 224(%rdi), %xmm9
L_AES_GCM_encrypt_update_aesni_aesenc_gfmul_last:
aesenclast %xmm9, %xmm8
movdqu (%rcx), %xmm9
pxor %xmm9, %xmm8
movdqu %xmm8, (%rdx)
pshufb L_aes_gcm_bswap_mask(%rip), %xmm8
pxor %xmm8, %xmm6
addl $16, %r14d
cmpl %r13d, %r14d
jl L_AES_GCM_encrypt_update_aesni_last_block_start
L_AES_GCM_encrypt_update_aesni_last_block_ghash:
pshufd $0x4e, %xmm5, %xmm9
pshufd $0x4e, %xmm6, %xmm10
movdqa %xmm6, %xmm11
movdqa %xmm6, %xmm8
pclmulqdq $0x11, %xmm5, %xmm11
pclmulqdq $0x00, %xmm5, %xmm8
pxor %xmm5, %xmm9
pxor %xmm6, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm6
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm6
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm6
L_AES_GCM_encrypt_update_aesni_last_block_done:
L_AES_GCM_encrypt_update_aesni_done_enc:
movdqa %xmm6, (%r9)
addq $0xa0, %rsp
popq %r14
popq %r12
popq %r13
repz retq
#ifndef __APPLE__
.size AES_GCM_encrypt_update_aesni,.-AES_GCM_encrypt_update_aesni
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl AES_GCM_encrypt_final_aesni
.type AES_GCM_encrypt_final_aesni,@function
.align 16
AES_GCM_encrypt_final_aesni:
#else
.section __TEXT,__text
.globl _AES_GCM_encrypt_final_aesni
.p2align 4
_AES_GCM_encrypt_final_aesni:
#endif /* __APPLE__ */
        # Compute the final GCM authentication tag (AES-NI/SSE path).
        # SysV AMD64 ABI.
        # In:  rdi      = 16-byte GHASH accumulator X (read below)
        #      rsi      = output buffer for the tag
        #      edx      = tag length in bytes (16 takes the fast store path)
        #      ecx      = total ciphertext length in bytes
        #      r8d      = total AAD length in bytes
        #      r9       = 16-byte hash key H
        #      stack    = 7th arg: 16-byte block XORed into the final GHASH
        #                 value to form the tag (NOTE(review): presumably
        #                 E(K, counter0) — confirm against the C caller)
        # Out: tag written to (%rsi); rax/rcx/rdx/r8/r10/r11, xmm0,
        #      xmm4-xmm13 and flags clobbered; r13 saved/restored.
        pushq	%r13
        movl	%edx, %eax              # eax = tag length in bytes
        movl	%ecx, %r10d             # r10d = ciphertext length (bytes)
        movl	%r8d, %r11d             # r11d = AAD length (bytes)
        # First stack argument: retaddr at 8(%rsp), saved r13 at (%rsp),
        # so the 7th argument sits at 16(%rsp).
        movq	16(%rsp), %r8
        subq	$16, %rsp               # 16-byte scratch for partial-tag spill
        movdqa	(%rdi), %xmm4           # xmm4 = GHASH state X
        movdqa	(%r9), %xmm5            # xmm5 = hash key H
        movdqa	(%r8), %xmm6            # xmm6 = block folded into the tag
        # Multiply H by x in GF(2^128): 128-bit left shift by one with the
        # carry propagated between the two 64-bit halves, then conditionally
        # XOR the reduction constant if a bit shifted out of the top.
        movdqa	%xmm5, %xmm8
        movdqa	%xmm5, %xmm7
        psrlq	$63, %xmm8              # carry bits out of each qword
        psllq	$0x01, %xmm7            # each qword shifted left by 1
        pslldq	$8, %xmm8               # move low-qword carry into high qword
        por	%xmm8, %xmm7            # xmm7 = H << 1 (128-bit)
        pshufd	$0xff, %xmm5, %xmm5     # broadcast top dword
        psrad	$31, %xmm5              # mask = top bit set ? all-ones : 0
        pand	L_aes_gcm_mod2_128(%rip), %xmm5 # select reduction poly
        pxor	%xmm7, %xmm5            # xmm5 = reduced (H * x)
        # Build the GHASH length block: low qword = ciphertext length in
        # bits, high qword = AAD length in bits; fold it into X.
        movl	%r10d, %edx
        movl	%r11d, %ecx
        shlq	$3, %rdx                # bytes -> bits
        shlq	$3, %rcx                # bytes -> bits
        pinsrq	$0x00, %rdx, %xmm0
        pinsrq	$0x01, %rcx, %xmm0
        pxor	%xmm0, %xmm4            # X ^= len block
        # GHASH multiply X * H' via Karatsuba: three pclmulqdq products —
        # hi*hi (xmm10), lo*lo (xmm7), and (hi^lo)*(hi^lo) (xmm8) for the
        # middle term.
        pshufd	$0x4e, %xmm5, %xmm8     # swap qwords of H'
        pshufd	$0x4e, %xmm4, %xmm9     # swap qwords of X
        movdqa	%xmm4, %xmm10
        movdqa	%xmm4, %xmm7
        pclmulqdq	$0x11, %xmm5, %xmm10 # high halves product
        pclmulqdq	$0x00, %xmm5, %xmm7  # low halves product
        pxor	%xmm5, %xmm8            # H'.hi ^ H'.lo
        pxor	%xmm4, %xmm9            # X.hi ^ X.lo
        pclmulqdq	$0x00, %xmm9, %xmm8  # middle product
        pxor	%xmm7, %xmm8            # middle -= lo (XOR in GF(2))
        pxor	%xmm10, %xmm8           # middle -= hi
        # Assemble the 256-bit product: xmm7 = low 128 bits, xmm4 = high.
        movdqa	%xmm8, %xmm9
        movdqa	%xmm10, %xmm4
        pslldq	$8, %xmm9               # middle low half -> upper of low 128
        psrldq	$8, %xmm8               # middle high half -> lower of high 128
        pxor	%xmm9, %xmm7
        pxor	%xmm8, %xmm4
        # Reduce the 256-bit product modulo the GCM polynomial using the
        # shift-and-XOR pattern (left shifts 31/30/25, right shifts 1/2/7)
        # used throughout this file.
        movdqa	%xmm7, %xmm11
        movdqa	%xmm7, %xmm12
        movdqa	%xmm7, %xmm13
        pslld	$31, %xmm11
        pslld	$30, %xmm12
        pslld	$25, %xmm13
        pxor	%xmm12, %xmm11
        pxor	%xmm13, %xmm11
        movdqa	%xmm11, %xmm12
        psrldq	$4, %xmm12
        pslldq	$12, %xmm11
        pxor	%xmm11, %xmm7
        movdqa	%xmm7, %xmm13
        movdqa	%xmm7, %xmm9
        movdqa	%xmm7, %xmm8
        psrld	$0x01, %xmm13
        psrld	$2, %xmm9
        psrld	$7, %xmm8
        pxor	%xmm9, %xmm13
        pxor	%xmm8, %xmm13
        pxor	%xmm12, %xmm13
        pxor	%xmm7, %xmm13
        pxor	%xmm13, %xmm4           # xmm4 = GHASH result (bit-reflected)
        pshufb	L_aes_gcm_bswap_mask(%rip), %xmm4 # back to byte order
        movdqu	%xmm6, %xmm0
        pxor	%xmm4, %xmm0            # tag = GHASH ^ xmm6 block
        cmpl	$16, %eax
        je	L_AES_GCM_encrypt_final_aesni_store_tag_16
        # Partial tag: spill to the stack scratch and copy eax bytes.
        xorq	%rcx, %rcx
        movdqu	%xmm0, (%rsp)
L_AES_GCM_encrypt_final_aesni_store_tag_loop:
        movzbl	(%rsp,%rcx,1), %r13d
        movb	%r13b, (%rsi,%rcx,1)
        incl	%ecx
        cmpl	%eax, %ecx
        jne	L_AES_GCM_encrypt_final_aesni_store_tag_loop
        jmp	L_AES_GCM_encrypt_final_aesni_store_tag_done
L_AES_GCM_encrypt_final_aesni_store_tag_16:
        movdqu	%xmm0, (%rsi)           # full 16-byte tag fast path
L_AES_GCM_encrypt_final_aesni_store_tag_done:
        addq	$16, %rsp
        popq	%r13
        repz retq                       # 2-byte return (branch-predictor safe)
#ifndef __APPLE__
.size	AES_GCM_encrypt_final_aesni,.-AES_GCM_encrypt_final_aesni
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl AES_GCM_decrypt_update_aesni
.type AES_GCM_decrypt_update_aesni,@function
.align 16
AES_GCM_decrypt_update_aesni:
#else
.section __TEXT,__text
.globl _AES_GCM_decrypt_update_aesni
.p2align 4
_AES_GCM_decrypt_update_aesni:
#endif /* __APPLE__ */
pushq %r13
pushq %r12
pushq %r14
pushq %r15
movq %rdx, %r10
movq %rcx, %r11
movq 40(%rsp), %rax
movq 48(%rsp), %r12
subq $0xa8, %rsp
movdqa (%r9), %xmm6
movdqa (%rax), %xmm5
movdqa %xmm5, %xmm9
movdqa %xmm5, %xmm8
psrlq $63, %xmm9
psllq $0x01, %xmm8
pslldq $8, %xmm9
por %xmm9, %xmm8
pshufd $0xff, %xmm5, %xmm5
psrad $31, %xmm5
pand L_aes_gcm_mod2_128(%rip), %xmm5
pxor %xmm8, %xmm5
xorl %r14d, %r14d
cmpl $0x80, %r8d
movl %r8d, %r13d
jl L_AES_GCM_decrypt_update_aesni_done_128
andl $0xffffff80, %r13d
movdqa %xmm6, %xmm2
# H ^ 1
movdqu %xmm5, (%rsp)
# H ^ 2
pshufd $0x4e, %xmm5, %xmm9
pshufd $0x4e, %xmm5, %xmm10
movdqa %xmm5, %xmm11
movdqa %xmm5, %xmm8
pclmulqdq $0x11, %xmm5, %xmm11
pclmulqdq $0x00, %xmm5, %xmm8
pxor %xmm5, %xmm9
pxor %xmm5, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm0
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm0
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm0
movdqu %xmm0, 16(%rsp)
# H ^ 3
pshufd $0x4e, %xmm5, %xmm9
pshufd $0x4e, %xmm0, %xmm10
movdqa %xmm0, %xmm11
movdqa %xmm0, %xmm8
pclmulqdq $0x11, %xmm5, %xmm11
pclmulqdq $0x00, %xmm5, %xmm8
pxor %xmm5, %xmm9
pxor %xmm0, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm1
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm1
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm1
movdqu %xmm1, 32(%rsp)
# H ^ 4
pshufd $0x4e, %xmm0, %xmm9
pshufd $0x4e, %xmm0, %xmm10
movdqa %xmm0, %xmm11
movdqa %xmm0, %xmm8
pclmulqdq $0x11, %xmm0, %xmm11
pclmulqdq $0x00, %xmm0, %xmm8
pxor %xmm0, %xmm9
pxor %xmm0, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm3
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm3
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm3
movdqu %xmm3, 48(%rsp)
# H ^ 5
pshufd $0x4e, %xmm0, %xmm9
pshufd $0x4e, %xmm1, %xmm10
movdqa %xmm1, %xmm11
movdqa %xmm1, %xmm8
pclmulqdq $0x11, %xmm0, %xmm11
pclmulqdq $0x00, %xmm0, %xmm8
pxor %xmm0, %xmm9
pxor %xmm1, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm7
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm7
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm7
movdqu %xmm7, 64(%rsp)
# H ^ 6
pshufd $0x4e, %xmm1, %xmm9
pshufd $0x4e, %xmm1, %xmm10
movdqa %xmm1, %xmm11
movdqa %xmm1, %xmm8
pclmulqdq $0x11, %xmm1, %xmm11
pclmulqdq $0x00, %xmm1, %xmm8
pxor %xmm1, %xmm9
pxor %xmm1, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm7
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm7
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm7
movdqu %xmm7, 80(%rsp)
# H ^ 7
pshufd $0x4e, %xmm1, %xmm9
pshufd $0x4e, %xmm3, %xmm10
movdqa %xmm3, %xmm11
movdqa %xmm3, %xmm8
pclmulqdq $0x11, %xmm1, %xmm11
pclmulqdq $0x00, %xmm1, %xmm8
pxor %xmm1, %xmm9
pxor %xmm3, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm7
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm7
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm7
movdqu %xmm7, 96(%rsp)
# H ^ 8
pshufd $0x4e, %xmm3, %xmm9
pshufd $0x4e, %xmm3, %xmm10
movdqa %xmm3, %xmm11
movdqa %xmm3, %xmm8
pclmulqdq $0x11, %xmm3, %xmm11
pclmulqdq $0x00, %xmm3, %xmm8
pxor %xmm3, %xmm9
pxor %xmm3, %xmm10
pclmulqdq $0x00, %xmm10, %xmm9
pxor %xmm8, %xmm9
pxor %xmm11, %xmm9
movdqa %xmm9, %xmm10
movdqa %xmm11, %xmm7
pslldq $8, %xmm10
psrldq $8, %xmm9
pxor %xmm10, %xmm8
pxor %xmm9, %xmm7
movdqa %xmm8, %xmm12
movdqa %xmm8, %xmm13
movdqa %xmm8, %xmm14
pslld $31, %xmm12
pslld $30, %xmm13
pslld $25, %xmm14
pxor %xmm13, %xmm12
pxor %xmm14, %xmm12
movdqa %xmm12, %xmm13
psrldq $4, %xmm13
pslldq $12, %xmm12
pxor %xmm12, %xmm8
movdqa %xmm8, %xmm14
movdqa %xmm8, %xmm10
movdqa %xmm8, %xmm9
psrld $0x01, %xmm14
psrld $2, %xmm10
psrld $7, %xmm9
pxor %xmm10, %xmm14
pxor %xmm9, %xmm14
pxor %xmm13, %xmm14
pxor %xmm8, %xmm14
pxor %xmm14, %xmm7
movdqu %xmm7, 112(%rsp)
L_AES_GCM_decrypt_update_aesni_ghash_128:
leaq (%r11,%r14,1), %rcx
leaq (%r10,%r14,1), %rdx
movdqu (%r12), %xmm8
movdqa L_aes_gcm_bswap_epi64(%rip), %xmm1
movdqa %xmm8, %xmm0
pshufb %xmm1, %xmm8
movdqa %xmm0, %xmm9
paddd L_aes_gcm_one(%rip), %xmm9
pshufb %xmm1, %xmm9
movdqa %xmm0, %xmm10
paddd L_aes_gcm_two(%rip), %xmm10
pshufb %xmm1, %xmm10
movdqa %xmm0, %xmm11
paddd L_aes_gcm_three(%rip), %xmm11
pshufb %xmm1, %xmm11
movdqa %xmm0, %xmm12
paddd L_aes_gcm_four(%rip), %xmm12
pshufb %xmm1, %xmm12
movdqa %xmm0, %xmm13
paddd L_aes_gcm_five(%rip), %xmm13
pshufb %xmm1, %xmm13
movdqa %xmm0, %xmm14
paddd L_aes_gcm_six(%rip), %xmm14
pshufb %xmm1, %xmm14
movdqa %xmm0, %xmm15
paddd L_aes_gcm_seven(%rip), %xmm15
pshufb %xmm1, %xmm15
paddd L_aes_gcm_eight(%rip), %xmm0
movdqa (%rdi), %xmm7
movdqu %xmm0, (%r12)
pxor %xmm7, %xmm8
pxor %xmm7, %xmm9
pxor %xmm7, %xmm10
pxor %xmm7, %xmm11
pxor %xmm7, %xmm12
pxor %xmm7, %xmm13
pxor %xmm7, %xmm14
pxor %xmm7, %xmm15
movdqu 112(%rsp), %xmm7
movdqu (%rcx), %xmm0
aesenc 16(%rdi), %xmm8
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
pxor %xmm2, %xmm0
pshufd $0x4e, %xmm7, %xmm1
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm7, %xmm1
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm3
pclmulqdq $0x11, %xmm7, %xmm3
aesenc 16(%rdi), %xmm9
aesenc 16(%rdi), %xmm10
movdqa %xmm0, %xmm2
pclmulqdq $0x00, %xmm7, %xmm2
aesenc 16(%rdi), %xmm11
aesenc 16(%rdi), %xmm12
pclmulqdq $0x00, %xmm5, %xmm1
aesenc 16(%rdi), %xmm13
aesenc 16(%rdi), %xmm14
aesenc 16(%rdi), %xmm15
pxor %xmm2, %xmm1
pxor %xmm3, %xmm1
movdqu 96(%rsp), %xmm7
movdqu 16(%rcx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 32(%rdi), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 32(%rdi), %xmm9
aesenc 32(%rdi), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 32(%rdi), %xmm11
aesenc 32(%rdi), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 32(%rdi), %xmm13
aesenc 32(%rdi), %xmm14
aesenc 32(%rdi), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu 80(%rsp), %xmm7
movdqu 32(%rcx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 48(%rdi), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 48(%rdi), %xmm9
aesenc 48(%rdi), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 48(%rdi), %xmm11
aesenc 48(%rdi), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 48(%rdi), %xmm13
aesenc 48(%rdi), %xmm14
aesenc 48(%rdi), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu 64(%rsp), %xmm7
movdqu 48(%rcx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 64(%rdi), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 64(%rdi), %xmm9
aesenc 64(%rdi), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 64(%rdi), %xmm11
aesenc 64(%rdi), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 64(%rdi), %xmm13
aesenc 64(%rdi), %xmm14
aesenc 64(%rdi), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu 48(%rsp), %xmm7
movdqu 64(%rcx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 80(%rdi), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 80(%rdi), %xmm9
aesenc 80(%rdi), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 80(%rdi), %xmm11
aesenc 80(%rdi), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 80(%rdi), %xmm13
aesenc 80(%rdi), %xmm14
aesenc 80(%rdi), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu 32(%rsp), %xmm7
movdqu 80(%rcx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 96(%rdi), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 96(%rdi), %xmm9
aesenc 96(%rdi), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 96(%rdi), %xmm11
aesenc 96(%rdi), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 96(%rdi), %xmm13
aesenc 96(%rdi), %xmm14
aesenc 96(%rdi), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu 16(%rsp), %xmm7
movdqu 96(%rcx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 112(%rdi), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 112(%rdi), %xmm9
aesenc 112(%rdi), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 112(%rdi), %xmm11
aesenc 112(%rdi), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 112(%rdi), %xmm13
aesenc 112(%rdi), %xmm14
aesenc 112(%rdi), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu (%rsp), %xmm7
movdqu 112(%rcx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask(%rip), %xmm0
aesenc 128(%rdi), %xmm8
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
aesenc 128(%rdi), %xmm9
aesenc 128(%rdi), %xmm10
pclmulqdq $0x00, %xmm0, %xmm7
aesenc 128(%rdi), %xmm11
aesenc 128(%rdi), %xmm12
pclmulqdq $0x00, %xmm5, %xmm4
aesenc 128(%rdi), %xmm13
aesenc 128(%rdi), %xmm14
aesenc 128(%rdi), %xmm15
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqa %xmm1, %xmm5
psrldq $8, %xmm1
pslldq $8, %xmm5
aesenc 144(%rdi), %xmm8
pxor %xmm5, %xmm2
pxor %xmm1, %xmm3
movdqa %xmm2, %xmm7
movdqa %xmm2, %xmm4
movdqa %xmm2, %xmm5
aesenc 144(%rdi), %xmm9
pslld $31, %xmm7
pslld $30, %xmm4
pslld $25, %xmm5
aesenc 144(%rdi), %xmm10
pxor %xmm4, %xmm7
pxor %xmm5, %xmm7
aesenc 144(%rdi), %xmm11
movdqa %xmm7, %xmm4
pslldq $12, %xmm7
psrldq $4, %xmm4
aesenc 144(%rdi), %xmm12
pxor %xmm7, %xmm2
movdqa %xmm2, %xmm5
movdqa %xmm2, %xmm1
movdqa %xmm2, %xmm0
aesenc 144(%rdi), %xmm13
psrld $0x01, %xmm5
psrld $2, %xmm1
psrld $7, %xmm0
aesenc 144(%rdi), %xmm14
pxor %xmm1, %xmm5
pxor %xmm0, %xmm5
aesenc 144(%rdi), %xmm15
pxor %xmm4, %xmm5
pxor %xmm5, %xmm2
pxor %xmm3, %xmm2
cmpl $11, %esi
movdqa 160(%rdi), %xmm7
jl L_AES_GCM_decrypt_update_aesni_aesenc_128_ghash_avx_done
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 176(%rdi), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
cmpl $13, %esi
movdqa 192(%rdi), %xmm7
jl L_AES_GCM_decrypt_update_aesni_aesenc_128_ghash_avx_done
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 208(%rdi), %xmm7
aesenc %xmm7, %xmm8
aesenc %xmm7, %xmm9
aesenc %xmm7, %xmm10
aesenc %xmm7, %xmm11
aesenc %xmm7, %xmm12
aesenc %xmm7, %xmm13
aesenc %xmm7, %xmm14
aesenc %xmm7, %xmm15
movdqa 224(%rdi), %xmm7
L_AES_GCM_decrypt_update_aesni_aesenc_128_ghash_avx_done:
aesenclast %xmm7, %xmm8
aesenclast %xmm7, %xmm9
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
pxor %xmm0, %xmm8
pxor %xmm1, %xmm9
movdqu %xmm8, (%rdx)
movdqu %xmm9, 16(%rdx)
aesenclast %xmm7, %xmm10
aesenclast %xmm7, %xmm11
movdqu 32(%rcx), %xmm0
movdqu 48(%rcx), %xmm1
pxor %xmm0, %xmm10
pxor %xmm1, %xmm11
movdqu %xmm10, 32(%rdx)
movdqu %xmm11, 48(%rdx)
aesenclast %xmm7, %xmm12
aesenclast %xmm7, %xmm13
movdqu 64(%rcx), %xmm0
movdqu 80(%rcx), %xmm1
pxor %xmm0, %xmm12
pxor %xmm1, %xmm13
movdqu %xmm12, 64(%rdx)
movdqu %xmm13, 80(%rdx)
aesenclast %xmm7, %xmm14
aesenclast %xmm7, %xmm15
movdqu 96(%rcx), %xmm0
movdqu 112(%rcx), %xmm1
pxor %xmm0, %xmm14
pxor %xmm1, %xmm15
movdqu %xmm14, 96(%rdx)
movdqu %xmm15, 112(%rdx)
addl $0x80, %r14d
cmpl %r13d, %r14d
jl L_AES_GCM_decrypt_update_aesni_ghash_128
movdqa %xmm2, %xmm6
movdqu (%rsp), %xmm5
L_AES_GCM_decrypt_update_aesni_done_128:
movl %r8d, %edx
cmpl %edx, %r14d
jge L_AES_GCM_decrypt_update_aesni_done_dec
movl %r8d, %r13d
andl $0xfffffff0, %r13d
cmpl %r13d, %r14d
jge L_AES_GCM_decrypt_update_aesni_last_block_done
L_AES_GCM_decrypt_update_aesni_last_block_start:
leaq (%r11,%r14,1), %rcx
leaq (%r10,%r14,1), %rdx
movdqu (%rcx), %xmm1
movdqa %xmm5, %xmm0
pshufb L_aes_gcm_bswap_mask(%rip), %xmm1
pxor %xmm6, %xmm1
movdqu (%r12), %xmm8
movdqa %xmm8, %xmm9
pshufb L_aes_gcm_bswap_epi64(%rip), %xmm8
paddd L_aes_gcm_one(%rip), %xmm9
pxor (%rdi), %xmm8
movdqu %xmm9, (%r12)
movdqa %xmm1, %xmm10
pclmulqdq $16, %xmm0, %xmm10
aesenc 16(%rdi), %xmm8
aesenc 32(%rdi), %xmm8
movdqa %xmm1, %xmm11
pclmulqdq $0x01, %xmm0, %xmm11
aesenc 48(%rdi), %xmm8
aesenc 64(%rdi), %xmm8
movdqa %xmm1, %xmm12
pclmulqdq $0x00, %xmm0, %xmm12
aesenc 80(%rdi), %xmm8
movdqa %xmm1, %xmm1
pclmulqdq $0x11, %xmm0, %xmm1
aesenc 96(%rdi), %xmm8
pxor %xmm11, %xmm10
movdqa %xmm10, %xmm2
psrldq $8, %xmm10
pslldq $8, %xmm2
aesenc 112(%rdi), %xmm8
movdqa %xmm1, %xmm3
pxor %xmm12, %xmm2
pxor %xmm10, %xmm3
movdqa L_aes_gcm_mod2_128(%rip), %xmm0
movdqa %xmm2, %xmm11
pclmulqdq $16, %xmm0, %xmm11
aesenc 128(%rdi), %xmm8
pshufd $0x4e, %xmm2, %xmm10
pxor %xmm11, %xmm10
movdqa %xmm10, %xmm11
pclmulqdq $16, %xmm0, %xmm11
aesenc 144(%rdi), %xmm8
pshufd $0x4e, %xmm10, %xmm6
pxor %xmm11, %xmm6
pxor %xmm3, %xmm6
cmpl $11, %esi
movdqa 160(%rdi), %xmm9
jl L_AES_GCM_decrypt_update_aesni_aesenc_gfmul_last
aesenc %xmm9, %xmm8
aesenc 176(%rdi), %xmm8
cmpl $13, %esi
movdqa 192(%rdi), %xmm9
jl L_AES_GCM_decrypt_update_aesni_aesenc_gfmul_last
aesenc %xmm9, %xmm8
aesenc 208(%rdi), %xmm8
movdqa 224(%rdi), %xmm9
L_AES_GCM_decrypt_update_aesni_aesenc_gfmul_last:
aesenclast %xmm9, %xmm8
movdqu (%rcx), %xmm9
pxor %xmm9, %xmm8
movdqu %xmm8, (%rdx)
addl $16, %r14d
cmpl %r13d, %r14d
jl L_AES_GCM_decrypt_update_aesni_last_block_start
L_AES_GCM_decrypt_update_aesni_last_block_done:
L_AES_GCM_decrypt_update_aesni_done_dec:
movdqa %xmm6, (%r9)
addq $0xa8, %rsp
popq %r15
popq %r14
popq %r12
popq %r13
repz retq
#ifndef __APPLE__
.size AES_GCM_decrypt_update_aesni,.-AES_GCM_decrypt_update_aesni
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl AES_GCM_decrypt_final_aesni
.type AES_GCM_decrypt_final_aesni,@function
.align 16
AES_GCM_decrypt_final_aesni:
#else
.section __TEXT,__text
.globl _AES_GCM_decrypt_final_aesni
.p2align 4
_AES_GCM_decrypt_final_aesni:
#endif /* __APPLE__ */
# AES-GCM decrypt finalize (AES-NI + PCLMULQDQ, System V AMD64 ABI).
# Folds the GCM length block into the running GHASH, derives the expected
# authentication tag and compares it with the caller-supplied tag.
# In (regs):  rdi = running GHASH state X (16 bytes)
#             rsi = authentication tag to verify against
#             edx = tag size in bytes (16 or less)
#             ecx, r8d = the two byte lengths folded into the length
#                 block (presumably ciphertext and AAD sizes - confirm
#                 order against the C prototype in the caller)
#             r9  = stored hash-key value; H is derived from it below
# In (stack): arg7 = pointer to E(K, Y0) (first encrypted counter block)
#             arg8 = int* res; receives 1 on tag match, 0 on mismatch
# Clobbers:   rax, rcx, rdx, r10, r11, xmm0-xmm15, flags
pushq %r13
pushq %rbp
pushq %r12
movl %edx, %eax
movl %ecx, %r10d
movl %r8d, %r11d
# After the three pushes the 7th/8th stack args sit at 32/40(%rsp).
movq 32(%rsp), %r8
movq 40(%rsp), %rbp
subq $16, %rsp
# xmm6 = GHASH state X, xmm5 = stored key value, xmm15 = E(K, Y0)
movdqa (%rdi), %xmm6
movdqa (%r9), %xmm5
movdqa (%r8), %xmm15
# Derive H: multiply the stored value by x in GF(2^128) - shift the
# 128-bit value left one bit (carry moved between qwords via psrlq/pslldq)
# and XOR in the reduction constant when the top bit was set (the
# psrad $31 broadcast of the sign bit forms the conditional mask).
movdqa %xmm5, %xmm8
movdqa %xmm5, %xmm7
psrlq $63, %xmm8
psllq $0x01, %xmm7
pslldq $8, %xmm8
por %xmm8, %xmm7
pshufd $0xff, %xmm5, %xmm5
psrad $31, %xmm5
pand L_aes_gcm_mod2_128(%rip), %xmm5
pxor %xmm7, %xmm5
# Build the GCM length block: both lengths converted to bit counts
# (shlq $3) and packed into the two 64-bit lanes of xmm0.
movl %r10d, %edx
movl %r11d, %ecx
shlq $3, %rdx
shlq $3, %rcx
pinsrq $0x00, %rdx, %xmm0
pinsrq $0x01, %rcx, %xmm0
pxor %xmm0, %xmm6
# ghash_gfmul_red: xmm6 = X * H in GF(2^128) using Karatsuba - three
# carry-less multiplies (hi*hi, lo*lo, (hi^lo)*(hi^lo)) combined into a
# 256-bit product split across xmm7 (low) and xmm6 (high).
pshufd $0x4e, %xmm5, %xmm8
pshufd $0x4e, %xmm6, %xmm9
movdqa %xmm6, %xmm10
movdqa %xmm6, %xmm7
pclmulqdq $0x11, %xmm5, %xmm10
pclmulqdq $0x00, %xmm5, %xmm7
pxor %xmm5, %xmm8
pxor %xmm6, %xmm9
pclmulqdq $0x00, %xmm9, %xmm8
pxor %xmm7, %xmm8
pxor %xmm10, %xmm8
movdqa %xmm8, %xmm9
movdqa %xmm10, %xmm6
pslldq $8, %xmm9
psrldq $8, %xmm8
pxor %xmm9, %xmm7
pxor %xmm8, %xmm6
# Shift-based reduction of the 256-bit product modulo the GHASH
# polynomial (x^128 + x^7 + x^2 + x + 1); result lands in xmm6.
movdqa %xmm7, %xmm11
movdqa %xmm7, %xmm12
movdqa %xmm7, %xmm13
pslld $31, %xmm11
pslld $30, %xmm12
pslld $25, %xmm13
pxor %xmm12, %xmm11
pxor %xmm13, %xmm11
movdqa %xmm11, %xmm12
psrldq $4, %xmm12
pslldq $12, %xmm11
pxor %xmm11, %xmm7
movdqa %xmm7, %xmm13
movdqa %xmm7, %xmm9
movdqa %xmm7, %xmm8
psrld $0x01, %xmm13
psrld $2, %xmm9
psrld $7, %xmm8
pxor %xmm9, %xmm13
pxor %xmm8, %xmm13
pxor %xmm12, %xmm13
pxor %xmm7, %xmm13
pxor %xmm13, %xmm6
# Convert GHASH result back to byte order and whiten with E(K, Y0):
# xmm0 = expected authentication tag.
pshufb L_aes_gcm_bswap_mask(%rip), %xmm6
movdqu %xmm15, %xmm0
pxor %xmm6, %xmm0
cmpl $16, %eax
je L_AES_GCM_decrypt_final_aesni_cmp_tag_16
# Partial tag (< 16 bytes): spill the computed tag to the stack and
# compare byte-by-byte in constant time, OR-accumulating differences
# into r12b so timing does not reveal the first mismatching byte.
subq $16, %rsp
xorq %rcx, %rcx
xorq %r12, %r12
movdqu %xmm0, (%rsp)
L_AES_GCM_decrypt_final_aesni_cmp_tag_loop:
movzbl (%rsp,%rcx,1), %r13d
xorb (%rsi,%rcx,1), %r13b
orb %r13b, %r12b
incl %ecx
cmpl %eax, %ecx
jne L_AES_GCM_decrypt_final_aesni_cmp_tag_loop
cmpb $0x00, %r12b
sete %r12b
addq $16, %rsp
xorq %rcx, %rcx
jmp L_AES_GCM_decrypt_final_aesni_cmp_tag_done
L_AES_GCM_decrypt_final_aesni_cmp_tag_16:
# Full 16-byte tag: single SIMD equality test.
movdqu (%rsi), %xmm1
pcmpeqb %xmm1, %xmm0
pmovmskb %xmm0, %rdx
# All 16 bytes equal iff the byte mask is 0xffff; r12b = 1 on match.
xorl %r12d, %r12d
cmpl $0xffff, %edx
sete %r12b
L_AES_GCM_decrypt_final_aesni_cmp_tag_done:
# *res = comparison result (1 = tag valid, 0 = authentication failure).
movl %r12d, (%rbp)
addq $16, %rsp
popq %r12
popq %rbp
popq %r13
repz retq
#ifndef __APPLE__
.size AES_GCM_decrypt_final_aesni,.-AES_GCM_decrypt_final_aesni
#endif /* __APPLE__ */
#endif /* WOLFSSL_AESGCM_STREAM */
#ifdef HAVE_INTEL_AVX1
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
# Counter-increment constants for the AVX1 AES-GCM code: added with
# vpaddd to the counter block while it is held in byte-swapped form.
# The increment value lives in the second (upper-address) qword.
L_avx1_aes_gcm_one:
.quad 0x0, 0x1
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
# Counter increment by 2 (see L_avx1_aes_gcm_one).
L_avx1_aes_gcm_two:
.quad 0x0, 0x2
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
# Counter increment by 3.
L_avx1_aes_gcm_three:
.quad 0x0, 0x3
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
# Counter increment by 4.
L_avx1_aes_gcm_four:
.quad 0x0, 0x4
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
# Counter increment by 5.
L_avx1_aes_gcm_five:
.quad 0x0, 0x5
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
# Counter increment by 6.
L_avx1_aes_gcm_six:
.quad 0x0, 0x6
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
# Counter increment by 7.
L_avx1_aes_gcm_seven:
.quad 0x0, 0x7
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
# Counter increment by 8 (one full 8-block batch).
L_avx1_aes_gcm_eight:
.quad 0x0, 0x8
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
# vpshufb mask reversing the byte order within each 64-bit half
# (byte indices 7..0 and 15..8) - used on the counter block.
L_avx1_aes_gcm_bswap_epi64:
.quad 0x1020304050607, 0x8090a0b0c0d0e0f
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
# vpshufb mask reversing all 16 bytes - converts a block between the
# big-endian GHASH representation and memory byte order.
L_avx1_aes_gcm_bswap_mask:
.quad 0x8090a0b0c0d0e0f, 0x1020304050607
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
# 128-bit constant 0xc2000000000000000000000000000001: the reduction
# constant for GF(2^128) with the GHASH polynomial
# x^128 + x^7 + x^2 + x + 1, used by the PCLMULQDQ reduction steps.
L_avx1_aes_gcm_mod2_128:
.quad 0x1, 0xc200000000000000
#ifndef __APPLE__
.text
.globl AES_GCM_encrypt_avx1
.type AES_GCM_encrypt_avx1,@function
.align 16
AES_GCM_encrypt_avx1:
#else
.section __TEXT,__text
.globl _AES_GCM_encrypt_avx1
.p2align 4
_AES_GCM_encrypt_avx1:
#endif /* __APPLE__ */
pushq %r13
pushq %r12
pushq %rbx
pushq %r14
pushq %r15
movq %rdx, %r12
movq %rcx, %rax
movl 48(%rsp), %r11d
movl 56(%rsp), %ebx
movl 64(%rsp), %r14d
movq 72(%rsp), %r15
movl 80(%rsp), %r10d
subq $0xa0, %rsp
vpxor %xmm4, %xmm4, %xmm4
vpxor %xmm6, %xmm6, %xmm6
movl %ebx, %edx
cmpl $12, %edx
jne L_AES_GCM_encrypt_avx1_iv_not_12
# # Calculate values when IV is 12 bytes
# Set counter based on IV
movl $0x1000000, %ecx
vmovq (%rax), %xmm4
vpinsrd $2, 8(%rax), %xmm4, %xmm4
vpinsrd $3, %ecx, %xmm4, %xmm4
# H = Encrypt X(=0) and T = Encrypt counter
vmovdqa (%r15), %xmm5
vpxor %xmm5, %xmm4, %xmm1
vmovdqa 16(%r15), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 32(%r15), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 48(%r15), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 64(%r15), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 80(%r15), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 96(%r15), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 112(%r15), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 128(%r15), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 144(%r15), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
cmpl $11, %r10d
vmovdqa 160(%r15), %xmm7
jl L_AES_GCM_encrypt_avx1_calc_iv_12_last
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 176(%r15), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
cmpl $13, %r10d
vmovdqa 192(%r15), %xmm7
jl L_AES_GCM_encrypt_avx1_calc_iv_12_last
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 208(%r15), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 224(%r15), %xmm7
L_AES_GCM_encrypt_avx1_calc_iv_12_last:
vaesenclast %xmm7, %xmm5, %xmm5
vaesenclast %xmm7, %xmm1, %xmm1
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm5, %xmm5
vmovdqu %xmm1, 144(%rsp)
jmp L_AES_GCM_encrypt_avx1_iv_done
L_AES_GCM_encrypt_avx1_iv_not_12:
# Calculate values when IV is not 12 bytes
# H = Encrypt X(=0)
vmovdqa (%r15), %xmm5
vaesenc 16(%r15), %xmm5, %xmm5
vaesenc 32(%r15), %xmm5, %xmm5
vaesenc 48(%r15), %xmm5, %xmm5
vaesenc 64(%r15), %xmm5, %xmm5
vaesenc 80(%r15), %xmm5, %xmm5
vaesenc 96(%r15), %xmm5, %xmm5
vaesenc 112(%r15), %xmm5, %xmm5
vaesenc 128(%r15), %xmm5, %xmm5
vaesenc 144(%r15), %xmm5, %xmm5
cmpl $11, %r10d
vmovdqa 160(%r15), %xmm9
jl L_AES_GCM_encrypt_avx1_calc_iv_1_aesenc_avx_last
vaesenc %xmm9, %xmm5, %xmm5
vaesenc 176(%r15), %xmm5, %xmm5
cmpl $13, %r10d
vmovdqa 192(%r15), %xmm9
jl L_AES_GCM_encrypt_avx1_calc_iv_1_aesenc_avx_last
vaesenc %xmm9, %xmm5, %xmm5
vaesenc 208(%r15), %xmm5, %xmm5
vmovdqa 224(%r15), %xmm9
L_AES_GCM_encrypt_avx1_calc_iv_1_aesenc_avx_last:
vaesenclast %xmm9, %xmm5, %xmm5
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm5, %xmm5
# Calc counter
# Initialization vector
cmpl $0x00, %edx
movq $0x00, %rcx
je L_AES_GCM_encrypt_avx1_calc_iv_done
cmpl $16, %edx
jl L_AES_GCM_encrypt_avx1_calc_iv_lt16
andl $0xfffffff0, %edx
L_AES_GCM_encrypt_avx1_calc_iv_16_loop:
vmovdqu (%rax,%rcx,1), %xmm8
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm8, %xmm8
vpxor %xmm8, %xmm4, %xmm4
# ghash_gfmul_avx
vpshufd $0x4e, %xmm4, %xmm1
vpshufd $0x4e, %xmm5, %xmm2
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpxor %xmm4, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vmovdqa %xmm0, %xmm7
vmovdqa %xmm3, %xmm4
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm7, %xmm7
vpxor %xmm1, %xmm4, %xmm4
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm4, %xmm4
vpslld $31, %xmm7, %xmm0
vpslld $30, %xmm7, %xmm1
vpslld $25, %xmm7, %xmm2
vpxor %xmm1, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, %xmm1
vpsrldq $4, %xmm1, %xmm1
vpslldq $12, %xmm0, %xmm0
vpxor %xmm0, %xmm7, %xmm7
vpsrld $0x01, %xmm7, %xmm2
vpsrld $2, %xmm7, %xmm3
vpsrld $7, %xmm7, %xmm0
vpxor %xmm3, %xmm2, %xmm2
vpxor %xmm0, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm2, %xmm4, %xmm4
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_avx1_calc_iv_16_loop
movl %ebx, %edx
cmpl %edx, %ecx
je L_AES_GCM_encrypt_avx1_calc_iv_done
L_AES_GCM_encrypt_avx1_calc_iv_lt16:
subq $16, %rsp
vpxor %xmm8, %xmm8, %xmm8
xorl %ebx, %ebx
vmovdqu %xmm8, (%rsp)
L_AES_GCM_encrypt_avx1_calc_iv_loop:
movzbl (%rax,%rcx,1), %r13d
movb %r13b, (%rsp,%rbx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_avx1_calc_iv_loop
vmovdqu (%rsp), %xmm8
addq $16, %rsp
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm8, %xmm8
vpxor %xmm8, %xmm4, %xmm4
# ghash_gfmul_avx
vpshufd $0x4e, %xmm4, %xmm1
vpshufd $0x4e, %xmm5, %xmm2
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpxor %xmm4, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vmovdqa %xmm0, %xmm7
vmovdqa %xmm3, %xmm4
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm7, %xmm7
vpxor %xmm1, %xmm4, %xmm4
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm4, %xmm4
vpslld $31, %xmm7, %xmm0
vpslld $30, %xmm7, %xmm1
vpslld $25, %xmm7, %xmm2
vpxor %xmm1, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, %xmm1
vpsrldq $4, %xmm1, %xmm1
vpslldq $12, %xmm0, %xmm0
vpxor %xmm0, %xmm7, %xmm7
vpsrld $0x01, %xmm7, %xmm2
vpsrld $2, %xmm7, %xmm3
vpsrld $7, %xmm7, %xmm0
vpxor %xmm3, %xmm2, %xmm2
vpxor %xmm0, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm2, %xmm4, %xmm4
L_AES_GCM_encrypt_avx1_calc_iv_done:
# T = Encrypt counter
vpxor %xmm0, %xmm0, %xmm0
shll $3, %edx
vmovq %rdx, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpshufd $0x4e, %xmm4, %xmm1
vpshufd $0x4e, %xmm5, %xmm2
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpxor %xmm4, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vmovdqa %xmm0, %xmm7
vmovdqa %xmm3, %xmm4
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm7, %xmm7
vpxor %xmm1, %xmm4, %xmm4
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm4, %xmm4
vpslld $31, %xmm7, %xmm0
vpslld $30, %xmm7, %xmm1
vpslld $25, %xmm7, %xmm2
vpxor %xmm1, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, %xmm1
vpsrldq $4, %xmm1, %xmm1
vpslldq $12, %xmm0, %xmm0
vpxor %xmm0, %xmm7, %xmm7
vpsrld $0x01, %xmm7, %xmm2
vpsrld $2, %xmm7, %xmm3
vpsrld $7, %xmm7, %xmm0
vpxor %xmm3, %xmm2, %xmm2
vpxor %xmm0, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm2, %xmm4, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm4, %xmm4
# Encrypt counter
vmovdqa (%r15), %xmm8
vpxor %xmm4, %xmm8, %xmm8
vaesenc 16(%r15), %xmm8, %xmm8
vaesenc 32(%r15), %xmm8, %xmm8
vaesenc 48(%r15), %xmm8, %xmm8
vaesenc 64(%r15), %xmm8, %xmm8
vaesenc 80(%r15), %xmm8, %xmm8
vaesenc 96(%r15), %xmm8, %xmm8
vaesenc 112(%r15), %xmm8, %xmm8
vaesenc 128(%r15), %xmm8, %xmm8
vaesenc 144(%r15), %xmm8, %xmm8
cmpl $11, %r10d
vmovdqa 160(%r15), %xmm9
jl L_AES_GCM_encrypt_avx1_calc_iv_2_aesenc_avx_last
vaesenc %xmm9, %xmm8, %xmm8
vaesenc 176(%r15), %xmm8, %xmm8
cmpl $13, %r10d
vmovdqa 192(%r15), %xmm9
jl L_AES_GCM_encrypt_avx1_calc_iv_2_aesenc_avx_last
vaesenc %xmm9, %xmm8, %xmm8
vaesenc 208(%r15), %xmm8, %xmm8
vmovdqa 224(%r15), %xmm9
L_AES_GCM_encrypt_avx1_calc_iv_2_aesenc_avx_last:
vaesenclast %xmm9, %xmm8, %xmm8
vmovdqu %xmm8, 144(%rsp)
L_AES_GCM_encrypt_avx1_iv_done:
# Additional authentication data
movl %r11d, %edx
cmpl $0x00, %edx
je L_AES_GCM_encrypt_avx1_calc_aad_done
xorl %ecx, %ecx
cmpl $16, %edx
jl L_AES_GCM_encrypt_avx1_calc_aad_lt16
andl $0xfffffff0, %edx
L_AES_GCM_encrypt_avx1_calc_aad_16_loop:
vmovdqu (%r12,%rcx,1), %xmm8
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm8, %xmm8
vpxor %xmm8, %xmm6, %xmm6
# ghash_gfmul_avx
vpshufd $0x4e, %xmm6, %xmm1
vpshufd $0x4e, %xmm5, %xmm2
vpclmulqdq $0x11, %xmm6, %xmm5, %xmm3
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm0
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vmovdqa %xmm0, %xmm7
vmovdqa %xmm3, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm7, %xmm7
vpxor %xmm1, %xmm6, %xmm6
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm6, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm6, %xmm6
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm6, %xmm6
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm6, %xmm6
vpslld $31, %xmm7, %xmm0
vpslld $30, %xmm7, %xmm1
vpslld $25, %xmm7, %xmm2
vpxor %xmm1, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, %xmm1
vpsrldq $4, %xmm1, %xmm1
vpslldq $12, %xmm0, %xmm0
vpxor %xmm0, %xmm7, %xmm7
vpsrld $0x01, %xmm7, %xmm2
vpsrld $2, %xmm7, %xmm3
vpsrld $7, %xmm7, %xmm0
vpxor %xmm3, %xmm2, %xmm2
vpxor %xmm0, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm2, %xmm6, %xmm6
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_avx1_calc_aad_16_loop
movl %r11d, %edx
cmpl %edx, %ecx
je L_AES_GCM_encrypt_avx1_calc_aad_done
L_AES_GCM_encrypt_avx1_calc_aad_lt16:
subq $16, %rsp
vpxor %xmm8, %xmm8, %xmm8
xorl %ebx, %ebx
vmovdqu %xmm8, (%rsp)
L_AES_GCM_encrypt_avx1_calc_aad_loop:
movzbl (%r12,%rcx,1), %r13d
movb %r13b, (%rsp,%rbx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_avx1_calc_aad_loop
vmovdqu (%rsp), %xmm8
addq $16, %rsp
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm8, %xmm8
vpxor %xmm8, %xmm6, %xmm6
# ghash_gfmul_avx
vpshufd $0x4e, %xmm6, %xmm1
vpshufd $0x4e, %xmm5, %xmm2
vpclmulqdq $0x11, %xmm6, %xmm5, %xmm3
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm0
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vmovdqa %xmm0, %xmm7
vmovdqa %xmm3, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm7, %xmm7
vpxor %xmm1, %xmm6, %xmm6
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm6, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm6, %xmm6
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm6, %xmm6
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm6, %xmm6
vpslld $31, %xmm7, %xmm0
vpslld $30, %xmm7, %xmm1
vpslld $25, %xmm7, %xmm2
vpxor %xmm1, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, %xmm1
vpsrldq $4, %xmm1, %xmm1
vpslldq $12, %xmm0, %xmm0
vpxor %xmm0, %xmm7, %xmm7
vpsrld $0x01, %xmm7, %xmm2
vpsrld $2, %xmm7, %xmm3
vpsrld $7, %xmm7, %xmm0
vpxor %xmm3, %xmm2, %xmm2
vpxor %xmm0, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm2, %xmm6, %xmm6
L_AES_GCM_encrypt_avx1_calc_aad_done:
# Calculate counter and H
vpsrlq $63, %xmm5, %xmm9
vpsllq $0x01, %xmm5, %xmm8
vpslldq $8, %xmm9, %xmm9
vpor %xmm9, %xmm8, %xmm8
vpshufd $0xff, %xmm5, %xmm5
vpsrad $31, %xmm5, %xmm5
vpshufb L_avx1_aes_gcm_bswap_epi64(%rip), %xmm4, %xmm4
vpand L_avx1_aes_gcm_mod2_128(%rip), %xmm5, %xmm5
vpaddd L_avx1_aes_gcm_one(%rip), %xmm4, %xmm4
vpxor %xmm8, %xmm5, %xmm5
vmovdqu %xmm4, 128(%rsp)
xorl %ebx, %ebx
cmpl $0x80, %r9d
movl %r9d, %r13d
jl L_AES_GCM_encrypt_avx1_done_128
andl $0xffffff80, %r13d
vmovdqa %xmm6, %xmm2
# H ^ 1
vmovdqu %xmm5, (%rsp)
# H ^ 2
vpclmulqdq $0x00, %xmm5, %xmm5, %xmm8
vpclmulqdq $0x11, %xmm5, %xmm5, %xmm0
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm0, %xmm0
vmovdqu %xmm0, 16(%rsp)
# H ^ 3
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm5, %xmm9
vpshufd $0x4e, %xmm0, %xmm10
vpclmulqdq $0x11, %xmm5, %xmm0, %xmm11
vpclmulqdq $0x00, %xmm5, %xmm0, %xmm8
vpxor %xmm5, %xmm9, %xmm9
vpxor %xmm0, %xmm10, %xmm10
vpclmulqdq $0x00, %xmm10, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpxor %xmm11, %xmm9, %xmm9
vpslldq $8, %xmm9, %xmm10
vpsrldq $8, %xmm9, %xmm9
vpxor %xmm10, %xmm8, %xmm8
vpxor %xmm9, %xmm11, %xmm1
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm1, %xmm1
vmovdqu %xmm1, 32(%rsp)
# H ^ 4
vpclmulqdq $0x00, %xmm0, %xmm0, %xmm8
vpclmulqdq $0x11, %xmm0, %xmm0, %xmm3
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm3, %xmm3
vmovdqu %xmm3, 48(%rsp)
# H ^ 5
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm0, %xmm9
vpshufd $0x4e, %xmm1, %xmm10
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm11
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm8
vpxor %xmm0, %xmm9, %xmm9
vpxor %xmm1, %xmm10, %xmm10
vpclmulqdq $0x00, %xmm10, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpxor %xmm11, %xmm9, %xmm9
vpslldq $8, %xmm9, %xmm10
vpsrldq $8, %xmm9, %xmm9
vpxor %xmm10, %xmm8, %xmm8
vpxor %xmm9, %xmm11, %xmm7
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm7, %xmm7
vmovdqu %xmm7, 64(%rsp)
# H ^ 6
vpclmulqdq $0x00, %xmm1, %xmm1, %xmm8
vpclmulqdq $0x11, %xmm1, %xmm1, %xmm7
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm7, %xmm7
vmovdqu %xmm7, 80(%rsp)
# H ^ 7
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm1, %xmm9
vpshufd $0x4e, %xmm3, %xmm10
vpclmulqdq $0x11, %xmm1, %xmm3, %xmm11
vpclmulqdq $0x00, %xmm1, %xmm3, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vpxor %xmm3, %xmm10, %xmm10
vpclmulqdq $0x00, %xmm10, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpxor %xmm11, %xmm9, %xmm9
vpslldq $8, %xmm9, %xmm10
vpsrldq $8, %xmm9, %xmm9
vpxor %xmm10, %xmm8, %xmm8
vpxor %xmm9, %xmm11, %xmm7
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm7, %xmm7
vmovdqu %xmm7, 96(%rsp)
# H ^ 8
vpclmulqdq $0x00, %xmm3, %xmm3, %xmm8
vpclmulqdq $0x11, %xmm3, %xmm3, %xmm7
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm7, %xmm7
vmovdqu %xmm7, 112(%rsp)
# First 128 bytes of input
vmovdqu 128(%rsp), %xmm0
vmovdqa L_avx1_aes_gcm_bswap_epi64(%rip), %xmm1
vpshufb %xmm1, %xmm0, %xmm8
vpaddd L_avx1_aes_gcm_one(%rip), %xmm0, %xmm9
vpshufb %xmm1, %xmm9, %xmm9
vpaddd L_avx1_aes_gcm_two(%rip), %xmm0, %xmm10
vpshufb %xmm1, %xmm10, %xmm10
vpaddd L_avx1_aes_gcm_three(%rip), %xmm0, %xmm11
vpshufb %xmm1, %xmm11, %xmm11
vpaddd L_avx1_aes_gcm_four(%rip), %xmm0, %xmm12
vpshufb %xmm1, %xmm12, %xmm12
vpaddd L_avx1_aes_gcm_five(%rip), %xmm0, %xmm13
vpshufb %xmm1, %xmm13, %xmm13
vpaddd L_avx1_aes_gcm_six(%rip), %xmm0, %xmm14
vpshufb %xmm1, %xmm14, %xmm14
vpaddd L_avx1_aes_gcm_seven(%rip), %xmm0, %xmm15
vpshufb %xmm1, %xmm15, %xmm15
vpaddd L_avx1_aes_gcm_eight(%rip), %xmm0, %xmm0
vmovdqa (%r15), %xmm7
vmovdqu %xmm0, 128(%rsp)
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm7, %xmm9, %xmm9
vpxor %xmm7, %xmm10, %xmm10
vpxor %xmm7, %xmm11, %xmm11
vpxor %xmm7, %xmm12, %xmm12
vpxor %xmm7, %xmm13, %xmm13
vpxor %xmm7, %xmm14, %xmm14
vpxor %xmm7, %xmm15, %xmm15
vmovdqa 16(%r15), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 32(%r15), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 48(%r15), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 64(%r15), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 80(%r15), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 96(%r15), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 112(%r15), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 128(%r15), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 144(%r15), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
cmpl $11, %r10d
vmovdqa 160(%r15), %xmm7
jl L_AES_GCM_encrypt_avx1_aesenc_128_enc_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 176(%r15), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
cmpl $13, %r10d
vmovdqa 192(%r15), %xmm7
jl L_AES_GCM_encrypt_avx1_aesenc_128_enc_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 208(%r15), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 224(%r15), %xmm7
L_AES_GCM_encrypt_avx1_aesenc_128_enc_done:
vaesenclast %xmm7, %xmm8, %xmm8
vaesenclast %xmm7, %xmm9, %xmm9
vmovdqu (%rdi), %xmm0
vmovdqu 16(%rdi), %xmm1
vpxor %xmm0, %xmm8, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vmovdqu %xmm8, (%rsi)
vmovdqu %xmm9, 16(%rsi)
vaesenclast %xmm7, %xmm10, %xmm10
vaesenclast %xmm7, %xmm11, %xmm11
vmovdqu 32(%rdi), %xmm0
vmovdqu 48(%rdi), %xmm1
vpxor %xmm0, %xmm10, %xmm10
vpxor %xmm1, %xmm11, %xmm11
vmovdqu %xmm10, 32(%rsi)
vmovdqu %xmm11, 48(%rsi)
vaesenclast %xmm7, %xmm12, %xmm12
vaesenclast %xmm7, %xmm13, %xmm13
vmovdqu 64(%rdi), %xmm0
vmovdqu 80(%rdi), %xmm1
vpxor %xmm0, %xmm12, %xmm12
vpxor %xmm1, %xmm13, %xmm13
vmovdqu %xmm12, 64(%rsi)
vmovdqu %xmm13, 80(%rsi)
vaesenclast %xmm7, %xmm14, %xmm14
vaesenclast %xmm7, %xmm15, %xmm15
vmovdqu 96(%rdi), %xmm0
vmovdqu 112(%rdi), %xmm1
vpxor %xmm0, %xmm14, %xmm14
vpxor %xmm1, %xmm15, %xmm15
vmovdqu %xmm14, 96(%rsi)
vmovdqu %xmm15, 112(%rsi)
cmpl $0x80, %r13d
movl $0x80, %ebx
jle L_AES_GCM_encrypt_avx1_end_128
# More 128 bytes of input
L_AES_GCM_encrypt_avx1_ghash_128:
leaq (%rdi,%rbx,1), %rcx
leaq (%rsi,%rbx,1), %rdx
vmovdqu 128(%rsp), %xmm0
vmovdqa L_avx1_aes_gcm_bswap_epi64(%rip), %xmm1
vpshufb %xmm1, %xmm0, %xmm8
vpaddd L_avx1_aes_gcm_one(%rip), %xmm0, %xmm9
vpshufb %xmm1, %xmm9, %xmm9
vpaddd L_avx1_aes_gcm_two(%rip), %xmm0, %xmm10
vpshufb %xmm1, %xmm10, %xmm10
vpaddd L_avx1_aes_gcm_three(%rip), %xmm0, %xmm11
vpshufb %xmm1, %xmm11, %xmm11
vpaddd L_avx1_aes_gcm_four(%rip), %xmm0, %xmm12
vpshufb %xmm1, %xmm12, %xmm12
vpaddd L_avx1_aes_gcm_five(%rip), %xmm0, %xmm13
vpshufb %xmm1, %xmm13, %xmm13
vpaddd L_avx1_aes_gcm_six(%rip), %xmm0, %xmm14
vpshufb %xmm1, %xmm14, %xmm14
vpaddd L_avx1_aes_gcm_seven(%rip), %xmm0, %xmm15
vpshufb %xmm1, %xmm15, %xmm15
vpaddd L_avx1_aes_gcm_eight(%rip), %xmm0, %xmm0
vmovdqa (%r15), %xmm7
vmovdqu %xmm0, 128(%rsp)
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm7, %xmm9, %xmm9
vpxor %xmm7, %xmm10, %xmm10
vpxor %xmm7, %xmm11, %xmm11
vpxor %xmm7, %xmm12, %xmm12
vpxor %xmm7, %xmm13, %xmm13
vpxor %xmm7, %xmm14, %xmm14
vpxor %xmm7, %xmm15, %xmm15
vmovdqu 112(%rsp), %xmm7
vmovdqu -128(%rdx), %xmm0
vaesenc 16(%r15), %xmm8, %xmm8
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm3
vaesenc 16(%r15), %xmm9, %xmm9
vaesenc 16(%r15), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm2
vaesenc 16(%r15), %xmm11, %xmm11
vaesenc 16(%r15), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm1, %xmm1
vaesenc 16(%r15), %xmm13, %xmm13
vaesenc 16(%r15), %xmm14, %xmm14
vaesenc 16(%r15), %xmm15, %xmm15
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vmovdqu 96(%rsp), %xmm7
vmovdqu -112(%rdx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 32(%r15), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 32(%r15), %xmm9, %xmm9
vaesenc 32(%r15), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 32(%r15), %xmm11, %xmm11
vaesenc 32(%r15), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 32(%r15), %xmm13, %xmm13
vaesenc 32(%r15), %xmm14, %xmm14
vaesenc 32(%r15), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
vmovdqu 80(%rsp), %xmm7
vmovdqu -96(%rdx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 48(%r15), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 48(%r15), %xmm9, %xmm9
vaesenc 48(%r15), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 48(%r15), %xmm11, %xmm11
vaesenc 48(%r15), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 48(%r15), %xmm13, %xmm13
vaesenc 48(%r15), %xmm14, %xmm14
vaesenc 48(%r15), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
vmovdqu 64(%rsp), %xmm7
vmovdqu -80(%rdx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 64(%r15), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 64(%r15), %xmm9, %xmm9
vaesenc 64(%r15), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 64(%r15), %xmm11, %xmm11
vaesenc 64(%r15), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 64(%r15), %xmm13, %xmm13
vaesenc 64(%r15), %xmm14, %xmm14
vaesenc 64(%r15), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
vmovdqu 48(%rsp), %xmm7
vmovdqu -64(%rdx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 80(%r15), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 80(%r15), %xmm9, %xmm9
vaesenc 80(%r15), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 80(%r15), %xmm11, %xmm11
vaesenc 80(%r15), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 80(%r15), %xmm13, %xmm13
vaesenc 80(%r15), %xmm14, %xmm14
vaesenc 80(%r15), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
vmovdqu 32(%rsp), %xmm7
vmovdqu -48(%rdx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 96(%r15), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 96(%r15), %xmm9, %xmm9
vaesenc 96(%r15), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 96(%r15), %xmm11, %xmm11
vaesenc 96(%r15), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 96(%r15), %xmm13, %xmm13
vaesenc 96(%r15), %xmm14, %xmm14
vaesenc 96(%r15), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
vmovdqu 16(%rsp), %xmm7
vmovdqu -32(%rdx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 112(%r15), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 112(%r15), %xmm9, %xmm9
vaesenc 112(%r15), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 112(%r15), %xmm11, %xmm11
vaesenc 112(%r15), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 112(%r15), %xmm13, %xmm13
vaesenc 112(%r15), %xmm14, %xmm14
vaesenc 112(%r15), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
vmovdqu (%rsp), %xmm7
vmovdqu -16(%rdx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 128(%r15), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 128(%r15), %xmm9, %xmm9
vaesenc 128(%r15), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 128(%r15), %xmm11, %xmm11
vaesenc 128(%r15), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 128(%r15), %xmm13, %xmm13
vaesenc 128(%r15), %xmm14, %xmm14
vaesenc 128(%r15), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
vpslldq $8, %xmm1, %xmm5
vpsrldq $8, %xmm1, %xmm1
vaesenc 144(%r15), %xmm8, %xmm8
vpxor %xmm5, %xmm2, %xmm2
vpxor %xmm1, %xmm3, %xmm3
vaesenc 144(%r15), %xmm9, %xmm9
vpslld $31, %xmm2, %xmm7
vpslld $30, %xmm2, %xmm4
vpslld $25, %xmm2, %xmm5
vaesenc 144(%r15), %xmm10, %xmm10
vpxor %xmm4, %xmm7, %xmm7
vpxor %xmm5, %xmm7, %xmm7
vaesenc 144(%r15), %xmm11, %xmm11
vpsrldq $4, %xmm7, %xmm4
vpslldq $12, %xmm7, %xmm7
vaesenc 144(%r15), %xmm12, %xmm12
vpxor %xmm7, %xmm2, %xmm2
vpsrld $0x01, %xmm2, %xmm5
vaesenc 144(%r15), %xmm13, %xmm13
vpsrld $2, %xmm2, %xmm1
vpsrld $7, %xmm2, %xmm0
vaesenc 144(%r15), %xmm14, %xmm14
vpxor %xmm1, %xmm5, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vaesenc 144(%r15), %xmm15, %xmm15
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm2, %xmm2
vpxor %xmm3, %xmm2, %xmm2
cmpl $11, %r10d
vmovdqa 160(%r15), %xmm7
jl L_AES_GCM_encrypt_avx1_aesenc_128_ghash_avx_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 176(%r15), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
cmpl $13, %r10d
vmovdqa 192(%r15), %xmm7
jl L_AES_GCM_encrypt_avx1_aesenc_128_ghash_avx_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 208(%r15), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 224(%r15), %xmm7
L_AES_GCM_encrypt_avx1_aesenc_128_ghash_avx_done:
vaesenclast %xmm7, %xmm8, %xmm8
vaesenclast %xmm7, %xmm9, %xmm9
vmovdqu (%rcx), %xmm0
vmovdqu 16(%rcx), %xmm1
vpxor %xmm0, %xmm8, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vmovdqu %xmm8, (%rdx)
vmovdqu %xmm9, 16(%rdx)
vaesenclast %xmm7, %xmm10, %xmm10
vaesenclast %xmm7, %xmm11, %xmm11
vmovdqu 32(%rcx), %xmm0
vmovdqu 48(%rcx), %xmm1
vpxor %xmm0, %xmm10, %xmm10
vpxor %xmm1, %xmm11, %xmm11
vmovdqu %xmm10, 32(%rdx)
vmovdqu %xmm11, 48(%rdx)
vaesenclast %xmm7, %xmm12, %xmm12
vaesenclast %xmm7, %xmm13, %xmm13
vmovdqu 64(%rcx), %xmm0
vmovdqu 80(%rcx), %xmm1
vpxor %xmm0, %xmm12, %xmm12
vpxor %xmm1, %xmm13, %xmm13
vmovdqu %xmm12, 64(%rdx)
vmovdqu %xmm13, 80(%rdx)
vaesenclast %xmm7, %xmm14, %xmm14
vaesenclast %xmm7, %xmm15, %xmm15
vmovdqu 96(%rcx), %xmm0
vmovdqu 112(%rcx), %xmm1
vpxor %xmm0, %xmm14, %xmm14
vpxor %xmm1, %xmm15, %xmm15
vmovdqu %xmm14, 96(%rdx)
vmovdqu %xmm15, 112(%rdx)
addl $0x80, %ebx
cmpl %r13d, %ebx
jl L_AES_GCM_encrypt_avx1_ghash_128
L_AES_GCM_encrypt_avx1_end_128:
vmovdqa L_avx1_aes_gcm_bswap_mask(%rip), %xmm4
vpshufb %xmm4, %xmm8, %xmm8
vpshufb %xmm4, %xmm9, %xmm9
vpshufb %xmm4, %xmm10, %xmm10
vpshufb %xmm4, %xmm11, %xmm11
vpxor %xmm2, %xmm8, %xmm8
vpshufb %xmm4, %xmm12, %xmm12
vpshufb %xmm4, %xmm13, %xmm13
vpshufb %xmm4, %xmm14, %xmm14
vpshufb %xmm4, %xmm15, %xmm15
vmovdqu (%rsp), %xmm7
vmovdqu 16(%rsp), %xmm5
# ghash_gfmul_avx
vpshufd $0x4e, %xmm15, %xmm1
vpshufd $0x4e, %xmm7, %xmm2
vpclmulqdq $0x11, %xmm15, %xmm7, %xmm3
vpclmulqdq $0x00, %xmm15, %xmm7, %xmm0
vpxor %xmm15, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vmovdqa %xmm0, %xmm4
vmovdqa %xmm3, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm4, %xmm4
vpxor %xmm1, %xmm6, %xmm6
# ghash_gfmul_xor_avx
vpshufd $0x4e, %xmm14, %xmm1
vpshufd $0x4e, %xmm5, %xmm2
vpclmulqdq $0x11, %xmm14, %xmm5, %xmm3
vpclmulqdq $0x00, %xmm14, %xmm5, %xmm0
vpxor %xmm14, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm4, %xmm4
vpxor %xmm3, %xmm6, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm4, %xmm4
vpxor %xmm1, %xmm6, %xmm6
vmovdqu 32(%rsp), %xmm7
vmovdqu 48(%rsp), %xmm5
# ghash_gfmul_xor_avx
vpshufd $0x4e, %xmm13, %xmm1
vpshufd $0x4e, %xmm7, %xmm2
vpclmulqdq $0x11, %xmm13, %xmm7, %xmm3
vpclmulqdq $0x00, %xmm13, %xmm7, %xmm0
vpxor %xmm13, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm4, %xmm4
vpxor %xmm3, %xmm6, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm4, %xmm4
vpxor %xmm1, %xmm6, %xmm6
# ghash_gfmul_xor_avx
vpshufd $0x4e, %xmm12, %xmm1
vpshufd $0x4e, %xmm5, %xmm2
vpclmulqdq $0x11, %xmm12, %xmm5, %xmm3
vpclmulqdq $0x00, %xmm12, %xmm5, %xmm0
vpxor %xmm12, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm4, %xmm4
vpxor %xmm3, %xmm6, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm4, %xmm4
vpxor %xmm1, %xmm6, %xmm6
vmovdqu 64(%rsp), %xmm7
vmovdqu 80(%rsp), %xmm5
# ghash_gfmul_xor_avx
vpshufd $0x4e, %xmm11, %xmm1
vpshufd $0x4e, %xmm7, %xmm2
vpclmulqdq $0x11, %xmm11, %xmm7, %xmm3
vpclmulqdq $0x00, %xmm11, %xmm7, %xmm0
vpxor %xmm11, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm4, %xmm4
vpxor %xmm3, %xmm6, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm4, %xmm4
vpxor %xmm1, %xmm6, %xmm6
# ghash_gfmul_xor_avx
vpshufd $0x4e, %xmm10, %xmm1
vpshufd $0x4e, %xmm5, %xmm2
vpclmulqdq $0x11, %xmm10, %xmm5, %xmm3
vpclmulqdq $0x00, %xmm10, %xmm5, %xmm0
vpxor %xmm10, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm4, %xmm4
vpxor %xmm3, %xmm6, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm4, %xmm4
vpxor %xmm1, %xmm6, %xmm6
vmovdqu 96(%rsp), %xmm7
vmovdqu 112(%rsp), %xmm5
# ghash_gfmul_xor_avx
vpshufd $0x4e, %xmm9, %xmm1
vpshufd $0x4e, %xmm7, %xmm2
vpclmulqdq $0x11, %xmm9, %xmm7, %xmm3
vpclmulqdq $0x00, %xmm9, %xmm7, %xmm0
vpxor %xmm9, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm4, %xmm4
vpxor %xmm3, %xmm6, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm4, %xmm4
vpxor %xmm1, %xmm6, %xmm6
# ghash_gfmul_xor_avx
vpshufd $0x4e, %xmm8, %xmm1
vpshufd $0x4e, %xmm5, %xmm2
vpclmulqdq $0x11, %xmm8, %xmm5, %xmm3
vpclmulqdq $0x00, %xmm8, %xmm5, %xmm0
vpxor %xmm8, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm4, %xmm4
vpxor %xmm3, %xmm6, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm4, %xmm4
vpxor %xmm1, %xmm6, %xmm6
vpslld $31, %xmm4, %xmm0
vpslld $30, %xmm4, %xmm1
vpslld $25, %xmm4, %xmm2
vpxor %xmm1, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, %xmm1
vpsrldq $4, %xmm1, %xmm1
vpslldq $12, %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
vpsrld $0x01, %xmm4, %xmm2
vpsrld $2, %xmm4, %xmm3
vpsrld $7, %xmm4, %xmm0
vpxor %xmm3, %xmm2, %xmm2
vpxor %xmm0, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm4, %xmm2, %xmm2
vpxor %xmm2, %xmm6, %xmm6
vmovdqu (%rsp), %xmm5
L_AES_GCM_encrypt_avx1_done_128:
movl %r9d, %edx
cmpl %edx, %ebx
jge L_AES_GCM_encrypt_avx1_done_enc
movl %r9d, %r13d
andl $0xfffffff0, %r13d
cmpl %r13d, %ebx
jge L_AES_GCM_encrypt_avx1_last_block_done
vmovdqu 128(%rsp), %xmm9
vpshufb L_avx1_aes_gcm_bswap_epi64(%rip), %xmm9, %xmm8
vpaddd L_avx1_aes_gcm_one(%rip), %xmm9, %xmm9
vmovdqu %xmm9, 128(%rsp)
vpxor (%r15), %xmm8, %xmm8
vaesenc 16(%r15), %xmm8, %xmm8
vaesenc 32(%r15), %xmm8, %xmm8
vaesenc 48(%r15), %xmm8, %xmm8
vaesenc 64(%r15), %xmm8, %xmm8
vaesenc 80(%r15), %xmm8, %xmm8
vaesenc 96(%r15), %xmm8, %xmm8
vaesenc 112(%r15), %xmm8, %xmm8
vaesenc 128(%r15), %xmm8, %xmm8
vaesenc 144(%r15), %xmm8, %xmm8
cmpl $11, %r10d
vmovdqa 160(%r15), %xmm9
jl L_AES_GCM_encrypt_avx1_aesenc_block_last
vaesenc %xmm9, %xmm8, %xmm8
vaesenc 176(%r15), %xmm8, %xmm8
cmpl $13, %r10d
vmovdqa 192(%r15), %xmm9
jl L_AES_GCM_encrypt_avx1_aesenc_block_last
vaesenc %xmm9, %xmm8, %xmm8
vaesenc 208(%r15), %xmm8, %xmm8
vmovdqa 224(%r15), %xmm9
L_AES_GCM_encrypt_avx1_aesenc_block_last:
vaesenclast %xmm9, %xmm8, %xmm8
vmovdqu (%rdi,%rbx,1), %xmm9
vpxor %xmm9, %xmm8, %xmm8
vmovdqu %xmm8, (%rsi,%rbx,1)
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm8, %xmm8
vpxor %xmm8, %xmm6, %xmm6
addl $16, %ebx
cmpl %r13d, %ebx
jge L_AES_GCM_encrypt_avx1_last_block_ghash
L_AES_GCM_encrypt_avx1_last_block_start:
vmovdqu (%rdi,%rbx,1), %xmm13
vmovdqu 128(%rsp), %xmm9
vpshufb L_avx1_aes_gcm_bswap_epi64(%rip), %xmm9, %xmm8
vpaddd L_avx1_aes_gcm_one(%rip), %xmm9, %xmm9
vmovdqu %xmm9, 128(%rsp)
vpxor (%r15), %xmm8, %xmm8
vpclmulqdq $16, %xmm5, %xmm6, %xmm10
vaesenc 16(%r15), %xmm8, %xmm8
vaesenc 32(%r15), %xmm8, %xmm8
vpclmulqdq $0x01, %xmm5, %xmm6, %xmm11
vaesenc 48(%r15), %xmm8, %xmm8
vaesenc 64(%r15), %xmm8, %xmm8
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm12
vaesenc 80(%r15), %xmm8, %xmm8
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm1
vaesenc 96(%r15), %xmm8, %xmm8
vpxor %xmm11, %xmm10, %xmm10
vpslldq $8, %xmm10, %xmm2
vpsrldq $8, %xmm10, %xmm10
vaesenc 112(%r15), %xmm8, %xmm8
vpxor %xmm12, %xmm2, %xmm2
vpxor %xmm10, %xmm1, %xmm3
vmovdqa L_avx1_aes_gcm_mod2_128(%rip), %xmm0
vpclmulqdq $16, %xmm0, %xmm2, %xmm11
vaesenc 128(%r15), %xmm8, %xmm8
vpshufd $0x4e, %xmm2, %xmm10
vpxor %xmm11, %xmm10, %xmm10
vpclmulqdq $16, %xmm0, %xmm10, %xmm11
vaesenc 144(%r15), %xmm8, %xmm8
vpshufd $0x4e, %xmm10, %xmm10
vpxor %xmm11, %xmm10, %xmm10
vpxor %xmm3, %xmm10, %xmm6
cmpl $11, %r10d
vmovdqa 160(%r15), %xmm9
jl L_AES_GCM_encrypt_avx1_aesenc_gfmul_last
vaesenc %xmm9, %xmm8, %xmm8
vaesenc 176(%r15), %xmm8, %xmm8
cmpl $13, %r10d
vmovdqa 192(%r15), %xmm9
jl L_AES_GCM_encrypt_avx1_aesenc_gfmul_last
vaesenc %xmm9, %xmm8, %xmm8
vaesenc 208(%r15), %xmm8, %xmm8
vmovdqa 224(%r15), %xmm9
L_AES_GCM_encrypt_avx1_aesenc_gfmul_last:
vaesenclast %xmm9, %xmm8, %xmm8
vmovdqa %xmm13, %xmm0
vpxor %xmm0, %xmm8, %xmm8
vmovdqu %xmm8, (%rsi,%rbx,1)
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm8, %xmm8
addl $16, %ebx
vpxor %xmm8, %xmm6, %xmm6
cmpl %r13d, %ebx
jl L_AES_GCM_encrypt_avx1_last_block_start
L_AES_GCM_encrypt_avx1_last_block_ghash:
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm5, %xmm9
vpshufd $0x4e, %xmm6, %xmm10
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm11
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm8
vpxor %xmm5, %xmm9, %xmm9
vpxor %xmm6, %xmm10, %xmm10
vpclmulqdq $0x00, %xmm10, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpxor %xmm11, %xmm9, %xmm9
vpslldq $8, %xmm9, %xmm10
vpsrldq $8, %xmm9, %xmm9
vpxor %xmm10, %xmm8, %xmm8
vpxor %xmm9, %xmm11, %xmm6
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm6, %xmm6
L_AES_GCM_encrypt_avx1_last_block_done:
movl %r9d, %ecx
movl %ecx, %edx
andl $15, %ecx
jz L_AES_GCM_encrypt_avx1_aesenc_last15_enc_avx_done
vmovdqu 128(%rsp), %xmm4
vpshufb L_avx1_aes_gcm_bswap_epi64(%rip), %xmm4, %xmm4
vpxor (%r15), %xmm4, %xmm4
vaesenc 16(%r15), %xmm4, %xmm4
vaesenc 32(%r15), %xmm4, %xmm4
vaesenc 48(%r15), %xmm4, %xmm4
vaesenc 64(%r15), %xmm4, %xmm4
vaesenc 80(%r15), %xmm4, %xmm4
vaesenc 96(%r15), %xmm4, %xmm4
vaesenc 112(%r15), %xmm4, %xmm4
vaesenc 128(%r15), %xmm4, %xmm4
vaesenc 144(%r15), %xmm4, %xmm4
cmpl $11, %r10d
vmovdqa 160(%r15), %xmm9
jl L_AES_GCM_encrypt_avx1_aesenc_last15_enc_avx_aesenc_avx_last
vaesenc %xmm9, %xmm4, %xmm4
vaesenc 176(%r15), %xmm4, %xmm4
cmpl $13, %r10d
vmovdqa 192(%r15), %xmm9
jl L_AES_GCM_encrypt_avx1_aesenc_last15_enc_avx_aesenc_avx_last
vaesenc %xmm9, %xmm4, %xmm4
vaesenc 208(%r15), %xmm4, %xmm4
vmovdqa 224(%r15), %xmm9
L_AES_GCM_encrypt_avx1_aesenc_last15_enc_avx_aesenc_avx_last:
vaesenclast %xmm9, %xmm4, %xmm4
subq $16, %rsp
xorl %ecx, %ecx
vmovdqu %xmm4, (%rsp)
L_AES_GCM_encrypt_avx1_aesenc_last15_enc_avx_loop:
movzbl (%rdi,%rbx,1), %r13d
xorb (%rsp,%rcx,1), %r13b
movb %r13b, (%rsi,%rbx,1)
movb %r13b, (%rsp,%rcx,1)
incl %ebx
incl %ecx
cmpl %edx, %ebx
jl L_AES_GCM_encrypt_avx1_aesenc_last15_enc_avx_loop
xorq %r13, %r13
cmpl $16, %ecx
je L_AES_GCM_encrypt_avx1_aesenc_last15_enc_avx_finish_enc
L_AES_GCM_encrypt_avx1_aesenc_last15_enc_avx_byte_loop:
movb %r13b, (%rsp,%rcx,1)
incl %ecx
cmpl $16, %ecx
jl L_AES_GCM_encrypt_avx1_aesenc_last15_enc_avx_byte_loop
L_AES_GCM_encrypt_avx1_aesenc_last15_enc_avx_finish_enc:
vmovdqu (%rsp), %xmm4
addq $16, %rsp
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm4, %xmm4
vpxor %xmm4, %xmm6, %xmm6
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm5, %xmm9
vpshufd $0x4e, %xmm6, %xmm10
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm11
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm8
vpxor %xmm5, %xmm9, %xmm9
vpxor %xmm6, %xmm10, %xmm10
vpclmulqdq $0x00, %xmm10, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpxor %xmm11, %xmm9, %xmm9
vpslldq $8, %xmm9, %xmm10
vpsrldq $8, %xmm9, %xmm9
vpxor %xmm10, %xmm8, %xmm8
vpxor %xmm9, %xmm11, %xmm6
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm6, %xmm6
L_AES_GCM_encrypt_avx1_aesenc_last15_enc_avx_done:
L_AES_GCM_encrypt_avx1_done_enc:
movl %r9d, %edx
movl %r11d, %ecx
shlq $3, %rdx
shlq $3, %rcx
vmovq %rdx, %xmm0
vmovq %rcx, %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0
vpxor %xmm0, %xmm6, %xmm6
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm5, %xmm9
vpshufd $0x4e, %xmm6, %xmm10
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm11
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm8
vpxor %xmm5, %xmm9, %xmm9
vpxor %xmm6, %xmm10, %xmm10
vpclmulqdq $0x00, %xmm10, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpxor %xmm11, %xmm9, %xmm9
vpslldq $8, %xmm9, %xmm10
vpsrldq $8, %xmm9, %xmm9
vpxor %xmm10, %xmm8, %xmm8
vpxor %xmm9, %xmm11, %xmm6
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm6, %xmm6
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm6, %xmm6
vmovdqu 144(%rsp), %xmm0
vpxor %xmm6, %xmm0, %xmm0
cmpl $16, %r14d
je L_AES_GCM_encrypt_avx1_store_tag_16
xorq %rcx, %rcx
vmovdqu %xmm0, (%rsp)
L_AES_GCM_encrypt_avx1_store_tag_loop:
movzbl (%rsp,%rcx,1), %r13d
movb %r13b, (%r8,%rcx,1)
incl %ecx
cmpl %r14d, %ecx
jne L_AES_GCM_encrypt_avx1_store_tag_loop
jmp L_AES_GCM_encrypt_avx1_store_tag_done
L_AES_GCM_encrypt_avx1_store_tag_16:
vmovdqu %xmm0, (%r8)
L_AES_GCM_encrypt_avx1_store_tag_done:
vzeroupper
addq $0xa0, %rsp
popq %r15
popq %r14
popq %rbx
popq %r12
popq %r13
repz retq
#ifndef __APPLE__
.size AES_GCM_encrypt_avx1,.-AES_GCM_encrypt_avx1
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl AES_GCM_decrypt_avx1
.type AES_GCM_decrypt_avx1,@function
.align 16
AES_GCM_decrypt_avx1:
#else
.section __TEXT,__text
.globl _AES_GCM_decrypt_avx1
.p2align 4
_AES_GCM_decrypt_avx1:
#endif /* __APPLE__ */
pushq %r13
pushq %r12
pushq %rbx
pushq %r14
pushq %r15
pushq %rbp
movq %rdx, %r12
movq %rcx, %rax
movl 56(%rsp), %r11d
movl 64(%rsp), %ebx
movl 72(%rsp), %r14d
movq 80(%rsp), %r15
movl 88(%rsp), %r10d
movq 96(%rsp), %rbp
subq $0xa8, %rsp
vpxor %xmm4, %xmm4, %xmm4
vpxor %xmm6, %xmm6, %xmm6
cmpl $12, %ebx
movl %ebx, %edx
jne L_AES_GCM_decrypt_avx1_iv_not_12
        # Calculate values when IV is 12 bytes
# Set counter based on IV
movl $0x1000000, %ecx
vmovq (%rax), %xmm4
vpinsrd $2, 8(%rax), %xmm4, %xmm4
vpinsrd $3, %ecx, %xmm4, %xmm4
# H = Encrypt X(=0) and T = Encrypt counter
vmovdqa (%r15), %xmm5
vpxor %xmm5, %xmm4, %xmm1
vmovdqa 16(%r15), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 32(%r15), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 48(%r15), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 64(%r15), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 80(%r15), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 96(%r15), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 112(%r15), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 128(%r15), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 144(%r15), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
cmpl $11, %r10d
vmovdqa 160(%r15), %xmm7
jl L_AES_GCM_decrypt_avx1_calc_iv_12_last
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 176(%r15), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
cmpl $13, %r10d
vmovdqa 192(%r15), %xmm7
jl L_AES_GCM_decrypt_avx1_calc_iv_12_last
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 208(%r15), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 224(%r15), %xmm7
L_AES_GCM_decrypt_avx1_calc_iv_12_last:
vaesenclast %xmm7, %xmm5, %xmm5
vaesenclast %xmm7, %xmm1, %xmm1
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm5, %xmm5
vmovdqu %xmm1, 144(%rsp)
jmp L_AES_GCM_decrypt_avx1_iv_done
L_AES_GCM_decrypt_avx1_iv_not_12:
# Calculate values when IV is not 12 bytes
# H = Encrypt X(=0)
vmovdqa (%r15), %xmm5
vaesenc 16(%r15), %xmm5, %xmm5
vaesenc 32(%r15), %xmm5, %xmm5
vaesenc 48(%r15), %xmm5, %xmm5
vaesenc 64(%r15), %xmm5, %xmm5
vaesenc 80(%r15), %xmm5, %xmm5
vaesenc 96(%r15), %xmm5, %xmm5
vaesenc 112(%r15), %xmm5, %xmm5
vaesenc 128(%r15), %xmm5, %xmm5
vaesenc 144(%r15), %xmm5, %xmm5
cmpl $11, %r10d
vmovdqa 160(%r15), %xmm9
jl L_AES_GCM_decrypt_avx1_calc_iv_1_aesenc_avx_last
vaesenc %xmm9, %xmm5, %xmm5
vaesenc 176(%r15), %xmm5, %xmm5
cmpl $13, %r10d
vmovdqa 192(%r15), %xmm9
jl L_AES_GCM_decrypt_avx1_calc_iv_1_aesenc_avx_last
vaesenc %xmm9, %xmm5, %xmm5
vaesenc 208(%r15), %xmm5, %xmm5
vmovdqa 224(%r15), %xmm9
L_AES_GCM_decrypt_avx1_calc_iv_1_aesenc_avx_last:
vaesenclast %xmm9, %xmm5, %xmm5
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm5, %xmm5
# Calc counter
# Initialization vector
cmpl $0x00, %edx
movq $0x00, %rcx
je L_AES_GCM_decrypt_avx1_calc_iv_done
cmpl $16, %edx
jl L_AES_GCM_decrypt_avx1_calc_iv_lt16
andl $0xfffffff0, %edx
L_AES_GCM_decrypt_avx1_calc_iv_16_loop:
vmovdqu (%rax,%rcx,1), %xmm8
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm8, %xmm8
vpxor %xmm8, %xmm4, %xmm4
# ghash_gfmul_avx
vpshufd $0x4e, %xmm4, %xmm1
vpshufd $0x4e, %xmm5, %xmm2
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpxor %xmm4, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vmovdqa %xmm0, %xmm7
vmovdqa %xmm3, %xmm4
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm7, %xmm7
vpxor %xmm1, %xmm4, %xmm4
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm4, %xmm4
vpslld $31, %xmm7, %xmm0
vpslld $30, %xmm7, %xmm1
vpslld $25, %xmm7, %xmm2
vpxor %xmm1, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, %xmm1
vpsrldq $4, %xmm1, %xmm1
vpslldq $12, %xmm0, %xmm0
vpxor %xmm0, %xmm7, %xmm7
vpsrld $0x01, %xmm7, %xmm2
vpsrld $2, %xmm7, %xmm3
vpsrld $7, %xmm7, %xmm0
vpxor %xmm3, %xmm2, %xmm2
vpxor %xmm0, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm2, %xmm4, %xmm4
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_avx1_calc_iv_16_loop
movl %ebx, %edx
cmpl %edx, %ecx
je L_AES_GCM_decrypt_avx1_calc_iv_done
L_AES_GCM_decrypt_avx1_calc_iv_lt16:
subq $16, %rsp
vpxor %xmm8, %xmm8, %xmm8
xorl %ebx, %ebx
vmovdqu %xmm8, (%rsp)
L_AES_GCM_decrypt_avx1_calc_iv_loop:
movzbl (%rax,%rcx,1), %r13d
movb %r13b, (%rsp,%rbx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_avx1_calc_iv_loop
vmovdqu (%rsp), %xmm8
addq $16, %rsp
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm8, %xmm8
vpxor %xmm8, %xmm4, %xmm4
# ghash_gfmul_avx
vpshufd $0x4e, %xmm4, %xmm1
vpshufd $0x4e, %xmm5, %xmm2
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpxor %xmm4, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vmovdqa %xmm0, %xmm7
vmovdqa %xmm3, %xmm4
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm7, %xmm7
vpxor %xmm1, %xmm4, %xmm4
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm4, %xmm4
vpslld $31, %xmm7, %xmm0
vpslld $30, %xmm7, %xmm1
vpslld $25, %xmm7, %xmm2
vpxor %xmm1, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, %xmm1
vpsrldq $4, %xmm1, %xmm1
vpslldq $12, %xmm0, %xmm0
vpxor %xmm0, %xmm7, %xmm7
vpsrld $0x01, %xmm7, %xmm2
vpsrld $2, %xmm7, %xmm3
vpsrld $7, %xmm7, %xmm0
vpxor %xmm3, %xmm2, %xmm2
vpxor %xmm0, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm2, %xmm4, %xmm4
L_AES_GCM_decrypt_avx1_calc_iv_done:
# T = Encrypt counter
vpxor %xmm0, %xmm0, %xmm0
shll $3, %edx
vmovq %rdx, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpshufd $0x4e, %xmm4, %xmm1
vpshufd $0x4e, %xmm5, %xmm2
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpxor %xmm4, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vmovdqa %xmm0, %xmm7
vmovdqa %xmm3, %xmm4
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm7, %xmm7
vpxor %xmm1, %xmm4, %xmm4
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm4, %xmm4
vpslld $31, %xmm7, %xmm0
vpslld $30, %xmm7, %xmm1
vpslld $25, %xmm7, %xmm2
vpxor %xmm1, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, %xmm1
vpsrldq $4, %xmm1, %xmm1
vpslldq $12, %xmm0, %xmm0
vpxor %xmm0, %xmm7, %xmm7
vpsrld $0x01, %xmm7, %xmm2
vpsrld $2, %xmm7, %xmm3
vpsrld $7, %xmm7, %xmm0
vpxor %xmm3, %xmm2, %xmm2
vpxor %xmm0, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm2, %xmm4, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm4, %xmm4
# Encrypt counter
vmovdqa (%r15), %xmm8
vpxor %xmm4, %xmm8, %xmm8
vaesenc 16(%r15), %xmm8, %xmm8
vaesenc 32(%r15), %xmm8, %xmm8
vaesenc 48(%r15), %xmm8, %xmm8
vaesenc 64(%r15), %xmm8, %xmm8
vaesenc 80(%r15), %xmm8, %xmm8
vaesenc 96(%r15), %xmm8, %xmm8
vaesenc 112(%r15), %xmm8, %xmm8
vaesenc 128(%r15), %xmm8, %xmm8
vaesenc 144(%r15), %xmm8, %xmm8
cmpl $11, %r10d
vmovdqa 160(%r15), %xmm9
jl L_AES_GCM_decrypt_avx1_calc_iv_2_aesenc_avx_last
vaesenc %xmm9, %xmm8, %xmm8
vaesenc 176(%r15), %xmm8, %xmm8
cmpl $13, %r10d
vmovdqa 192(%r15), %xmm9
jl L_AES_GCM_decrypt_avx1_calc_iv_2_aesenc_avx_last
vaesenc %xmm9, %xmm8, %xmm8
vaesenc 208(%r15), %xmm8, %xmm8
vmovdqa 224(%r15), %xmm9
L_AES_GCM_decrypt_avx1_calc_iv_2_aesenc_avx_last:
vaesenclast %xmm9, %xmm8, %xmm8
vmovdqu %xmm8, 144(%rsp)
L_AES_GCM_decrypt_avx1_iv_done:
# Additional authentication data
movl %r11d, %edx
cmpl $0x00, %edx
je L_AES_GCM_decrypt_avx1_calc_aad_done
xorl %ecx, %ecx
cmpl $16, %edx
jl L_AES_GCM_decrypt_avx1_calc_aad_lt16
andl $0xfffffff0, %edx
L_AES_GCM_decrypt_avx1_calc_aad_16_loop:
vmovdqu (%r12,%rcx,1), %xmm8
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm8, %xmm8
vpxor %xmm8, %xmm6, %xmm6
# ghash_gfmul_avx
vpshufd $0x4e, %xmm6, %xmm1
vpshufd $0x4e, %xmm5, %xmm2
vpclmulqdq $0x11, %xmm6, %xmm5, %xmm3
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm0
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vmovdqa %xmm0, %xmm7
vmovdqa %xmm3, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm7, %xmm7
vpxor %xmm1, %xmm6, %xmm6
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm6, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm6, %xmm6
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm6, %xmm6
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm6, %xmm6
vpslld $31, %xmm7, %xmm0
vpslld $30, %xmm7, %xmm1
vpslld $25, %xmm7, %xmm2
vpxor %xmm1, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, %xmm1
vpsrldq $4, %xmm1, %xmm1
vpslldq $12, %xmm0, %xmm0
vpxor %xmm0, %xmm7, %xmm7
vpsrld $0x01, %xmm7, %xmm2
vpsrld $2, %xmm7, %xmm3
vpsrld $7, %xmm7, %xmm0
vpxor %xmm3, %xmm2, %xmm2
vpxor %xmm0, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm2, %xmm6, %xmm6
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_avx1_calc_aad_16_loop
movl %r11d, %edx
cmpl %edx, %ecx
je L_AES_GCM_decrypt_avx1_calc_aad_done
L_AES_GCM_decrypt_avx1_calc_aad_lt16:
subq $16, %rsp
vpxor %xmm8, %xmm8, %xmm8
xorl %ebx, %ebx
vmovdqu %xmm8, (%rsp)
L_AES_GCM_decrypt_avx1_calc_aad_loop:
movzbl (%r12,%rcx,1), %r13d
movb %r13b, (%rsp,%rbx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_avx1_calc_aad_loop
vmovdqu (%rsp), %xmm8
addq $16, %rsp
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm8, %xmm8
vpxor %xmm8, %xmm6, %xmm6
# ghash_gfmul_avx
vpshufd $0x4e, %xmm6, %xmm1
vpshufd $0x4e, %xmm5, %xmm2
vpclmulqdq $0x11, %xmm6, %xmm5, %xmm3
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm0
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vmovdqa %xmm0, %xmm7
vmovdqa %xmm3, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm7, %xmm7
vpxor %xmm1, %xmm6, %xmm6
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm6, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm6, %xmm6
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm6, %xmm6
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm6, %xmm6
vpslld $31, %xmm7, %xmm0
vpslld $30, %xmm7, %xmm1
vpslld $25, %xmm7, %xmm2
vpxor %xmm1, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, %xmm1
vpsrldq $4, %xmm1, %xmm1
vpslldq $12, %xmm0, %xmm0
vpxor %xmm0, %xmm7, %xmm7
vpsrld $0x01, %xmm7, %xmm2
vpsrld $2, %xmm7, %xmm3
vpsrld $7, %xmm7, %xmm0
vpxor %xmm3, %xmm2, %xmm2
vpxor %xmm0, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm2, %xmm6, %xmm6
L_AES_GCM_decrypt_avx1_calc_aad_done:
# Calculate counter and H
vpsrlq $63, %xmm5, %xmm9
vpsllq $0x01, %xmm5, %xmm8
vpslldq $8, %xmm9, %xmm9
vpor %xmm9, %xmm8, %xmm8
vpshufd $0xff, %xmm5, %xmm5
vpsrad $31, %xmm5, %xmm5
vpshufb L_avx1_aes_gcm_bswap_epi64(%rip), %xmm4, %xmm4
vpand L_avx1_aes_gcm_mod2_128(%rip), %xmm5, %xmm5
vpaddd L_avx1_aes_gcm_one(%rip), %xmm4, %xmm4
vpxor %xmm8, %xmm5, %xmm5
vmovdqu %xmm4, 128(%rsp)
xorl %ebx, %ebx
cmpl $0x80, %r9d
movl %r9d, %r13d
jl L_AES_GCM_decrypt_avx1_done_128
andl $0xffffff80, %r13d
vmovdqa %xmm6, %xmm2
# H ^ 1
vmovdqu %xmm5, (%rsp)
# H ^ 2
vpclmulqdq $0x00, %xmm5, %xmm5, %xmm8
vpclmulqdq $0x11, %xmm5, %xmm5, %xmm0
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm0, %xmm0
vmovdqu %xmm0, 16(%rsp)
# H ^ 3
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm5, %xmm9
vpshufd $0x4e, %xmm0, %xmm10
vpclmulqdq $0x11, %xmm5, %xmm0, %xmm11
vpclmulqdq $0x00, %xmm5, %xmm0, %xmm8
vpxor %xmm5, %xmm9, %xmm9
vpxor %xmm0, %xmm10, %xmm10
vpclmulqdq $0x00, %xmm10, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpxor %xmm11, %xmm9, %xmm9
vpslldq $8, %xmm9, %xmm10
vpsrldq $8, %xmm9, %xmm9
vpxor %xmm10, %xmm8, %xmm8
vpxor %xmm9, %xmm11, %xmm1
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm1, %xmm1
vmovdqu %xmm1, 32(%rsp)
# H ^ 4
vpclmulqdq $0x00, %xmm0, %xmm0, %xmm8
vpclmulqdq $0x11, %xmm0, %xmm0, %xmm3
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm3, %xmm3
vmovdqu %xmm3, 48(%rsp)
# H ^ 5
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm0, %xmm9
vpshufd $0x4e, %xmm1, %xmm10
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm11
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm8
vpxor %xmm0, %xmm9, %xmm9
vpxor %xmm1, %xmm10, %xmm10
vpclmulqdq $0x00, %xmm10, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpxor %xmm11, %xmm9, %xmm9
vpslldq $8, %xmm9, %xmm10
vpsrldq $8, %xmm9, %xmm9
vpxor %xmm10, %xmm8, %xmm8
vpxor %xmm9, %xmm11, %xmm7
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm7, %xmm7
vmovdqu %xmm7, 64(%rsp)
# H ^ 6
vpclmulqdq $0x00, %xmm1, %xmm1, %xmm8
vpclmulqdq $0x11, %xmm1, %xmm1, %xmm7
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm7, %xmm7
vmovdqu %xmm7, 80(%rsp)
# H ^ 7
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm1, %xmm9
vpshufd $0x4e, %xmm3, %xmm10
vpclmulqdq $0x11, %xmm1, %xmm3, %xmm11
vpclmulqdq $0x00, %xmm1, %xmm3, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vpxor %xmm3, %xmm10, %xmm10
vpclmulqdq $0x00, %xmm10, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpxor %xmm11, %xmm9, %xmm9
vpslldq $8, %xmm9, %xmm10
vpsrldq $8, %xmm9, %xmm9
vpxor %xmm10, %xmm8, %xmm8
vpxor %xmm9, %xmm11, %xmm7
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm7, %xmm7
vmovdqu %xmm7, 96(%rsp)
# H ^ 8
vpclmulqdq $0x00, %xmm3, %xmm3, %xmm8
vpclmulqdq $0x11, %xmm3, %xmm3, %xmm7
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm7, %xmm7
vmovdqu %xmm7, 112(%rsp)
L_AES_GCM_decrypt_avx1_ghash_128:
leaq (%rdi,%rbx,1), %rcx
leaq (%rsi,%rbx,1), %rdx
vmovdqu 128(%rsp), %xmm0
vmovdqa L_avx1_aes_gcm_bswap_epi64(%rip), %xmm1
vpshufb %xmm1, %xmm0, %xmm8
vpaddd L_avx1_aes_gcm_one(%rip), %xmm0, %xmm9
vpshufb %xmm1, %xmm9, %xmm9
vpaddd L_avx1_aes_gcm_two(%rip), %xmm0, %xmm10
vpshufb %xmm1, %xmm10, %xmm10
vpaddd L_avx1_aes_gcm_three(%rip), %xmm0, %xmm11
vpshufb %xmm1, %xmm11, %xmm11
vpaddd L_avx1_aes_gcm_four(%rip), %xmm0, %xmm12
vpshufb %xmm1, %xmm12, %xmm12
vpaddd L_avx1_aes_gcm_five(%rip), %xmm0, %xmm13
vpshufb %xmm1, %xmm13, %xmm13
vpaddd L_avx1_aes_gcm_six(%rip), %xmm0, %xmm14
vpshufb %xmm1, %xmm14, %xmm14
vpaddd L_avx1_aes_gcm_seven(%rip), %xmm0, %xmm15
vpshufb %xmm1, %xmm15, %xmm15
vpaddd L_avx1_aes_gcm_eight(%rip), %xmm0, %xmm0
vmovdqa (%r15), %xmm7
vmovdqu %xmm0, 128(%rsp)
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm7, %xmm9, %xmm9
vpxor %xmm7, %xmm10, %xmm10
vpxor %xmm7, %xmm11, %xmm11
vpxor %xmm7, %xmm12, %xmm12
vpxor %xmm7, %xmm13, %xmm13
vpxor %xmm7, %xmm14, %xmm14
vpxor %xmm7, %xmm15, %xmm15
vmovdqu 112(%rsp), %xmm7
vmovdqu (%rcx), %xmm0
vaesenc 16(%r15), %xmm8, %xmm8
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm3
vaesenc 16(%r15), %xmm9, %xmm9
vaesenc 16(%r15), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm2
vaesenc 16(%r15), %xmm11, %xmm11
vaesenc 16(%r15), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm1, %xmm1
vaesenc 16(%r15), %xmm13, %xmm13
vaesenc 16(%r15), %xmm14, %xmm14
vaesenc 16(%r15), %xmm15, %xmm15
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vmovdqu 96(%rsp), %xmm7
vmovdqu 16(%rcx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 32(%r15), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 32(%r15), %xmm9, %xmm9
vaesenc 32(%r15), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 32(%r15), %xmm11, %xmm11
vaesenc 32(%r15), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 32(%r15), %xmm13, %xmm13
vaesenc 32(%r15), %xmm14, %xmm14
vaesenc 32(%r15), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
vmovdqu 80(%rsp), %xmm7
vmovdqu 32(%rcx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 48(%r15), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 48(%r15), %xmm9, %xmm9
vaesenc 48(%r15), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 48(%r15), %xmm11, %xmm11
vaesenc 48(%r15), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 48(%r15), %xmm13, %xmm13
vaesenc 48(%r15), %xmm14, %xmm14
vaesenc 48(%r15), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
vmovdqu 64(%rsp), %xmm7
vmovdqu 48(%rcx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 64(%r15), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 64(%r15), %xmm9, %xmm9
vaesenc 64(%r15), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 64(%r15), %xmm11, %xmm11
vaesenc 64(%r15), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 64(%r15), %xmm13, %xmm13
vaesenc 64(%r15), %xmm14, %xmm14
vaesenc 64(%r15), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
vmovdqu 48(%rsp), %xmm7
vmovdqu 64(%rcx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 80(%r15), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 80(%r15), %xmm9, %xmm9
vaesenc 80(%r15), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 80(%r15), %xmm11, %xmm11
vaesenc 80(%r15), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 80(%r15), %xmm13, %xmm13
vaesenc 80(%r15), %xmm14, %xmm14
vaesenc 80(%r15), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
vmovdqu 32(%rsp), %xmm7
vmovdqu 80(%rcx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 96(%r15), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 96(%r15), %xmm9, %xmm9
vaesenc 96(%r15), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 96(%r15), %xmm11, %xmm11
vaesenc 96(%r15), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 96(%r15), %xmm13, %xmm13
vaesenc 96(%r15), %xmm14, %xmm14
vaesenc 96(%r15), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
vmovdqu 16(%rsp), %xmm7
vmovdqu 96(%rcx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 112(%r15), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 112(%r15), %xmm9, %xmm9
vaesenc 112(%r15), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 112(%r15), %xmm11, %xmm11
vaesenc 112(%r15), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 112(%r15), %xmm13, %xmm13
vaesenc 112(%r15), %xmm14, %xmm14
vaesenc 112(%r15), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
vmovdqu (%rsp), %xmm7
vmovdqu 112(%rcx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 128(%r15), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 128(%r15), %xmm9, %xmm9
vaesenc 128(%r15), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 128(%r15), %xmm11, %xmm11
vaesenc 128(%r15), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 128(%r15), %xmm13, %xmm13
vaesenc 128(%r15), %xmm14, %xmm14
vaesenc 128(%r15), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
vpslldq $8, %xmm1, %xmm5
vpsrldq $8, %xmm1, %xmm1
vaesenc 144(%r15), %xmm8, %xmm8
vpxor %xmm5, %xmm2, %xmm2
vpxor %xmm1, %xmm3, %xmm3
vaesenc 144(%r15), %xmm9, %xmm9
vpslld $31, %xmm2, %xmm7
vpslld $30, %xmm2, %xmm4
vpslld $25, %xmm2, %xmm5
vaesenc 144(%r15), %xmm10, %xmm10
vpxor %xmm4, %xmm7, %xmm7
vpxor %xmm5, %xmm7, %xmm7
vaesenc 144(%r15), %xmm11, %xmm11
vpsrldq $4, %xmm7, %xmm4
vpslldq $12, %xmm7, %xmm7
vaesenc 144(%r15), %xmm12, %xmm12
vpxor %xmm7, %xmm2, %xmm2
vpsrld $0x01, %xmm2, %xmm5
vaesenc 144(%r15), %xmm13, %xmm13
vpsrld $2, %xmm2, %xmm1
vpsrld $7, %xmm2, %xmm0
vaesenc 144(%r15), %xmm14, %xmm14
vpxor %xmm1, %xmm5, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vaesenc 144(%r15), %xmm15, %xmm15
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm2, %xmm2
vpxor %xmm3, %xmm2, %xmm2
cmpl $11, %r10d
vmovdqa 160(%r15), %xmm7
jl L_AES_GCM_decrypt_avx1_aesenc_128_ghash_avx_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 176(%r15), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
cmpl $13, %r10d
vmovdqa 192(%r15), %xmm7
jl L_AES_GCM_decrypt_avx1_aesenc_128_ghash_avx_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 208(%r15), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 224(%r15), %xmm7
L_AES_GCM_decrypt_avx1_aesenc_128_ghash_avx_done:
vaesenclast %xmm7, %xmm8, %xmm8
vaesenclast %xmm7, %xmm9, %xmm9
vmovdqu (%rcx), %xmm0
vmovdqu 16(%rcx), %xmm1
vpxor %xmm0, %xmm8, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vmovdqu %xmm8, (%rdx)
vmovdqu %xmm9, 16(%rdx)
vaesenclast %xmm7, %xmm10, %xmm10
vaesenclast %xmm7, %xmm11, %xmm11
vmovdqu 32(%rcx), %xmm0
vmovdqu 48(%rcx), %xmm1
vpxor %xmm0, %xmm10, %xmm10
vpxor %xmm1, %xmm11, %xmm11
vmovdqu %xmm10, 32(%rdx)
vmovdqu %xmm11, 48(%rdx)
vaesenclast %xmm7, %xmm12, %xmm12
vaesenclast %xmm7, %xmm13, %xmm13
vmovdqu 64(%rcx), %xmm0
vmovdqu 80(%rcx), %xmm1
vpxor %xmm0, %xmm12, %xmm12
vpxor %xmm1, %xmm13, %xmm13
vmovdqu %xmm12, 64(%rdx)
vmovdqu %xmm13, 80(%rdx)
vaesenclast %xmm7, %xmm14, %xmm14
vaesenclast %xmm7, %xmm15, %xmm15
vmovdqu 96(%rcx), %xmm0
vmovdqu 112(%rcx), %xmm1
vpxor %xmm0, %xmm14, %xmm14
vpxor %xmm1, %xmm15, %xmm15
vmovdqu %xmm14, 96(%rdx)
vmovdqu %xmm15, 112(%rdx)
addl $0x80, %ebx
cmpl %r13d, %ebx
jl L_AES_GCM_decrypt_avx1_ghash_128
vmovdqa %xmm2, %xmm6
vmovdqu (%rsp), %xmm5
L_AES_GCM_decrypt_avx1_done_128:
movl %r9d, %edx
cmpl %edx, %ebx
jge L_AES_GCM_decrypt_avx1_done_dec
movl %r9d, %r13d
andl $0xfffffff0, %r13d
cmpl %r13d, %ebx
jge L_AES_GCM_decrypt_avx1_last_block_done
L_AES_GCM_decrypt_avx1_last_block_start:
vmovdqu (%rdi,%rbx,1), %xmm13
vmovdqa %xmm5, %xmm0
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm13, %xmm1
vpxor %xmm6, %xmm1, %xmm1
vmovdqu 128(%rsp), %xmm9
vpshufb L_avx1_aes_gcm_bswap_epi64(%rip), %xmm9, %xmm8
vpaddd L_avx1_aes_gcm_one(%rip), %xmm9, %xmm9
vmovdqu %xmm9, 128(%rsp)
vpxor (%r15), %xmm8, %xmm8
vpclmulqdq $16, %xmm0, %xmm1, %xmm10
vaesenc 16(%r15), %xmm8, %xmm8
vaesenc 32(%r15), %xmm8, %xmm8
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm11
vaesenc 48(%r15), %xmm8, %xmm8
vaesenc 64(%r15), %xmm8, %xmm8
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm12
vaesenc 80(%r15), %xmm8, %xmm8
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vaesenc 96(%r15), %xmm8, %xmm8
vpxor %xmm11, %xmm10, %xmm10
vpslldq $8, %xmm10, %xmm2
vpsrldq $8, %xmm10, %xmm10
vaesenc 112(%r15), %xmm8, %xmm8
vpxor %xmm12, %xmm2, %xmm2
vpxor %xmm10, %xmm1, %xmm3
vmovdqa L_avx1_aes_gcm_mod2_128(%rip), %xmm0
vpclmulqdq $16, %xmm0, %xmm2, %xmm11
vaesenc 128(%r15), %xmm8, %xmm8
vpshufd $0x4e, %xmm2, %xmm10
vpxor %xmm11, %xmm10, %xmm10
vpclmulqdq $16, %xmm0, %xmm10, %xmm11
vaesenc 144(%r15), %xmm8, %xmm8
vpshufd $0x4e, %xmm10, %xmm10
vpxor %xmm11, %xmm10, %xmm10
vpxor %xmm3, %xmm10, %xmm6
cmpl $11, %r10d
vmovdqa 160(%r15), %xmm9
jl L_AES_GCM_decrypt_avx1_aesenc_gfmul_last
vaesenc %xmm9, %xmm8, %xmm8
vaesenc 176(%r15), %xmm8, %xmm8
cmpl $13, %r10d
vmovdqa 192(%r15), %xmm9
jl L_AES_GCM_decrypt_avx1_aesenc_gfmul_last
vaesenc %xmm9, %xmm8, %xmm8
vaesenc 208(%r15), %xmm8, %xmm8
vmovdqa 224(%r15), %xmm9
L_AES_GCM_decrypt_avx1_aesenc_gfmul_last:
vaesenclast %xmm9, %xmm8, %xmm8
vmovdqa %xmm13, %xmm0
vpxor %xmm0, %xmm8, %xmm8
vmovdqu %xmm8, (%rsi,%rbx,1)
addl $16, %ebx
cmpl %r13d, %ebx
jl L_AES_GCM_decrypt_avx1_last_block_start
L_AES_GCM_decrypt_avx1_last_block_done:
movl %r9d, %ecx
movl %ecx, %edx
andl $15, %ecx
jz L_AES_GCM_decrypt_avx1_aesenc_last15_dec_avx_done
vmovdqu 128(%rsp), %xmm4
vpshufb L_avx1_aes_gcm_bswap_epi64(%rip), %xmm4, %xmm4
vpxor (%r15), %xmm4, %xmm4
vaesenc 16(%r15), %xmm4, %xmm4
vaesenc 32(%r15), %xmm4, %xmm4
vaesenc 48(%r15), %xmm4, %xmm4
vaesenc 64(%r15), %xmm4, %xmm4
vaesenc 80(%r15), %xmm4, %xmm4
vaesenc 96(%r15), %xmm4, %xmm4
vaesenc 112(%r15), %xmm4, %xmm4
vaesenc 128(%r15), %xmm4, %xmm4
vaesenc 144(%r15), %xmm4, %xmm4
cmpl $11, %r10d
vmovdqa 160(%r15), %xmm9
jl L_AES_GCM_decrypt_avx1_aesenc_last15_dec_avx_aesenc_avx_last
vaesenc %xmm9, %xmm4, %xmm4
vaesenc 176(%r15), %xmm4, %xmm4
cmpl $13, %r10d
vmovdqa 192(%r15), %xmm9
jl L_AES_GCM_decrypt_avx1_aesenc_last15_dec_avx_aesenc_avx_last
vaesenc %xmm9, %xmm4, %xmm4
vaesenc 208(%r15), %xmm4, %xmm4
vmovdqa 224(%r15), %xmm9
L_AES_GCM_decrypt_avx1_aesenc_last15_dec_avx_aesenc_avx_last:
vaesenclast %xmm9, %xmm4, %xmm4
subq $32, %rsp
xorl %ecx, %ecx
vmovdqu %xmm4, (%rsp)
vpxor %xmm0, %xmm0, %xmm0
vmovdqu %xmm0, 16(%rsp)
L_AES_GCM_decrypt_avx1_aesenc_last15_dec_avx_loop:
movzbl (%rdi,%rbx,1), %r13d
movb %r13b, 16(%rsp,%rcx,1)
xorb (%rsp,%rcx,1), %r13b
movb %r13b, (%rsi,%rbx,1)
incl %ebx
incl %ecx
cmpl %edx, %ebx
jl L_AES_GCM_decrypt_avx1_aesenc_last15_dec_avx_loop
vmovdqu 16(%rsp), %xmm4
addq $32, %rsp
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm4, %xmm4
vpxor %xmm4, %xmm6, %xmm6
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm5, %xmm9
vpshufd $0x4e, %xmm6, %xmm10
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm11
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm8
vpxor %xmm5, %xmm9, %xmm9
vpxor %xmm6, %xmm10, %xmm10
vpclmulqdq $0x00, %xmm10, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpxor %xmm11, %xmm9, %xmm9
vpslldq $8, %xmm9, %xmm10
vpsrldq $8, %xmm9, %xmm9
vpxor %xmm10, %xmm8, %xmm8
vpxor %xmm9, %xmm11, %xmm6
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm6, %xmm6
L_AES_GCM_decrypt_avx1_aesenc_last15_dec_avx_done:
L_AES_GCM_decrypt_avx1_done_dec:
movl %r9d, %edx
movl %r11d, %ecx
shlq $3, %rdx
shlq $3, %rcx
vmovq %rdx, %xmm0
vmovq %rcx, %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0
vpxor %xmm0, %xmm6, %xmm6
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm5, %xmm9
vpshufd $0x4e, %xmm6, %xmm10
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm11
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm8
vpxor %xmm5, %xmm9, %xmm9
vpxor %xmm6, %xmm10, %xmm10
vpclmulqdq $0x00, %xmm10, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpxor %xmm11, %xmm9, %xmm9
vpslldq $8, %xmm9, %xmm10
vpsrldq $8, %xmm9, %xmm9
vpxor %xmm10, %xmm8, %xmm8
vpxor %xmm9, %xmm11, %xmm6
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm6, %xmm6
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm6, %xmm6
vmovdqu 144(%rsp), %xmm0
vpxor %xmm6, %xmm0, %xmm0
cmpl $16, %r14d
je L_AES_GCM_decrypt_avx1_cmp_tag_16
subq $16, %rsp
xorq %rcx, %rcx
xorq %rbx, %rbx
vmovdqu %xmm0, (%rsp)
L_AES_GCM_decrypt_avx1_cmp_tag_loop:
movzbl (%rsp,%rcx,1), %r13d
xorb (%r8,%rcx,1), %r13b
orb %r13b, %bl
incl %ecx
cmpl %r14d, %ecx
jne L_AES_GCM_decrypt_avx1_cmp_tag_loop
cmpb $0x00, %bl
sete %bl
addq $16, %rsp
xorq %rcx, %rcx
jmp L_AES_GCM_decrypt_avx1_cmp_tag_done
L_AES_GCM_decrypt_avx1_cmp_tag_16:
vmovdqu (%r8), %xmm1
vpcmpeqb %xmm1, %xmm0, %xmm0
vpmovmskb %xmm0, %rdx
# %%edx == 0xFFFF then return 1 else => return 0
xorl %ebx, %ebx
cmpl $0xffff, %edx
sete %bl
L_AES_GCM_decrypt_avx1_cmp_tag_done:
movl %ebx, (%rbp)
vzeroupper
addq $0xa8, %rsp
popq %rbp
popq %r15
popq %r14
popq %rbx
popq %r12
popq %r13
repz retq
#ifndef __APPLE__
.size AES_GCM_decrypt_avx1,.-AES_GCM_decrypt_avx1
#endif /* __APPLE__ */
#ifdef WOLFSSL_AESGCM_STREAM
#-----------------------------------------------------------------------
# AES_GCM_init_avx1
# Compute the GCM hash key H, the initial counter block Y0+1, and the
# encrypted pre-counter block T = E(K, Y0) using AVX1 + AES-NI + CLMUL.
#
# ABI:   System V AMD64 (hand-written; no frame pointer)
# In:    rdi = AES round-key schedule (round keys at 16-byte stride)
#        esi = number of AES rounds (branches on 11/13 select the
#              10-, 12- or 14-round paths, i.e. AES-128/192/256)
#        rdx = IV pointer          (copied to r10)
#        ecx = IV length in bytes  (copied to r11d)
#        r8  = out: H, the GHASH key (byte-reversed E(K, 0^128))
#        r9  = out: counter block (Y0 + 1, ready for first data block)
#        24(%rsp) after the two pushes = out buffer for T = E(K, Y0)
#              (the 7th integer argument, passed on the caller's stack)
# Note:  output stores use vmovdqa, so all three out pointers are
#        assumed 16-byte aligned — confirm at call sites.
# Clobb: rax, rcx, rdx, r10, r11, r12, r13, xmm0-xmm8, xmm15, flags
#-----------------------------------------------------------------------
#ifndef __APPLE__
.text
.globl AES_GCM_init_avx1
.type AES_GCM_init_avx1,@function
.align 16
AES_GCM_init_avx1:
#else
.section __TEXT,__text
.globl _AES_GCM_init_avx1
.p2align 4
_AES_GCM_init_avx1:
#endif /* __APPLE__ */
        pushq %r12
        pushq %r13
        movq %rdx, %r10
        movl %ecx, %r11d
        movq 24(%rsp), %rax
        subq $16, %rsp
        # xmm4 accumulates the counter / GHASH(IV) value; start at zero
        vpxor %xmm4, %xmm4, %xmm4
        movl %r11d, %edx
        cmpl $12, %edx
        jne L_AES_GCM_init_avx1_iv_not_12
        # # Calculate values when IV is 12 bytes
        # Set counter based on IV
        # Fast path: Y0 = IV || 0x00000001 (0x1000000 is 1 in
        # big-endian byte order within the top 32-bit lane)
        movl $0x1000000, %ecx
        vmovq (%r10), %xmm4
        vpinsrd $2, 8(%r10), %xmm4, %xmm4
        vpinsrd $3, %ecx, %xmm4, %xmm4
        # H = Encrypt X(=0) and T = Encrypt counter
        # Both encryptions run interleaved: xmm5 = E(K,0) -> H,
        # xmm1 = E(K,Y0) -> T. xmm5 starts as round key 0 (= 0 ^ rk0).
        vmovdqa (%rdi), %xmm5
        vpxor %xmm5, %xmm4, %xmm1
        vmovdqa 16(%rdi), %xmm6
        vaesenc %xmm6, %xmm5, %xmm5
        vaesenc %xmm6, %xmm1, %xmm1
        vmovdqa 32(%rdi), %xmm6
        vaesenc %xmm6, %xmm5, %xmm5
        vaesenc %xmm6, %xmm1, %xmm1
        vmovdqa 48(%rdi), %xmm6
        vaesenc %xmm6, %xmm5, %xmm5
        vaesenc %xmm6, %xmm1, %xmm1
        vmovdqa 64(%rdi), %xmm6
        vaesenc %xmm6, %xmm5, %xmm5
        vaesenc %xmm6, %xmm1, %xmm1
        vmovdqa 80(%rdi), %xmm6
        vaesenc %xmm6, %xmm5, %xmm5
        vaesenc %xmm6, %xmm1, %xmm1
        vmovdqa 96(%rdi), %xmm6
        vaesenc %xmm6, %xmm5, %xmm5
        vaesenc %xmm6, %xmm1, %xmm1
        vmovdqa 112(%rdi), %xmm6
        vaesenc %xmm6, %xmm5, %xmm5
        vaesenc %xmm6, %xmm1, %xmm1
        vmovdqa 128(%rdi), %xmm6
        vaesenc %xmm6, %xmm5, %xmm5
        vaesenc %xmm6, %xmm1, %xmm1
        vmovdqa 144(%rdi), %xmm6
        vaesenc %xmm6, %xmm5, %xmm5
        vaesenc %xmm6, %xmm1, %xmm1
        # Extra rounds for AES-192 / AES-256 (rounds >= 11 / >= 13)
        cmpl $11, %esi
        vmovdqa 160(%rdi), %xmm6
        jl L_AES_GCM_init_avx1_calc_iv_12_last
        vaesenc %xmm6, %xmm5, %xmm5
        vaesenc %xmm6, %xmm1, %xmm1
        vmovdqa 176(%rdi), %xmm6
        vaesenc %xmm6, %xmm5, %xmm5
        vaesenc %xmm6, %xmm1, %xmm1
        cmpl $13, %esi
        vmovdqa 192(%rdi), %xmm6
        jl L_AES_GCM_init_avx1_calc_iv_12_last
        vaesenc %xmm6, %xmm5, %xmm5
        vaesenc %xmm6, %xmm1, %xmm1
        vmovdqa 208(%rdi), %xmm6
        vaesenc %xmm6, %xmm5, %xmm5
        vaesenc %xmm6, %xmm1, %xmm1
        vmovdqa 224(%rdi), %xmm6
L_AES_GCM_init_avx1_calc_iv_12_last:
        vaesenclast %xmm6, %xmm5, %xmm5
        vaesenclast %xmm6, %xmm1, %xmm1
        # H is kept byte-reversed for the GHASH bit-reflected math
        vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm5, %xmm5
        vmovdqu %xmm1, %xmm15
        jmp L_AES_GCM_init_avx1_iv_done
L_AES_GCM_init_avx1_iv_not_12:
        # Calculate values when IV is not 12 bytes
        # Slow path (IV != 12 bytes): Y0 = GHASH(IV) per SP 800-38D
        # H = Encrypt X(=0)
        vmovdqa (%rdi), %xmm5
        vaesenc 16(%rdi), %xmm5, %xmm5
        vaesenc 32(%rdi), %xmm5, %xmm5
        vaesenc 48(%rdi), %xmm5, %xmm5
        vaesenc 64(%rdi), %xmm5, %xmm5
        vaesenc 80(%rdi), %xmm5, %xmm5
        vaesenc 96(%rdi), %xmm5, %xmm5
        vaesenc 112(%rdi), %xmm5, %xmm5
        vaesenc 128(%rdi), %xmm5, %xmm5
        vaesenc 144(%rdi), %xmm5, %xmm5
        cmpl $11, %esi
        vmovdqa 160(%rdi), %xmm8
        jl L_AES_GCM_init_avx1_calc_iv_1_aesenc_avx_last
        vaesenc %xmm8, %xmm5, %xmm5
        vaesenc 176(%rdi), %xmm5, %xmm5
        cmpl $13, %esi
        vmovdqa 192(%rdi), %xmm8
        jl L_AES_GCM_init_avx1_calc_iv_1_aesenc_avx_last
        vaesenc %xmm8, %xmm5, %xmm5
        vaesenc 208(%rdi), %xmm5, %xmm5
        vmovdqa 224(%rdi), %xmm8
L_AES_GCM_init_avx1_calc_iv_1_aesenc_avx_last:
        vaesenclast %xmm8, %xmm5, %xmm5
        vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm5, %xmm5
        # Calc counter
        # Initialization vector
        cmpl $0x00, %edx
        movq $0x00, %rcx
        je L_AES_GCM_init_avx1_calc_iv_done
        cmpl $16, %edx
        jl L_AES_GCM_init_avx1_calc_iv_lt16
        # edx = IV length rounded down to a multiple of 16;
        # rcx = byte offset into the IV
        andl $0xfffffff0, %edx
L_AES_GCM_init_avx1_calc_iv_16_loop:
        vmovdqu (%r10,%rcx,1), %xmm7
        vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm7, %xmm7
        vpxor %xmm7, %xmm4, %xmm4
        # ghash_gfmul_avx
        # xmm4 = (xmm4 * H) in GF(2^128): Karatsuba carry-less
        # multiply followed by shift-left-1 and reduction mod the
        # GCM polynomial (the 31/30/25 and 1/2/7 shift groups)
        vpshufd $0x4e, %xmm4, %xmm1
        vpshufd $0x4e, %xmm5, %xmm2
        vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
        vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
        vpxor %xmm4, %xmm1, %xmm1
        vpxor %xmm5, %xmm2, %xmm2
        vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
        vpxor %xmm0, %xmm1, %xmm1
        vpxor %xmm3, %xmm1, %xmm1
        vmovdqa %xmm0, %xmm6
        vmovdqa %xmm3, %xmm4
        vpslldq $8, %xmm1, %xmm2
        vpsrldq $8, %xmm1, %xmm1
        vpxor %xmm2, %xmm6, %xmm6
        vpxor %xmm1, %xmm4, %xmm4
        vpsrld $31, %xmm6, %xmm0
        vpsrld $31, %xmm4, %xmm1
        vpslld $0x01, %xmm6, %xmm6
        vpslld $0x01, %xmm4, %xmm4
        vpsrldq $12, %xmm0, %xmm2
        vpslldq $4, %xmm0, %xmm0
        vpslldq $4, %xmm1, %xmm1
        vpor %xmm2, %xmm4, %xmm4
        vpor %xmm0, %xmm6, %xmm6
        vpor %xmm1, %xmm4, %xmm4
        vpslld $31, %xmm6, %xmm0
        vpslld $30, %xmm6, %xmm1
        vpslld $25, %xmm6, %xmm2
        vpxor %xmm1, %xmm0, %xmm0
        vpxor %xmm2, %xmm0, %xmm0
        vmovdqa %xmm0, %xmm1
        vpsrldq $4, %xmm1, %xmm1
        vpslldq $12, %xmm0, %xmm0
        vpxor %xmm0, %xmm6, %xmm6
        vpsrld $0x01, %xmm6, %xmm2
        vpsrld $2, %xmm6, %xmm3
        vpsrld $7, %xmm6, %xmm0
        vpxor %xmm3, %xmm2, %xmm2
        vpxor %xmm0, %xmm2, %xmm2
        vpxor %xmm1, %xmm2, %xmm2
        vpxor %xmm6, %xmm2, %xmm2
        vpxor %xmm2, %xmm4, %xmm4
        addl $16, %ecx
        cmpl %edx, %ecx
        jl L_AES_GCM_init_avx1_calc_iv_16_loop
        movl %r11d, %edx
        cmpl %edx, %ecx
        je L_AES_GCM_init_avx1_calc_iv_done
L_AES_GCM_init_avx1_calc_iv_lt16:
        # Tail (< 16 IV bytes): copy into a zeroed stack block
        # byte-by-byte, then GHASH it like a full block
        subq $16, %rsp
        vpxor %xmm7, %xmm7, %xmm7
        xorl %r13d, %r13d
        vmovdqu %xmm7, (%rsp)
L_AES_GCM_init_avx1_calc_iv_loop:
        movzbl (%r10,%rcx,1), %r12d
        movb %r12b, (%rsp,%r13,1)
        incl %ecx
        incl %r13d
        cmpl %edx, %ecx
        jl L_AES_GCM_init_avx1_calc_iv_loop
        vmovdqu (%rsp), %xmm7
        addq $16, %rsp
        vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm7, %xmm7
        vpxor %xmm7, %xmm4, %xmm4
        # ghash_gfmul_avx
        # Same GF(2^128) multiply-by-H + reduction as above
        vpshufd $0x4e, %xmm4, %xmm1
        vpshufd $0x4e, %xmm5, %xmm2
        vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
        vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
        vpxor %xmm4, %xmm1, %xmm1
        vpxor %xmm5, %xmm2, %xmm2
        vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
        vpxor %xmm0, %xmm1, %xmm1
        vpxor %xmm3, %xmm1, %xmm1
        vmovdqa %xmm0, %xmm6
        vmovdqa %xmm3, %xmm4
        vpslldq $8, %xmm1, %xmm2
        vpsrldq $8, %xmm1, %xmm1
        vpxor %xmm2, %xmm6, %xmm6
        vpxor %xmm1, %xmm4, %xmm4
        vpsrld $31, %xmm6, %xmm0
        vpsrld $31, %xmm4, %xmm1
        vpslld $0x01, %xmm6, %xmm6
        vpslld $0x01, %xmm4, %xmm4
        vpsrldq $12, %xmm0, %xmm2
        vpslldq $4, %xmm0, %xmm0
        vpslldq $4, %xmm1, %xmm1
        vpor %xmm2, %xmm4, %xmm4
        vpor %xmm0, %xmm6, %xmm6
        vpor %xmm1, %xmm4, %xmm4
        vpslld $31, %xmm6, %xmm0
        vpslld $30, %xmm6, %xmm1
        vpslld $25, %xmm6, %xmm2
        vpxor %xmm1, %xmm0, %xmm0
        vpxor %xmm2, %xmm0, %xmm0
        vmovdqa %xmm0, %xmm1
        vpsrldq $4, %xmm1, %xmm1
        vpslldq $12, %xmm0, %xmm0
        vpxor %xmm0, %xmm6, %xmm6
        vpsrld $0x01, %xmm6, %xmm2
        vpsrld $2, %xmm6, %xmm3
        vpsrld $7, %xmm6, %xmm0
        vpxor %xmm3, %xmm2, %xmm2
        vpxor %xmm0, %xmm2, %xmm2
        vpxor %xmm1, %xmm2, %xmm2
        vpxor %xmm6, %xmm2, %xmm2
        vpxor %xmm2, %xmm4, %xmm4
L_AES_GCM_init_avx1_calc_iv_done:
        # T = Encrypt counter
        # Fold in the IV bit length (edx * 8) and multiply by H once
        # more to finish Y0 = GHASH(IV)
        vpxor %xmm0, %xmm0, %xmm0
        shll $3, %edx
        vmovq %rdx, %xmm0
        vpxor %xmm0, %xmm4, %xmm4
        # ghash_gfmul_avx
        vpshufd $0x4e, %xmm4, %xmm1
        vpshufd $0x4e, %xmm5, %xmm2
        vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
        vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
        vpxor %xmm4, %xmm1, %xmm1
        vpxor %xmm5, %xmm2, %xmm2
        vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
        vpxor %xmm0, %xmm1, %xmm1
        vpxor %xmm3, %xmm1, %xmm1
        vmovdqa %xmm0, %xmm6
        vmovdqa %xmm3, %xmm4
        vpslldq $8, %xmm1, %xmm2
        vpsrldq $8, %xmm1, %xmm1
        vpxor %xmm2, %xmm6, %xmm6
        vpxor %xmm1, %xmm4, %xmm4
        vpsrld $31, %xmm6, %xmm0
        vpsrld $31, %xmm4, %xmm1
        vpslld $0x01, %xmm6, %xmm6
        vpslld $0x01, %xmm4, %xmm4
        vpsrldq $12, %xmm0, %xmm2
        vpslldq $4, %xmm0, %xmm0
        vpslldq $4, %xmm1, %xmm1
        vpor %xmm2, %xmm4, %xmm4
        vpor %xmm0, %xmm6, %xmm6
        vpor %xmm1, %xmm4, %xmm4
        vpslld $31, %xmm6, %xmm0
        vpslld $30, %xmm6, %xmm1
        vpslld $25, %xmm6, %xmm2
        vpxor %xmm1, %xmm0, %xmm0
        vpxor %xmm2, %xmm0, %xmm0
        vmovdqa %xmm0, %xmm1
        vpsrldq $4, %xmm1, %xmm1
        vpslldq $12, %xmm0, %xmm0
        vpxor %xmm0, %xmm6, %xmm6
        vpsrld $0x01, %xmm6, %xmm2
        vpsrld $2, %xmm6, %xmm3
        vpsrld $7, %xmm6, %xmm0
        vpxor %xmm3, %xmm2, %xmm2
        vpxor %xmm0, %xmm2, %xmm2
        vpxor %xmm1, %xmm2, %xmm2
        vpxor %xmm6, %xmm2, %xmm2
        vpxor %xmm2, %xmm4, %xmm4
        vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm4, %xmm4
        # Encrypt counter
        # T = E(K, Y0) for the computed Y0 (result kept in xmm15)
        vmovdqa (%rdi), %xmm7
        vpxor %xmm4, %xmm7, %xmm7
        vaesenc 16(%rdi), %xmm7, %xmm7
        vaesenc 32(%rdi), %xmm7, %xmm7
        vaesenc 48(%rdi), %xmm7, %xmm7
        vaesenc 64(%rdi), %xmm7, %xmm7
        vaesenc 80(%rdi), %xmm7, %xmm7
        vaesenc 96(%rdi), %xmm7, %xmm7
        vaesenc 112(%rdi), %xmm7, %xmm7
        vaesenc 128(%rdi), %xmm7, %xmm7
        vaesenc 144(%rdi), %xmm7, %xmm7
        cmpl $11, %esi
        vmovdqa 160(%rdi), %xmm8
        jl L_AES_GCM_init_avx1_calc_iv_2_aesenc_avx_last
        vaesenc %xmm8, %xmm7, %xmm7
        vaesenc 176(%rdi), %xmm7, %xmm7
        cmpl $13, %esi
        vmovdqa 192(%rdi), %xmm8
        jl L_AES_GCM_init_avx1_calc_iv_2_aesenc_avx_last
        vaesenc %xmm8, %xmm7, %xmm7
        vaesenc 208(%rdi), %xmm7, %xmm7
        vmovdqa 224(%rdi), %xmm8
L_AES_GCM_init_avx1_calc_iv_2_aesenc_avx_last:
        vaesenclast %xmm8, %xmm7, %xmm7
        vmovdqu %xmm7, %xmm15
L_AES_GCM_init_avx1_iv_done:
        # Store outputs: T, H, and the incremented counter block
        vmovdqa %xmm15, (%rax)
        vpshufb L_avx1_aes_gcm_bswap_epi64(%rip), %xmm4, %xmm4
        vpaddd L_avx1_aes_gcm_one(%rip), %xmm4, %xmm4
        vmovdqa %xmm5, (%r8)
        vmovdqa %xmm4, (%r9)
        vzeroupper
        addq $16, %rsp
        popq %r13
        popq %r12
        repz retq
#ifndef __APPLE__
.size AES_GCM_init_avx1,.-AES_GCM_init_avx1
#endif /* __APPLE__ */
# AES_GCM_aad_update_avx1
# Fold additional authenticated data (AAD) into the running GHASH value,
# one 16-byte block per loop iteration.
# Register use (SysV AMD64, as read by the code below):
#   rdi = AAD byte pointer
#   esi = AAD length in bytes -- processed in whole 16-byte blocks; assumed
#         to be a positive multiple of 16 (TODO confirm against the C caller)
#   rdx = pointer to the 128-bit GHASH accumulator, updated in place
#   rcx = pointer to the 128-bit hash key H
# Clobbers: rax, rcx, xmm0-xmm7, flags.
#ifndef __APPLE__
.text
.globl AES_GCM_aad_update_avx1
.type AES_GCM_aad_update_avx1,@function
.align 16
AES_GCM_aad_update_avx1:
#else
.section __TEXT,__text
.globl _AES_GCM_aad_update_avx1
.p2align 4
_AES_GCM_aad_update_avx1:
#endif /* __APPLE__ */
# rcx is reused as the loop byte index, so stash the H pointer in rax first.
movq %rcx, %rax
# xmm5 = GHASH accumulator, xmm6 = hash key H (both kept live across the loop).
vmovdqa (%rdx), %xmm5
vmovdqa (%rax), %xmm6
xorl %ecx, %ecx
L_AES_GCM_aad_update_avx1_16_loop:
# Load the next AAD block, byte-reverse it into GHASH bit order and XOR it
# into the accumulator before multiplying by H.
vmovdqu (%rdi,%rcx,1), %xmm7
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm7, %xmm7
vpxor %xmm7, %xmm5, %xmm5
# ghash_gfmul_avx
# Karatsuba carry-less multiply: xmm3 = hi*hi, xmm0 = lo*lo, and the middle
# term is built from the halved-and-XORed operands (vpshufd $0x4e swaps the
# 64-bit halves).
vpshufd $0x4e, %xmm5, %xmm1
vpshufd $0x4e, %xmm6, %xmm2
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm0
vpxor %xmm5, %xmm1, %xmm1
vpxor %xmm6, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
# Recombine into the 256-bit product: xmm4 = low 128 bits, xmm5 = high 128.
vmovdqa %xmm0, %xmm4
vmovdqa %xmm3, %xmm5
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm4, %xmm4
vpxor %xmm1, %xmm5, %xmm5
# Shift the whole 256-bit product left by one bit (carrying bits across the
# 32-bit lanes and across the low/high halves).
vpsrld $31, %xmm4, %xmm0
vpsrld $31, %xmm5, %xmm1
vpslld $0x01, %xmm4, %xmm4
vpslld $0x01, %xmm5, %xmm5
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm5, %xmm5
vpor %xmm0, %xmm4, %xmm4
vpor %xmm1, %xmm5, %xmm5
# Reduce modulo the GCM polynomial x^128 + x^7 + x^2 + x + 1: the shift
# amounts 31/30/25 and 1/2/7 are the two halves of the standard reduction.
vpslld $31, %xmm4, %xmm0
vpslld $30, %xmm4, %xmm1
vpslld $25, %xmm4, %xmm2
vpxor %xmm1, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, %xmm1
vpsrldq $4, %xmm1, %xmm1
vpslldq $12, %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
vpsrld $0x01, %xmm4, %xmm2
vpsrld $2, %xmm4, %xmm3
vpsrld $7, %xmm4, %xmm0
vpxor %xmm3, %xmm2, %xmm2
vpxor %xmm0, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm4, %xmm2, %xmm2
vpxor %xmm2, %xmm5, %xmm5
# Advance by one block and loop while index < length.
addl $16, %ecx
cmpl %esi, %ecx
jl L_AES_GCM_aad_update_avx1_16_loop
# Write the updated GHASH accumulator back.
vmovdqa %xmm5, (%rdx)
vzeroupper
repz retq
#ifndef __APPLE__
.size AES_GCM_aad_update_avx1,.-AES_GCM_aad_update_avx1
#endif /* __APPLE__ */
# AES_GCM_encrypt_block_avx1
# Encrypt a single 16-byte block in CTR mode: AES-encrypt the current counter
# block, XOR with the input block, store the result, and post-increment the
# counter in memory.
# Register use (SysV AMD64, as read by the code below):
#   rdi = AES round-key schedule (one 16-byte round key per 16-byte slot)
#   esi = number of AES rounds (10/12/14; tested against 11 and 13 below)
#   rdx = output (ciphertext) pointer
#   rcx = input (plaintext) pointer
#   r8  = pointer to the 128-bit counter block, updated in place
# Clobbers: r10, r11, xmm0, xmm1, flags.
#ifndef __APPLE__
.text
.globl AES_GCM_encrypt_block_avx1
.type AES_GCM_encrypt_block_avx1,@function
.align 16
AES_GCM_encrypt_block_avx1:
#else
.section __TEXT,__text
.globl _AES_GCM_encrypt_block_avx1
.p2align 4
_AES_GCM_encrypt_block_avx1:
#endif /* __APPLE__ */
movq %rdx, %r10
movq %rcx, %r11
# xmm0 = byte-swapped counter to feed AES; memory copy is incremented now.
vmovdqu (%r8), %xmm1
vpshufb L_avx1_aes_gcm_bswap_epi64(%rip), %xmm1, %xmm0
vpaddd L_avx1_aes_gcm_one(%rip), %xmm1, %xmm1
vmovdqu %xmm1, (%r8)
# AES rounds: whitening key, then 9 full rounds common to all key sizes.
vpxor (%rdi), %xmm0, %xmm0
vaesenc 16(%rdi), %xmm0, %xmm0
vaesenc 32(%rdi), %xmm0, %xmm0
vaesenc 48(%rdi), %xmm0, %xmm0
vaesenc 64(%rdi), %xmm0, %xmm0
vaesenc 80(%rdi), %xmm0, %xmm0
vaesenc 96(%rdi), %xmm0, %xmm0
vaesenc 112(%rdi), %xmm0, %xmm0
vaesenc 128(%rdi), %xmm0, %xmm0
vaesenc 144(%rdi), %xmm0, %xmm0
# Extra rounds for AES-192 (nr >= 11) and AES-256 (nr >= 13); xmm1 ends up
# holding whichever key belongs to the final vaesenclast.
cmpl $11, %esi
vmovdqa 160(%rdi), %xmm1
jl L_AES_GCM_encrypt_block_avx1_aesenc_block_last
vaesenc %xmm1, %xmm0, %xmm0
vaesenc 176(%rdi), %xmm0, %xmm0
cmpl $13, %esi
vmovdqa 192(%rdi), %xmm1
jl L_AES_GCM_encrypt_block_avx1_aesenc_block_last
vaesenc %xmm1, %xmm0, %xmm0
vaesenc 208(%rdi), %xmm0, %xmm0
vmovdqa 224(%rdi), %xmm1
L_AES_GCM_encrypt_block_avx1_aesenc_block_last:
vaesenclast %xmm1, %xmm0, %xmm0
# CTR: ciphertext = keystream XOR plaintext.
vmovdqu (%r11), %xmm1
vpxor %xmm1, %xmm0, %xmm0
vmovdqu %xmm0, (%r10)
# Leave a GHASH-byte-order copy of the ciphertext in xmm0; presumably for a
# caller that folds it into the tag -- not stored here (TODO confirm usage).
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vzeroupper
repz retq
#ifndef __APPLE__
.size AES_GCM_encrypt_block_avx1,.-AES_GCM_encrypt_block_avx1
#endif /* __APPLE__ */
# AES_GCM_ghash_block_avx1
# Fold one 16-byte data block into the GHASH accumulator:
#   X = (X XOR byteswap(block)) * H  over GF(2^128).
# Register use (SysV AMD64, as read by the code below):
#   rdi = pointer to the 16-byte data block
#   rsi = pointer to the 128-bit GHASH accumulator X, updated in place
#   rdx = pointer to the 128-bit hash key H
# Clobbers: xmm0-xmm7, flags.
#ifndef __APPLE__
.text
.globl AES_GCM_ghash_block_avx1
.type AES_GCM_ghash_block_avx1,@function
.align 16
AES_GCM_ghash_block_avx1:
#else
.section __TEXT,__text
.globl _AES_GCM_ghash_block_avx1
.p2align 4
_AES_GCM_ghash_block_avx1:
#endif /* __APPLE__ */
# xmm4 = accumulator X, xmm5 = H, xmm7 = input block (byte-reversed).
vmovdqa (%rsi), %xmm4
vmovdqa (%rdx), %xmm5
vmovdqu (%rdi), %xmm7
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm7, %xmm7
vpxor %xmm7, %xmm4, %xmm4
# ghash_gfmul_avx
# Karatsuba carry-less multiply of X and H (hi*hi, lo*lo, middle term from
# half-swapped operands), same scheme as the other gfmul blocks in this file.
vpshufd $0x4e, %xmm4, %xmm1
vpshufd $0x4e, %xmm5, %xmm2
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpxor %xmm4, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
# Recombine: xmm6 = low 128 bits of the product, xmm4 = high 128 bits.
vmovdqa %xmm0, %xmm6
vmovdqa %xmm3, %xmm4
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm1, %xmm4, %xmm4
# Shift the 256-bit product left by one bit across lanes and halves.
vpsrld $31, %xmm6, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm6, %xmm6
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm6, %xmm6
vpor %xmm1, %xmm4, %xmm4
# Reduce modulo the GCM polynomial x^128 + x^7 + x^2 + x + 1.
vpslld $31, %xmm6, %xmm0
vpslld $30, %xmm6, %xmm1
vpslld $25, %xmm6, %xmm2
vpxor %xmm1, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, %xmm1
vpsrldq $4, %xmm1, %xmm1
vpslldq $12, %xmm0, %xmm0
vpxor %xmm0, %xmm6, %xmm6
vpsrld $0x01, %xmm6, %xmm2
vpsrld $2, %xmm6, %xmm3
vpsrld $7, %xmm6, %xmm0
vpxor %xmm3, %xmm2, %xmm2
vpxor %xmm0, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm6, %xmm2, %xmm2
vpxor %xmm2, %xmm4, %xmm4
# Store the updated accumulator.
vmovdqa %xmm4, (%rsi)
vzeroupper
repz retq
#ifndef __APPLE__
.size AES_GCM_ghash_block_avx1,.-AES_GCM_ghash_block_avx1
#endif /* __APPLE__ */
# AES_GCM_encrypt_update_avx1
# Encrypt a span of plaintext with AES-CTR and fold the resulting ciphertext
# into the running GHASH tag, working 128 bytes (8 AES blocks) at a time
# where possible, then 16 bytes at a time for the tail.
# Register use (SysV AMD64, as read by the code below):
#   rdi       = AES round-key schedule (16 bytes per round)
#   esi       = number of AES rounds (10/12/14; tested against 11 and 13)
#   rdx (r10) = output (ciphertext) pointer
#   rcx (r11) = input (plaintext) pointer
#   r8d       = number of bytes to process -- blocks only; a partial final
#               block is presumably handled elsewhere (TODO confirm caller)
#   r9        = pointer to the 128-bit GHASH accumulator, updated in place
#   stack     = pointer to hash key H and pointer to the 128-bit counter
#               block (read as 32(%rsp)/40(%rsp) after the three pushes)
# Uses 0xa0 bytes of local stack for the H^1..H^8 power table.
# Clobbers: rax, rcx, rdx, r10, r11, r13, r14, xmm0-xmm15, flags.
#ifndef __APPLE__
.text
.globl AES_GCM_encrypt_update_avx1
.type AES_GCM_encrypt_update_avx1,@function
.align 16
AES_GCM_encrypt_update_avx1:
#else
.section __TEXT,__text
.globl _AES_GCM_encrypt_update_avx1
.p2align 4
_AES_GCM_encrypt_update_avx1:
#endif /* __APPLE__ */
pushq %r13
pushq %r12
pushq %r14
movq %rdx, %r10
movq %rcx, %r11
# rax = H pointer, r12 = counter-block pointer (stack arguments).
movq 32(%rsp), %rax
movq 40(%rsp), %r12
subq $0xa0, %rsp
# xmm6 = current GHASH accumulator.
vmovdqa (%r9), %xmm6
# Compute xmm5 = H * x: carry-less shift left by one with the top bit folded
# back in via the L_avx1_aes_gcm_mod2_128 mask (conditional reduction).
vmovdqa (%rax), %xmm5
vpsrlq $63, %xmm5, %xmm9
vpsllq $0x01, %xmm5, %xmm8
vpslldq $8, %xmm9, %xmm9
vpor %xmm9, %xmm8, %xmm8
vpshufd $0xff, %xmm5, %xmm5
vpsrad $31, %xmm5, %xmm5
vpand L_avx1_aes_gcm_mod2_128(%rip), %xmm5, %xmm5
vpxor %xmm8, %xmm5, %xmm5
# r14 = bytes processed so far; skip the 8-block path if size < 128.
xorl %r14d, %r14d
cmpl $0x80, %r8d
movl %r8d, %r13d
jl L_AES_GCM_encrypt_update_avx1_done_128
# r13d = size rounded down to a whole number of 128-byte chunks.
andl $0xffffff80, %r13d
vmovdqa %xmm6, %xmm2
# Build the H^1..H^8 power table at (%rsp)..112(%rsp): squarings for the even
# powers, full gfmul-with-reduction for the odd ones.
# H ^ 1
vmovdqu %xmm5, (%rsp)
# H ^ 2
vpclmulqdq $0x00, %xmm5, %xmm5, %xmm8
vpclmulqdq $0x11, %xmm5, %xmm5, %xmm0
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm0, %xmm0
vmovdqu %xmm0, 16(%rsp)
# H ^ 3
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm5, %xmm9
vpshufd $0x4e, %xmm0, %xmm10
vpclmulqdq $0x11, %xmm5, %xmm0, %xmm11
vpclmulqdq $0x00, %xmm5, %xmm0, %xmm8
vpxor %xmm5, %xmm9, %xmm9
vpxor %xmm0, %xmm10, %xmm10
vpclmulqdq $0x00, %xmm10, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpxor %xmm11, %xmm9, %xmm9
vpslldq $8, %xmm9, %xmm10
vpsrldq $8, %xmm9, %xmm9
vpxor %xmm10, %xmm8, %xmm8
vpxor %xmm9, %xmm11, %xmm1
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm1, %xmm1
vmovdqu %xmm1, 32(%rsp)
# H ^ 4
vpclmulqdq $0x00, %xmm0, %xmm0, %xmm8
vpclmulqdq $0x11, %xmm0, %xmm0, %xmm3
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm3, %xmm3
vmovdqu %xmm3, 48(%rsp)
# H ^ 5
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm0, %xmm9
vpshufd $0x4e, %xmm1, %xmm10
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm11
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm8
vpxor %xmm0, %xmm9, %xmm9
vpxor %xmm1, %xmm10, %xmm10
vpclmulqdq $0x00, %xmm10, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpxor %xmm11, %xmm9, %xmm9
vpslldq $8, %xmm9, %xmm10
vpsrldq $8, %xmm9, %xmm9
vpxor %xmm10, %xmm8, %xmm8
vpxor %xmm9, %xmm11, %xmm7
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm7, %xmm7
vmovdqu %xmm7, 64(%rsp)
# H ^ 6
vpclmulqdq $0x00, %xmm1, %xmm1, %xmm8
vpclmulqdq $0x11, %xmm1, %xmm1, %xmm7
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm7, %xmm7
vmovdqu %xmm7, 80(%rsp)
# H ^ 7
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm1, %xmm9
vpshufd $0x4e, %xmm3, %xmm10
vpclmulqdq $0x11, %xmm1, %xmm3, %xmm11
vpclmulqdq $0x00, %xmm1, %xmm3, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vpxor %xmm3, %xmm10, %xmm10
vpclmulqdq $0x00, %xmm10, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpxor %xmm11, %xmm9, %xmm9
vpslldq $8, %xmm9, %xmm10
vpsrldq $8, %xmm9, %xmm9
vpxor %xmm10, %xmm8, %xmm8
vpxor %xmm9, %xmm11, %xmm7
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm7, %xmm7
vmovdqu %xmm7, 96(%rsp)
# H ^ 8
vpclmulqdq $0x00, %xmm3, %xmm3, %xmm8
vpclmulqdq $0x11, %xmm3, %xmm3, %xmm7
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm7, %xmm7
vmovdqu %xmm7, 112(%rsp)
# First 128 bytes of input
# Generate 8 consecutive counter blocks (xmm8-xmm15), byte-swapped for AES,
# and store the advanced counter back.
vmovdqu (%r12), %xmm0
vmovdqa L_avx1_aes_gcm_bswap_epi64(%rip), %xmm1
vpshufb %xmm1, %xmm0, %xmm8
vpaddd L_avx1_aes_gcm_one(%rip), %xmm0, %xmm9
vpshufb %xmm1, %xmm9, %xmm9
vpaddd L_avx1_aes_gcm_two(%rip), %xmm0, %xmm10
vpshufb %xmm1, %xmm10, %xmm10
vpaddd L_avx1_aes_gcm_three(%rip), %xmm0, %xmm11
vpshufb %xmm1, %xmm11, %xmm11
vpaddd L_avx1_aes_gcm_four(%rip), %xmm0, %xmm12
vpshufb %xmm1, %xmm12, %xmm12
vpaddd L_avx1_aes_gcm_five(%rip), %xmm0, %xmm13
vpshufb %xmm1, %xmm13, %xmm13
vpaddd L_avx1_aes_gcm_six(%rip), %xmm0, %xmm14
vpshufb %xmm1, %xmm14, %xmm14
vpaddd L_avx1_aes_gcm_seven(%rip), %xmm0, %xmm15
vpshufb %xmm1, %xmm15, %xmm15
vpaddd L_avx1_aes_gcm_eight(%rip), %xmm0, %xmm0
# AES-encrypt all 8 counter blocks in lock-step, one round key at a time.
vmovdqa (%rdi), %xmm7
vmovdqu %xmm0, (%r12)
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm7, %xmm9, %xmm9
vpxor %xmm7, %xmm10, %xmm10
vpxor %xmm7, %xmm11, %xmm11
vpxor %xmm7, %xmm12, %xmm12
vpxor %xmm7, %xmm13, %xmm13
vpxor %xmm7, %xmm14, %xmm14
vpxor %xmm7, %xmm15, %xmm15
vmovdqa 16(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 32(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 48(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 64(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 80(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 96(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 112(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 128(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 144(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
# Extra rounds for AES-192 (nr >= 11) and AES-256 (nr >= 13).
cmpl $11, %esi
vmovdqa 160(%rdi), %xmm7
jl L_AES_GCM_encrypt_update_avx1_aesenc_128_enc_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 176(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
cmpl $13, %esi
vmovdqa 192(%rdi), %xmm7
jl L_AES_GCM_encrypt_update_avx1_aesenc_128_enc_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 208(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 224(%rdi), %xmm7
L_AES_GCM_encrypt_update_avx1_aesenc_128_enc_done:
# Last round, then XOR keystream with plaintext and store ciphertext.
vaesenclast %xmm7, %xmm8, %xmm8
vaesenclast %xmm7, %xmm9, %xmm9
vmovdqu (%r11), %xmm0
vmovdqu 16(%r11), %xmm1
vpxor %xmm0, %xmm8, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vmovdqu %xmm8, (%r10)
vmovdqu %xmm9, 16(%r10)
vaesenclast %xmm7, %xmm10, %xmm10
vaesenclast %xmm7, %xmm11, %xmm11
vmovdqu 32(%r11), %xmm0
vmovdqu 48(%r11), %xmm1
vpxor %xmm0, %xmm10, %xmm10
vpxor %xmm1, %xmm11, %xmm11
vmovdqu %xmm10, 32(%r10)
vmovdqu %xmm11, 48(%r10)
vaesenclast %xmm7, %xmm12, %xmm12
vaesenclast %xmm7, %xmm13, %xmm13
vmovdqu 64(%r11), %xmm0
vmovdqu 80(%r11), %xmm1
vpxor %xmm0, %xmm12, %xmm12
vpxor %xmm1, %xmm13, %xmm13
vmovdqu %xmm12, 64(%r10)
vmovdqu %xmm13, 80(%r10)
vaesenclast %xmm7, %xmm14, %xmm14
vaesenclast %xmm7, %xmm15, %xmm15
vmovdqu 96(%r11), %xmm0
vmovdqu 112(%r11), %xmm1
vpxor %xmm0, %xmm14, %xmm14
vpxor %xmm1, %xmm15, %xmm15
vmovdqu %xmm14, 96(%r10)
vmovdqu %xmm15, 112(%r10)
cmpl $0x80, %r13d
movl $0x80, %r14d
jle L_AES_GCM_encrypt_update_avx1_end_128
# More 128 bytes of input
L_AES_GCM_encrypt_update_avx1_ghash_128:
# Steady-state loop: AES-encrypt the next 8 counter blocks while GHASH-ing
# the previous 128 bytes of ciphertext (read back from rdx-128..rdx-16) with
# H^8..H^1; AESENC and VPCLMULQDQ work is interleaved for throughput.
# Accumulators: xmm2 = low, xmm3 = high, xmm1 = middle Karatsuba terms.
leaq (%r11,%r14,1), %rcx
leaq (%r10,%r14,1), %rdx
vmovdqu (%r12), %xmm0
vmovdqa L_avx1_aes_gcm_bswap_epi64(%rip), %xmm1
vpshufb %xmm1, %xmm0, %xmm8
vpaddd L_avx1_aes_gcm_one(%rip), %xmm0, %xmm9
vpshufb %xmm1, %xmm9, %xmm9
vpaddd L_avx1_aes_gcm_two(%rip), %xmm0, %xmm10
vpshufb %xmm1, %xmm10, %xmm10
vpaddd L_avx1_aes_gcm_three(%rip), %xmm0, %xmm11
vpshufb %xmm1, %xmm11, %xmm11
vpaddd L_avx1_aes_gcm_four(%rip), %xmm0, %xmm12
vpshufb %xmm1, %xmm12, %xmm12
vpaddd L_avx1_aes_gcm_five(%rip), %xmm0, %xmm13
vpshufb %xmm1, %xmm13, %xmm13
vpaddd L_avx1_aes_gcm_six(%rip), %xmm0, %xmm14
vpshufb %xmm1, %xmm14, %xmm14
vpaddd L_avx1_aes_gcm_seven(%rip), %xmm0, %xmm15
vpshufb %xmm1, %xmm15, %xmm15
vpaddd L_avx1_aes_gcm_eight(%rip), %xmm0, %xmm0
vmovdqa (%rdi), %xmm7
vmovdqu %xmm0, (%r12)
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm7, %xmm9, %xmm9
vpxor %xmm7, %xmm10, %xmm10
vpxor %xmm7, %xmm11, %xmm11
vpxor %xmm7, %xmm12, %xmm12
vpxor %xmm7, %xmm13, %xmm13
vpxor %xmm7, %xmm14, %xmm14
vpxor %xmm7, %xmm15, %xmm15
# Ciphertext block 0 * H^8; the running tag (xmm2) is folded into block 0.
vmovdqu 112(%rsp), %xmm7
vmovdqu -128(%rdx), %xmm0
vaesenc 16(%rdi), %xmm8, %xmm8
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm3
vaesenc 16(%rdi), %xmm9, %xmm9
vaesenc 16(%rdi), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm2
vaesenc 16(%rdi), %xmm11, %xmm11
vaesenc 16(%rdi), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm1, %xmm1
vaesenc 16(%rdi), %xmm13, %xmm13
vaesenc 16(%rdi), %xmm14, %xmm14
vaesenc 16(%rdi), %xmm15, %xmm15
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
# Ciphertext block 1 * H^7, accumulated into xmm1/xmm2/xmm3.
vmovdqu 96(%rsp), %xmm7
vmovdqu -112(%rdx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 32(%rdi), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 32(%rdi), %xmm9, %xmm9
vaesenc 32(%rdi), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 32(%rdi), %xmm11, %xmm11
vaesenc 32(%rdi), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 32(%rdi), %xmm13, %xmm13
vaesenc 32(%rdi), %xmm14, %xmm14
vaesenc 32(%rdi), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
# Ciphertext block 2 * H^6.
vmovdqu 80(%rsp), %xmm7
vmovdqu -96(%rdx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 48(%rdi), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 48(%rdi), %xmm9, %xmm9
vaesenc 48(%rdi), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 48(%rdi), %xmm11, %xmm11
vaesenc 48(%rdi), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 48(%rdi), %xmm13, %xmm13
vaesenc 48(%rdi), %xmm14, %xmm14
vaesenc 48(%rdi), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
# Ciphertext block 3 * H^5.
vmovdqu 64(%rsp), %xmm7
vmovdqu -80(%rdx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 64(%rdi), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 64(%rdi), %xmm9, %xmm9
vaesenc 64(%rdi), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 64(%rdi), %xmm11, %xmm11
vaesenc 64(%rdi), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 64(%rdi), %xmm13, %xmm13
vaesenc 64(%rdi), %xmm14, %xmm14
vaesenc 64(%rdi), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
# Ciphertext block 4 * H^4.
vmovdqu 48(%rsp), %xmm7
vmovdqu -64(%rdx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 80(%rdi), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 80(%rdi), %xmm9, %xmm9
vaesenc 80(%rdi), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 80(%rdi), %xmm11, %xmm11
vaesenc 80(%rdi), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 80(%rdi), %xmm13, %xmm13
vaesenc 80(%rdi), %xmm14, %xmm14
vaesenc 80(%rdi), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
# Ciphertext block 5 * H^3.
vmovdqu 32(%rsp), %xmm7
vmovdqu -48(%rdx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 96(%rdi), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 96(%rdi), %xmm9, %xmm9
vaesenc 96(%rdi), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 96(%rdi), %xmm11, %xmm11
vaesenc 96(%rdi), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 96(%rdi), %xmm13, %xmm13
vaesenc 96(%rdi), %xmm14, %xmm14
vaesenc 96(%rdi), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
# Ciphertext block 6 * H^2.
vmovdqu 16(%rsp), %xmm7
vmovdqu -32(%rdx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 112(%rdi), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 112(%rdi), %xmm9, %xmm9
vaesenc 112(%rdi), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 112(%rdi), %xmm11, %xmm11
vaesenc 112(%rdi), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 112(%rdi), %xmm13, %xmm13
vaesenc 112(%rdi), %xmm14, %xmm14
vaesenc 112(%rdi), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
# Ciphertext block 7 * H^1.
vmovdqu (%rsp), %xmm7
vmovdqu -16(%rdx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 128(%rdi), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 128(%rdi), %xmm9, %xmm9
vaesenc 128(%rdi), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 128(%rdi), %xmm11, %xmm11
vaesenc 128(%rdi), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 128(%rdi), %xmm13, %xmm13
vaesenc 128(%rdi), %xmm14, %xmm14
vaesenc 128(%rdi), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
# Combine the Karatsuba terms and reduce the 256-bit sum modulo the GCM
# polynomial; the new running tag lands in xmm2.
vpslldq $8, %xmm1, %xmm5
vpsrldq $8, %xmm1, %xmm1
vaesenc 144(%rdi), %xmm8, %xmm8
vpxor %xmm5, %xmm2, %xmm2
vpxor %xmm1, %xmm3, %xmm3
vaesenc 144(%rdi), %xmm9, %xmm9
vpslld $31, %xmm2, %xmm7
vpslld $30, %xmm2, %xmm4
vpslld $25, %xmm2, %xmm5
vaesenc 144(%rdi), %xmm10, %xmm10
vpxor %xmm4, %xmm7, %xmm7
vpxor %xmm5, %xmm7, %xmm7
vaesenc 144(%rdi), %xmm11, %xmm11
vpsrldq $4, %xmm7, %xmm4
vpslldq $12, %xmm7, %xmm7
vaesenc 144(%rdi), %xmm12, %xmm12
vpxor %xmm7, %xmm2, %xmm2
vpsrld $0x01, %xmm2, %xmm5
vaesenc 144(%rdi), %xmm13, %xmm13
vpsrld $2, %xmm2, %xmm1
vpsrld $7, %xmm2, %xmm0
vaesenc 144(%rdi), %xmm14, %xmm14
vpxor %xmm1, %xmm5, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vaesenc 144(%rdi), %xmm15, %xmm15
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm2, %xmm2
vpxor %xmm3, %xmm2, %xmm2
# Extra rounds for AES-192 / AES-256.
cmpl $11, %esi
vmovdqa 160(%rdi), %xmm7
jl L_AES_GCM_encrypt_update_avx1_aesenc_128_ghash_avx_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 176(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
cmpl $13, %esi
vmovdqa 192(%rdi), %xmm7
jl L_AES_GCM_encrypt_update_avx1_aesenc_128_ghash_avx_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 208(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 224(%rdi), %xmm7
L_AES_GCM_encrypt_update_avx1_aesenc_128_ghash_avx_done:
# Final round; XOR keystream with this chunk's plaintext and store.
vaesenclast %xmm7, %xmm8, %xmm8
vaesenclast %xmm7, %xmm9, %xmm9
vmovdqu (%rcx), %xmm0
vmovdqu 16(%rcx), %xmm1
vpxor %xmm0, %xmm8, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vmovdqu %xmm8, (%rdx)
vmovdqu %xmm9, 16(%rdx)
vaesenclast %xmm7, %xmm10, %xmm10
vaesenclast %xmm7, %xmm11, %xmm11
vmovdqu 32(%rcx), %xmm0
vmovdqu 48(%rcx), %xmm1
vpxor %xmm0, %xmm10, %xmm10
vpxor %xmm1, %xmm11, %xmm11
vmovdqu %xmm10, 32(%rdx)
vmovdqu %xmm11, 48(%rdx)
vaesenclast %xmm7, %xmm12, %xmm12
vaesenclast %xmm7, %xmm13, %xmm13
vmovdqu 64(%rcx), %xmm0
vmovdqu 80(%rcx), %xmm1
vpxor %xmm0, %xmm12, %xmm12
vpxor %xmm1, %xmm13, %xmm13
vmovdqu %xmm12, 64(%rdx)
vmovdqu %xmm13, 80(%rdx)
vaesenclast %xmm7, %xmm14, %xmm14
vaesenclast %xmm7, %xmm15, %xmm15
vmovdqu 96(%rcx), %xmm0
vmovdqu 112(%rcx), %xmm1
vpxor %xmm0, %xmm14, %xmm14
vpxor %xmm1, %xmm15, %xmm15
vmovdqu %xmm14, 96(%rdx)
vmovdqu %xmm15, 112(%rdx)
addl $0x80, %r14d
cmpl %r13d, %r14d
jl L_AES_GCM_encrypt_update_avx1_ghash_128
L_AES_GCM_encrypt_update_avx1_end_128:
# The last 8 ciphertext blocks (still in xmm8-xmm15) have not been GHASH-ed
# yet: byte-swap them, fold the running tag into block 0, multiply by
# H^8..H^1 with deferred reduction (accumulating in xmm4/xmm6), then reduce.
vmovdqa L_avx1_aes_gcm_bswap_mask(%rip), %xmm4
vpshufb %xmm4, %xmm8, %xmm8
vpshufb %xmm4, %xmm9, %xmm9
vpshufb %xmm4, %xmm10, %xmm10
vpshufb %xmm4, %xmm11, %xmm11
vpxor %xmm2, %xmm8, %xmm8
vpshufb %xmm4, %xmm12, %xmm12
vpshufb %xmm4, %xmm13, %xmm13
vpshufb %xmm4, %xmm14, %xmm14
vpshufb %xmm4, %xmm15, %xmm15
vmovdqu (%rsp), %xmm7
vmovdqu 16(%rsp), %xmm5
# ghash_gfmul_avx
vpshufd $0x4e, %xmm15, %xmm1
vpshufd $0x4e, %xmm7, %xmm2
vpclmulqdq $0x11, %xmm15, %xmm7, %xmm3
vpclmulqdq $0x00, %xmm15, %xmm7, %xmm0
vpxor %xmm15, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vmovdqa %xmm0, %xmm4
vmovdqa %xmm3, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm4, %xmm4
vpxor %xmm1, %xmm6, %xmm6
# ghash_gfmul_xor_avx
vpshufd $0x4e, %xmm14, %xmm1
vpshufd $0x4e, %xmm5, %xmm2
vpclmulqdq $0x11, %xmm14, %xmm5, %xmm3
vpclmulqdq $0x00, %xmm14, %xmm5, %xmm0
vpxor %xmm14, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm4, %xmm4
vpxor %xmm3, %xmm6, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm4, %xmm4
vpxor %xmm1, %xmm6, %xmm6
vmovdqu 32(%rsp), %xmm7
vmovdqu 48(%rsp), %xmm5
# ghash_gfmul_xor_avx
vpshufd $0x4e, %xmm13, %xmm1
vpshufd $0x4e, %xmm7, %xmm2
vpclmulqdq $0x11, %xmm13, %xmm7, %xmm3
vpclmulqdq $0x00, %xmm13, %xmm7, %xmm0
vpxor %xmm13, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm4, %xmm4
vpxor %xmm3, %xmm6, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm4, %xmm4
vpxor %xmm1, %xmm6, %xmm6
# ghash_gfmul_xor_avx
vpshufd $0x4e, %xmm12, %xmm1
vpshufd $0x4e, %xmm5, %xmm2
vpclmulqdq $0x11, %xmm12, %xmm5, %xmm3
vpclmulqdq $0x00, %xmm12, %xmm5, %xmm0
vpxor %xmm12, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm4, %xmm4
vpxor %xmm3, %xmm6, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm4, %xmm4
vpxor %xmm1, %xmm6, %xmm6
vmovdqu 64(%rsp), %xmm7
vmovdqu 80(%rsp), %xmm5
# ghash_gfmul_xor_avx
vpshufd $0x4e, %xmm11, %xmm1
vpshufd $0x4e, %xmm7, %xmm2
vpclmulqdq $0x11, %xmm11, %xmm7, %xmm3
vpclmulqdq $0x00, %xmm11, %xmm7, %xmm0
vpxor %xmm11, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm4, %xmm4
vpxor %xmm3, %xmm6, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm4, %xmm4
vpxor %xmm1, %xmm6, %xmm6
# ghash_gfmul_xor_avx
vpshufd $0x4e, %xmm10, %xmm1
vpshufd $0x4e, %xmm5, %xmm2
vpclmulqdq $0x11, %xmm10, %xmm5, %xmm3
vpclmulqdq $0x00, %xmm10, %xmm5, %xmm0
vpxor %xmm10, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm4, %xmm4
vpxor %xmm3, %xmm6, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm4, %xmm4
vpxor %xmm1, %xmm6, %xmm6
vmovdqu 96(%rsp), %xmm7
vmovdqu 112(%rsp), %xmm5
# ghash_gfmul_xor_avx
vpshufd $0x4e, %xmm9, %xmm1
vpshufd $0x4e, %xmm7, %xmm2
vpclmulqdq $0x11, %xmm9, %xmm7, %xmm3
vpclmulqdq $0x00, %xmm9, %xmm7, %xmm0
vpxor %xmm9, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm4, %xmm4
vpxor %xmm3, %xmm6, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm4, %xmm4
vpxor %xmm1, %xmm6, %xmm6
# ghash_gfmul_xor_avx
vpshufd $0x4e, %xmm8, %xmm1
vpshufd $0x4e, %xmm5, %xmm2
vpclmulqdq $0x11, %xmm8, %xmm5, %xmm3
vpclmulqdq $0x00, %xmm8, %xmm5, %xmm0
vpxor %xmm8, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm4, %xmm4
vpxor %xmm3, %xmm6, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm4, %xmm4
vpxor %xmm1, %xmm6, %xmm6
# Single modular reduction of the accumulated 256-bit sum; tag ends in xmm6.
vpslld $31, %xmm4, %xmm0
vpslld $30, %xmm4, %xmm1
vpslld $25, %xmm4, %xmm2
vpxor %xmm1, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, %xmm1
vpsrldq $4, %xmm1, %xmm1
vpslldq $12, %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
vpsrld $0x01, %xmm4, %xmm2
vpsrld $2, %xmm4, %xmm3
vpsrld $7, %xmm4, %xmm0
vpxor %xmm3, %xmm2, %xmm2
vpxor %xmm0, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm4, %xmm2, %xmm2
vpxor %xmm2, %xmm6, %xmm6
# Reload H^1 for the per-block tail processing.
vmovdqu (%rsp), %xmm5
L_AES_GCM_encrypt_update_avx1_done_128:
# Tail: process remaining whole 16-byte blocks (r14 .. r13) one at a time.
movl %r8d, %edx
cmpl %edx, %r14d
jge L_AES_GCM_encrypt_update_avx1_done_enc
movl %r8d, %r13d
andl $0xfffffff0, %r13d
cmpl %r13d, %r14d
jge L_AES_GCM_encrypt_update_avx1_last_block_done
# First tail block: plain CTR encrypt, then XOR the ciphertext into the
# GHASH accumulator (the multiply is deferred to the next iteration/finish).
vmovdqu (%r12), %xmm9
vpshufb L_avx1_aes_gcm_bswap_epi64(%rip), %xmm9, %xmm8
vpaddd L_avx1_aes_gcm_one(%rip), %xmm9, %xmm9
vmovdqu %xmm9, (%r12)
vpxor (%rdi), %xmm8, %xmm8
vaesenc 16(%rdi), %xmm8, %xmm8
vaesenc 32(%rdi), %xmm8, %xmm8
vaesenc 48(%rdi), %xmm8, %xmm8
vaesenc 64(%rdi), %xmm8, %xmm8
vaesenc 80(%rdi), %xmm8, %xmm8
vaesenc 96(%rdi), %xmm8, %xmm8
vaesenc 112(%rdi), %xmm8, %xmm8
vaesenc 128(%rdi), %xmm8, %xmm8
vaesenc 144(%rdi), %xmm8, %xmm8
cmpl $11, %esi
vmovdqa 160(%rdi), %xmm9
jl L_AES_GCM_encrypt_update_avx1_aesenc_block_last
vaesenc %xmm9, %xmm8, %xmm8
vaesenc 176(%rdi), %xmm8, %xmm8
cmpl $13, %esi
vmovdqa 192(%rdi), %xmm9
jl L_AES_GCM_encrypt_update_avx1_aesenc_block_last
vaesenc %xmm9, %xmm8, %xmm8
vaesenc 208(%rdi), %xmm8, %xmm8
vmovdqa 224(%rdi), %xmm9
L_AES_GCM_encrypt_update_avx1_aesenc_block_last:
vaesenclast %xmm9, %xmm8, %xmm8
vmovdqu (%r11,%r14,1), %xmm9
vpxor %xmm9, %xmm8, %xmm8
vmovdqu %xmm8, (%r10,%r14,1)
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm8, %xmm8
vpxor %xmm8, %xmm6, %xmm6
addl $16, %r14d
cmpl %r13d, %r14d
jge L_AES_GCM_encrypt_update_avx1_last_block_ghash
L_AES_GCM_encrypt_update_avx1_last_block_start:
# Subsequent tail blocks: AES rounds interleaved with the gfmul of the
# pending GHASH value by H, using the Karatsuba split plus the two-step
# vpclmulqdq $16 reduction against L_avx1_aes_gcm_mod2_128.
vmovdqu (%r11,%r14,1), %xmm13
vmovdqu (%r12), %xmm9
vpshufb L_avx1_aes_gcm_bswap_epi64(%rip), %xmm9, %xmm8
vpaddd L_avx1_aes_gcm_one(%rip), %xmm9, %xmm9
vmovdqu %xmm9, (%r12)
vpxor (%rdi), %xmm8, %xmm8
vpclmulqdq $16, %xmm5, %xmm6, %xmm10
vaesenc 16(%rdi), %xmm8, %xmm8
vaesenc 32(%rdi), %xmm8, %xmm8
vpclmulqdq $0x01, %xmm5, %xmm6, %xmm11
vaesenc 48(%rdi), %xmm8, %xmm8
vaesenc 64(%rdi), %xmm8, %xmm8
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm12
vaesenc 80(%rdi), %xmm8, %xmm8
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm1
vaesenc 96(%rdi), %xmm8, %xmm8
vpxor %xmm11, %xmm10, %xmm10
vpslldq $8, %xmm10, %xmm2
vpsrldq $8, %xmm10, %xmm10
vaesenc 112(%rdi), %xmm8, %xmm8
vpxor %xmm12, %xmm2, %xmm2
vpxor %xmm10, %xmm1, %xmm3
vmovdqa L_avx1_aes_gcm_mod2_128(%rip), %xmm0
vpclmulqdq $16, %xmm0, %xmm2, %xmm11
vaesenc 128(%rdi), %xmm8, %xmm8
vpshufd $0x4e, %xmm2, %xmm10
vpxor %xmm11, %xmm10, %xmm10
vpclmulqdq $16, %xmm0, %xmm10, %xmm11
vaesenc 144(%rdi), %xmm8, %xmm8
vpshufd $0x4e, %xmm10, %xmm10
vpxor %xmm11, %xmm10, %xmm10
vpxor %xmm3, %xmm10, %xmm6
cmpl $11, %esi
vmovdqa 160(%rdi), %xmm9
jl L_AES_GCM_encrypt_update_avx1_aesenc_gfmul_last
vaesenc %xmm9, %xmm8, %xmm8
vaesenc 176(%rdi), %xmm8, %xmm8
cmpl $13, %esi
vmovdqa 192(%rdi), %xmm9
jl L_AES_GCM_encrypt_update_avx1_aesenc_gfmul_last
vaesenc %xmm9, %xmm8, %xmm8
vaesenc 208(%rdi), %xmm8, %xmm8
vmovdqa 224(%rdi), %xmm9
L_AES_GCM_encrypt_update_avx1_aesenc_gfmul_last:
vaesenclast %xmm9, %xmm8, %xmm8
vmovdqa %xmm13, %xmm0
vpxor %xmm0, %xmm8, %xmm8
vmovdqu %xmm8, (%r10,%r14,1)
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm8, %xmm8
addl $16, %r14d
vpxor %xmm8, %xmm6, %xmm6
cmpl %r13d, %r14d
jl L_AES_GCM_encrypt_update_avx1_last_block_start
L_AES_GCM_encrypt_update_avx1_last_block_ghash:
# Final multiply of the accumulator by H with full shift-based reduction.
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm5, %xmm9
vpshufd $0x4e, %xmm6, %xmm10
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm11
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm8
vpxor %xmm5, %xmm9, %xmm9
vpxor %xmm6, %xmm10, %xmm10
vpclmulqdq $0x00, %xmm10, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpxor %xmm11, %xmm9, %xmm9
vpslldq $8, %xmm9, %xmm10
vpsrldq $8, %xmm9, %xmm9
vpxor %xmm10, %xmm8, %xmm8
vpxor %xmm9, %xmm11, %xmm6
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm6, %xmm6
L_AES_GCM_encrypt_update_avx1_last_block_done:
L_AES_GCM_encrypt_update_avx1_done_enc:
# Store the updated GHASH accumulator and restore the stack/registers.
vmovdqa %xmm6, (%r9)
vzeroupper
addq $0xa0, %rsp
popq %r14
popq %r12
popq %r13
repz retq
#ifndef __APPLE__
.size AES_GCM_encrypt_update_avx1,.-AES_GCM_encrypt_update_avx1
#endif /* __APPLE__ */
# AES_GCM_encrypt_final_avx1 - compute the final GCM authentication tag
# using AVX1 + PCLMULQDQ.
# ABI: SysV AMD64.
# In:  rdi = running GHASH state X (16 bytes)
#      rsi = output buffer for the authentication tag
#      edx = tag size in bytes (1..16)
#      ecx, r8d = byte counts folded into the length block
#                 (presumably aSz and cSz - TODO confirm order against caller)
#      r9  = hash subkey H (byte-swapped)
#      arg7 (stack) = E(K, Y0), the encrypted initial counter block
# Clobbers: rax, rcx, rdx, r8, r10, r11, r13 (saved), xmm0-xmm13, flags.
#ifndef __APPLE__
.text
.globl AES_GCM_encrypt_final_avx1
.type AES_GCM_encrypt_final_avx1,@function
.align 16
AES_GCM_encrypt_final_avx1:
#else
.section __TEXT,__text
.globl _AES_GCM_encrypt_final_avx1
.p2align 4
_AES_GCM_encrypt_final_avx1:
#endif /* __APPLE__ */
pushq %r13
movl %edx, %eax
movl %ecx, %r10d
movl %r8d, %r11d
movq 16(%rsp), %r8
subq $16, %rsp
# xmm4 = GHASH state X, xmm5 = H, xmm6 = E(K, Y0)
vmovdqa (%rdi), %xmm4
vmovdqa (%r9), %xmm5
vmovdqa (%r8), %xmm6
# xmm5 = H * x in GF(2^128): 128-bit shift left by 1 with conditional
# reduction by the GCM polynomial when the top bit was set
vpsrlq $63, %xmm5, %xmm8
vpsllq $0x01, %xmm5, %xmm7
vpslldq $8, %xmm8, %xmm8
vpor %xmm8, %xmm7, %xmm7
vpshufd $0xff, %xmm5, %xmm5
vpsrad $31, %xmm5, %xmm5
vpand L_avx1_aes_gcm_mod2_128(%rip), %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
# build the GCM length block: both byte counts scaled to bit counts and
# packed into one 128-bit value, then folded into X
movl %r10d, %edx
movl %r11d, %ecx
shlq $3, %rdx
shlq $3, %rcx
vmovq %rdx, %xmm0
vmovq %rcx, %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_red_avx - X = (X . H) mod poly, Karatsuba split
# (3 carry-less multiplies) followed by shift-based reduction
vpshufd $0x4e, %xmm5, %xmm8
vpshufd $0x4e, %xmm4, %xmm9
vpclmulqdq $0x11, %xmm5, %xmm4, %xmm10
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm7
vpxor %xmm5, %xmm8, %xmm8
vpxor %xmm4, %xmm9, %xmm9
vpclmulqdq $0x00, %xmm9, %xmm8, %xmm8
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm10, %xmm8, %xmm8
vpslldq $8, %xmm8, %xmm9
vpsrldq $8, %xmm8, %xmm8
vpxor %xmm9, %xmm7, %xmm7
vpxor %xmm8, %xmm10, %xmm4
# reduction: multiply low 128 bits by x^128 mod poly via shifts
vpslld $31, %xmm7, %xmm11
vpslld $30, %xmm7, %xmm12
vpslld $25, %xmm7, %xmm13
vpxor %xmm12, %xmm11, %xmm11
vpxor %xmm13, %xmm11, %xmm11
vpsrldq $4, %xmm11, %xmm12
vpslldq $12, %xmm11, %xmm11
vpxor %xmm11, %xmm7, %xmm7
vpsrld $0x01, %xmm7, %xmm13
vpsrld $2, %xmm7, %xmm9
vpsrld $7, %xmm7, %xmm8
vpxor %xmm9, %xmm13, %xmm13
vpxor %xmm8, %xmm13, %xmm13
vpxor %xmm12, %xmm13, %xmm13
vpxor %xmm7, %xmm13, %xmm13
vpxor %xmm13, %xmm4, %xmm4
# tag = byte-swap(GHASH result) XOR E(K, Y0)
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm4, %xmm4
vpxor %xmm6, %xmm4, %xmm0
cmpl $16, %eax
je L_AES_GCM_encrypt_final_avx1_store_tag_16
# partial tag: spill to the stack and copy eax bytes one at a time
xorq %rcx, %rcx
vmovdqu %xmm0, (%rsp)
L_AES_GCM_encrypt_final_avx1_store_tag_loop:
movzbl (%rsp,%rcx,1), %r13d
movb %r13b, (%rsi,%rcx,1)
incl %ecx
cmpl %eax, %ecx
jne L_AES_GCM_encrypt_final_avx1_store_tag_loop
jmp L_AES_GCM_encrypt_final_avx1_store_tag_done
L_AES_GCM_encrypt_final_avx1_store_tag_16:
# full 16-byte tag: single store
vmovdqu %xmm0, (%rsi)
L_AES_GCM_encrypt_final_avx1_store_tag_done:
vzeroupper
addq $16, %rsp
popq %r13
repz retq
#ifndef __APPLE__
.size AES_GCM_encrypt_final_avx1,.-AES_GCM_encrypt_final_avx1
#endif /* __APPLE__ */
# AES_GCM_decrypt_update_avx1 - decrypt a whole number of 16-byte blocks in
# CTR mode and fold the ciphertext into the GHASH state (AVX1 + AES-NI +
# PCLMULQDQ). Processes 8 blocks (128 bytes) per main-loop iteration, then
# one block at a time for the remainder.
# ABI: SysV AMD64.
# In:  rdi = expanded AES key schedule (round keys at 16-byte offsets)
#      esi = number of AES rounds (10/12/14; tested against 11 and 13)
#      rdx = output (plaintext)
#      rcx = input (ciphertext)
#      r8d = byte count to process
#      r9  = running GHASH state X (read and written)
#      arg7 (stack) = hash subkey H
#      arg8 (stack) = counter block (read and written)
# Clobbers: rax, rcx, rdx, r10-r14 (r12-r14 saved), xmm0-xmm15, flags.
# Stack: 0xa8 bytes; (%rsp)..112(%rsp) hold the H^1..H^8 table.
#ifndef __APPLE__
.text
.globl AES_GCM_decrypt_update_avx1
.type AES_GCM_decrypt_update_avx1,@function
.align 16
AES_GCM_decrypt_update_avx1:
#else
.section __TEXT,__text
.globl _AES_GCM_decrypt_update_avx1
.p2align 4
_AES_GCM_decrypt_update_avx1:
#endif /* __APPLE__ */
pushq %r13
pushq %r12
pushq %r14
movq %rdx, %r10
movq %rcx, %r11
movq 32(%rsp), %rax
movq 40(%rsp), %r12
subq $0xa8, %rsp
# xmm6 = GHASH state X, xmm5 = H
vmovdqa (%r9), %xmm6
vmovdqa (%rax), %xmm5
# xmm5 = H * x in GF(2^128): shift left 1 with conditional reduction
vpsrlq $63, %xmm5, %xmm9
vpsllq $0x01, %xmm5, %xmm8
vpslldq $8, %xmm9, %xmm9
vpor %xmm9, %xmm8, %xmm8
vpshufd $0xff, %xmm5, %xmm5
vpsrad $31, %xmm5, %xmm5
vpand L_avx1_aes_gcm_mod2_128(%rip), %xmm5, %xmm5
vpxor %xmm8, %xmm5, %xmm5
# r14d = bytes processed so far; take the 128-byte path only when
# at least 128 bytes remain
xorl %r14d, %r14d
cmpl $0x80, %r8d
movl %r8d, %r13d
jl L_AES_GCM_decrypt_update_avx1_done_128
andl $0xffffff80, %r13d
vmovdqa %xmm6, %xmm2
# precompute H^1..H^8 on the stack for the 8-way GHASH in the main loop
# H ^ 1
vmovdqu %xmm5, (%rsp)
# H ^ 2 (square of H, carry-less multiply + reduction)
vpclmulqdq $0x00, %xmm5, %xmm5, %xmm8
vpclmulqdq $0x11, %xmm5, %xmm5, %xmm0
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm0, %xmm0
vmovdqu %xmm0, 16(%rsp)
# H ^ 3 = H * H^2
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm5, %xmm9
vpshufd $0x4e, %xmm0, %xmm10
vpclmulqdq $0x11, %xmm5, %xmm0, %xmm11
vpclmulqdq $0x00, %xmm5, %xmm0, %xmm8
vpxor %xmm5, %xmm9, %xmm9
vpxor %xmm0, %xmm10, %xmm10
vpclmulqdq $0x00, %xmm10, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpxor %xmm11, %xmm9, %xmm9
vpslldq $8, %xmm9, %xmm10
vpsrldq $8, %xmm9, %xmm9
vpxor %xmm10, %xmm8, %xmm8
vpxor %xmm9, %xmm11, %xmm1
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm1, %xmm1
vmovdqu %xmm1, 32(%rsp)
# H ^ 4 = (H^2)^2
vpclmulqdq $0x00, %xmm0, %xmm0, %xmm8
vpclmulqdq $0x11, %xmm0, %xmm0, %xmm3
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm3, %xmm3
vmovdqu %xmm3, 48(%rsp)
# H ^ 5 = H^2 * H^3
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm0, %xmm9
vpshufd $0x4e, %xmm1, %xmm10
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm11
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm8
vpxor %xmm0, %xmm9, %xmm9
vpxor %xmm1, %xmm10, %xmm10
vpclmulqdq $0x00, %xmm10, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpxor %xmm11, %xmm9, %xmm9
vpslldq $8, %xmm9, %xmm10
vpsrldq $8, %xmm9, %xmm9
vpxor %xmm10, %xmm8, %xmm8
vpxor %xmm9, %xmm11, %xmm7
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm7, %xmm7
vmovdqu %xmm7, 64(%rsp)
# H ^ 6 = (H^3)^2
vpclmulqdq $0x00, %xmm1, %xmm1, %xmm8
vpclmulqdq $0x11, %xmm1, %xmm1, %xmm7
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm7, %xmm7
vmovdqu %xmm7, 80(%rsp)
# H ^ 7 = H^3 * H^4
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm1, %xmm9
vpshufd $0x4e, %xmm3, %xmm10
vpclmulqdq $0x11, %xmm1, %xmm3, %xmm11
vpclmulqdq $0x00, %xmm1, %xmm3, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vpxor %xmm3, %xmm10, %xmm10
vpclmulqdq $0x00, %xmm10, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpxor %xmm11, %xmm9, %xmm9
vpslldq $8, %xmm9, %xmm10
vpsrldq $8, %xmm9, %xmm9
vpxor %xmm10, %xmm8, %xmm8
vpxor %xmm9, %xmm11, %xmm7
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm7, %xmm7
vmovdqu %xmm7, 96(%rsp)
# H ^ 8 = (H^4)^2
vpclmulqdq $0x00, %xmm3, %xmm3, %xmm8
vpclmulqdq $0x11, %xmm3, %xmm3, %xmm7
vpslld $31, %xmm8, %xmm12
vpslld $30, %xmm8, %xmm13
vpslld $25, %xmm8, %xmm14
vpxor %xmm13, %xmm12, %xmm12
vpxor %xmm14, %xmm12, %xmm12
vpsrldq $4, %xmm12, %xmm13
vpslldq $12, %xmm12, %xmm12
vpxor %xmm12, %xmm8, %xmm8
vpsrld $0x01, %xmm8, %xmm14
vpsrld $2, %xmm8, %xmm10
vpsrld $7, %xmm8, %xmm9
vpxor %xmm10, %xmm14, %xmm14
vpxor %xmm9, %xmm14, %xmm14
vpxor %xmm13, %xmm14, %xmm14
vpxor %xmm8, %xmm14, %xmm14
vpxor %xmm14, %xmm7, %xmm7
vmovdqu %xmm7, 112(%rsp)
# main loop: decrypt 128 bytes (8 blocks) per iteration; AES rounds for
# the 8 counter blocks are interleaved with the 8 GHASH multiplies
L_AES_GCM_decrypt_update_avx1_ghash_128:
# rcx = in + processed, rdx = out + processed
leaq (%r11,%r14,1), %rcx
leaq (%r10,%r14,1), %rdx
# build counter+0..counter+7 (byte-swapped for AES) and store counter+8 back
vmovdqu (%r12), %xmm0
vmovdqa L_avx1_aes_gcm_bswap_epi64(%rip), %xmm1
vpshufb %xmm1, %xmm0, %xmm8
vpaddd L_avx1_aes_gcm_one(%rip), %xmm0, %xmm9
vpshufb %xmm1, %xmm9, %xmm9
vpaddd L_avx1_aes_gcm_two(%rip), %xmm0, %xmm10
vpshufb %xmm1, %xmm10, %xmm10
vpaddd L_avx1_aes_gcm_three(%rip), %xmm0, %xmm11
vpshufb %xmm1, %xmm11, %xmm11
vpaddd L_avx1_aes_gcm_four(%rip), %xmm0, %xmm12
vpshufb %xmm1, %xmm12, %xmm12
vpaddd L_avx1_aes_gcm_five(%rip), %xmm0, %xmm13
vpshufb %xmm1, %xmm13, %xmm13
vpaddd L_avx1_aes_gcm_six(%rip), %xmm0, %xmm14
vpshufb %xmm1, %xmm14, %xmm14
vpaddd L_avx1_aes_gcm_seven(%rip), %xmm0, %xmm15
vpshufb %xmm1, %xmm15, %xmm15
vpaddd L_avx1_aes_gcm_eight(%rip), %xmm0, %xmm0
# round 0: XOR all 8 counters with the first round key
vmovdqa (%rdi), %xmm7
vmovdqu %xmm0, (%r12)
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm7, %xmm9, %xmm9
vpxor %xmm7, %xmm10, %xmm10
vpxor %xmm7, %xmm11, %xmm11
vpxor %xmm7, %xmm12, %xmm12
vpxor %xmm7, %xmm13, %xmm13
vpxor %xmm7, %xmm14, %xmm14
vpxor %xmm7, %xmm15, %xmm15
# GHASH ciphertext block 0 against H^8 (Karatsuba halves accumulate in
# xmm2 (lo), xmm3 (hi), xmm1 (mid)), interleaved with AES round 1
vmovdqu 112(%rsp), %xmm7
vmovdqu (%rcx), %xmm0
vaesenc 16(%rdi), %xmm8, %xmm8
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm3
vaesenc 16(%rdi), %xmm9, %xmm9
vaesenc 16(%rdi), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm2
vaesenc 16(%rdi), %xmm11, %xmm11
vaesenc 16(%rdi), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm1, %xmm1
vaesenc 16(%rdi), %xmm13, %xmm13
vaesenc 16(%rdi), %xmm14, %xmm14
vaesenc 16(%rdi), %xmm15, %xmm15
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
# GHASH block 1 against H^7, AES round 2
vmovdqu 96(%rsp), %xmm7
vmovdqu 16(%rcx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 32(%rdi), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 32(%rdi), %xmm9, %xmm9
vaesenc 32(%rdi), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 32(%rdi), %xmm11, %xmm11
vaesenc 32(%rdi), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 32(%rdi), %xmm13, %xmm13
vaesenc 32(%rdi), %xmm14, %xmm14
vaesenc 32(%rdi), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
# GHASH block 2 against H^6, AES round 3
vmovdqu 80(%rsp), %xmm7
vmovdqu 32(%rcx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 48(%rdi), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 48(%rdi), %xmm9, %xmm9
vaesenc 48(%rdi), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 48(%rdi), %xmm11, %xmm11
vaesenc 48(%rdi), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 48(%rdi), %xmm13, %xmm13
vaesenc 48(%rdi), %xmm14, %xmm14
vaesenc 48(%rdi), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
# GHASH block 3 against H^5, AES round 4
vmovdqu 64(%rsp), %xmm7
vmovdqu 48(%rcx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 64(%rdi), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 64(%rdi), %xmm9, %xmm9
vaesenc 64(%rdi), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 64(%rdi), %xmm11, %xmm11
vaesenc 64(%rdi), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 64(%rdi), %xmm13, %xmm13
vaesenc 64(%rdi), %xmm14, %xmm14
vaesenc 64(%rdi), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
# GHASH block 4 against H^4, AES round 5
vmovdqu 48(%rsp), %xmm7
vmovdqu 64(%rcx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 80(%rdi), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 80(%rdi), %xmm9, %xmm9
vaesenc 80(%rdi), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 80(%rdi), %xmm11, %xmm11
vaesenc 80(%rdi), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 80(%rdi), %xmm13, %xmm13
vaesenc 80(%rdi), %xmm14, %xmm14
vaesenc 80(%rdi), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
# GHASH block 5 against H^3, AES round 6
vmovdqu 32(%rsp), %xmm7
vmovdqu 80(%rcx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 96(%rdi), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 96(%rdi), %xmm9, %xmm9
vaesenc 96(%rdi), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 96(%rdi), %xmm11, %xmm11
vaesenc 96(%rdi), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 96(%rdi), %xmm13, %xmm13
vaesenc 96(%rdi), %xmm14, %xmm14
vaesenc 96(%rdi), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
# GHASH block 6 against H^2, AES round 7
vmovdqu 16(%rsp), %xmm7
vmovdqu 96(%rcx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 112(%rdi), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 112(%rdi), %xmm9, %xmm9
vaesenc 112(%rdi), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 112(%rdi), %xmm11, %xmm11
vaesenc 112(%rdi), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 112(%rdi), %xmm13, %xmm13
vaesenc 112(%rdi), %xmm14, %xmm14
vaesenc 112(%rdi), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
# GHASH block 7 against H^1, AES round 8
vmovdqu (%rsp), %xmm7
vmovdqu 112(%rcx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vaesenc 128(%rdi), %xmm8, %xmm8
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vaesenc 128(%rdi), %xmm9, %xmm9
vaesenc 128(%rdi), %xmm10, %xmm10
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vaesenc 128(%rdi), %xmm11, %xmm11
vaesenc 128(%rdi), %xmm12, %xmm12
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vaesenc 128(%rdi), %xmm13, %xmm13
vaesenc 128(%rdi), %xmm14, %xmm14
vaesenc 128(%rdi), %xmm15, %xmm15
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
# combine Karatsuba halves and reduce modulo the GCM polynomial,
# interleaved with AES round 9; result becomes the new GHASH state xmm2
vpslldq $8, %xmm1, %xmm5
vpsrldq $8, %xmm1, %xmm1
vaesenc 144(%rdi), %xmm8, %xmm8
vpxor %xmm5, %xmm2, %xmm2
vpxor %xmm1, %xmm3, %xmm3
vaesenc 144(%rdi), %xmm9, %xmm9
vpslld $31, %xmm2, %xmm7
vpslld $30, %xmm2, %xmm4
vpslld $25, %xmm2, %xmm5
vaesenc 144(%rdi), %xmm10, %xmm10
vpxor %xmm4, %xmm7, %xmm7
vpxor %xmm5, %xmm7, %xmm7
vaesenc 144(%rdi), %xmm11, %xmm11
vpsrldq $4, %xmm7, %xmm4
vpslldq $12, %xmm7, %xmm7
vaesenc 144(%rdi), %xmm12, %xmm12
vpxor %xmm7, %xmm2, %xmm2
vpsrld $0x01, %xmm2, %xmm5
vaesenc 144(%rdi), %xmm13, %xmm13
vpsrld $2, %xmm2, %xmm1
vpsrld $7, %xmm2, %xmm0
vaesenc 144(%rdi), %xmm14, %xmm14
vpxor %xmm1, %xmm5, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vaesenc 144(%rdi), %xmm15, %xmm15
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm2, %xmm2
vpxor %xmm3, %xmm2, %xmm2
# extra AES rounds for 12-round (192-bit) and 14-round (256-bit) keys
cmpl $11, %esi
vmovdqa 160(%rdi), %xmm7
jl L_AES_GCM_decrypt_update_avx1_aesenc_128_ghash_avx_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 176(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
cmpl $13, %esi
vmovdqa 192(%rdi), %xmm7
jl L_AES_GCM_decrypt_update_avx1_aesenc_128_ghash_avx_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 208(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqa 224(%rdi), %xmm7
L_AES_GCM_decrypt_update_avx1_aesenc_128_ghash_avx_done:
# final AES round, then XOR keystream with ciphertext to produce plaintext
vaesenclast %xmm7, %xmm8, %xmm8
vaesenclast %xmm7, %xmm9, %xmm9
vmovdqu (%rcx), %xmm0
vmovdqu 16(%rcx), %xmm1
vpxor %xmm0, %xmm8, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vmovdqu %xmm8, (%rdx)
vmovdqu %xmm9, 16(%rdx)
vaesenclast %xmm7, %xmm10, %xmm10
vaesenclast %xmm7, %xmm11, %xmm11
vmovdqu 32(%rcx), %xmm0
vmovdqu 48(%rcx), %xmm1
vpxor %xmm0, %xmm10, %xmm10
vpxor %xmm1, %xmm11, %xmm11
vmovdqu %xmm10, 32(%rdx)
vmovdqu %xmm11, 48(%rdx)
vaesenclast %xmm7, %xmm12, %xmm12
vaesenclast %xmm7, %xmm13, %xmm13
vmovdqu 64(%rcx), %xmm0
vmovdqu 80(%rcx), %xmm1
vpxor %xmm0, %xmm12, %xmm12
vpxor %xmm1, %xmm13, %xmm13
vmovdqu %xmm12, 64(%rdx)
vmovdqu %xmm13, 80(%rdx)
vaesenclast %xmm7, %xmm14, %xmm14
vaesenclast %xmm7, %xmm15, %xmm15
vmovdqu 96(%rcx), %xmm0
vmovdqu 112(%rcx), %xmm1
vpxor %xmm0, %xmm14, %xmm14
vpxor %xmm1, %xmm15, %xmm15
vmovdqu %xmm14, 96(%rdx)
vmovdqu %xmm15, 112(%rdx)
addl $0x80, %r14d
cmpl %r13d, %r14d
jl L_AES_GCM_decrypt_update_avx1_ghash_128
# restore single-block state: xmm6 = GHASH state, xmm5 = H
vmovdqa %xmm2, %xmm6
vmovdqu (%rsp), %xmm5
L_AES_GCM_decrypt_update_avx1_done_128:
movl %r8d, %edx
cmpl %edx, %r14d
jge L_AES_GCM_decrypt_update_avx1_done_dec
movl %r8d, %r13d
andl $0xfffffff0, %r13d
cmpl %r13d, %r14d
jge L_AES_GCM_decrypt_update_avx1_last_block_done
# tail: process remaining whole 16-byte blocks one at a time, AES rounds
# interleaved with a single GHASH multiply of the ciphertext block
L_AES_GCM_decrypt_update_avx1_last_block_start:
vmovdqu (%r11,%r14,1), %xmm13
vmovdqa %xmm5, %xmm0
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm13, %xmm1
vpxor %xmm6, %xmm1, %xmm1
# increment counter and begin encrypting it
vmovdqu (%r12), %xmm9
vpshufb L_avx1_aes_gcm_bswap_epi64(%rip), %xmm9, %xmm8
vpaddd L_avx1_aes_gcm_one(%rip), %xmm9, %xmm9
vmovdqu %xmm9, (%r12)
vpxor (%rdi), %xmm8, %xmm8
vpclmulqdq $16, %xmm0, %xmm1, %xmm10
vaesenc 16(%rdi), %xmm8, %xmm8
vaesenc 32(%rdi), %xmm8, %xmm8
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm11
vaesenc 48(%rdi), %xmm8, %xmm8
vaesenc 64(%rdi), %xmm8, %xmm8
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm12
vaesenc 80(%rdi), %xmm8, %xmm8
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vaesenc 96(%rdi), %xmm8, %xmm8
vpxor %xmm11, %xmm10, %xmm10
vpslldq $8, %xmm10, %xmm2
vpsrldq $8, %xmm10, %xmm10
vaesenc 112(%rdi), %xmm8, %xmm8
vpxor %xmm12, %xmm2, %xmm2
vpxor %xmm10, %xmm1, %xmm3
# Montgomery-style reduction using the mod2_128 constant
vmovdqa L_avx1_aes_gcm_mod2_128(%rip), %xmm0
vpclmulqdq $16, %xmm0, %xmm2, %xmm11
vaesenc 128(%rdi), %xmm8, %xmm8
vpshufd $0x4e, %xmm2, %xmm10
vpxor %xmm11, %xmm10, %xmm10
vpclmulqdq $16, %xmm0, %xmm10, %xmm11
vaesenc 144(%rdi), %xmm8, %xmm8
vpshufd $0x4e, %xmm10, %xmm10
vpxor %xmm11, %xmm10, %xmm10
vpxor %xmm3, %xmm10, %xmm6
# extra rounds for 192/256-bit keys
cmpl $11, %esi
vmovdqa 160(%rdi), %xmm9
jl L_AES_GCM_decrypt_update_avx1_aesenc_gfmul_last
vaesenc %xmm9, %xmm8, %xmm8
vaesenc 176(%rdi), %xmm8, %xmm8
cmpl $13, %esi
vmovdqa 192(%rdi), %xmm9
jl L_AES_GCM_decrypt_update_avx1_aesenc_gfmul_last
vaesenc %xmm9, %xmm8, %xmm8
vaesenc 208(%rdi), %xmm8, %xmm8
vmovdqa 224(%rdi), %xmm9
L_AES_GCM_decrypt_update_avx1_aesenc_gfmul_last:
# plaintext = keystream XOR ciphertext block
vaesenclast %xmm9, %xmm8, %xmm8
vmovdqa %xmm13, %xmm0
vpxor %xmm0, %xmm8, %xmm8
vmovdqu %xmm8, (%r10,%r14,1)
addl $16, %r14d
cmpl %r13d, %r14d
jl L_AES_GCM_decrypt_update_avx1_last_block_start
L_AES_GCM_decrypt_update_avx1_last_block_done:
L_AES_GCM_decrypt_update_avx1_done_dec:
# write back the updated GHASH state
vmovdqa %xmm6, (%r9)
vzeroupper
addq $0xa8, %rsp
popq %r14
popq %r12
popq %r13
repz retq
#ifndef __APPLE__
.size AES_GCM_decrypt_update_avx1,.-AES_GCM_decrypt_update_avx1
#endif /* __APPLE__ */
# AES_GCM_decrypt_final_avx1 - compute the expected GCM tag and compare it
# with the supplied authentication tag (AVX1 + PCLMULQDQ).
# ABI: SysV AMD64.
# In:  rdi = running GHASH state X (16 bytes)
#      rsi = authentication tag to verify against
#      edx = tag size in bytes (1..16)
#      ecx, r8d = byte counts folded into the length block
#                 (presumably aSz and cSz - TODO confirm order against caller)
#      r9  = hash subkey H (byte-swapped)
#      arg7 (stack) = E(K, Y0), the encrypted initial counter block
#      arg8 (stack) = int* res; set to 1 on tag match, 0 on mismatch
# Clobbers: rax, rcx, rdx, r8, r10-r12 (r12 saved), r13 (saved),
#           rbp (saved), xmm0-xmm13, xmm15, flags.
#ifndef __APPLE__
.text
.globl AES_GCM_decrypt_final_avx1
.type AES_GCM_decrypt_final_avx1,@function
.align 16
AES_GCM_decrypt_final_avx1:
#else
.section __TEXT,__text
.globl _AES_GCM_decrypt_final_avx1
.p2align 4
_AES_GCM_decrypt_final_avx1:
#endif /* __APPLE__ */
pushq %r13
pushq %rbp
pushq %r12
movl %edx, %eax
movl %ecx, %r10d
movl %r8d, %r11d
movq 32(%rsp), %r8
movq 40(%rsp), %rbp
subq $16, %rsp
# xmm6 = GHASH state X, xmm5 = H, xmm15 = E(K, Y0)
vmovdqa (%rdi), %xmm6
vmovdqa (%r9), %xmm5
vmovdqa (%r8), %xmm15
# xmm5 = H * x in GF(2^128): shift left 1 with conditional reduction
vpsrlq $63, %xmm5, %xmm8
vpsllq $0x01, %xmm5, %xmm7
vpslldq $8, %xmm8, %xmm8
vpor %xmm8, %xmm7, %xmm7
vpshufd $0xff, %xmm5, %xmm5
vpsrad $31, %xmm5, %xmm5
vpand L_avx1_aes_gcm_mod2_128(%rip), %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
# build the GCM length block (both byte counts scaled to bits) and fold it in
movl %r10d, %edx
movl %r11d, %ecx
shlq $3, %rdx
shlq $3, %rcx
vmovq %rdx, %xmm0
vmovq %rcx, %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0
vpxor %xmm0, %xmm6, %xmm6
# ghash_gfmul_red_avx - X = (X . H) mod poly, Karatsuba + reduction
vpshufd $0x4e, %xmm5, %xmm8
vpshufd $0x4e, %xmm6, %xmm9
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm10
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm7
vpxor %xmm5, %xmm8, %xmm8
vpxor %xmm6, %xmm9, %xmm9
vpclmulqdq $0x00, %xmm9, %xmm8, %xmm8
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm10, %xmm8, %xmm8
vpslldq $8, %xmm8, %xmm9
vpsrldq $8, %xmm8, %xmm8
vpxor %xmm9, %xmm7, %xmm7
vpxor %xmm8, %xmm10, %xmm6
vpslld $31, %xmm7, %xmm11
vpslld $30, %xmm7, %xmm12
vpslld $25, %xmm7, %xmm13
vpxor %xmm12, %xmm11, %xmm11
vpxor %xmm13, %xmm11, %xmm11
vpsrldq $4, %xmm11, %xmm12
vpslldq $12, %xmm11, %xmm11
vpxor %xmm11, %xmm7, %xmm7
vpsrld $0x01, %xmm7, %xmm13
vpsrld $2, %xmm7, %xmm9
vpsrld $7, %xmm7, %xmm8
vpxor %xmm9, %xmm13, %xmm13
vpxor %xmm8, %xmm13, %xmm13
vpxor %xmm12, %xmm13, %xmm13
vpxor %xmm7, %xmm13, %xmm13
vpxor %xmm13, %xmm6, %xmm6
# expected tag = byte-swap(GHASH result) XOR E(K, Y0)
vpshufb L_avx1_aes_gcm_bswap_mask(%rip), %xmm6, %xmm6
vpxor %xmm15, %xmm6, %xmm0
cmpl $16, %eax
je L_AES_GCM_decrypt_final_avx1_cmp_tag_16
# partial tag: byte-wise compare, OR-accumulating differences into r12b so
# the loop runs to completion regardless of where a mismatch occurs
subq $16, %rsp
xorq %rcx, %rcx
xorq %r12, %r12
vmovdqu %xmm0, (%rsp)
L_AES_GCM_decrypt_final_avx1_cmp_tag_loop:
movzbl (%rsp,%rcx,1), %r13d
xorb (%rsi,%rcx,1), %r13b
orb %r13b, %r12b
incl %ecx
cmpl %eax, %ecx
jne L_AES_GCM_decrypt_final_avx1_cmp_tag_loop
cmpb $0x00, %r12b
sete %r12b
addq $16, %rsp
xorq %rcx, %rcx
jmp L_AES_GCM_decrypt_final_avx1_cmp_tag_done
L_AES_GCM_decrypt_final_avx1_cmp_tag_16:
# full 16-byte tag: single SIMD byte compare, collect per-byte results
vmovdqu (%rsi), %xmm1
vpcmpeqb %xmm1, %xmm0, %xmm0
vpmovmskb %xmm0, %rdx
# when edx == 0xffff all 16 bytes matched: result is 1, otherwise 0
xorl %r12d, %r12d
cmpl $0xffff, %edx
sete %r12b
L_AES_GCM_decrypt_final_avx1_cmp_tag_done:
# *res = comparison result (1 = tag valid, 0 = mismatch)
movl %r12d, (%rbp)
vzeroupper
addq $16, %rsp
popq %r12
popq %rbp
popq %r13
repz retq
#ifndef __APPLE__
.size AES_GCM_decrypt_final_avx1,.-AES_GCM_decrypt_final_avx1
#endif /* __APPLE__ */
#endif /* WOLFSSL_AESGCM_STREAM */
#endif /* HAVE_INTEL_AVX1 */
#ifdef HAVE_INTEL_AVX2
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
# Constant tables for the AVX2 AES-GCM implementation. Each is 16 bytes,
# 16-byte aligned. The L_avx2_aes_gcm_one..eight values are counter
# increments applied with vpaddd; the shuffle masks convert between memory
# and GHASH byte orders; mod2_128 encodes the GCM reduction polynomial.
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
# counter increment by 1
L_avx2_aes_gcm_one:
.quad 0x0, 0x1
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
# counter increment by 2
L_avx2_aes_gcm_two:
.quad 0x0, 0x2
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
# counter increment by 3
L_avx2_aes_gcm_three:
.quad 0x0, 0x3
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
# counter increment by 4
L_avx2_aes_gcm_four:
.quad 0x0, 0x4
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
# counter increment by 5
L_avx2_aes_gcm_five:
.quad 0x0, 0x5
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
# counter increment by 6
L_avx2_aes_gcm_six:
.quad 0x0, 0x6
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
# counter increment by 7
L_avx2_aes_gcm_seven:
.quad 0x0, 0x7
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
# counter increment by 8
L_avx2_aes_gcm_eight:
.quad 0x0, 0x8
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
# the value 1 in byte-swapped (big-endian counter) form
L_avx2_aes_gcm_bswap_one:
.quad 0x0, 0x100000000000000
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
# vpshufb mask: byte-swap each 64-bit half independently
L_avx2_aes_gcm_bswap_epi64:
.quad 0x1020304050607, 0x8090a0b0c0d0e0f
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
# vpshufb mask: reverse all 16 bytes (full 128-bit byte swap)
L_avx2_aes_gcm_bswap_mask:
.quad 0x8090a0b0c0d0e0f, 0x1020304050607
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
# GCM reduction constant (0xc2... encodes the GHASH polynomial terms)
L_avx2_aes_gcm_mod2_128:
.quad 0x1, 0xc200000000000000
#ifndef __APPLE__
.text
.globl AES_GCM_encrypt_avx2
.type AES_GCM_encrypt_avx2,@function
.align 16
AES_GCM_encrypt_avx2:
#else
.section __TEXT,__text
.globl _AES_GCM_encrypt_avx2
.p2align 4
_AES_GCM_encrypt_avx2:
#endif /* __APPLE__ */
pushq %r13
pushq %r12
pushq %r15
pushq %rbx
pushq %r14
movq %rdx, %r12
movq %rcx, %rax
movq %r8, %r15
movq %rsi, %r8
movl %r9d, %r10d
movl 48(%rsp), %r11d
movl 56(%rsp), %ebx
movl 64(%rsp), %r14d
movq 72(%rsp), %rsi
movl 80(%rsp), %r9d
subq $0xa0, %rsp
vpxor %xmm4, %xmm4, %xmm4
vpxor %xmm6, %xmm6, %xmm6
movl %ebx, %edx
cmpl $12, %edx
je L_AES_GCM_encrypt_avx2_iv_12
# Calculate values when IV is not 12 bytes
# H = Encrypt X(=0)
vmovdqu (%rsi), %xmm5
vaesenc 16(%rsi), %xmm5, %xmm5
vaesenc 32(%rsi), %xmm5, %xmm5
vaesenc 48(%rsi), %xmm5, %xmm5
vaesenc 64(%rsi), %xmm5, %xmm5
vaesenc 80(%rsi), %xmm5, %xmm5
vaesenc 96(%rsi), %xmm5, %xmm5
vaesenc 112(%rsi), %xmm5, %xmm5
vaesenc 128(%rsi), %xmm5, %xmm5
vaesenc 144(%rsi), %xmm5, %xmm5
cmpl $11, %r9d
vmovdqu 160(%rsi), %xmm0
jl L_AES_GCM_encrypt_avx2_calc_iv_1_aesenc_avx_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc 176(%rsi), %xmm5, %xmm5
cmpl $13, %r9d
vmovdqu 192(%rsi), %xmm0
jl L_AES_GCM_encrypt_avx2_calc_iv_1_aesenc_avx_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc 208(%rsi), %xmm5, %xmm5
vmovdqu 224(%rsi), %xmm0
L_AES_GCM_encrypt_avx2_calc_iv_1_aesenc_avx_last:
vaesenclast %xmm0, %xmm5, %xmm5
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm5, %xmm5
# Calc counter
# Initialization vector
cmpl $0x00, %edx
movq $0x00, %rcx
je L_AES_GCM_encrypt_avx2_calc_iv_done
cmpl $16, %edx
jl L_AES_GCM_encrypt_avx2_calc_iv_lt16
andl $0xfffffff0, %edx
L_AES_GCM_encrypt_avx2_calc_iv_16_loop:
vmovdqu (%rax,%rcx,1), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpclmulqdq $16, %xmm4, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm4, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm7
vpxor %xmm2, %xmm3, %xmm4
# ghash_mid
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm4, %xmm4
# ghash_red
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm2
vpclmulqdq $16, %xmm2, %xmm7, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_avx2_calc_iv_16_loop
movl %ebx, %edx
cmpl %edx, %ecx
je L_AES_GCM_encrypt_avx2_calc_iv_done
L_AES_GCM_encrypt_avx2_calc_iv_lt16:
vpxor %xmm0, %xmm0, %xmm0
xorl %ebx, %ebx
vmovdqu %xmm0, (%rsp)
L_AES_GCM_encrypt_avx2_calc_iv_loop:
movzbl (%rax,%rcx,1), %r13d
movb %r13b, (%rsp,%rbx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_avx2_calc_iv_loop
vmovdqu (%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpclmulqdq $16, %xmm4, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm4, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm7
vpxor %xmm2, %xmm3, %xmm4
# ghash_mid
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm4, %xmm4
# ghash_red
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm2
vpclmulqdq $16, %xmm2, %xmm7, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
L_AES_GCM_encrypt_avx2_calc_iv_done:
# T = Encrypt counter
vpxor %xmm0, %xmm0, %xmm0
shll $3, %edx
vmovq %rdx, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpclmulqdq $16, %xmm4, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm4, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm7
vpxor %xmm2, %xmm3, %xmm4
# ghash_mid
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm4, %xmm4
# ghash_red
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm2
vpclmulqdq $16, %xmm2, %xmm7, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm4, %xmm4
# Encrypt counter
vmovdqu (%rsi), %xmm15
vpxor %xmm4, %xmm15, %xmm15
vaesenc 16(%rsi), %xmm15, %xmm15
vaesenc 32(%rsi), %xmm15, %xmm15
vaesenc 48(%rsi), %xmm15, %xmm15
vaesenc 64(%rsi), %xmm15, %xmm15
vaesenc 80(%rsi), %xmm15, %xmm15
vaesenc 96(%rsi), %xmm15, %xmm15
vaesenc 112(%rsi), %xmm15, %xmm15
vaesenc 128(%rsi), %xmm15, %xmm15
vaesenc 144(%rsi), %xmm15, %xmm15
cmpl $11, %r9d
vmovdqu 160(%rsi), %xmm0
jl L_AES_GCM_encrypt_avx2_calc_iv_2_aesenc_avx_last
vaesenc %xmm0, %xmm15, %xmm15
vaesenc 176(%rsi), %xmm15, %xmm15
cmpl $13, %r9d
vmovdqu 192(%rsi), %xmm0
jl L_AES_GCM_encrypt_avx2_calc_iv_2_aesenc_avx_last
vaesenc %xmm0, %xmm15, %xmm15
vaesenc 208(%rsi), %xmm15, %xmm15
vmovdqu 224(%rsi), %xmm0
L_AES_GCM_encrypt_avx2_calc_iv_2_aesenc_avx_last:
vaesenclast %xmm0, %xmm15, %xmm15
jmp L_AES_GCM_encrypt_avx2_iv_done
L_AES_GCM_encrypt_avx2_iv_12:
# # Calculate values when IV is 12 bytes
# Set counter based on IV
vmovdqu L_avx2_aes_gcm_bswap_one(%rip), %xmm4
vmovdqu (%rsi), %xmm5
vpblendd $7, (%rax), %xmm4, %xmm4
# H = Encrypt X(=0) and T = Encrypt counter
vmovdqu 16(%rsi), %xmm7
vpxor %xmm5, %xmm4, %xmm15
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 32(%rsi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
vmovdqu 48(%rsi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
vmovdqu 64(%rsi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
vmovdqu 80(%rsi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
vmovdqu 96(%rsi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
vmovdqu 112(%rsi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
vmovdqu 128(%rsi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
vmovdqu 144(%rsi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
cmpl $11, %r9d
vmovdqu 160(%rsi), %xmm0
jl L_AES_GCM_encrypt_avx2_calc_iv_12_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
vmovdqu 176(%rsi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
cmpl $13, %r9d
vmovdqu 192(%rsi), %xmm0
jl L_AES_GCM_encrypt_avx2_calc_iv_12_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
vmovdqu 208(%rsi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
vmovdqu 224(%rsi), %xmm0
L_AES_GCM_encrypt_avx2_calc_iv_12_last:
vaesenclast %xmm0, %xmm5, %xmm5
vaesenclast %xmm0, %xmm15, %xmm15
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm5, %xmm5
L_AES_GCM_encrypt_avx2_iv_done:
# Additional authentication data
movl %r11d, %edx
cmpl $0x00, %edx
je L_AES_GCM_encrypt_avx2_calc_aad_done
xorl %ecx, %ecx
cmpl $16, %edx
jl L_AES_GCM_encrypt_avx2_calc_aad_lt16
andl $0xfffffff0, %edx
L_AES_GCM_encrypt_avx2_calc_aad_16_loop:
vmovdqu (%r12,%rcx,1), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vpxor %xmm0, %xmm6, %xmm6
# ghash_gfmul_avx
vpclmulqdq $16, %xmm6, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm6, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm6, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm7
vpxor %xmm2, %xmm3, %xmm6
# ghash_mid
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm6, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm6, %xmm6
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm6, %xmm6
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm6, %xmm6
# ghash_red
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm2
vpclmulqdq $16, %xmm2, %xmm7, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm6, %xmm6
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_avx2_calc_aad_16_loop
movl %r11d, %edx
cmpl %edx, %ecx
je L_AES_GCM_encrypt_avx2_calc_aad_done
L_AES_GCM_encrypt_avx2_calc_aad_lt16:
vpxor %xmm0, %xmm0, %xmm0
xorl %ebx, %ebx
vmovdqu %xmm0, (%rsp)
L_AES_GCM_encrypt_avx2_calc_aad_loop:
movzbl (%r12,%rcx,1), %r13d
movb %r13b, (%rsp,%rbx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_avx2_calc_aad_loop
vmovdqu (%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vpxor %xmm0, %xmm6, %xmm6
# ghash_gfmul_avx
vpclmulqdq $16, %xmm6, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm6, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm6, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm7
vpxor %xmm2, %xmm3, %xmm6
# ghash_mid
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm6, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm6, %xmm6
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm6, %xmm6
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm6, %xmm6
# ghash_red
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm2
vpclmulqdq $16, %xmm2, %xmm7, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm6, %xmm6
L_AES_GCM_encrypt_avx2_calc_aad_done:
# Calculate counter and H
vpsrlq $63, %xmm5, %xmm1
vpsllq $0x01, %xmm5, %xmm0
vpslldq $8, %xmm1, %xmm1
vpor %xmm1, %xmm0, %xmm0
vpshufd $0xff, %xmm5, %xmm5
vpsrad $31, %xmm5, %xmm5
vpshufb L_avx2_aes_gcm_bswap_epi64(%rip), %xmm4, %xmm4
vpand L_avx2_aes_gcm_mod2_128(%rip), %xmm5, %xmm5
vpaddd L_avx2_aes_gcm_one(%rip), %xmm4, %xmm4
vpxor %xmm0, %xmm5, %xmm5
xorl %ebx, %ebx
cmpl $0x80, %r10d
movl %r10d, %r13d
jl L_AES_GCM_encrypt_avx2_done_128
andl $0xffffff80, %r13d
vmovdqu %xmm4, 128(%rsp)
vmovdqu %xmm15, 144(%rsp)
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm3
# H ^ 1 and H ^ 2
vpclmulqdq $0x00, %xmm5, %xmm5, %xmm9
vpclmulqdq $0x11, %xmm5, %xmm5, %xmm10
vpclmulqdq $16, %xmm3, %xmm9, %xmm8
vpshufd $0x4e, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpclmulqdq $16, %xmm3, %xmm9, %xmm8
vpshufd $0x4e, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpxor %xmm9, %xmm10, %xmm0
vmovdqu %xmm5, (%rsp)
vmovdqu %xmm0, 16(%rsp)
# H ^ 3 and H ^ 4
vpclmulqdq $16, %xmm5, %xmm0, %xmm11
vpclmulqdq $0x01, %xmm5, %xmm0, %xmm10
vpclmulqdq $0x00, %xmm5, %xmm0, %xmm9
vpclmulqdq $0x11, %xmm5, %xmm0, %xmm12
vpclmulqdq $0x00, %xmm0, %xmm0, %xmm13
vpclmulqdq $0x11, %xmm0, %xmm0, %xmm14
vpxor %xmm10, %xmm11, %xmm11
vpslldq $8, %xmm11, %xmm10
vpsrldq $8, %xmm11, %xmm11
vpxor %xmm9, %xmm10, %xmm10
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm9, %xmm10, %xmm10
vpxor %xmm8, %xmm13, %xmm13
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm11, %xmm12, %xmm12
vpxor %xmm8, %xmm13, %xmm13
vpxor %xmm12, %xmm10, %xmm10
vpxor %xmm14, %xmm13, %xmm2
vpxor %xmm9, %xmm10, %xmm1
vmovdqu %xmm1, 32(%rsp)
vmovdqu %xmm2, 48(%rsp)
# H ^ 5 and H ^ 6
vpclmulqdq $16, %xmm0, %xmm1, %xmm11
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm10
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm9
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm12
vpclmulqdq $0x00, %xmm1, %xmm1, %xmm13
vpclmulqdq $0x11, %xmm1, %xmm1, %xmm14
vpxor %xmm10, %xmm11, %xmm11
vpslldq $8, %xmm11, %xmm10
vpsrldq $8, %xmm11, %xmm11
vpxor %xmm9, %xmm10, %xmm10
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm9, %xmm10, %xmm10
vpxor %xmm8, %xmm13, %xmm13
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm11, %xmm12, %xmm12
vpxor %xmm8, %xmm13, %xmm13
vpxor %xmm12, %xmm10, %xmm10
vpxor %xmm14, %xmm13, %xmm0
vpxor %xmm9, %xmm10, %xmm7
vmovdqu %xmm7, 64(%rsp)
vmovdqu %xmm0, 80(%rsp)
# H ^ 7 and H ^ 8
vpclmulqdq $16, %xmm1, %xmm2, %xmm11
vpclmulqdq $0x01, %xmm1, %xmm2, %xmm10
vpclmulqdq $0x00, %xmm1, %xmm2, %xmm9
vpclmulqdq $0x11, %xmm1, %xmm2, %xmm12
vpclmulqdq $0x00, %xmm2, %xmm2, %xmm13
vpclmulqdq $0x11, %xmm2, %xmm2, %xmm14
vpxor %xmm10, %xmm11, %xmm11
vpslldq $8, %xmm11, %xmm10
vpsrldq $8, %xmm11, %xmm11
vpxor %xmm9, %xmm10, %xmm10
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm9, %xmm10, %xmm10
vpxor %xmm8, %xmm13, %xmm13
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm11, %xmm12, %xmm12
vpxor %xmm8, %xmm13, %xmm13
vpxor %xmm12, %xmm10, %xmm10
vpxor %xmm14, %xmm13, %xmm0
vpxor %xmm9, %xmm10, %xmm7
vmovdqu %xmm7, 96(%rsp)
vmovdqu %xmm0, 112(%rsp)
# First 128 bytes of input
# aesenc_128
# aesenc_ctr
vmovdqu 128(%rsp), %xmm0
vmovdqu L_avx2_aes_gcm_bswap_epi64(%rip), %xmm1
vpaddd L_avx2_aes_gcm_one(%rip), %xmm0, %xmm9
vpshufb %xmm1, %xmm0, %xmm8
vpaddd L_avx2_aes_gcm_two(%rip), %xmm0, %xmm10
vpshufb %xmm1, %xmm9, %xmm9
vpaddd L_avx2_aes_gcm_three(%rip), %xmm0, %xmm11
vpshufb %xmm1, %xmm10, %xmm10
vpaddd L_avx2_aes_gcm_four(%rip), %xmm0, %xmm12
vpshufb %xmm1, %xmm11, %xmm11
vpaddd L_avx2_aes_gcm_five(%rip), %xmm0, %xmm13
vpshufb %xmm1, %xmm12, %xmm12
vpaddd L_avx2_aes_gcm_six(%rip), %xmm0, %xmm14
vpshufb %xmm1, %xmm13, %xmm13
vpaddd L_avx2_aes_gcm_seven(%rip), %xmm0, %xmm15
vpshufb %xmm1, %xmm14, %xmm14
vpaddd L_avx2_aes_gcm_eight(%rip), %xmm0, %xmm0
vpshufb %xmm1, %xmm15, %xmm15
# aesenc_xor
vmovdqu (%rsi), %xmm7
vmovdqu %xmm0, 128(%rsp)
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm7, %xmm9, %xmm9
vpxor %xmm7, %xmm10, %xmm10
vpxor %xmm7, %xmm11, %xmm11
vpxor %xmm7, %xmm12, %xmm12
vpxor %xmm7, %xmm13, %xmm13
vpxor %xmm7, %xmm14, %xmm14
vpxor %xmm7, %xmm15, %xmm15
vmovdqu 16(%rsi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 32(%rsi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 48(%rsi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 64(%rsi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 80(%rsi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 96(%rsi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 112(%rsi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 128(%rsi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 144(%rsi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
cmpl $11, %r9d
vmovdqu 160(%rsi), %xmm7
jl L_AES_GCM_encrypt_avx2_aesenc_128_enc_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 176(%rsi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
cmpl $13, %r9d
vmovdqu 192(%rsi), %xmm7
jl L_AES_GCM_encrypt_avx2_aesenc_128_enc_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 208(%rsi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 224(%rsi), %xmm7
L_AES_GCM_encrypt_avx2_aesenc_128_enc_done:
# aesenc_last
vaesenclast %xmm7, %xmm8, %xmm8
vaesenclast %xmm7, %xmm9, %xmm9
vaesenclast %xmm7, %xmm10, %xmm10
vaesenclast %xmm7, %xmm11, %xmm11
vmovdqu (%rdi), %xmm0
vmovdqu 16(%rdi), %xmm1
vmovdqu 32(%rdi), %xmm2
vmovdqu 48(%rdi), %xmm3
vpxor %xmm0, %xmm8, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vpxor %xmm2, %xmm10, %xmm10
vpxor %xmm3, %xmm11, %xmm11
vmovdqu %xmm8, (%r8)
vmovdqu %xmm9, 16(%r8)
vmovdqu %xmm10, 32(%r8)
vmovdqu %xmm11, 48(%r8)
vaesenclast %xmm7, %xmm12, %xmm12
vaesenclast %xmm7, %xmm13, %xmm13
vaesenclast %xmm7, %xmm14, %xmm14
vaesenclast %xmm7, %xmm15, %xmm15
vmovdqu 64(%rdi), %xmm0
vmovdqu 80(%rdi), %xmm1
vmovdqu 96(%rdi), %xmm2
vmovdqu 112(%rdi), %xmm3
vpxor %xmm0, %xmm12, %xmm12
vpxor %xmm1, %xmm13, %xmm13
vpxor %xmm2, %xmm14, %xmm14
vpxor %xmm3, %xmm15, %xmm15
vmovdqu %xmm12, 64(%r8)
vmovdqu %xmm13, 80(%r8)
vmovdqu %xmm14, 96(%r8)
vmovdqu %xmm15, 112(%r8)
cmpl $0x80, %r13d
movl $0x80, %ebx
jle L_AES_GCM_encrypt_avx2_end_128
# More 128 bytes of input
L_AES_GCM_encrypt_avx2_ghash_128:
# aesenc_128_ghash
leaq (%rdi,%rbx,1), %rcx
leaq (%r8,%rbx,1), %rdx
# aesenc_ctr
vmovdqu 128(%rsp), %xmm0
vmovdqu L_avx2_aes_gcm_bswap_epi64(%rip), %xmm1
vpaddd L_avx2_aes_gcm_one(%rip), %xmm0, %xmm9
vpshufb %xmm1, %xmm0, %xmm8
vpaddd L_avx2_aes_gcm_two(%rip), %xmm0, %xmm10
vpshufb %xmm1, %xmm9, %xmm9
vpaddd L_avx2_aes_gcm_three(%rip), %xmm0, %xmm11
vpshufb %xmm1, %xmm10, %xmm10
vpaddd L_avx2_aes_gcm_four(%rip), %xmm0, %xmm12
vpshufb %xmm1, %xmm11, %xmm11
vpaddd L_avx2_aes_gcm_five(%rip), %xmm0, %xmm13
vpshufb %xmm1, %xmm12, %xmm12
vpaddd L_avx2_aes_gcm_six(%rip), %xmm0, %xmm14
vpshufb %xmm1, %xmm13, %xmm13
vpaddd L_avx2_aes_gcm_seven(%rip), %xmm0, %xmm15
vpshufb %xmm1, %xmm14, %xmm14
vpaddd L_avx2_aes_gcm_eight(%rip), %xmm0, %xmm0
vpshufb %xmm1, %xmm15, %xmm15
# aesenc_xor
vmovdqu (%rsi), %xmm7
vmovdqu %xmm0, 128(%rsp)
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm7, %xmm9, %xmm9
vpxor %xmm7, %xmm10, %xmm10
vpxor %xmm7, %xmm11, %xmm11
vpxor %xmm7, %xmm12, %xmm12
vpxor %xmm7, %xmm13, %xmm13
vpxor %xmm7, %xmm14, %xmm14
vpxor %xmm7, %xmm15, %xmm15
# aesenc_pclmul_1
vmovdqu -128(%rdx), %xmm1
vmovdqu 16(%rsi), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vmovdqu 112(%rsp), %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm5
vpclmulqdq $0x01, %xmm2, %xmm1, %xmm3
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm6
vpclmulqdq $0x11, %xmm2, %xmm1, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_2
vmovdqu -112(%rdx), %xmm1
vmovdqu 96(%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 32(%rsi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu -96(%rdx), %xmm1
vmovdqu 80(%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 48(%rsi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu -80(%rdx), %xmm1
vmovdqu 64(%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 64(%rsi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu -64(%rdx), %xmm1
vmovdqu 48(%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 80(%rsi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu -48(%rdx), %xmm1
vmovdqu 32(%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 96(%rsi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu -32(%rdx), %xmm1
vmovdqu 16(%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 112(%rsi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu -16(%rdx), %xmm1
vmovdqu (%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 128(%rsi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_l
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm4, %xmm6, %xmm6
vpxor %xmm3, %xmm5, %xmm5
vpslldq $8, %xmm5, %xmm1
vpsrldq $8, %xmm5, %xmm5
vmovdqu 144(%rsi), %xmm4
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm0
vaesenc %xmm4, %xmm8, %xmm8
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $16, %xmm0, %xmm6, %xmm3
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vpshufd $0x4e, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm0, %xmm6, %xmm3
vaesenc %xmm4, %xmm12, %xmm12
vaesenc %xmm4, %xmm13, %xmm13
vaesenc %xmm4, %xmm14, %xmm14
vpshufd $0x4e, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm7, %xmm6, %xmm6
vaesenc %xmm4, %xmm15, %xmm15
cmpl $11, %r9d
vmovdqu 160(%rsi), %xmm7
jl L_AES_GCM_encrypt_avx2_aesenc_128_ghash_avx_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 176(%rsi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
cmpl $13, %r9d
vmovdqu 192(%rsi), %xmm7
jl L_AES_GCM_encrypt_avx2_aesenc_128_ghash_avx_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 208(%rsi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 224(%rsi), %xmm7
L_AES_GCM_encrypt_avx2_aesenc_128_ghash_avx_done:
# aesenc_last
vaesenclast %xmm7, %xmm8, %xmm8
vaesenclast %xmm7, %xmm9, %xmm9
vaesenclast %xmm7, %xmm10, %xmm10
vaesenclast %xmm7, %xmm11, %xmm11
vmovdqu (%rcx), %xmm0
vmovdqu 16(%rcx), %xmm1
vmovdqu 32(%rcx), %xmm2
vmovdqu 48(%rcx), %xmm3
vpxor %xmm0, %xmm8, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vpxor %xmm2, %xmm10, %xmm10
vpxor %xmm3, %xmm11, %xmm11
vmovdqu %xmm8, (%rdx)
vmovdqu %xmm9, 16(%rdx)
vmovdqu %xmm10, 32(%rdx)
vmovdqu %xmm11, 48(%rdx)
vaesenclast %xmm7, %xmm12, %xmm12
vaesenclast %xmm7, %xmm13, %xmm13
vaesenclast %xmm7, %xmm14, %xmm14
vaesenclast %xmm7, %xmm15, %xmm15
vmovdqu 64(%rcx), %xmm0
vmovdqu 80(%rcx), %xmm1
vmovdqu 96(%rcx), %xmm2
vmovdqu 112(%rcx), %xmm3
vpxor %xmm0, %xmm12, %xmm12
vpxor %xmm1, %xmm13, %xmm13
vpxor %xmm2, %xmm14, %xmm14
vpxor %xmm3, %xmm15, %xmm15
vmovdqu %xmm12, 64(%rdx)
vmovdqu %xmm13, 80(%rdx)
vmovdqu %xmm14, 96(%rdx)
vmovdqu %xmm15, 112(%rdx)
# aesenc_128_ghash - end
addl $0x80, %ebx
cmpl %r13d, %ebx
jl L_AES_GCM_encrypt_avx2_ghash_128
L_AES_GCM_encrypt_avx2_end_128:
vmovdqu L_avx2_aes_gcm_bswap_mask(%rip), %xmm4
vpshufb %xmm4, %xmm8, %xmm8
vpshufb %xmm4, %xmm9, %xmm9
vpshufb %xmm4, %xmm10, %xmm10
vpshufb %xmm4, %xmm11, %xmm11
vpshufb %xmm4, %xmm12, %xmm12
vpshufb %xmm4, %xmm13, %xmm13
vpshufb %xmm4, %xmm14, %xmm14
vpshufb %xmm4, %xmm15, %xmm15
vpxor %xmm6, %xmm8, %xmm8
vmovdqu (%rsp), %xmm7
vpclmulqdq $16, %xmm15, %xmm7, %xmm5
vpclmulqdq $0x01, %xmm15, %xmm7, %xmm1
vpclmulqdq $0x00, %xmm15, %xmm7, %xmm4
vpclmulqdq $0x11, %xmm15, %xmm7, %xmm6
vpxor %xmm1, %xmm5, %xmm5
vmovdqu 16(%rsp), %xmm7
vpclmulqdq $16, %xmm14, %xmm7, %xmm2
vpclmulqdq $0x01, %xmm14, %xmm7, %xmm1
vpclmulqdq $0x00, %xmm14, %xmm7, %xmm0
vpclmulqdq $0x11, %xmm14, %xmm7, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm0, %xmm4, %xmm4
vmovdqu 32(%rsp), %xmm15
vmovdqu 48(%rsp), %xmm7
vpclmulqdq $16, %xmm13, %xmm15, %xmm2
vpclmulqdq $0x01, %xmm13, %xmm15, %xmm1
vpclmulqdq $0x00, %xmm13, %xmm15, %xmm0
vpclmulqdq $0x11, %xmm13, %xmm15, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm0, %xmm4, %xmm4
vpclmulqdq $16, %xmm12, %xmm7, %xmm2
vpclmulqdq $0x01, %xmm12, %xmm7, %xmm1
vpclmulqdq $0x00, %xmm12, %xmm7, %xmm0
vpclmulqdq $0x11, %xmm12, %xmm7, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm0, %xmm4, %xmm4
vmovdqu 64(%rsp), %xmm15
vmovdqu 80(%rsp), %xmm7
vpclmulqdq $16, %xmm11, %xmm15, %xmm2
vpclmulqdq $0x01, %xmm11, %xmm15, %xmm1
vpclmulqdq $0x00, %xmm11, %xmm15, %xmm0
vpclmulqdq $0x11, %xmm11, %xmm15, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm0, %xmm4, %xmm4
vpclmulqdq $16, %xmm10, %xmm7, %xmm2
vpclmulqdq $0x01, %xmm10, %xmm7, %xmm1
vpclmulqdq $0x00, %xmm10, %xmm7, %xmm0
vpclmulqdq $0x11, %xmm10, %xmm7, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm0, %xmm4, %xmm4
vmovdqu 96(%rsp), %xmm15
vmovdqu 112(%rsp), %xmm7
vpclmulqdq $16, %xmm9, %xmm15, %xmm2
vpclmulqdq $0x01, %xmm9, %xmm15, %xmm1
vpclmulqdq $0x00, %xmm9, %xmm15, %xmm0
vpclmulqdq $0x11, %xmm9, %xmm15, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm0, %xmm4, %xmm4
vpclmulqdq $16, %xmm8, %xmm7, %xmm2
vpclmulqdq $0x01, %xmm8, %xmm7, %xmm1
vpclmulqdq $0x00, %xmm8, %xmm7, %xmm0
vpclmulqdq $0x11, %xmm8, %xmm7, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm0, %xmm4, %xmm4
vpslldq $8, %xmm5, %xmm7
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm7, %xmm4, %xmm4
vpxor %xmm5, %xmm6, %xmm6
# ghash_red
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm2
vpclmulqdq $16, %xmm2, %xmm4, %xmm0
vpshufd $0x4e, %xmm4, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm6, %xmm6
vmovdqu (%rsp), %xmm5
vmovdqu 128(%rsp), %xmm4
vmovdqu 144(%rsp), %xmm15
L_AES_GCM_encrypt_avx2_done_128:
cmpl %r10d, %ebx
je L_AES_GCM_encrypt_avx2_done_enc
movl %r10d, %r13d
andl $0xfffffff0, %r13d
cmpl %r13d, %ebx
jge L_AES_GCM_encrypt_avx2_last_block_done
# aesenc_block
vmovdqu %xmm4, %xmm1
vpshufb L_avx2_aes_gcm_bswap_epi64(%rip), %xmm1, %xmm0
vpaddd L_avx2_aes_gcm_one(%rip), %xmm1, %xmm1
vpxor (%rsi), %xmm0, %xmm0
vmovdqu 16(%rsi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 32(%rsi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 48(%rsi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 64(%rsi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 80(%rsi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 96(%rsi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 112(%rsi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 128(%rsi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 144(%rsi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu %xmm1, %xmm4
cmpl $11, %r9d
vmovdqu 160(%rsi), %xmm1
jl L_AES_GCM_encrypt_avx2_aesenc_block_last
vaesenc %xmm1, %xmm0, %xmm0
vmovdqu 176(%rsi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
cmpl $13, %r9d
vmovdqu 192(%rsi), %xmm1
jl L_AES_GCM_encrypt_avx2_aesenc_block_last
vaesenc %xmm1, %xmm0, %xmm0
vmovdqu 208(%rsi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 224(%rsi), %xmm1
L_AES_GCM_encrypt_avx2_aesenc_block_last:
vaesenclast %xmm1, %xmm0, %xmm0
vmovdqu (%rdi,%rbx,1), %xmm1
vpxor %xmm1, %xmm0, %xmm0
vmovdqu %xmm0, (%r8,%rbx,1)
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vpxor %xmm0, %xmm6, %xmm6
addl $16, %ebx
cmpl %r13d, %ebx
jge L_AES_GCM_encrypt_avx2_last_block_ghash
L_AES_GCM_encrypt_avx2_last_block_start:
vmovdqu (%rdi,%rbx,1), %xmm12
vpshufb L_avx2_aes_gcm_bswap_epi64(%rip), %xmm4, %xmm11
vpaddd L_avx2_aes_gcm_one(%rip), %xmm4, %xmm4
# aesenc_gfmul_sb
vpclmulqdq $0x01, %xmm5, %xmm6, %xmm2
vpclmulqdq $16, %xmm5, %xmm6, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm1
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm8
vpxor (%rsi), %xmm11, %xmm11
vaesenc 16(%rsi), %xmm11, %xmm11
vpxor %xmm2, %xmm3, %xmm3
vpslldq $8, %xmm3, %xmm2
vpsrldq $8, %xmm3, %xmm3
vaesenc 32(%rsi), %xmm11, %xmm11
vpxor %xmm1, %xmm2, %xmm2
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm2, %xmm1
vaesenc 48(%rsi), %xmm11, %xmm11
vaesenc 64(%rsi), %xmm11, %xmm11
vaesenc 80(%rsi), %xmm11, %xmm11
vpshufd $0x4e, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm2, %xmm1
vaesenc 96(%rsi), %xmm11, %xmm11
vaesenc 112(%rsi), %xmm11, %xmm11
vaesenc 128(%rsi), %xmm11, %xmm11
vpshufd $0x4e, %xmm2, %xmm2
vaesenc 144(%rsi), %xmm11, %xmm11
vpxor %xmm3, %xmm8, %xmm8
vpxor %xmm8, %xmm2, %xmm2
vmovdqu 160(%rsi), %xmm0
cmpl $11, %r9d
jl L_AES_GCM_encrypt_avx2_aesenc_gfmul_sb_last
vaesenc %xmm0, %xmm11, %xmm11
vaesenc 176(%rsi), %xmm11, %xmm11
vmovdqu 192(%rsi), %xmm0
cmpl $13, %r9d
jl L_AES_GCM_encrypt_avx2_aesenc_gfmul_sb_last
vaesenc %xmm0, %xmm11, %xmm11
vaesenc 208(%rsi), %xmm11, %xmm11
vmovdqu 224(%rsi), %xmm0
L_AES_GCM_encrypt_avx2_aesenc_gfmul_sb_last:
vaesenclast %xmm0, %xmm11, %xmm11
vpxor %xmm1, %xmm2, %xmm6
vpxor %xmm12, %xmm11, %xmm11
vmovdqu %xmm11, (%r8,%rbx,1)
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm11, %xmm11
vpxor %xmm11, %xmm6, %xmm6
addl $16, %ebx
cmpl %r13d, %ebx
jl L_AES_GCM_encrypt_avx2_last_block_start
L_AES_GCM_encrypt_avx2_last_block_ghash:
# ghash_gfmul_red
vpclmulqdq $16, %xmm5, %xmm6, %xmm10
vpclmulqdq $0x01, %xmm5, %xmm6, %xmm9
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm8
vpxor %xmm9, %xmm10, %xmm10
vpslldq $8, %xmm10, %xmm9
vpsrldq $8, %xmm10, %xmm10
vpxor %xmm8, %xmm9, %xmm9
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm6
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm9, %xmm8
vpshufd $0x4e, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm9, %xmm8
vpshufd $0x4e, %xmm9, %xmm9
vpxor %xmm10, %xmm6, %xmm6
vpxor %xmm9, %xmm6, %xmm6
vpxor %xmm8, %xmm6, %xmm6
L_AES_GCM_encrypt_avx2_last_block_done:
movl %r10d, %ecx
movl %r10d, %edx
andl $15, %ecx
jz L_AES_GCM_encrypt_avx2_done_enc
# aesenc_last15_enc
vpshufb L_avx2_aes_gcm_bswap_epi64(%rip), %xmm4, %xmm4
vpxor (%rsi), %xmm4, %xmm4
vaesenc 16(%rsi), %xmm4, %xmm4
vaesenc 32(%rsi), %xmm4, %xmm4
vaesenc 48(%rsi), %xmm4, %xmm4
vaesenc 64(%rsi), %xmm4, %xmm4
vaesenc 80(%rsi), %xmm4, %xmm4
vaesenc 96(%rsi), %xmm4, %xmm4
vaesenc 112(%rsi), %xmm4, %xmm4
vaesenc 128(%rsi), %xmm4, %xmm4
vaesenc 144(%rsi), %xmm4, %xmm4
cmpl $11, %r9d
vmovdqu 160(%rsi), %xmm0
jl L_AES_GCM_encrypt_avx2_aesenc_last15_enc_avx_aesenc_avx_last
vaesenc %xmm0, %xmm4, %xmm4
vaesenc 176(%rsi), %xmm4, %xmm4
cmpl $13, %r9d
vmovdqu 192(%rsi), %xmm0
jl L_AES_GCM_encrypt_avx2_aesenc_last15_enc_avx_aesenc_avx_last
vaesenc %xmm0, %xmm4, %xmm4
vaesenc 208(%rsi), %xmm4, %xmm4
vmovdqu 224(%rsi), %xmm0
L_AES_GCM_encrypt_avx2_aesenc_last15_enc_avx_aesenc_avx_last:
vaesenclast %xmm0, %xmm4, %xmm4
xorl %ecx, %ecx
vpxor %xmm0, %xmm0, %xmm0
vmovdqu %xmm4, (%rsp)
vmovdqu %xmm0, 16(%rsp)
L_AES_GCM_encrypt_avx2_aesenc_last15_enc_avx_loop:
movzbl (%rdi,%rbx,1), %r13d
xorb (%rsp,%rcx,1), %r13b
movb %r13b, 16(%rsp,%rcx,1)
movb %r13b, (%r8,%rbx,1)
incl %ebx
incl %ecx
cmpl %edx, %ebx
jl L_AES_GCM_encrypt_avx2_aesenc_last15_enc_avx_loop
L_AES_GCM_encrypt_avx2_aesenc_last15_enc_avx_finish_enc:
vmovdqu 16(%rsp), %xmm4
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm4, %xmm4
vpxor %xmm4, %xmm6, %xmm6
# ghash_gfmul_red
vpclmulqdq $16, %xmm5, %xmm6, %xmm2
vpclmulqdq $0x01, %xmm5, %xmm6, %xmm1
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm0
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm6
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm0, %xmm6, %xmm6
L_AES_GCM_encrypt_avx2_done_enc:
# calc_tag
shlq $3, %r10
shlq $3, %r11
vmovq %r10, %xmm0
vmovq %r11, %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0
vpxor %xmm6, %xmm0, %xmm0
# ghash_gfmul_red
vpclmulqdq $16, %xmm5, %xmm0, %xmm4
vpclmulqdq $0x01, %xmm5, %xmm0, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm0, %xmm2
vpxor %xmm3, %xmm4, %xmm4
vpslldq $8, %xmm4, %xmm3
vpsrldq $8, %xmm4, %xmm4
vpxor %xmm2, %xmm3, %xmm3
vpclmulqdq $0x11, %xmm5, %xmm0, %xmm0
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm3, %xmm2
vpshufd $0x4e, %xmm3, %xmm3
vpxor %xmm2, %xmm3, %xmm3
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm3, %xmm2
vpshufd $0x4e, %xmm3, %xmm3
vpxor %xmm4, %xmm0, %xmm0
vpxor %xmm3, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vpxor %xmm15, %xmm0, %xmm0
# store_tag
cmpl $16, %r14d
je L_AES_GCM_encrypt_avx2_store_tag_16
xorq %rcx, %rcx
vmovdqu %xmm0, (%rsp)
L_AES_GCM_encrypt_avx2_store_tag_loop:
movzbl (%rsp,%rcx,1), %r13d
movb %r13b, (%r15,%rcx,1)
incl %ecx
cmpl %r14d, %ecx
jne L_AES_GCM_encrypt_avx2_store_tag_loop
jmp L_AES_GCM_encrypt_avx2_store_tag_done
L_AES_GCM_encrypt_avx2_store_tag_16:
vmovdqu %xmm0, (%r15)
L_AES_GCM_encrypt_avx2_store_tag_done:
vzeroupper
addq $0xa0, %rsp
popq %r14
popq %rbx
popq %r15
popq %r12
popq %r13
repz retq
#ifndef __APPLE__
.size AES_GCM_encrypt_avx2,.-AES_GCM_encrypt_avx2
#endif /* __APPLE__ */
#-----------------------------------------------------------------------
# AES_GCM_decrypt_avx2 -- AVX2/AES-NI/PCLMULQDQ AES-GCM decryption.
#
# Inferred C signature (from the prologue's argument shuffling below):
#   void AES_GCM_decrypt_avx2(const unsigned char* in,   /* rdi  */
#                             unsigned char*       out,  /* rsi -> r8  */
#                             const unsigned char* aad,  /* rdx -> r12 */
#                             const unsigned char* iv,   /* rcx -> rax */
#                             const unsigned char* tag,  /* r8  -> r14 */
#                             unsigned int nbytes,       /* r9d -> r10d */
#                             unsigned int abytes,       /* 56(%rsp) -> r11d */
#                             unsigned int ibytes,       /* 64(%rsp) -> ebx  */
#                             unsigned int tbytes,       /* 72(%rsp) -> r15d */
#                             const unsigned char* key,  /* 80(%rsp) -> rsi  */
#                             int nr,                    /* 88(%rsp) -> r9d  */
#                             int* res);                 /* 96(%rsp) -> rbp  */
#   (Names are assumptions based on usage -- TODO confirm against the
#    C-side prototype; stack offsets are after the six pushes below.)
# ABI:   System V AMD64.
# Out:   *res = 1 if the computed tag matches `tag`, else 0.
# Stack: 0xa8 bytes of scratch; (%rsp)..112(%rsp) hold H^1..H^8 for the
#        8-block GHASH loop, 128(%rsp) the counter, 144(%rsp) E(K, Y0).
# Key register roles in the body:
#   xmm5 = GHASH key H, xmm6 = running GHASH state X, xmm4 = counter,
#   xmm15 = E(K, Y0) (tag mask), rbx = bytes processed so far.
#-----------------------------------------------------------------------
#ifndef __APPLE__
.text
.globl AES_GCM_decrypt_avx2
.type AES_GCM_decrypt_avx2,@function
.align 16
AES_GCM_decrypt_avx2:
#else
.section __TEXT,__text
.globl _AES_GCM_decrypt_avx2
.p2align 4
_AES_GCM_decrypt_avx2:
#endif /* __APPLE__ */
pushq %r13
pushq %r12
pushq %r14
pushq %rbx
pushq %r15
pushq %rbp
# Move arguments into their long-lived homes (see header comment).
movq %rdx, %r12
movq %rcx, %rax
movq %r8, %r14
movq %rsi, %r8
movl %r9d, %r10d
movl 56(%rsp), %r11d
movl 64(%rsp), %ebx
movl 72(%rsp), %r15d
movq 80(%rsp), %rsi
movl 88(%rsp), %r9d
movq 96(%rsp), %rbp
subq $0xa8, %rsp
vpxor %xmm4, %xmm4, %xmm4
vpxor %xmm6, %xmm6, %xmm6
movl %ebx, %edx
cmpl $12, %edx
je L_AES_GCM_decrypt_avx2_iv_12
# Calculate values when IV is not 12 bytes
# H = Encrypt X(=0)
vmovdqu (%rsi), %xmm5
vaesenc 16(%rsi), %xmm5, %xmm5
vaesenc 32(%rsi), %xmm5, %xmm5
vaesenc 48(%rsi), %xmm5, %xmm5
vaesenc 64(%rsi), %xmm5, %xmm5
vaesenc 80(%rsi), %xmm5, %xmm5
vaesenc 96(%rsi), %xmm5, %xmm5
vaesenc 112(%rsi), %xmm5, %xmm5
vaesenc 128(%rsi), %xmm5, %xmm5
vaesenc 144(%rsi), %xmm5, %xmm5
# Extra rounds for AES-192 (nr >= 11) and AES-256 (nr >= 13).
cmpl $11, %r9d
vmovdqu 160(%rsi), %xmm0
jl L_AES_GCM_decrypt_avx2_calc_iv_1_aesenc_avx_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc 176(%rsi), %xmm5, %xmm5
cmpl $13, %r9d
vmovdqu 192(%rsi), %xmm0
jl L_AES_GCM_decrypt_avx2_calc_iv_1_aesenc_avx_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc 208(%rsi), %xmm5, %xmm5
vmovdqu 224(%rsi), %xmm0
L_AES_GCM_decrypt_avx2_calc_iv_1_aesenc_avx_last:
vaesenclast %xmm0, %xmm5, %xmm5
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm5, %xmm5
# Calc counter
# Initialization vector
# Y0 = GHASH(IV) when ibytes != 12: hash full 16-byte IV blocks, then
# the partial block, then the 64-bit bit length.
cmpl $0x00, %edx
movq $0x00, %rcx
je L_AES_GCM_decrypt_avx2_calc_iv_done
cmpl $16, %edx
jl L_AES_GCM_decrypt_avx2_calc_iv_lt16
andl $0xfffffff0, %edx
L_AES_GCM_decrypt_avx2_calc_iv_16_loop:
vmovdqu (%rax,%rcx,1), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpclmulqdq $16, %xmm4, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm4, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm7
vpxor %xmm2, %xmm3, %xmm4
# ghash_mid
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm4, %xmm4
# ghash_red
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm2
vpclmulqdq $16, %xmm2, %xmm7, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_avx2_calc_iv_16_loop
movl %ebx, %edx
cmpl %edx, %ecx
je L_AES_GCM_decrypt_avx2_calc_iv_done
L_AES_GCM_decrypt_avx2_calc_iv_lt16:
# Copy the sub-16-byte IV tail into a zeroed stack block, then hash it.
vpxor %xmm0, %xmm0, %xmm0
xorl %ebx, %ebx
vmovdqu %xmm0, (%rsp)
L_AES_GCM_decrypt_avx2_calc_iv_loop:
movzbl (%rax,%rcx,1), %r13d
movb %r13b, (%rsp,%rbx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_avx2_calc_iv_loop
vmovdqu (%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpclmulqdq $16, %xmm4, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm4, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm7
vpxor %xmm2, %xmm3, %xmm4
# ghash_mid
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm4, %xmm4
# ghash_red
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm2
vpclmulqdq $16, %xmm2, %xmm7, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
L_AES_GCM_decrypt_avx2_calc_iv_done:
# T = Encrypt counter
# Fold in len(IV) in bits, one final GHASH multiply, then byte-swap to
# get Y0 in counter form.
vpxor %xmm0, %xmm0, %xmm0
shll $3, %edx
vmovq %rdx, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpclmulqdq $16, %xmm4, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm4, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm7
vpxor %xmm2, %xmm3, %xmm4
# ghash_mid
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm4, %xmm4
# ghash_red
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm2
vpclmulqdq $16, %xmm2, %xmm7, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm4, %xmm4
# Encrypt counter
# xmm15 = E(K, Y0): XORed into the final GHASH value to form the tag.
vmovdqu (%rsi), %xmm15
vpxor %xmm4, %xmm15, %xmm15
vaesenc 16(%rsi), %xmm15, %xmm15
vaesenc 32(%rsi), %xmm15, %xmm15
vaesenc 48(%rsi), %xmm15, %xmm15
vaesenc 64(%rsi), %xmm15, %xmm15
vaesenc 80(%rsi), %xmm15, %xmm15
vaesenc 96(%rsi), %xmm15, %xmm15
vaesenc 112(%rsi), %xmm15, %xmm15
vaesenc 128(%rsi), %xmm15, %xmm15
vaesenc 144(%rsi), %xmm15, %xmm15
cmpl $11, %r9d
vmovdqu 160(%rsi), %xmm0
jl L_AES_GCM_decrypt_avx2_calc_iv_2_aesenc_avx_last
vaesenc %xmm0, %xmm15, %xmm15
vaesenc 176(%rsi), %xmm15, %xmm15
cmpl $13, %r9d
vmovdqu 192(%rsi), %xmm0
jl L_AES_GCM_decrypt_avx2_calc_iv_2_aesenc_avx_last
vaesenc %xmm0, %xmm15, %xmm15
vaesenc 208(%rsi), %xmm15, %xmm15
vmovdqu 224(%rsi), %xmm0
L_AES_GCM_decrypt_avx2_calc_iv_2_aesenc_avx_last:
vaesenclast %xmm0, %xmm15, %xmm15
jmp L_AES_GCM_decrypt_avx2_iv_done
L_AES_GCM_decrypt_avx2_iv_12:
# # Calculate values when IV is 12 bytes
# Set counter based on IV
# Fast path: Y0 = IV || 0x00000001 (no GHASH of the IV needed); H and
# E(K, Y0) are computed with the two AES streams interleaved.
vmovdqu L_avx2_aes_gcm_bswap_one(%rip), %xmm4
vmovdqu (%rsi), %xmm5
vpblendd $7, (%rax), %xmm4, %xmm4
# H = Encrypt X(=0) and T = Encrypt counter
vmovdqu 16(%rsi), %xmm7
vpxor %xmm5, %xmm4, %xmm15
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 32(%rsi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
vmovdqu 48(%rsi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
vmovdqu 64(%rsi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
vmovdqu 80(%rsi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
vmovdqu 96(%rsi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
vmovdqu 112(%rsi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
vmovdqu 128(%rsi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
vmovdqu 144(%rsi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
cmpl $11, %r9d
vmovdqu 160(%rsi), %xmm0
jl L_AES_GCM_decrypt_avx2_calc_iv_12_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
vmovdqu 176(%rsi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
cmpl $13, %r9d
vmovdqu 192(%rsi), %xmm0
jl L_AES_GCM_decrypt_avx2_calc_iv_12_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
vmovdqu 208(%rsi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm15, %xmm15
vmovdqu 224(%rsi), %xmm0
L_AES_GCM_decrypt_avx2_calc_iv_12_last:
vaesenclast %xmm0, %xmm5, %xmm5
vaesenclast %xmm0, %xmm15, %xmm15
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm5, %xmm5
L_AES_GCM_decrypt_avx2_iv_done:
# Additional authentication data
# Fold the AAD into the GHASH state xmm6 (full blocks, then partial).
movl %r11d, %edx
cmpl $0x00, %edx
je L_AES_GCM_decrypt_avx2_calc_aad_done
xorl %ecx, %ecx
cmpl $16, %edx
jl L_AES_GCM_decrypt_avx2_calc_aad_lt16
andl $0xfffffff0, %edx
L_AES_GCM_decrypt_avx2_calc_aad_16_loop:
vmovdqu (%r12,%rcx,1), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vpxor %xmm0, %xmm6, %xmm6
# ghash_gfmul_avx
vpclmulqdq $16, %xmm6, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm6, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm6, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm7
vpxor %xmm2, %xmm3, %xmm6
# ghash_mid
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm6, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm6, %xmm6
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm6, %xmm6
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm6, %xmm6
# ghash_red
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm2
vpclmulqdq $16, %xmm2, %xmm7, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm6, %xmm6
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_avx2_calc_aad_16_loop
movl %r11d, %edx
cmpl %edx, %ecx
je L_AES_GCM_decrypt_avx2_calc_aad_done
L_AES_GCM_decrypt_avx2_calc_aad_lt16:
# Copy the sub-16-byte AAD tail into a zeroed stack block, then hash it.
vpxor %xmm0, %xmm0, %xmm0
xorl %ebx, %ebx
vmovdqu %xmm0, (%rsp)
L_AES_GCM_decrypt_avx2_calc_aad_loop:
movzbl (%r12,%rcx,1), %r13d
movb %r13b, (%rsp,%rbx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_avx2_calc_aad_loop
vmovdqu (%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vpxor %xmm0, %xmm6, %xmm6
# ghash_gfmul_avx
vpclmulqdq $16, %xmm6, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm6, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm6, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm7
vpxor %xmm2, %xmm3, %xmm6
# ghash_mid
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm6, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm6, %xmm6
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm6, %xmm6
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm6, %xmm6
# ghash_red
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm2
vpclmulqdq $16, %xmm2, %xmm7, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm6, %xmm6
L_AES_GCM_decrypt_avx2_calc_aad_done:
# Calculate counter and H
# Pre-shift H left by 1 (Montgomery-style form used by the reduction
# constant) and advance the counter to the first data block.
vpsrlq $63, %xmm5, %xmm1
vpsllq $0x01, %xmm5, %xmm0
vpslldq $8, %xmm1, %xmm1
vpor %xmm1, %xmm0, %xmm0
vpshufd $0xff, %xmm5, %xmm5
vpsrad $31, %xmm5, %xmm5
vpshufb L_avx2_aes_gcm_bswap_epi64(%rip), %xmm4, %xmm4
vpand L_avx2_aes_gcm_mod2_128(%rip), %xmm5, %xmm5
vpaddd L_avx2_aes_gcm_one(%rip), %xmm4, %xmm4
vpxor %xmm0, %xmm5, %xmm5
xorl %ebx, %ebx
# If at least 128 bytes of input, set up the 8-blocks-at-a-time path:
# spill counter/tag-mask and precompute H^1..H^8 into the stack table.
cmpl $0x80, %r10d
movl %r10d, %r13d
jl L_AES_GCM_decrypt_avx2_done_128
andl $0xffffff80, %r13d
vmovdqu %xmm4, 128(%rsp)
vmovdqu %xmm15, 144(%rsp)
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm3
# H ^ 1 and H ^ 2
vpclmulqdq $0x00, %xmm5, %xmm5, %xmm9
vpclmulqdq $0x11, %xmm5, %xmm5, %xmm10
vpclmulqdq $16, %xmm3, %xmm9, %xmm8
vpshufd $0x4e, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpclmulqdq $16, %xmm3, %xmm9, %xmm8
vpshufd $0x4e, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpxor %xmm9, %xmm10, %xmm0
vmovdqu %xmm5, (%rsp)
vmovdqu %xmm0, 16(%rsp)
# H ^ 3 and H ^ 4
vpclmulqdq $16, %xmm5, %xmm0, %xmm11
vpclmulqdq $0x01, %xmm5, %xmm0, %xmm10
vpclmulqdq $0x00, %xmm5, %xmm0, %xmm9
vpclmulqdq $0x11, %xmm5, %xmm0, %xmm12
vpclmulqdq $0x00, %xmm0, %xmm0, %xmm13
vpclmulqdq $0x11, %xmm0, %xmm0, %xmm14
vpxor %xmm10, %xmm11, %xmm11
vpslldq $8, %xmm11, %xmm10
vpsrldq $8, %xmm11, %xmm11
vpxor %xmm9, %xmm10, %xmm10
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm9, %xmm10, %xmm10
vpxor %xmm8, %xmm13, %xmm13
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm11, %xmm12, %xmm12
vpxor %xmm8, %xmm13, %xmm13
vpxor %xmm12, %xmm10, %xmm10
vpxor %xmm14, %xmm13, %xmm2
vpxor %xmm9, %xmm10, %xmm1
vmovdqu %xmm1, 32(%rsp)
vmovdqu %xmm2, 48(%rsp)
# H ^ 5 and H ^ 6
vpclmulqdq $16, %xmm0, %xmm1, %xmm11
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm10
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm9
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm12
vpclmulqdq $0x00, %xmm1, %xmm1, %xmm13
vpclmulqdq $0x11, %xmm1, %xmm1, %xmm14
vpxor %xmm10, %xmm11, %xmm11
vpslldq $8, %xmm11, %xmm10
vpsrldq $8, %xmm11, %xmm11
vpxor %xmm9, %xmm10, %xmm10
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm9, %xmm10, %xmm10
vpxor %xmm8, %xmm13, %xmm13
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm11, %xmm12, %xmm12
vpxor %xmm8, %xmm13, %xmm13
vpxor %xmm12, %xmm10, %xmm10
vpxor %xmm14, %xmm13, %xmm0
vpxor %xmm9, %xmm10, %xmm7
vmovdqu %xmm7, 64(%rsp)
vmovdqu %xmm0, 80(%rsp)
# H ^ 7 and H ^ 8
vpclmulqdq $16, %xmm1, %xmm2, %xmm11
vpclmulqdq $0x01, %xmm1, %xmm2, %xmm10
vpclmulqdq $0x00, %xmm1, %xmm2, %xmm9
vpclmulqdq $0x11, %xmm1, %xmm2, %xmm12
vpclmulqdq $0x00, %xmm2, %xmm2, %xmm13
vpclmulqdq $0x11, %xmm2, %xmm2, %xmm14
vpxor %xmm10, %xmm11, %xmm11
vpslldq $8, %xmm11, %xmm10
vpsrldq $8, %xmm11, %xmm11
vpxor %xmm9, %xmm10, %xmm10
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm9, %xmm10, %xmm10
vpxor %xmm8, %xmm13, %xmm13
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm11, %xmm12, %xmm12
vpxor %xmm8, %xmm13, %xmm13
vpxor %xmm12, %xmm10, %xmm10
vpxor %xmm14, %xmm13, %xmm0
vpxor %xmm9, %xmm10, %xmm7
vmovdqu %xmm7, 96(%rsp)
vmovdqu %xmm0, 112(%rsp)
# Main loop: decrypt 128 bytes (8 blocks) per iteration.  AES rounds for
# the 8 counter blocks are interleaved with the GHASH of the 8 input
# ciphertext blocks (decrypt hashes ciphertext, so no extra GHASH pass
# is needed after the loop, unlike encrypt).
L_AES_GCM_decrypt_avx2_ghash_128:
# aesenc_128_ghash
leaq (%rdi,%rbx,1), %rcx
leaq (%r8,%rbx,1), %rdx
# aesenc_ctr
vmovdqu 128(%rsp), %xmm0
vmovdqu L_avx2_aes_gcm_bswap_epi64(%rip), %xmm1
vpaddd L_avx2_aes_gcm_one(%rip), %xmm0, %xmm9
vpshufb %xmm1, %xmm0, %xmm8
vpaddd L_avx2_aes_gcm_two(%rip), %xmm0, %xmm10
vpshufb %xmm1, %xmm9, %xmm9
vpaddd L_avx2_aes_gcm_three(%rip), %xmm0, %xmm11
vpshufb %xmm1, %xmm10, %xmm10
vpaddd L_avx2_aes_gcm_four(%rip), %xmm0, %xmm12
vpshufb %xmm1, %xmm11, %xmm11
vpaddd L_avx2_aes_gcm_five(%rip), %xmm0, %xmm13
vpshufb %xmm1, %xmm12, %xmm12
vpaddd L_avx2_aes_gcm_six(%rip), %xmm0, %xmm14
vpshufb %xmm1, %xmm13, %xmm13
vpaddd L_avx2_aes_gcm_seven(%rip), %xmm0, %xmm15
vpshufb %xmm1, %xmm14, %xmm14
vpaddd L_avx2_aes_gcm_eight(%rip), %xmm0, %xmm0
vpshufb %xmm1, %xmm15, %xmm15
# aesenc_xor
vmovdqu (%rsi), %xmm7
vmovdqu %xmm0, 128(%rsp)
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm7, %xmm9, %xmm9
vpxor %xmm7, %xmm10, %xmm10
vpxor %xmm7, %xmm11, %xmm11
vpxor %xmm7, %xmm12, %xmm12
vpxor %xmm7, %xmm13, %xmm13
vpxor %xmm7, %xmm14, %xmm14
vpxor %xmm7, %xmm15, %xmm15
# aesenc_pclmul_1
vmovdqu (%rcx), %xmm1
vmovdqu 16(%rsi), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vmovdqu 112(%rsp), %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm5
vpclmulqdq $0x01, %xmm2, %xmm1, %xmm3
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm6
vpclmulqdq $0x11, %xmm2, %xmm1, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_2
vmovdqu 16(%rcx), %xmm1
vmovdqu 96(%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 32(%rsi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu 32(%rcx), %xmm1
vmovdqu 80(%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 48(%rsi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu 48(%rcx), %xmm1
vmovdqu 64(%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 64(%rsi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu 64(%rcx), %xmm1
vmovdqu 48(%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 80(%rsi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu 80(%rcx), %xmm1
vmovdqu 32(%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 96(%rsi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu 96(%rcx), %xmm1
vmovdqu 16(%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 112(%rsi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu 112(%rcx), %xmm1
vmovdqu (%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 128(%rsi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_l
# Final fold + modular reduction of the accumulated GHASH products,
# interleaved with AES round 9.
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm4, %xmm6, %xmm6
vpxor %xmm3, %xmm5, %xmm5
vpslldq $8, %xmm5, %xmm1
vpsrldq $8, %xmm5, %xmm5
vmovdqu 144(%rsi), %xmm4
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm0
vaesenc %xmm4, %xmm8, %xmm8
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $16, %xmm0, %xmm6, %xmm3
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vpshufd $0x4e, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm0, %xmm6, %xmm3
vaesenc %xmm4, %xmm12, %xmm12
vaesenc %xmm4, %xmm13, %xmm13
vaesenc %xmm4, %xmm14, %xmm14
vpshufd $0x4e, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm7, %xmm6, %xmm6
vaesenc %xmm4, %xmm15, %xmm15
# Extra rounds for AES-192/AES-256 key sizes.
cmpl $11, %r9d
vmovdqu 160(%rsi), %xmm7
jl L_AES_GCM_decrypt_avx2_aesenc_128_ghash_avx_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 176(%rsi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
cmpl $13, %r9d
vmovdqu 192(%rsi), %xmm7
jl L_AES_GCM_decrypt_avx2_aesenc_128_ghash_avx_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 208(%rsi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 224(%rsi), %xmm7
L_AES_GCM_decrypt_avx2_aesenc_128_ghash_avx_done:
# aesenc_last
# XOR the 8 keystream blocks with the ciphertext and store plaintext.
vaesenclast %xmm7, %xmm8, %xmm8
vaesenclast %xmm7, %xmm9, %xmm9
vaesenclast %xmm7, %xmm10, %xmm10
vaesenclast %xmm7, %xmm11, %xmm11
vmovdqu (%rcx), %xmm0
vmovdqu 16(%rcx), %xmm1
vmovdqu 32(%rcx), %xmm2
vmovdqu 48(%rcx), %xmm3
vpxor %xmm0, %xmm8, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vpxor %xmm2, %xmm10, %xmm10
vpxor %xmm3, %xmm11, %xmm11
vmovdqu %xmm8, (%rdx)
vmovdqu %xmm9, 16(%rdx)
vmovdqu %xmm10, 32(%rdx)
vmovdqu %xmm11, 48(%rdx)
vaesenclast %xmm7, %xmm12, %xmm12
vaesenclast %xmm7, %xmm13, %xmm13
vaesenclast %xmm7, %xmm14, %xmm14
vaesenclast %xmm7, %xmm15, %xmm15
vmovdqu 64(%rcx), %xmm0
vmovdqu 80(%rcx), %xmm1
vmovdqu 96(%rcx), %xmm2
vmovdqu 112(%rcx), %xmm3
vpxor %xmm0, %xmm12, %xmm12
vpxor %xmm1, %xmm13, %xmm13
vpxor %xmm2, %xmm14, %xmm14
vpxor %xmm3, %xmm15, %xmm15
vmovdqu %xmm12, 64(%rdx)
vmovdqu %xmm13, 80(%rdx)
vmovdqu %xmm14, 96(%rdx)
vmovdqu %xmm15, 112(%rdx)
# aesenc_128_ghash - end
addl $0x80, %ebx
cmpl %r13d, %ebx
jl L_AES_GCM_decrypt_avx2_ghash_128
# Restore H, counter and E(K, Y0) clobbered by the 8-block loop.
vmovdqu (%rsp), %xmm5
vmovdqu 128(%rsp), %xmm4
vmovdqu 144(%rsp), %xmm15
L_AES_GCM_decrypt_avx2_done_128:
# Handle remaining whole 16-byte blocks one at a time.
cmpl %r10d, %ebx
jge L_AES_GCM_decrypt_avx2_done_dec
movl %r10d, %r13d
andl $0xfffffff0, %r13d
cmpl %r13d, %ebx
jge L_AES_GCM_decrypt_avx2_last_block_done
L_AES_GCM_decrypt_avx2_last_block_start:
# GHASH the ciphertext block first, then decrypt it (GHASH multiply
# interleaved with the AES rounds of the counter block).
vmovdqu (%rdi,%rbx,1), %xmm11
vpshufb L_avx2_aes_gcm_bswap_epi64(%rip), %xmm4, %xmm10
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm11, %xmm12
vpaddd L_avx2_aes_gcm_one(%rip), %xmm4, %xmm4
vpxor %xmm6, %xmm12, %xmm12
# aesenc_gfmul_sb
vpclmulqdq $0x01, %xmm5, %xmm12, %xmm2
vpclmulqdq $16, %xmm5, %xmm12, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm12, %xmm1
vpclmulqdq $0x11, %xmm5, %xmm12, %xmm8
vpxor (%rsi), %xmm10, %xmm10
vaesenc 16(%rsi), %xmm10, %xmm10
vpxor %xmm2, %xmm3, %xmm3
vpslldq $8, %xmm3, %xmm2
vpsrldq $8, %xmm3, %xmm3
vaesenc 32(%rsi), %xmm10, %xmm10
vpxor %xmm1, %xmm2, %xmm2
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm2, %xmm1
vaesenc 48(%rsi), %xmm10, %xmm10
vaesenc 64(%rsi), %xmm10, %xmm10
vaesenc 80(%rsi), %xmm10, %xmm10
vpshufd $0x4e, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm2, %xmm1
vaesenc 96(%rsi), %xmm10, %xmm10
vaesenc 112(%rsi), %xmm10, %xmm10
vaesenc 128(%rsi), %xmm10, %xmm10
vpshufd $0x4e, %xmm2, %xmm2
vaesenc 144(%rsi), %xmm10, %xmm10
vpxor %xmm3, %xmm8, %xmm8
vpxor %xmm8, %xmm2, %xmm2
vmovdqu 160(%rsi), %xmm0
cmpl $11, %r9d
jl L_AES_GCM_decrypt_avx2_aesenc_gfmul_sb_last
vaesenc %xmm0, %xmm10, %xmm10
vaesenc 176(%rsi), %xmm10, %xmm10
vmovdqu 192(%rsi), %xmm0
cmpl $13, %r9d
jl L_AES_GCM_decrypt_avx2_aesenc_gfmul_sb_last
vaesenc %xmm0, %xmm10, %xmm10
vaesenc 208(%rsi), %xmm10, %xmm10
vmovdqu 224(%rsi), %xmm0
L_AES_GCM_decrypt_avx2_aesenc_gfmul_sb_last:
vaesenclast %xmm0, %xmm10, %xmm10
vpxor %xmm1, %xmm2, %xmm6
vpxor %xmm11, %xmm10, %xmm10
vmovdqu %xmm10, (%r8,%rbx,1)
addl $16, %ebx
cmpl %r13d, %ebx
jl L_AES_GCM_decrypt_avx2_last_block_start
L_AES_GCM_decrypt_avx2_last_block_done:
# Handle the final partial block (1..15 bytes), if any.
movl %r10d, %ecx
movl %r10d, %edx
andl $15, %ecx
jz L_AES_GCM_decrypt_avx2_done_dec
# aesenc_last15_dec
vpshufb L_avx2_aes_gcm_bswap_epi64(%rip), %xmm4, %xmm4
vpxor (%rsi), %xmm4, %xmm4
vaesenc 16(%rsi), %xmm4, %xmm4
vaesenc 32(%rsi), %xmm4, %xmm4
vaesenc 48(%rsi), %xmm4, %xmm4
vaesenc 64(%rsi), %xmm4, %xmm4
vaesenc 80(%rsi), %xmm4, %xmm4
vaesenc 96(%rsi), %xmm4, %xmm4
vaesenc 112(%rsi), %xmm4, %xmm4
vaesenc 128(%rsi), %xmm4, %xmm4
vaesenc 144(%rsi), %xmm4, %xmm4
cmpl $11, %r9d
vmovdqu 160(%rsi), %xmm1
jl L_AES_GCM_decrypt_avx2_aesenc_last15_dec_avx_aesenc_avx_last
vaesenc %xmm1, %xmm4, %xmm4
vaesenc 176(%rsi), %xmm4, %xmm4
cmpl $13, %r9d
vmovdqu 192(%rsi), %xmm1
jl L_AES_GCM_decrypt_avx2_aesenc_last15_dec_avx_aesenc_avx_last
vaesenc %xmm1, %xmm4, %xmm4
vaesenc 208(%rsi), %xmm4, %xmm4
vmovdqu 224(%rsi), %xmm1
L_AES_GCM_decrypt_avx2_aesenc_last15_dec_avx_aesenc_avx_last:
vaesenclast %xmm1, %xmm4, %xmm4
xorl %ecx, %ecx
vpxor %xmm0, %xmm0, %xmm0
vmovdqu %xmm4, (%rsp)
vmovdqu %xmm0, 16(%rsp)
# Byte loop: stash the ciphertext byte (for GHASH) at 16(%rsp) before
# XORing with the keystream byte at (%rsp) to produce plaintext.
L_AES_GCM_decrypt_avx2_aesenc_last15_dec_avx_loop:
movzbl (%rdi,%rbx,1), %r13d
movb %r13b, 16(%rsp,%rcx,1)
xorb (%rsp,%rcx,1), %r13b
movb %r13b, (%r8,%rbx,1)
incl %ebx
incl %ecx
cmpl %edx, %ebx
jl L_AES_GCM_decrypt_avx2_aesenc_last15_dec_avx_loop
# Hash the zero-padded ciphertext tail into xmm6.
vmovdqu 16(%rsp), %xmm4
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm4, %xmm4
vpxor %xmm4, %xmm6, %xmm6
# ghash_gfmul_red
vpclmulqdq $16, %xmm5, %xmm6, %xmm2
vpclmulqdq $0x01, %xmm5, %xmm6, %xmm1
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm0
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm6
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm0, %xmm6, %xmm6
L_AES_GCM_decrypt_avx2_done_dec:
# calc_tag
# Final GHASH block: 64-bit bit-lengths of plaintext (r10) and AAD
# (r11), then one last multiply and XOR with E(K, Y0).
shlq $3, %r10
shlq $3, %r11
vmovq %r10, %xmm0
vmovq %r11, %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0
vpxor %xmm6, %xmm0, %xmm0
# ghash_gfmul_red
vpclmulqdq $16, %xmm5, %xmm0, %xmm4
vpclmulqdq $0x01, %xmm5, %xmm0, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm0, %xmm2
vpxor %xmm3, %xmm4, %xmm4
vpslldq $8, %xmm4, %xmm3
vpsrldq $8, %xmm4, %xmm4
vpxor %xmm2, %xmm3, %xmm3
vpclmulqdq $0x11, %xmm5, %xmm0, %xmm0
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm3, %xmm2
vpshufd $0x4e, %xmm3, %xmm3
vpxor %xmm2, %xmm3, %xmm3
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm3, %xmm2
vpshufd $0x4e, %xmm3, %xmm3
vpxor %xmm4, %xmm0, %xmm0
vpxor %xmm3, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vpxor %xmm15, %xmm0, %xmm0
# cmp_tag
# Compare computed tag with the caller's tag.  The byte loop is
# constant-time: it ORs XOR-differences into al instead of branching
# on the first mismatch.
cmpl $16, %r15d
je L_AES_GCM_decrypt_avx2_cmp_tag_16
xorq %rdx, %rdx
xorq %rax, %rax
vmovdqu %xmm0, (%rsp)
L_AES_GCM_decrypt_avx2_cmp_tag_loop:
movzbl (%rsp,%rdx,1), %r13d
xorb (%r14,%rdx,1), %r13b
orb %r13b, %al
incl %edx
cmpl %r15d, %edx
jne L_AES_GCM_decrypt_avx2_cmp_tag_loop
cmpb $0x00, %al
sete %al
jmp L_AES_GCM_decrypt_avx2_cmp_tag_done
L_AES_GCM_decrypt_avx2_cmp_tag_16:
# Full 16-byte tag: single SIMD compare + movemask.
vmovdqu (%r14), %xmm1
vpcmpeqb %xmm1, %xmm0, %xmm0
vpmovmskb %xmm0, %rdx
# %%edx == 0xFFFF then return 1 else => return 0
xorl %eax, %eax
cmpl $0xffff, %edx
sete %al
L_AES_GCM_decrypt_avx2_cmp_tag_done:
movl %eax, (%rbp)
vzeroupper
addq $0xa8, %rsp
popq %rbp
popq %r15
popq %rbx
popq %r14
popq %r12
popq %r13
repz retq
#ifndef __APPLE__
.size AES_GCM_decrypt_avx2,.-AES_GCM_decrypt_avx2
#endif /* __APPLE__ */
#ifdef WOLFSSL_AESGCM_STREAM
#ifndef __APPLE__
.text
.globl AES_GCM_init_avx2
.type AES_GCM_init_avx2,@function
.align 16
AES_GCM_init_avx2:
#else
.section __TEXT,__text
.globl _AES_GCM_init_avx2
.p2align 4
_AES_GCM_init_avx2:
#endif /* __APPLE__ */
# AES-GCM initialisation (AVX2 + AES-NI), SysV AMD64 ABI.
# Derives the GHASH key H, the counter block for the first data block and
# the encrypted initial counter block from the key schedule and the IV.
# Inputs:
#   rdi        = AES key schedule (16-byte round keys at offsets 0,16,...)
#   esi        = number of AES rounds (compared against 11 and 13 below,
#                i.e. 10/12/14 rounds)
#   rdx (r10)  = IV bytes
#   ecx (r11d) = IV length in bytes
#   r8         = out: GHASH key H = AES-Encrypt(0^128), byte-reflected
#   r9         = out: counter block for the first data block
#   8(%rsp) on entry = out: AES-Encrypt(initial counter) - presumably
#                combined with GHASH later to form the tag (confirm caller)
# Clobbers: rax, rbx, rcx, rdx, r10-r12, xmm0-xmm7, flags.
pushq %rbx
pushq %r12
movq %rdx, %r10
movl %ecx, %r11d
# 7th argument was at 8(%rsp) on entry; +16 for the two pushes above.
movq 24(%rsp), %rax
subq $16, %rsp
# xmm4 = GHASH accumulator, starts at zero.
vpxor %xmm4, %xmm4, %xmm4
movl %r11d, %edx
# The 12-byte IV case has a dedicated fast path.
cmpl $12, %edx
je L_AES_GCM_init_avx2_iv_12
# Calculate values when IV is not 12 bytes
# H = Encrypt X(=0)
# State begins as round key 0 (0 XOR key[0]); run the remaining rounds.
vmovdqu (%rdi), %xmm5
vaesenc 16(%rdi), %xmm5, %xmm5
vaesenc 32(%rdi), %xmm5, %xmm5
vaesenc 48(%rdi), %xmm5, %xmm5
vaesenc 64(%rdi), %xmm5, %xmm5
vaesenc 80(%rdi), %xmm5, %xmm5
vaesenc 96(%rdi), %xmm5, %xmm5
vaesenc 112(%rdi), %xmm5, %xmm5
vaesenc 128(%rdi), %xmm5, %xmm5
vaesenc 144(%rdi), %xmm5, %xmm5
# Extra rounds only for 12/14-round key schedules.
cmpl $11, %esi
vmovdqu 160(%rdi), %xmm0
jl L_AES_GCM_init_avx2_calc_iv_1_aesenc_avx_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc 176(%rdi), %xmm5, %xmm5
cmpl $13, %esi
vmovdqu 192(%rdi), %xmm0
jl L_AES_GCM_init_avx2_calc_iv_1_aesenc_avx_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc 208(%rdi), %xmm5, %xmm5
vmovdqu 224(%rdi), %xmm0
L_AES_GCM_init_avx2_calc_iv_1_aesenc_avx_last:
vaesenclast %xmm0, %xmm5, %xmm5
# Byte-reflect H for the bit-reflected GHASH multiplies below.
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm5, %xmm5
# Calc counter
# Initialization vector
# Counter = GHASH(IV): process full 16-byte IV blocks, then any remainder,
# then the 64-bit IV bit-length.  rcx = byte offset into the IV.
cmpl $0x00, %edx
movq $0x00, %rcx
je L_AES_GCM_init_avx2_calc_iv_done
cmpl $16, %edx
jl L_AES_GCM_init_avx2_calc_iv_lt16
# edx = IV length rounded down to a multiple of 16.
andl $0xfffffff0, %edx
L_AES_GCM_init_avx2_calc_iv_16_loop:
vmovdqu (%r10,%rcx,1), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
# 128x128-bit carry-less multiply: (xmm6:xmm4) = X * H (schoolbook, 4 muls).
vpclmulqdq $16, %xmm4, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm4, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm6
vpxor %xmm2, %xmm3, %xmm4
# ghash_mid
# Shift the 256-bit product left by one bit (bit-reflected GHASH form).
vpsrld $31, %xmm6, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm6, %xmm6
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm6, %xmm6
vpor %xmm1, %xmm4, %xmm4
# ghash_red
# Reduce the 256-bit product modulo the GHASH polynomial via the
# L_avx2_aes_gcm_mod2_128 constant (two folds).
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm2
vpclmulqdq $16, %xmm2, %xmm6, %xmm0
vpshufd $0x4e, %xmm6, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_init_avx2_calc_iv_16_loop
# Restore full IV length; fall through if a partial block remains.
movl %r11d, %edx
cmpl %edx, %ecx
je L_AES_GCM_init_avx2_calc_iv_done
L_AES_GCM_init_avx2_calc_iv_lt16:
# Copy the remaining IV bytes into a zero-padded 16-byte stack buffer.
vpxor %xmm0, %xmm0, %xmm0
xorl %ebx, %ebx
vmovdqu %xmm0, (%rsp)
L_AES_GCM_init_avx2_calc_iv_loop:
movzbl (%r10,%rcx,1), %r12d
movb %r12b, (%rsp,%rbx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_init_avx2_calc_iv_loop
# GHASH the zero-padded final block (same multiply as the loop above).
vmovdqu (%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpclmulqdq $16, %xmm4, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm4, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm6
vpxor %xmm2, %xmm3, %xmm4
# ghash_mid
vpsrld $31, %xmm6, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm6, %xmm6
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm6, %xmm6
vpor %xmm1, %xmm4, %xmm4
# ghash_red
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm2
vpclmulqdq $16, %xmm2, %xmm6, %xmm0
vpshufd $0x4e, %xmm6, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
L_AES_GCM_init_avx2_calc_iv_done:
# T = Encrypt counter
# Fold in the IV length in bits (shll $3 converts bytes to bits) and do
# one final GHASH multiply to finish J0 = GHASH(IV).
vpxor %xmm0, %xmm0, %xmm0
shll $3, %edx
vmovq %rdx, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpclmulqdq $16, %xmm4, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm4, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm6
vpxor %xmm2, %xmm3, %xmm4
# ghash_mid
vpsrld $31, %xmm6, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm6, %xmm6
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm6, %xmm6
vpor %xmm1, %xmm4, %xmm4
# ghash_red
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm2
vpclmulqdq $16, %xmm2, %xmm6, %xmm0
vpshufd $0x4e, %xmm6, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
# Back to byte order: xmm4 is now the initial counter block.
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm4, %xmm4
# Encrypt counter
# xmm7 = AES-Encrypt(initial counter) using the same key schedule.
vmovdqu (%rdi), %xmm7
vpxor %xmm4, %xmm7, %xmm7
vaesenc 16(%rdi), %xmm7, %xmm7
vaesenc 32(%rdi), %xmm7, %xmm7
vaesenc 48(%rdi), %xmm7, %xmm7
vaesenc 64(%rdi), %xmm7, %xmm7
vaesenc 80(%rdi), %xmm7, %xmm7
vaesenc 96(%rdi), %xmm7, %xmm7
vaesenc 112(%rdi), %xmm7, %xmm7
vaesenc 128(%rdi), %xmm7, %xmm7
vaesenc 144(%rdi), %xmm7, %xmm7
cmpl $11, %esi
vmovdqu 160(%rdi), %xmm0
jl L_AES_GCM_init_avx2_calc_iv_2_aesenc_avx_last
vaesenc %xmm0, %xmm7, %xmm7
vaesenc 176(%rdi), %xmm7, %xmm7
cmpl $13, %esi
vmovdqu 192(%rdi), %xmm0
jl L_AES_GCM_init_avx2_calc_iv_2_aesenc_avx_last
vaesenc %xmm0, %xmm7, %xmm7
vaesenc 208(%rdi), %xmm7, %xmm7
vmovdqu 224(%rdi), %xmm0
L_AES_GCM_init_avx2_calc_iv_2_aesenc_avx_last:
vaesenclast %xmm0, %xmm7, %xmm7
jmp L_AES_GCM_init_avx2_iv_done
L_AES_GCM_init_avx2_iv_12:
# # Calculate values when IV is 12 bytes
# Set counter based on IV
# Counter = IV || 0x00000001: blend the low 3 dwords of the IV into the
# bswap-one constant.
vmovdqu L_avx2_aes_gcm_bswap_one(%rip), %xmm4
vmovdqu (%rdi), %xmm5
vpblendd $7, (%r10), %xmm4, %xmm4
# H = Encrypt X(=0) and T = Encrypt counter
# Run both encryptions interleaved through the round keys:
#   xmm5 = zero block state (-> H), xmm7 = counter state (-> T).
vmovdqu 16(%rdi), %xmm6
vpxor %xmm5, %xmm4, %xmm7
vaesenc %xmm6, %xmm5, %xmm5
vaesenc %xmm6, %xmm7, %xmm7
vmovdqu 32(%rdi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
vmovdqu 48(%rdi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
vmovdqu 64(%rdi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
vmovdqu 80(%rdi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
vmovdqu 96(%rdi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
vmovdqu 112(%rdi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
vmovdqu 128(%rdi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
vmovdqu 144(%rdi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
cmpl $11, %esi
vmovdqu 160(%rdi), %xmm0
jl L_AES_GCM_init_avx2_calc_iv_12_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
vmovdqu 176(%rdi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
cmpl $13, %esi
vmovdqu 192(%rdi), %xmm0
jl L_AES_GCM_init_avx2_calc_iv_12_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
vmovdqu 208(%rdi), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
vmovdqu 224(%rdi), %xmm0
L_AES_GCM_init_avx2_calc_iv_12_last:
vaesenclast %xmm0, %xmm5, %xmm5
vaesenclast %xmm0, %xmm7, %xmm7
# Byte-reflect H for GHASH use.
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm5, %xmm5
L_AES_GCM_init_avx2_iv_done:
# Store the encrypted initial counter block (7th argument).
vmovdqu %xmm7, (%rax)
# Increment the counter (per-64-bit byte swap, +1, matching the
# update routines' counter format) for the first data block.
vpshufb L_avx2_aes_gcm_bswap_epi64(%rip), %xmm4, %xmm4
vpaddd L_avx2_aes_gcm_one(%rip), %xmm4, %xmm4
# Store H and the counter for the caller.
vmovdqu %xmm5, (%r8)
vmovdqu %xmm4, (%r9)
vzeroupper
addq $16, %rsp
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size AES_GCM_init_avx2,.-AES_GCM_init_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl AES_GCM_aad_update_avx2
.type AES_GCM_aad_update_avx2,@function
.align 16
AES_GCM_aad_update_avx2:
#else
.section __TEXT,__text
.globl _AES_GCM_aad_update_avx2
.p2align 4
_AES_GCM_aad_update_avx2:
#endif /* __APPLE__ */
# Fold additional authenticated data (AAD) into the running GHASH value.
# SysV AMD64 arguments:
#   rdi = AAD bytes
#   esi = AAD length in bytes; NOTE(review): no zero/partial-block check -
#         the loop always runs at least once, so the caller presumably
#         passes a non-zero multiple of 16 (confirm against C caller)
#   rdx = in/out: 16-byte GHASH accumulator
#   rcx = GHASH key H (as produced by AES_GCM_init_avx2)
# Clobbers: rax, rcx, xmm0-xmm6, flags.
movq %rcx, %rax
vmovdqu (%rdx), %xmm4
vmovdqu (%rax), %xmm5
# rcx now reused as the byte offset into the AAD.
xorl %ecx, %ecx
L_AES_GCM_aad_update_avx2_16_loop:
# Load the next 16-byte block, byte-reflect it and XOR into the tag.
vmovdqu (%rdi,%rcx,1), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
# 128x128-bit carry-less multiply: (xmm6:xmm4) = X * H.
vpclmulqdq $16, %xmm4, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm4, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm6
vpxor %xmm2, %xmm3, %xmm4
# ghash_mid
# Shift the 256-bit product left by one bit (bit-reflected GHASH form).
vpsrld $31, %xmm6, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm6, %xmm6
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm6, %xmm6
vpor %xmm1, %xmm4, %xmm4
# ghash_red
# Reduce modulo the GHASH polynomial (L_avx2_aes_gcm_mod2_128 constant).
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm2
vpclmulqdq $16, %xmm2, %xmm6, %xmm0
vpshufd $0x4e, %xmm6, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
addl $16, %ecx
cmpl %esi, %ecx
jl L_AES_GCM_aad_update_avx2_16_loop
# Write the updated GHASH value back for the caller.
vmovdqu %xmm4, (%rdx)
vzeroupper
repz retq
#ifndef __APPLE__
.size AES_GCM_aad_update_avx2,.-AES_GCM_aad_update_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl AES_GCM_encrypt_block_avx2
.type AES_GCM_encrypt_block_avx2,@function
.align 16
AES_GCM_encrypt_block_avx2:
#else
.section __TEXT,__text
.globl _AES_GCM_encrypt_block_avx2
.p2align 4
_AES_GCM_encrypt_block_avx2:
#endif /* __APPLE__ */
# Encrypt one 16-byte block in CTR mode and advance the counter.
# SysV AMD64 arguments:
#   rdi       = AES key schedule (round keys at 0, 16, ...)
#   esi       = number of AES rounds (compared against 11 and 13 below)
#   rdx (r10) = out: 16 bytes of ciphertext
#   rcx (r11) = in:  16 bytes of plaintext
#   r8        = in/out: counter block (incremented by one on return)
# GHASH of the ciphertext is NOT done here; callers handle that separately.
# Clobbers: r10, r11, xmm0-xmm3, flags.
movq %rdx, %r10
movq %rcx, %r11
# Stack space reserved; not referenced in this function's body
# (NOTE(review): mirrors the generated layout of sibling routines).
subq $0x98, %rsp
vmovdqu (%r8), %xmm3
# aesenc_block
# xmm0 = byte-swapped counter to encrypt; xmm1 = counter + 1 (kept in the
# per-64-bit-swapped format used by the update routines).
vmovdqu %xmm3, %xmm1
vpshufb L_avx2_aes_gcm_bswap_epi64(%rip), %xmm1, %xmm0
vpaddd L_avx2_aes_gcm_one(%rip), %xmm1, %xmm1
vpxor (%rdi), %xmm0, %xmm0
vmovdqu 16(%rdi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 32(%rdi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 48(%rdi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 64(%rdi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 80(%rdi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 96(%rdi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 112(%rdi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 128(%rdi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 144(%rdi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu %xmm1, %xmm3
# Extra rounds only for 12/14-round key schedules.
cmpl $11, %esi
vmovdqu 160(%rdi), %xmm1
jl L_AES_GCM_encrypt_block_avx2_aesenc_block_last
vaesenc %xmm1, %xmm0, %xmm0
vmovdqu 176(%rdi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
cmpl $13, %esi
vmovdqu 192(%rdi), %xmm1
jl L_AES_GCM_encrypt_block_avx2_aesenc_block_last
vaesenc %xmm1, %xmm0, %xmm0
vmovdqu 208(%rdi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 224(%rdi), %xmm1
L_AES_GCM_encrypt_block_avx2_aesenc_block_last:
vaesenclast %xmm1, %xmm0, %xmm0
# ciphertext = keystream XOR plaintext.
vmovdqu (%r11), %xmm1
vpxor %xmm1, %xmm0, %xmm0
vmovdqu %xmm0, (%r10)
# Store the incremented counter back for the next block.
vmovdqu %xmm3, (%r8)
vzeroupper
addq $0x98, %rsp
repz retq
#ifndef __APPLE__
.size AES_GCM_encrypt_block_avx2,.-AES_GCM_encrypt_block_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl AES_GCM_ghash_block_avx2
.type AES_GCM_ghash_block_avx2,@function
.align 16
AES_GCM_ghash_block_avx2:
#else
.section __TEXT,__text
.globl _AES_GCM_ghash_block_avx2
.p2align 4
_AES_GCM_ghash_block_avx2:
#endif /* __APPLE__ */
# Fold one 16-byte block into the running GHASH value:
#   tag = (tag XOR reflect(block)) * H  (mod the GHASH polynomial)
# SysV AMD64 arguments:
#   rdi = 16-byte input block
#   rsi = in/out: 16-byte GHASH accumulator
#   rdx = GHASH key H (as produced by AES_GCM_init_avx2)
# Clobbers: xmm0-xmm6, flags.
vmovdqu (%rsi), %xmm4
vmovdqu (%rdx), %xmm5
# Load the block, byte-reflect it and XOR into the tag.
vmovdqu (%rdi), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
# 128x128-bit carry-less multiply: (xmm6:xmm4) = X * H.
vpclmulqdq $16, %xmm4, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm4, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm6
vpxor %xmm2, %xmm3, %xmm4
# ghash_mid
# Shift the 256-bit product left by one bit (bit-reflected GHASH form).
vpsrld $31, %xmm6, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm6, %xmm6
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm6, %xmm6
vpor %xmm1, %xmm4, %xmm4
# ghash_red
# Reduce modulo the GHASH polynomial (L_avx2_aes_gcm_mod2_128 constant).
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm2
vpclmulqdq $16, %xmm2, %xmm6, %xmm0
vpshufd $0x4e, %xmm6, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
# Write the updated GHASH value back for the caller.
vmovdqu %xmm4, (%rsi)
vzeroupper
repz retq
#ifndef __APPLE__
.size AES_GCM_ghash_block_avx2,.-AES_GCM_ghash_block_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl AES_GCM_encrypt_update_avx2
.type AES_GCM_encrypt_update_avx2,@function
.align 16
AES_GCM_encrypt_update_avx2:
#else
.section __TEXT,__text
.globl _AES_GCM_encrypt_update_avx2
.p2align 4
_AES_GCM_encrypt_update_avx2:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
movq %rdx, %r10
movq %rcx, %r11
movq 32(%rsp), %rax
movq 40(%rsp), %r12
subq $0x98, %rsp
vmovdqu (%r9), %xmm6
vmovdqu (%rax), %xmm5
vmovdqu (%r12), %xmm4
vpsrlq $63, %xmm5, %xmm1
vpsllq $0x01, %xmm5, %xmm0
vpslldq $8, %xmm1, %xmm1
vpor %xmm1, %xmm0, %xmm0
vpshufd $0xff, %xmm5, %xmm5
vpsrad $31, %xmm5, %xmm5
vpand L_avx2_aes_gcm_mod2_128(%rip), %xmm5, %xmm5
vpxor %xmm0, %xmm5, %xmm5
xorl %r14d, %r14d
cmpl $0x80, %r8d
movl %r8d, %r13d
jl L_AES_GCM_encrypt_update_avx2_done_128
andl $0xffffff80, %r13d
vmovdqu %xmm4, 128(%rsp)
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm3
# H ^ 1 and H ^ 2
vpclmulqdq $0x00, %xmm5, %xmm5, %xmm9
vpclmulqdq $0x11, %xmm5, %xmm5, %xmm10
vpclmulqdq $16, %xmm3, %xmm9, %xmm8
vpshufd $0x4e, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpclmulqdq $16, %xmm3, %xmm9, %xmm8
vpshufd $0x4e, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpxor %xmm9, %xmm10, %xmm0
vmovdqu %xmm5, (%rsp)
vmovdqu %xmm0, 16(%rsp)
# H ^ 3 and H ^ 4
vpclmulqdq $16, %xmm5, %xmm0, %xmm11
vpclmulqdq $0x01, %xmm5, %xmm0, %xmm10
vpclmulqdq $0x00, %xmm5, %xmm0, %xmm9
vpclmulqdq $0x11, %xmm5, %xmm0, %xmm12
vpclmulqdq $0x00, %xmm0, %xmm0, %xmm13
vpclmulqdq $0x11, %xmm0, %xmm0, %xmm14
vpxor %xmm10, %xmm11, %xmm11
vpslldq $8, %xmm11, %xmm10
vpsrldq $8, %xmm11, %xmm11
vpxor %xmm9, %xmm10, %xmm10
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm9, %xmm10, %xmm10
vpxor %xmm8, %xmm13, %xmm13
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm11, %xmm12, %xmm12
vpxor %xmm8, %xmm13, %xmm13
vpxor %xmm12, %xmm10, %xmm10
vpxor %xmm14, %xmm13, %xmm2
vpxor %xmm9, %xmm10, %xmm1
vmovdqu %xmm1, 32(%rsp)
vmovdqu %xmm2, 48(%rsp)
# H ^ 5 and H ^ 6
vpclmulqdq $16, %xmm0, %xmm1, %xmm11
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm10
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm9
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm12
vpclmulqdq $0x00, %xmm1, %xmm1, %xmm13
vpclmulqdq $0x11, %xmm1, %xmm1, %xmm14
vpxor %xmm10, %xmm11, %xmm11
vpslldq $8, %xmm11, %xmm10
vpsrldq $8, %xmm11, %xmm11
vpxor %xmm9, %xmm10, %xmm10
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm9, %xmm10, %xmm10
vpxor %xmm8, %xmm13, %xmm13
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm11, %xmm12, %xmm12
vpxor %xmm8, %xmm13, %xmm13
vpxor %xmm12, %xmm10, %xmm10
vpxor %xmm14, %xmm13, %xmm0
vpxor %xmm9, %xmm10, %xmm7
vmovdqu %xmm7, 64(%rsp)
vmovdqu %xmm0, 80(%rsp)
# H ^ 7 and H ^ 8
vpclmulqdq $16, %xmm1, %xmm2, %xmm11
vpclmulqdq $0x01, %xmm1, %xmm2, %xmm10
vpclmulqdq $0x00, %xmm1, %xmm2, %xmm9
vpclmulqdq $0x11, %xmm1, %xmm2, %xmm12
vpclmulqdq $0x00, %xmm2, %xmm2, %xmm13
vpclmulqdq $0x11, %xmm2, %xmm2, %xmm14
vpxor %xmm10, %xmm11, %xmm11
vpslldq $8, %xmm11, %xmm10
vpsrldq $8, %xmm11, %xmm11
vpxor %xmm9, %xmm10, %xmm10
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm9, %xmm10, %xmm10
vpxor %xmm8, %xmm13, %xmm13
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm11, %xmm12, %xmm12
vpxor %xmm8, %xmm13, %xmm13
vpxor %xmm12, %xmm10, %xmm10
vpxor %xmm14, %xmm13, %xmm0
vpxor %xmm9, %xmm10, %xmm7
vmovdqu %xmm7, 96(%rsp)
vmovdqu %xmm0, 112(%rsp)
# First 128 bytes of input
# aesenc_128
# aesenc_ctr
vmovdqu 128(%rsp), %xmm0
vmovdqu L_avx2_aes_gcm_bswap_epi64(%rip), %xmm1
vpaddd L_avx2_aes_gcm_one(%rip), %xmm0, %xmm9
vpshufb %xmm1, %xmm0, %xmm8
vpaddd L_avx2_aes_gcm_two(%rip), %xmm0, %xmm10
vpshufb %xmm1, %xmm9, %xmm9
vpaddd L_avx2_aes_gcm_three(%rip), %xmm0, %xmm11
vpshufb %xmm1, %xmm10, %xmm10
vpaddd L_avx2_aes_gcm_four(%rip), %xmm0, %xmm12
vpshufb %xmm1, %xmm11, %xmm11
vpaddd L_avx2_aes_gcm_five(%rip), %xmm0, %xmm13
vpshufb %xmm1, %xmm12, %xmm12
vpaddd L_avx2_aes_gcm_six(%rip), %xmm0, %xmm14
vpshufb %xmm1, %xmm13, %xmm13
vpaddd L_avx2_aes_gcm_seven(%rip), %xmm0, %xmm15
vpshufb %xmm1, %xmm14, %xmm14
vpaddd L_avx2_aes_gcm_eight(%rip), %xmm0, %xmm0
vpshufb %xmm1, %xmm15, %xmm15
# aesenc_xor
vmovdqu (%rdi), %xmm7
vmovdqu %xmm0, 128(%rsp)
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm7, %xmm9, %xmm9
vpxor %xmm7, %xmm10, %xmm10
vpxor %xmm7, %xmm11, %xmm11
vpxor %xmm7, %xmm12, %xmm12
vpxor %xmm7, %xmm13, %xmm13
vpxor %xmm7, %xmm14, %xmm14
vpxor %xmm7, %xmm15, %xmm15
vmovdqu 16(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 32(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 48(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 64(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 80(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 96(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 112(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 128(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 144(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
cmpl $11, %esi
vmovdqu 160(%rdi), %xmm7
jl L_AES_GCM_encrypt_update_avx2_aesenc_128_enc_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 176(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
cmpl $13, %esi
vmovdqu 192(%rdi), %xmm7
jl L_AES_GCM_encrypt_update_avx2_aesenc_128_enc_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 208(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 224(%rdi), %xmm7
L_AES_GCM_encrypt_update_avx2_aesenc_128_enc_done:
# aesenc_last
vaesenclast %xmm7, %xmm8, %xmm8
vaesenclast %xmm7, %xmm9, %xmm9
vaesenclast %xmm7, %xmm10, %xmm10
vaesenclast %xmm7, %xmm11, %xmm11
vmovdqu (%r11), %xmm0
vmovdqu 16(%r11), %xmm1
vmovdqu 32(%r11), %xmm2
vmovdqu 48(%r11), %xmm3
vpxor %xmm0, %xmm8, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vpxor %xmm2, %xmm10, %xmm10
vpxor %xmm3, %xmm11, %xmm11
vmovdqu %xmm8, (%r10)
vmovdqu %xmm9, 16(%r10)
vmovdqu %xmm10, 32(%r10)
vmovdqu %xmm11, 48(%r10)
vaesenclast %xmm7, %xmm12, %xmm12
vaesenclast %xmm7, %xmm13, %xmm13
vaesenclast %xmm7, %xmm14, %xmm14
vaesenclast %xmm7, %xmm15, %xmm15
vmovdqu 64(%r11), %xmm0
vmovdqu 80(%r11), %xmm1
vmovdqu 96(%r11), %xmm2
vmovdqu 112(%r11), %xmm3
vpxor %xmm0, %xmm12, %xmm12
vpxor %xmm1, %xmm13, %xmm13
vpxor %xmm2, %xmm14, %xmm14
vpxor %xmm3, %xmm15, %xmm15
vmovdqu %xmm12, 64(%r10)
vmovdqu %xmm13, 80(%r10)
vmovdqu %xmm14, 96(%r10)
vmovdqu %xmm15, 112(%r10)
cmpl $0x80, %r13d
movl $0x80, %r14d
jle L_AES_GCM_encrypt_update_avx2_end_128
# More 128 bytes of input
L_AES_GCM_encrypt_update_avx2_ghash_128:
# aesenc_128_ghash
leaq (%r11,%r14,1), %rcx
leaq (%r10,%r14,1), %rdx
# aesenc_ctr
vmovdqu 128(%rsp), %xmm0
vmovdqu L_avx2_aes_gcm_bswap_epi64(%rip), %xmm1
vpaddd L_avx2_aes_gcm_one(%rip), %xmm0, %xmm9
vpshufb %xmm1, %xmm0, %xmm8
vpaddd L_avx2_aes_gcm_two(%rip), %xmm0, %xmm10
vpshufb %xmm1, %xmm9, %xmm9
vpaddd L_avx2_aes_gcm_three(%rip), %xmm0, %xmm11
vpshufb %xmm1, %xmm10, %xmm10
vpaddd L_avx2_aes_gcm_four(%rip), %xmm0, %xmm12
vpshufb %xmm1, %xmm11, %xmm11
vpaddd L_avx2_aes_gcm_five(%rip), %xmm0, %xmm13
vpshufb %xmm1, %xmm12, %xmm12
vpaddd L_avx2_aes_gcm_six(%rip), %xmm0, %xmm14
vpshufb %xmm1, %xmm13, %xmm13
vpaddd L_avx2_aes_gcm_seven(%rip), %xmm0, %xmm15
vpshufb %xmm1, %xmm14, %xmm14
vpaddd L_avx2_aes_gcm_eight(%rip), %xmm0, %xmm0
vpshufb %xmm1, %xmm15, %xmm15
# aesenc_xor
vmovdqu (%rdi), %xmm7
vmovdqu %xmm0, 128(%rsp)
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm7, %xmm9, %xmm9
vpxor %xmm7, %xmm10, %xmm10
vpxor %xmm7, %xmm11, %xmm11
vpxor %xmm7, %xmm12, %xmm12
vpxor %xmm7, %xmm13, %xmm13
vpxor %xmm7, %xmm14, %xmm14
vpxor %xmm7, %xmm15, %xmm15
# aesenc_pclmul_1
vmovdqu -128(%rdx), %xmm1
vmovdqu 16(%rdi), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vmovdqu 112(%rsp), %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm5
vpclmulqdq $0x01, %xmm2, %xmm1, %xmm3
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm6
vpclmulqdq $0x11, %xmm2, %xmm1, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_2
vmovdqu -112(%rdx), %xmm1
vmovdqu 96(%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 32(%rdi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu -96(%rdx), %xmm1
vmovdqu 80(%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 48(%rdi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu -80(%rdx), %xmm1
vmovdqu 64(%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 64(%rdi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu -64(%rdx), %xmm1
vmovdqu 48(%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 80(%rdi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu -48(%rdx), %xmm1
vmovdqu 32(%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 96(%rdi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu -32(%rdx), %xmm1
vmovdqu 16(%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 112(%rdi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu -16(%rdx), %xmm1
vmovdqu (%rsp), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 128(%rdi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_l
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm4, %xmm6, %xmm6
vpxor %xmm3, %xmm5, %xmm5
vpslldq $8, %xmm5, %xmm1
vpsrldq $8, %xmm5, %xmm5
vmovdqu 144(%rdi), %xmm4
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm0
vaesenc %xmm4, %xmm8, %xmm8
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $16, %xmm0, %xmm6, %xmm3
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vpshufd $0x4e, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm0, %xmm6, %xmm3
vaesenc %xmm4, %xmm12, %xmm12
vaesenc %xmm4, %xmm13, %xmm13
vaesenc %xmm4, %xmm14, %xmm14
vpshufd $0x4e, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm7, %xmm6, %xmm6
vaesenc %xmm4, %xmm15, %xmm15
cmpl $11, %esi
vmovdqu 160(%rdi), %xmm7
jl L_AES_GCM_encrypt_update_avx2_aesenc_128_ghash_avx_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 176(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
cmpl $13, %esi
vmovdqu 192(%rdi), %xmm7
jl L_AES_GCM_encrypt_update_avx2_aesenc_128_ghash_avx_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 208(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 224(%rdi), %xmm7
L_AES_GCM_encrypt_update_avx2_aesenc_128_ghash_avx_done:
# aesenc_last
vaesenclast %xmm7, %xmm8, %xmm8
vaesenclast %xmm7, %xmm9, %xmm9
vaesenclast %xmm7, %xmm10, %xmm10
vaesenclast %xmm7, %xmm11, %xmm11
vmovdqu (%rcx), %xmm0
vmovdqu 16(%rcx), %xmm1
vmovdqu 32(%rcx), %xmm2
vmovdqu 48(%rcx), %xmm3
vpxor %xmm0, %xmm8, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vpxor %xmm2, %xmm10, %xmm10
vpxor %xmm3, %xmm11, %xmm11
vmovdqu %xmm8, (%rdx)
vmovdqu %xmm9, 16(%rdx)
vmovdqu %xmm10, 32(%rdx)
vmovdqu %xmm11, 48(%rdx)
vaesenclast %xmm7, %xmm12, %xmm12
vaesenclast %xmm7, %xmm13, %xmm13
vaesenclast %xmm7, %xmm14, %xmm14
vaesenclast %xmm7, %xmm15, %xmm15
vmovdqu 64(%rcx), %xmm0
vmovdqu 80(%rcx), %xmm1
vmovdqu 96(%rcx), %xmm2
vmovdqu 112(%rcx), %xmm3
vpxor %xmm0, %xmm12, %xmm12
vpxor %xmm1, %xmm13, %xmm13
vpxor %xmm2, %xmm14, %xmm14
vpxor %xmm3, %xmm15, %xmm15
vmovdqu %xmm12, 64(%rdx)
vmovdqu %xmm13, 80(%rdx)
vmovdqu %xmm14, 96(%rdx)
vmovdqu %xmm15, 112(%rdx)
# aesenc_128_ghash - end
addl $0x80, %r14d
cmpl %r13d, %r14d
jl L_AES_GCM_encrypt_update_avx2_ghash_128
L_AES_GCM_encrypt_update_avx2_end_128:
vmovdqu L_avx2_aes_gcm_bswap_mask(%rip), %xmm4
vpshufb %xmm4, %xmm8, %xmm8
vpshufb %xmm4, %xmm9, %xmm9
vpshufb %xmm4, %xmm10, %xmm10
vpshufb %xmm4, %xmm11, %xmm11
vpshufb %xmm4, %xmm12, %xmm12
vpshufb %xmm4, %xmm13, %xmm13
vpshufb %xmm4, %xmm14, %xmm14
vpshufb %xmm4, %xmm15, %xmm15
vpxor %xmm6, %xmm8, %xmm8
vmovdqu (%rsp), %xmm7
vpclmulqdq $16, %xmm15, %xmm7, %xmm5
vpclmulqdq $0x01, %xmm15, %xmm7, %xmm1
vpclmulqdq $0x00, %xmm15, %xmm7, %xmm4
vpclmulqdq $0x11, %xmm15, %xmm7, %xmm6
vpxor %xmm1, %xmm5, %xmm5
vmovdqu 16(%rsp), %xmm7
vpclmulqdq $16, %xmm14, %xmm7, %xmm2
vpclmulqdq $0x01, %xmm14, %xmm7, %xmm1
vpclmulqdq $0x00, %xmm14, %xmm7, %xmm0
vpclmulqdq $0x11, %xmm14, %xmm7, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm0, %xmm4, %xmm4
vmovdqu 32(%rsp), %xmm15
vmovdqu 48(%rsp), %xmm7
vpclmulqdq $16, %xmm13, %xmm15, %xmm2
vpclmulqdq $0x01, %xmm13, %xmm15, %xmm1
vpclmulqdq $0x00, %xmm13, %xmm15, %xmm0
vpclmulqdq $0x11, %xmm13, %xmm15, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm0, %xmm4, %xmm4
vpclmulqdq $16, %xmm12, %xmm7, %xmm2
vpclmulqdq $0x01, %xmm12, %xmm7, %xmm1
vpclmulqdq $0x00, %xmm12, %xmm7, %xmm0
vpclmulqdq $0x11, %xmm12, %xmm7, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm0, %xmm4, %xmm4
vmovdqu 64(%rsp), %xmm15
vmovdqu 80(%rsp), %xmm7
vpclmulqdq $16, %xmm11, %xmm15, %xmm2
vpclmulqdq $0x01, %xmm11, %xmm15, %xmm1
vpclmulqdq $0x00, %xmm11, %xmm15, %xmm0
vpclmulqdq $0x11, %xmm11, %xmm15, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm0, %xmm4, %xmm4
vpclmulqdq $16, %xmm10, %xmm7, %xmm2
vpclmulqdq $0x01, %xmm10, %xmm7, %xmm1
vpclmulqdq $0x00, %xmm10, %xmm7, %xmm0
vpclmulqdq $0x11, %xmm10, %xmm7, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm0, %xmm4, %xmm4
vmovdqu 96(%rsp), %xmm15
vmovdqu 112(%rsp), %xmm7
vpclmulqdq $16, %xmm9, %xmm15, %xmm2
vpclmulqdq $0x01, %xmm9, %xmm15, %xmm1
vpclmulqdq $0x00, %xmm9, %xmm15, %xmm0
vpclmulqdq $0x11, %xmm9, %xmm15, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm0, %xmm4, %xmm4
vpclmulqdq $16, %xmm8, %xmm7, %xmm2
vpclmulqdq $0x01, %xmm8, %xmm7, %xmm1
vpclmulqdq $0x00, %xmm8, %xmm7, %xmm0
vpclmulqdq $0x11, %xmm8, %xmm7, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm0, %xmm4, %xmm4
vpslldq $8, %xmm5, %xmm7
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm7, %xmm4, %xmm4
vpxor %xmm5, %xmm6, %xmm6
# ghash_red
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm2
vpclmulqdq $16, %xmm2, %xmm4, %xmm0
vpshufd $0x4e, %xmm4, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm6, %xmm6
vmovdqu (%rsp), %xmm5
vmovdqu 128(%rsp), %xmm4
L_AES_GCM_encrypt_update_avx2_done_128:
cmpl %r8d, %r14d
je L_AES_GCM_encrypt_update_avx2_done_enc
movl %r8d, %r13d
andl $0xfffffff0, %r13d
cmpl %r13d, %r14d
jge L_AES_GCM_encrypt_update_avx2_last_block_done
# aesenc_block
vmovdqu %xmm4, %xmm1
vpshufb L_avx2_aes_gcm_bswap_epi64(%rip), %xmm1, %xmm0
vpaddd L_avx2_aes_gcm_one(%rip), %xmm1, %xmm1
vpxor (%rdi), %xmm0, %xmm0
vmovdqu 16(%rdi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 32(%rdi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 48(%rdi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 64(%rdi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 80(%rdi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 96(%rdi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 112(%rdi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 128(%rdi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 144(%rdi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu %xmm1, %xmm4
cmpl $11, %esi
vmovdqu 160(%rdi), %xmm1
jl L_AES_GCM_encrypt_update_avx2_aesenc_block_last
vaesenc %xmm1, %xmm0, %xmm0
vmovdqu 176(%rdi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
cmpl $13, %esi
vmovdqu 192(%rdi), %xmm1
jl L_AES_GCM_encrypt_update_avx2_aesenc_block_last
vaesenc %xmm1, %xmm0, %xmm0
vmovdqu 208(%rdi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 224(%rdi), %xmm1
L_AES_GCM_encrypt_update_avx2_aesenc_block_last:
vaesenclast %xmm1, %xmm0, %xmm0
vmovdqu (%r11,%r14,1), %xmm1
vpxor %xmm1, %xmm0, %xmm0
vmovdqu %xmm0, (%r10,%r14,1)
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vpxor %xmm0, %xmm6, %xmm6
addl $16, %r14d
cmpl %r13d, %r14d
jge L_AES_GCM_encrypt_update_avx2_last_block_ghash
L_AES_GCM_encrypt_update_avx2_last_block_start:
vmovdqu (%r11,%r14,1), %xmm12
vpshufb L_avx2_aes_gcm_bswap_epi64(%rip), %xmm4, %xmm11
vpaddd L_avx2_aes_gcm_one(%rip), %xmm4, %xmm4
# aesenc_gfmul_sb
vpclmulqdq $0x01, %xmm5, %xmm6, %xmm2
vpclmulqdq $16, %xmm5, %xmm6, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm1
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm8
vpxor (%rdi), %xmm11, %xmm11
vaesenc 16(%rdi), %xmm11, %xmm11
vpxor %xmm2, %xmm3, %xmm3
vpslldq $8, %xmm3, %xmm2
vpsrldq $8, %xmm3, %xmm3
vaesenc 32(%rdi), %xmm11, %xmm11
vpxor %xmm1, %xmm2, %xmm2
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm2, %xmm1
vaesenc 48(%rdi), %xmm11, %xmm11
vaesenc 64(%rdi), %xmm11, %xmm11
vaesenc 80(%rdi), %xmm11, %xmm11
vpshufd $0x4e, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm2, %xmm1
vaesenc 96(%rdi), %xmm11, %xmm11
vaesenc 112(%rdi), %xmm11, %xmm11
vaesenc 128(%rdi), %xmm11, %xmm11
vpshufd $0x4e, %xmm2, %xmm2
vaesenc 144(%rdi), %xmm11, %xmm11
vpxor %xmm3, %xmm8, %xmm8
vpxor %xmm8, %xmm2, %xmm2
vmovdqu 160(%rdi), %xmm0
cmpl $11, %esi
jl L_AES_GCM_encrypt_update_avx2_aesenc_gfmul_sb_last
vaesenc %xmm0, %xmm11, %xmm11
vaesenc 176(%rdi), %xmm11, %xmm11
vmovdqu 192(%rdi), %xmm0
cmpl $13, %esi
jl L_AES_GCM_encrypt_update_avx2_aesenc_gfmul_sb_last
vaesenc %xmm0, %xmm11, %xmm11
vaesenc 208(%rdi), %xmm11, %xmm11
vmovdqu 224(%rdi), %xmm0
L_AES_GCM_encrypt_update_avx2_aesenc_gfmul_sb_last:
vaesenclast %xmm0, %xmm11, %xmm11
vpxor %xmm1, %xmm2, %xmm6
vpxor %xmm12, %xmm11, %xmm11
vmovdqu %xmm11, (%r10,%r14,1)
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm11, %xmm11
vpxor %xmm11, %xmm6, %xmm6
addl $16, %r14d
cmpl %r13d, %r14d
jl L_AES_GCM_encrypt_update_avx2_last_block_start
L_AES_GCM_encrypt_update_avx2_last_block_ghash:
# ghash_gfmul_red
vpclmulqdq $16, %xmm5, %xmm6, %xmm10
vpclmulqdq $0x01, %xmm5, %xmm6, %xmm9
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm8
vpxor %xmm9, %xmm10, %xmm10
vpslldq $8, %xmm10, %xmm9
vpsrldq $8, %xmm10, %xmm10
vpxor %xmm8, %xmm9, %xmm9
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm6
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm9, %xmm8
vpshufd $0x4e, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm9, %xmm8
vpshufd $0x4e, %xmm9, %xmm9
vpxor %xmm10, %xmm6, %xmm6
vpxor %xmm9, %xmm6, %xmm6
vpxor %xmm8, %xmm6, %xmm6
L_AES_GCM_encrypt_update_avx2_last_block_done:
L_AES_GCM_encrypt_update_avx2_done_enc:
vmovdqu %xmm6, (%r9)
vmovdqu %xmm4, (%r12)
vzeroupper
addq $0x98, %rsp
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size AES_GCM_encrypt_update_avx2,.-AES_GCM_encrypt_update_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl AES_GCM_encrypt_final_avx2
.type AES_GCM_encrypt_final_avx2,@function
.align 16
AES_GCM_encrypt_final_avx2:
#else
.section __TEXT,__text
.globl _AES_GCM_encrypt_final_avx2
.p2align 4
_AES_GCM_encrypt_final_avx2:
#endif /* __APPLE__ */
# AES_GCM_encrypt_final_avx2 - produce the GCM authentication tag once all
# AAD and plaintext have been streamed through the update functions.
# ABI: System V AMD64.
# In:  rdi     = current 16-byte GHASH state
#      rsi     = output buffer for the authentication tag
#      edx     = requested tag length in bytes (16 takes the fast path)
#      rcx     = total ciphertext length in bytes
#      r8      = total AAD length in bytes
#      r9      = stored hash-key value (multiplied by x below to recover H
#                 -- presumably pre-divided by the update code; TODO confirm
#                 against the context setup)
#      8(%rsp) = pointer to E(K, Y0), the encrypted initial counter block
# Out: tag written to (%rsi); no register return value.
movq 8(%rsp), %rax
subq $16, %rsp                  # 16-byte scratch for partial tag store
vmovdqu (%rdi), %xmm4           # xmm4 = GHASH state
vmovdqu (%r9), %xmm5            # xmm5 = stored hash-key value
vmovdqu (%rax), %xmm6           # xmm6 = E(K, Y0)
# Multiply the stored key value by x in GF(2^128): shift left one bit across
# both 64-bit lanes, then conditionally XOR the reduction polynomial
# (L_avx2_aes_gcm_mod2_128) when the top bit was set.
vpsrlq $63, %xmm5, %xmm1
vpsllq $0x01, %xmm5, %xmm0
vpslldq $8, %xmm1, %xmm1
vpor %xmm1, %xmm0, %xmm0
vpshufd $0xff, %xmm5, %xmm5
vpsrad $31, %xmm5, %xmm5        # broadcast sign of the top bit as a mask
vpand L_avx2_aes_gcm_mod2_128(%rip), %xmm5, %xmm5
vpxor %xmm0, %xmm5, %xmm5       # xmm5 = H
# calc_tag
# Fold the bit lengths (len(A) || len(C)) block into the GHASH state.
shlq $3, %rcx                   # bytes -> bits
shlq $3, %r8                    # bytes -> bits
vmovq %rcx, %xmm0
vmovq %r8, %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0
vpxor %xmm4, %xmm0, %xmm0
# ghash_gfmul_red
# One GHASH multiply (state * H): three carry-less multiplies split the
# 256-bit product, then a two-step shift/XOR reduction brings it back
# modulo the GCM polynomial.
vpclmulqdq $16, %xmm5, %xmm0, %xmm7
vpclmulqdq $0x01, %xmm5, %xmm0, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm0, %xmm2
vpxor %xmm3, %xmm7, %xmm7
vpslldq $8, %xmm7, %xmm3
vpsrldq $8, %xmm7, %xmm7
vpxor %xmm2, %xmm3, %xmm3
vpclmulqdq $0x11, %xmm5, %xmm0, %xmm0
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm3, %xmm2
vpshufd $0x4e, %xmm3, %xmm3
vpxor %xmm2, %xmm3, %xmm3
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm3, %xmm2
vpshufd $0x4e, %xmm3, %xmm3
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm3, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
# Byte-swap back to wire order and XOR with E(K, Y0) to form the tag.
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vpxor %xmm6, %xmm0, %xmm0
# store_tag
# A full 16-byte tag is stored directly; shorter tags are copied one byte
# at a time through the stack scratch area.
cmpl $16, %edx
je L_AES_GCM_encrypt_final_avx2_store_tag_16
xorq %r10, %r10
vmovdqu %xmm0, (%rsp)
L_AES_GCM_encrypt_final_avx2_store_tag_loop:
movzbl (%rsp,%r10,1), %r11d
movb %r11b, (%rsi,%r10,1)
incl %r10d
cmpl %edx, %r10d
jne L_AES_GCM_encrypt_final_avx2_store_tag_loop
jmp L_AES_GCM_encrypt_final_avx2_store_tag_done
L_AES_GCM_encrypt_final_avx2_store_tag_16:
vmovdqu %xmm0, (%rsi)
L_AES_GCM_encrypt_final_avx2_store_tag_done:
vzeroupper                      # avoid AVX->SSE transition penalty for callers
addq $16, %rsp
repz retq
#ifndef __APPLE__
.size AES_GCM_encrypt_final_avx2,.-AES_GCM_encrypt_final_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl AES_GCM_decrypt_update_avx2
.type AES_GCM_decrypt_update_avx2,@function
.align 16
AES_GCM_decrypt_update_avx2:
#else
.section __TEXT,__text
.globl _AES_GCM_decrypt_update_avx2
.p2align 4
_AES_GCM_decrypt_update_avx2:
#endif /* __APPLE__ */
# AES_GCM_decrypt_update_avx2 - streaming GCM decrypt step: decrypt a span
# of ciphertext with AES-CTR while folding the ciphertext into the GHASH
# state.  Full 16-byte blocks only; a 128-byte (8 block) interleaved fast
# path is used when enough input is available.
# ABI: System V AMD64.
# In:  rdi      = AES round-key schedule
#      esi      = number of AES rounds (compared against 11/13 below, i.e.
#                  10/12/14 for AES-128/192/256)
#      rdx      = plaintext output buffer
#      rcx      = ciphertext input buffer
#      r8d      = number of bytes to process
#      r9       = GHASH state, read and written in place
#      32(%rsp) = stored hash-key value (doubled below to recover H)
#      40(%rsp) = counter block, read and written in place
# Stack: 0xa8 bytes; (%rsp)..112(%rsp) hold the H^1..H^8 power table for the
# 8-way loop, 128(%rsp) the counter, 144(%rsp) a saved xmm15.
pushq %r13
pushq %r12
pushq %r14
movq %rdx, %r10                 # r10 = out
movq %rcx, %r11                 # r11 = in
movq 32(%rsp), %rax
movq 40(%rsp), %r12
subq $0xa8, %rsp
vmovdqu (%r9), %xmm6            # xmm6 = GHASH state X
vmovdqu (%rax), %xmm5           # xmm5 = stored hash-key value
vmovdqu (%r12), %xmm4           # xmm4 = counter block
# Calculate H
# Multiply the stored key value by x in GF(2^128): one-bit left shift across
# lanes plus conditional XOR of the reduction polynomial.
vpsrlq $63, %xmm5, %xmm1
vpsllq $0x01, %xmm5, %xmm0
vpslldq $8, %xmm1, %xmm1
vpor %xmm1, %xmm0, %xmm0
vpshufd $0xff, %xmm5, %xmm5
vpsrad $31, %xmm5, %xmm5
vpand L_avx2_aes_gcm_mod2_128(%rip), %xmm5, %xmm5
vpxor %xmm0, %xmm5, %xmm5       # xmm5 = H
xorl %r14d, %r14d               # r14d = bytes processed so far
# Fast path only when at least 128 bytes remain.
cmpl $0x80, %r8d
movl %r8d, %r13d
jl L_AES_GCM_decrypt_update_avx2_done_128
andl $0xffffff80, %r13d         # r13d = length rounded down to 128
vmovdqu %xmm4, 128(%rsp)
vmovdqu %xmm15, 144(%rsp)       # saved although volatile in SysV -- generator artifact; TODO confirm
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm3
# Build the H^1..H^8 table used so eight GHASH multiplies per iteration can
# be summed in one reduction (each H^n computed by squaring/multiplying
# with the same Karatsuba + two-step reduction pattern).
# H ^ 1 and H ^ 2
vpclmulqdq $0x00, %xmm5, %xmm5, %xmm9
vpclmulqdq $0x11, %xmm5, %xmm5, %xmm10
vpclmulqdq $16, %xmm3, %xmm9, %xmm8
vpshufd $0x4e, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpclmulqdq $16, %xmm3, %xmm9, %xmm8
vpshufd $0x4e, %xmm9, %xmm9
vpxor %xmm8, %xmm9, %xmm9
vpxor %xmm9, %xmm10, %xmm0
vmovdqu %xmm5, (%rsp)
vmovdqu %xmm0, 16(%rsp)
# H ^ 3 and H ^ 4
vpclmulqdq $16, %xmm5, %xmm0, %xmm11
vpclmulqdq $0x01, %xmm5, %xmm0, %xmm10
vpclmulqdq $0x00, %xmm5, %xmm0, %xmm9
vpclmulqdq $0x11, %xmm5, %xmm0, %xmm12
vpclmulqdq $0x00, %xmm0, %xmm0, %xmm13
vpclmulqdq $0x11, %xmm0, %xmm0, %xmm14
vpxor %xmm10, %xmm11, %xmm11
vpslldq $8, %xmm11, %xmm10
vpsrldq $8, %xmm11, %xmm11
vpxor %xmm9, %xmm10, %xmm10
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm9, %xmm10, %xmm10
vpxor %xmm8, %xmm13, %xmm13
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm11, %xmm12, %xmm12
vpxor %xmm8, %xmm13, %xmm13
vpxor %xmm12, %xmm10, %xmm10
vpxor %xmm14, %xmm13, %xmm2
vpxor %xmm9, %xmm10, %xmm1
vmovdqu %xmm1, 32(%rsp)
vmovdqu %xmm2, 48(%rsp)
# H ^ 5 and H ^ 6
vpclmulqdq $16, %xmm0, %xmm1, %xmm11
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm10
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm9
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm12
vpclmulqdq $0x00, %xmm1, %xmm1, %xmm13
vpclmulqdq $0x11, %xmm1, %xmm1, %xmm14
vpxor %xmm10, %xmm11, %xmm11
vpslldq $8, %xmm11, %xmm10
vpsrldq $8, %xmm11, %xmm11
vpxor %xmm9, %xmm10, %xmm10
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm9, %xmm10, %xmm10
vpxor %xmm8, %xmm13, %xmm13
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm11, %xmm12, %xmm12
vpxor %xmm8, %xmm13, %xmm13
vpxor %xmm12, %xmm10, %xmm10
vpxor %xmm14, %xmm13, %xmm0
vpxor %xmm9, %xmm10, %xmm7
vmovdqu %xmm7, 64(%rsp)
vmovdqu %xmm0, 80(%rsp)
# H ^ 7 and H ^ 8
vpclmulqdq $16, %xmm1, %xmm2, %xmm11
vpclmulqdq $0x01, %xmm1, %xmm2, %xmm10
vpclmulqdq $0x00, %xmm1, %xmm2, %xmm9
vpclmulqdq $0x11, %xmm1, %xmm2, %xmm12
vpclmulqdq $0x00, %xmm2, %xmm2, %xmm13
vpclmulqdq $0x11, %xmm2, %xmm2, %xmm14
vpxor %xmm10, %xmm11, %xmm11
vpslldq $8, %xmm11, %xmm10
vpsrldq $8, %xmm11, %xmm11
vpxor %xmm9, %xmm10, %xmm10
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm9, %xmm10, %xmm10
vpxor %xmm8, %xmm13, %xmm13
vpclmulqdq $16, %xmm3, %xmm10, %xmm9
vpclmulqdq $16, %xmm3, %xmm13, %xmm8
vpshufd $0x4e, %xmm10, %xmm10
vpshufd $0x4e, %xmm13, %xmm13
vpxor %xmm11, %xmm12, %xmm12
vpxor %xmm8, %xmm13, %xmm13
vpxor %xmm12, %xmm10, %xmm10
vpxor %xmm14, %xmm13, %xmm0
vpxor %xmm9, %xmm10, %xmm7
vmovdqu %xmm7, 96(%rsp)
vmovdqu %xmm0, 112(%rsp)
# Main loop: each iteration encrypts 8 counter blocks (CTR keystream) while
# simultaneously GHASHing the 8 ciphertext blocks of the same iteration --
# decrypt hashes the input directly, so no one-iteration lag is needed as in
# the encrypt path.  AES rounds are interleaved with the carry-less
# multiplies to hide their latencies.
L_AES_GCM_decrypt_update_avx2_ghash_128:
# aesenc_128_ghash
leaq (%r11,%r14,1), %rcx        # rcx = in + processed
leaq (%r10,%r14,1), %rdx        # rdx = out + processed
# aesenc_ctr
# Build counters ctr+1 .. ctr+8, byte-swapped for encryption.
vmovdqu 128(%rsp), %xmm0
vmovdqu L_avx2_aes_gcm_bswap_epi64(%rip), %xmm1
vpaddd L_avx2_aes_gcm_one(%rip), %xmm0, %xmm9
vpshufb %xmm1, %xmm0, %xmm8
vpaddd L_avx2_aes_gcm_two(%rip), %xmm0, %xmm10
vpshufb %xmm1, %xmm9, %xmm9
vpaddd L_avx2_aes_gcm_three(%rip), %xmm0, %xmm11
vpshufb %xmm1, %xmm10, %xmm10
vpaddd L_avx2_aes_gcm_four(%rip), %xmm0, %xmm12
vpshufb %xmm1, %xmm11, %xmm11
vpaddd L_avx2_aes_gcm_five(%rip), %xmm0, %xmm13
vpshufb %xmm1, %xmm12, %xmm12
vpaddd L_avx2_aes_gcm_six(%rip), %xmm0, %xmm14
vpshufb %xmm1, %xmm13, %xmm13
vpaddd L_avx2_aes_gcm_seven(%rip), %xmm0, %xmm15
vpshufb %xmm1, %xmm14, %xmm14
vpaddd L_avx2_aes_gcm_eight(%rip), %xmm0, %xmm0
vpshufb %xmm1, %xmm15, %xmm15
# aesenc_xor
# Initial AddRoundKey for all eight blocks.
vmovdqu (%rdi), %xmm7
vmovdqu %xmm0, 128(%rsp)        # store ctr+8 for the next iteration
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm7, %xmm9, %xmm9
vpxor %xmm7, %xmm10, %xmm10
vpxor %xmm7, %xmm11, %xmm11
vpxor %xmm7, %xmm12, %xmm12
vpxor %xmm7, %xmm13, %xmm13
vpxor %xmm7, %xmm14, %xmm14
vpxor %xmm7, %xmm15, %xmm15
# aesenc_pclmul_1
# GHASH: first ciphertext block, XORed with the running state, multiplied
# by H^8; subsequent blocks use decreasing powers of H.
vmovdqu (%rcx), %xmm1
vmovdqu 16(%rdi), %xmm0
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vmovdqu 112(%rsp), %xmm2        # H^8
vpxor %xmm6, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm5
vpclmulqdq $0x01, %xmm2, %xmm1, %xmm3
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm6
vpclmulqdq $0x11, %xmm2, %xmm1, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_2
vmovdqu 16(%rcx), %xmm1
vmovdqu 96(%rsp), %xmm0         # H^7
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 32(%rdi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu 32(%rcx), %xmm1
vmovdqu 80(%rsp), %xmm0         # H^6
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 48(%rdi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu 48(%rcx), %xmm1
vmovdqu 64(%rsp), %xmm0         # H^5
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 64(%rdi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu 64(%rcx), %xmm1
vmovdqu 48(%rsp), %xmm0         # H^4
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 80(%rdi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu 80(%rcx), %xmm1
vmovdqu 32(%rsp), %xmm0         # H^3
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 96(%rdi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu 96(%rcx), %xmm1
vmovdqu 16(%rsp), %xmm0         # H^2
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 112(%rdi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_n
vmovdqu 112(%rcx), %xmm1
vmovdqu (%rsp), %xmm0           # H^1
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vmovdqu 128(%rdi), %xmm0
vpxor %xmm1, %xmm7, %xmm7
vaesenc %xmm0, %xmm8, %xmm8
vaesenc %xmm0, %xmm9, %xmm9
vaesenc %xmm0, %xmm10, %xmm10
vaesenc %xmm0, %xmm11, %xmm11
vaesenc %xmm0, %xmm12, %xmm12
vaesenc %xmm0, %xmm13, %xmm13
vaesenc %xmm0, %xmm14, %xmm14
vaesenc %xmm0, %xmm15, %xmm15
# aesenc_pclmul_l
# Combine the 8 partial products and reduce to the new GHASH state (xmm6),
# still interleaved with the round-9 AES encryptions.
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm4, %xmm6, %xmm6
vpxor %xmm3, %xmm5, %xmm5
vpslldq $8, %xmm5, %xmm1
vpsrldq $8, %xmm5, %xmm5
vmovdqu 144(%rdi), %xmm4
vmovdqu L_avx2_aes_gcm_mod2_128(%rip), %xmm0
vaesenc %xmm4, %xmm8, %xmm8
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $16, %xmm0, %xmm6, %xmm3
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vpshufd $0x4e, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm0, %xmm6, %xmm3
vaesenc %xmm4, %xmm12, %xmm12
vaesenc %xmm4, %xmm13, %xmm13
vaesenc %xmm4, %xmm14, %xmm14
vpshufd $0x4e, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm7, %xmm6, %xmm6
vaesenc %xmm4, %xmm15, %xmm15
# Extra rounds for AES-192 (nr >= 11 after round 10 check) and AES-256.
cmpl $11, %esi
vmovdqu 160(%rdi), %xmm7
jl L_AES_GCM_decrypt_update_avx2_aesenc_128_ghash_avx_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 176(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
cmpl $13, %esi
vmovdqu 192(%rdi), %xmm7
jl L_AES_GCM_decrypt_update_avx2_aesenc_128_ghash_avx_done
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 208(%rdi), %xmm7
vaesenc %xmm7, %xmm8, %xmm8
vaesenc %xmm7, %xmm9, %xmm9
vaesenc %xmm7, %xmm10, %xmm10
vaesenc %xmm7, %xmm11, %xmm11
vaesenc %xmm7, %xmm12, %xmm12
vaesenc %xmm7, %xmm13, %xmm13
vaesenc %xmm7, %xmm14, %xmm14
vaesenc %xmm7, %xmm15, %xmm15
vmovdqu 224(%rdi), %xmm7
L_AES_GCM_decrypt_update_avx2_aesenc_128_ghash_avx_done:
# aesenc_last
# Final round, then XOR the keystream with the ciphertext to produce
# plaintext; done in two groups of four blocks.
vaesenclast %xmm7, %xmm8, %xmm8
vaesenclast %xmm7, %xmm9, %xmm9
vaesenclast %xmm7, %xmm10, %xmm10
vaesenclast %xmm7, %xmm11, %xmm11
vmovdqu (%rcx), %xmm0
vmovdqu 16(%rcx), %xmm1
vmovdqu 32(%rcx), %xmm2
vmovdqu 48(%rcx), %xmm3
vpxor %xmm0, %xmm8, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vpxor %xmm2, %xmm10, %xmm10
vpxor %xmm3, %xmm11, %xmm11
vmovdqu %xmm8, (%rdx)
vmovdqu %xmm9, 16(%rdx)
vmovdqu %xmm10, 32(%rdx)
vmovdqu %xmm11, 48(%rdx)
vaesenclast %xmm7, %xmm12, %xmm12
vaesenclast %xmm7, %xmm13, %xmm13
vaesenclast %xmm7, %xmm14, %xmm14
vaesenclast %xmm7, %xmm15, %xmm15
vmovdqu 64(%rcx), %xmm0
vmovdqu 80(%rcx), %xmm1
vmovdqu 96(%rcx), %xmm2
vmovdqu 112(%rcx), %xmm3
vpxor %xmm0, %xmm12, %xmm12
vpxor %xmm1, %xmm13, %xmm13
vpxor %xmm2, %xmm14, %xmm14
vpxor %xmm3, %xmm15, %xmm15
vmovdqu %xmm12, 64(%rdx)
vmovdqu %xmm13, 80(%rdx)
vmovdqu %xmm14, 96(%rdx)
vmovdqu %xmm15, 112(%rdx)
# aesenc_128_ghash - end
addl $0x80, %r14d
cmpl %r13d, %r14d
jl L_AES_GCM_decrypt_update_avx2_ghash_128
# Restore H, the counter and the saved xmm15 after the fast path.
vmovdqu (%rsp), %xmm5
vmovdqu 128(%rsp), %xmm4
vmovdqu 144(%rsp), %xmm15
L_AES_GCM_decrypt_update_avx2_done_128:
cmpl %r8d, %r14d
jge L_AES_GCM_decrypt_update_avx2_done_dec
# Tail loop: decrypt and GHASH one 16-byte block at a time.
movl %r8d, %r13d
andl $0xfffffff0, %r13d         # r13d = length rounded down to 16
cmpl %r13d, %r14d
jge L_AES_GCM_decrypt_update_avx2_last_block_done
L_AES_GCM_decrypt_update_avx2_last_block_start:
vmovdqu (%r11,%r14,1), %xmm11   # xmm11 = ciphertext block
vpshufb L_avx2_aes_gcm_bswap_epi64(%rip), %xmm4, %xmm10
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm11, %xmm12
vpaddd L_avx2_aes_gcm_one(%rip), %xmm4, %xmm4
vpxor %xmm6, %xmm12, %xmm12     # fold ciphertext into GHASH input
# aesenc_gfmul_sb
# Single-block GHASH multiply (by H) interleaved with the CTR encryption
# of this block's counter.
vpclmulqdq $0x01, %xmm5, %xmm12, %xmm2
vpclmulqdq $16, %xmm5, %xmm12, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm12, %xmm1
vpclmulqdq $0x11, %xmm5, %xmm12, %xmm8
vpxor (%rdi), %xmm10, %xmm10
vaesenc 16(%rdi), %xmm10, %xmm10
vpxor %xmm2, %xmm3, %xmm3
vpslldq $8, %xmm3, %xmm2
vpsrldq $8, %xmm3, %xmm3
vaesenc 32(%rdi), %xmm10, %xmm10
vpxor %xmm1, %xmm2, %xmm2
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm2, %xmm1
vaesenc 48(%rdi), %xmm10, %xmm10
vaesenc 64(%rdi), %xmm10, %xmm10
vaesenc 80(%rdi), %xmm10, %xmm10
vpshufd $0x4e, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm2, %xmm1
vaesenc 96(%rdi), %xmm10, %xmm10
vaesenc 112(%rdi), %xmm10, %xmm10
vaesenc 128(%rdi), %xmm10, %xmm10
vpshufd $0x4e, %xmm2, %xmm2
vaesenc 144(%rdi), %xmm10, %xmm10
vpxor %xmm3, %xmm8, %xmm8
vpxor %xmm8, %xmm2, %xmm2
vmovdqu 160(%rdi), %xmm0
cmpl $11, %esi
jl L_AES_GCM_decrypt_update_avx2_aesenc_gfmul_sb_last
vaesenc %xmm0, %xmm10, %xmm10
vaesenc 176(%rdi), %xmm10, %xmm10
vmovdqu 192(%rdi), %xmm0
cmpl $13, %esi
jl L_AES_GCM_decrypt_update_avx2_aesenc_gfmul_sb_last
vaesenc %xmm0, %xmm10, %xmm10
vaesenc 208(%rdi), %xmm10, %xmm10
vmovdqu 224(%rdi), %xmm0
L_AES_GCM_decrypt_update_avx2_aesenc_gfmul_sb_last:
vaesenclast %xmm0, %xmm10, %xmm10
vpxor %xmm1, %xmm2, %xmm6       # xmm6 = updated GHASH state
vpxor %xmm11, %xmm10, %xmm10    # plaintext = keystream ^ ciphertext
vmovdqu %xmm10, (%r10,%r14,1)
addl $16, %r14d
cmpl %r13d, %r14d
jl L_AES_GCM_decrypt_update_avx2_last_block_start
L_AES_GCM_decrypt_update_avx2_last_block_done:
L_AES_GCM_decrypt_update_avx2_done_dec:
# Write back the updated GHASH state and counter for the next call.
vmovdqu %xmm6, (%r9)
vmovdqu %xmm4, (%r12)
vzeroupper
addq $0xa8, %rsp
popq %r14
popq %r12
popq %r13
repz retq
#ifndef __APPLE__
.size AES_GCM_decrypt_update_avx2,.-AES_GCM_decrypt_update_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl AES_GCM_decrypt_final_avx2
.type AES_GCM_decrypt_final_avx2,@function
.align 16
AES_GCM_decrypt_final_avx2:
#else
.section __TEXT,__text
.globl _AES_GCM_decrypt_final_avx2
.p2align 4
_AES_GCM_decrypt_final_avx2:
#endif /* __APPLE__ */
# AES_GCM_decrypt_final_avx2 - recompute the GCM tag after a streamed
# decrypt and compare it against the received tag.
# ABI: System V AMD64.
# In:  rdi      = current 16-byte GHASH state
#      rsi      = received authentication tag to check
#      edx      = tag length in bytes (16 takes the SIMD compare path)
#      rcx      = total ciphertext length in bytes
#      r8       = total AAD length in bytes
#      r9       = stored hash-key value (multiplied by x below to recover H)
#      16(%rsp) = pointer to E(K, Y0), the encrypted initial counter block
#      24(%rsp) = pointer to int result: set to 1 on tag match, 0 otherwise
pushq %r12
movq 16(%rsp), %rax
movq 24(%rsp), %r10
subq $16, %rsp                  # 16-byte scratch for partial tag compare
vmovdqu (%rdi), %xmm4           # xmm4 = GHASH state
vmovdqu (%r9), %xmm5            # xmm5 = stored hash-key value
vmovdqu (%rax), %xmm6           # xmm6 = E(K, Y0)
# Multiply the stored key value by x in GF(2^128) to recover H.
vpsrlq $63, %xmm5, %xmm1
vpsllq $0x01, %xmm5, %xmm0
vpslldq $8, %xmm1, %xmm1
vpor %xmm1, %xmm0, %xmm0
vpshufd $0xff, %xmm5, %xmm5
vpsrad $31, %xmm5, %xmm5
vpand L_avx2_aes_gcm_mod2_128(%rip), %xmm5, %xmm5
vpxor %xmm0, %xmm5, %xmm5       # xmm5 = H
# calc_tag
# Fold the bit lengths (len(A) || len(C)) block into the GHASH state.
shlq $3, %rcx                   # bytes -> bits
shlq $3, %r8                    # bytes -> bits
vmovq %rcx, %xmm0
vmovq %r8, %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0
vpxor %xmm4, %xmm0, %xmm0
# ghash_gfmul_red
# Final GHASH multiply (state * H) with Karatsuba split and two-step
# reduction modulo the GCM polynomial.
vpclmulqdq $16, %xmm5, %xmm0, %xmm7
vpclmulqdq $0x01, %xmm5, %xmm0, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm0, %xmm2
vpxor %xmm3, %xmm7, %xmm7
vpslldq $8, %xmm7, %xmm3
vpsrldq $8, %xmm7, %xmm7
vpxor %xmm2, %xmm3, %xmm3
vpclmulqdq $0x11, %xmm5, %xmm0, %xmm0
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm3, %xmm2
vpshufd $0x4e, %xmm3, %xmm3
vpxor %xmm2, %xmm3, %xmm3
vpclmulqdq $16, L_avx2_aes_gcm_mod2_128(%rip), %xmm3, %xmm2
vpshufd $0x4e, %xmm3, %xmm3
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm3, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
# Byte-swap and XOR with E(K, Y0): xmm0 now holds the expected tag.
vpshufb L_avx2_aes_gcm_bswap_mask(%rip), %xmm0, %xmm0
vpxor %xmm6, %xmm0, %xmm0
# cmp_tag
# Partial-length tags: byte-wise compare, OR-accumulating the XOR of each
# byte pair so the comparison takes the same time whether or not it matches.
cmpl $16, %edx
je L_AES_GCM_decrypt_final_avx2_cmp_tag_16
xorq %r11, %r11
xorq %r9, %r9                   # r9b accumulates any mismatched bits
vmovdqu %xmm0, (%rsp)
L_AES_GCM_decrypt_final_avx2_cmp_tag_loop:
movzbl (%rsp,%r11,1), %r12d
xorb (%rsi,%r11,1), %r12b
orb %r12b, %r9b
incl %r11d
cmpl %edx, %r11d
jne L_AES_GCM_decrypt_final_avx2_cmp_tag_loop
cmpb $0x00, %r9b
sete %r9b                       # 1 only if every byte matched
jmp L_AES_GCM_decrypt_final_avx2_cmp_tag_done
L_AES_GCM_decrypt_final_avx2_cmp_tag_16:
# Full 16-byte tag: single SIMD compare; all-ones byte mask <=> match.
vmovdqu (%rsi), %xmm1
vpcmpeqb %xmm1, %xmm0, %xmm0
vpmovmskb %xmm0, %r11
# If all 16 bytes matched (%r11d == 0xffff) the result is 1, else 0.
xorl %r9d, %r9d
cmpl $0xffff, %r11d
sete %r9b
L_AES_GCM_decrypt_final_avx2_cmp_tag_done:
movl %r9d, (%r10)               # *res = match flag
vzeroupper
addq $16, %rsp
popq %r12
repz retq
#ifndef __APPLE__
.size AES_GCM_decrypt_final_avx2,.-AES_GCM_decrypt_final_avx2
#endif /* __APPLE__ */
#endif /* WOLFSSL_AESGCM_STREAM */
#endif /* HAVE_INTEL_AVX2 */
#endif /* WOLFSSL_X86_64_BUILD */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/CRC/CRC_UserDefinedPolynomial/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; 1 KiB stack reserved in an uninitialized (NOINIT), 8-byte-aligned area.
; The label placed AFTER the reserved space is the initial stack pointer,
; because the Cortex-M stack grows downwards.
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp ; initial SP value; becomes vector table entry 0
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; 512-byte heap; __heap_base/__heap_limit bound it for the C library
; (see __user_initial_stackheap at the end of this file).
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8 ; guarantee 8-byte stack alignment (AAPCS requirement)
THUMB ; Thumb code only (Cortex-M0 has no ARM state)
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
; Cortex-M0 layout: word 0 = initial SP, word 1 = reset vector,
; entries 2-15 = system exceptions, then the STM32F072 peripheral IRQs.
; Entry order is fixed by the hardware; do not reorder these DCDs.
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC (touch sensing controller)
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
; Table size in bytes, computed from the bracketing labels.
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
; Calls SystemInit (clock/PLL setup) and then jumps to the ARM C library
; entry __main, which performs scatter-loading and eventually calls main().
; Declared [WEAK] so an application can provide its own Reset_Handler.
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0 ; never returns
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Each is a weak symbol spinning on "B ." (branch-to-self) so a debugger
; can inspect the fault state; applications override them by defining a
; strong symbol of the same name.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC ; "\" continues the label onto the PROC line (name too long for one column)
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Catch-all for every peripheral interrupt: all IRQ names are exported as
; weak aliases of this single handler. The labels below share one address
; and fall through to the "B ." infinite loop. Defining a strong symbol
; with any of these names in C overrides the corresponding alias.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B . ; spin forever on any unexpected interrupt
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB, the library reads __initial_sp/__heap_base/__heap_limit
; directly, so only the symbols are exported. With the full C library,
; __user_initial_stackheap returns the two-region memory bounds:
;   R0 = heap base, R1 = stack base (top), R2 = heap limit, R3 = stack limit.
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/CRC/CRC_UserDefinedPolynomial/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* GNU as setup: unified syntax, Cortex-M0 (Thumb-only core), software FP. */
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* NOTE(review): these .word directives emit the symbol addresses into the
current section; they chiefly keep the linker-script symbols referenced. */
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/* Reset entry point: set SP, copy .data from flash to SRAM, zero .bss,
then call SystemInit, the C runtime constructors and main(). Never returns. */
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM.
r1 = byte offset copied so far; r0/r3 are reloaded each iteration. */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* word from load address (flash) */
str r3, [r0, r1] /* to run address (SRAM); r0 = _sdata from the loop test */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit /* continue while _sdata + offset < _edata */
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever /* trap here if main() ever returns */
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* All peripheral IRQ names are weakly aliased to this handler via the
* .thumb_set directives later in this file.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
* Layout: initial SP, reset vector, 14 system-exception slots, then the
* STM32F072 peripheral IRQ vectors in hardware order. Do not reorder.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/* NOTE(review): this .size is evaluated before the label, so it records 0;
it matches ST's original file and is harmless — confirm before relying on
the symbol size. */
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
* (.thumb_set also marks the alias as a Thumb function so the vector table
* entries get the required LSB set.)
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/CRC/CRC_UserDefinedPolynomial/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
; IAR vector table: entry 0 is the initial SP (end of the CSTACK section,
; since the stack grows down), entry 1 the reset vector, then the Cortex-M0
; system exceptions and STM32F072 peripheral IRQs in hardware order.
__vector_table
DCD sfe(CSTACK) ; sfe() = section-end address of CSTACK = initial SP
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
;; Each handler is a PUBWEAK symbol in its own NOROOT (discardable) section
;; and simply branches to itself. An application overrides a handler by
;; defining a strong symbol with the same name. Reset_Handler calls
;; SystemInit and then the IAR C runtime entry __iar_program_start.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0 ; hand over to the IAR C startup; never returns
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/CRC/CRC_Example/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Catch-all IRQ handler: every peripheral interrupt label below is a weak
; alias of the same spin loop; defining any of these symbols elsewhere
; replaces the stub for that interrupt only.
Default_Handler PROC
        EXPORT  WWDG_IRQHandler                [WEAK]
        EXPORT  PVD_VDDIO2_IRQHandler          [WEAK]
        EXPORT  RTC_IRQHandler                 [WEAK]
        EXPORT  FLASH_IRQHandler               [WEAK]
        EXPORT  RCC_CRS_IRQHandler             [WEAK]
        EXPORT  EXTI0_1_IRQHandler             [WEAK]
        EXPORT  EXTI2_3_IRQHandler             [WEAK]
        EXPORT  EXTI4_15_IRQHandler            [WEAK]
        EXPORT  TSC_IRQHandler                 [WEAK]
        EXPORT  DMA1_Channel1_IRQHandler       [WEAK]
        EXPORT  DMA1_Channel2_3_IRQHandler     [WEAK]
        EXPORT  DMA1_Channel4_5_6_7_IRQHandler [WEAK]
        EXPORT  ADC1_COMP_IRQHandler           [WEAK]
        EXPORT  TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
        EXPORT  TIM1_CC_IRQHandler             [WEAK]
        EXPORT  TIM2_IRQHandler                [WEAK]
        EXPORT  TIM3_IRQHandler                [WEAK]
        EXPORT  TIM6_DAC_IRQHandler            [WEAK]
        EXPORT  TIM7_IRQHandler                [WEAK]
        EXPORT  TIM14_IRQHandler               [WEAK]
        EXPORT  TIM15_IRQHandler               [WEAK]
        EXPORT  TIM16_IRQHandler               [WEAK]
        EXPORT  TIM17_IRQHandler               [WEAK]
        EXPORT  I2C1_IRQHandler                [WEAK]
        EXPORT  I2C2_IRQHandler                [WEAK]
        EXPORT  SPI1_IRQHandler                [WEAK]
        EXPORT  SPI2_IRQHandler                [WEAK]
        EXPORT  USART1_IRQHandler              [WEAK]
        EXPORT  USART2_IRQHandler              [WEAK]
        EXPORT  USART3_4_IRQHandler            [WEAK]
        EXPORT  CEC_CAN_IRQHandler             [WEAK]
        EXPORT  USB_IRQHandler                 [WEAK]
; All labels fall through to the same infinite loop.
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
        B       .                              ; spin; preserves state for a debugger
        ENDP
        ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLib the stack/heap symbols are exported directly; otherwise the
; two-region retargeting hook __user_initial_stackheap returns the region
; bounds in R0-R3 (R0=heap base, R1=stack top, R2=heap limit, R3=stack base).
        IF      :DEF:__MICROLIB
        EXPORT  __initial_sp
        EXPORT  __heap_base
        EXPORT  __heap_limit
        ELSE
        IMPORT  __use_two_region_memory
        EXPORT  __user_initial_stackheap
__user_initial_stackheap
        LDR     R0, = Heap_Mem
        LDR     R1, =(Stack_Mem + Stack_Size)
        LDR     R2, = (Heap_Mem + Heap_Size)
        LDR     R3, = Stack_Mem
        BX      LR
        ALIGN
        ENDIF
        END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
/*
 * ---------------------------------------------------------------------------
 * Corpus separator (reconstructed from a mangled dataset table row):
 *   repo: aegean-odyssey/mpmd_marlin_1.1.x   size: 10,877 bytes
 *   path: STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/CRC/CRC_Example/SW4STM32/startup_stm32f072xb.s
 * ---------------------------------------------------------------------------
 */
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
  .syntax unified
  .cpu cortex-m0
  .fpu softvfp /* Cortex-M0 has no FPU; directive kept for toolchain uniformity */
  .thumb

  .global g_pfnVectors
  .global Default_Handler

/* start address for the initialization values of the .data section.
defined in linker script */
  .word _sidata
/* start address for the .data section. defined in linker script */
  .word _sdata
/* end address for the .data section. defined in linker script */
  .word _edata
/* start address for the .bss section. defined in linker script */
  .word _sbss
/* end address for the .bss section. defined in linker script */
  .word _ebss
  .section .text.Reset_Handler
  .weak Reset_Handler
  .type Reset_Handler, %function
/* Reset entry: set SP, copy .data init values from flash to SRAM, zero .bss,
   then run SystemInit, the C++ static constructors, and main(). */
Reset_Handler:
  ldr r0, =_estack
  mov sp, r0 /* set stack pointer */

/* Copy the data segment initializers from flash to SRAM */
  movs r1, #0 /* r1 = byte offset into .data, advanced by 4 each pass */
  b LoopCopyDataInit
CopyDataInit:
  ldr r3, =_sidata
  ldr r3, [r3, r1] /* load init word from flash image */
  str r3, [r0, r1] /* store into SRAM .data (r0 = _sdata) */
  adds r1, r1, #4
LoopCopyDataInit:
  ldr r0, =_sdata
  ldr r3, =_edata
  adds r2, r0, r1
  cmp r2, r3
  bcc CopyDataInit /* unsigned: loop while _sdata + offset < _edata */
  ldr r2, =_sbss
  b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
  movs r3, #0
  str r3, [r2]
  adds r2, r2, #4
LoopFillZerobss:
  ldr r3, = _ebss
  cmp r2, r3
  bcc FillZerobss
/* Call the clock system intitialization function.*/
  bl SystemInit
/* Call static constructors */
  bl __libc_init_array
/* Call the application's entry point.*/
  bl main
LoopForever:
  b LoopForever /* main() should never return; spin if it does */
  .size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
  .section .text.Default_Handler,"ax",%progbits
/* Catch-all handler for unexpected interrupts: spin forever so the
   system state is preserved for a debugger. */
Default_Handler:
Infinite_Loop:
  b Infinite_Loop
  .size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
  .section .isr_vector,"a",%progbits
  .type g_pfnVectors, %object
  /* NOTE(review): .size is evaluated here, before the label, so it records 0;
     harmless for linking, but conventionally placed after the table. */
  .size g_pfnVectors, .-g_pfnVectors

/* Cortex-M0 vector table: word 0 = initial SP, word 1 = reset vector,
   then the system exceptions and the 32 STM32F072 peripheral IRQ vectors. */
g_pfnVectors:
  .word _estack
  .word Reset_Handler
  .word NMI_Handler
  .word HardFault_Handler
  .word 0
  .word 0
  .word 0
  .word 0
  .word 0
  .word 0
  .word 0
  .word SVC_Handler
  .word 0
  .word 0
  .word PendSV_Handler
  .word SysTick_Handler
  .word WWDG_IRQHandler /* Window WatchDog */
  .word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
  .word RTC_IRQHandler /* RTC through the EXTI line */
  .word FLASH_IRQHandler /* FLASH */
  .word RCC_CRS_IRQHandler /* RCC and CRS */
  .word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
  .word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
  .word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
  .word TSC_IRQHandler /* TSC */
  .word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
  .word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
  .word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
  .word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
  .word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
  .word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
  .word TIM2_IRQHandler /* TIM2 */
  .word TIM3_IRQHandler /* TIM3 */
  .word TIM6_DAC_IRQHandler /* TIM6 and DAC */
  .word TIM7_IRQHandler /* TIM7 */
  .word TIM14_IRQHandler /* TIM14 */
  .word TIM15_IRQHandler /* TIM15 */
  .word TIM16_IRQHandler /* TIM16 */
  .word TIM17_IRQHandler /* TIM17 */
  .word I2C1_IRQHandler /* I2C1 */
  .word I2C2_IRQHandler /* I2C2 */
  .word SPI1_IRQHandler /* SPI1 */
  .word SPI2_IRQHandler /* SPI2 */
  .word USART1_IRQHandler /* USART1 */
  .word USART2_IRQHandler /* USART2 */
  .word USART3_4_IRQHandler /* USART3 and USART4 */
  .word CEC_CAN_IRQHandler /* CEC and CAN */
  .word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Each handler symbol is a weak Thumb alias of Default_Handler; a strong
   definition anywhere in the program overrides the alias per symbol. */
  .weak NMI_Handler
  .thumb_set NMI_Handler,Default_Handler
  .weak HardFault_Handler
  .thumb_set HardFault_Handler,Default_Handler
  .weak SVC_Handler
  .thumb_set SVC_Handler,Default_Handler
  .weak PendSV_Handler
  .thumb_set PendSV_Handler,Default_Handler
  .weak SysTick_Handler
  .thumb_set SysTick_Handler,Default_Handler
  .weak WWDG_IRQHandler
  .thumb_set WWDG_IRQHandler,Default_Handler
  .weak PVD_VDDIO2_IRQHandler
  .thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
  .weak RTC_IRQHandler
  .thumb_set RTC_IRQHandler,Default_Handler
  .weak FLASH_IRQHandler
  .thumb_set FLASH_IRQHandler,Default_Handler
  .weak RCC_CRS_IRQHandler
  .thumb_set RCC_CRS_IRQHandler,Default_Handler
  .weak EXTI0_1_IRQHandler
  .thumb_set EXTI0_1_IRQHandler,Default_Handler
  .weak EXTI2_3_IRQHandler
  .thumb_set EXTI2_3_IRQHandler,Default_Handler
  .weak EXTI4_15_IRQHandler
  .thumb_set EXTI4_15_IRQHandler,Default_Handler
  .weak TSC_IRQHandler
  .thumb_set TSC_IRQHandler,Default_Handler
  .weak DMA1_Channel1_IRQHandler
  .thumb_set DMA1_Channel1_IRQHandler,Default_Handler
  .weak DMA1_Channel2_3_IRQHandler
  .thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
  .weak DMA1_Channel4_5_6_7_IRQHandler
  .thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
  .weak ADC1_COMP_IRQHandler
  .thumb_set ADC1_COMP_IRQHandler,Default_Handler
  .weak TIM1_BRK_UP_TRG_COM_IRQHandler
  .thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
  .weak TIM1_CC_IRQHandler
  .thumb_set TIM1_CC_IRQHandler,Default_Handler
  .weak TIM2_IRQHandler
  .thumb_set TIM2_IRQHandler,Default_Handler
  .weak TIM3_IRQHandler
  .thumb_set TIM3_IRQHandler,Default_Handler
  .weak TIM6_DAC_IRQHandler
  .thumb_set TIM6_DAC_IRQHandler,Default_Handler
  .weak TIM7_IRQHandler
  .thumb_set TIM7_IRQHandler,Default_Handler
  .weak TIM14_IRQHandler
  .thumb_set TIM14_IRQHandler,Default_Handler
  .weak TIM15_IRQHandler
  .thumb_set TIM15_IRQHandler,Default_Handler
  .weak TIM16_IRQHandler
  .thumb_set TIM16_IRQHandler,Default_Handler
  .weak TIM17_IRQHandler
  .thumb_set TIM17_IRQHandler,Default_Handler
  .weak I2C1_IRQHandler
  .thumb_set I2C1_IRQHandler,Default_Handler
  .weak I2C2_IRQHandler
  .thumb_set I2C2_IRQHandler,Default_Handler
  .weak SPI1_IRQHandler
  .thumb_set SPI1_IRQHandler,Default_Handler
  .weak SPI2_IRQHandler
  .thumb_set SPI2_IRQHandler,Default_Handler
  .weak USART1_IRQHandler
  .thumb_set USART1_IRQHandler,Default_Handler
  .weak USART2_IRQHandler
  .thumb_set USART2_IRQHandler,Default_Handler
  .weak USART3_4_IRQHandler
  .thumb_set USART3_4_IRQHandler,Default_Handler
  .weak CEC_CAN_IRQHandler
  .thumb_set CEC_CAN_IRQHandler,Default_Handler
  .weak USB_IRQHandler
  .thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
; -----------------------------------------------------------------------------
; Corpus separator (reconstructed from a mangled dataset table row):
;   repo: aegean-odyssey/mpmd_marlin_1.1.x   size: 11,516 bytes
;   path: STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/CRC/CRC_Example/EWARM/startup_stm32f072xb.s
; -----------------------------------------------------------------------------
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
        MODULE  ?cstartup

        ;; Forward declaration of sections.
        SECTION CSTACK:DATA:NOROOT(3)           ; stack block, 2^3-byte aligned
        SECTION .intvec:CODE:NOROOT(2)

        EXTERN  __iar_program_start
        EXTERN  SystemInit
        PUBLIC  __vector_table

        DATA
; Cortex-M0 vector table: word 0 is the initial SP (end of CSTACK),
; word 1 the reset vector, then system exceptions and the 32 IRQ vectors.
__vector_table
        DCD     sfe(CSTACK)                     ; initial SP = section end of CSTACK
        DCD     Reset_Handler                  ; Reset Handler
        DCD     NMI_Handler                    ; NMI Handler
        DCD     HardFault_Handler              ; Hard Fault Handler
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     SVC_Handler                    ; SVCall Handler
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     PendSV_Handler                 ; PendSV Handler
        DCD     SysTick_Handler                ; SysTick Handler

        ; External Interrupts
        DCD     WWDG_IRQHandler                ; Window Watchdog
        DCD     PVD_VDDIO2_IRQHandler          ; PVD and VDDIO2 through EXTI Line detect
        DCD     RTC_IRQHandler                 ; RTC through EXTI Line
        DCD     FLASH_IRQHandler               ; FLASH
        DCD     RCC_CRS_IRQHandler             ; RCC and CRS
        DCD     EXTI0_1_IRQHandler             ; EXTI Line 0 and 1
        DCD     EXTI2_3_IRQHandler             ; EXTI Line 2 and 3
        DCD     EXTI4_15_IRQHandler            ; EXTI Line 4 to 15
        DCD     TSC_IRQHandler                 ; TSC
        DCD     DMA1_Channel1_IRQHandler       ; DMA1 Channel 1
        DCD     DMA1_Channel2_3_IRQHandler     ; DMA1 Channel 2 and Channel 3
        DCD     DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
        DCD     ADC1_COMP_IRQHandler           ; ADC1, COMP1 and COMP2
        DCD     TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
        DCD     TIM1_CC_IRQHandler             ; TIM1 Capture Compare
        DCD     TIM2_IRQHandler                ; TIM2
        DCD     TIM3_IRQHandler                ; TIM3
        DCD     TIM6_DAC_IRQHandler            ; TIM6 and DAC
        DCD     TIM7_IRQHandler                ; TIM7
        DCD     TIM14_IRQHandler               ; TIM14
        DCD     TIM15_IRQHandler               ; TIM15
        DCD     TIM16_IRQHandler               ; TIM16
        DCD     TIM17_IRQHandler               ; TIM17
        DCD     I2C1_IRQHandler                ; I2C1
        DCD     I2C2_IRQHandler                ; I2C2
        DCD     SPI1_IRQHandler                ; SPI1
        DCD     SPI2_IRQHandler                ; SPI2
        DCD     USART1_IRQHandler              ; USART1
        DCD     USART2_IRQHandler              ; USART2
        DCD     USART3_4_IRQHandler            ; USART3 and USART4
        DCD     CEC_CAN_IRQHandler             ; CEC and CAN
        DCD     USB_IRQHandler                 ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
        THUMB

; Reset entry: run SystemInit before any initialised data exists, then jump
; to the IAR runtime entry __iar_program_start (data init, then main()).
        PUBWEAK Reset_Handler
        SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
        LDR     R0, =SystemInit
        BLX     R0
        LDR     R0, =__iar_program_start
        BX      R0                              ; never returns
; Weak default handlers: each symbol is PUBWEAK, so a strong definition in
; application code overrides its stub; each stub just spins on itself,
; preserving state for a debugger.
        PUBWEAK NMI_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
        B NMI_Handler

        PUBWEAK HardFault_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
        B HardFault_Handler

        PUBWEAK SVC_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
        B SVC_Handler

        PUBWEAK PendSV_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
        B PendSV_Handler

        PUBWEAK SysTick_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
        B SysTick_Handler

        PUBWEAK WWDG_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
        B WWDG_IRQHandler

        PUBWEAK PVD_VDDIO2_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
        B PVD_VDDIO2_IRQHandler

        PUBWEAK RTC_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
        B RTC_IRQHandler

        PUBWEAK FLASH_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
        B FLASH_IRQHandler

        PUBWEAK RCC_CRS_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
        B RCC_CRS_IRQHandler

        PUBWEAK EXTI0_1_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
        B EXTI0_1_IRQHandler

        PUBWEAK EXTI2_3_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
        B EXTI2_3_IRQHandler

        PUBWEAK EXTI4_15_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
        B EXTI4_15_IRQHandler

        PUBWEAK TSC_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
        B TSC_IRQHandler

        PUBWEAK DMA1_Channel1_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
        B DMA1_Channel1_IRQHandler

        PUBWEAK DMA1_Channel2_3_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
        B DMA1_Channel2_3_IRQHandler

        PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
        B DMA1_Channel4_5_6_7_IRQHandler

        PUBWEAK ADC1_COMP_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
        B ADC1_COMP_IRQHandler

        PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
        B TIM1_BRK_UP_TRG_COM_IRQHandler

        PUBWEAK TIM1_CC_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
        B TIM1_CC_IRQHandler

        PUBWEAK TIM2_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
        B TIM2_IRQHandler

        PUBWEAK TIM3_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
        B TIM3_IRQHandler

        PUBWEAK TIM6_DAC_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
        B TIM6_DAC_IRQHandler

        PUBWEAK TIM7_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
        B TIM7_IRQHandler

        PUBWEAK TIM14_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
        B TIM14_IRQHandler

        PUBWEAK TIM15_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
        B TIM15_IRQHandler

        PUBWEAK TIM16_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
        B TIM16_IRQHandler

        PUBWEAK TIM17_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
        B TIM17_IRQHandler

        PUBWEAK I2C1_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
        B I2C1_IRQHandler

        PUBWEAK I2C2_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
        B I2C2_IRQHandler

        PUBWEAK SPI1_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
        B SPI1_IRQHandler

        PUBWEAK SPI2_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
        B SPI2_IRQHandler

        PUBWEAK USART1_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
        B USART1_IRQHandler

        PUBWEAK USART2_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
        B USART2_IRQHandler

        PUBWEAK USART3_4_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
        B USART3_4_IRQHandler

        PUBWEAK CEC_CAN_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
        B CEC_CAN_IRQHandler

        PUBWEAK USB_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
        B USB_IRQHandler

        END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
; -----------------------------------------------------------------------------
; Corpus separator (reconstructed from a mangled dataset table row):
;   repo: aegean-odyssey/mpmd_marlin_1.1.x   size: 11,444 bytes
;   path: STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/CRC/CRC_Bytes_Stream_7bit_CRC/MDK-ARM/startup_stm32f072xb.s
; -----------------------------------------------------------------------------
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size      EQU     0x400                  ; 1 KiB main stack

                AREA    STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem       SPACE   Stack_Size
__initial_sp                                   ; top of stack (vector table word 0)

; <h> Heap Configuration
;   <o>  Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size       EQU     0x200                  ; 512 B heap

                AREA    HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem        SPACE   Heap_Size
__heap_limit
        PRESERVE8
        THUMB

; Vector Table Mapped to Address 0 at Reset
        AREA    RESET, DATA, READONLY
        EXPORT  __Vectors
        EXPORT  __Vectors_End
        EXPORT  __Vectors_Size

; Word 0 is the initial stack pointer; the remaining words are exception
; and IRQ handler addresses for the STM32F072x8/xB (32 peripheral IRQs).
__Vectors       DCD     __initial_sp                   ; Top of Stack
                DCD     Reset_Handler                  ; Reset Handler
                DCD     NMI_Handler                    ; NMI Handler
                DCD     HardFault_Handler              ; Hard Fault Handler
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     SVC_Handler                    ; SVCall Handler
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     PendSV_Handler                 ; PendSV Handler
                DCD     SysTick_Handler                ; SysTick Handler

                ; External Interrupts
                DCD     WWDG_IRQHandler                ; Window Watchdog
                DCD     PVD_VDDIO2_IRQHandler          ; PVD through EXTI Line detect
                DCD     RTC_IRQHandler                 ; RTC through EXTI Line
                DCD     FLASH_IRQHandler               ; FLASH
                DCD     RCC_CRS_IRQHandler             ; RCC and CRS
                DCD     EXTI0_1_IRQHandler             ; EXTI Line 0 and 1
                DCD     EXTI2_3_IRQHandler             ; EXTI Line 2 and 3
                DCD     EXTI4_15_IRQHandler            ; EXTI Line 4 to 15
                DCD     TSC_IRQHandler                 ; TS
                DCD     DMA1_Channel1_IRQHandler       ; DMA1 Channel 1
                DCD     DMA1_Channel2_3_IRQHandler     ; DMA1 Channel 2 and Channel 3
                DCD     DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
                DCD     ADC1_COMP_IRQHandler           ; ADC1, COMP1 and COMP2
                DCD     TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
                DCD     TIM1_CC_IRQHandler             ; TIM1 Capture Compare
                DCD     TIM2_IRQHandler                ; TIM2
                DCD     TIM3_IRQHandler                ; TIM3
                DCD     TIM6_DAC_IRQHandler            ; TIM6 and DAC
                DCD     TIM7_IRQHandler                ; TIM7
                DCD     TIM14_IRQHandler               ; TIM14
                DCD     TIM15_IRQHandler               ; TIM15
                DCD     TIM16_IRQHandler               ; TIM16
                DCD     TIM17_IRQHandler               ; TIM17
                DCD     I2C1_IRQHandler                ; I2C1
                DCD     I2C2_IRQHandler                ; I2C2
                DCD     SPI1_IRQHandler                ; SPI1
                DCD     SPI2_IRQHandler                ; SPI2
                DCD     USART1_IRQHandler              ; USART1
                DCD     USART2_IRQHandler              ; USART2
                DCD     USART3_4_IRQHandler            ; USART3 & USART4
                DCD     CEC_CAN_IRQHandler             ; CEC and CAN
                DCD     USB_IRQHandler                 ; USB
__Vectors_End

__Vectors_Size  EQU     __Vectors_End - __Vectors
        AREA    |.text|, CODE, READONLY

; Reset handler routine
; Calls SystemInit before any C runtime setup, then jumps to the C library
; entry __main (scatter-loads RW/ZI data, then calls main()).
Reset_Handler    PROC
        EXPORT  Reset_Handler                 [WEAK]
        IMPORT  __main
        IMPORT  SystemInit
        LDR     R0, =SystemInit
        BLX     R0
        LDR     R0, =__main
        BX      R0                            ; __main never returns
        ENDP

; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler     PROC
        EXPORT  NMI_Handler                   [WEAK]
        B       .
        ENDP
HardFault_Handler\
                PROC
        EXPORT  HardFault_Handler             [WEAK]
        B       .
        ENDP
SVC_Handler     PROC
        EXPORT  SVC_Handler                   [WEAK]
        B       .
        ENDP
PendSV_Handler  PROC
        EXPORT  PendSV_Handler                [WEAK]
        B       .
        ENDP
SysTick_Handler PROC
        EXPORT  SysTick_Handler               [WEAK]
        B       .
        ENDP
; Catch-all IRQ handler: every peripheral interrupt label below is a weak
; alias of the same spin loop; defining any of these symbols elsewhere
; replaces the stub for that interrupt only.
Default_Handler PROC
        EXPORT  WWDG_IRQHandler                [WEAK]
        EXPORT  PVD_VDDIO2_IRQHandler          [WEAK]
        EXPORT  RTC_IRQHandler                 [WEAK]
        EXPORT  FLASH_IRQHandler               [WEAK]
        EXPORT  RCC_CRS_IRQHandler             [WEAK]
        EXPORT  EXTI0_1_IRQHandler             [WEAK]
        EXPORT  EXTI2_3_IRQHandler             [WEAK]
        EXPORT  EXTI4_15_IRQHandler            [WEAK]
        EXPORT  TSC_IRQHandler                 [WEAK]
        EXPORT  DMA1_Channel1_IRQHandler       [WEAK]
        EXPORT  DMA1_Channel2_3_IRQHandler     [WEAK]
        EXPORT  DMA1_Channel4_5_6_7_IRQHandler [WEAK]
        EXPORT  ADC1_COMP_IRQHandler           [WEAK]
        EXPORT  TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
        EXPORT  TIM1_CC_IRQHandler             [WEAK]
        EXPORT  TIM2_IRQHandler                [WEAK]
        EXPORT  TIM3_IRQHandler                [WEAK]
        EXPORT  TIM6_DAC_IRQHandler            [WEAK]
        EXPORT  TIM7_IRQHandler                [WEAK]
        EXPORT  TIM14_IRQHandler               [WEAK]
        EXPORT  TIM15_IRQHandler               [WEAK]
        EXPORT  TIM16_IRQHandler               [WEAK]
        EXPORT  TIM17_IRQHandler               [WEAK]
        EXPORT  I2C1_IRQHandler                [WEAK]
        EXPORT  I2C2_IRQHandler                [WEAK]
        EXPORT  SPI1_IRQHandler                [WEAK]
        EXPORT  SPI2_IRQHandler                [WEAK]
        EXPORT  USART1_IRQHandler              [WEAK]
        EXPORT  USART2_IRQHandler              [WEAK]
        EXPORT  USART3_4_IRQHandler            [WEAK]
        EXPORT  CEC_CAN_IRQHandler             [WEAK]
        EXPORT  USB_IRQHandler                 [WEAK]
; All labels fall through to the same infinite loop.
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
        B       .                              ; spin; preserves state for a debugger
        ENDP
        ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLib the stack/heap symbols are exported directly; otherwise the
; two-region retargeting hook __user_initial_stackheap returns the region
; bounds in R0-R3 (R0=heap base, R1=stack top, R2=heap limit, R3=stack base).
        IF      :DEF:__MICROLIB
        EXPORT  __initial_sp
        EXPORT  __heap_base
        EXPORT  __heap_limit
        ELSE
        IMPORT  __use_two_region_memory
        EXPORT  __user_initial_stackheap
__user_initial_stackheap
        LDR     R0, = Heap_Mem
        LDR     R1, =(Stack_Mem + Stack_Size)
        LDR     R2, = (Heap_Mem + Heap_Size)
        LDR     R3, = Stack_Mem
        BX      LR
        ALIGN
        ENDIF
        END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
/*
 * ---------------------------------------------------------------------------
 * Corpus separator (reconstructed from a mangled dataset table row):
 *   repo: aegean-odyssey/mpmd_marlin_1.1.x   size: 10,877 bytes
 *   path: STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/CRC/CRC_Bytes_Stream_7bit_CRC/SW4STM32/startup_stm32f072xb.s
 * ---------------------------------------------------------------------------
 */
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
  .syntax unified
  .cpu cortex-m0
  .fpu softvfp /* Cortex-M0 has no FPU; directive kept for toolchain uniformity */
  .thumb

  .global g_pfnVectors
  .global Default_Handler

/* start address for the initialization values of the .data section.
defined in linker script */
  .word _sidata
/* start address for the .data section. defined in linker script */
  .word _sdata
/* end address for the .data section. defined in linker script */
  .word _edata
/* start address for the .bss section. defined in linker script */
  .word _sbss
/* end address for the .bss section. defined in linker script */
  .word _ebss
  .section .text.Reset_Handler
  .weak Reset_Handler
  .type Reset_Handler, %function
/* Reset entry: set SP, copy .data init values from flash to SRAM, zero .bss,
   then run SystemInit, the C++ static constructors, and main(). */
Reset_Handler:
  ldr r0, =_estack
  mov sp, r0 /* set stack pointer */

/* Copy the data segment initializers from flash to SRAM */
  movs r1, #0 /* r1 = byte offset into .data, advanced by 4 each pass */
  b LoopCopyDataInit
CopyDataInit:
  ldr r3, =_sidata
  ldr r3, [r3, r1] /* load init word from flash image */
  str r3, [r0, r1] /* store into SRAM .data (r0 = _sdata) */
  adds r1, r1, #4
LoopCopyDataInit:
  ldr r0, =_sdata
  ldr r3, =_edata
  adds r2, r0, r1
  cmp r2, r3
  bcc CopyDataInit /* unsigned: loop while _sdata + offset < _edata */
  ldr r2, =_sbss
  b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
  movs r3, #0
  str r3, [r2]
  adds r2, r2, #4
LoopFillZerobss:
  ldr r3, = _ebss
  cmp r2, r3
  bcc FillZerobss
/* Call the clock system intitialization function.*/
  bl SystemInit
/* Call static constructors */
  bl __libc_init_array
/* Call the application's entry point.*/
  bl main
LoopForever:
  b LoopForever /* main() should never return; spin if it does */
  .size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
  .section .text.Default_Handler,"ax",%progbits
/* Catch-all handler for unexpected interrupts: spin forever so the
   system state is preserved for a debugger. */
Default_Handler:
Infinite_Loop:
  b Infinite_Loop
  .size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
  .section .isr_vector,"a",%progbits
  .type g_pfnVectors, %object
  /* NOTE(review): .size is evaluated here, before the label, so it records 0;
     harmless for linking, but conventionally placed after the table. */
  .size g_pfnVectors, .-g_pfnVectors

/* Cortex-M0 vector table: word 0 = initial SP, word 1 = reset vector,
   then the system exceptions and the 32 STM32F072 peripheral IRQ vectors. */
g_pfnVectors:
  .word _estack
  .word Reset_Handler
  .word NMI_Handler
  .word HardFault_Handler
  .word 0
  .word 0
  .word 0
  .word 0
  .word 0
  .word 0
  .word 0
  .word SVC_Handler
  .word 0
  .word 0
  .word PendSV_Handler
  .word SysTick_Handler
  .word WWDG_IRQHandler /* Window WatchDog */
  .word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
  .word RTC_IRQHandler /* RTC through the EXTI line */
  .word FLASH_IRQHandler /* FLASH */
  .word RCC_CRS_IRQHandler /* RCC and CRS */
  .word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
  .word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
  .word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
  .word TSC_IRQHandler /* TSC */
  .word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
  .word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
  .word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
  .word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
  .word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
  .word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
  .word TIM2_IRQHandler /* TIM2 */
  .word TIM3_IRQHandler /* TIM3 */
  .word TIM6_DAC_IRQHandler /* TIM6 and DAC */
  .word TIM7_IRQHandler /* TIM7 */
  .word TIM14_IRQHandler /* TIM14 */
  .word TIM15_IRQHandler /* TIM15 */
  .word TIM16_IRQHandler /* TIM16 */
  .word TIM17_IRQHandler /* TIM17 */
  .word I2C1_IRQHandler /* I2C1 */
  .word I2C2_IRQHandler /* I2C2 */
  .word SPI1_IRQHandler /* SPI1 */
  .word SPI2_IRQHandler /* SPI2 */
  .word USART1_IRQHandler /* USART1 */
  .word USART2_IRQHandler /* USART2 */
  .word USART3_4_IRQHandler /* USART3 and USART4 */
  .word CEC_CAN_IRQHandler /* CEC and CAN */
  .word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Declare every handler weak and alias it (as a Thumb symbol, via
   .thumb_set) to Default_Handler.  Application code overrides a handler
   simply by defining a strong function with the same name. */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/CRC/CRC_Bytes_Stream_7bit_CRC/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
; IAR EWARM cstartup module for STM32F072x8/xB (Cortex-M0).
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
; __vector_table is placed at flash address 0 via the .intvec section.
; Entry 0 is the initial SP (end of CSTACK, via sfe()); entry 1 the reset
; vector; then the Cortex-M0 exceptions and STM32F072 peripheral IRQs.
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
; Reset: call SystemInit (clock setup) and then hand control to the IAR
; runtime entry point, which performs data/bss initialization and calls main().
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
; Default interrupt handlers: each symbol is published weak (PUBWEAK) and
; branches to itself forever.  Application code overrides a handler by
; defining a strong symbol with the same name.
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/DMA/DMA_FLASHToRAM/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Stack: 0x400 (1 KB) uninitialized region; __initial_sp labels its top
; and becomes vector-table entry 0.
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Heap: 0x200 (512 B) region bounded by __heap_base / __heap_limit, consumed
; by __user_initial_stackheap below when microlib is not used.
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
; Entry 0 is the initial SP; entry 1 the reset vector; then Cortex-M0
; exceptions and STM32F072 peripheral interrupts.  Order is fixed by hardware.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
; Calls SystemInit (clock setup) then jumps to the ARM C library entry
; point __main, which initializes data/bss and calls main().
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Each core exception handler is exported weak and spins in place (B .)
; so a debugger can inspect the faulted state.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Default_Handler: all peripheral IRQ symbols are exported weak and share
; this single infinite loop; user code overrides any of them by defining
; a strong function with the same name.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
; All labels below fall through to the same B . instruction.
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With microlib the regions are exported directly; otherwise the standard
; C library calls __user_initial_stackheap to obtain the two-region layout:
; R0 = heap base, R1 = stack top, R2 = heap limit, R3 = stack limit.
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/DMA/DMA_FLASHToRAM/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* GNU as setup: unified Thumb syntax for Cortex-M0, soft-float ABI. */
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/*
 * Reset_Handler: entry point after reset.  Sets SP, copies .data from
 * flash to SRAM, zeroes .bss, runs SystemInit and the C runtime static
 * constructors, then calls main().  Register use: r0 = _sdata base,
 * r1 = byte offset into .data, r2 = current .bss/copy cursor, r3 = scratch.
 */
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0 /* r1 = offset; loop entered at its test below */
b LoopCopyDataInit
CopyDataInit:
/* one word: SRAM[_sdata + r1] = flash[_sidata + r1] */
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1] /* r0 = _sdata, loaded at LoopCopyDataInit */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit /* continue while _sdata + offset < _edata */
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss /* continue while cursor < _ebss */
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever /* main() should never return; trap here if it does */
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
/*
 * Default_Handler: catch-all for any interrupt without a dedicated handler.
 * Spins forever so the system state remains intact for a debugger.
 */
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
.Lspin_forever:
b .Lspin_forever
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
/*
 * g_pfnVectors: Cortex-M0 vector table for STM32F072x8/xB, placed at flash
 * address 0x0000.0000 via the .isr_vector section.  Entry 0 = initial SP,
 * entry 1 = reset vector, then exceptions and peripheral IRQ handlers.
 * NOTE(review): .size is evaluated before the label, so the recorded object
 * size is 0 — an ST-original quirk, harmless to the linked image.
 */
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack /* Initial stack pointer (from linker script) */
.word Reset_Handler /* Reset vector */
.word NMI_Handler
.word HardFault_Handler
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word SVC_Handler
.word 0 /* Reserved */
.word 0 /* Reserved */
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Declare every handler weak and alias it (as a Thumb symbol, via
   .thumb_set) to Default_Handler; defining a strong function with the
   same name anywhere in the application overrides the alias. */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/DMA/DMA_FLASHToRAM/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/UART/UART_TwoBoards_ComIT/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/UART/UART_TwoBoards_ComIT/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system intitialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/UART/UART_TwoBoards_ComIT/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/UART/UART_TwoBoards_ComPolling/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Stack: 0x400 bytes, uninitialized RAM, 2^3 = 8-byte aligned. __initial_sp
; labels the end (highest address) of the area and is loaded into vector
; slot 0 below, so the CPU starts with SP at the top of this region.
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Heap: 0x200 bytes between __heap_base and __heap_limit, handed to the
; ARM C library allocator via __user_initial_stackheap below.
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
; Cortex-M0 vector table: initial SP, then exception and IRQ handler
; addresses. Entry order is fixed by the STM32F072 NVIC mapping - never
; reorder these DCDs.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC (touch sensing controller)
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
; Entry point after reset: call SystemInit (clock/system configuration)
; then jump to the C library entry __main, which performs scatter-loading
; (RAM init) and eventually calls main(). Does not return. Declared [WEAK]
; so an application-supplied Reset_Handler overrides this one.
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Each core exception spins in place ("B .") so a debugger can inspect
; the fault state; all are [WEAK] so user code can override them.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Default_Handler: every peripheral IRQ symbol below is exported [WEAK]
; and labels the same "B ." infinite loop, so any IRQ without a strong
; user-defined handler lands here instead of jumping to garbage.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB the library reads __initial_sp/__heap_base/__heap_limit
; directly. Otherwise the standard C library calls __user_initial_stackheap,
; which must return (two-region memory model):
;   R0 = heap base, R1 = stack base (top), R2 = heap limit, R3 = stack limit.
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/UART/UART_TwoBoards_ComPolling/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* GNU-as startup for STM32F072x8/xB: unified Thumb syntax, Cortex-M0,
 * software floating point. Exports the vector table and the shared
 * default IRQ handler. */
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* Reset_Handler: entry after reset. Sets SP, copies .data initializers
 * from flash to SRAM, zero-fills .bss, then runs SystemInit, C++ static
 * constructors (__libc_init_array) and main(). Never returns.
 * Register roles in the init loops:
 *   r1 = byte offset into .data, r0 = _sdata (reloaded each iteration),
 *   r2 = current address / .bss cursor, r3 = scratch / end marker. */
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
/* If main() ever returns, spin forever. */
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
 * @brief This is the code that gets called when the processor receives an
 * unexpected interrupt. This simply enters an infinite loop, preserving
 * the system state for examination by a debugger.
 *
 * @param None
 * @retval : None
 */
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
/* NOTE(review): the .size directive below is emitted before the label, so
 * the recorded symbol size evaluates to 0. Harmless at runtime, but the
 * ELF metadata is inaccurate; ST ships the file this way. */
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
/* Vector table: slot 0 is the initial SP, then exception/IRQ addresses in
 * the order fixed by the STM32F072 NVIC mapping - do not reorder. */
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/UART/UART_TwoBoards_ComPolling/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
; IAR EWARM startup module for STM32F072x8/xB: declares the CSTACK and
; vector-table sections, the vector table itself, and one weak spin-loop
; handler per exception/IRQ (overridable by defining a strong symbol).
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
; Vector table: slot 0 is the initial SP (sfe = section-end of CSTACK),
; then exception/IRQ addresses in the order fixed by the STM32F072 NVIC
; mapping - do not reorder.
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
;; Each handler below is published with PUBWEAK in its own NOROOT/REORDER
;; text section: the linker keeps it only if referenced, and a strong
;; application-defined symbol of the same name replaces it. Every default
;; body is an infinite self-branch so an unexpected interrupt halts in a
;; debugger-visible state.
THUMB
; Reset: run SystemInit (clocks), then hand off to the IAR runtime entry
; __iar_program_start, which initializes data/bss and calls main().
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/UART/UART_TwoBoards_ComDMA/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Stack: 0x400 bytes, uninitialized RAM, 2^3 = 8-byte aligned. __initial_sp
; labels the end (highest address) of the area and is loaded into vector
; slot 0 below, so the CPU starts with SP at the top of this region.
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Heap: 0x200 bytes between __heap_base and __heap_limit, handed to the
; ARM C library allocator via __user_initial_stackheap below.
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
; Cortex-M0 vector table: initial SP, then exception and IRQ handler
; addresses. Entry order is fixed by the STM32F072 NVIC mapping - never
; reorder these DCDs.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC (touch sensing controller)
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
; Entry point after reset: call SystemInit (clock/system configuration)
; then jump to the C library entry __main, which performs scatter-loading
; (RAM init) and eventually calls main(). Does not return. Declared [WEAK]
; so an application-supplied Reset_Handler overrides this one.
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Each core exception spins in place ("B .") so a debugger can inspect
; the fault state; all are [WEAK] so user code can override them.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Default_Handler: every peripheral IRQ symbol below is exported [WEAK]
; and labels the same "B ." infinite loop, so any IRQ without a strong
; user-defined handler lands here instead of jumping to garbage.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB the library reads __initial_sp/__heap_base/__heap_limit
; directly. Otherwise the standard C library calls __user_initial_stackheap,
; which must return (two-region memory model):
;   R0 = heap base, R1 = stack base (top), R2 = heap limit, R3 = stack limit.
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aenu1/aps3e
| 169,867
|
app/src/main/cpp/rpcs3/3rdparty/wolfssl/wolfssl/wolfcrypt/src/port/arm/armv8-32-curve25519.S
|
/* armv8-32-curve25519
*
* Copyright (C) 2006-2023 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
/* Generated using (from wolfssl):
* cd ../scripts
* ruby ./x25519/x25519.rb arm32 ../wolfssl/wolfcrypt/src/port/arm/armv8-32-curve25519.S
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif /* HAVE_CONFIG_H */
#include <wolfssl/wolfcrypt/settings.h>
#ifdef WOLFSSL_ARMASM
#if !defined(__aarch64__) && defined(__arm__) && !defined(__thumb__)
#ifndef WOLFSSL_ARMASM_INLINE
#if defined(HAVE_CURVE25519) || defined(HAVE_ED25519)
#if !defined(CURVE25519_SMALL) || !defined(ED25519_SMALL)
.text
.align 4
.globl fe_init
.type fe_init, %function
# void fe_init(void)
# No-op: this curve25519 implementation needs no runtime initialization.
fe_init:
bx lr
.size fe_init,.-fe_init
.text
.align 4
.globl fe_add_sub_op
.type fe_add_sub_op, %function
# Compute the sum and difference of two field elements mod 2^255-19 in one pass.
#   r0 -> out_add (a + b), r1 -> out_sub (a - b)
#   r2 -> a, r3 -> b        (each 8 x 32-bit little-endian words)
# The add carry (r12) and sub borrow (lr) are tracked separately so the two
# chains can be interleaved over the four 64-bit word pairs; the saved flag is
# re-materialized with "subs rX, rX, #1" before resuming each chain.
# Final reduction folds any overflow/underflow (including bit 255) back in as
# a multiple of 19 and clears bit 255 of the top word.
# Clobbers: r3-r12, lr.
fe_add_sub_op:
push {lr}
# Add-Sub
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r2]
ldr r5, [r2, #4]
#else
ldrd r4, r5, [r2]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3]
ldr r7, [r3, #4]
#else
ldrd r6, r7, [r3]
#endif
# Add (words 0-1); carry saved in r12
adds r8, r4, r6
mov r12, #0
adcs r9, r5, r7
adc r12, r12, #0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0]
str r9, [r0, #4]
#else
strd r8, r9, [r0]
#endif
# Sub (words 0-1)
subs r10, r4, r6
sbcs r11, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r10, [r1]
str r11, [r1, #4]
#else
strd r10, r11, [r1]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r2, #8]
ldr r5, [r2, #12]
#else
ldrd r4, r5, [r2, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3, #8]
ldr r7, [r3, #12]
#else
ldrd r6, r7, [r3, #8]
#endif
# Sub (words 2-3); borrow saved in lr
sbcs r10, r4, r6
mov lr, #0
sbcs r11, r5, r7
adc lr, lr, #0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r10, [r1, #8]
str r11, [r1, #12]
#else
strd r10, r11, [r1, #8]
#endif
# Add (words 2-3); restore add carry from r12
subs r12, r12, #1
adcs r8, r4, r6
adcs r9, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #8]
str r9, [r0, #12]
#else
strd r8, r9, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r2, #16]
ldr r5, [r2, #20]
#else
ldrd r4, r5, [r2, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3, #16]
ldr r7, [r3, #20]
#else
ldrd r6, r7, [r3, #16]
#endif
# Add (words 4-5); carry saved in r12 again
adcs r8, r4, r6
mov r12, #0
adcs r9, r5, r7
adc r12, r12, #0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #16]
str r9, [r0, #20]
#else
strd r8, r9, [r0, #16]
#endif
# Sub (words 4-5); restore borrow from lr
subs lr, lr, #1
sbcs r10, r4, r6
sbcs r11, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r10, [r1, #16]
str r11, [r1, #20]
#else
strd r10, r11, [r1, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r2, #24]
ldr r5, [r2, #28]
#else
ldrd r4, r5, [r2, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3, #24]
ldr r7, [r3, #28]
#else
ldrd r6, r7, [r3, #24]
#endif
# Sub (words 6-7); lr becomes all-ones on net underflow
sbcs r10, r4, r6
sbcs r11, r5, r7
sbc lr, lr, lr
# Add (words 6-7); r12 holds the final add carry
subs r12, r12, #1
adcs r8, r4, r6
mov r12, #0
adcs r9, r5, r7
adc r12, r12, #0
# Multiply -modulus by overflow
# r12 = 19 * (overflow count: carry out of word 7, plus bit 255 of the sum)
lsl r3, r12, #1
mov r12, #19
orr r3, r3, r9, lsr #31
mul r12, r3, r12
# Add -x*modulus (if overflow)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #8]
ldr r7, [r0, #12]
#else
ldrd r6, r7, [r0, #8]
#endif
adds r4, r4, r12
adcs r5, r5, #0
adcs r6, r6, #0
adcs r7, r7, #0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0]
str r5, [r0, #4]
#else
strd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #8]
str r7, [r0, #12]
#else
strd r6, r7, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #16]
ldr r5, [r0, #20]
#else
ldrd r4, r5, [r0, #16]
#endif
adcs r4, r4, #0
adcs r5, r5, #0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #16]
str r5, [r0, #20]
#else
strd r4, r5, [r0, #16]
#endif
# Clear bit 255 of the sum before the final carry propagation
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r9, r9, #0x80000000
#else
bfc r9, #31, #1
#endif
adcs r8, r8, #0
adc r9, r9, #0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #24]
str r9, [r0, #28]
#else
strd r8, r9, [r0, #24]
#endif
# Multiply -modulus by underflow
# lr = -19 * (underflow count, plus bit 255 of the difference)
lsl r3, lr, #1
mvn lr, #18
orr r3, r3, r11, lsr #31
mul lr, r3, lr
# Sub -x*modulus (if overflow)
ldm r1, {r4, r5, r6, r7, r8, r9}
subs r4, r4, lr
sbcs r5, r5, #0
sbcs r6, r6, #0
sbcs r7, r7, #0
sbcs r8, r8, #0
sbcs r9, r9, #0
# Clear bit 255 of the difference before the final borrow propagation
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r11, r11, #0x80000000
#else
bfc r11, #31, #1
#endif
sbcs r10, r10, #0
sbc r11, r11, #0
stm r1, {r4, r5, r6, r7, r8, r9, r10, r11}
# Done Add-Sub
pop {pc}
.size fe_add_sub_op,.-fe_add_sub_op
.text
.align 4
.globl fe_sub_op
.type fe_sub_op, %function
# Field subtraction mod 2^255-19:  out = a - b.
#   r0 -> out, r1 -> a, r2 -> b   (each 8 x 32-bit little-endian words)
# Any underflow (and bit 255 of the raw difference) is folded back in by
# subtracting a multiple of -19, then bit 255 is cleared.
# Clobbers: r2-r12, lr.
fe_sub_op:
push {lr}
# Sub
ldm r2!, {r6, r7, r8, r9, r10, r11, r12, lr}
ldm r1!, {r2, r3, r4, r5}
subs r6, r2, r6
sbcs r7, r3, r7
sbcs r8, r4, r8
sbcs r9, r5, r9
ldm r1!, {r2, r3, r4, r5}
sbcs r10, r2, r10
sbcs r11, r3, r11
sbcs r12, r4, r12
sbcs lr, r5, lr
# r3 = all-ones on borrow; r2 = -19 * (2*borrow + bit 255 of difference)
sbc r3, r3, r3
mvn r2, #18
lsl r3, r3, #1
orr r3, r3, lr, lsr #31
mul r2, r3, r2
subs r6, r6, r2
sbcs r7, r7, #0
sbcs r8, r8, #0
sbcs r9, r9, #0
sbcs r10, r10, #0
sbcs r11, r11, #0
# Clear bit 255 before the last borrow propagates into the top word
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic lr, lr, #0x80000000
#else
bfc lr, #31, #1
#endif
sbcs r12, r12, #0
sbc lr, lr, #0
stm r0, {r6, r7, r8, r9, r10, r11, r12, lr}
# Done Sub
pop {pc}
.size fe_sub_op,.-fe_sub_op
.text
.align 4
.globl fe_sub
.type fe_sub, %function
# void fe_sub(fe r, const fe a, const fe b):  r = a - b mod 2^255-19.
# AAPCS wrapper: saves the callee-saved registers fe_sub_op clobbers.
fe_sub:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
bl fe_sub_op
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size fe_sub,.-fe_sub
.text
.align 4
.globl fe_add_op
.type fe_add_op, %function
# Field addition mod 2^255-19:  out = a + b.
#   r0 -> out, r1 -> a, r2 -> b   (each 8 x 32-bit little-endian words)
# Any carry out (and bit 255 of the raw sum) is folded back in as a multiple
# of 19, then bit 255 is cleared.
# Clobbers: r2-r12, lr.
fe_add_op:
push {lr}
# Add
ldm r2!, {r6, r7, r8, r9, r10, r11, r12, lr}
ldm r1!, {r2, r3, r4, r5}
adds r6, r2, r6
adcs r7, r3, r7
adcs r8, r4, r8
adcs r9, r5, r9
ldm r1!, {r2, r3, r4, r5}
adcs r10, r2, r10
adcs r11, r3, r11
adcs r12, r4, r12
mov r3, #0
adcs lr, r5, lr
adc r3, r3, #0
# r2 = 19 * (2*carry + bit 255 of sum)
mov r2, #19
lsl r3, r3, #1
orr r3, r3, lr, lsr #31
mul r2, r3, r2
adds r6, r6, r2
adcs r7, r7, #0
adcs r8, r8, #0
adcs r9, r9, #0
adcs r10, r10, #0
adcs r11, r11, #0
# Clear bit 255 before the last carry propagates into the top word
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic lr, lr, #0x80000000
#else
bfc lr, #31, #1
#endif
adcs r12, r12, #0
adc lr, lr, #0
stm r0, {r6, r7, r8, r9, r10, r11, r12, lr}
# Done Add
pop {pc}
.size fe_add_op,.-fe_add_op
.text
.align 4
.globl fe_add
.type fe_add, %function
# void fe_add(fe r, const fe a, const fe b):  r = a + b mod 2^255-19.
# AAPCS wrapper: saves the callee-saved registers fe_add_op clobbers.
fe_add:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
bl fe_add_op
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size fe_add,.-fe_add
#ifdef HAVE_ED25519
.text
.align 4
.globl fe_frombytes
.type fe_frombytes, %function
# void fe_frombytes(fe out, const unsigned char *in):
# Load a 32-byte little-endian value into a field element, masking off the
# top bit (bit 255) so the result fits in 255 bits.
#   r0 -> out (8 words), r1 -> in (32 bytes)
# NOTE(review): uses word loads, so `in` is presumably word-aligned or the
# target tolerates unaligned ldr -- confirm against callers.
fe_frombytes:
push {r4, r5, r6, r7, r8, r9, lr}
ldr r2, [r1]
ldr r3, [r1, #4]
ldr r4, [r1, #8]
ldr r5, [r1, #12]
ldr r6, [r1, #16]
ldr r7, [r1, #20]
ldr r8, [r1, #24]
ldr r9, [r1, #28]
# Drop bit 255
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r9, r9, #0x80000000
#else
bfc r9, #31, #1
#endif
str r2, [r0]
str r3, [r0, #4]
str r4, [r0, #8]
str r5, [r0, #12]
str r6, [r0, #16]
str r7, [r0, #20]
str r8, [r0, #24]
str r9, [r0, #28]
pop {r4, r5, r6, r7, r8, r9, pc}
.size fe_frombytes,.-fe_frombytes
.text
.align 4
.globl fe_tobytes
.type fe_tobytes, %function
# void fe_tobytes(unsigned char *out, const fe in):
# Write the canonical 32-byte little-endian encoding of a field element.
#   r0 -> out (32 bytes), r1 -> in (8 words)
# Constant-time canonical reduction: compute the carry chain of in+19 to
# detect in >= 2^255-19, conditionally add 19, then clear bit 255.
fe_tobytes:
push {r4, r5, r6, r7, r8, r9, lr}
ldm r1, {r2, r3, r4, r5, r6, r7, r8, r9}
# Trial add of 19: top bit of r12 set iff in >= p
adds r12, r2, #19
adcs r12, r3, #0
adcs r12, r4, #0
adcs r12, r5, #0
adcs r12, r6, #0
adcs r12, r7, #0
adcs r12, r8, #0
adc r12, r9, #0
# r12 = 19 if reduction needed, else 0 (branch-free)
asr r12, r12, #31
and r12, r12, #19
adds r2, r2, r12
adcs r3, r3, #0
adcs r4, r4, #0
adcs r5, r5, #0
adcs r6, r6, #0
adcs r7, r7, #0
adcs r8, r8, #0
adc r9, r9, #0
# Drop bit 255 of the canonical value
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r9, r9, #0x80000000
#else
bfc r9, #31, #1
#endif
str r2, [r0]
str r3, [r0, #4]
str r4, [r0, #8]
str r5, [r0, #12]
str r6, [r0, #16]
str r7, [r0, #20]
str r8, [r0, #24]
str r9, [r0, #28]
pop {r4, r5, r6, r7, r8, r9, pc}
.size fe_tobytes,.-fe_tobytes
.text
.align 4
.globl fe_1
.type fe_1, %function
# void fe_1(fe n):  set the field element at r0 to 1 (limbs {1,0,...,0}).
fe_1:
push {r4, r5, r6, r7, r8, r9, lr}
# Set one
mov r2, #1
mov r3, #0
mov r4, #0
mov r5, #0
mov r6, #0
mov r7, #0
mov r8, #0
mov r9, #0
stm r0, {r2, r3, r4, r5, r6, r7, r8, r9}
pop {r4, r5, r6, r7, r8, r9, pc}
.size fe_1,.-fe_1
.text
.align 4
.globl fe_0
.type fe_0, %function
# void fe_0(fe n):  set the field element at r0 to 0 (all 8 limbs zero).
fe_0:
push {r4, r5, r6, r7, r8, r9, lr}
# Set zero
mov r2, #0
mov r3, #0
mov r4, #0
mov r5, #0
mov r6, #0
mov r7, #0
mov r8, #0
mov r9, #0
stm r0, {r2, r3, r4, r5, r6, r7, r8, r9}
pop {r4, r5, r6, r7, r8, r9, pc}
.size fe_0,.-fe_0
.text
.align 4
.globl fe_copy
.type fe_copy, %function
# void fe_copy(fe r, const fe a):  copy 32 bytes from r1 to r0.
# Copies in 16-byte chunks using ldrd/strd pairs on ARMv7+, falling back to
# single-word ldr/str on older architectures.
fe_copy:
push {r4, r5, lr}
# Copy
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r2, [r1]
ldr r3, [r1, #4]
#else
ldrd r2, r3, [r1]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r1, #8]
ldr r5, [r1, #12]
#else
ldrd r4, r5, [r1, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r2, [r0]
str r3, [r0, #4]
#else
strd r2, r3, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #8]
str r5, [r0, #12]
#else
strd r4, r5, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r2, [r1, #16]
ldr r3, [r1, #20]
#else
ldrd r2, r3, [r1, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r1, #24]
ldr r5, [r1, #28]
#else
ldrd r4, r5, [r1, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r2, [r0, #16]
str r3, [r0, #20]
#else
strd r2, r3, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #24]
str r5, [r0, #28]
#else
strd r4, r5, [r0, #24]
#endif
pop {r4, r5, pc}
.size fe_copy,.-fe_copy
.text
.align 4
.globl fe_neg
.type fe_neg, %function
# void fe_neg(fe r, const fe a):  r = -a mod 2^255-19.
# Computes p - a, where p = 2^255-19 is represented by the limb constants
# {0xFFFFFFED, 0xFFFFFFFF x6, 0x7FFFFFFF}.
fe_neg:
push {r4, r5, lr}
mvn lr, #0
# 0xFFFFFFED: low limb of the prime
mvn r12, #18
ldm r1!, {r2, r3, r4, r5}
subs r2, r12, r2
sbcs r3, lr, r3
sbcs r4, lr, r4
sbcs r5, lr, r5
stm r0!, {r2, r3, r4, r5}
# 0x7FFFFFFF: top limb of the prime
mvn r12, #0x80000000
ldm r1!, {r2, r3, r4, r5}
sbcs r2, lr, r2
sbcs r3, lr, r3
sbcs r4, lr, r4
sbc r5, r12, r5
stm r0!, {r2, r3, r4, r5}
pop {r4, r5, pc}
.size fe_neg,.-fe_neg
.text
.align 4
.globl fe_isnonzero
.type fe_isnonzero, %function
# int fe_isnonzero(const fe a):
# Returns nonzero in r0 iff the canonically reduced value of a is nonzero.
# Constant-time: performs the same conditional +19 reduction as fe_tobytes,
# clears bit 255, then ORs all eight limbs together.
fe_isnonzero:
push {r4, r5, r6, r7, r8, r9, lr}
ldm r0, {r2, r3, r4, r5, r6, r7, r8, r9}
# Trial add of 19 to detect a >= p
adds r1, r2, #19
adcs r1, r3, #0
adcs r1, r4, #0
adcs r1, r5, #0
adcs r1, r6, #0
adcs r1, r7, #0
adcs r1, r8, #0
adc r1, r9, #0
# r1 = 19 if reduction needed, else 0
asr r1, r1, #31
and r1, r1, #19
adds r2, r2, r1
adcs r3, r3, #0
adcs r4, r4, #0
adcs r5, r5, #0
adcs r6, r6, #0
adcs r7, r7, #0
adcs r8, r8, #0
adc r9, r9, #0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r9, r9, #0x80000000
#else
bfc r9, #31, #1
#endif
# Fold all limbs into r0: zero iff every limb is zero
orr r2, r2, r3
orr r4, r4, r5
orr r6, r6, r7
orr r8, r8, r9
orr r4, r4, r6
orr r2, r2, r8
orr r0, r2, r4
pop {r4, r5, r6, r7, r8, r9, pc}
.size fe_isnonzero,.-fe_isnonzero
.text
.align 4
.globl fe_isnegative
.type fe_isnegative, %function
# int fe_isnegative(const fe a):
# Returns the "sign" of a: the least significant bit of its canonically
# reduced value.  Computed branch-free as (a[0] & 1) XOR the top bit of the
# a+19 carry chain (which is set exactly when reduction would wrap a).
fe_isnegative:
push {r4, r5, lr}
ldm r0!, {r2, r3, r4, r5}
# Carry chain of a + 19 across all eight limbs; result high word in r1
adds r1, r2, #19
adcs r1, r3, #0
adcs r1, r4, #0
adcs r1, r5, #0
ldm r0, {r2, r3, r4, r5}
adcs r1, r2, #0
adcs r1, r3, #0
adcs r1, r4, #0
# Reload limb 0 (r0 was advanced by the first ldm)
ldr r2, [r0, #-16]
adc r1, r5, #0
and r0, r2, #1
lsr r1, r1, #31
eor r0, r0, r1
pop {r4, r5, pc}
.size fe_isnegative,.-fe_isnegative
#if defined(HAVE_ED25519_MAKE_KEY) || defined(HAVE_ED25519_SIGN)
#ifndef WC_NO_CACHE_RESISTANT
.text
.align 4
.globl fe_cmov_table
.type fe_cmov_table, %function
fe_cmov_table:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r2, r2, #24
asr r2, r2, #24
#else
sxtb r2, r2
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
lsl r3, r2, #24
asr r3, r2, #31
#else
sbfx r3, r2, #7, #1
#endif
eor r12, r2, r3
sub r12, r12, r3
mov r4, #1
mov r5, #0
mov r6, #1
mov r7, #0
mov r8, #0
mov r9, #0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #31
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1]
ldr r11, [r1, #4]
#else
ldrd r10, r11, [r1]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #32]
ldr r11, [r1, #36]
#else
ldrd r10, r11, [r1, #32]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #64]
ldr r11, [r1, #68]
#else
ldrd r10, r11, [r1, #64]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #30
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1]
ldr r11, [r1, #4]
#else
ldrd r10, r11, [r1]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #32]
ldr r11, [r1, #36]
#else
ldrd r10, r11, [r1, #32]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #64]
ldr r11, [r1, #68]
#else
ldrd r10, r11, [r1, #64]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #29
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1]
ldr r11, [r1, #4]
#else
ldrd r10, r11, [r1]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #32]
ldr r11, [r1, #36]
#else
ldrd r10, r11, [r1, #32]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #64]
ldr r11, [r1, #68]
#else
ldrd r10, r11, [r1, #64]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #28
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1]
ldr r11, [r1, #4]
#else
ldrd r10, r11, [r1]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #32]
ldr r11, [r1, #36]
#else
ldrd r10, r11, [r1, #32]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #64]
ldr r11, [r1, #68]
#else
ldrd r10, r11, [r1, #64]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #27
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1]
ldr r11, [r1, #4]
#else
ldrd r10, r11, [r1]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #32]
ldr r11, [r1, #36]
#else
ldrd r10, r11, [r1, #32]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #64]
ldr r11, [r1, #68]
#else
ldrd r10, r11, [r1, #64]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #26
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1]
ldr r11, [r1, #4]
#else
ldrd r10, r11, [r1]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #32]
ldr r11, [r1, #36]
#else
ldrd r10, r11, [r1, #32]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #64]
ldr r11, [r1, #68]
#else
ldrd r10, r11, [r1, #64]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #25
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1]
ldr r11, [r1, #4]
#else
ldrd r10, r11, [r1]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #32]
ldr r11, [r1, #36]
#else
ldrd r10, r11, [r1, #32]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #64]
ldr r11, [r1, #68]
#else
ldrd r10, r11, [r1, #64]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #24
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1]
ldr r11, [r1, #4]
#else
ldrd r10, r11, [r1]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #32]
ldr r11, [r1, #36]
#else
ldrd r10, r11, [r1, #32]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #64]
ldr r11, [r1, #68]
#else
ldrd r10, r11, [r1, #64]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
sub r1, r1, #0x2a0
mvn r10, #18
mvn r11, #0
subs r10, r10, r8
sbcs r11, r11, r9
sbc lr, lr, lr
asr r12, r2, #31
eor r3, r4, r6
and r3, r3, r12
eor r4, r4, r3
eor r6, r6, r3
eor r3, r5, r7
and r3, r3, r12
eor r5, r5, r3
eor r7, r7, r3
eor r10, r10, r8
and r10, r10, r12
eor r8, r8, r10
eor r11, r11, r9
and r11, r11, r12
eor r9, r9, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0]
str r5, [r0, #4]
#else
strd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #32]
str r7, [r0, #36]
#else
strd r6, r7, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #64]
str r9, [r0, #68]
#else
strd r8, r9, [r0, #64]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
lsl r3, r2, #24
asr r3, r2, #31
#else
sbfx r3, r2, #7, #1
#endif
eor r12, r2, r3
sub r12, r12, r3
mov r4, #0
mov r5, #0
mov r6, #0
mov r7, #0
mov r8, #0
mov r9, #0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #31
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #8]
ldr r11, [r1, #12]
#else
ldrd r10, r11, [r1, #8]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #40]
ldr r11, [r1, #44]
#else
ldrd r10, r11, [r1, #40]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #72]
ldr r11, [r1, #76]
#else
ldrd r10, r11, [r1, #72]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #30
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #8]
ldr r11, [r1, #12]
#else
ldrd r10, r11, [r1, #8]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #40]
ldr r11, [r1, #44]
#else
ldrd r10, r11, [r1, #40]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #72]
ldr r11, [r1, #76]
#else
ldrd r10, r11, [r1, #72]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #29
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #8]
ldr r11, [r1, #12]
#else
ldrd r10, r11, [r1, #8]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #40]
ldr r11, [r1, #44]
#else
ldrd r10, r11, [r1, #40]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #72]
ldr r11, [r1, #76]
#else
ldrd r10, r11, [r1, #72]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #28
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #8]
ldr r11, [r1, #12]
#else
ldrd r10, r11, [r1, #8]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #40]
ldr r11, [r1, #44]
#else
ldrd r10, r11, [r1, #40]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #72]
ldr r11, [r1, #76]
#else
ldrd r10, r11, [r1, #72]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #27
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #8]
ldr r11, [r1, #12]
#else
ldrd r10, r11, [r1, #8]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #40]
ldr r11, [r1, #44]
#else
ldrd r10, r11, [r1, #40]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #72]
ldr r11, [r1, #76]
#else
ldrd r10, r11, [r1, #72]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #26
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #8]
ldr r11, [r1, #12]
#else
ldrd r10, r11, [r1, #8]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #40]
ldr r11, [r1, #44]
#else
ldrd r10, r11, [r1, #40]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #72]
ldr r11, [r1, #76]
#else
ldrd r10, r11, [r1, #72]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #25
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #8]
ldr r11, [r1, #12]
#else
ldrd r10, r11, [r1, #8]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #40]
ldr r11, [r1, #44]
#else
ldrd r10, r11, [r1, #40]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #72]
ldr r11, [r1, #76]
#else
ldrd r10, r11, [r1, #72]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #24
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #8]
ldr r11, [r1, #12]
#else
ldrd r10, r11, [r1, #8]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #40]
ldr r11, [r1, #44]
#else
ldrd r10, r11, [r1, #40]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #72]
ldr r11, [r1, #76]
#else
ldrd r10, r11, [r1, #72]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
sub r1, r1, #0x2a0
mvn r10, #0
mvn r11, #0
rsbs lr, lr, #0
sbcs r10, r10, r8
sbcs r11, r11, r9
sbc lr, lr, lr
asr r12, r2, #31
eor r3, r4, r6
and r3, r3, r12
eor r4, r4, r3
eor r6, r6, r3
eor r3, r5, r7
and r3, r3, r12
eor r5, r5, r3
eor r7, r7, r3
eor r10, r10, r8
and r10, r10, r12
eor r8, r8, r10
eor r11, r11, r9
and r11, r11, r12
eor r9, r9, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #8]
str r5, [r0, #12]
#else
strd r4, r5, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #40]
str r7, [r0, #44]
#else
strd r6, r7, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #72]
str r9, [r0, #76]
#else
strd r8, r9, [r0, #72]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
lsl r3, r2, #24
asr r3, r2, #31
#else
sbfx r3, r2, #7, #1
#endif
eor r12, r2, r3
sub r12, r12, r3
mov r4, #0
mov r5, #0
mov r6, #0
mov r7, #0
mov r8, #0
mov r9, #0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #31
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #16]
ldr r11, [r1, #20]
#else
ldrd r10, r11, [r1, #16]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #48]
ldr r11, [r1, #52]
#else
ldrd r10, r11, [r1, #48]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #80]
ldr r11, [r1, #84]
#else
ldrd r10, r11, [r1, #80]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #30
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #16]
ldr r11, [r1, #20]
#else
ldrd r10, r11, [r1, #16]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #48]
ldr r11, [r1, #52]
#else
ldrd r10, r11, [r1, #48]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #80]
ldr r11, [r1, #84]
#else
ldrd r10, r11, [r1, #80]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #29
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #16]
ldr r11, [r1, #20]
#else
ldrd r10, r11, [r1, #16]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #48]
ldr r11, [r1, #52]
#else
ldrd r10, r11, [r1, #48]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #80]
ldr r11, [r1, #84]
#else
ldrd r10, r11, [r1, #80]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #28
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #16]
ldr r11, [r1, #20]
#else
ldrd r10, r11, [r1, #16]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #48]
ldr r11, [r1, #52]
#else
ldrd r10, r11, [r1, #48]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #80]
ldr r11, [r1, #84]
#else
ldrd r10, r11, [r1, #80]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #27
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #16]
ldr r11, [r1, #20]
#else
ldrd r10, r11, [r1, #16]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #48]
ldr r11, [r1, #52]
#else
ldrd r10, r11, [r1, #48]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #80]
ldr r11, [r1, #84]
#else
ldrd r10, r11, [r1, #80]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #26
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #16]
ldr r11, [r1, #20]
#else
ldrd r10, r11, [r1, #16]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #48]
ldr r11, [r1, #52]
#else
ldrd r10, r11, [r1, #48]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #80]
ldr r11, [r1, #84]
#else
ldrd r10, r11, [r1, #80]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #25
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #16]
ldr r11, [r1, #20]
#else
ldrd r10, r11, [r1, #16]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #48]
ldr r11, [r1, #52]
#else
ldrd r10, r11, [r1, #48]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #80]
ldr r11, [r1, #84]
#else
ldrd r10, r11, [r1, #80]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #24
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #16]
ldr r11, [r1, #20]
#else
ldrd r10, r11, [r1, #16]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #48]
ldr r11, [r1, #52]
#else
ldrd r10, r11, [r1, #48]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #80]
ldr r11, [r1, #84]
#else
ldrd r10, r11, [r1, #80]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
sub r1, r1, #0x2a0
mvn r10, #0
mvn r11, #0
rsbs lr, lr, #0
sbcs r10, r10, r8
sbcs r11, r11, r9
sbc lr, lr, lr
asr r12, r2, #31
eor r3, r4, r6
and r3, r3, r12
eor r4, r4, r3
eor r6, r6, r3
eor r3, r5, r7
and r3, r3, r12
eor r5, r5, r3
eor r7, r7, r3
eor r10, r10, r8
and r10, r10, r12
eor r8, r8, r10
eor r11, r11, r9
and r11, r11, r12
eor r9, r9, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #16]
str r5, [r0, #20]
#else
strd r4, r5, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #48]
str r7, [r0, #52]
#else
strd r6, r7, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #80]
str r9, [r0, #84]
#else
strd r8, r9, [r0, #80]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
lsl r3, r2, #24
asr r3, r2, #31
#else
sbfx r3, r2, #7, #1
#endif
eor r12, r2, r3
sub r12, r12, r3
mov r4, #0
mov r5, #0
mov r6, #0
mov r7, #0
mov r8, #0
mov r9, #0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #31
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #24]
ldr r11, [r1, #28]
#else
ldrd r10, r11, [r1, #24]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #56]
ldr r11, [r1, #60]
#else
ldrd r10, r11, [r1, #56]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #88]
ldr r11, [r1, #92]
#else
ldrd r10, r11, [r1, #88]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #30
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #24]
ldr r11, [r1, #28]
#else
ldrd r10, r11, [r1, #24]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #56]
ldr r11, [r1, #60]
#else
ldrd r10, r11, [r1, #56]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #88]
ldr r11, [r1, #92]
#else
ldrd r10, r11, [r1, #88]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #29
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #24]
ldr r11, [r1, #28]
#else
ldrd r10, r11, [r1, #24]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #56]
ldr r11, [r1, #60]
#else
ldrd r10, r11, [r1, #56]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #88]
ldr r11, [r1, #92]
#else
ldrd r10, r11, [r1, #88]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #28
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #24]
ldr r11, [r1, #28]
#else
ldrd r10, r11, [r1, #24]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #56]
ldr r11, [r1, #60]
#else
ldrd r10, r11, [r1, #56]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #88]
ldr r11, [r1, #92]
#else
ldrd r10, r11, [r1, #88]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #27
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #24]
ldr r11, [r1, #28]
#else
ldrd r10, r11, [r1, #24]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #56]
ldr r11, [r1, #60]
#else
ldrd r10, r11, [r1, #56]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #88]
ldr r11, [r1, #92]
#else
ldrd r10, r11, [r1, #88]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #26
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #24]
ldr r11, [r1, #28]
#else
ldrd r10, r11, [r1, #24]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #56]
ldr r11, [r1, #60]
#else
ldrd r10, r11, [r1, #56]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #88]
ldr r11, [r1, #92]
#else
ldrd r10, r11, [r1, #88]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #25
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #24]
ldr r11, [r1, #28]
#else
ldrd r10, r11, [r1, #24]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #56]
ldr r11, [r1, #60]
#else
ldrd r10, r11, [r1, #56]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #88]
ldr r11, [r1, #92]
#else
ldrd r10, r11, [r1, #88]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
add r1, r1, #0x60
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x800000
lsl r3, r3, #8
add r3, r3, #0x0
#else
mov r3, #0x80000000
#endif
ror r3, r3, #24
ror r3, r3, r12
asr r3, r3, #31
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #24]
ldr r11, [r1, #28]
#else
ldrd r10, r11, [r1, #24]
#endif
eor r10, r10, r4
eor r11, r11, r5
and r10, r10, r3
and r11, r11, r3
eor r4, r4, r10
eor r5, r5, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #56]
ldr r11, [r1, #60]
#else
ldrd r10, r11, [r1, #56]
#endif
eor r10, r10, r6
eor r11, r11, r7
and r10, r10, r3
and r11, r11, r3
eor r6, r6, r10
eor r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1, #88]
ldr r11, [r1, #92]
#else
ldrd r10, r11, [r1, #88]
#endif
eor r10, r10, r8
eor r11, r11, r9
and r10, r10, r3
and r11, r11, r3
eor r8, r8, r10
eor r9, r9, r11
sub r1, r1, #0x2a0
mvn r10, #0
mvn r11, #0x80000000
rsbs lr, lr, #0
sbcs r10, r10, r8
sbc r11, r11, r9
asr r12, r2, #31
eor r3, r4, r6
and r3, r3, r12
eor r4, r4, r3
eor r6, r6, r3
eor r3, r5, r7
and r3, r3, r12
eor r5, r5, r3
eor r7, r7, r3
eor r10, r10, r8
and r10, r10, r12
eor r8, r8, r10
eor r11, r11, r9
and r11, r11, r12
eor r9, r9, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #24]
str r5, [r0, #28]
#else
strd r4, r5, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #56]
str r7, [r0, #60]
#else
strd r6, r7, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #88]
str r9, [r0, #92]
#else
strd r8, r9, [r0, #88]
#endif
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size fe_cmov_table,.-fe_cmov_table
#else
.text
.align 4
.globl fe_cmov_table
.type fe_cmov_table, %function
# void fe_cmov_table(r, base, b)
#   r0 = r    : output, three 32-byte field elements at +0, +32, +64
#   r1 = base : table of eight 0x60-byte precomputed entries
#   r2 = b    : signed index byte
# Non-cache-resistant variant (built when WC_NO_CACHE_RESISTANT is set):
# indexes the table entry directly by |b| instead of scanning all entries,
# then conditionally negates the result when b < 0.  Memory access pattern
# therefore depends on b - that is the deliberate trade-off of this build.
fe_cmov_table:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
# Sign-extend the index byte b into the full register.
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r2, r2, #24
asr r2, r2, #24
#else
sxtb r2, r2
#endif
# r3 = 0xffffffff when b < 0, else 0 (sign mask used for negate/swap).
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
lsl r3, r2, #24
asr r3, r2, #31
#else
sbfx r3, r2, #7, #1
#endif
# r2 = |b|  (two's-complement abs via sign mask)
eor r2, r2, r3
sub r2, r2, r3
# lr = 0xffffffff when b != 0, else 0 (non-zero mask).
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
sub lr, r2, #1
#else
clz lr, r2
lsl lr, lr, #26
#endif /* defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6) */
asr lr, lr, #31
mvn lr, lr
add r2, r2, lr
# r1 += (|b| - (b != 0)) * 0x60: address of entry |b|-1 (entry 0 when b==0,
# but in that case everything loaded is masked to zero below).
mov r12, #0x60
mul r2, r2, r12
add r1, r1, r2
# First 32-byte element of the entry; zeroed when b == 0.
ldm r1!, {r4, r5, r6, r7, r8, r9, r10, r11}
and r4, r4, lr
and r5, r5, lr
and r6, r6, lr
and r7, r7, lr
and r8, r8, lr
and r9, r9, lr
and r10, r10, lr
and r11, r11, lr
# When b == 0 substitute the field element 1 (adds 1 to the low word).
mvn r12, lr
sub r4, r4, r12
# Store to r0+32 instead of r0+0 when b < 0 (swaps the first two outputs).
mov r12, #32
and r12, r12, r3
add r0, r0, r12
stm r0, {r4, r5, r6, r7, r8, r9, r10, r11}
sub r0, r0, r12
# Second 32-byte element; same masking, stored to the other slot.
ldm r1!, {r4, r5, r6, r7, r8, r9, r10, r11}
and r4, r4, lr
and r5, r5, lr
and r6, r6, lr
and r7, r7, lr
and r8, r8, lr
and r9, r9, lr
and r10, r10, lr
and r11, r11, lr
mvn r12, lr
sub r4, r4, r12
mov r12, #32
bic r12, r12, r3
add r0, r0, r12
stm r0, {r4, r5, r6, r7, r8, r9, r10, r11}
sub r0, r0, r12
add r0, r0, #0x40
# Third element: compute p - a (p = 2^255-19; low word 0xffffffed = ~18)
# alongside a, then select the negation when b < 0 (mask r3).
ldm r1!, {r4, r5, r6, r7}
mvn r12, #18
subs r8, r12, r4
sbcs r9, r3, r5
sbcs r10, r3, r6
sbcs r11, r3, r7
bic r4, r4, r3
bic r5, r5, r3
bic r6, r6, r3
bic r7, r7, r3
and r8, r8, r3
and r9, r9, r3
and r10, r10, r3
and r11, r11, r3
orr r4, r4, r8
orr r5, r5, r9
orr r6, r6, r10
orr r7, r7, r11
and r4, r4, lr
and r5, r5, lr
and r6, r6, lr
and r7, r7, lr
stm r0!, {r4, r5, r6, r7}
# Upper half of p - a; the sbcs chain continues the borrow from above
# (ldm/stm/mvn do not modify flags).  Top word of p is 0x7fffffff.
ldm r1!, {r4, r5, r6, r7}
mvn r12, #0x80000000
sbcs r8, r3, r4
sbcs r9, r3, r5
sbcs r10, r3, r6
sbc r11, r12, r7
bic r4, r4, r3
bic r5, r5, r3
bic r6, r6, r3
bic r7, r7, r3
and r8, r8, r3
and r9, r9, r3
and r10, r10, r3
and r11, r11, r3
orr r4, r4, r8
orr r5, r5, r9
orr r6, r6, r10
orr r7, r7, r11
and r4, r4, lr
and r5, r5, lr
and r6, r6, lr
and r7, r7, lr
stm r0!, {r4, r5, r6, r7}
# Restore the table pointer for the caller's convention.
sub r1, r1, r2
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size fe_cmov_table,.-fe_cmov_table
#endif /* WC_NO_CACHE_RESISTANT */
#endif /* HAVE_ED25519_MAKE_KEY || HAVE_ED25519_SIGN */
#endif /* HAVE_ED25519 */
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
.text
.align 4
.globl fe_mul_op
.type fe_mul_op, %function
# fe_mul_op: r = a * b mod (2^255 - 19)
#   r0 = r (result), r1 = a, r2 = b; each is 8 x 32-bit little-endian limbs.
# ARMv5 variant (umull/umlal only, no umaal).  Schoolbook 8x8 multiply
# producing a 16-word product: the low 8 words are spilled to the stack as
# they complete, the high 8 words rotate through registers.  The product is
# then reduced using 2^256 == 38 (mod p) and a final fold of the top bit
# via *19.  Result is stored not fully reduced (top bit cleared, < 2^255).
# Clobbers r0-r12, lr; uses 40 bytes of stack.
fe_mul_op:
push {lr}
sub sp, sp, #40
str r0, [sp, #36]
# r0 is kept as a handy zero for carry propagation (adc rX, r0, #0).
mov r0, #0
ldr r12, [r1]
# A[0] * B[0]
ldr lr, [r2]
umull r3, r4, r12, lr
# A[0] * B[2]
ldr lr, [r2, #8]
umull r5, r6, r12, lr
# A[0] * B[4]
ldr lr, [r2, #16]
umull r7, r8, r12, lr
# A[0] * B[6]
ldr lr, [r2, #24]
umull r9, r10, r12, lr
str r3, [sp]
# A[0] * B[1]
ldr lr, [r2, #4]
mov r11, r0
umlal r4, r11, r12, lr
adds r5, r5, r11
# A[0] * B[3]
ldr lr, [r2, #12]
adcs r6, r6, #0
adc r11, r0, #0
umlal r6, r11, r12, lr
adds r7, r7, r11
# A[0] * B[5]
ldr lr, [r2, #20]
adcs r8, r8, #0
adc r11, r0, #0
umlal r8, r11, r12, lr
adds r9, r9, r11
# A[0] * B[7]
ldr lr, [r2, #28]
adcs r10, r10, #0
adc r3, r0, #0
umlal r10, r3, r12, lr
# A[1] * B[0]
ldr r12, [r1, #4]
ldr lr, [r2]
mov r11, #0
umlal r4, r11, r12, lr
str r4, [sp, #4]
adds r5, r5, r11
# A[1] * B[1]
ldr lr, [r2, #4]
adc r11, r0, #0
umlal r5, r11, r12, lr
adds r6, r6, r11
# A[1] * B[2]
ldr lr, [r2, #8]
adc r11, r0, #0
umlal r6, r11, r12, lr
adds r7, r7, r11
# A[1] * B[3]
ldr lr, [r2, #12]
adc r11, r0, #0
umlal r7, r11, r12, lr
adds r8, r8, r11
# A[1] * B[4]
ldr lr, [r2, #16]
adc r11, r0, #0
umlal r8, r11, r12, lr
adds r9, r9, r11
# A[1] * B[5]
ldr lr, [r2, #20]
adc r11, r0, #0
umlal r9, r11, r12, lr
adds r10, r10, r11
# A[1] * B[6]
ldr lr, [r2, #24]
adc r11, r0, #0
umlal r10, r11, r12, lr
adds r3, r3, r11
# A[1] * B[7]
ldr lr, [r2, #28]
adc r4, r0, #0
umlal r3, r4, r12, lr
# A[2] * B[0]
ldr r12, [r1, #8]
ldr lr, [r2]
mov r11, #0
umlal r5, r11, r12, lr
str r5, [sp, #8]
adds r6, r6, r11
# A[2] * B[1]
ldr lr, [r2, #4]
adc r11, r0, #0
umlal r6, r11, r12, lr
adds r7, r7, r11
# A[2] * B[2]
ldr lr, [r2, #8]
adc r11, r0, #0
umlal r7, r11, r12, lr
adds r8, r8, r11
# A[2] * B[3]
ldr lr, [r2, #12]
adc r11, r0, #0
umlal r8, r11, r12, lr
adds r9, r9, r11
# A[2] * B[4]
ldr lr, [r2, #16]
adc r11, r0, #0
umlal r9, r11, r12, lr
adds r10, r10, r11
# A[2] * B[5]
ldr lr, [r2, #20]
adc r11, r0, #0
umlal r10, r11, r12, lr
adds r3, r3, r11
# A[2] * B[6]
ldr lr, [r2, #24]
adc r11, r0, #0
umlal r3, r11, r12, lr
adds r4, r4, r11
# A[2] * B[7]
ldr lr, [r2, #28]
adc r5, r0, #0
umlal r4, r5, r12, lr
# A[3] * B[0]
ldr r12, [r1, #12]
ldr lr, [r2]
mov r11, #0
umlal r6, r11, r12, lr
str r6, [sp, #12]
adds r7, r7, r11
# A[3] * B[1]
ldr lr, [r2, #4]
adc r11, r0, #0
umlal r7, r11, r12, lr
adds r8, r8, r11
# A[3] * B[2]
ldr lr, [r2, #8]
adc r11, r0, #0
umlal r8, r11, r12, lr
adds r9, r9, r11
# A[3] * B[3]
ldr lr, [r2, #12]
adc r11, r0, #0
umlal r9, r11, r12, lr
adds r10, r10, r11
# A[3] * B[4]
ldr lr, [r2, #16]
adc r11, r0, #0
umlal r10, r11, r12, lr
adds r3, r3, r11
# A[3] * B[5]
ldr lr, [r2, #20]
adc r11, r0, #0
umlal r3, r11, r12, lr
adds r4, r4, r11
# A[3] * B[6]
ldr lr, [r2, #24]
adc r11, r0, #0
umlal r4, r11, r12, lr
adds r5, r5, r11
# A[3] * B[7]
ldr lr, [r2, #28]
adc r6, r0, #0
umlal r5, r6, r12, lr
# A[4] * B[0]
ldr r12, [r1, #16]
ldr lr, [r2]
mov r11, #0
umlal r7, r11, r12, lr
str r7, [sp, #16]
adds r8, r8, r11
# A[4] * B[1]
ldr lr, [r2, #4]
adc r11, r0, #0
umlal r8, r11, r12, lr
adds r9, r9, r11
# A[4] * B[2]
ldr lr, [r2, #8]
adc r11, r0, #0
umlal r9, r11, r12, lr
adds r10, r10, r11
# A[4] * B[3]
ldr lr, [r2, #12]
adc r11, r0, #0
umlal r10, r11, r12, lr
adds r3, r3, r11
# A[4] * B[4]
ldr lr, [r2, #16]
adc r11, r0, #0
umlal r3, r11, r12, lr
adds r4, r4, r11
# A[4] * B[5]
ldr lr, [r2, #20]
adc r11, r0, #0
umlal r4, r11, r12, lr
adds r5, r5, r11
# A[4] * B[6]
ldr lr, [r2, #24]
adc r11, r0, #0
umlal r5, r11, r12, lr
adds r6, r6, r11
# A[4] * B[7]
ldr lr, [r2, #28]
adc r7, r0, #0
umlal r6, r7, r12, lr
# A[5] * B[0]
ldr r12, [r1, #20]
ldr lr, [r2]
mov r11, #0
umlal r8, r11, r12, lr
str r8, [sp, #20]
adds r9, r9, r11
# A[5] * B[1]
ldr lr, [r2, #4]
adc r11, r0, #0
umlal r9, r11, r12, lr
adds r10, r10, r11
# A[5] * B[2]
ldr lr, [r2, #8]
adc r11, r0, #0
umlal r10, r11, r12, lr
adds r3, r3, r11
# A[5] * B[3]
ldr lr, [r2, #12]
adc r11, r0, #0
umlal r3, r11, r12, lr
adds r4, r4, r11
# A[5] * B[4]
ldr lr, [r2, #16]
adc r11, r0, #0
umlal r4, r11, r12, lr
adds r5, r5, r11
# A[5] * B[5]
ldr lr, [r2, #20]
adc r11, r0, #0
umlal r5, r11, r12, lr
adds r6, r6, r11
# A[5] * B[6]
ldr lr, [r2, #24]
adc r11, r0, #0
umlal r6, r11, r12, lr
adds r7, r7, r11
# A[5] * B[7]
ldr lr, [r2, #28]
adc r8, r0, #0
umlal r7, r8, r12, lr
# A[6] * B[0]
ldr r12, [r1, #24]
ldr lr, [r2]
mov r11, #0
umlal r9, r11, r12, lr
str r9, [sp, #24]
adds r10, r10, r11
# A[6] * B[1]
ldr lr, [r2, #4]
adc r11, r0, #0
umlal r10, r11, r12, lr
adds r3, r3, r11
# A[6] * B[2]
ldr lr, [r2, #8]
adc r11, r0, #0
umlal r3, r11, r12, lr
adds r4, r4, r11
# A[6] * B[3]
ldr lr, [r2, #12]
adc r11, r0, #0
umlal r4, r11, r12, lr
adds r5, r5, r11
# A[6] * B[4]
ldr lr, [r2, #16]
adc r11, r0, #0
umlal r5, r11, r12, lr
adds r6, r6, r11
# A[6] * B[5]
ldr lr, [r2, #20]
adc r11, r0, #0
umlal r6, r11, r12, lr
adds r7, r7, r11
# A[6] * B[6]
ldr lr, [r2, #24]
adc r11, r0, #0
umlal r7, r11, r12, lr
adds r8, r8, r11
# A[6] * B[7]
ldr lr, [r2, #28]
adc r9, r0, #0
umlal r8, r9, r12, lr
# A[7] * B[0]
ldr r12, [r1, #28]
ldr lr, [r2]
mov r11, #0
umlal r10, r11, r12, lr
str r10, [sp, #28]
adds r3, r3, r11
# A[7] * B[1]
ldr lr, [r2, #4]
adc r11, r0, #0
umlal r3, r11, r12, lr
adds r4, r4, r11
# A[7] * B[2]
ldr lr, [r2, #8]
adc r11, r0, #0
umlal r4, r11, r12, lr
adds r5, r5, r11
# A[7] * B[3]
ldr lr, [r2, #12]
adc r11, r0, #0
umlal r5, r11, r12, lr
adds r6, r6, r11
# A[7] * B[4]
ldr lr, [r2, #16]
adc r11, r0, #0
umlal r6, r11, r12, lr
adds r7, r7, r11
# A[7] * B[5]
ldr lr, [r2, #20]
adc r11, r0, #0
umlal r7, r11, r12, lr
adds r8, r8, r11
# A[7] * B[6]
ldr lr, [r2, #24]
adc r11, r0, #0
umlal r8, r11, r12, lr
adds r9, r9, r11
# A[7] * B[7]
ldr lr, [r2, #28]
adc r10, r0, #0
umlal r9, r10, r12, lr
# Reduce
# High words r3..r10 hold product words 8..15; low words 0..7 are on the
# stack.  Fold word 15 into word 7 first (x * 2^256 == x * 38 mod p), then
# fold bit 255 and the small overflow down via *19, and finally add
# 38 * (words 8..14) into words 0..6 in one carry chain.
ldr r2, [sp, #28]
mov lr, sp
mov r12, #38
umull r10, r11, r12, r10
adds r10, r10, r2
adc r11, r11, #0
mov r12, #19
lsl r11, r11, #1
orr r11, r11, r10, LSR #31
mul r11, r12, r11
ldm lr!, {r1, r2}
mov r12, #38
adds r1, r1, r11
adc r11, r0, #0
umlal r1, r11, r3, r12
adds r2, r2, r11
adc r11, r0, #0
umlal r2, r11, r4, r12
ldm lr!, {r3, r4}
adds r3, r3, r11
adc r11, r0, #0
umlal r3, r11, r5, r12
adds r4, r4, r11
adc r11, r0, #0
umlal r4, r11, r6, r12
ldm lr!, {r5, r6}
adds r5, r5, r11
adc r11, r0, #0
umlal r5, r11, r7, r12
adds r6, r6, r11
adc r11, r0, #0
umlal r6, r11, r8, r12
ldm lr!, {r7, r8}
adds r7, r7, r11
adc r11, r0, #0
umlal r7, r11, r9, r12
# Clear bit 255 (already folded in above via *19).
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r10, r10, #0x80000000
#else
bfc r10, #31, #1
#endif
adds r8, r10, r11
# Store
ldr r0, [sp, #36]
stm r0, {r1, r2, r3, r4, r5, r6, r7, r8}
add sp, sp, #40
pop {pc}
.size fe_mul_op,.-fe_mul_op
#else
.text
.align 4
.globl fe_mul_op
.type fe_mul_op, %function
# fe_mul_op: r = a * b mod (2^255 - 19)
#   r0 = r (result), r1 = a, r2 = b; each is 8 x 32-bit little-endian limbs.
# ARMv6+ variant using umaal (multiply with double accumulate), which
# absorbs carries without explicit adds/adc chains.  Product is built as a
# 4x8 half then extended with the high 4 limbs of a; low words spill to the
# stack and are consumed again via pop during the reduction.
# Clobbers r0-r12, lr; uses 44 bytes of stack.
fe_mul_op:
push {lr}
sub sp, sp, #44
# Save result pointer (sp+36) and a pointer (sp+40) for later reloads.
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r0, [sp, #36]
str r1, [sp, #40]
#else
strd r0, r1, [sp, #36]
#endif
mov lr, r2
# First pass: (a[0..3]) * b, accumulating with umaal.
ldm r1, {r0, r1, r2, r3}
ldm lr!, {r4, r5, r6}
umull r10, r11, r0, r4
umull r12, r7, r1, r4
umaal r11, r12, r0, r5
umull r8, r9, r2, r4
umaal r12, r8, r1, r5
umaal r12, r7, r0, r6
umaal r8, r9, r3, r4
stm sp, {r10, r11, r12}
umaal r7, r8, r2, r5
ldm lr!, {r4}
umull r10, r11, r1, r6
umaal r8, r9, r2, r6
umaal r7, r10, r0, r4
umaal r8, r11, r3, r5
str r7, [sp, #12]
umaal r8, r10, r1, r4
umaal r9, r11, r3, r6
umaal r9, r10, r2, r4
umaal r10, r11, r3, r4
ldm lr, {r4, r5, r6, r7}
mov r12, #0
umlal r8, r12, r0, r4
umaal r9, r12, r1, r4
umaal r10, r12, r2, r4
umaal r11, r12, r3, r4
mov r4, #0
umlal r9, r4, r0, r5
umaal r10, r4, r1, r5
umaal r11, r4, r2, r5
umaal r12, r4, r3, r5
mov r5, #0
umlal r10, r5, r0, r6
umaal r11, r5, r1, r6
umaal r12, r5, r2, r6
umaal r4, r5, r3, r6
mov r6, #0
umlal r11, r6, r0, r7
# Second pass: reload a[4..7] and rewind b, then add (a[4..7]) * b.
ldr r0, [sp, #40]
umaal r12, r6, r1, r7
add r0, r0, #16
umaal r4, r6, r2, r7
sub lr, lr, #16
umaal r5, r6, r3, r7
ldm r0, {r0, r1, r2, r3}
str r6, [sp, #32]
ldm lr!, {r6}
mov r7, #0
umlal r8, r7, r0, r6
umaal r9, r7, r1, r6
str r8, [sp, #16]
umaal r10, r7, r2, r6
umaal r11, r7, r3, r6
ldm lr!, {r6}
mov r8, #0
umlal r9, r8, r0, r6
umaal r10, r8, r1, r6
str r9, [sp, #20]
umaal r11, r8, r2, r6
umaal r12, r8, r3, r6
ldm lr!, {r6}
mov r9, #0
umlal r10, r9, r0, r6
umaal r11, r9, r1, r6
str r10, [sp, #24]
umaal r12, r9, r2, r6
umaal r4, r9, r3, r6
ldm lr!, {r6}
mov r10, #0
umlal r11, r10, r0, r6
umaal r12, r10, r1, r6
str r11, [sp, #28]
umaal r4, r10, r2, r6
umaal r5, r10, r3, r6
ldm lr!, {r11}
umaal r12, r7, r0, r11
umaal r4, r7, r1, r11
ldr r6, [sp, #32]
umaal r5, r7, r2, r11
umaal r6, r7, r3, r11
ldm lr!, {r11}
umaal r4, r8, r0, r11
umaal r5, r8, r1, r11
umaal r6, r8, r2, r11
umaal r7, r8, r3, r11
ldm lr, {r11, lr}
umaal r5, r9, r0, r11
umaal r6, r10, r0, lr
umaal r6, r9, r1, r11
umaal r7, r10, r1, lr
umaal r7, r9, r2, r11
umaal r8, r10, r2, lr
umaal r8, r9, r3, r11
umaal r9, r10, r3, lr
# Reduce
# Fold word 15 into word 7 (2^256 == 38 mod p; 37 + the word itself = 38x),
# fold bit 255 via *19, then add 38 * (high words) into the low words that
# are popped back off the stack.
ldr r0, [sp, #28]
mov lr, #37
umaal r10, r0, r10, lr
mov lr, #19
lsl r0, r0, #1
orr r0, r0, r10, lsr #31
mul r11, r0, lr
pop {r0, r1, r2}
mov lr, #38
umaal r0, r11, r12, lr
umaal r1, r11, r4, lr
umaal r2, r11, r5, lr
pop {r3, r4, r5}
umaal r3, r11, r6, lr
umaal r4, r11, r7, lr
umaal r5, r11, r8, lr
pop {r6}
# Clear bit 255 (already folded in above via *19).
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r10, r10, #0x80000000
#else
bfc r10, #31, #1
#endif
umaal r6, r11, r9, lr
add r7, r10, r11
# sp has advanced past 7 popped words; sp+8 is the saved result pointer.
ldr lr, [sp, #8]
# Store
stm lr, {r0, r1, r2, r3, r4, r5, r6, r7}
add sp, sp, #16
pop {pc}
.size fe_mul_op,.-fe_mul_op
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
.text
.align 4
.globl fe_mul
.type fe_mul, %function
# void fe_mul(fe r, const fe a, const fe b)
# Public entry point: saves the callee-saved registers that fe_mul_op
# clobbers, then delegates to it with r0-r2 passed straight through.
fe_mul:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
bl fe_mul_op
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size fe_mul,.-fe_mul
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
.text
.align 4
.globl fe_sq_op
.type fe_sq_op, %function
# fe_sq_op: r = a^2 mod (2^255 - 19)
#   r0 = r (result), r1 = a; 8 x 32-bit little-endian limbs.
# ARMv5 variant.  Computes the off-diagonal products a[i]*a[j] (i<j) once,
# doubles the whole partial sum with one carry chain, then adds the
# diagonal squares a[i]^2, and reduces mod p exactly as fe_mul_op does.
# Clobbers r0-r12, lr; uses 0x44 bytes of stack.
fe_sq_op:
push {lr}
sub sp, sp, #0x44
str r0, [sp, #64]
# Square
# r0 is kept as a handy zero for carry propagation (adc rX, r0, #0).
mov r0, #0
ldr r12, [r1]
# A[0] * A[1]
ldr lr, [r1, #4]
umull r4, r5, r12, lr
# A[0] * A[3]
ldr lr, [r1, #12]
umull r6, r7, r12, lr
# A[0] * A[5]
ldr lr, [r1, #20]
umull r8, r9, r12, lr
# A[0] * A[7]
ldr lr, [r1, #28]
umull r10, r3, r12, lr
# A[0] * A[2]
ldr lr, [r1, #8]
mov r11, #0
umlal r5, r11, r12, lr
adds r6, r6, r11
# A[0] * A[4]
ldr lr, [r1, #16]
adcs r7, r7, #0
adc r11, r0, #0
umlal r7, r11, r12, lr
adds r8, r8, r11
# A[0] * A[6]
ldr lr, [r1, #24]
adcs r9, r9, #0
adc r11, r0, #0
umlal r9, r11, r12, lr
adds r10, r10, r11
adcs r3, r3, #0
str r4, [sp, #4]
str r5, [sp, #8]
# A[1] * A[2]
ldr r12, [r1, #4]
ldr lr, [r1, #8]
mov r11, #0
umlal r6, r11, r12, lr
str r6, [sp, #12]
adds r7, r7, r11
# A[1] * A[3]
ldr lr, [r1, #12]
adc r11, r0, #0
umlal r7, r11, r12, lr
str r7, [sp, #16]
adds r8, r8, r11
# A[1] * A[4]
ldr lr, [r1, #16]
adc r11, r0, #0
umlal r8, r11, r12, lr
adds r9, r9, r11
# A[1] * A[5]
ldr lr, [r1, #20]
adc r11, r0, #0
umlal r9, r11, r12, lr
adds r10, r10, r11
# A[1] * A[6]
ldr lr, [r1, #24]
adc r11, r0, #0
umlal r10, r11, r12, lr
adds r3, r3, r11
# A[1] * A[7]
ldr lr, [r1, #28]
adc r4, r0, #0
umlal r3, r4, r12, lr
# A[2] * A[3]
ldr r12, [r1, #8]
ldr lr, [r1, #12]
mov r11, #0
umlal r8, r11, r12, lr
str r8, [sp, #20]
adds r9, r9, r11
# A[2] * A[4]
ldr lr, [r1, #16]
adc r11, r0, #0
umlal r9, r11, r12, lr
str r9, [sp, #24]
adds r10, r10, r11
# A[2] * A[5]
ldr lr, [r1, #20]
adc r11, r0, #0
umlal r10, r11, r12, lr
adds r3, r3, r11
# A[2] * A[6]
ldr lr, [r1, #24]
adc r11, r0, #0
umlal r3, r11, r12, lr
adds r4, r4, r11
# A[2] * A[7]
ldr lr, [r1, #28]
adc r5, r0, #0
umlal r4, r5, r12, lr
# A[3] * A[4]
ldr r12, [r1, #12]
ldr lr, [r1, #16]
mov r11, #0
umlal r10, r11, r12, lr
str r10, [sp, #28]
adds r3, r3, r11
# A[3] * A[5]
ldr lr, [r1, #20]
adc r11, r0, #0
umlal r3, r11, r12, lr
adds r4, r4, r11
# A[3] * A[6]
ldr lr, [r1, #24]
adc r11, r0, #0
umlal r4, r11, r12, lr
adds r5, r5, r11
# A[3] * A[7]
ldr lr, [r1, #28]
adc r6, r0, #0
umlal r5, r6, r12, lr
# A[4] * A[5]
ldr r12, [r1, #16]
ldr lr, [r1, #20]
mov r11, #0
umlal r4, r11, r12, lr
adds r5, r5, r11
# A[4] * A[6]
ldr lr, [r1, #24]
adc r11, r0, #0
umlal r5, r11, r12, lr
adds r6, r6, r11
# A[4] * A[7]
ldr lr, [r1, #28]
adc r7, r0, #0
umlal r6, r7, r12, lr
# A[5] * A[6]
ldr r12, [r1, #20]
ldr lr, [r1, #24]
mov r11, #0
umlal r6, r11, r12, lr
adds r7, r7, r11
# A[5] * A[7]
ldr lr, [r1, #28]
adc r8, r0, #0
umlal r7, r8, r12, lr
# A[6] * A[7]
ldr r12, [r1, #24]
ldr lr, [r1, #28]
mov r9, #0
umlal r8, r9, r12, lr
add lr, sp, #32
stm lr, {r3, r4, r5, r6, r7, r8, r9}
# Double the full off-diagonal sum (words 1..14) in one carry chain.
add lr, sp, #4
ldm lr, {r4, r5, r6, r7, r8, r9, r10}
adds r4, r4, r4
adcs r5, r5, r5
adcs r6, r6, r6
adcs r7, r7, r7
adcs r8, r8, r8
adcs r9, r9, r9
adcs r10, r10, r10
stm lr!, {r4, r5, r6, r7, r8, r9, r10}
ldm lr, {r3, r4, r5, r6, r7, r8, r9}
adcs r3, r3, r3
adcs r4, r4, r4
adcs r5, r5, r5
adcs r6, r6, r6
adcs r7, r7, r7
adcs r8, r8, r8
adcs r9, r9, r9
adc r10, r0, #0
stm lr, {r3, r4, r5, r6, r7, r8, r9, r10}
# Add the diagonal squares A[i]^2 into the doubled sum.
add lr, sp, #4
ldm lr, {r4, r5, r6, r7, r8, r9, r10}
mov lr, sp
# A[0] * A[0]
ldr r12, [r1]
umull r3, r11, r12, r12
adds r4, r4, r11
# A[1] * A[1]
ldr r12, [r1, #4]
adcs r5, r5, #0
adc r11, r0, #0
umlal r5, r11, r12, r12
adds r6, r6, r11
# A[2] * A[2]
ldr r12, [r1, #8]
adcs r7, r7, #0
adc r11, r0, #0
umlal r7, r11, r12, r12
adds r8, r8, r11
# A[3] * A[3]
ldr r12, [r1, #12]
adcs r9, r9, #0
adc r11, r0, #0
umlal r9, r11, r12, r12
adds r10, r10, r11
stm lr!, {r3, r4, r5, r6, r7, r8, r9, r10}
ldm lr, {r3, r4, r5, r6, r7, r8, r9, r10}
# A[4] * A[4]
ldr r12, [r1, #16]
adcs r3, r3, #0
adc r11, r0, #0
umlal r3, r11, r12, r12
adds r4, r4, r11
# A[5] * A[5]
ldr r12, [r1, #20]
adcs r5, r5, #0
adc r11, r0, #0
umlal r5, r11, r12, r12
adds r6, r6, r11
# A[6] * A[6]
ldr r12, [r1, #24]
adcs r7, r7, #0
adc r11, r0, #0
umlal r7, r11, r12, r12
adds r8, r8, r11
# A[7] * A[7]
ldr r12, [r1, #28]
adcs r9, r9, #0
adc r10, r10, #0
umlal r9, r10, r12, r12
# Reduce
# Same folding as fe_mul_op: 2^256 == 38 (mod p), top bit folded via *19;
# result stored with bit 255 cleared (< 2^255, not fully reduced).
ldr r2, [sp, #28]
mov lr, sp
mov r12, #38
umull r10, r11, r12, r10
adds r10, r10, r2
adc r11, r11, #0
mov r12, #19
lsl r11, r11, #1
orr r11, r11, r10, LSR #31
mul r11, r12, r11
ldm lr!, {r1, r2}
mov r12, #38
adds r1, r1, r11
adc r11, r0, #0
umlal r1, r11, r3, r12
adds r2, r2, r11
adc r11, r0, #0
umlal r2, r11, r4, r12
ldm lr!, {r3, r4}
adds r3, r3, r11
adc r11, r0, #0
umlal r3, r11, r5, r12
adds r4, r4, r11
adc r11, r0, #0
umlal r4, r11, r6, r12
ldm lr!, {r5, r6}
adds r5, r5, r11
adc r11, r0, #0
umlal r5, r11, r7, r12
adds r6, r6, r11
adc r11, r0, #0
umlal r6, r11, r8, r12
ldm lr!, {r7, r8}
adds r7, r7, r11
adc r11, r0, #0
umlal r7, r11, r9, r12
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r10, r10, #0x80000000
#else
bfc r10, #31, #1
#endif
adds r8, r10, r11
# Store
ldr r0, [sp, #64]
stm r0, {r1, r2, r3, r4, r5, r6, r7, r8}
add sp, sp, #0x44
pop {pc}
.size fe_sq_op,.-fe_sq_op
#else
.text
.align 4
.globl fe_sq_op
.type fe_sq_op, %function
# fe_sq_op: r = a^2 mod (2^255 - 19)
#   r0 = r (result), r1 = a; 8 x 32-bit little-endian limbs.
# ARMv6+ variant using umaal.  Each product word doubles its off-diagonal
# contributions with adcs rX, rX, rX and folds in the diagonal square via
# umaal; low result words are pushed to the stack and popped back during
# reduction.  Clobbers r0-r12, lr; uses 32 bytes of stack.
fe_sq_op:
push {lr}
sub sp, sp, #32
str r0, [sp, #28]
ldm r1, {r0, r1, r2, r3, r4, r5, r6, r7}
# Square
umull r9, r10, r0, r0
umull r11, r12, r0, r1
adds r11, r11, r11
mov lr, #0
umaal r10, r11, lr, lr
stm sp, {r9, r10}
mov r8, lr
umaal r8, r12, r0, r2
adcs r8, r8, r8
umaal r8, r11, r1, r1
umull r9, r10, r0, r3
umaal r9, r12, r1, r2
adcs r9, r9, r9
umaal r9, r11, lr, lr
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [sp, #8]
str r9, [sp, #12]
#else
strd r8, r9, [sp, #8]
#endif
mov r9, lr
umaal r9, r10, r0, r4
umaal r9, r12, r1, r3
adcs r9, r9, r9
umaal r9, r11, r2, r2
str r9, [sp, #16]
umull r9, r8, r0, r5
umaal r9, r12, r1, r4
umaal r9, r10, r2, r3
adcs r9, r9, r9
umaal r9, r11, lr, lr
str r9, [sp, #20]
mov r9, lr
umaal r9, r8, r0, r6
umaal r9, r12, r1, r5
umaal r9, r10, r2, r4
adcs r9, r9, r9
umaal r9, r11, r3, r3
str r9, [sp, #24]
umull r0, r9, r0, r7
umaal r0, r8, r1, r6
umaal r0, r12, r2, r5
umaal r0, r10, r3, r4
adcs r0, r0, r0
umaal r0, r11, lr, lr
# R[7] = r0
umaal r9, r8, r1, r7
umaal r9, r10, r2, r6
umaal r12, r9, r3, r5
adcs r12, r12, r12
umaal r12, r11, r4, r4
# R[8] = r12
umaal r9, r8, r2, r7
umaal r10, r9, r3, r6
mov r2, lr
umaal r10, r2, r4, r5
adcs r10, r10, r10
umaal r11, r10, lr, lr
# R[9] = r11
umaal r2, r8, r3, r7
umaal r2, r9, r4, r6
adcs r3, r2, r2
umaal r10, r3, r5, r5
# R[10] = r10
mov r1, lr
umaal r1, r8, r4, r7
umaal r1, r9, r5, r6
adcs r4, r1, r1
umaal r3, r4, lr, lr
# R[11] = r3
umaal r8, r9, r5, r7
adcs r8, r8, r8
umaal r4, r8, r6, r6
# R[12] = r4
mov r5, lr
umaal r5, r9, r6, r7
adcs r5, r5, r5
umaal r8, r5, lr, lr
# R[13] = r8
adcs r9, r9, r9
umaal r9, r5, r7, r7
adcs r7, r5, lr
# R[14] = r9
# R[15] = r7
# Reduce
# Fold word 15 into word 7 (37 + the word itself = 38x; 2^256 == 38 mod p),
# fold bit 255 via *19, then add 38 * (high words) into the low words
# popped back off the stack.
mov r6, #37
umaal r7, r0, r7, r6
mov r6, #19
lsl r0, r0, #1
orr r0, r0, r7, lsr #31
mul lr, r0, r6
pop {r0, r1}
mov r6, #38
umaal r0, lr, r12, r6
umaal r1, lr, r11, r6
mov r12, r3
mov r11, r4
pop {r2, r3, r4}
umaal r2, lr, r10, r6
umaal r3, lr, r12, r6
umaal r4, lr, r11, r6
mov r12, r6
pop {r5, r6}
umaal r5, lr, r8, r12
# Clear bit 255 (already folded in above via *19).
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r7, r7, #0x80000000
#else
bfc r7, #31, #1
#endif
umaal r6, lr, r9, r12
add r7, r7, lr
# Pop the saved result pointer (stored at sp+28 on entry).
pop {lr}
# Store
stm lr, {r0, r1, r2, r3, r4, r5, r6, r7}
pop {pc}
.size fe_sq_op,.-fe_sq_op
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
.text
.align 4
.globl fe_sq
.type fe_sq, %function
# void fe_sq(fe r, const fe a)
# Public entry point: saves the callee-saved registers that fe_sq_op
# clobbers, then delegates to it with r0-r1 passed straight through.
fe_sq:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
bl fe_sq_op
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size fe_sq,.-fe_sq
#ifdef HAVE_CURVE25519
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
.text
.align 4
.globl fe_mul121666
.type fe_mul121666, %function
# fe_mul121666: r = a * 121666 mod (2^255 - 19)
#   r0 = r (result), r1 = a; 8 x 32-bit little-endian limbs.
# 121666 = 0x1db42 = (486662 + 2) / 4, the Curve25519 Montgomery-ladder
# constant.  ARMv5 variant (umull + explicit carry chain).
fe_mul121666:
push {r4, r5, r6, r7, r8, r9, r10, lr}
# Multiply by 121666
ldm r1, {r2, r3, r4, r5, r6, r7, r8, r9}
# Build the constant 0x1db42 in r10.
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #1
lsl r10, r10, #8
orr r10, r10, #0xdb
lsl r10, r10, #8
orr r10, r10, #0x42
#else
# NOTE(review): this inner #if repeats the outer (already-false) condition,
# so only the mov/movt path below is ever taken here - generated-code noise.
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0xdb
lsl r10, r10, #8
add r10, r10, #0x42
#else
mov r10, #0xdb42
#endif
movt r10, #1
#endif
# Limb-by-limb multiply; r12/lr alternate as the carry-out word.
umull r2, r12, r10, r2
umull r3, lr, r10, r3
adds r3, r3, r12
adc lr, lr, #0
umull r4, r12, r10, r4
adds r4, r4, lr
adc r12, r12, #0
umull r5, lr, r10, r5
adds r5, r5, r12
adc lr, lr, #0
umull r6, r12, r10, r6
adds r6, r6, lr
adc r12, r12, #0
umull r7, lr, r10, r7
adds r7, r7, r12
adc lr, lr, #0
umull r8, r12, r10, r8
adds r8, r8, lr
adc r12, r12, #0
umull r9, lr, r10, r9
adds r9, r9, r12
mov r10, #19
adc lr, lr, #0
# Fold the overflow (carry word and bit 255) back in via *19, then clear
# bit 255; result is < 2^255 but not fully reduced.
lsl lr, lr, #1
orr lr, lr, r9, LSR #31
mul lr, r10, lr
adds r2, r2, lr
adcs r3, r3, #0
adcs r4, r4, #0
adcs r5, r5, #0
adcs r6, r6, #0
adcs r7, r7, #0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r9, r9, #0x80000000
#else
bfc r9, #31, #1
#endif
adcs r8, r8, #0
adc r9, r9, #0
stm r0, {r2, r3, r4, r5, r6, r7, r8, r9}
pop {r4, r5, r6, r7, r8, r9, r10, pc}
.size fe_mul121666,.-fe_mul121666
#else
.text
.align 4
.globl fe_mul121666
.type fe_mul121666, %function
# fe_mul121666: r (r0) = a (r1) * 121666 mod 2^255-19.
# ARMv6+ variant using umaal: a[i]*121666 is computed as
# a[i] + 121665*a[i] by letting umaal's double-accumulate add the word
# itself (r12 = 121666 - 1).
fe_mul121666:
push {r4, r5, r6, r7, r8, r9, r10, lr}
# Multiply by 121666
ldm r1, {r2, r3, r4, r5, r6, r7, r8, r9}
# Build lr = 121666 = 0x1db42.
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov lr, #1
lsl lr, lr, #8
orr lr, lr, #0xdb
lsl lr, lr, #8
orr lr, lr, #0x42
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov lr, #0xdb
lsl lr, lr, #8
add lr, lr, #0x42
#else
mov lr, #0xdb42
#endif
movt lr, #1
#endif
umull r2, r10, lr, r2
sub r12, lr, #1
umaal r3, r10, r12, r3
umaal r4, r10, r12, r4
umaal r5, r10, r12, r5
umaal r6, r10, r12, r6
umaal r7, r10, r12, r7
umaal r8, r10, r12, r8
mov lr, #19
umaal r9, r10, r12, r9
# Fold overflow back: (carry*2 | top bit) * 19, since 2^255 == 19 (mod p).
lsl r10, r10, #1
orr r10, r10, r9, lsr #31
mul r10, lr, r10
adds r2, r2, r10
adcs r3, r3, #0
adcs r4, r4, #0
adcs r5, r5, #0
adcs r6, r6, #0
adcs r7, r7, #0
# Clear bit 31 of the top word (already folded in above).
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r9, r9, #0x80000000
#else
bfc r9, #31, #1
#endif
adcs r8, r8, #0
adc r9, r9, #0
stm r0, {r2, r3, r4, r5, r6, r7, r8, r9}
pop {r4, r5, r6, r7, r8, r9, r10, pc}
.size fe_mul121666,.-fe_mul121666
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
#ifndef WC_NO_CACHE_RESISTANT
.text
.align 4
.globl curve25519
.type curve25519, %function
# curve25519: X25519 scalar multiplication via the Montgomery ladder.
# Cache-timing-resistant variant (built when WC_NO_CACHE_RESISTANT is not
# defined): the ladder conditionally swaps the working field elements
# themselves with masked XORs, so memory addresses never depend on scalar
# bits.
# In:  r0 = output x-coordinate (8 words), r1 = 32-byte scalar,
#      r2 = base-point x-coordinate.  Returns r0 = 0.
# Frame (0xbc bytes): working elements at sp+0, sp+32, sp+0x40 and the
# output buffer; temporaries at sp+0x60, sp+0x80.
# Saved state: [sp,#160]=out ptr, [sp,#164]=scalar ptr, [sp,#168]=base ptr,
# [sp,#172]=current swap flag, [sp,#176]=scalar word byte offset,
# [sp,#180]=bit index within word, [sp,#184]=current scalar bit.
curve25519:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
sub sp, sp, #0xbc
str r0, [sp, #160]
str r1, [sp, #164]
str r2, [sp, #168]
mov r1, #0
str r1, [sp, #172]
# Initialise ladder: x2 = 1, z3 = 1, z2 = 0, x3 = base x.
mov r4, #1
mov r5, #0
mov r6, #0
mov r7, #0
mov r8, #0
mov r9, #0
mov r10, #0
mov r11, #0
stm r0, {r4, r5, r6, r7, r8, r9, r10, r11}
add r3, sp, #32
stm r3, {r4, r5, r6, r7, r8, r9, r10, r11}
mov r4, #0
mov r3, sp
stm r3, {r4, r5, r6, r7, r8, r9, r10, r11}
add r3, sp, #0x40
# Copy
ldm r2, {r4, r5, r6, r7, r8, r9, r10, r11}
stm r3, {r4, r5, r6, r7, r8, r9, r10, r11}
# Outer loop walks scalar words from byte offset 28 down to 0; inner loop
# walks bits 30..0 of the first (top) word, then 31..0 of the rest.
mov r1, #30
str r1, [sp, #180]
mov r2, #28
str r2, [sp, #176]
L_curve25519_words:
L_curve25519_bits:
ldr r1, [sp, #164]
ldr r2, [r1, r2]
ldr r1, [sp, #180]
lsr r2, r2, r1
and r2, r2, #1
str r2, [sp, #184]
# Swap only when the bit differs from the previous one (XOR of flags).
ldr r1, [sp, #172]
eor r1, r1, r2
str r1, [sp, #172]
ldr r0, [sp, #160]
# Conditional Swap
# r1 = 0 or -1 mask; masked XOR swap of the two 8-word elements,
# constant-time (same loads/stores regardless of the mask).
rsb r1, r1, #0
mov r3, r0
add r12, sp, #0x40
ldm r3, {r4, r5}
ldm r12, {r6, r7}
eor r8, r4, r6
eor r9, r5, r7
and r8, r8, r1
and r9, r9, r1
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r8
eor r7, r7, r9
stm r3!, {r4, r5}
stm r12!, {r6, r7}
ldm r3, {r4, r5}
ldm r12, {r6, r7}
eor r8, r4, r6
eor r9, r5, r7
and r8, r8, r1
and r9, r9, r1
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r8
eor r7, r7, r9
stm r3!, {r4, r5}
stm r12!, {r6, r7}
ldm r3, {r4, r5}
ldm r12, {r6, r7}
eor r8, r4, r6
eor r9, r5, r7
and r8, r8, r1
and r9, r9, r1
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r8
eor r7, r7, r9
stm r3!, {r4, r5}
stm r12!, {r6, r7}
ldm r3, {r4, r5}
ldm r12, {r6, r7}
eor r8, r4, r6
eor r9, r5, r7
and r8, r8, r1
and r9, r9, r1
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r8
eor r7, r7, r9
stm r3!, {r4, r5}
stm r12!, {r6, r7}
ldr r1, [sp, #172]
# Conditional Swap
# Same masked swap for the second element pair (sp+0 and sp+32).
rsb r1, r1, #0
mov r3, sp
add r12, sp, #32
ldm r3, {r4, r5}
ldm r12, {r6, r7}
eor r8, r4, r6
eor r9, r5, r7
and r8, r8, r1
and r9, r9, r1
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r8
eor r7, r7, r9
stm r3!, {r4, r5}
stm r12!, {r6, r7}
ldm r3, {r4, r5}
ldm r12, {r6, r7}
eor r8, r4, r6
eor r9, r5, r7
and r8, r8, r1
and r9, r9, r1
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r8
eor r7, r7, r9
stm r3!, {r4, r5}
stm r12!, {r6, r7}
ldm r3, {r4, r5}
ldm r12, {r6, r7}
eor r8, r4, r6
eor r9, r5, r7
and r8, r8, r1
and r9, r9, r1
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r8
eor r7, r7, r9
stm r3!, {r4, r5}
stm r12!, {r6, r7}
ldm r3, {r4, r5}
ldm r12, {r6, r7}
eor r8, r4, r6
eor r9, r5, r7
and r8, r8, r1
and r9, r9, r1
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r8
eor r7, r7, r9
stm r3!, {r4, r5}
stm r12!, {r6, r7}
ldr r1, [sp, #184]
str r1, [sp, #172]
# Ladder step: sequence of add/sub, mul, sq and mul121666 field ops
# (the standard X25519 double-and-add step on the working elements).
mov r3, sp
ldr r2, [sp, #160]
add r1, sp, #0x80
ldr r0, [sp, #160]
bl fe_add_sub_op
add r3, sp, #32
add r2, sp, #0x40
add r1, sp, #0x60
mov r0, sp
bl fe_add_sub_op
ldr r2, [sp, #160]
add r1, sp, #0x60
add r0, sp, #32
bl fe_mul_op
add r2, sp, #0x80
mov r1, sp
mov r0, sp
bl fe_mul_op
add r1, sp, #0x80
add r0, sp, #0x80
bl fe_sq_op
ldr r1, [sp, #160]
add r0, sp, #0x60
bl fe_sq_op
mov r3, sp
add r2, sp, #32
mov r1, sp
add r0, sp, #0x40
bl fe_add_sub_op
add r2, sp, #0x80
add r1, sp, #0x60
ldr r0, [sp, #160]
bl fe_mul_op
add r2, sp, #0x80
add r1, sp, #0x60
add r0, sp, #0x60
bl fe_sub_op
mov r1, sp
mov r0, sp
bl fe_sq_op
add r1, sp, #0x60
add r0, sp, #32
bl fe_mul121666
add r1, sp, #0x40
add r0, sp, #0x40
bl fe_sq_op
add r2, sp, #32
add r1, sp, #0x80
add r0, sp, #0x80
bl fe_add_op
mov r2, sp
ldr r1, [sp, #168]
add r0, sp, #32
bl fe_mul_op
add r2, sp, #0x80
add r1, sp, #0x60
mov r0, sp
bl fe_mul_op
# Loop control: next bit, then next (lower) scalar word.
ldr r2, [sp, #176]
ldr r1, [sp, #180]
subs r1, r1, #1
str r1, [sp, #180]
bge L_curve25519_bits
mov r1, #31
str r1, [sp, #180]
subs r2, r2, #4
str r2, [sp, #176]
bge L_curve25519_words
# Invert
# z2^(p-2) mod p by square-and-multiply chain (Fermat inversion),
# then multiply into x2 to produce the affine x-coordinate.
add r1, sp, #0
add r0, sp, #32
bl fe_sq_op
add r1, sp, #32
add r0, sp, #0x40
bl fe_sq_op
add r1, sp, #0x40
add r0, sp, #0x40
bl fe_sq_op
add r2, sp, #0x40
add r1, sp, #0
add r0, sp, #0x40
bl fe_mul_op
add r2, sp, #0x40
add r1, sp, #32
add r0, sp, #32
bl fe_mul_op
add r1, sp, #32
add r0, sp, #0x60
bl fe_sq_op
add r2, sp, #0x60
add r1, sp, #0x40
add r0, sp, #0x40
bl fe_mul_op
add r1, sp, #0x40
add r0, sp, #0x60
bl fe_sq_op
mov r12, #4
L_curve25519_inv_1:
add r1, sp, #0x60
add r0, sp, #0x60
# r12 is the loop counter and caller-saved across fe_sq_op: preserve it.
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_curve25519_inv_1
add r2, sp, #0x40
add r1, sp, #0x60
add r0, sp, #0x40
bl fe_mul_op
add r1, sp, #0x40
add r0, sp, #0x60
bl fe_sq_op
mov r12, #9
L_curve25519_inv_2:
add r1, sp, #0x60
add r0, sp, #0x60
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_curve25519_inv_2
add r2, sp, #0x40
add r1, sp, #0x60
add r0, sp, #0x60
bl fe_mul_op
add r1, sp, #0x60
add r0, sp, #0x80
bl fe_sq_op
mov r12, #19
L_curve25519_inv_3:
add r1, sp, #0x80
add r0, sp, #0x80
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_curve25519_inv_3
add r2, sp, #0x60
add r1, sp, #0x80
add r0, sp, #0x60
bl fe_mul_op
mov r12, #10
L_curve25519_inv_4:
add r1, sp, #0x60
add r0, sp, #0x60
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_curve25519_inv_4
add r2, sp, #0x40
add r1, sp, #0x60
add r0, sp, #0x40
bl fe_mul_op
add r1, sp, #0x40
add r0, sp, #0x60
bl fe_sq_op
mov r12, #49
L_curve25519_inv_5:
add r1, sp, #0x60
add r0, sp, #0x60
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_curve25519_inv_5
add r2, sp, #0x40
add r1, sp, #0x60
add r0, sp, #0x60
bl fe_mul_op
add r1, sp, #0x60
add r0, sp, #0x80
bl fe_sq_op
mov r12, #0x63
L_curve25519_inv_6:
add r1, sp, #0x80
add r0, sp, #0x80
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_curve25519_inv_6
add r2, sp, #0x60
add r1, sp, #0x80
add r0, sp, #0x60
bl fe_mul_op
mov r12, #50
L_curve25519_inv_7:
add r1, sp, #0x60
add r0, sp, #0x60
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_curve25519_inv_7
add r2, sp, #0x40
add r1, sp, #0x60
add r0, sp, #0x40
bl fe_mul_op
mov r12, #5
L_curve25519_inv_8:
add r1, sp, #0x40
add r0, sp, #0x40
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_curve25519_inv_8
add r2, sp, #32
add r1, sp, #0x40
add r0, sp, #0
bl fe_mul_op
# out = x2 * z2^-1
mov r2, sp
ldr r1, [sp, #160]
ldr r0, [sp, #160]
bl fe_mul_op
mov r0, #0
add sp, sp, #0xbc
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size curve25519,.-curve25519
#else
.text
.align 4
.globl curve25519
.type curve25519, %function
# curve25519: X25519 scalar multiplication via the Montgomery ladder.
# Faster variant built only when WC_NO_CACHE_RESISTANT is defined: the
# conditional swap exchanges the stored working-element *pointers* (masked
# XOR, branch-free), so data addresses depend on scalar bits — not
# resistant to cache-timing attacks.
# In:  r0 = output x-coordinate, r1 = 32-byte scalar, r2 = base-point x.
# Out: r0 = 0.
# Saved state: [sp,#176]=out ptr, [sp,#160]=scalar ptr, [sp,#172]=base ptr,
# [sp,#180]=sp+0x40 ptr, [sp,#184]=sp ptr, [sp,#188]=sp+32 ptr,
# [sp,#164]=previous shifted scalar word, [sp,#168]=bit counter.
curve25519:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
sub sp, sp, #0xc0
str r0, [sp, #176]
str r1, [sp, #160]
str r2, [sp, #172]
add r5, sp, #0x40
add r4, sp, #32
str sp, [sp, #184]
str r5, [sp, #180]
str r4, [sp, #188]
mov r1, #0
str r1, [sp, #164]
# Initialise ladder: out = 1, sp+32 = 1, sp = 0, sp+0x40 = base x.
mov r4, #1
mov r5, #0
mov r6, #0
mov r7, #0
mov r8, #0
mov r9, #0
mov r10, #0
mov r11, #0
stm r0, {r4, r5, r6, r7, r8, r9, r10, r11}
add r3, sp, #32
stm r3, {r4, r5, r6, r7, r8, r9, r10, r11}
mov r4, #0
mov r3, sp
stm r3, {r4, r5, r6, r7, r8, r9, r10, r11}
add r3, sp, #0x40
# Copy
ldm r2, {r4, r5, r6, r7, r8, r9, r10, r11}
stm r3, {r4, r5, r6, r7, r8, r9, r10, r11}
# Single loop over scalar bits 254..0.
mov r2, #0xfe
L_curve25519_bits:
str r2, [sp, #168]
ldr r1, [sp, #160]
and r4, r2, #31
lsr r2, r2, #5
ldr r2, [r1, r2, lsl #2]
# Shift current bit into bit 31; comparing against the previous shifted
# word (bit 31) yields an all-ones mask only when the bit changed.
rsb r4, r4, #31
lsl r2, r2, r4
ldr r1, [sp, #164]
eor r1, r1, r2
asr r1, r1, #31
str r2, [sp, #164]
# Conditional Swap
# Swap the two pointer pairs stored at sp+0xb0..0xbc under the mask.
add r11, sp, #0xb0
ldm r11, {r4, r5, r6, r7}
eor r8, r4, r5
eor r9, r6, r7
and r8, r8, r1
and r9, r9, r1
eor r4, r4, r8
eor r5, r5, r8
eor r6, r6, r9
eor r7, r7, r9
stm r11, {r4, r5, r6, r7}
# Ladder step
ldr r3, [sp, #184]
ldr r2, [sp, #176]
add r1, sp, #0x80
ldr r0, [sp, #176]
bl fe_add_sub_op
ldr r3, [sp, #188]
ldr r2, [sp, #180]
add r1, sp, #0x60
ldr r0, [sp, #184]
bl fe_add_sub_op
ldr r2, [sp, #176]
add r1, sp, #0x60
ldr r0, [sp, #188]
bl fe_mul_op
add r2, sp, #0x80
ldr r1, [sp, #184]
ldr r0, [sp, #184]
bl fe_mul_op
add r1, sp, #0x80
add r0, sp, #0x60
bl fe_sq_op
ldr r1, [sp, #176]
add r0, sp, #0x80
bl fe_sq_op
ldr r3, [sp, #184]
ldr r2, [sp, #188]
ldr r1, [sp, #184]
ldr r0, [sp, #180]
bl fe_add_sub_op
add r2, sp, #0x60
add r1, sp, #0x80
ldr r0, [sp, #176]
bl fe_mul_op
add r2, sp, #0x60
add r1, sp, #0x80
add r0, sp, #0x80
bl fe_sub_op
ldr r1, [sp, #184]
ldr r0, [sp, #184]
bl fe_sq_op
add r1, sp, #0x80
ldr r0, [sp, #188]
bl fe_mul121666
ldr r1, [sp, #180]
ldr r0, [sp, #180]
bl fe_sq_op
ldr r2, [sp, #188]
add r1, sp, #0x60
add r0, sp, #0x60
bl fe_add_op
ldr r2, [sp, #184]
ldr r1, [sp, #172]
ldr r0, [sp, #188]
bl fe_mul_op
add r2, sp, #0x60
add r1, sp, #0x80
ldr r0, [sp, #184]
bl fe_mul_op
ldr r2, [sp, #168]
subs r2, r2, #1
bge L_curve25519_bits
# Pointers may have been swapped an odd number of times: copy the current
# z2 (via its saved pointer) down to sp for the inversion below.
ldr r1, [sp, #184]
# Copy
ldm r1, {r4, r5, r6, r7, r8, r9, r10, r11}
stm sp, {r4, r5, r6, r7, r8, r9, r10, r11}
# Invert
# z2^(p-2) mod p by square-and-multiply chain (Fermat inversion).
add r1, sp, #0
add r0, sp, #32
bl fe_sq_op
add r1, sp, #32
add r0, sp, #0x40
bl fe_sq_op
add r1, sp, #0x40
add r0, sp, #0x40
bl fe_sq_op
add r2, sp, #0x40
add r1, sp, #0
add r0, sp, #0x40
bl fe_mul_op
add r2, sp, #0x40
add r1, sp, #32
add r0, sp, #32
bl fe_mul_op
add r1, sp, #32
add r0, sp, #0x60
bl fe_sq_op
add r2, sp, #0x60
add r1, sp, #0x40
add r0, sp, #0x40
bl fe_mul_op
add r1, sp, #0x40
add r0, sp, #0x60
bl fe_sq_op
mov r12, #4
L_curve25519_inv_1:
add r1, sp, #0x60
add r0, sp, #0x60
# r12 is the loop counter and caller-saved across fe_sq_op: preserve it.
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_curve25519_inv_1
add r2, sp, #0x40
add r1, sp, #0x60
add r0, sp, #0x40
bl fe_mul_op
add r1, sp, #0x40
add r0, sp, #0x60
bl fe_sq_op
mov r12, #9
L_curve25519_inv_2:
add r1, sp, #0x60
add r0, sp, #0x60
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_curve25519_inv_2
add r2, sp, #0x40
add r1, sp, #0x60
add r0, sp, #0x60
bl fe_mul_op
add r1, sp, #0x60
add r0, sp, #0x80
bl fe_sq_op
mov r12, #19
L_curve25519_inv_3:
add r1, sp, #0x80
add r0, sp, #0x80
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_curve25519_inv_3
add r2, sp, #0x60
add r1, sp, #0x80
add r0, sp, #0x60
bl fe_mul_op
mov r12, #10
L_curve25519_inv_4:
add r1, sp, #0x60
add r0, sp, #0x60
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_curve25519_inv_4
add r2, sp, #0x40
add r1, sp, #0x60
add r0, sp, #0x40
bl fe_mul_op
add r1, sp, #0x40
add r0, sp, #0x60
bl fe_sq_op
mov r12, #49
L_curve25519_inv_5:
add r1, sp, #0x60
add r0, sp, #0x60
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_curve25519_inv_5
add r2, sp, #0x40
add r1, sp, #0x60
add r0, sp, #0x60
bl fe_mul_op
add r1, sp, #0x60
add r0, sp, #0x80
bl fe_sq_op
mov r12, #0x63
L_curve25519_inv_6:
add r1, sp, #0x80
add r0, sp, #0x80
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_curve25519_inv_6
add r2, sp, #0x60
add r1, sp, #0x80
add r0, sp, #0x60
bl fe_mul_op
mov r12, #50
L_curve25519_inv_7:
add r1, sp, #0x60
add r0, sp, #0x60
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_curve25519_inv_7
add r2, sp, #0x40
add r1, sp, #0x60
add r0, sp, #0x40
bl fe_mul_op
mov r12, #5
L_curve25519_inv_8:
add r1, sp, #0x40
add r0, sp, #0x40
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_curve25519_inv_8
add r2, sp, #32
add r1, sp, #0x40
add r0, sp, #0
bl fe_mul_op
# out = x2 * z2^-1
ldr r2, [sp, #184]
ldr r1, [sp, #176]
ldr r0, [sp, #176]
bl fe_mul_op
# Ensure result is less than modulus
# Final canonical reduction: conditionally add 19 and clear bit 255.
ldr r0, [sp, #176]
ldm r0, {r4, r5, r6, r7, r8, r9, r10, r11}
mov r2, #19
and r2, r2, r11, asr #31
adds r4, r4, r2
adcs r5, r5, #0
adcs r6, r6, #0
adcs r7, r7, #0
adcs r8, r8, #0
adcs r9, r9, #0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r11, r11, #0x80000000
#else
bfc r11, #31, #1
#endif
adcs r10, r10, #0
adc r11, r11, #0
stm r0, {r4, r5, r6, r7, r8, r9, r10, r11}
mov r0, #0
add sp, sp, #0xc0
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size curve25519,.-curve25519
#endif /* WC_NO_CACHE_RESISTANT */
#endif /* HAVE_CURVE25519 */
#ifdef HAVE_ED25519
.text
.align 4
.globl fe_invert
.type fe_invert, %function
# fe_invert: r (r0) = a (r1)^-1 mod 2^255-19, by raising a to p-2
# (Fermat's little theorem) with a fixed square-and-multiply chain.
# Temporaries t0..t3 at sp+0, sp+32, sp+0x40, sp+0x60; saved r0/r1 at
# [sp,#128]/[sp,#132].
fe_invert:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
sub sp, sp, #0x88
# Invert
str r0, [sp, #128]
str r1, [sp, #132]
ldr r1, [sp, #132]
mov r0, sp
bl fe_sq_op
mov r1, sp
add r0, sp, #32
bl fe_sq_op
add r1, sp, #32
add r0, sp, #32
bl fe_sq_op
add r2, sp, #32
ldr r1, [sp, #132]
add r0, sp, #32
bl fe_mul_op
add r2, sp, #32
mov r1, sp
mov r0, sp
bl fe_mul_op
mov r1, sp
add r0, sp, #0x40
bl fe_sq_op
add r2, sp, #0x40
add r1, sp, #32
add r0, sp, #32
bl fe_mul_op
add r1, sp, #32
add r0, sp, #0x40
bl fe_sq_op
mov r12, #4
L_fe_invert1:
add r1, sp, #0x40
add r0, sp, #0x40
# r12 is the loop counter and caller-saved across fe_sq_op: preserve it.
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_fe_invert1
add r2, sp, #32
add r1, sp, #0x40
add r0, sp, #32
bl fe_mul_op
add r1, sp, #32
add r0, sp, #0x40
bl fe_sq_op
mov r12, #9
L_fe_invert2:
add r1, sp, #0x40
add r0, sp, #0x40
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_fe_invert2
add r2, sp, #32
add r1, sp, #0x40
add r0, sp, #0x40
bl fe_mul_op
add r1, sp, #0x40
add r0, sp, #0x60
bl fe_sq_op
mov r12, #19
L_fe_invert3:
add r1, sp, #0x60
add r0, sp, #0x60
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_fe_invert3
add r2, sp, #0x40
add r1, sp, #0x60
add r0, sp, #0x40
bl fe_mul_op
mov r12, #10
L_fe_invert4:
add r1, sp, #0x40
add r0, sp, #0x40
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_fe_invert4
add r2, sp, #32
add r1, sp, #0x40
add r0, sp, #32
bl fe_mul_op
add r1, sp, #32
add r0, sp, #0x40
bl fe_sq_op
mov r12, #49
L_fe_invert5:
add r1, sp, #0x40
add r0, sp, #0x40
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_fe_invert5
add r2, sp, #32
add r1, sp, #0x40
add r0, sp, #0x40
bl fe_mul_op
add r1, sp, #0x40
add r0, sp, #0x60
bl fe_sq_op
mov r12, #0x63
L_fe_invert6:
add r1, sp, #0x60
add r0, sp, #0x60
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_fe_invert6
add r2, sp, #0x40
add r1, sp, #0x60
add r0, sp, #0x40
bl fe_mul_op
mov r12, #50
L_fe_invert7:
add r1, sp, #0x40
add r0, sp, #0x40
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_fe_invert7
add r2, sp, #32
add r1, sp, #0x40
add r0, sp, #32
bl fe_mul_op
mov r12, #5
L_fe_invert8:
add r1, sp, #32
add r0, sp, #32
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_fe_invert8
mov r2, sp
add r1, sp, #32
ldr r0, [sp, #128]
bl fe_mul_op
# Restore original argument registers before returning.
ldr r1, [sp, #132]
ldr r0, [sp, #128]
add sp, sp, #0x88
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size fe_invert,.-fe_invert
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
.text
.align 4
.globl fe_sq2
.type fe_sq2, %function
# fe_sq2: r (r0) = 2 * a (r1)^2 mod 2^255-19.
# Variant for cores without umaal (WOLFSSL_ARM_ARCH < 6): computes the
# off-diagonal products A[i]*A[j] (i<j) with umull/umlal carry chains,
# doubles them, adds the diagonal squares, reduces mod p, then doubles.
# The 16-word intermediate product is staged at sp+4..sp+63; the saved
# result pointer lives at [sp,#64].  Flags carry across instruction
# boundaries throughout — do not reorder.
fe_sq2:
push {lr}
sub sp, sp, #0x44
str r0, [sp, #64]
# Square * 2
mov r0, #0
ldr r12, [r1]
# A[0] * A[1]
ldr lr, [r1, #4]
umull r4, r5, r12, lr
# A[0] * A[3]
ldr lr, [r1, #12]
umull r6, r7, r12, lr
# A[0] * A[5]
ldr lr, [r1, #20]
umull r8, r9, r12, lr
# A[0] * A[7]
ldr lr, [r1, #28]
umull r10, r3, r12, lr
# A[0] * A[2]
ldr lr, [r1, #8]
mov r11, #0
umlal r5, r11, r12, lr
adds r6, r6, r11
# A[0] * A[4]
ldr lr, [r1, #16]
adcs r7, r7, #0
adc r11, r0, #0
umlal r7, r11, r12, lr
adds r8, r8, r11
# A[0] * A[6]
ldr lr, [r1, #24]
adcs r9, r9, #0
adc r11, r0, #0
umlal r9, r11, r12, lr
adds r10, r10, r11
adcs r3, r3, #0
str r4, [sp, #4]
str r5, [sp, #8]
# A[1] * A[2]
ldr r12, [r1, #4]
ldr lr, [r1, #8]
mov r11, #0
umlal r6, r11, r12, lr
str r6, [sp, #12]
adds r7, r7, r11
# A[1] * A[3]
ldr lr, [r1, #12]
adc r11, r0, #0
umlal r7, r11, r12, lr
str r7, [sp, #16]
adds r8, r8, r11
# A[1] * A[4]
ldr lr, [r1, #16]
adc r11, r0, #0
umlal r8, r11, r12, lr
adds r9, r9, r11
# A[1] * A[5]
ldr lr, [r1, #20]
adc r11, r0, #0
umlal r9, r11, r12, lr
adds r10, r10, r11
# A[1] * A[6]
ldr lr, [r1, #24]
adc r11, r0, #0
umlal r10, r11, r12, lr
adds r3, r3, r11
# A[1] * A[7]
ldr lr, [r1, #28]
adc r4, r0, #0
umlal r3, r4, r12, lr
# A[2] * A[3]
ldr r12, [r1, #8]
ldr lr, [r1, #12]
mov r11, #0
umlal r8, r11, r12, lr
str r8, [sp, #20]
adds r9, r9, r11
# A[2] * A[4]
ldr lr, [r1, #16]
adc r11, r0, #0
umlal r9, r11, r12, lr
str r9, [sp, #24]
adds r10, r10, r11
# A[2] * A[5]
ldr lr, [r1, #20]
adc r11, r0, #0
umlal r10, r11, r12, lr
adds r3, r3, r11
# A[2] * A[6]
ldr lr, [r1, #24]
adc r11, r0, #0
umlal r3, r11, r12, lr
adds r4, r4, r11
# A[2] * A[7]
ldr lr, [r1, #28]
adc r5, r0, #0
umlal r4, r5, r12, lr
# A[3] * A[4]
ldr r12, [r1, #12]
ldr lr, [r1, #16]
mov r11, #0
umlal r10, r11, r12, lr
str r10, [sp, #28]
adds r3, r3, r11
# A[3] * A[5]
ldr lr, [r1, #20]
adc r11, r0, #0
umlal r3, r11, r12, lr
adds r4, r4, r11
# A[3] * A[6]
ldr lr, [r1, #24]
adc r11, r0, #0
umlal r4, r11, r12, lr
adds r5, r5, r11
# A[3] * A[7]
ldr lr, [r1, #28]
adc r6, r0, #0
umlal r5, r6, r12, lr
# A[4] * A[5]
ldr r12, [r1, #16]
ldr lr, [r1, #20]
mov r11, #0
umlal r4, r11, r12, lr
adds r5, r5, r11
# A[4] * A[6]
ldr lr, [r1, #24]
adc r11, r0, #0
umlal r5, r11, r12, lr
adds r6, r6, r11
# A[4] * A[7]
ldr lr, [r1, #28]
adc r7, r0, #0
umlal r6, r7, r12, lr
# A[5] * A[6]
ldr r12, [r1, #20]
ldr lr, [r1, #24]
mov r11, #0
umlal r6, r11, r12, lr
adds r7, r7, r11
# A[5] * A[7]
ldr lr, [r1, #28]
adc r8, r0, #0
umlal r7, r8, r12, lr
# A[6] * A[7]
ldr r12, [r1, #24]
ldr lr, [r1, #28]
mov r9, #0
umlal r8, r9, r12, lr
add lr, sp, #32
stm lr, {r3, r4, r5, r6, r7, r8, r9}
# Double the off-diagonal sum (words 1..15), carry rippling in order.
add lr, sp, #4
ldm lr, {r4, r5, r6, r7, r8, r9, r10}
adds r4, r4, r4
adcs r5, r5, r5
adcs r6, r6, r6
adcs r7, r7, r7
adcs r8, r8, r8
adcs r9, r9, r9
adcs r10, r10, r10
stm lr!, {r4, r5, r6, r7, r8, r9, r10}
ldm lr, {r3, r4, r5, r6, r7, r8, r9}
adcs r3, r3, r3
adcs r4, r4, r4
adcs r5, r5, r5
adcs r6, r6, r6
adcs r7, r7, r7
adcs r8, r8, r8
adcs r9, r9, r9
adc r10, r0, #0
stm lr, {r3, r4, r5, r6, r7, r8, r9, r10}
# Add the diagonal squares A[i]*A[i].
add lr, sp, #4
ldm lr, {r4, r5, r6, r7, r8, r9, r10}
mov lr, sp
# A[0] * A[0]
ldr r12, [r1]
umull r3, r11, r12, r12
adds r4, r4, r11
# A[1] * A[1]
ldr r12, [r1, #4]
adcs r5, r5, #0
adc r11, r0, #0
umlal r5, r11, r12, r12
adds r6, r6, r11
# A[2] * A[2]
ldr r12, [r1, #8]
adcs r7, r7, #0
adc r11, r0, #0
umlal r7, r11, r12, r12
adds r8, r8, r11
# A[3] * A[3]
ldr r12, [r1, #12]
adcs r9, r9, #0
adc r11, r0, #0
umlal r9, r11, r12, r12
adds r10, r10, r11
stm lr!, {r3, r4, r5, r6, r7, r8, r9, r10}
ldm lr, {r3, r4, r5, r6, r7, r8, r9, r10}
# A[4] * A[4]
ldr r12, [r1, #16]
adcs r3, r3, #0
adc r11, r0, #0
umlal r3, r11, r12, r12
adds r4, r4, r11
# A[5] * A[5]
ldr r12, [r1, #20]
adcs r5, r5, #0
adc r11, r0, #0
umlal r5, r11, r12, r12
adds r6, r6, r11
# A[6] * A[6]
ldr r12, [r1, #24]
adcs r7, r7, #0
adc r11, r0, #0
umlal r7, r11, r12, r12
adds r8, r8, r11
# A[7] * A[7]
ldr r12, [r1, #28]
adcs r9, r9, #0
adc r10, r10, #0
umlal r9, r10, r12, r12
# Reduce
# Fold the high 8 words back into the low 8: 2^256 == 38 (mod p).
ldr r2, [sp, #28]
mov lr, sp
mov r12, #38
umull r10, r11, r12, r10
adds r10, r10, r2
adc r11, r11, #0
mov r12, #19
lsl r11, r11, #1
orr r11, r11, r10, LSR #31
mul r11, r12, r11
ldm lr!, {r1, r2}
mov r12, #38
adds r1, r1, r11
adc r11, r0, #0
umlal r1, r11, r3, r12
adds r2, r2, r11
adc r11, r0, #0
umlal r2, r11, r4, r12
ldm lr!, {r3, r4}
adds r3, r3, r11
adc r11, r0, #0
umlal r3, r11, r5, r12
adds r4, r4, r11
adc r11, r0, #0
umlal r4, r11, r6, r12
ldm lr!, {r5, r6}
adds r5, r5, r11
adc r11, r0, #0
umlal r5, r11, r7, r12
adds r6, r6, r11
adc r11, r0, #0
umlal r6, r11, r8, r12
ldm lr!, {r7, r8}
adds r7, r7, r11
adc r11, r0, #0
umlal r7, r11, r9, r12
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r10, r10, #0x80000000
#else
bfc r10, #31, #1
#endif
adds r8, r10, r11
# Reduce if top bit set
mov r12, #19
and r11, r12, r8, ASR #31
adds r1, r1, r11
adcs r2, r2, #0
adcs r3, r3, #0
adcs r4, r4, #0
adcs r5, r5, #0
adcs r6, r6, #0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r8, r8, #0x80000000
#else
bfc r8, #31, #1
#endif
adcs r7, r7, #0
adc r8, r8, #0
# Double
adds r1, r1, r1
adcs r2, r2, r2
adcs r3, r3, r3
adcs r4, r4, r4
adcs r5, r5, r5
adcs r6, r6, r6
adcs r7, r7, r7
adc r8, r8, r8
# Reduce if top bit set
mov r12, #19
and r11, r12, r8, ASR #31
adds r1, r1, r11
adcs r2, r2, #0
adcs r3, r3, #0
adcs r4, r4, #0
adcs r5, r5, #0
adcs r6, r6, #0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r8, r8, #0x80000000
#else
bfc r8, #31, #1
#endif
adcs r7, r7, #0
adc r8, r8, #0
# Store
ldr r0, [sp, #64]
stm r0, {r1, r2, r3, r4, r5, r6, r7, r8}
add sp, sp, #0x44
pop {pc}
.size fe_sq2,.-fe_sq2
#else
.text
.align 4
.globl fe_sq2
.type fe_sq2, %function
# fe_sq2: r (r0) = 2 * a (r1)^2 mod 2^255-19.
# ARMv6+ variant built around umaal (multiply with double accumulate).
# The low half of the product is spilled to the stack as it is produced
# and popped back during reduction; r0/r1 are saved at [sp,#28]/[sp,#32]
# and restored at the end.  Flags carry across instructions — do not
# reorder.
fe_sq2:
push {lr}
sub sp, sp, #36
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r0, [sp, #28]
str r1, [sp, #32]
#else
strd r0, r1, [sp, #28]
#endif
ldm r1, {r0, r1, r2, r3, r4, r5, r6, r7}
# Square * 2
umull r9, r10, r0, r0
umull r11, r12, r0, r1
adds r11, r11, r11
mov lr, #0
umaal r10, r11, lr, lr
# R[0..1] spilled to the stack (popped again in the reduce step).
stm sp, {r9, r10}
mov r8, lr
umaal r8, r12, r0, r2
adcs r8, r8, r8
umaal r8, r11, r1, r1
umull r9, r10, r0, r3
umaal r9, r12, r1, r2
adcs r9, r9, r9
umaal r9, r11, lr, lr
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [sp, #8]
str r9, [sp, #12]
#else
strd r8, r9, [sp, #8]
#endif
mov r9, lr
umaal r9, r10, r0, r4
umaal r9, r12, r1, r3
adcs r9, r9, r9
umaal r9, r11, r2, r2
str r9, [sp, #16]
umull r9, r8, r0, r5
umaal r9, r12, r1, r4
umaal r9, r10, r2, r3
adcs r9, r9, r9
umaal r9, r11, lr, lr
str r9, [sp, #20]
mov r9, lr
umaal r9, r8, r0, r6
umaal r9, r12, r1, r5
umaal r9, r10, r2, r4
adcs r9, r9, r9
umaal r9, r11, r3, r3
str r9, [sp, #24]
umull r0, r9, r0, r7
umaal r0, r8, r1, r6
umaal r0, r12, r2, r5
umaal r0, r10, r3, r4
adcs r0, r0, r0
umaal r0, r11, lr, lr
# R[7] = r0
umaal r9, r8, r1, r7
umaal r9, r10, r2, r6
umaal r12, r9, r3, r5
adcs r12, r12, r12
umaal r12, r11, r4, r4
# R[8] = r12
umaal r9, r8, r2, r7
umaal r10, r9, r3, r6
mov r2, lr
umaal r10, r2, r4, r5
adcs r10, r10, r10
umaal r11, r10, lr, lr
# R[9] = r11
umaal r2, r8, r3, r7
umaal r2, r9, r4, r6
adcs r3, r2, r2
umaal r10, r3, r5, r5
# R[10] = r10
mov r1, lr
umaal r1, r8, r4, r7
umaal r1, r9, r5, r6
adcs r4, r1, r1
umaal r3, r4, lr, lr
# R[11] = r3
umaal r8, r9, r5, r7
adcs r8, r8, r8
umaal r4, r8, r6, r6
# R[12] = r4
mov r5, lr
umaal r5, r9, r6, r7
adcs r5, r5, r5
umaal r8, r5, lr, lr
# R[13] = r8
adcs r9, r9, r9
umaal r9, r5, r7, r7
adcs r7, r5, lr
# R[14] = r9
# R[15] = r7
# Reduce
# Fold the high half back into the low half: 2^256 == 38 (mod p);
# the top word uses 37/19 first so its bit 31 can be cleared below.
mov r6, #37
umaal r7, r0, r7, r6
mov r6, #19
lsl r0, r0, #1
orr r0, r0, r7, lsr #31
mul lr, r0, r6
# Pop the spilled low words R[0..6] back while accumulating high*38.
pop {r0, r1}
mov r6, #38
umaal r0, lr, r12, r6
umaal r1, lr, r11, r6
mov r12, r3
mov r11, r4
pop {r2, r3, r4}
umaal r2, lr, r10, r6
umaal r3, lr, r12, r6
umaal r4, lr, r11, r6
mov r12, r6
pop {r5, r6}
umaal r5, lr, r8, r12
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r7, r7, #0x80000000
#else
bfc r7, #31, #1
#endif
umaal r6, lr, r9, r12
add r7, r7, lr
# Reduce if top bit set
mov r11, #19
and r12, r11, r7, ASR #31
adds r0, r0, r12
adcs r1, r1, #0
adcs r2, r2, #0
adcs r3, r3, #0
adcs r4, r4, #0
adcs r5, r5, #0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r7, r7, #0x80000000
#else
bfc r7, #31, #1
#endif
adcs r6, r6, #0
adc r7, r7, #0
# Double
adds r0, r0, r0
adcs r1, r1, r1
adcs r2, r2, r2
adcs r3, r3, r3
adcs r4, r4, r4
adcs r5, r5, r5
adcs r6, r6, r6
adc r7, r7, r7
# Reduce if top bit set
mov r11, #19
and r12, r11, r7, ASR #31
adds r0, r0, r12
adcs r1, r1, #0
adcs r2, r2, #0
adcs r3, r3, #0
adcs r4, r4, #0
adcs r5, r5, #0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r7, r7, #0x80000000
#else
bfc r7, #31, #1
#endif
adcs r6, r6, #0
adc r7, r7, #0
# r12/lr = saved result/input pointers (stored at sp+28/sp+32 above).
pop {r12, lr}
# Store
stm r12, {r0, r1, r2, r3, r4, r5, r6, r7}
mov r0, r12
mov r1, lr
pop {pc}
.size fe_sq2,.-fe_sq2
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
.text
.align 4
.globl fe_pow22523
.type fe_pow22523, %function
# fe_pow22523: r (r0) = a (r1) raised to the power its name denotes
# (2^252 - 3, per the standard Ed25519 pow22523 chain), mod 2^255-19.
# Fixed square-and-multiply chain; temporaries at sp+0, sp+32, sp+0x40,
# saved r0/r1 at [sp,#96]/[sp,#100].
fe_pow22523:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
sub sp, sp, #0x68
# pow22523
str r0, [sp, #96]
str r1, [sp, #100]
ldr r1, [sp, #100]
mov r0, sp
bl fe_sq_op
mov r1, sp
add r0, sp, #32
bl fe_sq_op
add r1, sp, #32
add r0, sp, #32
bl fe_sq_op
add r2, sp, #32
ldr r1, [sp, #100]
add r0, sp, #32
bl fe_mul_op
add r2, sp, #32
mov r1, sp
mov r0, sp
bl fe_mul_op
mov r1, sp
mov r0, sp
bl fe_sq_op
mov r2, sp
add r1, sp, #32
mov r0, sp
bl fe_mul_op
mov r1, sp
add r0, sp, #32
bl fe_sq_op
mov r12, #4
L_fe_pow22523_1:
add r1, sp, #32
add r0, sp, #32
# r12 is the loop counter and caller-saved across fe_sq_op: preserve it.
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_fe_pow22523_1
mov r2, sp
add r1, sp, #32
mov r0, sp
bl fe_mul_op
mov r1, sp
add r0, sp, #32
bl fe_sq_op
mov r12, #9
L_fe_pow22523_2:
add r1, sp, #32
add r0, sp, #32
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_fe_pow22523_2
mov r2, sp
add r1, sp, #32
add r0, sp, #32
bl fe_mul_op
add r1, sp, #32
add r0, sp, #0x40
bl fe_sq_op
mov r12, #19
L_fe_pow22523_3:
add r1, sp, #0x40
add r0, sp, #0x40
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_fe_pow22523_3
add r2, sp, #32
add r1, sp, #0x40
add r0, sp, #32
bl fe_mul_op
mov r12, #10
L_fe_pow22523_4:
add r1, sp, #32
add r0, sp, #32
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_fe_pow22523_4
mov r2, sp
add r1, sp, #32
mov r0, sp
bl fe_mul_op
mov r1, sp
add r0, sp, #32
bl fe_sq_op
mov r12, #49
L_fe_pow22523_5:
add r1, sp, #32
add r0, sp, #32
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_fe_pow22523_5
mov r2, sp
add r1, sp, #32
add r0, sp, #32
bl fe_mul_op
add r1, sp, #32
add r0, sp, #0x40
bl fe_sq_op
mov r12, #0x63
L_fe_pow22523_6:
add r1, sp, #0x40
add r0, sp, #0x40
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_fe_pow22523_6
add r2, sp, #32
add r1, sp, #0x40
add r0, sp, #32
bl fe_mul_op
mov r12, #50
L_fe_pow22523_7:
add r1, sp, #32
add r0, sp, #32
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_fe_pow22523_7
mov r2, sp
add r1, sp, #32
mov r0, sp
bl fe_mul_op
mov r12, #2
L_fe_pow22523_8:
mov r1, sp
mov r0, sp
push {r12}
bl fe_sq_op
pop {r12}
subs r12, r12, #1
bne L_fe_pow22523_8
ldr r2, [sp, #100]
mov r1, sp
ldr r0, [sp, #96]
bl fe_mul_op
# Restore original argument registers before returning.
ldr r1, [sp, #100]
ldr r0, [sp, #96]
add sp, sp, #0x68
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size fe_pow22523,.-fe_pow22523
.text
.align 4
.globl ge_p1p1_to_p2
.type ge_p1p1_to_p2, %function
# ge_p1p1_to_p2: convert an Ed25519 group element from the completed
# (P1P1) representation (r1) to projective (P2) form (r0).
# Field offsets within each struct: +0 = X, +32 = Y, +0x40 = Z, +0x60 = T.
# Computes: X3 = X * T, Y3 = Y * Z, Z3 = Z * T (all via fe_mul_op).
ge_p1p1_to_p2:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
sub sp, sp, #8
str r0, [sp]
str r1, [sp, #4]
add r2, r1, #0x60
bl fe_mul_op
ldr r0, [sp]
ldr r1, [sp, #4]
add r2, r1, #0x40
add r1, r1, #32
add r0, r0, #32
bl fe_mul_op
ldr r0, [sp]
ldr r1, [sp, #4]
add r2, r1, #0x60
add r1, r1, #0x40
add r0, r0, #0x40
bl fe_mul_op
add sp, sp, #8
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size ge_p1p1_to_p2,.-ge_p1p1_to_p2
.text
.align 4
.globl ge_p1p1_to_p3
.type ge_p1p1_to_p3, %function
# ge_p1p1_to_p3: convert an Ed25519 group element from the completed
# (P1P1) representation (r1) to extended (P3) form (r0).
# Field offsets within each struct: +0 = X, +32 = Y, +0x40 = Z, +0x60 = T.
# Same as ge_p1p1_to_p2 plus the extended coordinate: T3 = X * Y.
ge_p1p1_to_p3:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
sub sp, sp, #8
str r0, [sp]
str r1, [sp, #4]
add r2, r1, #0x60
bl fe_mul_op
ldr r0, [sp]
ldr r1, [sp, #4]
add r2, r1, #0x40
add r1, r1, #32
add r0, r0, #32
bl fe_mul_op
ldr r0, [sp]
ldr r1, [sp, #4]
add r2, r1, #0x60
add r1, r1, #0x40
add r0, r0, #0x40
bl fe_mul_op
ldr r0, [sp]
ldr r1, [sp, #4]
add r2, r1, #32
add r0, r0, #0x60
bl fe_mul_op
add sp, sp, #8
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size ge_p1p1_to_p3,.-ge_p1p1_to_p3
.text
.align 4
.globl ge_p2_dbl
.type ge_p2_dbl, %function
# ge_p2_dbl: double an Ed25519 group element: r (r0, P1P1 form) =
# 2 * p (r1, P2 form), using the standard dbl formula built from
# fe_sq/fe_add/fe_sub/fe_add_sub and fe_sq2 (for 2*Z^2).
# Field offsets: +0 = X, +32 = Y, +0x40 = Z, +0x60 = T.
# NOTE: r0/r1 are advanced between calls below; the address arithmetic
# relies on each fe_* op preserving its pointer arguments' values in
# r0/r1 on return.
ge_p2_dbl:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
sub sp, sp, #8
str r0, [sp]
str r1, [sp, #4]
bl fe_sq_op
ldr r0, [sp]
ldr r1, [sp, #4]
add r1, r1, #32
add r0, r0, #0x40
bl fe_sq_op
ldr r0, [sp]
ldr r1, [sp, #4]
add r2, r1, #32
add r0, r0, #32
bl fe_add_op
mov r1, r0
add r0, r0, #0x40
bl fe_sq_op
ldr r0, [sp]
mov r3, r0
add r2, r0, #0x40
add r1, r0, #0x40
add r0, r0, #32
bl fe_add_sub_op
mov r2, r0
add r1, r0, #0x40
sub r0, r0, #32
bl fe_sub_op
ldr r1, [sp, #4]
add r1, r1, #0x40
add r0, r0, #0x60
bl fe_sq2
sub r2, r0, #32
mov r1, r0
bl fe_sub_op
add sp, sp, #8
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size ge_p2_dbl,.-ge_p2_dbl
.text
.align 4
.globl ge_madd
.type ge_madd, %function
# ge_madd: mixed point addition: r (r0, P1P1 form) = p (r1, P3 form) +
# q (r2, precomputed form).  Sequence of fe_add/fe_sub/fe_mul/
# fe_add_sub calls mirrors the reference Ed25519 ge_madd; the inline
# "Double" section computes 2*Z with immediate mod-p reduction.
# Saved pointers: [sp]=r, [sp,#4]=p, [sp,#8]=q.
ge_madd:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
sub sp, sp, #12
str r0, [sp]
str r1, [sp, #4]
str r2, [sp, #8]
mov r2, r1
add r1, r1, #32
bl fe_add_op
ldr r1, [sp, #4]
mov r2, r1
add r1, r1, #32
add r0, r0, #32
bl fe_sub_op
ldr r2, [sp, #8]
sub r1, r0, #32
add r0, r0, #32
bl fe_mul_op
ldr r0, [sp]
ldr r2, [sp, #8]
add r2, r2, #32
add r1, r0, #32
add r0, r0, #32
bl fe_mul_op
ldr r0, [sp]
ldr r1, [sp, #8]
ldr r2, [sp, #4]
add r2, r2, #0x60
add r1, r1, #0x40
add r0, r0, #0x60
bl fe_mul_op
ldr r0, [sp]
add r3, r0, #32
add r2, r0, #0x40
mov r1, r0
add r0, r0, #32
bl fe_add_sub_op
ldr r1, [sp, #4]
add r1, r1, #0x40
add r0, r0, #32
# Double
# 8-word double of the element at r1 with immediate reduction:
# carry-out (x2, plus top bit) times 19 is folded back in, bit 255
# cleared via bic/bfc.
ldm r1, {r4, r5, r6, r7, r8, r9, r10, r11}
adds r4, r4, r4
adcs r5, r5, r5
adcs r6, r6, r6
adcs r7, r7, r7
adcs r8, r8, r8
adcs r9, r9, r9
adcs r10, r10, r10
mov lr, #0
adcs r11, r11, r11
adc lr, lr, #0
mov r12, #19
lsl lr, lr, #1
orr lr, lr, r11, lsr #31
mul r12, lr, r12
adds r4, r4, r12
adcs r5, r5, #0
adcs r6, r6, #0
adcs r7, r7, #0
adcs r8, r8, #0
adcs r9, r9, #0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r11, r11, #0x80000000
#else
bfc r11, #31, #1
#endif
adcs r10, r10, #0
adc r11, r11, #0
stm r0, {r4, r5, r6, r7, r8, r9, r10, r11}
# Done Double
add r3, r0, #32
add r1, r0, #32
bl fe_add_sub_op
add sp, sp, #12
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size ge_madd,.-ge_madd
.text
.align 4
.globl ge_msub
.type ge_msub, %function
# ge_msub: mixed point subtraction: r (r0, P1P1 form) = p (r1, P3 form) -
# q (r2, precomputed form).  Mirror of ge_madd with the precomputed
# element's fields used in the opposite order.
# Saved pointers: [sp]=r, [sp,#4]=p, [sp,#8]=q.
ge_msub:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
sub sp, sp, #12
str r0, [sp]
str r1, [sp, #4]
str r2, [sp, #8]
mov r2, r1
add r1, r1, #32
bl fe_add_op
ldr r1, [sp, #4]
mov r2, r1
add r1, r1, #32
add r0, r0, #32
bl fe_sub_op
ldr r2, [sp, #8]
add r2, r2, #32
sub r1, r0, #32
add r0, r0, #32
bl fe_mul_op
ldr r0, [sp]
ldr r2, [sp, #8]
add r1, r0, #32
add r0, r0, #32
bl fe_mul_op
ldr r0, [sp]
ldr r1, [sp, #8]
ldr r2, [sp, #4]
add r2, r2, #0x60
add r1, r1, #0x40
add r0, r0, #0x60
bl fe_mul_op
ldr r0, [sp]
add r3, r0, #32
add r2, r0, #0x40
mov r1, r0
add r0, r0, #32
bl fe_add_sub_op
ldr r1, [sp, #4]
add r1, r1, #0x40
add r0, r0, #32
# Double
# 8-word double of the element at r1 with immediate mod-p reduction
# (fold carry*19, clear bit 255).
ldm r1, {r4, r5, r6, r7, r8, r9, r10, r11}
adds r4, r4, r4
adcs r5, r5, r5
adcs r6, r6, r6
adcs r7, r7, r7
adcs r8, r8, r8
adcs r9, r9, r9
adcs r10, r10, r10
mov lr, #0
adcs r11, r11, r11
adc lr, lr, #0
mov r12, #19
lsl lr, lr, #1
orr lr, lr, r11, lsr #31
mul r12, lr, r12
adds r4, r4, r12
adcs r5, r5, #0
adcs r6, r6, #0
adcs r7, r7, #0
adcs r8, r8, #0
adcs r9, r9, #0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r11, r11, #0x80000000
#else
bfc r11, #31, #1
#endif
adcs r10, r10, #0
adc r11, r11, #0
stm r0, {r4, r5, r6, r7, r8, r9, r10, r11}
# Done Double
add r3, r0, #32
mov r1, r0
add r0, r0, #32
bl fe_add_sub_op
add sp, sp, #12
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size ge_msub,.-ge_msub
.text
.align 4
.globl ge_add
.type ge_add, %function
# ge_add - group-element addition (Ed25519-style group code; see the
# HAVE_ED25519_SIGN section later in this file).
# In:  r0 = result point, r1 = first input point, r2 = second input
# point.  Field elements are 32-byte limb arrays at offsets
# 0x00/0x20/0x40/0x60, processed by the external fe_* helpers.
# Uses a 32-byte stack temporary at [sp, #12] for the doubled field
# element below.
# NOTE(review): the X/Y/Z/T meaning of each offset is fixed by the C
# callers outside this view - confirm against the ge_add() C prototype.
ge_add:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
sub sp, sp, #44
# Spill the three pointer arguments so they can be reloaded between
# helper calls.
str r0, [sp]
str r1, [sp, #4]
str r2, [sp, #8]
mov r3, r1
add r2, r1, #32
add r1, r0, #32
bl fe_add_sub_op
ldr r2, [sp, #8]
mov r1, r0
add r0, r0, #0x40
bl fe_mul_op
ldr r0, [sp]
ldr r2, [sp, #8]
add r2, r2, #32
add r1, r0, #32
add r0, r0, #32
bl fe_mul_op
ldr r0, [sp]
ldr r1, [sp, #8]
ldr r2, [sp, #4]
add r2, r2, #0x60
add r1, r1, #0x60
add r0, r0, #0x60
bl fe_mul_op
ldr r0, [sp]
ldr r1, [sp, #4]
ldr r2, [sp, #8]
add r2, r2, #0x40
add r1, r1, #0x40
bl fe_mul_op
ldr r1, [sp]
add r0, sp, #12
# Double
# 2*a mod 2^255-19 into the stack temporary: double each limb with
# carry, fold the overflow past bit 255 back in as *19
# (2^255 == 19 mod p), then clear bit 31 of the top limb.
ldm r1, {r4, r5, r6, r7, r8, r9, r10, r11}
adds r4, r4, r4
adcs r5, r5, r5
adcs r6, r6, r6
adcs r7, r7, r7
adcs r8, r8, r8
adcs r9, r9, r9
adcs r10, r10, r10
mov lr, #0
adcs r11, r11, r11
adc lr, lr, #0
mov r12, #19
# lr:bit31(r11) holds the bits above 2^255; multiply them by 19.
lsl lr, lr, #1
orr lr, lr, r11, lsr #31
mul r12, lr, r12
adds r4, r4, r12
adcs r5, r5, #0
adcs r6, r6, #0
adcs r7, r7, #0
adcs r8, r8, #0
adcs r9, r9, #0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r11, r11, #0x80000000
#else
bfc r11, #31, #1
#endif
adcs r10, r10, #0
adc r11, r11, #0
stm r0, {r4, r5, r6, r7, r8, r9, r10, r11}
# Done Double
add r3, r1, #32
add r2, r1, #0x40
add r0, r1, #32
bl fe_add_sub_op
add r3, r0, #0x40
add r2, sp, #12
add r1, r0, #0x40
add r0, r0, #32
bl fe_add_sub_op
add sp, sp, #44
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size ge_add,.-ge_add
.text
.align 4
.globl ge_sub
.type ge_sub, %function
# ge_sub - group-element subtraction (Ed25519-style group code; see the
# HAVE_ED25519_SIGN section later in this file).
# In:  r0 = result point, r1 = first input point, r2 = second input
# point.  Same layout and helpers as ge_add; differs in which halves of
# the cached point are multiplied and in the final add/sub ordering.
# Uses a 32-byte stack temporary at [sp, #12] for the doubled field
# element below.
# NOTE(review): the X/Y/Z/T meaning of each offset is fixed by the C
# callers outside this view - confirm against the ge_sub() C prototype.
ge_sub:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
sub sp, sp, #44
# Spill the three pointer arguments so they can be reloaded between
# helper calls.
str r0, [sp]
str r1, [sp, #4]
str r2, [sp, #8]
mov r3, r1
add r2, r1, #32
add r1, r0, #32
bl fe_add_sub_op
ldr r2, [sp, #8]
add r2, r2, #32
mov r1, r0
add r0, r0, #0x40
bl fe_mul_op
ldr r0, [sp]
ldr r2, [sp, #8]
add r1, r0, #32
add r0, r0, #32
bl fe_mul_op
ldr r0, [sp]
ldr r1, [sp, #8]
ldr r2, [sp, #4]
add r2, r2, #0x60
add r1, r1, #0x60
add r0, r0, #0x60
bl fe_mul_op
ldr r0, [sp]
ldr r1, [sp, #4]
ldr r2, [sp, #8]
add r2, r2, #0x40
add r1, r1, #0x40
bl fe_mul_op
ldr r1, [sp]
add r0, sp, #12
# Double
# 2*a mod 2^255-19 into the stack temporary: double each limb with
# carry, fold the overflow past bit 255 back in as *19
# (2^255 == 19 mod p), then clear bit 31 of the top limb.
ldm r1, {r4, r5, r6, r7, r8, r9, r10, r11}
adds r4, r4, r4
adcs r5, r5, r5
adcs r6, r6, r6
adcs r7, r7, r7
adcs r8, r8, r8
adcs r9, r9, r9
adcs r10, r10, r10
mov lr, #0
adcs r11, r11, r11
adc lr, lr, #0
mov r12, #19
# lr:bit31(r11) holds the bits above 2^255; multiply them by 19.
lsl lr, lr, #1
orr lr, lr, r11, lsr #31
mul r12, lr, r12
adds r4, r4, r12
adcs r5, r5, #0
adcs r6, r6, #0
adcs r7, r7, #0
adcs r8, r8, #0
adcs r9, r9, #0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r11, r11, #0x80000000
#else
bfc r11, #31, #1
#endif
adcs r10, r10, #0
adc r11, r11, #0
stm r0, {r4, r5, r6, r7, r8, r9, r10, r11}
# Done Double
add r3, r1, #32
add r2, r1, #0x40
add r0, r1, #32
bl fe_add_sub_op
add r3, r0, #0x40
add r2, sp, #12
add r1, r0, #32
add r0, r0, #0x40
bl fe_add_sub_op
add sp, sp, #44
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size ge_sub,.-ge_sub
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
.text
.align 4
.globl sc_reduce
.type sc_reduce, %function
# sc_reduce - reduce the 512-bit little-endian scalar at [r0] modulo the
# group order and store the 256-bit result in place.
# Pre-ARMv6 variant: umaal is unavailable, so every multiply-accumulate
# carry is propagated by hand with adds/adc around each umlal.
# The 32-bit magic constants in the body are words derived from the
# order (matching the in-line "order" comments); on ARM < 7 they are
# built a byte at a time because movw/movt do not exist there.
# Clobbers r1-r12, lr; uses 56 bytes of stack, with the result pointer
# spilled at [sp, #52].
sc_reduce:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
sub sp, sp, #56
str r0, [sp, #52]
# Load bits 252-511
# Build the value shifted right by 252 bits into r2..r9, with lr
# holding bits 504..511.
add r0, r0, #28
ldm r0, {r1, r2, r3, r4, r5, r6, r7, r8, r9}
lsr lr, r9, #24
lsl r9, r9, #4
orr r9, r9, r8, LSR #28
lsl r8, r8, #4
orr r8, r8, r7, LSR #28
lsl r7, r7, #4
orr r7, r7, r6, LSR #28
lsl r6, r6, #4
orr r6, r6, r5, LSR #28
lsl r5, r5, #4
orr r5, r5, r4, LSR #28
lsl r4, r4, #4
orr r4, r4, r3, LSR #28
lsl r3, r3, #4
orr r3, r3, r2, LSR #28
lsl r2, r2, #4
orr r2, r2, r1, LSR #28
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r9, r9, #0xf0000000
#else
bfc r9, #28, #4
#endif
sub r0, r0, #28
# Add order times bits 504..511
# r10 = 0xa30a2c13
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0xa3
lsl r10, r10, #8
orr r10, r10, #10
lsl r10, r10, #8
orr r10, r10, #44
lsl r10, r10, #8
orr r10, r10, #19
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0x2c
lsl r10, r10, #8
add r10, r10, #0x13
#else
mov r10, #0x2c13
#endif
movt r10, #0xa30a
#endif
# r11 = 0xa7ed9ce5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0xa7
lsl r11, r11, #8
orr r11, r11, #0xed
lsl r11, r11, #8
orr r11, r11, #0x9c
lsl r11, r11, #8
orr r11, r11, #0xe5
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0x9c
lsl r11, r11, #8
add r11, r11, #0xe5
#else
mov r11, #0x9ce5
#endif
movt r11, #0xa7ed
#endif
mov r1, #0
umlal r2, r1, r10, lr
adds r3, r3, r1
mov r1, #0
adc r1, r1, #0
umlal r3, r1, r11, lr
# r10 = 0x5d086329
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0x5d
lsl r10, r10, #8
orr r10, r10, #8
lsl r10, r10, #8
orr r10, r10, #0x63
lsl r10, r10, #8
orr r10, r10, #41
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0x63
lsl r10, r10, #8
add r10, r10, #0x29
#else
mov r10, #0x6329
#endif
movt r10, #0x5d08
#endif
# r11 = 0xeb210621
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0xeb
lsl r11, r11, #8
orr r11, r11, #33
lsl r11, r11, #8
orr r11, r11, #6
lsl r11, r11, #8
orr r11, r11, #33
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0x6
lsl r11, r11, #8
add r11, r11, #0x21
#else
mov r11, #0x621
#endif
movt r11, #0xeb21
#endif
adds r4, r4, r1
mov r1, #0
adc r1, r1, #0
umlal r4, r1, r10, lr
adds r5, r5, r1
mov r1, #0
adc r1, r1, #0
umlal r5, r1, r11, lr
adds r6, r6, r1
adcs r7, r7, #0
adcs r8, r8, #0
adc r9, r9, #0
subs r6, r6, lr
sbcs r7, r7, #0
sbcs r8, r8, #0
sbc r9, r9, #0
# Sub product of top 8 words and order
# One 32x256-bit multiply-accumulate pass per order word, results
# gathered into the stack buffer at r12.
mov r12, sp
# r1 = 0xa30a2c13
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xa3
lsl r1, r1, #8
orr r1, r1, #10
lsl r1, r1, #8
orr r1, r1, #44
lsl r1, r1, #8
orr r1, r1, #19
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x2c
lsl r1, r1, #8
add r1, r1, #0x13
#else
mov r1, #0x2c13
#endif
movt r1, #0xa30a
#endif
mov lr, #0
ldm r0!, {r10, r11}
umlal r10, lr, r2, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r3, r1
stm r12!, {r10, r11}
ldm r0!, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r4, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r5, r1
stm r12!, {r10, r11}
ldm r0!, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r6, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r7, r1
stm r12!, {r10, r11}
ldm r0!, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r8, r1
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r11, r11, #0xf0000000
#else
bfc r11, #28, #4
#endif
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r9, r1
stm r12!, {r10, r11, lr}
sub r0, r0, #16
sub r12, r12, #32
# r1 = 0xa7ed9ce5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xa7
lsl r1, r1, #8
orr r1, r1, #0xed
lsl r1, r1, #8
orr r1, r1, #0x9c
lsl r1, r1, #8
orr r1, r1, #0xe5
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x9c
lsl r1, r1, #8
add r1, r1, #0xe5
#else
mov r1, #0x9ce5
#endif
movt r1, #0xa7ed
#endif
mov lr, #0
ldm r12, {r10, r11}
umlal r10, lr, r2, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r3, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r4, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r5, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r6, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r7, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r8, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r9, r1
stm r12!, {r10, r11, lr}
sub r12, r12, #32
# r1 = 0x5d086329
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x5d
lsl r1, r1, #8
orr r1, r1, #8
lsl r1, r1, #8
orr r1, r1, #0x63
lsl r1, r1, #8
orr r1, r1, #41
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x63
lsl r1, r1, #8
add r1, r1, #0x29
#else
mov r1, #0x6329
#endif
movt r1, #0x5d08
#endif
mov lr, #0
ldm r12, {r10, r11}
umlal r10, lr, r2, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r3, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r4, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r5, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r6, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r7, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r8, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r9, r1
stm r12!, {r10, r11, lr}
sub r12, r12, #32
# r1 = 0xeb210621
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xeb
lsl r1, r1, #8
orr r1, r1, #33
lsl r1, r1, #8
orr r1, r1, #6
lsl r1, r1, #8
orr r1, r1, #33
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x6
lsl r1, r1, #8
add r1, r1, #0x21
#else
mov r1, #0x621
#endif
movt r1, #0xeb21
#endif
mov lr, #0
ldm r12, {r10, r11}
umlal r10, lr, r2, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r3, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r4, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r5, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r6, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r7, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r8, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r9, r1
stm r12!, {r10, r11, lr}
sub r12, r12, #32
# Subtract at 4 * 32
ldm r12, {r10, r11}
subs r10, r10, r2
sbcs r11, r11, r3
stm r12!, {r10, r11}
ldm r12, {r10, r11}
sbcs r10, r10, r4
sbcs r11, r11, r5
stm r12!, {r10, r11}
ldm r12, {r10, r11}
sbcs r10, r10, r6
sbcs r11, r11, r7
stm r12!, {r10, r11}
ldm r12, {r10, r11}
sbcs r10, r10, r8
sbc r11, r11, r9
stm r12!, {r10, r11}
sub r12, r12, #36
# NOTE(review): correction mask from the top word; used below to gate
# the conditional order add - confirm sign convention against sc.c.
asr lr, r11, #25
# Conditionally subtract order starting at bit 125
# r1 = 0xa0000000
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xa00000
lsl r1, r1, #8
add r1, r1, #0x0
#else
mov r1, #0xa0000000
#endif
# r2 = 0x4b9eba7d
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r2, #0x4b
lsl r2, r2, #8
orr r2, r2, #0x9e
lsl r2, r2, #8
orr r2, r2, #0xba
lsl r2, r2, #8
orr r2, r2, #0x7d
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r2, #0xba
lsl r2, r2, #8
add r2, r2, #0x7d
#else
mov r2, #0xba7d
#endif
movt r2, #0x4b9e
#endif
# r3 = 0xcb024c63
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0xcb
lsl r3, r3, #8
orr r3, r3, #2
lsl r3, r3, #8
orr r3, r3, #0x4c
lsl r3, r3, #8
orr r3, r3, #0x63
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x4c
lsl r3, r3, #8
add r3, r3, #0x63
#else
mov r3, #0x4c63
#endif
movt r3, #0xcb02
#endif
# r4 = 0xd45ef39a
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r4, #0xd4
lsl r4, r4, #8
orr r4, r4, #0x5e
lsl r4, r4, #8
orr r4, r4, #0xf3
lsl r4, r4, #8
orr r4, r4, #0x9a
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r4, #0xf3
lsl r4, r4, #8
add r4, r4, #0x9a
#else
mov r4, #0xf39a
#endif
movt r4, #0xd45e
#endif
# r5 = 0x029bdf3b
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r5, #2
lsl r5, r5, #8
orr r5, r5, #0x9b
lsl r5, r5, #8
orr r5, r5, #0xdf
lsl r5, r5, #8
orr r5, r5, #59
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r5, #0xdf
lsl r5, r5, #8
add r5, r5, #0x3b
#else
mov r5, #0xdf3b
#endif
movt r5, #0x29b
#endif
# r9 = 0x02000000
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r9, #0x20000
lsl r9, r9, #8
add r9, r9, #0x0
#else
mov r9, #0x2000000
#endif
# Mask each correction word by lr so the add is a no-op when no
# correction is needed.
and r1, r1, lr
and r2, r2, lr
and r3, r3, lr
and r4, r4, lr
and r5, r5, lr
and r9, r9, lr
ldm r12, {r10, r11}
adds r10, r10, r1
adcs r11, r11, r2
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adcs r10, r10, r3
adcs r11, r11, r4
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adcs r10, r10, r5
adcs r11, r11, #0
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adcs r10, r10, #0
adcs r11, r11, #0
stm r12!, {r10, r11}
ldm r12, {r10}
adcs r10, r10, #0
stm r12!, {r10}
sub r0, r0, #16
mov r12, sp
# Load bits 252-376
add r12, r12, #28
ldm r12, {r1, r2, r3, r4, r5}
lsl r5, r5, #4
orr r5, r5, r4, lsr #28
lsl r4, r4, #4
orr r4, r4, r3, lsr #28
lsl r3, r3, #4
orr r3, r3, r2, lsr #28
lsl r2, r2, #4
orr r2, r2, r1, lsr #28
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r5, r5, #0xe0000000
#else
bfc r5, #29, #3
#endif
sub r12, r12, #28
# Sub product of top 4 words and order
mov r0, sp
# * -5cf5d3ed
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xa3
lsl r1, r1, #8
orr r1, r1, #10
lsl r1, r1, #8
orr r1, r1, #44
lsl r1, r1, #8
orr r1, r1, #19
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x2c
lsl r1, r1, #8
add r1, r1, #0x13
#else
mov r1, #0x2c13
#endif
movt r1, #0xa30a
#endif
mov lr, #0
ldm r0, {r6, r7, r8, r9}
umlal r6, lr, r2, r1
adds r7, r7, lr
mov lr, #0
adc lr, lr, #0
umlal r7, lr, r3, r1
adds r8, r8, lr
mov lr, #0
adc lr, lr, #0
umlal r8, lr, r4, r1
adds r9, r9, lr
mov lr, #0
adc lr, lr, #0
umlal r9, lr, r5, r1
stm r0, {r6, r7, r8, r9}
add r0, r0, #4
# * -5812631b
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xa7
lsl r1, r1, #8
orr r1, r1, #0xed
lsl r1, r1, #8
orr r1, r1, #0x9c
lsl r1, r1, #8
orr r1, r1, #0xe5
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x9c
lsl r1, r1, #8
add r1, r1, #0xe5
#else
mov r1, #0x9ce5
#endif
movt r1, #0xa7ed
#endif
mov r10, #0
ldm r0, {r6, r7, r8, r9}
umlal r6, r10, r2, r1
adds r7, r7, r10
mov r10, #0
adc r10, r10, #0
umlal r7, r10, r3, r1
adds r8, r8, r10
mov r10, #0
adc r10, r10, #0
umlal r8, r10, r4, r1
adds r9, r9, r10
mov r10, #0
adc r10, r10, #0
umlal r9, r10, r5, r1
stm r0, {r6, r7, r8, r9}
add r0, r0, #4
# * -a2f79cd7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x5d
lsl r1, r1, #8
orr r1, r1, #8
lsl r1, r1, #8
orr r1, r1, #0x63
lsl r1, r1, #8
orr r1, r1, #41
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x63
lsl r1, r1, #8
add r1, r1, #0x29
#else
mov r1, #0x6329
#endif
movt r1, #0x5d08
#endif
mov r11, #0
ldm r0, {r6, r7, r8, r9}
umlal r6, r11, r2, r1
adds r7, r7, r11
mov r11, #0
adc r11, r11, #0
umlal r7, r11, r3, r1
adds r8, r8, r11
mov r11, #0
adc r11, r11, #0
umlal r8, r11, r4, r1
adds r9, r9, r11
mov r11, #0
adc r11, r11, #0
umlal r9, r11, r5, r1
stm r0, {r6, r7, r8, r9}
add r0, r0, #4
# * -14def9df
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xeb
lsl r1, r1, #8
orr r1, r1, #33
lsl r1, r1, #8
orr r1, r1, #6
lsl r1, r1, #8
orr r1, r1, #33
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x6
lsl r1, r1, #8
add r1, r1, #0x21
#else
mov r1, #0x621
#endif
movt r1, #0xeb21
#endif
mov r12, #0
ldm r0, {r6, r7, r8, r9}
umlal r6, r12, r2, r1
adds r7, r7, r12
mov r12, #0
adc r12, r12, #0
umlal r7, r12, r3, r1
adds r8, r8, r12
mov r12, #0
adc r12, r12, #0
umlal r8, r12, r4, r1
adds r9, r9, r12
mov r12, #0
adc r12, r12, #0
umlal r9, r12, r5, r1
stm r0, {r6, r7, r8, r9}
add r0, r0, #4
# Add overflows at 4 * 32
ldm r0, {r6, r7, r8, r9}
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r9, r9, #0xf0000000
#else
bfc r9, #28, #4
#endif
adds r6, r6, lr
adcs r7, r7, r10
adcs r8, r8, r11
adc r9, r9, r12
# Subtract top at 4 * 32
subs r6, r6, r2
sbcs r7, r7, r3
sbcs r8, r8, r4
sbcs r9, r9, r5
# r1 = all-ones if the subtract borrowed, else 0 (borrow mask).
sbc r1, r1, r1
sub r0, r0, #16
ldm r0, {r2, r3, r4, r5}
# r10 = 0x5cf5d3ed (low word of the order)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0x5c
lsl r10, r10, #8
orr r10, r10, #0xf5
lsl r10, r10, #8
orr r10, r10, #0xd3
lsl r10, r10, #8
orr r10, r10, #0xed
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0xd3
lsl r10, r10, #8
add r10, r10, #0xed
#else
mov r10, #0xd3ed
#endif
movt r10, #0x5cf5
#endif
# r11 = 0x5812631a
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0x58
lsl r11, r11, #8
orr r11, r11, #18
lsl r11, r11, #8
orr r11, r11, #0x63
lsl r11, r11, #8
orr r11, r11, #26
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0x63
lsl r11, r11, #8
add r11, r11, #0x1a
#else
mov r11, #0x631a
#endif
movt r11, #0x5812
#endif
# r12 = 0xa2f79cd6
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r12, #0xa2
lsl r12, r12, #8
orr r12, r12, #0xf7
lsl r12, r12, #8
orr r12, r12, #0x9c
lsl r12, r12, #8
orr r12, r12, #0xd6
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r12, #0x9c
lsl r12, r12, #8
add r12, r12, #0xd6
#else
mov r12, #0x9cd6
#endif
movt r12, #0xa2f7
#endif
# lr = 0x14def9de
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov lr, #20
lsl lr, lr, #8
orr lr, lr, #0xde
lsl lr, lr, #8
orr lr, lr, #0xf9
lsl lr, lr, #8
orr lr, lr, #0xde
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov lr, #0xf9
lsl lr, lr, #8
add lr, lr, #0xde
#else
mov lr, #0xf9de
#endif
movt lr, #0x14de
#endif
# Conditionally add the order back, gated by the borrow mask in r1.
and r10, r10, r1
and r11, r11, r1
and r12, r12, r1
and lr, lr, r1
adds r2, r2, r10
adcs r3, r3, r11
adcs r4, r4, r12
adcs r5, r5, lr
adcs r6, r6, #0
adcs r7, r7, #0
and r1, r1, #0x10000000
adcs r8, r8, #0
adc r9, r9, r1
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r9, r9, #0xf0000000
#else
bfc r9, #28, #4
#endif
# Store result
ldr r0, [sp, #52]
stm r0, {r2, r3, r4, r5, r6, r7, r8, r9}
add sp, sp, #56
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size sc_reduce,.-sc_reduce
#else
.text
.align 4
.globl sc_reduce
.type sc_reduce, %function
# sc_reduce (ARMv6+ variant) - reduce the 512-bit little-endian scalar
# at [r0] modulo the group order and store the 256-bit result in place.
# Uses the umaal (multiply with double accumulate) instruction to fuse
# the manual adds/adc carry propagation needed on older cores.
# The 32-bit magic constants in the body are words derived from the
# order (matching the in-line "order" comments).
# Clobbers r1-r12, lr; uses 56 bytes of stack, with the result pointer
# spilled at [sp, #52].
sc_reduce:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
sub sp, sp, #56
str r0, [sp, #52]
# Load bits 252-511
# Build the value shifted right by 252 bits into r2..r9, with lr
# holding bits 504..511.
add r0, r0, #28
ldm r0, {r1, r2, r3, r4, r5, r6, r7, r8, r9}
lsr lr, r9, #24
lsl r9, r9, #4
orr r9, r9, r8, LSR #28
lsl r8, r8, #4
orr r8, r8, r7, LSR #28
lsl r7, r7, #4
orr r7, r7, r6, LSR #28
lsl r6, r6, #4
orr r6, r6, r5, LSR #28
lsl r5, r5, #4
orr r5, r5, r4, LSR #28
lsl r4, r4, #4
orr r4, r4, r3, LSR #28
lsl r3, r3, #4
orr r3, r3, r2, LSR #28
lsl r2, r2, #4
orr r2, r2, r1, LSR #28
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r9, r9, #0xf0000000
#else
bfc r9, #28, #4
#endif
sub r0, r0, #28
# Add order times bits 504..511
# r10 = 0xa30a2c13
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0xa3
lsl r10, r10, #8
orr r10, r10, #10
lsl r10, r10, #8
orr r10, r10, #44
lsl r10, r10, #8
orr r10, r10, #19
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0x2c
lsl r10, r10, #8
add r10, r10, #0x13
#else
mov r10, #0x2c13
#endif
movt r10, #0xa30a
#endif
# r11 = 0xa7ed9ce5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0xa7
lsl r11, r11, #8
orr r11, r11, #0xed
lsl r11, r11, #8
orr r11, r11, #0x9c
lsl r11, r11, #8
orr r11, r11, #0xe5
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0x9c
lsl r11, r11, #8
add r11, r11, #0xe5
#else
mov r11, #0x9ce5
#endif
movt r11, #0xa7ed
#endif
mov r1, #0
umlal r2, r1, r10, lr
umaal r3, r1, r11, lr
# r10 = 0x5d086329
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0x5d
lsl r10, r10, #8
orr r10, r10, #8
lsl r10, r10, #8
orr r10, r10, #0x63
lsl r10, r10, #8
orr r10, r10, #41
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0x63
lsl r10, r10, #8
add r10, r10, #0x29
#else
mov r10, #0x6329
#endif
movt r10, #0x5d08
#endif
# r11 = 0xeb210621
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0xeb
lsl r11, r11, #8
orr r11, r11, #33
lsl r11, r11, #8
orr r11, r11, #6
lsl r11, r11, #8
orr r11, r11, #33
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0x6
lsl r11, r11, #8
add r11, r11, #0x21
#else
mov r11, #0x621
#endif
movt r11, #0xeb21
#endif
umaal r4, r1, r10, lr
umaal r5, r1, r11, lr
adds r6, r6, r1
adcs r7, r7, #0
adcs r8, r8, #0
adc r9, r9, #0
subs r6, r6, lr
sbcs r7, r7, #0
sbcs r8, r8, #0
sbc r9, r9, #0
# Sub product of top 8 words and order
# One 32x256-bit multiply-accumulate pass per order word, results
# gathered into the stack buffer at r12.
mov r12, sp
# r1 = 0xa30a2c13
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xa3
lsl r1, r1, #8
orr r1, r1, #10
lsl r1, r1, #8
orr r1, r1, #44
lsl r1, r1, #8
orr r1, r1, #19
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x2c
lsl r1, r1, #8
add r1, r1, #0x13
#else
mov r1, #0x2c13
#endif
movt r1, #0xa30a
#endif
mov lr, #0
ldm r0!, {r10, r11}
umlal r10, lr, r2, r1
umaal r11, lr, r3, r1
stm r12!, {r10, r11}
ldm r0!, {r10, r11}
umaal r10, lr, r4, r1
umaal r11, lr, r5, r1
stm r12!, {r10, r11}
ldm r0!, {r10, r11}
umaal r10, lr, r6, r1
umaal r11, lr, r7, r1
stm r12!, {r10, r11}
ldm r0!, {r10, r11}
umaal r10, lr, r8, r1
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r11, r11, #0xf0000000
#else
bfc r11, #28, #4
#endif
umaal r11, lr, r9, r1
stm r12!, {r10, r11, lr}
sub r0, r0, #16
sub r12, r12, #32
# r1 = 0xa7ed9ce5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xa7
lsl r1, r1, #8
orr r1, r1, #0xed
lsl r1, r1, #8
orr r1, r1, #0x9c
lsl r1, r1, #8
orr r1, r1, #0xe5
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x9c
lsl r1, r1, #8
add r1, r1, #0xe5
#else
mov r1, #0x9ce5
#endif
movt r1, #0xa7ed
#endif
mov lr, #0
ldm r12, {r10, r11}
umlal r10, lr, r2, r1
umaal r11, lr, r3, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
umaal r10, lr, r4, r1
umaal r11, lr, r5, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
umaal r10, lr, r6, r1
umaal r11, lr, r7, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
umaal r10, lr, r8, r1
umaal r11, lr, r9, r1
stm r12!, {r10, r11, lr}
sub r12, r12, #32
# r1 = 0x5d086329
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x5d
lsl r1, r1, #8
orr r1, r1, #8
lsl r1, r1, #8
orr r1, r1, #0x63
lsl r1, r1, #8
orr r1, r1, #41
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x63
lsl r1, r1, #8
add r1, r1, #0x29
#else
mov r1, #0x6329
#endif
movt r1, #0x5d08
#endif
mov lr, #0
ldm r12, {r10, r11}
umlal r10, lr, r2, r1
umaal r11, lr, r3, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
umaal r10, lr, r4, r1
umaal r11, lr, r5, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
umaal r10, lr, r6, r1
umaal r11, lr, r7, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
umaal r10, lr, r8, r1
umaal r11, lr, r9, r1
stm r12!, {r10, r11, lr}
sub r12, r12, #32
# r1 = 0xeb210621
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xeb
lsl r1, r1, #8
orr r1, r1, #33
lsl r1, r1, #8
orr r1, r1, #6
lsl r1, r1, #8
orr r1, r1, #33
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x6
lsl r1, r1, #8
add r1, r1, #0x21
#else
mov r1, #0x621
#endif
movt r1, #0xeb21
#endif
mov lr, #0
ldm r12, {r10, r11}
umlal r10, lr, r2, r1
umaal r11, lr, r3, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
umaal r10, lr, r4, r1
umaal r11, lr, r5, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
umaal r10, lr, r6, r1
umaal r11, lr, r7, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
umaal r10, lr, r8, r1
umaal r11, lr, r9, r1
stm r12!, {r10, r11, lr}
sub r12, r12, #32
# Subtract at 4 * 32
ldm r12, {r10, r11}
subs r10, r10, r2
sbcs r11, r11, r3
stm r12!, {r10, r11}
ldm r12, {r10, r11}
sbcs r10, r10, r4
sbcs r11, r11, r5
stm r12!, {r10, r11}
ldm r12, {r10, r11}
sbcs r10, r10, r6
sbcs r11, r11, r7
stm r12!, {r10, r11}
ldm r12, {r10, r11}
sbcs r10, r10, r8
sbc r11, r11, r9
stm r12!, {r10, r11}
sub r12, r12, #36
# NOTE(review): correction mask from the top word; used below to gate
# the conditional order add - confirm sign convention against sc.c.
asr lr, r11, #25
# Conditionally subtract order starting at bit 125
# r1 = 0xa0000000
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xa00000
lsl r1, r1, #8
add r1, r1, #0x0
#else
mov r1, #0xa0000000
#endif
# r2 = 0x4b9eba7d
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r2, #0x4b
lsl r2, r2, #8
orr r2, r2, #0x9e
lsl r2, r2, #8
orr r2, r2, #0xba
lsl r2, r2, #8
orr r2, r2, #0x7d
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r2, #0xba
lsl r2, r2, #8
add r2, r2, #0x7d
#else
mov r2, #0xba7d
#endif
movt r2, #0x4b9e
#endif
# r3 = 0xcb024c63
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0xcb
lsl r3, r3, #8
orr r3, r3, #2
lsl r3, r3, #8
orr r3, r3, #0x4c
lsl r3, r3, #8
orr r3, r3, #0x63
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x4c
lsl r3, r3, #8
add r3, r3, #0x63
#else
mov r3, #0x4c63
#endif
movt r3, #0xcb02
#endif
# r4 = 0xd45ef39a
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r4, #0xd4
lsl r4, r4, #8
orr r4, r4, #0x5e
lsl r4, r4, #8
orr r4, r4, #0xf3
lsl r4, r4, #8
orr r4, r4, #0x9a
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r4, #0xf3
lsl r4, r4, #8
add r4, r4, #0x9a
#else
mov r4, #0xf39a
#endif
movt r4, #0xd45e
#endif
# r5 = 0x029bdf3b
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r5, #2
lsl r5, r5, #8
orr r5, r5, #0x9b
lsl r5, r5, #8
orr r5, r5, #0xdf
lsl r5, r5, #8
orr r5, r5, #59
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r5, #0xdf
lsl r5, r5, #8
add r5, r5, #0x3b
#else
mov r5, #0xdf3b
#endif
movt r5, #0x29b
#endif
# r9 = 0x02000000
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r9, #0x20000
lsl r9, r9, #8
add r9, r9, #0x0
#else
mov r9, #0x2000000
#endif
# Mask each correction word by lr so the add is a no-op when no
# correction is needed.
and r1, r1, lr
and r2, r2, lr
and r3, r3, lr
and r4, r4, lr
and r5, r5, lr
and r9, r9, lr
ldm r12, {r10, r11}
adds r10, r10, r1
adcs r11, r11, r2
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adcs r10, r10, r3
adcs r11, r11, r4
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adcs r10, r10, r5
adcs r11, r11, #0
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adcs r10, r10, #0
adcs r11, r11, #0
stm r12!, {r10, r11}
ldm r12, {r10}
adcs r10, r10, #0
stm r12!, {r10}
sub r0, r0, #16
mov r12, sp
# Load bits 252-376
add r12, r12, #28
ldm r12, {r1, r2, r3, r4, r5}
lsl r5, r5, #4
orr r5, r5, r4, lsr #28
lsl r4, r4, #4
orr r4, r4, r3, lsr #28
lsl r3, r3, #4
orr r3, r3, r2, lsr #28
lsl r2, r2, #4
orr r2, r2, r1, lsr #28
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r5, r5, #0xe0000000
#else
bfc r5, #29, #3
#endif
sub r12, r12, #28
# Sub product of top 4 words and order
mov r0, sp
# * -5cf5d3ed
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xa3
lsl r1, r1, #8
orr r1, r1, #10
lsl r1, r1, #8
orr r1, r1, #44
lsl r1, r1, #8
orr r1, r1, #19
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x2c
lsl r1, r1, #8
add r1, r1, #0x13
#else
mov r1, #0x2c13
#endif
movt r1, #0xa30a
#endif
mov lr, #0
ldm r0, {r6, r7, r8, r9}
umlal r6, lr, r2, r1
umaal r7, lr, r3, r1
umaal r8, lr, r4, r1
umaal r9, lr, r5, r1
stm r0, {r6, r7, r8, r9}
add r0, r0, #4
# * -5812631b
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xa7
lsl r1, r1, #8
orr r1, r1, #0xed
lsl r1, r1, #8
orr r1, r1, #0x9c
lsl r1, r1, #8
orr r1, r1, #0xe5
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x9c
lsl r1, r1, #8
add r1, r1, #0xe5
#else
mov r1, #0x9ce5
#endif
movt r1, #0xa7ed
#endif
mov r10, #0
ldm r0, {r6, r7, r8, r9}
umlal r6, r10, r2, r1
umaal r7, r10, r3, r1
umaal r8, r10, r4, r1
umaal r9, r10, r5, r1
stm r0, {r6, r7, r8, r9}
add r0, r0, #4
# * -a2f79cd7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x5d
lsl r1, r1, #8
orr r1, r1, #8
lsl r1, r1, #8
orr r1, r1, #0x63
lsl r1, r1, #8
orr r1, r1, #41
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x63
lsl r1, r1, #8
add r1, r1, #0x29
#else
mov r1, #0x6329
#endif
movt r1, #0x5d08
#endif
mov r11, #0
ldm r0, {r6, r7, r8, r9}
umlal r6, r11, r2, r1
umaal r7, r11, r3, r1
umaal r8, r11, r4, r1
umaal r9, r11, r5, r1
stm r0, {r6, r7, r8, r9}
add r0, r0, #4
# * -14def9df
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xeb
lsl r1, r1, #8
orr r1, r1, #33
lsl r1, r1, #8
orr r1, r1, #6
lsl r1, r1, #8
orr r1, r1, #33
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x6
lsl r1, r1, #8
add r1, r1, #0x21
#else
mov r1, #0x621
#endif
movt r1, #0xeb21
#endif
mov r12, #0
ldm r0, {r6, r7, r8, r9}
umlal r6, r12, r2, r1
umaal r7, r12, r3, r1
umaal r8, r12, r4, r1
umaal r9, r12, r5, r1
stm r0, {r6, r7, r8, r9}
add r0, r0, #4
# Add overflows at 4 * 32
ldm r0, {r6, r7, r8, r9}
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r9, r9, #0xf0000000
#else
bfc r9, #28, #4
#endif
adds r6, r6, lr
adcs r7, r7, r10
adcs r8, r8, r11
adc r9, r9, r12
# Subtract top at 4 * 32
subs r6, r6, r2
sbcs r7, r7, r3
sbcs r8, r8, r4
sbcs r9, r9, r5
# r1 = all-ones if the subtract borrowed, else 0 (borrow mask).
sbc r1, r1, r1
sub r0, r0, #16
ldm r0, {r2, r3, r4, r5}
# r10 = 0x5cf5d3ed (low word of the order)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0x5c
lsl r10, r10, #8
orr r10, r10, #0xf5
lsl r10, r10, #8
orr r10, r10, #0xd3
lsl r10, r10, #8
orr r10, r10, #0xed
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0xd3
lsl r10, r10, #8
add r10, r10, #0xed
#else
mov r10, #0xd3ed
#endif
movt r10, #0x5cf5
#endif
# r11 = 0x5812631a
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0x58
lsl r11, r11, #8
orr r11, r11, #18
lsl r11, r11, #8
orr r11, r11, #0x63
lsl r11, r11, #8
orr r11, r11, #26
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0x63
lsl r11, r11, #8
add r11, r11, #0x1a
#else
mov r11, #0x631a
#endif
movt r11, #0x5812
#endif
# r12 = 0xa2f79cd6
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r12, #0xa2
lsl r12, r12, #8
orr r12, r12, #0xf7
lsl r12, r12, #8
orr r12, r12, #0x9c
lsl r12, r12, #8
orr r12, r12, #0xd6
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r12, #0x9c
lsl r12, r12, #8
add r12, r12, #0xd6
#else
mov r12, #0x9cd6
#endif
movt r12, #0xa2f7
#endif
# lr = 0x14def9de
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov lr, #20
lsl lr, lr, #8
orr lr, lr, #0xde
lsl lr, lr, #8
orr lr, lr, #0xf9
lsl lr, lr, #8
orr lr, lr, #0xde
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov lr, #0xf9
lsl lr, lr, #8
add lr, lr, #0xde
#else
mov lr, #0xf9de
#endif
movt lr, #0x14de
#endif
# Conditionally add the order back, gated by the borrow mask in r1.
and r10, r10, r1
and r11, r11, r1
and r12, r12, r1
and lr, lr, r1
adds r2, r2, r10
adcs r3, r3, r11
adcs r4, r4, r12
adcs r5, r5, lr
adcs r6, r6, #0
adcs r7, r7, #0
and r1, r1, #0x10000000
adcs r8, r8, #0
adc r9, r9, r1
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r9, r9, #0xf0000000
#else
bfc r9, #28, #4
#endif
# Store result
ldr r0, [sp, #52]
stm r0, {r2, r3, r4, r5, r6, r7, r8, r9}
add sp, sp, #56
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size sc_reduce,.-sc_reduce
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
#ifdef HAVE_ED25519_SIGN
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
# -----------------------------------------------------------------------------
# sc_muladd - Ed25519 scalar multiply-accumulate: s = (a * b + c) mod L.
# The 32-bit constants folded in below (0x5cf5d3ed, 0x5812631a, 0xa2f79cd6,
# 0x14def9de, ...) are the low words of the Ed25519 group order
# L = 2^252 + 27742317777372353535851937790883648493.
# ARMv6-and-below variant (umlal-based; no umaal).
# ABI (AAPCS): r0 = s (out, 32 bytes), r1 = a, r2 = b, r3 = c
#              (each a 256-bit little-endian scalar).
# Uses 0x50 bytes of stack scratch; r0/r1/r3 are spilled at sp+0x44..0x4c.
# NOTE(review): in the constant-materialization sequences below, the inner
# "WOLFSSL_ARM_ARCH < 7" test nested inside the #else of the same condition
# is always false there; the movt path is the live one. Harmless generator
# artifact.
# -----------------------------------------------------------------------------
.text
.align 4
.globl sc_muladd
.type sc_muladd, %function
sc_muladd:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
sub sp, sp, #0x50
add lr, sp, #0x44
stm lr, {r0, r1, r3}
mov r0, #0
# 8x8-word schoolbook multiply a * b; low words are stored to [sp] as they
# complete, the running high words stay in registers. r0 stays 0 as a
# carry seed for "adc rN, r0, #0".
ldr r12, [r1]
# A[0] * B[0]
ldr lr, [r2]
umull r3, r4, r12, lr
# A[0] * B[2]
ldr lr, [r2, #8]
umull r5, r6, r12, lr
# A[0] * B[4]
ldr lr, [r2, #16]
umull r7, r8, r12, lr
# A[0] * B[6]
ldr lr, [r2, #24]
umull r9, r10, r12, lr
str r3, [sp]
# A[0] * B[1]
ldr lr, [r2, #4]
mov r11, r0
umlal r4, r11, r12, lr
adds r5, r5, r11
# A[0] * B[3]
ldr lr, [r2, #12]
adcs r6, r6, #0
adc r11, r0, #0
umlal r6, r11, r12, lr
adds r7, r7, r11
# A[0] * B[5]
ldr lr, [r2, #20]
adcs r8, r8, #0
adc r11, r0, #0
umlal r8, r11, r12, lr
adds r9, r9, r11
# A[0] * B[7]
ldr lr, [r2, #28]
adcs r10, r10, #0
adc r3, r0, #0
umlal r10, r3, r12, lr
# A[1] * B[0]
ldr r12, [r1, #4]
ldr lr, [r2]
mov r11, #0
umlal r4, r11, r12, lr
str r4, [sp, #4]
adds r5, r5, r11
# A[1] * B[1]
ldr lr, [r2, #4]
adc r11, r0, #0
umlal r5, r11, r12, lr
adds r6, r6, r11
# A[1] * B[2]
ldr lr, [r2, #8]
adc r11, r0, #0
umlal r6, r11, r12, lr
adds r7, r7, r11
# A[1] * B[3]
ldr lr, [r2, #12]
adc r11, r0, #0
umlal r7, r11, r12, lr
adds r8, r8, r11
# A[1] * B[4]
ldr lr, [r2, #16]
adc r11, r0, #0
umlal r8, r11, r12, lr
adds r9, r9, r11
# A[1] * B[5]
ldr lr, [r2, #20]
adc r11, r0, #0
umlal r9, r11, r12, lr
adds r10, r10, r11
# A[1] * B[6]
ldr lr, [r2, #24]
adc r11, r0, #0
umlal r10, r11, r12, lr
adds r3, r3, r11
# A[1] * B[7]
ldr lr, [r2, #28]
adc r4, r0, #0
umlal r3, r4, r12, lr
# A[2] * B[0]
ldr r12, [r1, #8]
ldr lr, [r2]
mov r11, #0
umlal r5, r11, r12, lr
str r5, [sp, #8]
adds r6, r6, r11
# A[2] * B[1]
ldr lr, [r2, #4]
adc r11, r0, #0
umlal r6, r11, r12, lr
adds r7, r7, r11
# A[2] * B[2]
ldr lr, [r2, #8]
adc r11, r0, #0
umlal r7, r11, r12, lr
adds r8, r8, r11
# A[2] * B[3]
ldr lr, [r2, #12]
adc r11, r0, #0
umlal r8, r11, r12, lr
adds r9, r9, r11
# A[2] * B[4]
ldr lr, [r2, #16]
adc r11, r0, #0
umlal r9, r11, r12, lr
adds r10, r10, r11
# A[2] * B[5]
ldr lr, [r2, #20]
adc r11, r0, #0
umlal r10, r11, r12, lr
adds r3, r3, r11
# A[2] * B[6]
ldr lr, [r2, #24]
adc r11, r0, #0
umlal r3, r11, r12, lr
adds r4, r4, r11
# A[2] * B[7]
ldr lr, [r2, #28]
adc r5, r0, #0
umlal r4, r5, r12, lr
# A[3] * B[0]
ldr r12, [r1, #12]
ldr lr, [r2]
mov r11, #0
umlal r6, r11, r12, lr
str r6, [sp, #12]
adds r7, r7, r11
# A[3] * B[1]
ldr lr, [r2, #4]
adc r11, r0, #0
umlal r7, r11, r12, lr
adds r8, r8, r11
# A[3] * B[2]
ldr lr, [r2, #8]
adc r11, r0, #0
umlal r8, r11, r12, lr
adds r9, r9, r11
# A[3] * B[3]
ldr lr, [r2, #12]
adc r11, r0, #0
umlal r9, r11, r12, lr
adds r10, r10, r11
# A[3] * B[4]
ldr lr, [r2, #16]
adc r11, r0, #0
umlal r10, r11, r12, lr
adds r3, r3, r11
# A[3] * B[5]
ldr lr, [r2, #20]
adc r11, r0, #0
umlal r3, r11, r12, lr
adds r4, r4, r11
# A[3] * B[6]
ldr lr, [r2, #24]
adc r11, r0, #0
umlal r4, r11, r12, lr
adds r5, r5, r11
# A[3] * B[7]
ldr lr, [r2, #28]
adc r6, r0, #0
umlal r5, r6, r12, lr
# A[4] * B[0]
ldr r12, [r1, #16]
ldr lr, [r2]
mov r11, #0
umlal r7, r11, r12, lr
str r7, [sp, #16]
adds r8, r8, r11
# A[4] * B[1]
ldr lr, [r2, #4]
adc r11, r0, #0
umlal r8, r11, r12, lr
adds r9, r9, r11
# A[4] * B[2]
ldr lr, [r2, #8]
adc r11, r0, #0
umlal r9, r11, r12, lr
adds r10, r10, r11
# A[4] * B[3]
ldr lr, [r2, #12]
adc r11, r0, #0
umlal r10, r11, r12, lr
adds r3, r3, r11
# A[4] * B[4]
ldr lr, [r2, #16]
adc r11, r0, #0
umlal r3, r11, r12, lr
adds r4, r4, r11
# A[4] * B[5]
ldr lr, [r2, #20]
adc r11, r0, #0
umlal r4, r11, r12, lr
adds r5, r5, r11
# A[4] * B[6]
ldr lr, [r2, #24]
adc r11, r0, #0
umlal r5, r11, r12, lr
adds r6, r6, r11
# A[4] * B[7]
ldr lr, [r2, #28]
adc r7, r0, #0
umlal r6, r7, r12, lr
# A[5] * B[0]
ldr r12, [r1, #20]
ldr lr, [r2]
mov r11, #0
umlal r8, r11, r12, lr
str r8, [sp, #20]
adds r9, r9, r11
# A[5] * B[1]
ldr lr, [r2, #4]
adc r11, r0, #0
umlal r9, r11, r12, lr
adds r10, r10, r11
# A[5] * B[2]
ldr lr, [r2, #8]
adc r11, r0, #0
umlal r10, r11, r12, lr
adds r3, r3, r11
# A[5] * B[3]
ldr lr, [r2, #12]
adc r11, r0, #0
umlal r3, r11, r12, lr
adds r4, r4, r11
# A[5] * B[4]
ldr lr, [r2, #16]
adc r11, r0, #0
umlal r4, r11, r12, lr
adds r5, r5, r11
# A[5] * B[5]
ldr lr, [r2, #20]
adc r11, r0, #0
umlal r5, r11, r12, lr
adds r6, r6, r11
# A[5] * B[6]
ldr lr, [r2, #24]
adc r11, r0, #0
umlal r6, r11, r12, lr
adds r7, r7, r11
# A[5] * B[7]
ldr lr, [r2, #28]
adc r8, r0, #0
umlal r7, r8, r12, lr
# A[6] * B[0]
ldr r12, [r1, #24]
ldr lr, [r2]
mov r11, #0
umlal r9, r11, r12, lr
str r9, [sp, #24]
adds r10, r10, r11
# A[6] * B[1]
ldr lr, [r2, #4]
adc r11, r0, #0
umlal r10, r11, r12, lr
adds r3, r3, r11
# A[6] * B[2]
ldr lr, [r2, #8]
adc r11, r0, #0
umlal r3, r11, r12, lr
adds r4, r4, r11
# A[6] * B[3]
ldr lr, [r2, #12]
adc r11, r0, #0
umlal r4, r11, r12, lr
adds r5, r5, r11
# A[6] * B[4]
ldr lr, [r2, #16]
adc r11, r0, #0
umlal r5, r11, r12, lr
adds r6, r6, r11
# A[6] * B[5]
ldr lr, [r2, #20]
adc r11, r0, #0
umlal r6, r11, r12, lr
adds r7, r7, r11
# A[6] * B[6]
ldr lr, [r2, #24]
adc r11, r0, #0
umlal r7, r11, r12, lr
adds r8, r8, r11
# A[6] * B[7]
ldr lr, [r2, #28]
adc r9, r0, #0
umlal r8, r9, r12, lr
# A[7] * B[0]
ldr r12, [r1, #28]
ldr lr, [r2]
mov r11, #0
umlal r10, r11, r12, lr
str r10, [sp, #28]
adds r3, r3, r11
# A[7] * B[1]
ldr lr, [r2, #4]
adc r11, r0, #0
umlal r3, r11, r12, lr
adds r4, r4, r11
# A[7] * B[2]
ldr lr, [r2, #8]
adc r11, r0, #0
umlal r4, r11, r12, lr
adds r5, r5, r11
# A[7] * B[3]
ldr lr, [r2, #12]
adc r11, r0, #0
umlal r5, r11, r12, lr
adds r6, r6, r11
# A[7] * B[4]
ldr lr, [r2, #16]
adc r11, r0, #0
umlal r6, r11, r12, lr
adds r7, r7, r11
# A[7] * B[5]
ldr lr, [r2, #20]
adc r11, r0, #0
umlal r7, r11, r12, lr
adds r8, r8, r11
# A[7] * B[6]
ldr lr, [r2, #24]
adc r11, r0, #0
umlal r8, r11, r12, lr
adds r9, r9, r11
# A[7] * B[7]
ldr lr, [r2, #28]
adc r10, r0, #0
umlal r9, r10, r12, lr
# Store the high half of the 512-bit product at sp+32 (low half is at sp).
add lr, sp, #32
stm lr, {r3, r4, r5, r6, r7, r8, r9, r10}
mov r0, sp
# Add c to a * b
ldr lr, [sp, #76]
ldm r0, {r2, r3, r4, r5, r6, r7, r8, r9}
ldm lr!, {r1, r10, r11, r12}
adds r2, r2, r1
adcs r3, r3, r10
adcs r4, r4, r11
adcs r5, r5, r12
ldm lr!, {r1, r10, r11, r12}
adcs r6, r6, r1
adcs r7, r7, r10
adcs r8, r8, r11
adcs r9, r9, r12
mov r1, r9
stm r0!, {r2, r3, r4, r5, r6, r7, r8, r9}
ldm r0, {r2, r3, r4, r5, r6, r7, r8, r9}
# Propagate the carry from the low half through the high 8 words.
adcs r2, r2, #0
adcs r3, r3, #0
adcs r4, r4, #0
adcs r5, r5, #0
adcs r6, r6, #0
adcs r7, r7, #0
adcs r8, r8, #0
adc r9, r9, #0
sub r0, r0, #32
# Get 252..503 and 504..507
lsr lr, r9, #24
lsl r9, r9, #4
orr r9, r9, r8, LSR #28
lsl r8, r8, #4
orr r8, r8, r7, LSR #28
lsl r7, r7, #4
orr r7, r7, r6, LSR #28
lsl r6, r6, #4
orr r6, r6, r5, LSR #28
lsl r5, r5, #4
orr r5, r5, r4, LSR #28
lsl r4, r4, #4
orr r4, r4, r3, LSR #28
lsl r3, r3, #4
orr r3, r3, r2, LSR #28
lsl r2, r2, #4
orr r2, r2, r1, LSR #28
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r9, r9, #0xf0000000
#else
bfc r9, #28, #4
#endif
# Add order times bits 504..507
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0xa3
lsl r10, r10, #8
orr r10, r10, #10
lsl r10, r10, #8
orr r10, r10, #44
lsl r10, r10, #8
orr r10, r10, #19
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0x2c
lsl r10, r10, #8
add r10, r10, #0x13
#else
mov r10, #0x2c13
#endif
movt r10, #0xa30a
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0xa7
lsl r11, r11, #8
orr r11, r11, #0xed
lsl r11, r11, #8
orr r11, r11, #0x9c
lsl r11, r11, #8
orr r11, r11, #0xe5
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0x9c
lsl r11, r11, #8
add r11, r11, #0xe5
#else
mov r11, #0x9ce5
#endif
movt r11, #0xa7ed
#endif
mov r1, #0
umlal r2, r1, r10, lr
adds r3, r3, r1
mov r1, #0
adc r1, r1, #0
umlal r3, r1, r11, lr
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0x5d
lsl r10, r10, #8
orr r10, r10, #8
lsl r10, r10, #8
orr r10, r10, #0x63
lsl r10, r10, #8
orr r10, r10, #41
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0x63
lsl r10, r10, #8
add r10, r10, #0x29
#else
mov r10, #0x6329
#endif
movt r10, #0x5d08
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0xeb
lsl r11, r11, #8
orr r11, r11, #33
lsl r11, r11, #8
orr r11, r11, #6
lsl r11, r11, #8
orr r11, r11, #33
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0x6
lsl r11, r11, #8
add r11, r11, #0x21
#else
mov r11, #0x621
#endif
movt r11, #0xeb21
#endif
adds r4, r4, r1
mov r1, #0
adc r1, r1, #0
umlal r4, r1, r10, lr
adds r5, r5, r1
mov r1, #0
adc r1, r1, #0
umlal r5, r1, r11, lr
adds r6, r6, r1
adcs r7, r7, #0
adcs r8, r8, #0
adc r9, r9, #0
subs r6, r6, lr
sbcs r7, r7, #0
sbcs r8, r8, #0
sbc r9, r9, #0
# Sub product of top 8 words and order
# Barrett-style fold: multiply the top words (r2..r9) by each 32-bit word of
# the order and accumulate into the low half at [sp], one order word per pass.
mov r12, sp
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xa3
lsl r1, r1, #8
orr r1, r1, #10
lsl r1, r1, #8
orr r1, r1, #44
lsl r1, r1, #8
orr r1, r1, #19
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x2c
lsl r1, r1, #8
add r1, r1, #0x13
#else
mov r1, #0x2c13
#endif
movt r1, #0xa30a
#endif
mov lr, #0
ldm r0!, {r10, r11}
umlal r10, lr, r2, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r3, r1
stm r12!, {r10, r11}
ldm r0!, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r4, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r5, r1
stm r12!, {r10, r11}
ldm r0!, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r6, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r7, r1
stm r12!, {r10, r11}
ldm r0!, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r8, r1
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r11, r11, #0xf0000000
#else
bfc r11, #28, #4
#endif
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r9, r1
stm r12!, {r10, r11, lr}
sub r0, r0, #16
sub r12, r12, #32
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xa7
lsl r1, r1, #8
orr r1, r1, #0xed
lsl r1, r1, #8
orr r1, r1, #0x9c
lsl r1, r1, #8
orr r1, r1, #0xe5
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x9c
lsl r1, r1, #8
add r1, r1, #0xe5
#else
mov r1, #0x9ce5
#endif
movt r1, #0xa7ed
#endif
mov lr, #0
ldm r12, {r10, r11}
umlal r10, lr, r2, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r3, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r4, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r5, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r6, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r7, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r8, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r9, r1
stm r12!, {r10, r11, lr}
sub r12, r12, #32
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x5d
lsl r1, r1, #8
orr r1, r1, #8
lsl r1, r1, #8
orr r1, r1, #0x63
lsl r1, r1, #8
orr r1, r1, #41
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x63
lsl r1, r1, #8
add r1, r1, #0x29
#else
mov r1, #0x6329
#endif
movt r1, #0x5d08
#endif
mov lr, #0
ldm r12, {r10, r11}
umlal r10, lr, r2, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r3, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r4, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r5, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r6, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r7, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r8, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r9, r1
stm r12!, {r10, r11, lr}
sub r12, r12, #32
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xeb
lsl r1, r1, #8
orr r1, r1, #33
lsl r1, r1, #8
orr r1, r1, #6
lsl r1, r1, #8
orr r1, r1, #33
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x6
lsl r1, r1, #8
add r1, r1, #0x21
#else
mov r1, #0x621
#endif
movt r1, #0xeb21
#endif
mov lr, #0
ldm r12, {r10, r11}
umlal r10, lr, r2, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r3, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r4, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r5, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r6, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r7, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adds r10, r10, lr
mov lr, #0
adc lr, lr, #0
umlal r10, lr, r8, r1
adds r11, r11, lr
mov lr, #0
adc lr, lr, #0
umlal r11, lr, r9, r1
stm r12!, {r10, r11, lr}
sub r12, r12, #32
# Subtract at 4 * 32
ldm r12, {r10, r11}
subs r10, r10, r2
sbcs r11, r11, r3
stm r12!, {r10, r11}
ldm r12, {r10, r11}
sbcs r10, r10, r4
sbcs r11, r11, r5
stm r12!, {r10, r11}
ldm r12, {r10, r11}
sbcs r10, r10, r6
sbcs r11, r11, r7
stm r12!, {r10, r11}
ldm r12, {r10, r11}
sbcs r10, r10, r8
sbc r11, r11, r9
stm r12!, {r10, r11}
sub r12, r12, #36
# lr = all-ones mask when the intermediate went negative (sign of top word).
asr lr, r11, #25
# Conditionally subtract order starting at bit 125
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xa00000
lsl r1, r1, #8
add r1, r1, #0x0
#else
mov r1, #0xa0000000
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r2, #0x4b
lsl r2, r2, #8
orr r2, r2, #0x9e
lsl r2, r2, #8
orr r2, r2, #0xba
lsl r2, r2, #8
orr r2, r2, #0x7d
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r2, #0xba
lsl r2, r2, #8
add r2, r2, #0x7d
#else
mov r2, #0xba7d
#endif
movt r2, #0x4b9e
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0xcb
lsl r3, r3, #8
orr r3, r3, #2
lsl r3, r3, #8
orr r3, r3, #0x4c
lsl r3, r3, #8
orr r3, r3, #0x63
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x4c
lsl r3, r3, #8
add r3, r3, #0x63
#else
mov r3, #0x4c63
#endif
movt r3, #0xcb02
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r4, #0xd4
lsl r4, r4, #8
orr r4, r4, #0x5e
lsl r4, r4, #8
orr r4, r4, #0xf3
lsl r4, r4, #8
orr r4, r4, #0x9a
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r4, #0xf3
lsl r4, r4, #8
add r4, r4, #0x9a
#else
mov r4, #0xf39a
#endif
movt r4, #0xd45e
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r5, #2
lsl r5, r5, #8
orr r5, r5, #0x9b
lsl r5, r5, #8
orr r5, r5, #0xdf
lsl r5, r5, #8
orr r5, r5, #59
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r5, #0xdf
lsl r5, r5, #8
add r5, r5, #0x3b
#else
mov r5, #0xdf3b
#endif
movt r5, #0x29b
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r9, #0x20000
lsl r9, r9, #8
add r9, r9, #0x0
#else
mov r9, #0x2000000
#endif
# Mask the order words by lr so the add below is a no-op when non-negative
# (branch-free / constant-time selection).
and r1, r1, lr
and r2, r2, lr
and r3, r3, lr
and r4, r4, lr
and r5, r5, lr
and r9, r9, lr
ldm r12, {r10, r11}
adds r10, r10, r1
adcs r11, r11, r2
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adcs r10, r10, r3
adcs r11, r11, r4
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adcs r10, r10, r5
adcs r11, r11, #0
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adcs r10, r10, #0
adcs r11, r11, #0
stm r12!, {r10, r11}
ldm r12, {r10}
adcs r10, r10, #0
stm r12!, {r10}
sub r0, r0, #16
mov r12, sp
# Load bits 252-376
add r12, r12, #28
ldm r12, {r1, r2, r3, r4, r5}
lsl r5, r5, #4
orr r5, r5, r4, lsr #28
lsl r4, r4, #4
orr r4, r4, r3, lsr #28
lsl r3, r3, #4
orr r3, r3, r2, lsr #28
lsl r2, r2, #4
orr r2, r2, r1, lsr #28
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r5, r5, #0xe0000000
#else
bfc r5, #29, #3
#endif
sub r12, r12, #28
# Sub product of top 4 words and order
# Second reduction round: fold the remaining bits above 2^252 back in.
mov r0, sp
# * -5cf5d3ed
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xa3
lsl r1, r1, #8
orr r1, r1, #10
lsl r1, r1, #8
orr r1, r1, #44
lsl r1, r1, #8
orr r1, r1, #19
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x2c
lsl r1, r1, #8
add r1, r1, #0x13
#else
mov r1, #0x2c13
#endif
movt r1, #0xa30a
#endif
mov lr, #0
ldm r0, {r6, r7, r8, r9}
umlal r6, lr, r2, r1
adds r7, r7, lr
mov lr, #0
adc lr, lr, #0
umlal r7, lr, r3, r1
adds r8, r8, lr
mov lr, #0
adc lr, lr, #0
umlal r8, lr, r4, r1
adds r9, r9, lr
mov lr, #0
adc lr, lr, #0
umlal r9, lr, r5, r1
stm r0, {r6, r7, r8, r9}
add r0, r0, #4
# * -5812631b
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xa7
lsl r1, r1, #8
orr r1, r1, #0xed
lsl r1, r1, #8
orr r1, r1, #0x9c
lsl r1, r1, #8
orr r1, r1, #0xe5
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x9c
lsl r1, r1, #8
add r1, r1, #0xe5
#else
mov r1, #0x9ce5
#endif
movt r1, #0xa7ed
#endif
mov r10, #0
ldm r0, {r6, r7, r8, r9}
umlal r6, r10, r2, r1
adds r7, r7, r10
mov r10, #0
adc r10, r10, #0
umlal r7, r10, r3, r1
adds r8, r8, r10
mov r10, #0
adc r10, r10, #0
umlal r8, r10, r4, r1
adds r9, r9, r10
mov r10, #0
adc r10, r10, #0
umlal r9, r10, r5, r1
stm r0, {r6, r7, r8, r9}
add r0, r0, #4
# * -a2f79cd7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x5d
lsl r1, r1, #8
orr r1, r1, #8
lsl r1, r1, #8
orr r1, r1, #0x63
lsl r1, r1, #8
orr r1, r1, #41
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x63
lsl r1, r1, #8
add r1, r1, #0x29
#else
mov r1, #0x6329
#endif
movt r1, #0x5d08
#endif
mov r11, #0
ldm r0, {r6, r7, r8, r9}
umlal r6, r11, r2, r1
adds r7, r7, r11
mov r11, #0
adc r11, r11, #0
umlal r7, r11, r3, r1
adds r8, r8, r11
mov r11, #0
adc r11, r11, #0
umlal r8, r11, r4, r1
adds r9, r9, r11
mov r11, #0
adc r11, r11, #0
umlal r9, r11, r5, r1
stm r0, {r6, r7, r8, r9}
add r0, r0, #4
# * -14def9df
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xeb
lsl r1, r1, #8
orr r1, r1, #33
lsl r1, r1, #8
orr r1, r1, #6
lsl r1, r1, #8
orr r1, r1, #33
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x6
lsl r1, r1, #8
add r1, r1, #0x21
#else
mov r1, #0x621
#endif
movt r1, #0xeb21
#endif
mov r12, #0
ldm r0, {r6, r7, r8, r9}
umlal r6, r12, r2, r1
adds r7, r7, r12
mov r12, #0
adc r12, r12, #0
umlal r7, r12, r3, r1
adds r8, r8, r12
mov r12, #0
adc r12, r12, #0
umlal r8, r12, r4, r1
adds r9, r9, r12
mov r12, #0
adc r12, r12, #0
umlal r9, r12, r5, r1
stm r0, {r6, r7, r8, r9}
add r0, r0, #4
# Add overflows at 4 * 32
ldm r0, {r6, r7, r8, r9}
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r9, r9, #0xf0000000
#else
bfc r9, #28, #4
#endif
adds r6, r6, lr
adcs r7, r7, r10
adcs r8, r8, r11
adc r9, r9, r12
# Subtract top at 4 * 32
subs r6, r6, r2
sbcs r7, r7, r3
sbcs r8, r8, r4
sbcs r9, r9, r5
# r1 = borrow mask (all ones on underflow) for the final conditional add.
sbc r1, r1, r1
sub r0, r0, #16
ldm r0, {r2, r3, r4, r5}
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0x5c
lsl r10, r10, #8
orr r10, r10, #0xf5
lsl r10, r10, #8
orr r10, r10, #0xd3
lsl r10, r10, #8
orr r10, r10, #0xed
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0xd3
lsl r10, r10, #8
add r10, r10, #0xed
#else
mov r10, #0xd3ed
#endif
movt r10, #0x5cf5
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0x58
lsl r11, r11, #8
orr r11, r11, #18
lsl r11, r11, #8
orr r11, r11, #0x63
lsl r11, r11, #8
orr r11, r11, #26
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0x63
lsl r11, r11, #8
add r11, r11, #0x1a
#else
mov r11, #0x631a
#endif
movt r11, #0x5812
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r12, #0xa2
lsl r12, r12, #8
orr r12, r12, #0xf7
lsl r12, r12, #8
orr r12, r12, #0x9c
lsl r12, r12, #8
orr r12, r12, #0xd6
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r12, #0x9c
lsl r12, r12, #8
add r12, r12, #0xd6
#else
mov r12, #0x9cd6
#endif
movt r12, #0xa2f7
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov lr, #20
lsl lr, lr, #8
orr lr, lr, #0xde
lsl lr, lr, #8
orr lr, lr, #0xf9
lsl lr, lr, #8
orr lr, lr, #0xde
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov lr, #0xf9
lsl lr, lr, #8
add lr, lr, #0xde
#else
mov lr, #0xf9de
#endif
movt lr, #0x14de
#endif
# Conditionally add the order back (masked by r1) to undo an over-subtraction.
and r10, r10, r1
and r11, r11, r1
and r12, r12, r1
and lr, lr, r1
adds r2, r2, r10
adcs r3, r3, r11
adcs r4, r4, r12
adcs r5, r5, lr
adcs r6, r6, #0
adcs r7, r7, #0
and r1, r1, #0x10000000
adcs r8, r8, #0
adc r9, r9, r1
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r9, r9, #0xf0000000
#else
bfc r9, #28, #4
#endif
# Write the 256-bit result to the caller's s pointer (spilled at sp+0x44).
ldr r0, [sp, #68]
# Store result
str r2, [r0]
str r3, [r0, #4]
str r4, [r0, #8]
str r5, [r0, #12]
str r6, [r0, #16]
str r7, [r0, #20]
str r8, [r0, #24]
str r9, [r0, #28]
add sp, sp, #0x50
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size sc_muladd,.-sc_muladd
#else
.text
.align 4
.globl sc_muladd
.type sc_muladd, %function
sc_muladd:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
sub sp, sp, #0x50
add lr, sp, #0x44
stm lr, {r0, r1, r3}
mov lr, r2
ldm r1, {r0, r1, r2, r3}
ldm lr!, {r4, r5, r6}
umull r10, r11, r0, r4
umull r12, r7, r1, r4
umaal r11, r12, r0, r5
umull r8, r9, r2, r4
umaal r12, r8, r1, r5
umaal r12, r7, r0, r6
umaal r8, r9, r3, r4
stm sp, {r10, r11, r12}
umaal r7, r8, r2, r5
ldm lr!, {r4}
umull r10, r11, r1, r6
umaal r8, r9, r2, r6
umaal r7, r10, r0, r4
umaal r8, r11, r3, r5
str r7, [sp, #12]
umaal r8, r10, r1, r4
umaal r9, r11, r3, r6
umaal r9, r10, r2, r4
umaal r10, r11, r3, r4
ldm lr, {r4, r5, r6, r7}
mov r12, #0
umlal r8, r12, r0, r4
umaal r9, r12, r1, r4
umaal r10, r12, r2, r4
umaal r11, r12, r3, r4
mov r4, #0
umlal r9, r4, r0, r5
umaal r10, r4, r1, r5
umaal r11, r4, r2, r5
umaal r12, r4, r3, r5
mov r5, #0
umlal r10, r5, r0, r6
umaal r11, r5, r1, r6
umaal r12, r5, r2, r6
umaal r4, r5, r3, r6
mov r6, #0
umlal r11, r6, r0, r7
ldr r0, [sp, #72]
umaal r12, r6, r1, r7
add r0, r0, #16
umaal r4, r6, r2, r7
sub lr, lr, #16
umaal r5, r6, r3, r7
ldm r0, {r0, r1, r2, r3}
str r6, [sp, #64]
ldm lr!, {r6}
mov r7, #0
umlal r8, r7, r0, r6
umaal r9, r7, r1, r6
str r8, [sp, #16]
umaal r10, r7, r2, r6
umaal r11, r7, r3, r6
ldm lr!, {r6}
mov r8, #0
umlal r9, r8, r0, r6
umaal r10, r8, r1, r6
str r9, [sp, #20]
umaal r11, r8, r2, r6
umaal r12, r8, r3, r6
ldm lr!, {r6}
mov r9, #0
umlal r10, r9, r0, r6
umaal r11, r9, r1, r6
str r10, [sp, #24]
umaal r12, r9, r2, r6
umaal r4, r9, r3, r6
ldm lr!, {r6}
mov r10, #0
umlal r11, r10, r0, r6
umaal r12, r10, r1, r6
str r11, [sp, #28]
umaal r4, r10, r2, r6
umaal r5, r10, r3, r6
ldm lr!, {r11}
umaal r12, r7, r0, r11
umaal r4, r7, r1, r11
ldr r6, [sp, #64]
umaal r5, r7, r2, r11
umaal r6, r7, r3, r11
ldm lr!, {r11}
umaal r4, r8, r0, r11
umaal r5, r8, r1, r11
umaal r6, r8, r2, r11
umaal r7, r8, r3, r11
ldm lr, {r11, lr}
umaal r5, r9, r0, r11
umaal r6, r10, r0, lr
umaal r6, r9, r1, r11
umaal r7, r10, r1, lr
umaal r7, r9, r2, r11
umaal r8, r10, r2, lr
umaal r8, r9, r3, r11
umaal r9, r10, r3, lr
mov r3, r12
add lr, sp, #32
stm lr, {r3, r4, r5, r6, r7, r8, r9, r10}
mov r0, sp
# Add c to a * b
ldr lr, [sp, #76]
ldm r0, {r2, r3, r4, r5, r6, r7, r8, r9}
ldm lr!, {r1, r10, r11, r12}
adds r2, r2, r1
adcs r3, r3, r10
adcs r4, r4, r11
adcs r5, r5, r12
ldm lr!, {r1, r10, r11, r12}
adcs r6, r6, r1
adcs r7, r7, r10
adcs r8, r8, r11
adcs r9, r9, r12
mov r1, r9
stm r0!, {r2, r3, r4, r5, r6, r7, r8, r9}
ldm r0, {r2, r3, r4, r5, r6, r7, r8, r9}
adcs r2, r2, #0
adcs r3, r3, #0
adcs r4, r4, #0
adcs r5, r5, #0
adcs r6, r6, #0
adcs r7, r7, #0
adcs r8, r8, #0
adc r9, r9, #0
sub r0, r0, #32
# Get 252..503 and 504..507
lsr lr, r9, #24
lsl r9, r9, #4
orr r9, r9, r8, LSR #28
lsl r8, r8, #4
orr r8, r8, r7, LSR #28
lsl r7, r7, #4
orr r7, r7, r6, LSR #28
lsl r6, r6, #4
orr r6, r6, r5, LSR #28
lsl r5, r5, #4
orr r5, r5, r4, LSR #28
lsl r4, r4, #4
orr r4, r4, r3, LSR #28
lsl r3, r3, #4
orr r3, r3, r2, LSR #28
lsl r2, r2, #4
orr r2, r2, r1, LSR #28
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r9, r9, #0xf0000000
#else
bfc r9, #28, #4
#endif
# Add order times bits 504..507
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0xa3
lsl r10, r10, #8
orr r10, r10, #10
lsl r10, r10, #8
orr r10, r10, #44
lsl r10, r10, #8
orr r10, r10, #19
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0x2c
lsl r10, r10, #8
add r10, r10, #0x13
#else
mov r10, #0x2c13
#endif
movt r10, #0xa30a
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0xa7
lsl r11, r11, #8
orr r11, r11, #0xed
lsl r11, r11, #8
orr r11, r11, #0x9c
lsl r11, r11, #8
orr r11, r11, #0xe5
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0x9c
lsl r11, r11, #8
add r11, r11, #0xe5
#else
mov r11, #0x9ce5
#endif
movt r11, #0xa7ed
#endif
mov r1, #0
umlal r2, r1, r10, lr
umaal r3, r1, r11, lr
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0x5d
lsl r10, r10, #8
orr r10, r10, #8
lsl r10, r10, #8
orr r10, r10, #0x63
lsl r10, r10, #8
orr r10, r10, #41
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0x63
lsl r10, r10, #8
add r10, r10, #0x29
#else
mov r10, #0x6329
#endif
movt r10, #0x5d08
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0xeb
lsl r11, r11, #8
orr r11, r11, #33
lsl r11, r11, #8
orr r11, r11, #6
lsl r11, r11, #8
orr r11, r11, #33
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0x6
lsl r11, r11, #8
add r11, r11, #0x21
#else
mov r11, #0x621
#endif
movt r11, #0xeb21
#endif
umaal r4, r1, r10, lr
umaal r5, r1, r11, lr
adds r6, r6, r1
adcs r7, r7, #0
adcs r8, r8, #0
adc r9, r9, #0
subs r6, r6, lr
sbcs r7, r7, #0
sbcs r8, r8, #0
sbc r9, r9, #0
# Sub product of top 8 words and order
mov r12, sp
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xa3
lsl r1, r1, #8
orr r1, r1, #10
lsl r1, r1, #8
orr r1, r1, #44
lsl r1, r1, #8
orr r1, r1, #19
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x2c
lsl r1, r1, #8
add r1, r1, #0x13
#else
mov r1, #0x2c13
#endif
movt r1, #0xa30a
#endif
mov lr, #0
ldm r0!, {r10, r11}
umlal r10, lr, r2, r1
umaal r11, lr, r3, r1
stm r12!, {r10, r11}
ldm r0!, {r10, r11}
umaal r10, lr, r4, r1
umaal r11, lr, r5, r1
stm r12!, {r10, r11}
ldm r0!, {r10, r11}
umaal r10, lr, r6, r1
umaal r11, lr, r7, r1
stm r12!, {r10, r11}
ldm r0!, {r10, r11}
umaal r10, lr, r8, r1
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r11, r11, #0xf0000000
#else
bfc r11, #28, #4
#endif
umaal r11, lr, r9, r1
stm r12!, {r10, r11, lr}
sub r0, r0, #16
sub r12, r12, #32
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xa7
lsl r1, r1, #8
orr r1, r1, #0xed
lsl r1, r1, #8
orr r1, r1, #0x9c
lsl r1, r1, #8
orr r1, r1, #0xe5
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x9c
lsl r1, r1, #8
add r1, r1, #0xe5
#else
mov r1, #0x9ce5
#endif
movt r1, #0xa7ed
#endif
mov lr, #0
ldm r12, {r10, r11}
umlal r10, lr, r2, r1
umaal r11, lr, r3, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
umaal r10, lr, r4, r1
umaal r11, lr, r5, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
umaal r10, lr, r6, r1
umaal r11, lr, r7, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
umaal r10, lr, r8, r1
umaal r11, lr, r9, r1
stm r12!, {r10, r11, lr}
sub r12, r12, #32
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x5d
lsl r1, r1, #8
orr r1, r1, #8
lsl r1, r1, #8
orr r1, r1, #0x63
lsl r1, r1, #8
orr r1, r1, #41
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x63
lsl r1, r1, #8
add r1, r1, #0x29
#else
mov r1, #0x6329
#endif
movt r1, #0x5d08
#endif
mov lr, #0
ldm r12, {r10, r11}
umlal r10, lr, r2, r1
umaal r11, lr, r3, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
umaal r10, lr, r4, r1
umaal r11, lr, r5, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
umaal r10, lr, r6, r1
umaal r11, lr, r7, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
umaal r10, lr, r8, r1
umaal r11, lr, r9, r1
stm r12!, {r10, r11, lr}
sub r12, r12, #32
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xeb
lsl r1, r1, #8
orr r1, r1, #33
lsl r1, r1, #8
orr r1, r1, #6
lsl r1, r1, #8
orr r1, r1, #33
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x6
lsl r1, r1, #8
add r1, r1, #0x21
#else
mov r1, #0x621
#endif
movt r1, #0xeb21
#endif
mov lr, #0
ldm r12, {r10, r11}
umlal r10, lr, r2, r1
umaal r11, lr, r3, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
umaal r10, lr, r4, r1
umaal r11, lr, r5, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
umaal r10, lr, r6, r1
umaal r11, lr, r7, r1
stm r12!, {r10, r11}
ldm r12, {r10, r11}
umaal r10, lr, r8, r1
umaal r11, lr, r9, r1
stm r12!, {r10, r11, lr}
sub r12, r12, #32
# Subtract at 4 * 32
ldm r12, {r10, r11}
subs r10, r10, r2
sbcs r11, r11, r3
stm r12!, {r10, r11}
ldm r12, {r10, r11}
sbcs r10, r10, r4
sbcs r11, r11, r5
stm r12!, {r10, r11}
ldm r12, {r10, r11}
sbcs r10, r10, r6
sbcs r11, r11, r7
stm r12!, {r10, r11}
ldm r12, {r10, r11}
sbcs r10, r10, r8
sbc r11, r11, r9
stm r12!, {r10, r11}
sub r12, r12, #36
asr lr, r11, #25
# Conditionally subtract order starting at bit 125
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xa00000
lsl r1, r1, #8
add r1, r1, #0x0
#else
mov r1, #0xa0000000
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r2, #0x4b
lsl r2, r2, #8
orr r2, r2, #0x9e
lsl r2, r2, #8
orr r2, r2, #0xba
lsl r2, r2, #8
orr r2, r2, #0x7d
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r2, #0xba
lsl r2, r2, #8
add r2, r2, #0x7d
#else
mov r2, #0xba7d
#endif
movt r2, #0x4b9e
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0xcb
lsl r3, r3, #8
orr r3, r3, #2
lsl r3, r3, #8
orr r3, r3, #0x4c
lsl r3, r3, #8
orr r3, r3, #0x63
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r3, #0x4c
lsl r3, r3, #8
add r3, r3, #0x63
#else
mov r3, #0x4c63
#endif
movt r3, #0xcb02
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r4, #0xd4
lsl r4, r4, #8
orr r4, r4, #0x5e
lsl r4, r4, #8
orr r4, r4, #0xf3
lsl r4, r4, #8
orr r4, r4, #0x9a
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r4, #0xf3
lsl r4, r4, #8
add r4, r4, #0x9a
#else
mov r4, #0xf39a
#endif
movt r4, #0xd45e
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r5, #2
lsl r5, r5, #8
orr r5, r5, #0x9b
lsl r5, r5, #8
orr r5, r5, #0xdf
lsl r5, r5, #8
orr r5, r5, #59
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r5, #0xdf
lsl r5, r5, #8
add r5, r5, #0x3b
#else
mov r5, #0xdf3b
#endif
movt r5, #0x29b
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r9, #0x20000
lsl r9, r9, #8
add r9, r9, #0x0
#else
mov r9, #0x2000000
#endif
and r1, r1, lr
and r2, r2, lr
and r3, r3, lr
and r4, r4, lr
and r5, r5, lr
and r9, r9, lr
ldm r12, {r10, r11}
adds r10, r10, r1
adcs r11, r11, r2
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adcs r10, r10, r3
adcs r11, r11, r4
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adcs r10, r10, r5
adcs r11, r11, #0
stm r12!, {r10, r11}
ldm r12, {r10, r11}
adcs r10, r10, #0
adcs r11, r11, #0
stm r12!, {r10, r11}
ldm r12, {r10}
adcs r10, r10, #0
stm r12!, {r10}
sub r0, r0, #16
mov r12, sp
# Load bits 252-376
add r12, r12, #28
ldm r12, {r1, r2, r3, r4, r5}
lsl r5, r5, #4
orr r5, r5, r4, lsr #28
lsl r4, r4, #4
orr r4, r4, r3, lsr #28
lsl r3, r3, #4
orr r3, r3, r2, lsr #28
lsl r2, r2, #4
orr r2, r2, r1, lsr #28
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r5, r5, #0xe0000000
#else
bfc r5, #29, #3
#endif
sub r12, r12, #28
# Sub product of top 4 words and order
mov r0, sp
# * -5cf5d3ed
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xa3
lsl r1, r1, #8
orr r1, r1, #10
lsl r1, r1, #8
orr r1, r1, #44
lsl r1, r1, #8
orr r1, r1, #19
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x2c
lsl r1, r1, #8
add r1, r1, #0x13
#else
mov r1, #0x2c13
#endif
movt r1, #0xa30a
#endif
mov lr, #0
ldm r0, {r6, r7, r8, r9}
umlal r6, lr, r2, r1
umaal r7, lr, r3, r1
umaal r8, lr, r4, r1
umaal r9, lr, r5, r1
stm r0, {r6, r7, r8, r9}
add r0, r0, #4
# * -5812631b
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xa7
lsl r1, r1, #8
orr r1, r1, #0xed
lsl r1, r1, #8
orr r1, r1, #0x9c
lsl r1, r1, #8
orr r1, r1, #0xe5
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x9c
lsl r1, r1, #8
add r1, r1, #0xe5
#else
mov r1, #0x9ce5
#endif
movt r1, #0xa7ed
#endif
mov r10, #0
ldm r0, {r6, r7, r8, r9}
umlal r6, r10, r2, r1
umaal r7, r10, r3, r1
umaal r8, r10, r4, r1
umaal r9, r10, r5, r1
stm r0, {r6, r7, r8, r9}
add r0, r0, #4
# * -a2f79cd7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x5d
lsl r1, r1, #8
orr r1, r1, #8
lsl r1, r1, #8
orr r1, r1, #0x63
lsl r1, r1, #8
orr r1, r1, #41
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x63
lsl r1, r1, #8
add r1, r1, #0x29
#else
mov r1, #0x6329
#endif
movt r1, #0x5d08
#endif
mov r11, #0
ldm r0, {r6, r7, r8, r9}
umlal r6, r11, r2, r1
umaal r7, r11, r3, r1
umaal r8, r11, r4, r1
umaal r9, r11, r5, r1
stm r0, {r6, r7, r8, r9}
add r0, r0, #4
# * -14def9df
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0xeb
lsl r1, r1, #8
orr r1, r1, #33
lsl r1, r1, #8
orr r1, r1, #6
lsl r1, r1, #8
orr r1, r1, #33
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r1, #0x6
lsl r1, r1, #8
add r1, r1, #0x21
#else
mov r1, #0x621
#endif
movt r1, #0xeb21
#endif
mov r12, #0
ldm r0, {r6, r7, r8, r9}
umlal r6, r12, r2, r1
umaal r7, r12, r3, r1
umaal r8, r12, r4, r1
umaal r9, r12, r5, r1
stm r0, {r6, r7, r8, r9}
add r0, r0, #4
# Add overflows at 4 * 32
ldm r0, {r6, r7, r8, r9}
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r9, r9, #0xf0000000
#else
bfc r9, #28, #4
#endif
adds r6, r6, lr
adcs r7, r7, r10
adcs r8, r8, r11
adc r9, r9, r12
# Subtract top at 4 * 32
subs r6, r6, r2
sbcs r7, r7, r3
sbcs r8, r8, r4
sbcs r9, r9, r5
sbc r1, r1, r1
sub r0, r0, #16
ldm r0, {r2, r3, r4, r5}
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0x5c
lsl r10, r10, #8
orr r10, r10, #0xf5
lsl r10, r10, #8
orr r10, r10, #0xd3
lsl r10, r10, #8
orr r10, r10, #0xed
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r10, #0xd3
lsl r10, r10, #8
add r10, r10, #0xed
#else
mov r10, #0xd3ed
#endif
movt r10, #0x5cf5
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0x58
lsl r11, r11, #8
orr r11, r11, #18
lsl r11, r11, #8
orr r11, r11, #0x63
lsl r11, r11, #8
orr r11, r11, #26
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r11, #0x63
lsl r11, r11, #8
add r11, r11, #0x1a
#else
mov r11, #0x631a
#endif
movt r11, #0x5812
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r12, #0xa2
lsl r12, r12, #8
orr r12, r12, #0xf7
lsl r12, r12, #8
orr r12, r12, #0x9c
lsl r12, r12, #8
orr r12, r12, #0xd6
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov r12, #0x9c
lsl r12, r12, #8
add r12, r12, #0xd6
#else
mov r12, #0x9cd6
#endif
movt r12, #0xa2f7
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov lr, #20
lsl lr, lr, #8
orr lr, lr, #0xde
lsl lr, lr, #8
orr lr, lr, #0xf9
lsl lr, lr, #8
orr lr, lr, #0xde
#else
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
mov lr, #0xf9
lsl lr, lr, #8
add lr, lr, #0xde
#else
mov lr, #0xf9de
#endif
movt lr, #0x14de
#endif
and r10, r10, r1
and r11, r11, r1
and r12, r12, r1
and lr, lr, r1
adds r2, r2, r10
adcs r3, r3, r11
adcs r4, r4, r12
adcs r5, r5, lr
adcs r6, r6, #0
adcs r7, r7, #0
and r1, r1, #0x10000000
adcs r8, r8, #0
adc r9, r9, r1
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
bic r9, r9, #0xf0000000
#else
bfc r9, #28, #4
#endif
ldr r0, [sp, #68]
# Store result
str r2, [r0]
str r3, [r0, #4]
str r4, [r0, #8]
str r5, [r0, #12]
str r6, [r0, #16]
str r7, [r0, #20]
str r8, [r0, #24]
str r9, [r0, #28]
add sp, sp, #0x50
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size sc_muladd,.-sc_muladd
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
#endif /* HAVE_ED25519_SIGN */
#endif /* HAVE_ED25519 */
#endif /* !CURVE25519_SMALL || !ED25519_SMALL */
#endif /* HAVE_CURVE25519 || HAVE_ED25519 */
#endif /* !__aarch64__ && __arm__ && !__thumb__ */
#endif /* WOLFSSL_ARMASM */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
#endif /* !WOLFSSL_ARMASM_INLINE */
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/UART/UART_TwoBoards_ComDMA/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/* Reset entry point: set SP, initialise .data and .bss, run SystemInit and
 * the C runtime constructors, then call main().  Spins forever if main()
 * returns. */
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
/* r1 = byte offset into .data, advanced by 4 per copied word */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* load word from flash image of .data */
str r3, [r0, r1] /* store to SRAM (r0 = _sdata, set below) */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1 /* r2 = next destination address */
cmp r2, r3
bcc CopyDataInit /* keep copying while r2 < _edata */
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss /* keep zeroing while r2 < _ebss */
/* Call the clock system initialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever /* trap here if main() ever returns */
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
/* Trap for any unexpected interrupt: spin in place so the full system state
 * is preserved for inspection under a debugger. */
Default_Handler:
.Lhang:
b .Lhang
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/* Cortex-M0 vector table for STM32F072x8/xB.  The linker script places the
 * .isr_vector section at physical address 0x0000_0000 so the core fetches
 * the initial SP and Reset_Handler from here. */
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/* .size must come after the table: ".-g_pfnVectors" is evaluated at the
 * point of the directive, so placing it before the label (as originally
 * generated) recorded a symbol size of 0 instead of the table's length. */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Cortex-M0 core exception handlers */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
/* STM32F072 peripheral interrupt handlers */
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aenu1/aps3e
| 190,385
|
app/src/main/cpp/rpcs3/3rdparty/wolfssl/wolfssl/wolfcrypt/src/port/arm/armv8-32-sha512-asm.S
|
/* armv8-32-sha512-asm
*
* Copyright (C) 2006-2023 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
/* Generated using (from wolfssl):
* cd ../scripts
* ruby ./sha2/sha512.rb arm32 ../wolfssl/wolfcrypt/src/port/arm/armv8-32-sha512-asm.S
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif /* HAVE_CONFIG_H */
#include <wolfssl/wolfcrypt/settings.h>
#ifdef WOLFSSL_ARMASM
#if !defined(__aarch64__) && defined(__arm__) && !defined(__thumb__)
#ifndef WOLFSSL_ARMASM_INLINE
#ifdef WOLFSSL_SHA512
#ifdef WOLFSSL_ARMASM_NO_NEON
.text
/* SHA-512 round constants K[0..79] (640 bytes = 80 x 64 bits).  Each 64-bit
 * constant is emitted as two 32-bit words with the LOW word first, e.g.
 * K[0] = 0x428a2f98d728ae22 appears as 0xd728ae22 then 0x428a2f98. */
.type L_SHA512_transform_len_k, %object
.size L_SHA512_transform_len_k, 640
.align 4
L_SHA512_transform_len_k:
.word 0xd728ae22
.word 0x428a2f98
.word 0x23ef65cd
.word 0x71374491
.word 0xec4d3b2f
.word 0xb5c0fbcf
.word 0x8189dbbc
.word 0xe9b5dba5
.word 0xf348b538
.word 0x3956c25b
.word 0xb605d019
.word 0x59f111f1
.word 0xaf194f9b
.word 0x923f82a4
.word 0xda6d8118
.word 0xab1c5ed5
.word 0xa3030242
.word 0xd807aa98
.word 0x45706fbe
.word 0x12835b01
.word 0x4ee4b28c
.word 0x243185be
.word 0xd5ffb4e2
.word 0x550c7dc3
.word 0xf27b896f
.word 0x72be5d74
.word 0x3b1696b1
.word 0x80deb1fe
.word 0x25c71235
.word 0x9bdc06a7
.word 0xcf692694
.word 0xc19bf174
.word 0x9ef14ad2
.word 0xe49b69c1
.word 0x384f25e3
.word 0xefbe4786
.word 0x8b8cd5b5
.word 0xfc19dc6
.word 0x77ac9c65
.word 0x240ca1cc
.word 0x592b0275
.word 0x2de92c6f
.word 0x6ea6e483
.word 0x4a7484aa
.word 0xbd41fbd4
.word 0x5cb0a9dc
.word 0x831153b5
.word 0x76f988da
.word 0xee66dfab
.word 0x983e5152
.word 0x2db43210
.word 0xa831c66d
.word 0x98fb213f
.word 0xb00327c8
.word 0xbeef0ee4
.word 0xbf597fc7
.word 0x3da88fc2
.word 0xc6e00bf3
.word 0x930aa725
.word 0xd5a79147
.word 0xe003826f
.word 0x6ca6351
.word 0xa0e6e70
.word 0x14292967
.word 0x46d22ffc
.word 0x27b70a85
.word 0x5c26c926
.word 0x2e1b2138
.word 0x5ac42aed
.word 0x4d2c6dfc
.word 0x9d95b3df
.word 0x53380d13
.word 0x8baf63de
.word 0x650a7354
.word 0x3c77b2a8
.word 0x766a0abb
.word 0x47edaee6
.word 0x81c2c92e
.word 0x1482353b
.word 0x92722c85
.word 0x4cf10364
.word 0xa2bfe8a1
.word 0xbc423001
.word 0xa81a664b
.word 0xd0f89791
.word 0xc24b8b70
.word 0x654be30
.word 0xc76c51a3
.word 0xd6ef5218
.word 0xd192e819
.word 0x5565a910
.word 0xd6990624
.word 0x5771202a
.word 0xf40e3585
.word 0x32bbd1b8
.word 0x106aa070
.word 0xb8d2d0c8
.word 0x19a4c116
.word 0x5141ab53
.word 0x1e376c08
.word 0xdf8eeb99
.word 0x2748774c
.word 0xe19b48a8
.word 0x34b0bcb5
.word 0xc5c95a63
.word 0x391c0cb3
.word 0xe3418acb
.word 0x4ed8aa4a
.word 0x7763e373
.word 0x5b9cca4f
.word 0xd6b2b8a3
.word 0x682e6ff3
.word 0x5defb2fc
.word 0x748f82ee
.word 0x43172f60
.word 0x78a5636f
.word 0xa1f0ab72
.word 0x84c87814
.word 0x1a6439ec
.word 0x8cc70208
.word 0x23631e28
.word 0x90befffa
.word 0xde82bde9
.word 0xa4506ceb
.word 0xb2c67915
.word 0xbef9a3f7
.word 0xe372532b
.word 0xc67178f2
.word 0xea26619c
.word 0xca273ece
.word 0x21c0c207
.word 0xd186b8c7
.word 0xcde0eb1e
.word 0xeada7dd6
.word 0xee6ed178
.word 0xf57d4f7f
.word 0x72176fba
.word 0x6f067aa
.word 0xa2c898a6
.word 0xa637dc5
.word 0xbef90dae
.word 0x113f9804
.word 0x131c471b
.word 0x1b710b35
.word 0x23047d84
.word 0x28db77f5
.word 0x40c72493
.word 0x32caab7b
.word 0x15c9bebc
.word 0x3c9ebe0a
.word 0x9c100d4c
.word 0x431d67c4
.word 0xcb3e42b6
.word 0x4cc5d4be
.word 0xfc657e2a
.word 0x597f299c
.word 0x3ad6faec
.word 0x5fcb6fab
.word 0x4a475817
.word 0x6c44198c
.text
.align 4
.globl Transform_Sha512_Len
.type Transform_Sha512_Len, %function
Transform_Sha512_Len:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
sub sp, sp, #0xc0
adr r3, L_SHA512_transform_len_k
# Copy digest to add in at end
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #8]
ldr r7, [r0, #12]
#else
ldrd r6, r7, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #16]
ldr r9, [r0, #20]
#else
ldrd r8, r9, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r0, #24]
ldr r11, [r0, #28]
#else
ldrd r10, r11, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #128]
str r5, [sp, #132]
#else
strd r4, r5, [sp, #128]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [sp, #136]
str r7, [sp, #140]
#else
strd r6, r7, [sp, #136]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [sp, #144]
str r9, [sp, #148]
#else
strd r8, r9, [sp, #144]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r10, [sp, #152]
str r11, [sp, #156]
#else
strd r10, r11, [sp, #152]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #32]
ldr r5, [r0, #36]
#else
ldrd r4, r5, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #40]
ldr r7, [r0, #44]
#else
ldrd r6, r7, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #48]
ldr r9, [r0, #52]
#else
ldrd r8, r9, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r0, #56]
ldr r11, [r0, #60]
#else
ldrd r10, r11, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #160]
str r5, [sp, #164]
#else
strd r4, r5, [sp, #160]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [sp, #168]
str r7, [sp, #172]
#else
strd r6, r7, [sp, #168]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [sp, #176]
str r9, [sp, #180]
#else
strd r8, r9, [sp, #176]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r10, [sp, #184]
str r11, [sp, #188]
#else
strd r10, r11, [sp, #184]
#endif
# Start of loop processing a block
L_SHA512_transform_len_begin:
# Load, Reverse and Store W - 64 bytes
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
ldr r4, [r1]
ldr r5, [r1, #4]
ldr r6, [r1, #8]
ldr r7, [r1, #12]
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
str r5, [sp]
str r4, [sp, #4]
str r7, [sp, #8]
str r6, [sp, #12]
ldr r4, [r1, #16]
ldr r5, [r1, #20]
ldr r6, [r1, #24]
ldr r7, [r1, #28]
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
str r5, [sp, #16]
str r4, [sp, #20]
str r7, [sp, #24]
str r6, [sp, #28]
ldr r4, [r1, #32]
ldr r5, [r1, #36]
ldr r6, [r1, #40]
ldr r7, [r1, #44]
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
str r5, [sp, #32]
str r4, [sp, #36]
str r7, [sp, #40]
str r6, [sp, #44]
ldr r4, [r1, #48]
ldr r5, [r1, #52]
ldr r6, [r1, #56]
ldr r7, [r1, #60]
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
str r5, [sp, #48]
str r4, [sp, #52]
str r7, [sp, #56]
str r6, [sp, #60]
ldr r4, [r1, #64]
ldr r5, [r1, #68]
ldr r6, [r1, #72]
ldr r7, [r1, #76]
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
str r5, [sp, #64]
str r4, [sp, #68]
str r7, [sp, #72]
str r6, [sp, #76]
ldr r4, [r1, #80]
ldr r5, [r1, #84]
ldr r6, [r1, #88]
ldr r7, [r1, #92]
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
str r5, [sp, #80]
str r4, [sp, #84]
str r7, [sp, #88]
str r6, [sp, #92]
ldr r4, [r1, #96]
ldr r5, [r1, #100]
ldr r6, [r1, #104]
ldr r7, [r1, #108]
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
str r5, [sp, #96]
str r4, [sp, #100]
str r7, [sp, #104]
str r6, [sp, #108]
ldr r4, [r1, #112]
ldr r5, [r1, #116]
ldr r6, [r1, #120]
ldr r7, [r1, #124]
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
str r5, [sp, #112]
str r4, [sp, #116]
str r7, [sp, #120]
str r6, [sp, #124]
#else
ldr r4, [r1]
ldr r5, [r1, #4]
ldr r6, [r1, #8]
ldr r7, [r1, #12]
ldr r8, [r1, #16]
ldr r9, [r1, #20]
ldr r10, [r1, #24]
ldr r11, [r1, #28]
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
rev r8, r8
rev r9, r9
rev r10, r10
rev r11, r11
str r5, [sp]
str r4, [sp, #4]
str r7, [sp, #8]
str r6, [sp, #12]
str r9, [sp, #16]
str r8, [sp, #20]
str r11, [sp, #24]
str r10, [sp, #28]
ldr r4, [r1, #32]
ldr r5, [r1, #36]
ldr r6, [r1, #40]
ldr r7, [r1, #44]
ldr r8, [r1, #48]
ldr r9, [r1, #52]
ldr r10, [r1, #56]
ldr r11, [r1, #60]
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
rev r8, r8
rev r9, r9
rev r10, r10
rev r11, r11
str r5, [sp, #32]
str r4, [sp, #36]
str r7, [sp, #40]
str r6, [sp, #44]
str r9, [sp, #48]
str r8, [sp, #52]
str r11, [sp, #56]
str r10, [sp, #60]
ldr r4, [r1, #64]
ldr r5, [r1, #68]
ldr r6, [r1, #72]
ldr r7, [r1, #76]
ldr r8, [r1, #80]
ldr r9, [r1, #84]
ldr r10, [r1, #88]
ldr r11, [r1, #92]
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
rev r8, r8
rev r9, r9
rev r10, r10
rev r11, r11
str r5, [sp, #64]
str r4, [sp, #68]
str r7, [sp, #72]
str r6, [sp, #76]
str r9, [sp, #80]
str r8, [sp, #84]
str r11, [sp, #88]
str r10, [sp, #92]
ldr r4, [r1, #96]
ldr r5, [r1, #100]
ldr r6, [r1, #104]
ldr r7, [r1, #108]
ldr r8, [r1, #112]
ldr r9, [r1, #116]
ldr r10, [r1, #120]
ldr r11, [r1, #124]
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
rev r8, r8
rev r9, r9
rev r10, r10
rev r11, r11
str r5, [sp, #96]
str r4, [sp, #100]
str r7, [sp, #104]
str r6, [sp, #108]
str r9, [sp, #112]
str r8, [sp, #116]
str r11, [sp, #120]
str r10, [sp, #124]
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
# Pre-calc: b ^ c
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r0, #8]
ldr r11, [r0, #12]
#else
ldrd r10, r11, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #16]
ldr r5, [r0, #20]
#else
ldrd r4, r5, [r0, #16]
#endif
eor r10, r10, r4
eor r11, r11, r5
mov r12, #4
# Start of 16 rounds
L_SHA512_transform_len_start:
# Round 0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #32]
ldr r5, [r0, #36]
#else
ldrd r4, r5, [r0, #32]
#endif
lsrs r6, r4, #14
lsrs r7, r5, #14
orr r7, r7, r4, lsl #18
orr r6, r6, r5, lsl #18
lsrs r8, r4, #18
lsrs r9, r5, #18
orr r9, r9, r4, lsl #14
orr r8, r8, r5, lsl #14
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #23
lsls r9, r5, #23
orr r9, r9, r4, lsr #9
orr r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #56]
ldr r5, [r0, #60]
#else
ldrd r4, r5, [r0, #56]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #56]
str r5, [r0, #60]
#else
strd r4, r5, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #32]
ldr r5, [r0, #36]
#else
ldrd r4, r5, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #40]
ldr r7, [r0, #44]
#else
ldrd r6, r7, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #48]
ldr r9, [r0, #52]
#else
ldrd r8, r9, [r0, #48]
#endif
eor r6, r6, r8
eor r7, r7, r9
and r6, r6, r4
and r7, r7, r5
eor r6, r6, r8
eor r7, r7, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #56]
ldr r5, [r0, #60]
#else
ldrd r4, r5, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp]
ldr r9, [sp, #4]
#else
ldrd r8, r9, [sp]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3]
ldr r7, [r3, #4]
#else
ldrd r6, r7, [r3]
#endif
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #24]
ldr r9, [r0, #28]
#else
ldrd r8, r9, [r0, #24]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #56]
str r5, [r0, #60]
#else
strd r4, r5, [r0, #56]
#endif
adds r8, r8, r4
adc r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #24]
str r9, [r0, #28]
#else
strd r8, r9, [r0, #24]
#endif
lsrs r6, r4, #28
lsrs r7, r5, #28
orr r7, r7, r4, lsl #4
orr r6, r6, r5, lsl #4
lsls r8, r4, #30
lsls r9, r5, #30
orr r9, r9, r4, lsr #2
orr r8, r8, r5, lsr #2
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #25
lsls r9, r5, #25
orr r9, r9, r4, lsr #7
orr r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #56]
ldr r5, [r0, #60]
#else
ldrd r4, r5, [r0, #56]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0]
ldr r9, [r0, #4]
#else
ldrd r8, r9, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #8]
ldr r7, [r0, #12]
#else
ldrd r6, r7, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #56]
str r5, [r0, #60]
#else
strd r4, r5, [r0, #56]
#endif
eor r8, r8, r6
eor r9, r9, r7
and r10, r10, r8
and r11, r11, r9
eor r10, r10, r6
eor r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #56]
ldr r7, [r0, #60]
#else
ldrd r6, r7, [r0, #56]
#endif
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #56]
str r7, [r0, #60]
#else
strd r6, r7, [r0, #56]
#endif
mov r10, r8
mov r11, r9
# Calc new W[0]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #112]
ldr r5, [sp, #116]
#else
ldrd r4, r5, [sp, #112]
#endif
lsrs r6, r4, #19
lsrs r7, r5, #19
orr r7, r7, r4, lsl #13
orr r6, r6, r5, lsl #13
lsls r8, r4, #3
lsls r9, r5, #3
orr r9, r9, r4, lsr #29
orr r8, r8, r5, lsr #29
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #6
lsrs r9, r5, #6
orr r8, r8, r5, lsl #26
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp]
ldr r5, [sp, #4]
#else
ldrd r4, r5, [sp]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #72]
ldr r9, [sp, #76]
#else
ldrd r8, r9, [sp, #72]
#endif
adds r4, r4, r6
adc r5, r5, r7
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp]
str r5, [sp, #4]
#else
strd r4, r5, [sp]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #8]
ldr r5, [sp, #12]
#else
ldrd r4, r5, [sp, #8]
#endif
lsrs r6, r4, #1
lsrs r7, r5, #1
orr r7, r7, r4, lsl #31
orr r6, r6, r5, lsl #31
lsrs r8, r4, #8
lsrs r9, r5, #8
orr r9, r9, r4, lsl #24
orr r8, r8, r5, lsl #24
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #7
lsrs r9, r5, #7
orr r8, r8, r5, lsl #25
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp]
ldr r5, [sp, #4]
#else
ldrd r4, r5, [sp]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp]
str r5, [sp, #4]
#else
strd r4, r5, [sp]
#endif
# Round 1
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #24]
ldr r5, [r0, #28]
#else
ldrd r4, r5, [r0, #24]
#endif
lsrs r6, r4, #14
lsrs r7, r5, #14
orr r7, r7, r4, lsl #18
orr r6, r6, r5, lsl #18
lsrs r8, r4, #18
lsrs r9, r5, #18
orr r9, r9, r4, lsl #14
orr r8, r8, r5, lsl #14
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #23
lsls r9, r5, #23
orr r9, r9, r4, lsr #9
orr r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #48]
ldr r5, [r0, #52]
#else
ldrd r4, r5, [r0, #48]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #48]
str r5, [r0, #52]
#else
strd r4, r5, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #24]
ldr r5, [r0, #28]
#else
ldrd r4, r5, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #32]
ldr r7, [r0, #36]
#else
ldrd r6, r7, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #40]
ldr r9, [r0, #44]
#else
ldrd r8, r9, [r0, #40]
#endif
eor r6, r6, r8
eor r7, r7, r9
and r6, r6, r4
and r7, r7, r5
eor r6, r6, r8
eor r7, r7, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #48]
ldr r5, [r0, #52]
#else
ldrd r4, r5, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #8]
ldr r9, [sp, #12]
#else
ldrd r8, r9, [sp, #8]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3, #8]
ldr r7, [r3, #12]
#else
ldrd r6, r7, [r3, #8]
#endif
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #16]
ldr r9, [r0, #20]
#else
ldrd r8, r9, [r0, #16]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #48]
str r5, [r0, #52]
#else
strd r4, r5, [r0, #48]
#endif
adds r8, r8, r4
adc r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #56]
ldr r5, [r0, #60]
#else
ldrd r4, r5, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #16]
str r9, [r0, #20]
#else
strd r8, r9, [r0, #16]
#endif
lsrs r6, r4, #28
lsrs r7, r5, #28
orr r7, r7, r4, lsl #4
orr r6, r6, r5, lsl #4
lsls r8, r4, #30
lsls r9, r5, #30
orr r9, r9, r4, lsr #2
orr r8, r8, r5, lsr #2
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #25
lsls r9, r5, #25
orr r9, r9, r4, lsr #7
orr r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #48]
ldr r5, [r0, #52]
#else
ldrd r4, r5, [r0, #48]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #56]
ldr r9, [r0, #60]
#else
ldrd r8, r9, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0]
ldr r7, [r0, #4]
#else
ldrd r6, r7, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #48]
str r5, [r0, #52]
#else
strd r4, r5, [r0, #48]
#endif
eor r8, r8, r6
eor r9, r9, r7
and r10, r10, r8
and r11, r11, r9
eor r10, r10, r6
eor r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #48]
ldr r7, [r0, #52]
#else
ldrd r6, r7, [r0, #48]
#endif
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #48]
str r7, [r0, #52]
#else
strd r6, r7, [r0, #48]
#endif
mov r10, r8
mov r11, r9
# Calc new W[1]: W[1] += sigma1(W[15]) + W[10] + sigma0(W[2])
# (in-place SHA-512 message schedule, indices mod 16;
#  sigma1(x) = ror19(x) ^ ror61(x) ^ (x >> 6),
#  sigma0(x) = ror1(x)  ^ ror8(x)  ^ (x >> 7)).
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #120]
ldr r5, [sp, #124]
#else
ldrd r4, r5, [sp, #120]
#endif
lsrs r6, r4, #19
lsrs r7, r5, #19
orr r7, r7, r4, lsl #13
orr r6, r6, r5, lsl #13
lsls r8, r4, #3
lsls r9, r5, #3
orr r9, r9, r4, lsr #29
orr r8, r8, r5, lsr #29
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #6
lsrs r9, r5, #6
orr r8, r8, r5, lsl #26
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #8]
ldr r5, [sp, #12]
#else
ldrd r4, r5, [sp, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #80]
ldr r9, [sp, #84]
#else
ldrd r8, r9, [sp, #80]
#endif
adds r4, r4, r6
adc r5, r5, r7
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #8]
str r5, [sp, #12]
#else
strd r4, r5, [sp, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #16]
ldr r5, [sp, #20]
#else
ldrd r4, r5, [sp, #16]
#endif
lsrs r6, r4, #1
lsrs r7, r5, #1
orr r7, r7, r4, lsl #31
orr r6, r6, r5, lsl #31
lsrs r8, r4, #8
lsrs r9, r5, #8
orr r9, r9, r4, lsl #24
orr r8, r8, r5, lsl #24
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #7
lsrs r9, r5, #7
orr r8, r8, r5, lsl #25
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #8]
ldr r5, [sp, #12]
#else
ldrd r4, r5, [sp, #8]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #8]
str r5, [sp, #12]
#else
strd r4, r5, [sp, #8]
#endif
# Round 2: same structure as round 1 with the working-variable roles
# rotated one slot (e now at [r0, #16], h at [r0, #40]); adds K[2] and W[2].
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #16]
ldr r5, [r0, #20]
#else
ldrd r4, r5, [r0, #16]
#endif
lsrs r6, r4, #14
lsrs r7, r5, #14
orr r7, r7, r4, lsl #18
orr r6, r6, r5, lsl #18
lsrs r8, r4, #18
lsrs r9, r5, #18
orr r9, r9, r4, lsl #14
orr r8, r8, r5, lsl #14
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #23
lsls r9, r5, #23
orr r9, r9, r4, lsr #9
orr r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #40]
ldr r5, [r0, #44]
#else
ldrd r4, r5, [r0, #40]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #40]
str r5, [r0, #44]
#else
strd r4, r5, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #16]
ldr r5, [r0, #20]
#else
ldrd r4, r5, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #24]
ldr r7, [r0, #28]
#else
ldrd r6, r7, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #32]
ldr r9, [r0, #36]
#else
ldrd r8, r9, [r0, #32]
#endif
eor r6, r6, r8
eor r7, r7, r9
and r6, r6, r4
and r7, r7, r5
eor r6, r6, r8
eor r7, r7, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #40]
ldr r5, [r0, #44]
#else
ldrd r4, r5, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #16]
ldr r9, [sp, #20]
#else
ldrd r8, r9, [sp, #16]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3, #16]
ldr r7, [r3, #20]
#else
ldrd r6, r7, [r3, #16]
#endif
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #8]
ldr r9, [r0, #12]
#else
ldrd r8, r9, [r0, #8]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #40]
str r5, [r0, #44]
#else
strd r4, r5, [r0, #40]
#endif
adds r8, r8, r4
adc r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #48]
ldr r5, [r0, #52]
#else
ldrd r4, r5, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #8]
str r9, [r0, #12]
#else
strd r8, r9, [r0, #8]
#endif
lsrs r6, r4, #28
lsrs r7, r5, #28
orr r7, r7, r4, lsl #4
orr r6, r6, r5, lsl #4
lsls r8, r4, #30
lsls r9, r5, #30
orr r9, r9, r4, lsr #2
orr r8, r8, r5, lsr #2
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #25
lsls r9, r5, #25
orr r9, r9, r4, lsr #7
orr r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #40]
ldr r5, [r0, #44]
#else
ldrd r4, r5, [r0, #40]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #48]
ldr r9, [r0, #52]
#else
ldrd r8, r9, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #56]
ldr r7, [r0, #60]
#else
ldrd r6, r7, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #40]
str r5, [r0, #44]
#else
strd r4, r5, [r0, #40]
#endif
eor r8, r8, r6
eor r9, r9, r7
and r10, r10, r8
and r11, r11, r9
eor r10, r10, r6
eor r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #40]
ldr r7, [r0, #44]
#else
ldrd r6, r7, [r0, #40]
#endif
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #40]
str r7, [r0, #44]
#else
strd r6, r7, [r0, #40]
#endif
mov r10, r8
mov r11, r9
# Calc new W[2]: W[2] += sigma1(W[0]) + W[11] + sigma0(W[3])
# (in-place SHA-512 message schedule, indices mod 16).
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp]
ldr r5, [sp, #4]
#else
ldrd r4, r5, [sp]
#endif
lsrs r6, r4, #19
lsrs r7, r5, #19
orr r7, r7, r4, lsl #13
orr r6, r6, r5, lsl #13
lsls r8, r4, #3
lsls r9, r5, #3
orr r9, r9, r4, lsr #29
orr r8, r8, r5, lsr #29
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #6
lsrs r9, r5, #6
orr r8, r8, r5, lsl #26
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #16]
ldr r5, [sp, #20]
#else
ldrd r4, r5, [sp, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #88]
ldr r9, [sp, #92]
#else
ldrd r8, r9, [sp, #88]
#endif
adds r4, r4, r6
adc r5, r5, r7
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #16]
str r5, [sp, #20]
#else
strd r4, r5, [sp, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #24]
ldr r5, [sp, #28]
#else
ldrd r4, r5, [sp, #24]
#endif
lsrs r6, r4, #1
lsrs r7, r5, #1
orr r7, r7, r4, lsl #31
orr r6, r6, r5, lsl #31
lsrs r8, r4, #8
lsrs r9, r5, #8
orr r9, r9, r4, lsl #24
orr r8, r8, r5, lsl #24
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #7
lsrs r9, r5, #7
orr r8, r8, r5, lsl #25
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #16]
ldr r5, [sp, #20]
#else
ldrd r4, r5, [sp, #16]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #16]
str r5, [sp, #20]
#else
strd r4, r5, [sp, #16]
#endif
# Round 3: roles rotate again (e at [r0, #8], h at [r0, #32]);
# adds K[3] and W[3].
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #8]
ldr r5, [r0, #12]
#else
ldrd r4, r5, [r0, #8]
#endif
lsrs r6, r4, #14
lsrs r7, r5, #14
orr r7, r7, r4, lsl #18
orr r6, r6, r5, lsl #18
lsrs r8, r4, #18
lsrs r9, r5, #18
orr r9, r9, r4, lsl #14
orr r8, r8, r5, lsl #14
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #23
lsls r9, r5, #23
orr r9, r9, r4, lsr #9
orr r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #32]
ldr r5, [r0, #36]
#else
ldrd r4, r5, [r0, #32]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #32]
str r5, [r0, #36]
#else
strd r4, r5, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #8]
ldr r5, [r0, #12]
#else
ldrd r4, r5, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #16]
ldr r7, [r0, #20]
#else
ldrd r6, r7, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #24]
ldr r9, [r0, #28]
#else
ldrd r8, r9, [r0, #24]
#endif
eor r6, r6, r8
eor r7, r7, r9
and r6, r6, r4
and r7, r7, r5
eor r6, r6, r8
eor r7, r7, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #32]
ldr r5, [r0, #36]
#else
ldrd r4, r5, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #24]
ldr r9, [sp, #28]
#else
ldrd r8, r9, [sp, #24]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3, #24]
ldr r7, [r3, #28]
#else
ldrd r6, r7, [r3, #24]
#endif
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0]
ldr r9, [r0, #4]
#else
ldrd r8, r9, [r0]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #32]
str r5, [r0, #36]
#else
strd r4, r5, [r0, #32]
#endif
adds r8, r8, r4
adc r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #40]
ldr r5, [r0, #44]
#else
ldrd r4, r5, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0]
str r9, [r0, #4]
#else
strd r8, r9, [r0]
#endif
lsrs r6, r4, #28
lsrs r7, r5, #28
orr r7, r7, r4, lsl #4
orr r6, r6, r5, lsl #4
lsls r8, r4, #30
lsls r9, r5, #30
orr r9, r9, r4, lsr #2
orr r8, r8, r5, lsr #2
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #25
lsls r9, r5, #25
orr r9, r9, r4, lsr #7
orr r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #32]
ldr r5, [r0, #36]
#else
ldrd r4, r5, [r0, #32]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #40]
ldr r9, [r0, #44]
#else
ldrd r8, r9, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #48]
ldr r7, [r0, #52]
#else
ldrd r6, r7, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #32]
str r5, [r0, #36]
#else
strd r4, r5, [r0, #32]
#endif
eor r8, r8, r6
eor r9, r9, r7
and r10, r10, r8
and r11, r11, r9
eor r10, r10, r6
eor r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #32]
ldr r7, [r0, #36]
#else
ldrd r6, r7, [r0, #32]
#endif
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #32]
str r7, [r0, #36]
#else
strd r6, r7, [r0, #32]
#endif
mov r10, r8
mov r11, r9
# Calc new W[3]: W[3] += sigma1(W[1]) + W[12] + sigma0(W[4])
# (in-place SHA-512 message schedule, indices mod 16).
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #8]
ldr r5, [sp, #12]
#else
ldrd r4, r5, [sp, #8]
#endif
lsrs r6, r4, #19
lsrs r7, r5, #19
orr r7, r7, r4, lsl #13
orr r6, r6, r5, lsl #13
lsls r8, r4, #3
lsls r9, r5, #3
orr r9, r9, r4, lsr #29
orr r8, r8, r5, lsr #29
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #6
lsrs r9, r5, #6
orr r8, r8, r5, lsl #26
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #24]
ldr r5, [sp, #28]
#else
ldrd r4, r5, [sp, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #96]
ldr r9, [sp, #100]
#else
ldrd r8, r9, [sp, #96]
#endif
adds r4, r4, r6
adc r5, r5, r7
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #24]
str r5, [sp, #28]
#else
strd r4, r5, [sp, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #32]
ldr r5, [sp, #36]
#else
ldrd r4, r5, [sp, #32]
#endif
lsrs r6, r4, #1
lsrs r7, r5, #1
orr r7, r7, r4, lsl #31
orr r6, r6, r5, lsl #31
lsrs r8, r4, #8
lsrs r9, r5, #8
orr r9, r9, r4, lsl #24
orr r8, r8, r5, lsl #24
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #7
lsrs r9, r5, #7
orr r8, r8, r5, lsl #25
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #24]
ldr r5, [sp, #28]
#else
ldrd r4, r5, [sp, #24]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #24]
str r5, [sp, #28]
#else
strd r4, r5, [sp, #24]
#endif
# Round 4: roles rotate again (e at [r0, #0], h at [r0, #24]);
# adds K[4] and W[4].
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
lsrs r6, r4, #14
lsrs r7, r5, #14
orr r7, r7, r4, lsl #18
orr r6, r6, r5, lsl #18
lsrs r8, r4, #18
lsrs r9, r5, #18
orr r9, r9, r4, lsl #14
orr r8, r8, r5, lsl #14
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #23
lsls r9, r5, #23
orr r9, r9, r4, lsr #9
orr r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #24]
ldr r5, [r0, #28]
#else
ldrd r4, r5, [r0, #24]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #24]
str r5, [r0, #28]
#else
strd r4, r5, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #8]
ldr r7, [r0, #12]
#else
ldrd r6, r7, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #16]
ldr r9, [r0, #20]
#else
ldrd r8, r9, [r0, #16]
#endif
eor r6, r6, r8
eor r7, r7, r9
and r6, r6, r4
and r7, r7, r5
eor r6, r6, r8
eor r7, r7, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #24]
ldr r5, [r0, #28]
#else
ldrd r4, r5, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #32]
ldr r9, [sp, #36]
#else
ldrd r8, r9, [sp, #32]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3, #32]
ldr r7, [r3, #36]
#else
ldrd r6, r7, [r3, #32]
#endif
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #56]
ldr r9, [r0, #60]
#else
ldrd r8, r9, [r0, #56]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #24]
str r5, [r0, #28]
#else
strd r4, r5, [r0, #24]
#endif
adds r8, r8, r4
adc r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #32]
ldr r5, [r0, #36]
#else
ldrd r4, r5, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #56]
str r9, [r0, #60]
#else
strd r8, r9, [r0, #56]
#endif
lsrs r6, r4, #28
lsrs r7, r5, #28
orr r7, r7, r4, lsl #4
orr r6, r6, r5, lsl #4
lsls r8, r4, #30
lsls r9, r5, #30
orr r9, r9, r4, lsr #2
orr r8, r8, r5, lsr #2
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #25
lsls r9, r5, #25
orr r9, r9, r4, lsr #7
orr r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #24]
ldr r5, [r0, #28]
#else
ldrd r4, r5, [r0, #24]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #32]
ldr r9, [r0, #36]
#else
ldrd r8, r9, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #40]
ldr r7, [r0, #44]
#else
ldrd r6, r7, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #24]
str r5, [r0, #28]
#else
strd r4, r5, [r0, #24]
#endif
eor r8, r8, r6
eor r9, r9, r7
and r10, r10, r8
and r11, r11, r9
eor r10, r10, r6
eor r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #24]
ldr r7, [r0, #28]
#else
ldrd r6, r7, [r0, #24]
#endif
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #24]
str r7, [r0, #28]
#else
strd r6, r7, [r0, #24]
#endif
mov r10, r8
mov r11, r9
# Calc new W[4]: W[4] += sigma1(W[2]) + W[13] + sigma0(W[5])
# (in-place SHA-512 message schedule, indices mod 16).
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #16]
ldr r5, [sp, #20]
#else
ldrd r4, r5, [sp, #16]
#endif
lsrs r6, r4, #19
lsrs r7, r5, #19
orr r7, r7, r4, lsl #13
orr r6, r6, r5, lsl #13
lsls r8, r4, #3
lsls r9, r5, #3
orr r9, r9, r4, lsr #29
orr r8, r8, r5, lsr #29
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #6
lsrs r9, r5, #6
orr r8, r8, r5, lsl #26
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #32]
ldr r5, [sp, #36]
#else
ldrd r4, r5, [sp, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #104]
ldr r9, [sp, #108]
#else
ldrd r8, r9, [sp, #104]
#endif
adds r4, r4, r6
adc r5, r5, r7
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #32]
str r5, [sp, #36]
#else
strd r4, r5, [sp, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #40]
ldr r5, [sp, #44]
#else
ldrd r4, r5, [sp, #40]
#endif
lsrs r6, r4, #1
lsrs r7, r5, #1
orr r7, r7, r4, lsl #31
orr r6, r6, r5, lsl #31
lsrs r8, r4, #8
lsrs r9, r5, #8
orr r9, r9, r4, lsl #24
orr r8, r8, r5, lsl #24
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #7
lsrs r9, r5, #7
orr r8, r8, r5, lsl #25
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #32]
ldr r5, [sp, #36]
#else
ldrd r4, r5, [sp, #32]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #32]
str r5, [sp, #36]
#else
strd r4, r5, [sp, #32]
#endif
# Round 5: roles wrap around the 8-word state (e at [r0, #56],
# h at [r0, #16]); adds K[5] and W[5].
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #56]
ldr r5, [r0, #60]
#else
ldrd r4, r5, [r0, #56]
#endif
lsrs r6, r4, #14
lsrs r7, r5, #14
orr r7, r7, r4, lsl #18
orr r6, r6, r5, lsl #18
lsrs r8, r4, #18
lsrs r9, r5, #18
orr r9, r9, r4, lsl #14
orr r8, r8, r5, lsl #14
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #23
lsls r9, r5, #23
orr r9, r9, r4, lsr #9
orr r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #16]
ldr r5, [r0, #20]
#else
ldrd r4, r5, [r0, #16]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #16]
str r5, [r0, #20]
#else
strd r4, r5, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #56]
ldr r5, [r0, #60]
#else
ldrd r4, r5, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0]
ldr r7, [r0, #4]
#else
ldrd r6, r7, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #8]
ldr r9, [r0, #12]
#else
ldrd r8, r9, [r0, #8]
#endif
eor r6, r6, r8
eor r7, r7, r9
and r6, r6, r4
and r7, r7, r5
eor r6, r6, r8
eor r7, r7, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #16]
ldr r5, [r0, #20]
#else
ldrd r4, r5, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #40]
ldr r9, [sp, #44]
#else
ldrd r8, r9, [sp, #40]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3, #40]
ldr r7, [r3, #44]
#else
ldrd r6, r7, [r3, #40]
#endif
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #48]
ldr r9, [r0, #52]
#else
ldrd r8, r9, [r0, #48]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #16]
str r5, [r0, #20]
#else
strd r4, r5, [r0, #16]
#endif
adds r8, r8, r4
adc r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #24]
ldr r5, [r0, #28]
#else
ldrd r4, r5, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #48]
str r9, [r0, #52]
#else
strd r8, r9, [r0, #48]
#endif
lsrs r6, r4, #28
lsrs r7, r5, #28
orr r7, r7, r4, lsl #4
orr r6, r6, r5, lsl #4
lsls r8, r4, #30
lsls r9, r5, #30
orr r9, r9, r4, lsr #2
orr r8, r8, r5, lsr #2
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #25
lsls r9, r5, #25
orr r9, r9, r4, lsr #7
orr r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #16]
ldr r5, [r0, #20]
#else
ldrd r4, r5, [r0, #16]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #24]
ldr r9, [r0, #28]
#else
ldrd r8, r9, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #32]
ldr r7, [r0, #36]
#else
ldrd r6, r7, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #16]
str r5, [r0, #20]
#else
strd r4, r5, [r0, #16]
#endif
eor r8, r8, r6
eor r9, r9, r7
and r10, r10, r8
and r11, r11, r9
eor r10, r10, r6
eor r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #16]
ldr r7, [r0, #20]
#else
ldrd r6, r7, [r0, #16]
#endif
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #16]
str r7, [r0, #20]
#else
strd r6, r7, [r0, #16]
#endif
mov r10, r8
mov r11, r9
# Calc new W[5]: W[5] += sigma1(W[3]) + W[14] + sigma0(W[6])
# (in-place SHA-512 message schedule, indices mod 16).
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #24]
ldr r5, [sp, #28]
#else
ldrd r4, r5, [sp, #24]
#endif
lsrs r6, r4, #19
lsrs r7, r5, #19
orr r7, r7, r4, lsl #13
orr r6, r6, r5, lsl #13
lsls r8, r4, #3
lsls r9, r5, #3
orr r9, r9, r4, lsr #29
orr r8, r8, r5, lsr #29
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #6
lsrs r9, r5, #6
orr r8, r8, r5, lsl #26
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #40]
ldr r5, [sp, #44]
#else
ldrd r4, r5, [sp, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #112]
ldr r9, [sp, #116]
#else
ldrd r8, r9, [sp, #112]
#endif
adds r4, r4, r6
adc r5, r5, r7
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #40]
str r5, [sp, #44]
#else
strd r4, r5, [sp, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #48]
ldr r5, [sp, #52]
#else
ldrd r4, r5, [sp, #48]
#endif
lsrs r6, r4, #1
lsrs r7, r5, #1
orr r7, r7, r4, lsl #31
orr r6, r6, r5, lsl #31
lsrs r8, r4, #8
lsrs r9, r5, #8
orr r9, r9, r4, lsl #24
orr r8, r8, r5, lsl #24
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #7
lsrs r9, r5, #7
orr r8, r8, r5, lsl #25
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #40]
ldr r5, [sp, #44]
#else
ldrd r4, r5, [sp, #40]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #40]
str r5, [sp, #44]
#else
strd r4, r5, [sp, #40]
#endif
# Round 6: roles rotate again (e at [r0, #48], h at [r0, #8]);
# adds K[6] and W[6].
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #48]
ldr r5, [r0, #52]
#else
ldrd r4, r5, [r0, #48]
#endif
lsrs r6, r4, #14
lsrs r7, r5, #14
orr r7, r7, r4, lsl #18
orr r6, r6, r5, lsl #18
lsrs r8, r4, #18
lsrs r9, r5, #18
orr r9, r9, r4, lsl #14
orr r8, r8, r5, lsl #14
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #23
lsls r9, r5, #23
orr r9, r9, r4, lsr #9
orr r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #8]
ldr r5, [r0, #12]
#else
ldrd r4, r5, [r0, #8]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #8]
str r5, [r0, #12]
#else
strd r4, r5, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #48]
ldr r5, [r0, #52]
#else
ldrd r4, r5, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #56]
ldr r7, [r0, #60]
#else
ldrd r6, r7, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0]
ldr r9, [r0, #4]
#else
ldrd r8, r9, [r0]
#endif
eor r6, r6, r8
eor r7, r7, r9
and r6, r6, r4
and r7, r7, r5
eor r6, r6, r8
eor r7, r7, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #8]
ldr r5, [r0, #12]
#else
ldrd r4, r5, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #48]
ldr r9, [sp, #52]
#else
ldrd r8, r9, [sp, #48]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3, #48]
ldr r7, [r3, #52]
#else
ldrd r6, r7, [r3, #48]
#endif
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #40]
ldr r9, [r0, #44]
#else
ldrd r8, r9, [r0, #40]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #8]
str r5, [r0, #12]
#else
strd r4, r5, [r0, #8]
#endif
adds r8, r8, r4
adc r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #16]
ldr r5, [r0, #20]
#else
ldrd r4, r5, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #40]
str r9, [r0, #44]
#else
strd r8, r9, [r0, #40]
#endif
lsrs r6, r4, #28
lsrs r7, r5, #28
orr r7, r7, r4, lsl #4
orr r6, r6, r5, lsl #4
lsls r8, r4, #30
lsls r9, r5, #30
orr r9, r9, r4, lsr #2
orr r8, r8, r5, lsr #2
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #25
lsls r9, r5, #25
orr r9, r9, r4, lsr #7
orr r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #8]
ldr r5, [r0, #12]
#else
ldrd r4, r5, [r0, #8]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #16]
ldr r9, [r0, #20]
#else
ldrd r8, r9, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #24]
ldr r7, [r0, #28]
#else
ldrd r6, r7, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #8]
str r5, [r0, #12]
#else
strd r4, r5, [r0, #8]
#endif
eor r8, r8, r6
eor r9, r9, r7
and r10, r10, r8
and r11, r11, r9
eor r10, r10, r6
eor r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #8]
ldr r7, [r0, #12]
#else
ldrd r6, r7, [r0, #8]
#endif
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #8]
str r7, [r0, #12]
#else
strd r6, r7, [r0, #8]
#endif
mov r10, r8
mov r11, r9
# Calc new W[6]: W[6] += sigma1(W[4]) + W[15] + sigma0(W[7])
# (in-place SHA-512 message schedule, indices mod 16).
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #32]
ldr r5, [sp, #36]
#else
ldrd r4, r5, [sp, #32]
#endif
lsrs r6, r4, #19
lsrs r7, r5, #19
orr r7, r7, r4, lsl #13
orr r6, r6, r5, lsl #13
lsls r8, r4, #3
lsls r9, r5, #3
orr r9, r9, r4, lsr #29
orr r8, r8, r5, lsr #29
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #6
lsrs r9, r5, #6
orr r8, r8, r5, lsl #26
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #48]
ldr r5, [sp, #52]
#else
ldrd r4, r5, [sp, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #120]
ldr r9, [sp, #124]
#else
ldrd r8, r9, [sp, #120]
#endif
adds r4, r4, r6
adc r5, r5, r7
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #48]
str r5, [sp, #52]
#else
strd r4, r5, [sp, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #56]
ldr r5, [sp, #60]
#else
ldrd r4, r5, [sp, #56]
#endif
lsrs r6, r4, #1
lsrs r7, r5, #1
orr r7, r7, r4, lsl #31
orr r6, r6, r5, lsl #31
lsrs r8, r4, #8
lsrs r9, r5, #8
orr r9, r9, r4, lsl #24
orr r8, r8, r5, lsl #24
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #7
lsrs r9, r5, #7
orr r8, r8, r5, lsl #25
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #48]
ldr r5, [sp, #52]
#else
ldrd r4, r5, [sp, #48]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #48]
str r5, [sp, #52]
#else
strd r4, r5, [sp, #48]
#endif
# Round 7: roles rotate again (e at [r0, #40], h at [r0, #0]);
# adds K[7] and W[7].
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #40]
ldr r5, [r0, #44]
#else
ldrd r4, r5, [r0, #40]
#endif
lsrs r6, r4, #14
lsrs r7, r5, #14
orr r7, r7, r4, lsl #18
orr r6, r6, r5, lsl #18
lsrs r8, r4, #18
lsrs r9, r5, #18
orr r9, r9, r4, lsl #14
orr r8, r8, r5, lsl #14
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #23
lsls r9, r5, #23
orr r9, r9, r4, lsr #9
orr r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0]
str r5, [r0, #4]
#else
strd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #40]
ldr r5, [r0, #44]
#else
ldrd r4, r5, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #48]
ldr r7, [r0, #52]
#else
ldrd r6, r7, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #56]
ldr r9, [r0, #60]
#else
ldrd r8, r9, [r0, #56]
#endif
eor r6, r6, r8
eor r7, r7, r9
and r6, r6, r4
and r7, r7, r5
eor r6, r6, r8
eor r7, r7, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #56]
ldr r9, [sp, #60]
#else
ldrd r8, r9, [sp, #56]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3, #56]
ldr r7, [r3, #60]
#else
ldrd r6, r7, [r3, #56]
#endif
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #32]
ldr r9, [r0, #36]
#else
ldrd r8, r9, [r0, #32]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0]
str r5, [r0, #4]
#else
strd r4, r5, [r0]
#endif
adds r8, r8, r4
adc r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #8]
ldr r5, [r0, #12]
#else
ldrd r4, r5, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #32]
str r9, [r0, #36]
#else
strd r8, r9, [r0, #32]
#endif
lsrs r6, r4, #28
lsrs r7, r5, #28
orr r7, r7, r4, lsl #4
orr r6, r6, r5, lsl #4
lsls r8, r4, #30
lsls r9, r5, #30
orr r9, r9, r4, lsr #2
orr r8, r8, r5, lsr #2
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #25
lsls r9, r5, #25
orr r9, r9, r4, lsr #7
orr r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #8]
ldr r9, [r0, #12]
#else
ldrd r8, r9, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #16]
ldr r7, [r0, #20]
#else
ldrd r6, r7, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0]
str r5, [r0, #4]
#else
strd r4, r5, [r0]
#endif
eor r8, r8, r6
eor r9, r9, r7
and r10, r10, r8
and r11, r11, r9
eor r10, r10, r6
eor r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0]
ldr r7, [r0, #4]
#else
ldrd r6, r7, [r0]
#endif
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0]
str r7, [r0, #4]
#else
strd r6, r7, [r0]
#endif
mov r10, r8
mov r11, r9
# Calc new W[7]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #40]
ldr r5, [sp, #44]
#else
ldrd r4, r5, [sp, #40]
#endif
lsrs r6, r4, #19
lsrs r7, r5, #19
orr r7, r7, r4, lsl #13
orr r6, r6, r5, lsl #13
lsls r8, r4, #3
lsls r9, r5, #3
orr r9, r9, r4, lsr #29
orr r8, r8, r5, lsr #29
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #6
lsrs r9, r5, #6
orr r8, r8, r5, lsl #26
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #56]
ldr r5, [sp, #60]
#else
ldrd r4, r5, [sp, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp]
ldr r9, [sp, #4]
#else
ldrd r8, r9, [sp]
#endif
adds r4, r4, r6
adc r5, r5, r7
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #56]
str r5, [sp, #60]
#else
strd r4, r5, [sp, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #64]
ldr r5, [sp, #68]
#else
ldrd r4, r5, [sp, #64]
#endif
lsrs r6, r4, #1
lsrs r7, r5, #1
orr r7, r7, r4, lsl #31
orr r6, r6, r5, lsl #31
lsrs r8, r4, #8
lsrs r9, r5, #8
orr r9, r9, r4, lsl #24
orr r8, r8, r5, lsl #24
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #7
lsrs r9, r5, #7
orr r8, r8, r5, lsl #25
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #56]
ldr r5, [sp, #60]
#else
ldrd r4, r5, [sp, #56]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #56]
str r5, [sp, #60]
#else
strd r4, r5, [sp, #56]
#endif
# Round 8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #32]
ldr r5, [r0, #36]
#else
ldrd r4, r5, [r0, #32]
#endif
lsrs r6, r4, #14
lsrs r7, r5, #14
orr r7, r7, r4, lsl #18
orr r6, r6, r5, lsl #18
lsrs r8, r4, #18
lsrs r9, r5, #18
orr r9, r9, r4, lsl #14
orr r8, r8, r5, lsl #14
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #23
lsls r9, r5, #23
orr r9, r9, r4, lsr #9
orr r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #56]
ldr r5, [r0, #60]
#else
ldrd r4, r5, [r0, #56]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #56]
str r5, [r0, #60]
#else
strd r4, r5, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #32]
ldr r5, [r0, #36]
#else
ldrd r4, r5, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #40]
ldr r7, [r0, #44]
#else
ldrd r6, r7, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #48]
ldr r9, [r0, #52]
#else
ldrd r8, r9, [r0, #48]
#endif
eor r6, r6, r8
eor r7, r7, r9
and r6, r6, r4
and r7, r7, r5
eor r6, r6, r8
eor r7, r7, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #56]
ldr r5, [r0, #60]
#else
ldrd r4, r5, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #64]
ldr r9, [sp, #68]
#else
ldrd r8, r9, [sp, #64]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3, #64]
ldr r7, [r3, #68]
#else
ldrd r6, r7, [r3, #64]
#endif
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #24]
ldr r9, [r0, #28]
#else
ldrd r8, r9, [r0, #24]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #56]
str r5, [r0, #60]
#else
strd r4, r5, [r0, #56]
#endif
adds r8, r8, r4
adc r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #24]
str r9, [r0, #28]
#else
strd r8, r9, [r0, #24]
#endif
lsrs r6, r4, #28
lsrs r7, r5, #28
orr r7, r7, r4, lsl #4
orr r6, r6, r5, lsl #4
lsls r8, r4, #30
lsls r9, r5, #30
orr r9, r9, r4, lsr #2
orr r8, r8, r5, lsr #2
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #25
lsls r9, r5, #25
orr r9, r9, r4, lsr #7
orr r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #56]
ldr r5, [r0, #60]
#else
ldrd r4, r5, [r0, #56]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0]
ldr r9, [r0, #4]
#else
ldrd r8, r9, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #8]
ldr r7, [r0, #12]
#else
ldrd r6, r7, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #56]
str r5, [r0, #60]
#else
strd r4, r5, [r0, #56]
#endif
eor r8, r8, r6
eor r9, r9, r7
and r10, r10, r8
and r11, r11, r9
eor r10, r10, r6
eor r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #56]
ldr r7, [r0, #60]
#else
ldrd r6, r7, [r0, #56]
#endif
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #56]
str r7, [r0, #60]
#else
strd r6, r7, [r0, #56]
#endif
mov r10, r8
mov r11, r9
# Calc new W[8]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #48]
ldr r5, [sp, #52]
#else
ldrd r4, r5, [sp, #48]
#endif
lsrs r6, r4, #19
lsrs r7, r5, #19
orr r7, r7, r4, lsl #13
orr r6, r6, r5, lsl #13
lsls r8, r4, #3
lsls r9, r5, #3
orr r9, r9, r4, lsr #29
orr r8, r8, r5, lsr #29
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #6
lsrs r9, r5, #6
orr r8, r8, r5, lsl #26
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #64]
ldr r5, [sp, #68]
#else
ldrd r4, r5, [sp, #64]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #8]
ldr r9, [sp, #12]
#else
ldrd r8, r9, [sp, #8]
#endif
adds r4, r4, r6
adc r5, r5, r7
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #64]
str r5, [sp, #68]
#else
strd r4, r5, [sp, #64]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #72]
ldr r5, [sp, #76]
#else
ldrd r4, r5, [sp, #72]
#endif
lsrs r6, r4, #1
lsrs r7, r5, #1
orr r7, r7, r4, lsl #31
orr r6, r6, r5, lsl #31
lsrs r8, r4, #8
lsrs r9, r5, #8
orr r9, r9, r4, lsl #24
orr r8, r8, r5, lsl #24
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #7
lsrs r9, r5, #7
orr r8, r8, r5, lsl #25
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #64]
ldr r5, [sp, #68]
#else
ldrd r4, r5, [sp, #64]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #64]
str r5, [sp, #68]
#else
strd r4, r5, [sp, #64]
#endif
# Round 9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #24]
ldr r5, [r0, #28]
#else
ldrd r4, r5, [r0, #24]
#endif
lsrs r6, r4, #14
lsrs r7, r5, #14
orr r7, r7, r4, lsl #18
orr r6, r6, r5, lsl #18
lsrs r8, r4, #18
lsrs r9, r5, #18
orr r9, r9, r4, lsl #14
orr r8, r8, r5, lsl #14
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #23
lsls r9, r5, #23
orr r9, r9, r4, lsr #9
orr r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #48]
ldr r5, [r0, #52]
#else
ldrd r4, r5, [r0, #48]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #48]
str r5, [r0, #52]
#else
strd r4, r5, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #24]
ldr r5, [r0, #28]
#else
ldrd r4, r5, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #32]
ldr r7, [r0, #36]
#else
ldrd r6, r7, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #40]
ldr r9, [r0, #44]
#else
ldrd r8, r9, [r0, #40]
#endif
eor r6, r6, r8
eor r7, r7, r9
and r6, r6, r4
and r7, r7, r5
eor r6, r6, r8
eor r7, r7, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #48]
ldr r5, [r0, #52]
#else
ldrd r4, r5, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #72]
ldr r9, [sp, #76]
#else
ldrd r8, r9, [sp, #72]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3, #72]
ldr r7, [r3, #76]
#else
ldrd r6, r7, [r3, #72]
#endif
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #16]
ldr r9, [r0, #20]
#else
ldrd r8, r9, [r0, #16]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #48]
str r5, [r0, #52]
#else
strd r4, r5, [r0, #48]
#endif
adds r8, r8, r4
adc r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #56]
ldr r5, [r0, #60]
#else
ldrd r4, r5, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #16]
str r9, [r0, #20]
#else
strd r8, r9, [r0, #16]
#endif
lsrs r6, r4, #28
lsrs r7, r5, #28
orr r7, r7, r4, lsl #4
orr r6, r6, r5, lsl #4
lsls r8, r4, #30
lsls r9, r5, #30
orr r9, r9, r4, lsr #2
orr r8, r8, r5, lsr #2
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #25
lsls r9, r5, #25
orr r9, r9, r4, lsr #7
orr r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #48]
ldr r5, [r0, #52]
#else
ldrd r4, r5, [r0, #48]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #56]
ldr r9, [r0, #60]
#else
ldrd r8, r9, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0]
ldr r7, [r0, #4]
#else
ldrd r6, r7, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #48]
str r5, [r0, #52]
#else
strd r4, r5, [r0, #48]
#endif
eor r8, r8, r6
eor r9, r9, r7
and r10, r10, r8
and r11, r11, r9
eor r10, r10, r6
eor r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #48]
ldr r7, [r0, #52]
#else
ldrd r6, r7, [r0, #48]
#endif
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #48]
str r7, [r0, #52]
#else
strd r6, r7, [r0, #48]
#endif
mov r10, r8
mov r11, r9
# Calc new W[9]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #56]
ldr r5, [sp, #60]
#else
ldrd r4, r5, [sp, #56]
#endif
lsrs r6, r4, #19
lsrs r7, r5, #19
orr r7, r7, r4, lsl #13
orr r6, r6, r5, lsl #13
lsls r8, r4, #3
lsls r9, r5, #3
orr r9, r9, r4, lsr #29
orr r8, r8, r5, lsr #29
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #6
lsrs r9, r5, #6
orr r8, r8, r5, lsl #26
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #72]
ldr r5, [sp, #76]
#else
ldrd r4, r5, [sp, #72]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #16]
ldr r9, [sp, #20]
#else
ldrd r8, r9, [sp, #16]
#endif
adds r4, r4, r6
adc r5, r5, r7
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #72]
str r5, [sp, #76]
#else
strd r4, r5, [sp, #72]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #80]
ldr r5, [sp, #84]
#else
ldrd r4, r5, [sp, #80]
#endif
lsrs r6, r4, #1
lsrs r7, r5, #1
orr r7, r7, r4, lsl #31
orr r6, r6, r5, lsl #31
lsrs r8, r4, #8
lsrs r9, r5, #8
orr r9, r9, r4, lsl #24
orr r8, r8, r5, lsl #24
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #7
lsrs r9, r5, #7
orr r8, r8, r5, lsl #25
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #72]
ldr r5, [sp, #76]
#else
ldrd r4, r5, [sp, #72]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #72]
str r5, [sp, #76]
#else
strd r4, r5, [sp, #72]
#endif
# Round 10
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #16]
ldr r5, [r0, #20]
#else
ldrd r4, r5, [r0, #16]
#endif
lsrs r6, r4, #14
lsrs r7, r5, #14
orr r7, r7, r4, lsl #18
orr r6, r6, r5, lsl #18
lsrs r8, r4, #18
lsrs r9, r5, #18
orr r9, r9, r4, lsl #14
orr r8, r8, r5, lsl #14
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #23
lsls r9, r5, #23
orr r9, r9, r4, lsr #9
orr r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #40]
ldr r5, [r0, #44]
#else
ldrd r4, r5, [r0, #40]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #40]
str r5, [r0, #44]
#else
strd r4, r5, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #16]
ldr r5, [r0, #20]
#else
ldrd r4, r5, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #24]
ldr r7, [r0, #28]
#else
ldrd r6, r7, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #32]
ldr r9, [r0, #36]
#else
ldrd r8, r9, [r0, #32]
#endif
eor r6, r6, r8
eor r7, r7, r9
and r6, r6, r4
and r7, r7, r5
eor r6, r6, r8
eor r7, r7, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #40]
ldr r5, [r0, #44]
#else
ldrd r4, r5, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #80]
ldr r9, [sp, #84]
#else
ldrd r8, r9, [sp, #80]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3, #80]
ldr r7, [r3, #84]
#else
ldrd r6, r7, [r3, #80]
#endif
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #8]
ldr r9, [r0, #12]
#else
ldrd r8, r9, [r0, #8]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #40]
str r5, [r0, #44]
#else
strd r4, r5, [r0, #40]
#endif
adds r8, r8, r4
adc r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #48]
ldr r5, [r0, #52]
#else
ldrd r4, r5, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #8]
str r9, [r0, #12]
#else
strd r8, r9, [r0, #8]
#endif
lsrs r6, r4, #28
lsrs r7, r5, #28
orr r7, r7, r4, lsl #4
orr r6, r6, r5, lsl #4
lsls r8, r4, #30
lsls r9, r5, #30
orr r9, r9, r4, lsr #2
orr r8, r8, r5, lsr #2
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #25
lsls r9, r5, #25
orr r9, r9, r4, lsr #7
orr r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #40]
ldr r5, [r0, #44]
#else
ldrd r4, r5, [r0, #40]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #48]
ldr r9, [r0, #52]
#else
ldrd r8, r9, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #56]
ldr r7, [r0, #60]
#else
ldrd r6, r7, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #40]
str r5, [r0, #44]
#else
strd r4, r5, [r0, #40]
#endif
eor r8, r8, r6
eor r9, r9, r7
and r10, r10, r8
and r11, r11, r9
eor r10, r10, r6
eor r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #40]
ldr r7, [r0, #44]
#else
ldrd r6, r7, [r0, #40]
#endif
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #40]
str r7, [r0, #44]
#else
strd r6, r7, [r0, #40]
#endif
mov r10, r8
mov r11, r9
# Calc new W[10]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #64]
ldr r5, [sp, #68]
#else
ldrd r4, r5, [sp, #64]
#endif
lsrs r6, r4, #19
lsrs r7, r5, #19
orr r7, r7, r4, lsl #13
orr r6, r6, r5, lsl #13
lsls r8, r4, #3
lsls r9, r5, #3
orr r9, r9, r4, lsr #29
orr r8, r8, r5, lsr #29
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #6
lsrs r9, r5, #6
orr r8, r8, r5, lsl #26
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #80]
ldr r5, [sp, #84]
#else
ldrd r4, r5, [sp, #80]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #24]
ldr r9, [sp, #28]
#else
ldrd r8, r9, [sp, #24]
#endif
adds r4, r4, r6
adc r5, r5, r7
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #80]
str r5, [sp, #84]
#else
strd r4, r5, [sp, #80]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #88]
ldr r5, [sp, #92]
#else
ldrd r4, r5, [sp, #88]
#endif
lsrs r6, r4, #1
lsrs r7, r5, #1
orr r7, r7, r4, lsl #31
orr r6, r6, r5, lsl #31
lsrs r8, r4, #8
lsrs r9, r5, #8
orr r9, r9, r4, lsl #24
orr r8, r8, r5, lsl #24
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #7
lsrs r9, r5, #7
orr r8, r8, r5, lsl #25
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #80]
ldr r5, [sp, #84]
#else
ldrd r4, r5, [sp, #80]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #80]
str r5, [sp, #84]
#else
strd r4, r5, [sp, #80]
#endif
# Round 11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #8]
ldr r5, [r0, #12]
#else
ldrd r4, r5, [r0, #8]
#endif
lsrs r6, r4, #14
lsrs r7, r5, #14
orr r7, r7, r4, lsl #18
orr r6, r6, r5, lsl #18
lsrs r8, r4, #18
lsrs r9, r5, #18
orr r9, r9, r4, lsl #14
orr r8, r8, r5, lsl #14
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #23
lsls r9, r5, #23
orr r9, r9, r4, lsr #9
orr r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #32]
ldr r5, [r0, #36]
#else
ldrd r4, r5, [r0, #32]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #32]
str r5, [r0, #36]
#else
strd r4, r5, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #8]
ldr r5, [r0, #12]
#else
ldrd r4, r5, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #16]
ldr r7, [r0, #20]
#else
ldrd r6, r7, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #24]
ldr r9, [r0, #28]
#else
ldrd r8, r9, [r0, #24]
#endif
eor r6, r6, r8
eor r7, r7, r9
and r6, r6, r4
and r7, r7, r5
eor r6, r6, r8
eor r7, r7, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #32]
ldr r5, [r0, #36]
#else
ldrd r4, r5, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #88]
ldr r9, [sp, #92]
#else
ldrd r8, r9, [sp, #88]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3, #88]
ldr r7, [r3, #92]
#else
ldrd r6, r7, [r3, #88]
#endif
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0]
ldr r9, [r0, #4]
#else
ldrd r8, r9, [r0]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #32]
str r5, [r0, #36]
#else
strd r4, r5, [r0, #32]
#endif
adds r8, r8, r4
adc r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #40]
ldr r5, [r0, #44]
#else
ldrd r4, r5, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0]
str r9, [r0, #4]
#else
strd r8, r9, [r0]
#endif
lsrs r6, r4, #28
lsrs r7, r5, #28
orr r7, r7, r4, lsl #4
orr r6, r6, r5, lsl #4
lsls r8, r4, #30
lsls r9, r5, #30
orr r9, r9, r4, lsr #2
orr r8, r8, r5, lsr #2
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #25
lsls r9, r5, #25
orr r9, r9, r4, lsr #7
orr r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #32]
ldr r5, [r0, #36]
#else
ldrd r4, r5, [r0, #32]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #40]
ldr r9, [r0, #44]
#else
ldrd r8, r9, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #48]
ldr r7, [r0, #52]
#else
ldrd r6, r7, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #32]
str r5, [r0, #36]
#else
strd r4, r5, [r0, #32]
#endif
eor r8, r8, r6
eor r9, r9, r7
and r10, r10, r8
and r11, r11, r9
eor r10, r10, r6
eor r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #32]
ldr r7, [r0, #36]
#else
ldrd r6, r7, [r0, #32]
#endif
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #32]
str r7, [r0, #36]
#else
strd r6, r7, [r0, #32]
#endif
mov r10, r8
mov r11, r9
# Calc new W[11]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #72]
ldr r5, [sp, #76]
#else
ldrd r4, r5, [sp, #72]
#endif
lsrs r6, r4, #19
lsrs r7, r5, #19
orr r7, r7, r4, lsl #13
orr r6, r6, r5, lsl #13
lsls r8, r4, #3
lsls r9, r5, #3
orr r9, r9, r4, lsr #29
orr r8, r8, r5, lsr #29
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #6
lsrs r9, r5, #6
orr r8, r8, r5, lsl #26
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #88]
ldr r5, [sp, #92]
#else
ldrd r4, r5, [sp, #88]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #32]
ldr r9, [sp, #36]
#else
ldrd r8, r9, [sp, #32]
#endif
adds r4, r4, r6
adc r5, r5, r7
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #88]
str r5, [sp, #92]
#else
strd r4, r5, [sp, #88]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #96]
ldr r5, [sp, #100]
#else
ldrd r4, r5, [sp, #96]
#endif
lsrs r6, r4, #1
lsrs r7, r5, #1
orr r7, r7, r4, lsl #31
orr r6, r6, r5, lsl #31
lsrs r8, r4, #8
lsrs r9, r5, #8
orr r9, r9, r4, lsl #24
orr r8, r8, r5, lsl #24
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #7
lsrs r9, r5, #7
orr r8, r8, r5, lsl #25
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #88]
ldr r5, [sp, #92]
#else
ldrd r4, r5, [sp, #88]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #88]
str r5, [sp, #92]
#else
strd r4, r5, [sp, #88]
#endif
# Round 12
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
lsrs r6, r4, #14
lsrs r7, r5, #14
orr r7, r7, r4, lsl #18
orr r6, r6, r5, lsl #18
lsrs r8, r4, #18
lsrs r9, r5, #18
orr r9, r9, r4, lsl #14
orr r8, r8, r5, lsl #14
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #23
lsls r9, r5, #23
orr r9, r9, r4, lsr #9
orr r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #24]
ldr r5, [r0, #28]
#else
ldrd r4, r5, [r0, #24]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #24]
str r5, [r0, #28]
#else
strd r4, r5, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #8]
ldr r7, [r0, #12]
#else
ldrd r6, r7, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #16]
ldr r9, [r0, #20]
#else
ldrd r8, r9, [r0, #16]
#endif
eor r6, r6, r8
eor r7, r7, r9
and r6, r6, r4
and r7, r7, r5
eor r6, r6, r8
eor r7, r7, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #24]
ldr r5, [r0, #28]
#else
ldrd r4, r5, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #96]
ldr r9, [sp, #100]
#else
ldrd r8, r9, [sp, #96]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3, #96]
ldr r7, [r3, #100]
#else
ldrd r6, r7, [r3, #96]
#endif
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #56]
ldr r9, [r0, #60]
#else
ldrd r8, r9, [r0, #56]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #24]
str r5, [r0, #28]
#else
strd r4, r5, [r0, #24]
#endif
adds r8, r8, r4
adc r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #32]
ldr r5, [r0, #36]
#else
ldrd r4, r5, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #56]
str r9, [r0, #60]
#else
strd r8, r9, [r0, #56]
#endif
lsrs r6, r4, #28
lsrs r7, r5, #28
orr r7, r7, r4, lsl #4
orr r6, r6, r5, lsl #4
lsls r8, r4, #30
lsls r9, r5, #30
orr r9, r9, r4, lsr #2
orr r8, r8, r5, lsr #2
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #25
lsls r9, r5, #25
orr r9, r9, r4, lsr #7
orr r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #24]
ldr r5, [r0, #28]
#else
ldrd r4, r5, [r0, #24]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #32]
ldr r9, [r0, #36]
#else
ldrd r8, r9, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #40]
ldr r7, [r0, #44]
#else
ldrd r6, r7, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #24]
str r5, [r0, #28]
#else
strd r4, r5, [r0, #24]
#endif
eor r8, r8, r6
eor r9, r9, r7
and r10, r10, r8
and r11, r11, r9
eor r10, r10, r6
eor r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #24]
ldr r7, [r0, #28]
#else
ldrd r6, r7, [r0, #24]
#endif
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #24]
str r7, [r0, #28]
#else
strd r6, r7, [r0, #24]
#endif
mov r10, r8
mov r11, r9
# Calc new W[12]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #80]
ldr r5, [sp, #84]
#else
ldrd r4, r5, [sp, #80]
#endif
lsrs r6, r4, #19
lsrs r7, r5, #19
orr r7, r7, r4, lsl #13
orr r6, r6, r5, lsl #13
lsls r8, r4, #3
lsls r9, r5, #3
orr r9, r9, r4, lsr #29
orr r8, r8, r5, lsr #29
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #6
lsrs r9, r5, #6
orr r8, r8, r5, lsl #26
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #96]
ldr r5, [sp, #100]
#else
ldrd r4, r5, [sp, #96]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #40]
ldr r9, [sp, #44]
#else
ldrd r8, r9, [sp, #40]
#endif
adds r4, r4, r6
adc r5, r5, r7
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #96]
str r5, [sp, #100]
#else
strd r4, r5, [sp, #96]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #104]
ldr r5, [sp, #108]
#else
ldrd r4, r5, [sp, #104]
#endif
lsrs r6, r4, #1
lsrs r7, r5, #1
orr r7, r7, r4, lsl #31
orr r6, r6, r5, lsl #31
lsrs r8, r4, #8
lsrs r9, r5, #8
orr r9, r9, r4, lsl #24
orr r8, r8, r5, lsl #24
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #7
lsrs r9, r5, #7
orr r8, r8, r5, lsl #25
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #96]
ldr r5, [sp, #100]
#else
ldrd r4, r5, [sp, #96]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #96]
str r5, [sp, #100]
#else
strd r4, r5, [sp, #96]
#endif
# Round 13
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #56]
ldr r5, [r0, #60]
#else
ldrd r4, r5, [r0, #56]
#endif
lsrs r6, r4, #14
lsrs r7, r5, #14
orr r7, r7, r4, lsl #18
orr r6, r6, r5, lsl #18
lsrs r8, r4, #18
lsrs r9, r5, #18
orr r9, r9, r4, lsl #14
orr r8, r8, r5, lsl #14
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #23
lsls r9, r5, #23
orr r9, r9, r4, lsr #9
orr r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #16]
ldr r5, [r0, #20]
#else
ldrd r4, r5, [r0, #16]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #16]
str r5, [r0, #20]
#else
strd r4, r5, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #56]
ldr r5, [r0, #60]
#else
ldrd r4, r5, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0]
ldr r7, [r0, #4]
#else
ldrd r6, r7, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #8]
ldr r9, [r0, #12]
#else
ldrd r8, r9, [r0, #8]
#endif
eor r6, r6, r8
eor r7, r7, r9
and r6, r6, r4
and r7, r7, r5
eor r6, r6, r8
eor r7, r7, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #16]
ldr r5, [r0, #20]
#else
ldrd r4, r5, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #104]
ldr r9, [sp, #108]
#else
ldrd r8, r9, [sp, #104]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3, #104]
ldr r7, [r3, #108]
#else
ldrd r6, r7, [r3, #104]
#endif
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #48]
ldr r9, [r0, #52]
#else
ldrd r8, r9, [r0, #48]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #16]
str r5, [r0, #20]
#else
strd r4, r5, [r0, #16]
#endif
adds r8, r8, r4
adc r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #24]
ldr r5, [r0, #28]
#else
ldrd r4, r5, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #48]
str r9, [r0, #52]
#else
strd r8, r9, [r0, #48]
#endif
lsrs r6, r4, #28
lsrs r7, r5, #28
orr r7, r7, r4, lsl #4
orr r6, r6, r5, lsl #4
lsls r8, r4, #30
lsls r9, r5, #30
orr r9, r9, r4, lsr #2
orr r8, r8, r5, lsr #2
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #25
lsls r9, r5, #25
orr r9, r9, r4, lsr #7
orr r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #16]
ldr r5, [r0, #20]
#else
ldrd r4, r5, [r0, #16]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #24]
ldr r9, [r0, #28]
#else
ldrd r8, r9, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #32]
ldr r7, [r0, #36]
#else
ldrd r6, r7, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #16]
str r5, [r0, #20]
#else
strd r4, r5, [r0, #16]
#endif
eor r8, r8, r6
eor r9, r9, r7
and r10, r10, r8
and r11, r11, r9
eor r10, r10, r6
eor r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #16]
ldr r7, [r0, #20]
#else
ldrd r6, r7, [r0, #16]
#endif
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #16]
str r7, [r0, #20]
#else
strd r6, r7, [r0, #16]
#endif
mov r10, r8
mov r11, r9
# Calc new W[13]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #88]
ldr r5, [sp, #92]
#else
ldrd r4, r5, [sp, #88]
#endif
lsrs r6, r4, #19
lsrs r7, r5, #19
orr r7, r7, r4, lsl #13
orr r6, r6, r5, lsl #13
lsls r8, r4, #3
lsls r9, r5, #3
orr r9, r9, r4, lsr #29
orr r8, r8, r5, lsr #29
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #6
lsrs r9, r5, #6
orr r8, r8, r5, lsl #26
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #104]
ldr r5, [sp, #108]
#else
ldrd r4, r5, [sp, #104]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #48]
ldr r9, [sp, #52]
#else
ldrd r8, r9, [sp, #48]
#endif
adds r4, r4, r6
adc r5, r5, r7
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #104]
str r5, [sp, #108]
#else
strd r4, r5, [sp, #104]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #112]
ldr r5, [sp, #116]
#else
ldrd r4, r5, [sp, #112]
#endif
lsrs r6, r4, #1
lsrs r7, r5, #1
orr r7, r7, r4, lsl #31
orr r6, r6, r5, lsl #31
lsrs r8, r4, #8
lsrs r9, r5, #8
orr r9, r9, r4, lsl #24
orr r8, r8, r5, lsl #24
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #7
lsrs r9, r5, #7
orr r8, r8, r5, lsl #25
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #104]
ldr r5, [sp, #108]
#else
ldrd r4, r5, [sp, #104]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #104]
str r5, [sp, #108]
#else
strd r4, r5, [sp, #104]
#endif
# Round 14
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #48]
ldr r5, [r0, #52]
#else
ldrd r4, r5, [r0, #48]
#endif
lsrs r6, r4, #14
lsrs r7, r5, #14
orr r7, r7, r4, lsl #18
orr r6, r6, r5, lsl #18
lsrs r8, r4, #18
lsrs r9, r5, #18
orr r9, r9, r4, lsl #14
orr r8, r8, r5, lsl #14
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #23
lsls r9, r5, #23
orr r9, r9, r4, lsr #9
orr r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #8]
ldr r5, [r0, #12]
#else
ldrd r4, r5, [r0, #8]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #8]
str r5, [r0, #12]
#else
strd r4, r5, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #48]
ldr r5, [r0, #52]
#else
ldrd r4, r5, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #56]
ldr r7, [r0, #60]
#else
ldrd r6, r7, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0]
ldr r9, [r0, #4]
#else
ldrd r8, r9, [r0]
#endif
eor r6, r6, r8
eor r7, r7, r9
and r6, r6, r4
and r7, r7, r5
eor r6, r6, r8
eor r7, r7, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #8]
ldr r5, [r0, #12]
#else
ldrd r4, r5, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #112]
ldr r9, [sp, #116]
#else
ldrd r8, r9, [sp, #112]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3, #112]
ldr r7, [r3, #116]
#else
ldrd r6, r7, [r3, #112]
#endif
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #40]
ldr r9, [r0, #44]
#else
ldrd r8, r9, [r0, #40]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #8]
str r5, [r0, #12]
#else
strd r4, r5, [r0, #8]
#endif
adds r8, r8, r4
adc r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #16]
ldr r5, [r0, #20]
#else
ldrd r4, r5, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #40]
str r9, [r0, #44]
#else
strd r8, r9, [r0, #40]
#endif
lsrs r6, r4, #28
lsrs r7, r5, #28
orr r7, r7, r4, lsl #4
orr r6, r6, r5, lsl #4
lsls r8, r4, #30
lsls r9, r5, #30
orr r9, r9, r4, lsr #2
orr r8, r8, r5, lsr #2
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #25
lsls r9, r5, #25
orr r9, r9, r4, lsr #7
orr r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #8]
ldr r5, [r0, #12]
#else
ldrd r4, r5, [r0, #8]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #16]
ldr r9, [r0, #20]
#else
ldrd r8, r9, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #24]
ldr r7, [r0, #28]
#else
ldrd r6, r7, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #8]
str r5, [r0, #12]
#else
strd r4, r5, [r0, #8]
#endif
eor r8, r8, r6
eor r9, r9, r7
and r10, r10, r8
and r11, r11, r9
eor r10, r10, r6
eor r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #8]
ldr r7, [r0, #12]
#else
ldrd r6, r7, [r0, #8]
#endif
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #8]
str r7, [r0, #12]
#else
strd r6, r7, [r0, #8]
#endif
mov r10, r8
mov r11, r9
# Calc new W[14]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #96]
ldr r5, [sp, #100]
#else
ldrd r4, r5, [sp, #96]
#endif
lsrs r6, r4, #19
lsrs r7, r5, #19
orr r7, r7, r4, lsl #13
orr r6, r6, r5, lsl #13
lsls r8, r4, #3
lsls r9, r5, #3
orr r9, r9, r4, lsr #29
orr r8, r8, r5, lsr #29
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #6
lsrs r9, r5, #6
orr r8, r8, r5, lsl #26
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #112]
ldr r5, [sp, #116]
#else
ldrd r4, r5, [sp, #112]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #56]
ldr r9, [sp, #60]
#else
ldrd r8, r9, [sp, #56]
#endif
adds r4, r4, r6
adc r5, r5, r7
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #112]
str r5, [sp, #116]
#else
strd r4, r5, [sp, #112]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #120]
ldr r5, [sp, #124]
#else
ldrd r4, r5, [sp, #120]
#endif
lsrs r6, r4, #1
lsrs r7, r5, #1
orr r7, r7, r4, lsl #31
orr r6, r6, r5, lsl #31
lsrs r8, r4, #8
lsrs r9, r5, #8
orr r9, r9, r4, lsl #24
orr r8, r8, r5, lsl #24
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #7
lsrs r9, r5, #7
orr r8, r8, r5, lsl #25
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #112]
ldr r5, [sp, #116]
#else
ldrd r4, r5, [sp, #112]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #112]
str r5, [sp, #116]
#else
strd r4, r5, [sp, #112]
#endif
# Round 15
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #40]
ldr r5, [r0, #44]
#else
ldrd r4, r5, [r0, #40]
#endif
lsrs r6, r4, #14
lsrs r7, r5, #14
orr r7, r7, r4, lsl #18
orr r6, r6, r5, lsl #18
lsrs r8, r4, #18
lsrs r9, r5, #18
orr r9, r9, r4, lsl #14
orr r8, r8, r5, lsl #14
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #23
lsls r9, r5, #23
orr r9, r9, r4, lsr #9
orr r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0]
str r5, [r0, #4]
#else
strd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #40]
ldr r5, [r0, #44]
#else
ldrd r4, r5, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #48]
ldr r7, [r0, #52]
#else
ldrd r6, r7, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #56]
ldr r9, [r0, #60]
#else
ldrd r8, r9, [r0, #56]
#endif
eor r6, r6, r8
eor r7, r7, r9
and r6, r6, r4
and r7, r7, r5
eor r6, r6, r8
eor r7, r7, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #120]
ldr r9, [sp, #124]
#else
ldrd r8, r9, [sp, #120]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3, #120]
ldr r7, [r3, #124]
#else
ldrd r6, r7, [r3, #120]
#endif
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #32]
ldr r9, [r0, #36]
#else
ldrd r8, r9, [r0, #32]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0]
str r5, [r0, #4]
#else
strd r4, r5, [r0]
#endif
adds r8, r8, r4
adc r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #8]
ldr r5, [r0, #12]
#else
ldrd r4, r5, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #32]
str r9, [r0, #36]
#else
strd r8, r9, [r0, #32]
#endif
lsrs r6, r4, #28
lsrs r7, r5, #28
orr r7, r7, r4, lsl #4
orr r6, r6, r5, lsl #4
lsls r8, r4, #30
lsls r9, r5, #30
orr r9, r9, r4, lsr #2
orr r8, r8, r5, lsr #2
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #25
lsls r9, r5, #25
orr r9, r9, r4, lsr #7
orr r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #8]
ldr r9, [r0, #12]
#else
ldrd r8, r9, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #16]
ldr r7, [r0, #20]
#else
ldrd r6, r7, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0]
str r5, [r0, #4]
#else
strd r4, r5, [r0]
#endif
eor r8, r8, r6
eor r9, r9, r7
and r10, r10, r8
and r11, r11, r9
eor r10, r10, r6
eor r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0]
ldr r7, [r0, #4]
#else
ldrd r6, r7, [r0]
#endif
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0]
str r7, [r0, #4]
#else
strd r6, r7, [r0]
#endif
mov r10, r8
mov r11, r9
# Calc new W[15]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #104]
ldr r5, [sp, #108]
#else
ldrd r4, r5, [sp, #104]
#endif
lsrs r6, r4, #19
lsrs r7, r5, #19
orr r7, r7, r4, lsl #13
orr r6, r6, r5, lsl #13
lsls r8, r4, #3
lsls r9, r5, #3
orr r9, r9, r4, lsr #29
orr r8, r8, r5, lsr #29
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #6
lsrs r9, r5, #6
orr r8, r8, r5, lsl #26
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #120]
ldr r5, [sp, #124]
#else
ldrd r4, r5, [sp, #120]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #64]
ldr r9, [sp, #68]
#else
ldrd r8, r9, [sp, #64]
#endif
adds r4, r4, r6
adc r5, r5, r7
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #120]
str r5, [sp, #124]
#else
strd r4, r5, [sp, #120]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp]
ldr r5, [sp, #4]
#else
ldrd r4, r5, [sp]
#endif
lsrs r6, r4, #1
lsrs r7, r5, #1
orr r7, r7, r4, lsl #31
orr r6, r6, r5, lsl #31
lsrs r8, r4, #8
lsrs r9, r5, #8
orr r9, r9, r4, lsl #24
orr r8, r8, r5, lsl #24
eor r7, r7, r9
eor r6, r6, r8
lsrs r8, r4, #7
lsrs r9, r5, #7
orr r8, r8, r5, lsl #25
eor r7, r7, r9
eor r6, r6, r8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #120]
ldr r5, [sp, #124]
#else
ldrd r4, r5, [sp, #120]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #120]
str r5, [sp, #124]
#else
strd r4, r5, [sp, #120]
#endif
add r3, r3, #0x80
subs r12, r12, #1
bne L_SHA512_transform_len_start
# Round 0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #32]
ldr r5, [r0, #36]
#else
ldrd r4, r5, [r0, #32]
#endif
lsrs r6, r4, #14
lsrs r7, r5, #14
orr r7, r7, r4, lsl #18
orr r6, r6, r5, lsl #18
lsrs r8, r4, #18
lsrs r9, r5, #18
orr r9, r9, r4, lsl #14
orr r8, r8, r5, lsl #14
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #23
lsls r9, r5, #23
orr r9, r9, r4, lsr #9
orr r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #56]
ldr r5, [r0, #60]
#else
ldrd r4, r5, [r0, #56]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #56]
str r5, [r0, #60]
#else
strd r4, r5, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #32]
ldr r5, [r0, #36]
#else
ldrd r4, r5, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #40]
ldr r7, [r0, #44]
#else
ldrd r6, r7, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #48]
ldr r9, [r0, #52]
#else
ldrd r8, r9, [r0, #48]
#endif
eor r6, r6, r8
eor r7, r7, r9
and r6, r6, r4
and r7, r7, r5
eor r6, r6, r8
eor r7, r7, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #56]
ldr r5, [r0, #60]
#else
ldrd r4, r5, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp]
ldr r9, [sp, #4]
#else
ldrd r8, r9, [sp]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3]
ldr r7, [r3, #4]
#else
ldrd r6, r7, [r3]
#endif
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #24]
ldr r9, [r0, #28]
#else
ldrd r8, r9, [r0, #24]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #56]
str r5, [r0, #60]
#else
strd r4, r5, [r0, #56]
#endif
adds r8, r8, r4
adc r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #24]
str r9, [r0, #28]
#else
strd r8, r9, [r0, #24]
#endif
lsrs r6, r4, #28
lsrs r7, r5, #28
orr r7, r7, r4, lsl #4
orr r6, r6, r5, lsl #4
lsls r8, r4, #30
lsls r9, r5, #30
orr r9, r9, r4, lsr #2
orr r8, r8, r5, lsr #2
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #25
lsls r9, r5, #25
orr r9, r9, r4, lsr #7
orr r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #56]
ldr r5, [r0, #60]
#else
ldrd r4, r5, [r0, #56]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0]
ldr r9, [r0, #4]
#else
ldrd r8, r9, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #8]
ldr r7, [r0, #12]
#else
ldrd r6, r7, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #56]
str r5, [r0, #60]
#else
strd r4, r5, [r0, #56]
#endif
eor r8, r8, r6
eor r9, r9, r7
and r10, r10, r8
and r11, r11, r9
eor r10, r10, r6
eor r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #56]
ldr r7, [r0, #60]
#else
ldrd r6, r7, [r0, #56]
#endif
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #56]
str r7, [r0, #60]
#else
strd r6, r7, [r0, #56]
#endif
mov r10, r8
mov r11, r9
# Round 1
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #24]
ldr r5, [r0, #28]
#else
ldrd r4, r5, [r0, #24]
#endif
lsrs r6, r4, #14
lsrs r7, r5, #14
orr r7, r7, r4, lsl #18
orr r6, r6, r5, lsl #18
lsrs r8, r4, #18
lsrs r9, r5, #18
orr r9, r9, r4, lsl #14
orr r8, r8, r5, lsl #14
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #23
lsls r9, r5, #23
orr r9, r9, r4, lsr #9
orr r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #48]
ldr r5, [r0, #52]
#else
ldrd r4, r5, [r0, #48]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #48]
str r5, [r0, #52]
#else
strd r4, r5, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #24]
ldr r5, [r0, #28]
#else
ldrd r4, r5, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #32]
ldr r7, [r0, #36]
#else
ldrd r6, r7, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #40]
ldr r9, [r0, #44]
#else
ldrd r8, r9, [r0, #40]
#endif
eor r6, r6, r8
eor r7, r7, r9
and r6, r6, r4
and r7, r7, r5
eor r6, r6, r8
eor r7, r7, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #48]
ldr r5, [r0, #52]
#else
ldrd r4, r5, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #8]
ldr r9, [sp, #12]
#else
ldrd r8, r9, [sp, #8]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3, #8]
ldr r7, [r3, #12]
#else
ldrd r6, r7, [r3, #8]
#endif
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #16]
ldr r9, [r0, #20]
#else
ldrd r8, r9, [r0, #16]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #48]
str r5, [r0, #52]
#else
strd r4, r5, [r0, #48]
#endif
adds r8, r8, r4
adc r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #56]
ldr r5, [r0, #60]
#else
ldrd r4, r5, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #16]
str r9, [r0, #20]
#else
strd r8, r9, [r0, #16]
#endif
lsrs r6, r4, #28
lsrs r7, r5, #28
orr r7, r7, r4, lsl #4
orr r6, r6, r5, lsl #4
lsls r8, r4, #30
lsls r9, r5, #30
orr r9, r9, r4, lsr #2
orr r8, r8, r5, lsr #2
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #25
lsls r9, r5, #25
orr r9, r9, r4, lsr #7
orr r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #48]
ldr r5, [r0, #52]
#else
ldrd r4, r5, [r0, #48]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #56]
ldr r9, [r0, #60]
#else
ldrd r8, r9, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0]
ldr r7, [r0, #4]
#else
ldrd r6, r7, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #48]
str r5, [r0, #52]
#else
strd r4, r5, [r0, #48]
#endif
eor r8, r8, r6
eor r9, r9, r7
and r10, r10, r8
and r11, r11, r9
eor r10, r10, r6
eor r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #48]
ldr r7, [r0, #52]
#else
ldrd r6, r7, [r0, #48]
#endif
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #48]
str r7, [r0, #52]
#else
strd r6, r7, [r0, #48]
#endif
mov r10, r8
mov r11, r9
# Round 2
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #16]
ldr r5, [r0, #20]
#else
ldrd r4, r5, [r0, #16]
#endif
lsrs r6, r4, #14
lsrs r7, r5, #14
orr r7, r7, r4, lsl #18
orr r6, r6, r5, lsl #18
lsrs r8, r4, #18
lsrs r9, r5, #18
orr r9, r9, r4, lsl #14
orr r8, r8, r5, lsl #14
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #23
lsls r9, r5, #23
orr r9, r9, r4, lsr #9
orr r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #40]
ldr r5, [r0, #44]
#else
ldrd r4, r5, [r0, #40]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #40]
str r5, [r0, #44]
#else
strd r4, r5, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #16]
ldr r5, [r0, #20]
#else
ldrd r4, r5, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #24]
ldr r7, [r0, #28]
#else
ldrd r6, r7, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #32]
ldr r9, [r0, #36]
#else
ldrd r8, r9, [r0, #32]
#endif
eor r6, r6, r8
eor r7, r7, r9
and r6, r6, r4
and r7, r7, r5
eor r6, r6, r8
eor r7, r7, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #40]
ldr r5, [r0, #44]
#else
ldrd r4, r5, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #16]
ldr r9, [sp, #20]
#else
ldrd r8, r9, [sp, #16]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3, #16]
ldr r7, [r3, #20]
#else
ldrd r6, r7, [r3, #16]
#endif
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #8]
ldr r9, [r0, #12]
#else
ldrd r8, r9, [r0, #8]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #40]
str r5, [r0, #44]
#else
strd r4, r5, [r0, #40]
#endif
adds r8, r8, r4
adc r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #48]
ldr r5, [r0, #52]
#else
ldrd r4, r5, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #8]
str r9, [r0, #12]
#else
strd r8, r9, [r0, #8]
#endif
lsrs r6, r4, #28
lsrs r7, r5, #28
orr r7, r7, r4, lsl #4
orr r6, r6, r5, lsl #4
lsls r8, r4, #30
lsls r9, r5, #30
orr r9, r9, r4, lsr #2
orr r8, r8, r5, lsr #2
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #25
lsls r9, r5, #25
orr r9, r9, r4, lsr #7
orr r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #40]
ldr r5, [r0, #44]
#else
ldrd r4, r5, [r0, #40]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #48]
ldr r9, [r0, #52]
#else
ldrd r8, r9, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #56]
ldr r7, [r0, #60]
#else
ldrd r6, r7, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #40]
str r5, [r0, #44]
#else
strd r4, r5, [r0, #40]
#endif
eor r8, r8, r6
eor r9, r9, r7
and r10, r10, r8
and r11, r11, r9
eor r10, r10, r6
eor r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #40]
ldr r7, [r0, #44]
#else
ldrd r6, r7, [r0, #40]
#endif
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #40]
str r7, [r0, #44]
#else
strd r6, r7, [r0, #40]
#endif
mov r10, r8
mov r11, r9
# Round 3
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #8]
ldr r5, [r0, #12]
#else
ldrd r4, r5, [r0, #8]
#endif
lsrs r6, r4, #14
lsrs r7, r5, #14
orr r7, r7, r4, lsl #18
orr r6, r6, r5, lsl #18
lsrs r8, r4, #18
lsrs r9, r5, #18
orr r9, r9, r4, lsl #14
orr r8, r8, r5, lsl #14
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #23
lsls r9, r5, #23
orr r9, r9, r4, lsr #9
orr r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #32]
ldr r5, [r0, #36]
#else
ldrd r4, r5, [r0, #32]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #32]
str r5, [r0, #36]
#else
strd r4, r5, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #8]
ldr r5, [r0, #12]
#else
ldrd r4, r5, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #16]
ldr r7, [r0, #20]
#else
ldrd r6, r7, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #24]
ldr r9, [r0, #28]
#else
ldrd r8, r9, [r0, #24]
#endif
eor r6, r6, r8
eor r7, r7, r9
and r6, r6, r4
and r7, r7, r5
eor r6, r6, r8
eor r7, r7, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #32]
ldr r5, [r0, #36]
#else
ldrd r4, r5, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #24]
ldr r9, [sp, #28]
#else
ldrd r8, r9, [sp, #24]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3, #24]
ldr r7, [r3, #28]
#else
ldrd r6, r7, [r3, #24]
#endif
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0]
ldr r9, [r0, #4]
#else
ldrd r8, r9, [r0]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #32]
str r5, [r0, #36]
#else
strd r4, r5, [r0, #32]
#endif
adds r8, r8, r4
adc r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #40]
ldr r5, [r0, #44]
#else
ldrd r4, r5, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0]
str r9, [r0, #4]
#else
strd r8, r9, [r0]
#endif
lsrs r6, r4, #28
lsrs r7, r5, #28
orr r7, r7, r4, lsl #4
orr r6, r6, r5, lsl #4
lsls r8, r4, #30
lsls r9, r5, #30
orr r9, r9, r4, lsr #2
orr r8, r8, r5, lsr #2
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #25
lsls r9, r5, #25
orr r9, r9, r4, lsr #7
orr r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #32]
ldr r5, [r0, #36]
#else
ldrd r4, r5, [r0, #32]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #40]
ldr r9, [r0, #44]
#else
ldrd r8, r9, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #48]
ldr r7, [r0, #52]
#else
ldrd r6, r7, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #32]
str r5, [r0, #36]
#else
strd r4, r5, [r0, #32]
#endif
eor r8, r8, r6
eor r9, r9, r7
and r10, r10, r8
and r11, r11, r9
eor r10, r10, r6
eor r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #32]
ldr r7, [r0, #36]
#else
ldrd r6, r7, [r0, #32]
#endif
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #32]
str r7, [r0, #36]
#else
strd r6, r7, [r0, #32]
#endif
mov r10, r8
mov r11, r9
# Round 4
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
lsrs r6, r4, #14
lsrs r7, r5, #14
orr r7, r7, r4, lsl #18
orr r6, r6, r5, lsl #18
lsrs r8, r4, #18
lsrs r9, r5, #18
orr r9, r9, r4, lsl #14
orr r8, r8, r5, lsl #14
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #23
lsls r9, r5, #23
orr r9, r9, r4, lsr #9
orr r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #24]
ldr r5, [r0, #28]
#else
ldrd r4, r5, [r0, #24]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #24]
str r5, [r0, #28]
#else
strd r4, r5, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #8]
ldr r7, [r0, #12]
#else
ldrd r6, r7, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #16]
ldr r9, [r0, #20]
#else
ldrd r8, r9, [r0, #16]
#endif
eor r6, r6, r8
eor r7, r7, r9
and r6, r6, r4
and r7, r7, r5
eor r6, r6, r8
eor r7, r7, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #24]
ldr r5, [r0, #28]
#else
ldrd r4, r5, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #32]
ldr r9, [sp, #36]
#else
ldrd r8, r9, [sp, #32]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3, #32]
ldr r7, [r3, #36]
#else
ldrd r6, r7, [r3, #32]
#endif
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #56]
ldr r9, [r0, #60]
#else
ldrd r8, r9, [r0, #56]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #24]
str r5, [r0, #28]
#else
strd r4, r5, [r0, #24]
#endif
adds r8, r8, r4
adc r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #32]
ldr r5, [r0, #36]
#else
ldrd r4, r5, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #56]
str r9, [r0, #60]
#else
strd r8, r9, [r0, #56]
#endif
lsrs r6, r4, #28
lsrs r7, r5, #28
orr r7, r7, r4, lsl #4
orr r6, r6, r5, lsl #4
lsls r8, r4, #30
lsls r9, r5, #30
orr r9, r9, r4, lsr #2
orr r8, r8, r5, lsr #2
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #25
lsls r9, r5, #25
orr r9, r9, r4, lsr #7
orr r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #24]
ldr r5, [r0, #28]
#else
ldrd r4, r5, [r0, #24]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #32]
ldr r9, [r0, #36]
#else
ldrd r8, r9, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #40]
ldr r7, [r0, #44]
#else
ldrd r6, r7, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #24]
str r5, [r0, #28]
#else
strd r4, r5, [r0, #24]
#endif
eor r8, r8, r6
eor r9, r9, r7
and r10, r10, r8
and r11, r11, r9
eor r10, r10, r6
eor r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #24]
ldr r7, [r0, #28]
#else
ldrd r6, r7, [r0, #24]
#endif
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #24]
str r7, [r0, #28]
#else
strd r6, r7, [r0, #24]
#endif
mov r10, r8
mov r11, r9
# Round 5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #56]
ldr r5, [r0, #60]
#else
ldrd r4, r5, [r0, #56]
#endif
lsrs r6, r4, #14
lsrs r7, r5, #14
orr r7, r7, r4, lsl #18
orr r6, r6, r5, lsl #18
lsrs r8, r4, #18
lsrs r9, r5, #18
orr r9, r9, r4, lsl #14
orr r8, r8, r5, lsl #14
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #23
lsls r9, r5, #23
orr r9, r9, r4, lsr #9
orr r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #16]
ldr r5, [r0, #20]
#else
ldrd r4, r5, [r0, #16]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #16]
str r5, [r0, #20]
#else
strd r4, r5, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #56]
ldr r5, [r0, #60]
#else
ldrd r4, r5, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0]
ldr r7, [r0, #4]
#else
ldrd r6, r7, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #8]
ldr r9, [r0, #12]
#else
ldrd r8, r9, [r0, #8]
#endif
eor r6, r6, r8
eor r7, r7, r9
and r6, r6, r4
and r7, r7, r5
eor r6, r6, r8
eor r7, r7, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #16]
ldr r5, [r0, #20]
#else
ldrd r4, r5, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #40]
ldr r9, [sp, #44]
#else
ldrd r8, r9, [sp, #40]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r3, #40]
ldr r7, [r3, #44]
#else
ldrd r6, r7, [r3, #40]
#endif
adds r4, r4, r8
adc r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #48]
ldr r9, [r0, #52]
#else
ldrd r8, r9, [r0, #48]
#endif
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #16]
str r5, [r0, #20]
#else
strd r4, r5, [r0, #16]
#endif
adds r8, r8, r4
adc r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #24]
ldr r5, [r0, #28]
#else
ldrd r4, r5, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #48]
str r9, [r0, #52]
#else
strd r8, r9, [r0, #48]
#endif
lsrs r6, r4, #28
lsrs r7, r5, #28
orr r7, r7, r4, lsl #4
orr r6, r6, r5, lsl #4
lsls r8, r4, #30
lsls r9, r5, #30
orr r9, r9, r4, lsr #2
orr r8, r8, r5, lsr #2
eor r6, r6, r8
eor r7, r7, r9
lsls r8, r4, #25
lsls r9, r5, #25
orr r9, r9, r4, lsr #7
orr r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #16]
ldr r5, [r0, #20]
#else
ldrd r4, r5, [r0, #16]
#endif
eor r6, r6, r8
eor r7, r7, r9
adds r4, r4, r6
adc r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #24]
ldr r9, [r0, #28]
#else
ldrd r8, r9, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #32]
ldr r7, [r0, #36]
#else
ldrd r6, r7, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #16]
str r5, [r0, #20]
#else
strd r4, r5, [r0, #16]
#endif
eor r8, r8, r6
eor r9, r9, r7
and r10, r10, r8
and r11, r11, r9
eor r10, r10, r6
eor r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #16]
ldr r7, [r0, #20]
#else
ldrd r6, r7, [r0, #16]
#endif
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #16]
str r7, [r0, #20]
#else
strd r6, r7, [r0, #16]
#endif
mov r10, r8
mov r11, r9
	# Round 6
	# SHA-512 compression round (structure matches FIPS 180-4): 64-bit
	# state words are held as 32-bit lo/hi register pairs, so each 64-bit
	# rotate below is assembled from two shifted halves OR-ed together.
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #48]
	ldr	r5, [r0, #52]
#else
	ldrd	r4, r5, [r0, #48]
#endif
	# Sigma1(e) = ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41) -> r6:r7
	lsrs	r6, r4, #14
	lsrs	r7, r5, #14
	orr	r7, r7, r4, lsl #18
	orr	r6, r6, r5, lsl #18
	lsrs	r8, r4, #18
	lsrs	r9, r5, #18
	orr	r9, r9, r4, lsl #14
	orr	r8, r8, r5, lsl #14
	eor	r6, r6, r8
	eor	r7, r7, r9
	lsls	r8, r4, #23
	lsls	r9, r5, #23
	orr	r9, r9, r4, lsr #9
	orr	r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #8]
	ldr	r5, [r0, #12]
#else
	ldrd	r4, r5, [r0, #8]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Sigma1(e)
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #8]
	str	r5, [r0, #12]
#else
	strd	r4, r5, [r0, #8]
#endif
	# Ch(e, f, g) = ((f ^ g) & e) ^ g -> r6:r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #48]
	ldr	r5, [r0, #52]
#else
	ldrd	r4, r5, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #56]
	ldr	r7, [r0, #60]
#else
	ldrd	r6, r7, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0]
	ldr	r9, [r0, #4]
#else
	ldrd	r8, r9, [r0]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	and	r6, r6, r4
	and	r7, r7, r5
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Ch(e, f, g) + W[6] (on stack) + K[6] (table at r3)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #8]
	ldr	r5, [r0, #12]
#else
	ldrd	r4, r5, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [sp, #48]
	ldr	r9, [sp, #52]
#else
	ldrd	r8, r9, [sp, #48]
#endif
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r3, #48]
	ldr	r7, [r3, #52]
#else
	ldrd	r6, r7, [r3, #48]
#endif
	adds	r4, r4, r8
	adc	r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #40]
	ldr	r9, [r0, #44]
#else
	ldrd	r8, r9, [r0, #40]
#endif
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #8]
	str	r5, [r0, #12]
#else
	strd	r4, r5, [r0, #8]
#endif
	# d += h
	adds	r8, r8, r4
	adc	r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #16]
	ldr	r5, [r0, #20]
#else
	ldrd	r4, r5, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r8, [r0, #40]
	str	r9, [r0, #44]
#else
	strd	r8, r9, [r0, #40]
#endif
	# Sigma0(a) = ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39) -> r6:r7
	lsrs	r6, r4, #28
	lsrs	r7, r5, #28
	orr	r7, r7, r4, lsl #4
	orr	r6, r6, r5, lsl #4
	lsls	r8, r4, #30
	lsls	r9, r5, #30
	orr	r9, r9, r4, lsr #2
	orr	r8, r8, r5, lsr #2
	eor	r6, r6, r8
	eor	r7, r7, r9
	lsls	r8, r4, #25
	lsls	r9, r5, #25
	orr	r9, r9, r4, lsr #7
	orr	r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #8]
	ldr	r5, [r0, #12]
#else
	ldrd	r4, r5, [r0, #8]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Sigma0(a)
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #16]
	ldr	r9, [r0, #20]
#else
	ldrd	r8, r9, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #24]
	ldr	r7, [r0, #28]
#else
	ldrd	r6, r7, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #8]
	str	r5, [r0, #12]
#else
	strd	r4, r5, [r0, #8]
#endif
	# Maj(a, b, c) = ((a ^ b) & (b ^ c)) ^ b, where r10:r11 holds
	# b ^ c (the previous round's a ^ b)
	eor	r8, r8, r6
	eor	r9, r9, r7
	and	r10, r10, r8
	and	r11, r11, r9
	eor	r10, r10, r6
	eor	r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #16]
	ldr	r7, [r0, #20]
#else
	ldrd	r6, r7, [r0, #16]
#endif
	# h += Maj(a, b, c)
	adds	r6, r6, r10
	adc	r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r6, [r0, #16]
	str	r7, [r0, #20]
#else
	strd	r6, r7, [r0, #16]
#endif
	# save a ^ b for the next round's Maj
	mov	r10, r8
	mov	r11, r9
	# Round 7
	# SHA-512 compression round (structure matches FIPS 180-4): 64-bit
	# state words are held as 32-bit lo/hi register pairs, so each 64-bit
	# rotate below is assembled from two shifted halves OR-ed together.
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #40]
	ldr	r5, [r0, #44]
#else
	ldrd	r4, r5, [r0, #40]
#endif
	# Sigma1(e) = ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41) -> r6:r7
	lsrs	r6, r4, #14
	lsrs	r7, r5, #14
	orr	r7, r7, r4, lsl #18
	orr	r6, r6, r5, lsl #18
	lsrs	r8, r4, #18
	lsrs	r9, r5, #18
	orr	r9, r9, r4, lsl #14
	orr	r8, r8, r5, lsl #14
	eor	r6, r6, r8
	eor	r7, r7, r9
	lsls	r8, r4, #23
	lsls	r9, r5, #23
	orr	r9, r9, r4, lsr #9
	orr	r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0]
	ldr	r5, [r0, #4]
#else
	ldrd	r4, r5, [r0]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Sigma1(e)
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0]
	str	r5, [r0, #4]
#else
	strd	r4, r5, [r0]
#endif
	# Ch(e, f, g) = ((f ^ g) & e) ^ g -> r6:r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #40]
	ldr	r5, [r0, #44]
#else
	ldrd	r4, r5, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #48]
	ldr	r7, [r0, #52]
#else
	ldrd	r6, r7, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #56]
	ldr	r9, [r0, #60]
#else
	ldrd	r8, r9, [r0, #56]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	and	r6, r6, r4
	and	r7, r7, r5
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Ch(e, f, g) + W[7] (on stack) + K[7] (table at r3)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0]
	ldr	r5, [r0, #4]
#else
	ldrd	r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [sp, #56]
	ldr	r9, [sp, #60]
#else
	ldrd	r8, r9, [sp, #56]
#endif
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r3, #56]
	ldr	r7, [r3, #60]
#else
	ldrd	r6, r7, [r3, #56]
#endif
	adds	r4, r4, r8
	adc	r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #32]
	ldr	r9, [r0, #36]
#else
	ldrd	r8, r9, [r0, #32]
#endif
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0]
	str	r5, [r0, #4]
#else
	strd	r4, r5, [r0]
#endif
	# d += h
	adds	r8, r8, r4
	adc	r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #8]
	ldr	r5, [r0, #12]
#else
	ldrd	r4, r5, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r8, [r0, #32]
	str	r9, [r0, #36]
#else
	strd	r8, r9, [r0, #32]
#endif
	# Sigma0(a) = ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39) -> r6:r7
	lsrs	r6, r4, #28
	lsrs	r7, r5, #28
	orr	r7, r7, r4, lsl #4
	orr	r6, r6, r5, lsl #4
	lsls	r8, r4, #30
	lsls	r9, r5, #30
	orr	r9, r9, r4, lsr #2
	orr	r8, r8, r5, lsr #2
	eor	r6, r6, r8
	eor	r7, r7, r9
	lsls	r8, r4, #25
	lsls	r9, r5, #25
	orr	r9, r9, r4, lsr #7
	orr	r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0]
	ldr	r5, [r0, #4]
#else
	ldrd	r4, r5, [r0]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Sigma0(a)
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #8]
	ldr	r9, [r0, #12]
#else
	ldrd	r8, r9, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #16]
	ldr	r7, [r0, #20]
#else
	ldrd	r6, r7, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0]
	str	r5, [r0, #4]
#else
	strd	r4, r5, [r0]
#endif
	# Maj(a, b, c) = ((a ^ b) & (b ^ c)) ^ b, where r10:r11 holds
	# b ^ c (the previous round's a ^ b)
	eor	r8, r8, r6
	eor	r9, r9, r7
	and	r10, r10, r8
	and	r11, r11, r9
	eor	r10, r10, r6
	eor	r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0]
	ldr	r7, [r0, #4]
#else
	ldrd	r6, r7, [r0]
#endif
	# h += Maj(a, b, c)
	adds	r6, r6, r10
	adc	r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r6, [r0]
	str	r7, [r0, #4]
#else
	strd	r6, r7, [r0]
#endif
	# save a ^ b for the next round's Maj
	mov	r10, r8
	mov	r11, r9
	# Round 8
	# SHA-512 compression round (structure matches FIPS 180-4): 64-bit
	# state words are held as 32-bit lo/hi register pairs, so each 64-bit
	# rotate below is assembled from two shifted halves OR-ed together.
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #32]
	ldr	r5, [r0, #36]
#else
	ldrd	r4, r5, [r0, #32]
#endif
	# Sigma1(e) = ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41) -> r6:r7
	lsrs	r6, r4, #14
	lsrs	r7, r5, #14
	orr	r7, r7, r4, lsl #18
	orr	r6, r6, r5, lsl #18
	lsrs	r8, r4, #18
	lsrs	r9, r5, #18
	orr	r9, r9, r4, lsl #14
	orr	r8, r8, r5, lsl #14
	eor	r6, r6, r8
	eor	r7, r7, r9
	lsls	r8, r4, #23
	lsls	r9, r5, #23
	orr	r9, r9, r4, lsr #9
	orr	r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #56]
	ldr	r5, [r0, #60]
#else
	ldrd	r4, r5, [r0, #56]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Sigma1(e)
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #56]
	str	r5, [r0, #60]
#else
	strd	r4, r5, [r0, #56]
#endif
	# Ch(e, f, g) = ((f ^ g) & e) ^ g -> r6:r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #32]
	ldr	r5, [r0, #36]
#else
	ldrd	r4, r5, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #40]
	ldr	r7, [r0, #44]
#else
	ldrd	r6, r7, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #48]
	ldr	r9, [r0, #52]
#else
	ldrd	r8, r9, [r0, #48]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	and	r6, r6, r4
	and	r7, r7, r5
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Ch(e, f, g) + W[8] (on stack) + K[8] (table at r3)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #56]
	ldr	r5, [r0, #60]
#else
	ldrd	r4, r5, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [sp, #64]
	ldr	r9, [sp, #68]
#else
	ldrd	r8, r9, [sp, #64]
#endif
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r3, #64]
	ldr	r7, [r3, #68]
#else
	ldrd	r6, r7, [r3, #64]
#endif
	adds	r4, r4, r8
	adc	r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #24]
	ldr	r9, [r0, #28]
#else
	ldrd	r8, r9, [r0, #24]
#endif
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #56]
	str	r5, [r0, #60]
#else
	strd	r4, r5, [r0, #56]
#endif
	# d += h
	adds	r8, r8, r4
	adc	r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0]
	ldr	r5, [r0, #4]
#else
	ldrd	r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r8, [r0, #24]
	str	r9, [r0, #28]
#else
	strd	r8, r9, [r0, #24]
#endif
	# Sigma0(a) = ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39) -> r6:r7
	lsrs	r6, r4, #28
	lsrs	r7, r5, #28
	orr	r7, r7, r4, lsl #4
	orr	r6, r6, r5, lsl #4
	lsls	r8, r4, #30
	lsls	r9, r5, #30
	orr	r9, r9, r4, lsr #2
	orr	r8, r8, r5, lsr #2
	eor	r6, r6, r8
	eor	r7, r7, r9
	lsls	r8, r4, #25
	lsls	r9, r5, #25
	orr	r9, r9, r4, lsr #7
	orr	r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #56]
	ldr	r5, [r0, #60]
#else
	ldrd	r4, r5, [r0, #56]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Sigma0(a)
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0]
	ldr	r9, [r0, #4]
#else
	ldrd	r8, r9, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #8]
	ldr	r7, [r0, #12]
#else
	ldrd	r6, r7, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #56]
	str	r5, [r0, #60]
#else
	strd	r4, r5, [r0, #56]
#endif
	# Maj(a, b, c) = ((a ^ b) & (b ^ c)) ^ b, where r10:r11 holds
	# b ^ c (the previous round's a ^ b)
	eor	r8, r8, r6
	eor	r9, r9, r7
	and	r10, r10, r8
	and	r11, r11, r9
	eor	r10, r10, r6
	eor	r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #56]
	ldr	r7, [r0, #60]
#else
	ldrd	r6, r7, [r0, #56]
#endif
	# h += Maj(a, b, c)
	adds	r6, r6, r10
	adc	r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r6, [r0, #56]
	str	r7, [r0, #60]
#else
	strd	r6, r7, [r0, #56]
#endif
	# save a ^ b for the next round's Maj
	mov	r10, r8
	mov	r11, r9
	# Round 9
	# SHA-512 compression round (structure matches FIPS 180-4): 64-bit
	# state words are held as 32-bit lo/hi register pairs, so each 64-bit
	# rotate below is assembled from two shifted halves OR-ed together.
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #24]
	ldr	r5, [r0, #28]
#else
	ldrd	r4, r5, [r0, #24]
#endif
	# Sigma1(e) = ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41) -> r6:r7
	lsrs	r6, r4, #14
	lsrs	r7, r5, #14
	orr	r7, r7, r4, lsl #18
	orr	r6, r6, r5, lsl #18
	lsrs	r8, r4, #18
	lsrs	r9, r5, #18
	orr	r9, r9, r4, lsl #14
	orr	r8, r8, r5, lsl #14
	eor	r6, r6, r8
	eor	r7, r7, r9
	lsls	r8, r4, #23
	lsls	r9, r5, #23
	orr	r9, r9, r4, lsr #9
	orr	r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #48]
	ldr	r5, [r0, #52]
#else
	ldrd	r4, r5, [r0, #48]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Sigma1(e)
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #48]
	str	r5, [r0, #52]
#else
	strd	r4, r5, [r0, #48]
#endif
	# Ch(e, f, g) = ((f ^ g) & e) ^ g -> r6:r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #24]
	ldr	r5, [r0, #28]
#else
	ldrd	r4, r5, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #32]
	ldr	r7, [r0, #36]
#else
	ldrd	r6, r7, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #40]
	ldr	r9, [r0, #44]
#else
	ldrd	r8, r9, [r0, #40]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	and	r6, r6, r4
	and	r7, r7, r5
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Ch(e, f, g) + W[9] (on stack) + K[9] (table at r3)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #48]
	ldr	r5, [r0, #52]
#else
	ldrd	r4, r5, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [sp, #72]
	ldr	r9, [sp, #76]
#else
	ldrd	r8, r9, [sp, #72]
#endif
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r3, #72]
	ldr	r7, [r3, #76]
#else
	ldrd	r6, r7, [r3, #72]
#endif
	adds	r4, r4, r8
	adc	r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #16]
	ldr	r9, [r0, #20]
#else
	ldrd	r8, r9, [r0, #16]
#endif
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #48]
	str	r5, [r0, #52]
#else
	strd	r4, r5, [r0, #48]
#endif
	# d += h
	adds	r8, r8, r4
	adc	r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #56]
	ldr	r5, [r0, #60]
#else
	ldrd	r4, r5, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r8, [r0, #16]
	str	r9, [r0, #20]
#else
	strd	r8, r9, [r0, #16]
#endif
	# Sigma0(a) = ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39) -> r6:r7
	lsrs	r6, r4, #28
	lsrs	r7, r5, #28
	orr	r7, r7, r4, lsl #4
	orr	r6, r6, r5, lsl #4
	lsls	r8, r4, #30
	lsls	r9, r5, #30
	orr	r9, r9, r4, lsr #2
	orr	r8, r8, r5, lsr #2
	eor	r6, r6, r8
	eor	r7, r7, r9
	lsls	r8, r4, #25
	lsls	r9, r5, #25
	orr	r9, r9, r4, lsr #7
	orr	r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #48]
	ldr	r5, [r0, #52]
#else
	ldrd	r4, r5, [r0, #48]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Sigma0(a)
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #56]
	ldr	r9, [r0, #60]
#else
	ldrd	r8, r9, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0]
	ldr	r7, [r0, #4]
#else
	ldrd	r6, r7, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #48]
	str	r5, [r0, #52]
#else
	strd	r4, r5, [r0, #48]
#endif
	# Maj(a, b, c) = ((a ^ b) & (b ^ c)) ^ b, where r10:r11 holds
	# b ^ c (the previous round's a ^ b)
	eor	r8, r8, r6
	eor	r9, r9, r7
	and	r10, r10, r8
	and	r11, r11, r9
	eor	r10, r10, r6
	eor	r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #48]
	ldr	r7, [r0, #52]
#else
	ldrd	r6, r7, [r0, #48]
#endif
	# h += Maj(a, b, c)
	adds	r6, r6, r10
	adc	r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r6, [r0, #48]
	str	r7, [r0, #52]
#else
	strd	r6, r7, [r0, #48]
#endif
	# save a ^ b for the next round's Maj
	mov	r10, r8
	mov	r11, r9
	# Round 10
	# SHA-512 compression round (structure matches FIPS 180-4): 64-bit
	# state words are held as 32-bit lo/hi register pairs, so each 64-bit
	# rotate below is assembled from two shifted halves OR-ed together.
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #16]
	ldr	r5, [r0, #20]
#else
	ldrd	r4, r5, [r0, #16]
#endif
	# Sigma1(e) = ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41) -> r6:r7
	lsrs	r6, r4, #14
	lsrs	r7, r5, #14
	orr	r7, r7, r4, lsl #18
	orr	r6, r6, r5, lsl #18
	lsrs	r8, r4, #18
	lsrs	r9, r5, #18
	orr	r9, r9, r4, lsl #14
	orr	r8, r8, r5, lsl #14
	eor	r6, r6, r8
	eor	r7, r7, r9
	lsls	r8, r4, #23
	lsls	r9, r5, #23
	orr	r9, r9, r4, lsr #9
	orr	r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #40]
	ldr	r5, [r0, #44]
#else
	ldrd	r4, r5, [r0, #40]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Sigma1(e)
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #40]
	str	r5, [r0, #44]
#else
	strd	r4, r5, [r0, #40]
#endif
	# Ch(e, f, g) = ((f ^ g) & e) ^ g -> r6:r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #16]
	ldr	r5, [r0, #20]
#else
	ldrd	r4, r5, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #24]
	ldr	r7, [r0, #28]
#else
	ldrd	r6, r7, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #32]
	ldr	r9, [r0, #36]
#else
	ldrd	r8, r9, [r0, #32]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	and	r6, r6, r4
	and	r7, r7, r5
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Ch(e, f, g) + W[10] (on stack) + K[10] (table at r3)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #40]
	ldr	r5, [r0, #44]
#else
	ldrd	r4, r5, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [sp, #80]
	ldr	r9, [sp, #84]
#else
	ldrd	r8, r9, [sp, #80]
#endif
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r3, #80]
	ldr	r7, [r3, #84]
#else
	ldrd	r6, r7, [r3, #80]
#endif
	adds	r4, r4, r8
	adc	r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #8]
	ldr	r9, [r0, #12]
#else
	ldrd	r8, r9, [r0, #8]
#endif
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #40]
	str	r5, [r0, #44]
#else
	strd	r4, r5, [r0, #40]
#endif
	# d += h
	adds	r8, r8, r4
	adc	r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #48]
	ldr	r5, [r0, #52]
#else
	ldrd	r4, r5, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r8, [r0, #8]
	str	r9, [r0, #12]
#else
	strd	r8, r9, [r0, #8]
#endif
	# Sigma0(a) = ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39) -> r6:r7
	lsrs	r6, r4, #28
	lsrs	r7, r5, #28
	orr	r7, r7, r4, lsl #4
	orr	r6, r6, r5, lsl #4
	lsls	r8, r4, #30
	lsls	r9, r5, #30
	orr	r9, r9, r4, lsr #2
	orr	r8, r8, r5, lsr #2
	eor	r6, r6, r8
	eor	r7, r7, r9
	lsls	r8, r4, #25
	lsls	r9, r5, #25
	orr	r9, r9, r4, lsr #7
	orr	r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #40]
	ldr	r5, [r0, #44]
#else
	ldrd	r4, r5, [r0, #40]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Sigma0(a)
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #48]
	ldr	r9, [r0, #52]
#else
	ldrd	r8, r9, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #56]
	ldr	r7, [r0, #60]
#else
	ldrd	r6, r7, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #40]
	str	r5, [r0, #44]
#else
	strd	r4, r5, [r0, #40]
#endif
	# Maj(a, b, c) = ((a ^ b) & (b ^ c)) ^ b, where r10:r11 holds
	# b ^ c (the previous round's a ^ b)
	eor	r8, r8, r6
	eor	r9, r9, r7
	and	r10, r10, r8
	and	r11, r11, r9
	eor	r10, r10, r6
	eor	r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #40]
	ldr	r7, [r0, #44]
#else
	ldrd	r6, r7, [r0, #40]
#endif
	# h += Maj(a, b, c)
	adds	r6, r6, r10
	adc	r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r6, [r0, #40]
	str	r7, [r0, #44]
#else
	strd	r6, r7, [r0, #40]
#endif
	# save a ^ b for the next round's Maj
	mov	r10, r8
	mov	r11, r9
	# Round 11
	# SHA-512 compression round (structure matches FIPS 180-4): 64-bit
	# state words are held as 32-bit lo/hi register pairs, so each 64-bit
	# rotate below is assembled from two shifted halves OR-ed together.
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #8]
	ldr	r5, [r0, #12]
#else
	ldrd	r4, r5, [r0, #8]
#endif
	# Sigma1(e) = ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41) -> r6:r7
	lsrs	r6, r4, #14
	lsrs	r7, r5, #14
	orr	r7, r7, r4, lsl #18
	orr	r6, r6, r5, lsl #18
	lsrs	r8, r4, #18
	lsrs	r9, r5, #18
	orr	r9, r9, r4, lsl #14
	orr	r8, r8, r5, lsl #14
	eor	r6, r6, r8
	eor	r7, r7, r9
	lsls	r8, r4, #23
	lsls	r9, r5, #23
	orr	r9, r9, r4, lsr #9
	orr	r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #32]
	ldr	r5, [r0, #36]
#else
	ldrd	r4, r5, [r0, #32]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Sigma1(e)
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #32]
	str	r5, [r0, #36]
#else
	strd	r4, r5, [r0, #32]
#endif
	# Ch(e, f, g) = ((f ^ g) & e) ^ g -> r6:r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #8]
	ldr	r5, [r0, #12]
#else
	ldrd	r4, r5, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #16]
	ldr	r7, [r0, #20]
#else
	ldrd	r6, r7, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #24]
	ldr	r9, [r0, #28]
#else
	ldrd	r8, r9, [r0, #24]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	and	r6, r6, r4
	and	r7, r7, r5
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Ch(e, f, g) + W[11] (on stack) + K[11] (table at r3)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #32]
	ldr	r5, [r0, #36]
#else
	ldrd	r4, r5, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [sp, #88]
	ldr	r9, [sp, #92]
#else
	ldrd	r8, r9, [sp, #88]
#endif
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r3, #88]
	ldr	r7, [r3, #92]
#else
	ldrd	r6, r7, [r3, #88]
#endif
	adds	r4, r4, r8
	adc	r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0]
	ldr	r9, [r0, #4]
#else
	ldrd	r8, r9, [r0]
#endif
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #32]
	str	r5, [r0, #36]
#else
	strd	r4, r5, [r0, #32]
#endif
	# d += h
	adds	r8, r8, r4
	adc	r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #40]
	ldr	r5, [r0, #44]
#else
	ldrd	r4, r5, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r8, [r0]
	str	r9, [r0, #4]
#else
	strd	r8, r9, [r0]
#endif
	# Sigma0(a) = ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39) -> r6:r7
	lsrs	r6, r4, #28
	lsrs	r7, r5, #28
	orr	r7, r7, r4, lsl #4
	orr	r6, r6, r5, lsl #4
	lsls	r8, r4, #30
	lsls	r9, r5, #30
	orr	r9, r9, r4, lsr #2
	orr	r8, r8, r5, lsr #2
	eor	r6, r6, r8
	eor	r7, r7, r9
	lsls	r8, r4, #25
	lsls	r9, r5, #25
	orr	r9, r9, r4, lsr #7
	orr	r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #32]
	ldr	r5, [r0, #36]
#else
	ldrd	r4, r5, [r0, #32]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Sigma0(a)
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #40]
	ldr	r9, [r0, #44]
#else
	ldrd	r8, r9, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #48]
	ldr	r7, [r0, #52]
#else
	ldrd	r6, r7, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #32]
	str	r5, [r0, #36]
#else
	strd	r4, r5, [r0, #32]
#endif
	# Maj(a, b, c) = ((a ^ b) & (b ^ c)) ^ b, where r10:r11 holds
	# b ^ c (the previous round's a ^ b)
	eor	r8, r8, r6
	eor	r9, r9, r7
	and	r10, r10, r8
	and	r11, r11, r9
	eor	r10, r10, r6
	eor	r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #32]
	ldr	r7, [r0, #36]
#else
	ldrd	r6, r7, [r0, #32]
#endif
	# h += Maj(a, b, c)
	adds	r6, r6, r10
	adc	r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r6, [r0, #32]
	str	r7, [r0, #36]
#else
	strd	r6, r7, [r0, #32]
#endif
	# save a ^ b for the next round's Maj
	mov	r10, r8
	mov	r11, r9
	# Round 12
	# SHA-512 compression round (structure matches FIPS 180-4): 64-bit
	# state words are held as 32-bit lo/hi register pairs, so each 64-bit
	# rotate below is assembled from two shifted halves OR-ed together.
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0]
	ldr	r5, [r0, #4]
#else
	ldrd	r4, r5, [r0]
#endif
	# Sigma1(e) = ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41) -> r6:r7
	lsrs	r6, r4, #14
	lsrs	r7, r5, #14
	orr	r7, r7, r4, lsl #18
	orr	r6, r6, r5, lsl #18
	lsrs	r8, r4, #18
	lsrs	r9, r5, #18
	orr	r9, r9, r4, lsl #14
	orr	r8, r8, r5, lsl #14
	eor	r6, r6, r8
	eor	r7, r7, r9
	lsls	r8, r4, #23
	lsls	r9, r5, #23
	orr	r9, r9, r4, lsr #9
	orr	r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #24]
	ldr	r5, [r0, #28]
#else
	ldrd	r4, r5, [r0, #24]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Sigma1(e)
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #24]
	str	r5, [r0, #28]
#else
	strd	r4, r5, [r0, #24]
#endif
	# Ch(e, f, g) = ((f ^ g) & e) ^ g -> r6:r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0]
	ldr	r5, [r0, #4]
#else
	ldrd	r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #8]
	ldr	r7, [r0, #12]
#else
	ldrd	r6, r7, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #16]
	ldr	r9, [r0, #20]
#else
	ldrd	r8, r9, [r0, #16]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	and	r6, r6, r4
	and	r7, r7, r5
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Ch(e, f, g) + W[12] (on stack) + K[12] (table at r3)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #24]
	ldr	r5, [r0, #28]
#else
	ldrd	r4, r5, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [sp, #96]
	ldr	r9, [sp, #100]
#else
	ldrd	r8, r9, [sp, #96]
#endif
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r3, #96]
	ldr	r7, [r3, #100]
#else
	ldrd	r6, r7, [r3, #96]
#endif
	adds	r4, r4, r8
	adc	r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #56]
	ldr	r9, [r0, #60]
#else
	ldrd	r8, r9, [r0, #56]
#endif
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #24]
	str	r5, [r0, #28]
#else
	strd	r4, r5, [r0, #24]
#endif
	# d += h
	adds	r8, r8, r4
	adc	r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #32]
	ldr	r5, [r0, #36]
#else
	ldrd	r4, r5, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r8, [r0, #56]
	str	r9, [r0, #60]
#else
	strd	r8, r9, [r0, #56]
#endif
	# Sigma0(a) = ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39) -> r6:r7
	lsrs	r6, r4, #28
	lsrs	r7, r5, #28
	orr	r7, r7, r4, lsl #4
	orr	r6, r6, r5, lsl #4
	lsls	r8, r4, #30
	lsls	r9, r5, #30
	orr	r9, r9, r4, lsr #2
	orr	r8, r8, r5, lsr #2
	eor	r6, r6, r8
	eor	r7, r7, r9
	lsls	r8, r4, #25
	lsls	r9, r5, #25
	orr	r9, r9, r4, lsr #7
	orr	r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #24]
	ldr	r5, [r0, #28]
#else
	ldrd	r4, r5, [r0, #24]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Sigma0(a)
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #32]
	ldr	r9, [r0, #36]
#else
	ldrd	r8, r9, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #40]
	ldr	r7, [r0, #44]
#else
	ldrd	r6, r7, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #24]
	str	r5, [r0, #28]
#else
	strd	r4, r5, [r0, #24]
#endif
	# Maj(a, b, c) = ((a ^ b) & (b ^ c)) ^ b, where r10:r11 holds
	# b ^ c (the previous round's a ^ b)
	eor	r8, r8, r6
	eor	r9, r9, r7
	and	r10, r10, r8
	and	r11, r11, r9
	eor	r10, r10, r6
	eor	r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #24]
	ldr	r7, [r0, #28]
#else
	ldrd	r6, r7, [r0, #24]
#endif
	# h += Maj(a, b, c)
	adds	r6, r6, r10
	adc	r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r6, [r0, #24]
	str	r7, [r0, #28]
#else
	strd	r6, r7, [r0, #24]
#endif
	# save a ^ b for the next round's Maj
	mov	r10, r8
	mov	r11, r9
	# Round 13
	# SHA-512 compression round (structure matches FIPS 180-4): 64-bit
	# state words are held as 32-bit lo/hi register pairs, so each 64-bit
	# rotate below is assembled from two shifted halves OR-ed together.
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #56]
	ldr	r5, [r0, #60]
#else
	ldrd	r4, r5, [r0, #56]
#endif
	# Sigma1(e) = ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41) -> r6:r7
	lsrs	r6, r4, #14
	lsrs	r7, r5, #14
	orr	r7, r7, r4, lsl #18
	orr	r6, r6, r5, lsl #18
	lsrs	r8, r4, #18
	lsrs	r9, r5, #18
	orr	r9, r9, r4, lsl #14
	orr	r8, r8, r5, lsl #14
	eor	r6, r6, r8
	eor	r7, r7, r9
	lsls	r8, r4, #23
	lsls	r9, r5, #23
	orr	r9, r9, r4, lsr #9
	orr	r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #16]
	ldr	r5, [r0, #20]
#else
	ldrd	r4, r5, [r0, #16]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Sigma1(e)
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #16]
	str	r5, [r0, #20]
#else
	strd	r4, r5, [r0, #16]
#endif
	# Ch(e, f, g) = ((f ^ g) & e) ^ g -> r6:r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #56]
	ldr	r5, [r0, #60]
#else
	ldrd	r4, r5, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0]
	ldr	r7, [r0, #4]
#else
	ldrd	r6, r7, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #8]
	ldr	r9, [r0, #12]
#else
	ldrd	r8, r9, [r0, #8]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	and	r6, r6, r4
	and	r7, r7, r5
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Ch(e, f, g) + W[13] (on stack) + K[13] (table at r3)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #16]
	ldr	r5, [r0, #20]
#else
	ldrd	r4, r5, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [sp, #104]
	ldr	r9, [sp, #108]
#else
	ldrd	r8, r9, [sp, #104]
#endif
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r3, #104]
	ldr	r7, [r3, #108]
#else
	ldrd	r6, r7, [r3, #104]
#endif
	adds	r4, r4, r8
	adc	r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #48]
	ldr	r9, [r0, #52]
#else
	ldrd	r8, r9, [r0, #48]
#endif
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #16]
	str	r5, [r0, #20]
#else
	strd	r4, r5, [r0, #16]
#endif
	# d += h
	adds	r8, r8, r4
	adc	r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #24]
	ldr	r5, [r0, #28]
#else
	ldrd	r4, r5, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r8, [r0, #48]
	str	r9, [r0, #52]
#else
	strd	r8, r9, [r0, #48]
#endif
	# Sigma0(a) = ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39) -> r6:r7
	lsrs	r6, r4, #28
	lsrs	r7, r5, #28
	orr	r7, r7, r4, lsl #4
	orr	r6, r6, r5, lsl #4
	lsls	r8, r4, #30
	lsls	r9, r5, #30
	orr	r9, r9, r4, lsr #2
	orr	r8, r8, r5, lsr #2
	eor	r6, r6, r8
	eor	r7, r7, r9
	lsls	r8, r4, #25
	lsls	r9, r5, #25
	orr	r9, r9, r4, lsr #7
	orr	r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #16]
	ldr	r5, [r0, #20]
#else
	ldrd	r4, r5, [r0, #16]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Sigma0(a)
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #24]
	ldr	r9, [r0, #28]
#else
	ldrd	r8, r9, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #32]
	ldr	r7, [r0, #36]
#else
	ldrd	r6, r7, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #16]
	str	r5, [r0, #20]
#else
	strd	r4, r5, [r0, #16]
#endif
	# Maj(a, b, c) = ((a ^ b) & (b ^ c)) ^ b, where r10:r11 holds
	# b ^ c (the previous round's a ^ b)
	eor	r8, r8, r6
	eor	r9, r9, r7
	and	r10, r10, r8
	and	r11, r11, r9
	eor	r10, r10, r6
	eor	r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #16]
	ldr	r7, [r0, #20]
#else
	ldrd	r6, r7, [r0, #16]
#endif
	# h += Maj(a, b, c)
	adds	r6, r6, r10
	adc	r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r6, [r0, #16]
	str	r7, [r0, #20]
#else
	strd	r6, r7, [r0, #16]
#endif
	# save a ^ b for the next round's Maj
	mov	r10, r8
	mov	r11, r9
	# Round 14
	# SHA-512 compression round (structure matches FIPS 180-4): 64-bit
	# state words are held as 32-bit lo/hi register pairs, so each 64-bit
	# rotate below is assembled from two shifted halves OR-ed together.
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #48]
	ldr	r5, [r0, #52]
#else
	ldrd	r4, r5, [r0, #48]
#endif
	# Sigma1(e) = ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41) -> r6:r7
	lsrs	r6, r4, #14
	lsrs	r7, r5, #14
	orr	r7, r7, r4, lsl #18
	orr	r6, r6, r5, lsl #18
	lsrs	r8, r4, #18
	lsrs	r9, r5, #18
	orr	r9, r9, r4, lsl #14
	orr	r8, r8, r5, lsl #14
	eor	r6, r6, r8
	eor	r7, r7, r9
	lsls	r8, r4, #23
	lsls	r9, r5, #23
	orr	r9, r9, r4, lsr #9
	orr	r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #8]
	ldr	r5, [r0, #12]
#else
	ldrd	r4, r5, [r0, #8]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Sigma1(e)
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #8]
	str	r5, [r0, #12]
#else
	strd	r4, r5, [r0, #8]
#endif
	# Ch(e, f, g) = ((f ^ g) & e) ^ g -> r6:r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #48]
	ldr	r5, [r0, #52]
#else
	ldrd	r4, r5, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #56]
	ldr	r7, [r0, #60]
#else
	ldrd	r6, r7, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0]
	ldr	r9, [r0, #4]
#else
	ldrd	r8, r9, [r0]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	and	r6, r6, r4
	and	r7, r7, r5
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Ch(e, f, g) + W[14] (on stack) + K[14] (table at r3)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #8]
	ldr	r5, [r0, #12]
#else
	ldrd	r4, r5, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [sp, #112]
	ldr	r9, [sp, #116]
#else
	ldrd	r8, r9, [sp, #112]
#endif
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r3, #112]
	ldr	r7, [r3, #116]
#else
	ldrd	r6, r7, [r3, #112]
#endif
	adds	r4, r4, r8
	adc	r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #40]
	ldr	r9, [r0, #44]
#else
	ldrd	r8, r9, [r0, #40]
#endif
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #8]
	str	r5, [r0, #12]
#else
	strd	r4, r5, [r0, #8]
#endif
	# d += h
	adds	r8, r8, r4
	adc	r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #16]
	ldr	r5, [r0, #20]
#else
	ldrd	r4, r5, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r8, [r0, #40]
	str	r9, [r0, #44]
#else
	strd	r8, r9, [r0, #40]
#endif
	# Sigma0(a) = ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39) -> r6:r7
	lsrs	r6, r4, #28
	lsrs	r7, r5, #28
	orr	r7, r7, r4, lsl #4
	orr	r6, r6, r5, lsl #4
	lsls	r8, r4, #30
	lsls	r9, r5, #30
	orr	r9, r9, r4, lsr #2
	orr	r8, r8, r5, lsr #2
	eor	r6, r6, r8
	eor	r7, r7, r9
	lsls	r8, r4, #25
	lsls	r9, r5, #25
	orr	r9, r9, r4, lsr #7
	orr	r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #8]
	ldr	r5, [r0, #12]
#else
	ldrd	r4, r5, [r0, #8]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Sigma0(a)
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #16]
	ldr	r9, [r0, #20]
#else
	ldrd	r8, r9, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #24]
	ldr	r7, [r0, #28]
#else
	ldrd	r6, r7, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0, #8]
	str	r5, [r0, #12]
#else
	strd	r4, r5, [r0, #8]
#endif
	# Maj(a, b, c) = ((a ^ b) & (b ^ c)) ^ b, where r10:r11 holds
	# b ^ c (the previous round's a ^ b)
	eor	r8, r8, r6
	eor	r9, r9, r7
	and	r10, r10, r8
	and	r11, r11, r9
	eor	r10, r10, r6
	eor	r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #8]
	ldr	r7, [r0, #12]
#else
	ldrd	r6, r7, [r0, #8]
#endif
	# h += Maj(a, b, c)
	adds	r6, r6, r10
	adc	r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r6, [r0, #8]
	str	r7, [r0, #12]
#else
	strd	r6, r7, [r0, #8]
#endif
	# save a ^ b for the next round's Maj
	mov	r10, r8
	mov	r11, r9
	# Round 15
	# SHA-512 compression round (structure matches FIPS 180-4): 64-bit
	# state words are held as 32-bit lo/hi register pairs, so each 64-bit
	# rotate below is assembled from two shifted halves OR-ed together.
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #40]
	ldr	r5, [r0, #44]
#else
	ldrd	r4, r5, [r0, #40]
#endif
	# Sigma1(e) = ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41) -> r6:r7
	lsrs	r6, r4, #14
	lsrs	r7, r5, #14
	orr	r7, r7, r4, lsl #18
	orr	r6, r6, r5, lsl #18
	lsrs	r8, r4, #18
	lsrs	r9, r5, #18
	orr	r9, r9, r4, lsl #14
	orr	r8, r8, r5, lsl #14
	eor	r6, r6, r8
	eor	r7, r7, r9
	lsls	r8, r4, #23
	lsls	r9, r5, #23
	orr	r9, r9, r4, lsr #9
	orr	r8, r8, r5, lsr #9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0]
	ldr	r5, [r0, #4]
#else
	ldrd	r4, r5, [r0]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Sigma1(e)
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0]
	str	r5, [r0, #4]
#else
	strd	r4, r5, [r0]
#endif
	# Ch(e, f, g) = ((f ^ g) & e) ^ g -> r6:r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #40]
	ldr	r5, [r0, #44]
#else
	ldrd	r4, r5, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #48]
	ldr	r7, [r0, #52]
#else
	ldrd	r6, r7, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #56]
	ldr	r9, [r0, #60]
#else
	ldrd	r8, r9, [r0, #56]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	and	r6, r6, r4
	and	r7, r7, r5
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Ch(e, f, g) + W[15] (on stack) + K[15] (table at r3)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0]
	ldr	r5, [r0, #4]
#else
	ldrd	r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [sp, #120]
	ldr	r9, [sp, #124]
#else
	ldrd	r8, r9, [sp, #120]
#endif
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r3, #120]
	ldr	r7, [r3, #124]
#else
	ldrd	r6, r7, [r3, #120]
#endif
	adds	r4, r4, r8
	adc	r5, r5, r9
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #32]
	ldr	r9, [r0, #36]
#else
	ldrd	r8, r9, [r0, #32]
#endif
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0]
	str	r5, [r0, #4]
#else
	strd	r4, r5, [r0]
#endif
	# d += h
	adds	r8, r8, r4
	adc	r9, r9, r5
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0, #8]
	ldr	r5, [r0, #12]
#else
	ldrd	r4, r5, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r8, [r0, #32]
	str	r9, [r0, #36]
#else
	strd	r8, r9, [r0, #32]
#endif
	# Sigma0(a) = ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39) -> r6:r7
	lsrs	r6, r4, #28
	lsrs	r7, r5, #28
	orr	r7, r7, r4, lsl #4
	orr	r6, r6, r5, lsl #4
	lsls	r8, r4, #30
	lsls	r9, r5, #30
	orr	r9, r9, r4, lsr #2
	orr	r8, r8, r5, lsr #2
	eor	r6, r6, r8
	eor	r7, r7, r9
	lsls	r8, r4, #25
	lsls	r9, r5, #25
	orr	r9, r9, r4, lsr #7
	orr	r8, r8, r5, lsr #7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r4, [r0]
	ldr	r5, [r0, #4]
#else
	ldrd	r4, r5, [r0]
#endif
	eor	r6, r6, r8
	eor	r7, r7, r9
	# h += Sigma0(a)
	adds	r4, r4, r6
	adc	r5, r5, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r8, [r0, #8]
	ldr	r9, [r0, #12]
#else
	ldrd	r8, r9, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0, #16]
	ldr	r7, [r0, #20]
#else
	ldrd	r6, r7, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r4, [r0]
	str	r5, [r0, #4]
#else
	strd	r4, r5, [r0]
#endif
	# Maj(a, b, c) = ((a ^ b) & (b ^ c)) ^ b, where r10:r11 holds
	# b ^ c (the previous round's a ^ b)
	eor	r8, r8, r6
	eor	r9, r9, r7
	and	r10, r10, r8
	and	r11, r11, r9
	eor	r10, r10, r6
	eor	r11, r11, r7
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	ldr	r6, [r0]
	ldr	r7, [r0, #4]
#else
	ldrd	r6, r7, [r0]
#endif
	# h += Maj(a, b, c)
	adds	r6, r6, r10
	adc	r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
	str	r6, [r0]
	str	r7, [r0, #4]
#else
	strd	r6, r7, [r0]
#endif
	# save a ^ b for the next round's Maj
	mov	r10, r8
	mov	r11, r9
# Add in digest from start
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #8]
ldr r7, [r0, #12]
#else
ldrd r6, r7, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #128]
ldr r9, [sp, #132]
#else
ldrd r8, r9, [sp, #128]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [sp, #136]
ldr r11, [sp, #140]
#else
ldrd r10, r11, [sp, #136]
#endif
adds r4, r4, r8
adc r5, r5, r9
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0]
str r5, [r0, #4]
#else
strd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #8]
str r7, [r0, #12]
#else
strd r6, r7, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #128]
str r5, [sp, #132]
#else
strd r4, r5, [sp, #128]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [sp, #136]
str r7, [sp, #140]
#else
strd r6, r7, [sp, #136]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #16]
ldr r5, [r0, #20]
#else
ldrd r4, r5, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #24]
ldr r7, [r0, #28]
#else
ldrd r6, r7, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #144]
ldr r9, [sp, #148]
#else
ldrd r8, r9, [sp, #144]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [sp, #152]
ldr r11, [sp, #156]
#else
ldrd r10, r11, [sp, #152]
#endif
adds r4, r4, r8
adc r5, r5, r9
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #16]
str r5, [r0, #20]
#else
strd r4, r5, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #24]
str r7, [r0, #28]
#else
strd r6, r7, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #144]
str r5, [sp, #148]
#else
strd r4, r5, [sp, #144]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [sp, #152]
str r7, [sp, #156]
#else
strd r6, r7, [sp, #152]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #32]
ldr r5, [r0, #36]
#else
ldrd r4, r5, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #40]
ldr r7, [r0, #44]
#else
ldrd r6, r7, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #160]
ldr r9, [sp, #164]
#else
ldrd r8, r9, [sp, #160]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [sp, #168]
ldr r11, [sp, #172]
#else
ldrd r10, r11, [sp, #168]
#endif
adds r4, r4, r8
adc r5, r5, r9
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #32]
str r5, [r0, #36]
#else
strd r4, r5, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #40]
str r7, [r0, #44]
#else
strd r6, r7, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #160]
str r5, [sp, #164]
#else
strd r4, r5, [sp, #160]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [sp, #168]
str r7, [sp, #172]
#else
strd r6, r7, [sp, #168]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #48]
ldr r5, [r0, #52]
#else
ldrd r4, r5, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #56]
ldr r7, [r0, #60]
#else
ldrd r6, r7, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #176]
ldr r9, [sp, #180]
#else
ldrd r8, r9, [sp, #176]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [sp, #184]
ldr r11, [sp, #188]
#else
ldrd r10, r11, [sp, #184]
#endif
adds r4, r4, r8
adc r5, r5, r9
adds r6, r6, r10
adc r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #48]
str r5, [r0, #52]
#else
strd r4, r5, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #56]
str r7, [r0, #60]
#else
strd r6, r7, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #176]
str r5, [sp, #180]
#else
strd r4, r5, [sp, #176]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [sp, #184]
str r7, [sp, #188]
#else
strd r6, r7, [sp, #184]
#endif
subs r2, r2, #0x80
sub r3, r3, #0x200
add r1, r1, #0x80
bne L_SHA512_transform_len_begin
eor r0, r0, r0
add sp, sp, #0xc0
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size Transform_Sha512_Len,.-Transform_Sha512_Len
#endif /* WOLFSSL_ARMASM_NO_NEON */
#ifndef WOLFSSL_ARMASM_NO_NEON
.text
# SHA-512 round constants K[0..79] (FIPS 180-4, section 4.2.3).
# Each 64-bit constant is emitted as two 32-bit .word values with the LOW
# word first (little-endian), so the round code below can consume one whole
# constant per round with: vld1.64 {d12}, [r3:64]!
# Total size: 80 constants * 8 bytes = 640 bytes (matches .size below).
# Kept in .text so the adr in Transform_Sha512_Len can reach it.
.type L_SHA512_transform_neon_len_k, %object
.size L_SHA512_transform_neon_len_k, 640
.align 4
L_SHA512_transform_neon_len_k:
# K[0]..K[7]
.word 0xd728ae22
.word 0x428a2f98
.word 0x23ef65cd
.word 0x71374491
.word 0xec4d3b2f
.word 0xb5c0fbcf
.word 0x8189dbbc
.word 0xe9b5dba5
.word 0xf348b538
.word 0x3956c25b
.word 0xb605d019
.word 0x59f111f1
.word 0xaf194f9b
.word 0x923f82a4
.word 0xda6d8118
.word 0xab1c5ed5
# K[8]..K[15]
.word 0xa3030242
.word 0xd807aa98
.word 0x45706fbe
.word 0x12835b01
.word 0x4ee4b28c
.word 0x243185be
.word 0xd5ffb4e2
.word 0x550c7dc3
.word 0xf27b896f
.word 0x72be5d74
.word 0x3b1696b1
.word 0x80deb1fe
.word 0x25c71235
.word 0x9bdc06a7
.word 0xcf692694
.word 0xc19bf174
# K[16]..K[23]
.word 0x9ef14ad2
.word 0xe49b69c1
.word 0x384f25e3
.word 0xefbe4786
.word 0x8b8cd5b5
.word 0xfc19dc6
.word 0x77ac9c65
.word 0x240ca1cc
.word 0x592b0275
.word 0x2de92c6f
.word 0x6ea6e483
.word 0x4a7484aa
.word 0xbd41fbd4
.word 0x5cb0a9dc
.word 0x831153b5
.word 0x76f988da
# K[24]..K[31]
.word 0xee66dfab
.word 0x983e5152
.word 0x2db43210
.word 0xa831c66d
.word 0x98fb213f
.word 0xb00327c8
.word 0xbeef0ee4
.word 0xbf597fc7
.word 0x3da88fc2
.word 0xc6e00bf3
.word 0x930aa725
.word 0xd5a79147
.word 0xe003826f
.word 0x6ca6351
.word 0xa0e6e70
.word 0x14292967
# K[32]..K[39]
.word 0x46d22ffc
.word 0x27b70a85
.word 0x5c26c926
.word 0x2e1b2138
.word 0x5ac42aed
.word 0x4d2c6dfc
.word 0x9d95b3df
.word 0x53380d13
.word 0x8baf63de
.word 0x650a7354
.word 0x3c77b2a8
.word 0x766a0abb
.word 0x47edaee6
.word 0x81c2c92e
.word 0x1482353b
.word 0x92722c85
# K[40]..K[47]
.word 0x4cf10364
.word 0xa2bfe8a1
.word 0xbc423001
.word 0xa81a664b
.word 0xd0f89791
.word 0xc24b8b70
.word 0x654be30
.word 0xc76c51a3
.word 0xd6ef5218
.word 0xd192e819
.word 0x5565a910
.word 0xd6990624
.word 0x5771202a
.word 0xf40e3585
.word 0x32bbd1b8
.word 0x106aa070
# K[48]..K[55]
.word 0xb8d2d0c8
.word 0x19a4c116
.word 0x5141ab53
.word 0x1e376c08
.word 0xdf8eeb99
.word 0x2748774c
.word 0xe19b48a8
.word 0x34b0bcb5
.word 0xc5c95a63
.word 0x391c0cb3
.word 0xe3418acb
.word 0x4ed8aa4a
.word 0x7763e373
.word 0x5b9cca4f
.word 0xd6b2b8a3
.word 0x682e6ff3
# K[56]..K[63]
.word 0x5defb2fc
.word 0x748f82ee
.word 0x43172f60
.word 0x78a5636f
.word 0xa1f0ab72
.word 0x84c87814
.word 0x1a6439ec
.word 0x8cc70208
.word 0x23631e28
.word 0x90befffa
.word 0xde82bde9
.word 0xa4506ceb
.word 0xb2c67915
.word 0xbef9a3f7
.word 0xe372532b
.word 0xc67178f2
# K[64]..K[71]
.word 0xea26619c
.word 0xca273ece
.word 0x21c0c207
.word 0xd186b8c7
.word 0xcde0eb1e
.word 0xeada7dd6
.word 0xee6ed178
.word 0xf57d4f7f
.word 0x72176fba
.word 0x6f067aa
.word 0xa2c898a6
.word 0xa637dc5
.word 0xbef90dae
.word 0x113f9804
.word 0x131c471b
.word 0x1b710b35
# K[72]..K[79]
.word 0x23047d84
.word 0x28db77f5
.word 0x40c72493
.word 0x32caab7b
.word 0x15c9bebc
.word 0x3c9ebe0a
.word 0x9c100d4c
.word 0x431d67c4
.word 0xcb3e42b6
.word 0x4cc5d4be
.word 0xfc657e2a
.word 0x597f299c
.word 0x3ad6faec
.word 0x5fcb6fab
.word 0x4a475817
.word 0x6c44198c
.text
.align 4
.fpu neon
.globl Transform_Sha512_Len
.type Transform_Sha512_Len, %function
Transform_Sha512_Len:
vpush {d8-d15}
adr r3, L_SHA512_transform_neon_len_k
# Load digest into working vars
vldm.64 r0, {d0-d7}
# Start of loop processing a block
L_SHA512_transform_neon_len_begin:
# Load W
vld1.8 {q8, q9}, [r1]!
vld1.8 {q10, q11}, [r1]!
vld1.8 {q12, q13}, [r1]!
vld1.8 {q14, q15}, [r1]!
#ifndef WOLFSSL_ARM_ARCH_NEON_64BIT
vrev64.8 q8, q8
vrev64.8 q9, q9
vrev64.8 q10, q10
vrev64.8 q11, q11
vrev64.8 q12, q12
vrev64.8 q13, q13
vrev64.8 q14, q14
vrev64.8 q15, q15
#else
vrev64.8 d16, d16
vrev64.8 d17, d17
vrev64.8 d18, d18
vrev64.8 d19, d19
vrev64.8 d20, d20
vrev64.8 d21, d21
vrev64.8 d22, d22
vrev64.8 d23, d23
vrev64.8 d24, d24
vrev64.8 d25, d25
vrev64.8 d26, d26
vrev64.8 d27, d27
vrev64.8 d28, d28
vrev64.8 d29, d29
vrev64.8 d30, d30
vrev64.8 d31, d31
#endif /* WOLFSSL_ARM_ARCH_NEON_64BIT */
mov r12, #4
# Start of 16 rounds
L_SHA512_transform_neon_len_start:
# Round 0
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d4, #50
vsri.u64 d8, d4, #14
vshl.u64 d9, d0, #36
vsri.u64 d9, d0, #28
vshl.u64 d10, d4, #46
vsri.u64 d10, d4, #18
vshl.u64 d11, d0, #30
vsri.u64 d11, d0, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d4, #23
vsri.u64 d10, d4, #41
vshl.u64 d11, d0, #25
vsri.u64 d11, d0, #39
veor d8, d10
veor d9, d11
vadd.i64 d7, d8
vadd.i64 d12, d16
vmov d8, d4
veor d10, d1, d2
vadd.i64 d7, d12
vbsl d8, d5, d6
vbsl d10, d0, d2
vadd.i64 d7, d8
vadd.i64 d10, d9
vadd.i64 d3, d7
vadd.i64 d7, d10
# Round 1
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d3, #50
vsri.u64 d8, d3, #14
vshl.u64 d9, d7, #36
vsri.u64 d9, d7, #28
vshl.u64 d10, d3, #46
vsri.u64 d10, d3, #18
vshl.u64 d11, d7, #30
vsri.u64 d11, d7, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d3, #23
vsri.u64 d10, d3, #41
vshl.u64 d11, d7, #25
vsri.u64 d11, d7, #39
veor d8, d10
veor d9, d11
vadd.i64 d6, d8
vadd.i64 d12, d17
vmov d8, d3
veor d10, d0, d1
vadd.i64 d6, d12
vbsl d8, d4, d5
vbsl d10, d7, d1
vadd.i64 d6, d8
vadd.i64 d10, d9
vadd.i64 d2, d6
vadd.i64 d6, d10
#ifndef WOLFSSL_ARM_ARCH_NEON_64BIT
# Calc new W[0]-W[1]
vext.8 q6, q8, q9, #8
vshl.u64 q4, q15, #45
vsri.u64 q4, q15, #19
vshl.u64 q5, q15, #3
vsri.u64 q5, q15, #61
veor q5, q4
vshr.u64 q4, q15, #6
veor q5, q4
vadd.i64 q8, q5
vext.8 q7, q12, q13, #8
vadd.i64 q8, q7
vshl.u64 q4, q6, #63
vsri.u64 q4, q6, #1
vshl.u64 q5, q6, #56
vsri.u64 q5, q6, #8
veor q5, q4
vshr.u64 q6, #7
veor q5, q6
vadd.i64 q8, q5
#else
# Calc new W[0]-W[1]
vmov d12, d17
vmov d13, d18
vshl.u64 d8, d30, #45
vshl.u64 d9, d31, #45
vsri.u64 d8, d30, #19
vsri.u64 d9, d31, #19
vshl.u64 d10, d30, #3
vshl.u64 d11, d31, #3
vsri.u64 d10, d30, #61
vsri.u64 d11, d31, #61
veor d10, d8
veor d11, d9
vshr.u64 d8, d30, #6
vshr.u64 d9, d31, #6
veor d10, d8
veor d11, d9
vadd.i64 d16, d10
vadd.i64 d17, d11
vmov d14, d25
vmov d15, d26
vadd.i64 d16, d14
vadd.i64 d17, d15
vshl.u64 d8, d12, #63
vshl.u64 d9, d13, #63
vsri.u64 d8, d12, #1
vsri.u64 d9, d13, #1
vshl.u64 d10, d12, #56
vshl.u64 d11, d13, #56
vsri.u64 d10, d12, #8
vsri.u64 d11, d13, #8
veor d10, d8
veor d11, d9
vshr.u64 d12, #7
vshr.u64 d13, #7
veor d10, d12
veor d11, d13
vadd.i64 d16, d10
vadd.i64 d17, d11
#endif /* WOLFSSL_ARM_ARCH_NEON_64BIT */
# Round 2
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d2, #50
vsri.u64 d8, d2, #14
vshl.u64 d9, d6, #36
vsri.u64 d9, d6, #28
vshl.u64 d10, d2, #46
vsri.u64 d10, d2, #18
vshl.u64 d11, d6, #30
vsri.u64 d11, d6, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d2, #23
vsri.u64 d10, d2, #41
vshl.u64 d11, d6, #25
vsri.u64 d11, d6, #39
veor d8, d10
veor d9, d11
vadd.i64 d5, d8
vadd.i64 d12, d18
vmov d8, d2
veor d10, d7, d0
vadd.i64 d5, d12
vbsl d8, d3, d4
vbsl d10, d6, d0
vadd.i64 d5, d8
vadd.i64 d10, d9
vadd.i64 d1, d5
vadd.i64 d5, d10
# Round 3
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d1, #50
vsri.u64 d8, d1, #14
vshl.u64 d9, d5, #36
vsri.u64 d9, d5, #28
vshl.u64 d10, d1, #46
vsri.u64 d10, d1, #18
vshl.u64 d11, d5, #30
vsri.u64 d11, d5, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d1, #23
vsri.u64 d10, d1, #41
vshl.u64 d11, d5, #25
vsri.u64 d11, d5, #39
veor d8, d10
veor d9, d11
vadd.i64 d4, d8
vadd.i64 d12, d19
vmov d8, d1
veor d10, d6, d7
vadd.i64 d4, d12
vbsl d8, d2, d3
vbsl d10, d5, d7
vadd.i64 d4, d8
vadd.i64 d10, d9
vadd.i64 d0, d4
vadd.i64 d4, d10
#ifndef WOLFSSL_ARM_ARCH_NEON_64BIT
# Calc new W[2]-W[3]
vext.8 q6, q9, q10, #8
vshl.u64 q4, q8, #45
vsri.u64 q4, q8, #19
vshl.u64 q5, q8, #3
vsri.u64 q5, q8, #61
veor q5, q4
vshr.u64 q4, q8, #6
veor q5, q4
vadd.i64 q9, q5
vext.8 q7, q13, q14, #8
vadd.i64 q9, q7
vshl.u64 q4, q6, #63
vsri.u64 q4, q6, #1
vshl.u64 q5, q6, #56
vsri.u64 q5, q6, #8
veor q5, q4
vshr.u64 q6, #7
veor q5, q6
vadd.i64 q9, q5
#else
# Calc new W[2]-W[3]
vmov d12, d19
vmov d13, d20
vshl.u64 d8, d16, #45
vshl.u64 d9, d17, #45
vsri.u64 d8, d16, #19
vsri.u64 d9, d17, #19
vshl.u64 d10, d16, #3
vshl.u64 d11, d17, #3
vsri.u64 d10, d16, #61
vsri.u64 d11, d17, #61
veor d10, d8
veor d11, d9
vshr.u64 d8, d16, #6
vshr.u64 d9, d17, #6
veor d10, d8
veor d11, d9
vadd.i64 d18, d10
vadd.i64 d19, d11
vmov d14, d27
vmov d15, d28
vadd.i64 d18, d14
vadd.i64 d19, d15
vshl.u64 d8, d12, #63
vshl.u64 d9, d13, #63
vsri.u64 d8, d12, #1
vsri.u64 d9, d13, #1
vshl.u64 d10, d12, #56
vshl.u64 d11, d13, #56
vsri.u64 d10, d12, #8
vsri.u64 d11, d13, #8
veor d10, d8
veor d11, d9
vshr.u64 d12, #7
vshr.u64 d13, #7
veor d10, d12
veor d11, d13
vadd.i64 d18, d10
vadd.i64 d19, d11
#endif /* WOLFSSL_ARM_ARCH_NEON_64BIT */
# Round 4
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d0, #50
vsri.u64 d8, d0, #14
vshl.u64 d9, d4, #36
vsri.u64 d9, d4, #28
vshl.u64 d10, d0, #46
vsri.u64 d10, d0, #18
vshl.u64 d11, d4, #30
vsri.u64 d11, d4, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d0, #23
vsri.u64 d10, d0, #41
vshl.u64 d11, d4, #25
vsri.u64 d11, d4, #39
veor d8, d10
veor d9, d11
vadd.i64 d3, d8
vadd.i64 d12, d20
vmov d8, d0
veor d10, d5, d6
vadd.i64 d3, d12
vbsl d8, d1, d2
vbsl d10, d4, d6
vadd.i64 d3, d8
vadd.i64 d10, d9
vadd.i64 d7, d3
vadd.i64 d3, d10
# Round 5
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d7, #50
vsri.u64 d8, d7, #14
vshl.u64 d9, d3, #36
vsri.u64 d9, d3, #28
vshl.u64 d10, d7, #46
vsri.u64 d10, d7, #18
vshl.u64 d11, d3, #30
vsri.u64 d11, d3, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d7, #23
vsri.u64 d10, d7, #41
vshl.u64 d11, d3, #25
vsri.u64 d11, d3, #39
veor d8, d10
veor d9, d11
vadd.i64 d2, d8
vadd.i64 d12, d21
vmov d8, d7
veor d10, d4, d5
vadd.i64 d2, d12
vbsl d8, d0, d1
vbsl d10, d3, d5
vadd.i64 d2, d8
vadd.i64 d10, d9
vadd.i64 d6, d2
vadd.i64 d2, d10
#ifndef WOLFSSL_ARM_ARCH_NEON_64BIT
# Calc new W[4]-W[5]
vext.8 q6, q10, q11, #8
vshl.u64 q4, q9, #45
vsri.u64 q4, q9, #19
vshl.u64 q5, q9, #3
vsri.u64 q5, q9, #61
veor q5, q4
vshr.u64 q4, q9, #6
veor q5, q4
vadd.i64 q10, q5
vext.8 q7, q14, q15, #8
vadd.i64 q10, q7
vshl.u64 q4, q6, #63
vsri.u64 q4, q6, #1
vshl.u64 q5, q6, #56
vsri.u64 q5, q6, #8
veor q5, q4
vshr.u64 q6, #7
veor q5, q6
vadd.i64 q10, q5
#else
# Calc new W[4]-W[5]
vmov d12, d21
vmov d13, d22
vshl.u64 d8, d18, #45
vshl.u64 d9, d19, #45
vsri.u64 d8, d18, #19
vsri.u64 d9, d19, #19
vshl.u64 d10, d18, #3
vshl.u64 d11, d19, #3
vsri.u64 d10, d18, #61
vsri.u64 d11, d19, #61
veor d10, d8
veor d11, d9
vshr.u64 d8, d18, #6
vshr.u64 d9, d19, #6
veor d10, d8
veor d11, d9
vadd.i64 d20, d10
vadd.i64 d21, d11
vmov d14, d29
vmov d15, d30
vadd.i64 d20, d14
vadd.i64 d21, d15
vshl.u64 d8, d12, #63
vshl.u64 d9, d13, #63
vsri.u64 d8, d12, #1
vsri.u64 d9, d13, #1
vshl.u64 d10, d12, #56
vshl.u64 d11, d13, #56
vsri.u64 d10, d12, #8
vsri.u64 d11, d13, #8
veor d10, d8
veor d11, d9
vshr.u64 d12, #7
vshr.u64 d13, #7
veor d10, d12
veor d11, d13
vadd.i64 d20, d10
vadd.i64 d21, d11
#endif /* WOLFSSL_ARM_ARCH_NEON_64BIT */
# Round 6
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d6, #50
vsri.u64 d8, d6, #14
vshl.u64 d9, d2, #36
vsri.u64 d9, d2, #28
vshl.u64 d10, d6, #46
vsri.u64 d10, d6, #18
vshl.u64 d11, d2, #30
vsri.u64 d11, d2, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d6, #23
vsri.u64 d10, d6, #41
vshl.u64 d11, d2, #25
vsri.u64 d11, d2, #39
veor d8, d10
veor d9, d11
vadd.i64 d1, d8
vadd.i64 d12, d22
vmov d8, d6
veor d10, d3, d4
vadd.i64 d1, d12
vbsl d8, d7, d0
vbsl d10, d2, d4
vadd.i64 d1, d8
vadd.i64 d10, d9
vadd.i64 d5, d1
vadd.i64 d1, d10
# Round 7
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d5, #50
vsri.u64 d8, d5, #14
vshl.u64 d9, d1, #36
vsri.u64 d9, d1, #28
vshl.u64 d10, d5, #46
vsri.u64 d10, d5, #18
vshl.u64 d11, d1, #30
vsri.u64 d11, d1, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d5, #23
vsri.u64 d10, d5, #41
vshl.u64 d11, d1, #25
vsri.u64 d11, d1, #39
veor d8, d10
veor d9, d11
vadd.i64 d0, d8
vadd.i64 d12, d23
vmov d8, d5
veor d10, d2, d3
vadd.i64 d0, d12
vbsl d8, d6, d7
vbsl d10, d1, d3
vadd.i64 d0, d8
vadd.i64 d10, d9
vadd.i64 d4, d0
vadd.i64 d0, d10
#ifndef WOLFSSL_ARM_ARCH_NEON_64BIT
# Calc new W[6]-W[7]
vext.8 q6, q11, q12, #8
vshl.u64 q4, q10, #45
vsri.u64 q4, q10, #19
vshl.u64 q5, q10, #3
vsri.u64 q5, q10, #61
veor q5, q4
vshr.u64 q4, q10, #6
veor q5, q4
vadd.i64 q11, q5
vext.8 q7, q15, q8, #8
vadd.i64 q11, q7
vshl.u64 q4, q6, #63
vsri.u64 q4, q6, #1
vshl.u64 q5, q6, #56
vsri.u64 q5, q6, #8
veor q5, q4
vshr.u64 q6, #7
veor q5, q6
vadd.i64 q11, q5
#else
# Calc new W[6]-W[7]
vmov d12, d23
vmov d13, d24
vshl.u64 d8, d20, #45
vshl.u64 d9, d21, #45
vsri.u64 d8, d20, #19
vsri.u64 d9, d21, #19
vshl.u64 d10, d20, #3
vshl.u64 d11, d21, #3
vsri.u64 d10, d20, #61
vsri.u64 d11, d21, #61
veor d10, d8
veor d11, d9
vshr.u64 d8, d20, #6
vshr.u64 d9, d21, #6
veor d10, d8
veor d11, d9
vadd.i64 d22, d10
vadd.i64 d23, d11
vmov d14, d31
vmov d15, d16
vadd.i64 d22, d14
vadd.i64 d23, d15
vshl.u64 d8, d12, #63
vshl.u64 d9, d13, #63
vsri.u64 d8, d12, #1
vsri.u64 d9, d13, #1
vshl.u64 d10, d12, #56
vshl.u64 d11, d13, #56
vsri.u64 d10, d12, #8
vsri.u64 d11, d13, #8
veor d10, d8
veor d11, d9
vshr.u64 d12, #7
vshr.u64 d13, #7
veor d10, d12
veor d11, d13
vadd.i64 d22, d10
vadd.i64 d23, d11
#endif /* WOLFSSL_ARM_ARCH_NEON_64BIT */
# Round 8
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d4, #50
vsri.u64 d8, d4, #14
vshl.u64 d9, d0, #36
vsri.u64 d9, d0, #28
vshl.u64 d10, d4, #46
vsri.u64 d10, d4, #18
vshl.u64 d11, d0, #30
vsri.u64 d11, d0, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d4, #23
vsri.u64 d10, d4, #41
vshl.u64 d11, d0, #25
vsri.u64 d11, d0, #39
veor d8, d10
veor d9, d11
vadd.i64 d7, d8
vadd.i64 d12, d24
vmov d8, d4
veor d10, d1, d2
vadd.i64 d7, d12
vbsl d8, d5, d6
vbsl d10, d0, d2
vadd.i64 d7, d8
vadd.i64 d10, d9
vadd.i64 d3, d7
vadd.i64 d7, d10
# Round 9
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d3, #50
vsri.u64 d8, d3, #14
vshl.u64 d9, d7, #36
vsri.u64 d9, d7, #28
vshl.u64 d10, d3, #46
vsri.u64 d10, d3, #18
vshl.u64 d11, d7, #30
vsri.u64 d11, d7, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d3, #23
vsri.u64 d10, d3, #41
vshl.u64 d11, d7, #25
vsri.u64 d11, d7, #39
veor d8, d10
veor d9, d11
vadd.i64 d6, d8
vadd.i64 d12, d25
vmov d8, d3
veor d10, d0, d1
vadd.i64 d6, d12
vbsl d8, d4, d5
vbsl d10, d7, d1
vadd.i64 d6, d8
vadd.i64 d10, d9
vadd.i64 d2, d6
vadd.i64 d6, d10
#ifndef WOLFSSL_ARM_ARCH_NEON_64BIT
# Calc new W[8]-W[9]
vext.8 q6, q12, q13, #8
vshl.u64 q4, q11, #45
vsri.u64 q4, q11, #19
vshl.u64 q5, q11, #3
vsri.u64 q5, q11, #61
veor q5, q4
vshr.u64 q4, q11, #6
veor q5, q4
vadd.i64 q12, q5
vext.8 q7, q8, q9, #8
vadd.i64 q12, q7
vshl.u64 q4, q6, #63
vsri.u64 q4, q6, #1
vshl.u64 q5, q6, #56
vsri.u64 q5, q6, #8
veor q5, q4
vshr.u64 q6, #7
veor q5, q6
vadd.i64 q12, q5
#else
# Calc new W[8]-W[9]
vmov d12, d25
vmov d13, d26
vshl.u64 d8, d22, #45
vshl.u64 d9, d23, #45
vsri.u64 d8, d22, #19
vsri.u64 d9, d23, #19
vshl.u64 d10, d22, #3
vshl.u64 d11, d23, #3
vsri.u64 d10, d22, #61
vsri.u64 d11, d23, #61
veor d10, d8
veor d11, d9
vshr.u64 d8, d22, #6
vshr.u64 d9, d23, #6
veor d10, d8
veor d11, d9
vadd.i64 d24, d10
vadd.i64 d25, d11
vmov d14, d17
vmov d15, d18
vadd.i64 d24, d14
vadd.i64 d25, d15
vshl.u64 d8, d12, #63
vshl.u64 d9, d13, #63
vsri.u64 d8, d12, #1
vsri.u64 d9, d13, #1
vshl.u64 d10, d12, #56
vshl.u64 d11, d13, #56
vsri.u64 d10, d12, #8
vsri.u64 d11, d13, #8
veor d10, d8
veor d11, d9
vshr.u64 d12, #7
vshr.u64 d13, #7
veor d10, d12
veor d11, d13
vadd.i64 d24, d10
vadd.i64 d25, d11
#endif /* WOLFSSL_ARM_ARCH_NEON_64BIT */
# Round 10
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d2, #50
vsri.u64 d8, d2, #14
vshl.u64 d9, d6, #36
vsri.u64 d9, d6, #28
vshl.u64 d10, d2, #46
vsri.u64 d10, d2, #18
vshl.u64 d11, d6, #30
vsri.u64 d11, d6, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d2, #23
vsri.u64 d10, d2, #41
vshl.u64 d11, d6, #25
vsri.u64 d11, d6, #39
veor d8, d10
veor d9, d11
vadd.i64 d5, d8
vadd.i64 d12, d26
vmov d8, d2
veor d10, d7, d0
vadd.i64 d5, d12
vbsl d8, d3, d4
vbsl d10, d6, d0
vadd.i64 d5, d8
vadd.i64 d10, d9
vadd.i64 d1, d5
vadd.i64 d5, d10
# Round 11
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d1, #50
vsri.u64 d8, d1, #14
vshl.u64 d9, d5, #36
vsri.u64 d9, d5, #28
vshl.u64 d10, d1, #46
vsri.u64 d10, d1, #18
vshl.u64 d11, d5, #30
vsri.u64 d11, d5, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d1, #23
vsri.u64 d10, d1, #41
vshl.u64 d11, d5, #25
vsri.u64 d11, d5, #39
veor d8, d10
veor d9, d11
vadd.i64 d4, d8
vadd.i64 d12, d27
vmov d8, d1
veor d10, d6, d7
vadd.i64 d4, d12
vbsl d8, d2, d3
vbsl d10, d5, d7
vadd.i64 d4, d8
vadd.i64 d10, d9
vadd.i64 d0, d4
vadd.i64 d4, d10
#ifndef WOLFSSL_ARM_ARCH_NEON_64BIT
# Calc new W[10]-W[11]
vext.8 q6, q13, q14, #8
vshl.u64 q4, q12, #45
vsri.u64 q4, q12, #19
vshl.u64 q5, q12, #3
vsri.u64 q5, q12, #61
veor q5, q4
vshr.u64 q4, q12, #6
veor q5, q4
vadd.i64 q13, q5
vext.8 q7, q9, q10, #8
vadd.i64 q13, q7
vshl.u64 q4, q6, #63
vsri.u64 q4, q6, #1
vshl.u64 q5, q6, #56
vsri.u64 q5, q6, #8
veor q5, q4
vshr.u64 q6, #7
veor q5, q6
vadd.i64 q13, q5
#else
# Calc new W[10]-W[11]
vmov d12, d27
vmov d13, d28
vshl.u64 d8, d24, #45
vshl.u64 d9, d25, #45
vsri.u64 d8, d24, #19
vsri.u64 d9, d25, #19
vshl.u64 d10, d24, #3
vshl.u64 d11, d25, #3
vsri.u64 d10, d24, #61
vsri.u64 d11, d25, #61
veor d10, d8
veor d11, d9
vshr.u64 d8, d24, #6
vshr.u64 d9, d25, #6
veor d10, d8
veor d11, d9
vadd.i64 d26, d10
vadd.i64 d27, d11
vmov d14, d19
vmov d15, d20
vadd.i64 d26, d14
vadd.i64 d27, d15
vshl.u64 d8, d12, #63
vshl.u64 d9, d13, #63
vsri.u64 d8, d12, #1
vsri.u64 d9, d13, #1
vshl.u64 d10, d12, #56
vshl.u64 d11, d13, #56
vsri.u64 d10, d12, #8
vsri.u64 d11, d13, #8
veor d10, d8
veor d11, d9
vshr.u64 d12, #7
vshr.u64 d13, #7
veor d10, d12
veor d11, d13
vadd.i64 d26, d10
vadd.i64 d27, d11
#endif /* WOLFSSL_ARM_ARCH_NEON_64BIT */
# Round 12
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d0, #50
vsri.u64 d8, d0, #14
vshl.u64 d9, d4, #36
vsri.u64 d9, d4, #28
vshl.u64 d10, d0, #46
vsri.u64 d10, d0, #18
vshl.u64 d11, d4, #30
vsri.u64 d11, d4, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d0, #23
vsri.u64 d10, d0, #41
vshl.u64 d11, d4, #25
vsri.u64 d11, d4, #39
veor d8, d10
veor d9, d11
vadd.i64 d3, d8
vadd.i64 d12, d28
vmov d8, d0
veor d10, d5, d6
vadd.i64 d3, d12
vbsl d8, d1, d2
vbsl d10, d4, d6
vadd.i64 d3, d8
vadd.i64 d10, d9
vadd.i64 d7, d3
vadd.i64 d3, d10
# Round 13
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d7, #50
vsri.u64 d8, d7, #14
vshl.u64 d9, d3, #36
vsri.u64 d9, d3, #28
vshl.u64 d10, d7, #46
vsri.u64 d10, d7, #18
vshl.u64 d11, d3, #30
vsri.u64 d11, d3, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d7, #23
vsri.u64 d10, d7, #41
vshl.u64 d11, d3, #25
vsri.u64 d11, d3, #39
veor d8, d10
veor d9, d11
vadd.i64 d2, d8
vadd.i64 d12, d29
vmov d8, d7
veor d10, d4, d5
vadd.i64 d2, d12
vbsl d8, d0, d1
vbsl d10, d3, d5
vadd.i64 d2, d8
vadd.i64 d10, d9
vadd.i64 d6, d2
vadd.i64 d2, d10
#ifndef WOLFSSL_ARM_ARCH_NEON_64BIT
# Calc new W[12]-W[13]
vext.8 q6, q14, q15, #8
vshl.u64 q4, q13, #45
vsri.u64 q4, q13, #19
vshl.u64 q5, q13, #3
vsri.u64 q5, q13, #61
veor q5, q4
vshr.u64 q4, q13, #6
veor q5, q4
vadd.i64 q14, q5
vext.8 q7, q10, q11, #8
vadd.i64 q14, q7
vshl.u64 q4, q6, #63
vsri.u64 q4, q6, #1
vshl.u64 q5, q6, #56
vsri.u64 q5, q6, #8
veor q5, q4
vshr.u64 q6, #7
veor q5, q6
vadd.i64 q14, q5
#else
# Calc new W[12]-W[13]
vmov d12, d29
vmov d13, d30
vshl.u64 d8, d26, #45
vshl.u64 d9, d27, #45
vsri.u64 d8, d26, #19
vsri.u64 d9, d27, #19
vshl.u64 d10, d26, #3
vshl.u64 d11, d27, #3
vsri.u64 d10, d26, #61
vsri.u64 d11, d27, #61
veor d10, d8
veor d11, d9
vshr.u64 d8, d26, #6
vshr.u64 d9, d27, #6
veor d10, d8
veor d11, d9
vadd.i64 d28, d10
vadd.i64 d29, d11
vmov d14, d21
vmov d15, d22
vadd.i64 d28, d14
vadd.i64 d29, d15
vshl.u64 d8, d12, #63
vshl.u64 d9, d13, #63
vsri.u64 d8, d12, #1
vsri.u64 d9, d13, #1
vshl.u64 d10, d12, #56
vshl.u64 d11, d13, #56
vsri.u64 d10, d12, #8
vsri.u64 d11, d13, #8
veor d10, d8
veor d11, d9
vshr.u64 d12, #7
vshr.u64 d13, #7
veor d10, d12
veor d11, d13
vadd.i64 d28, d10
vadd.i64 d29, d11
#endif /* WOLFSSL_ARM_ARCH_NEON_64BIT */
# Round 14
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d6, #50
vsri.u64 d8, d6, #14
vshl.u64 d9, d2, #36
vsri.u64 d9, d2, #28
vshl.u64 d10, d6, #46
vsri.u64 d10, d6, #18
vshl.u64 d11, d2, #30
vsri.u64 d11, d2, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d6, #23
vsri.u64 d10, d6, #41
vshl.u64 d11, d2, #25
vsri.u64 d11, d2, #39
veor d8, d10
veor d9, d11
vadd.i64 d1, d8
vadd.i64 d12, d30
vmov d8, d6
veor d10, d3, d4
vadd.i64 d1, d12
vbsl d8, d7, d0
vbsl d10, d2, d4
vadd.i64 d1, d8
vadd.i64 d10, d9
vadd.i64 d5, d1
vadd.i64 d1, d10
# Round 15
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d5, #50
vsri.u64 d8, d5, #14
vshl.u64 d9, d1, #36
vsri.u64 d9, d1, #28
vshl.u64 d10, d5, #46
vsri.u64 d10, d5, #18
vshl.u64 d11, d1, #30
vsri.u64 d11, d1, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d5, #23
vsri.u64 d10, d5, #41
vshl.u64 d11, d1, #25
vsri.u64 d11, d1, #39
veor d8, d10
veor d9, d11
vadd.i64 d0, d8
vadd.i64 d12, d31
vmov d8, d5
veor d10, d2, d3
vadd.i64 d0, d12
vbsl d8, d6, d7
vbsl d10, d1, d3
vadd.i64 d0, d8
vadd.i64 d10, d9
vadd.i64 d4, d0
vadd.i64 d0, d10
#ifndef WOLFSSL_ARM_ARCH_NEON_64BIT
# Calc new W[14]-W[15]
vext.8 q6, q15, q8, #8
vshl.u64 q4, q14, #45
vsri.u64 q4, q14, #19
vshl.u64 q5, q14, #3
vsri.u64 q5, q14, #61
veor q5, q4
vshr.u64 q4, q14, #6
veor q5, q4
vadd.i64 q15, q5
vext.8 q7, q11, q12, #8
vadd.i64 q15, q7
vshl.u64 q4, q6, #63
vsri.u64 q4, q6, #1
vshl.u64 q5, q6, #56
vsri.u64 q5, q6, #8
veor q5, q4
vshr.u64 q6, #7
veor q5, q6
vadd.i64 q15, q5
#else
# Calc new W[14]-W[15]
vmov d12, d31
vmov d13, d16
vshl.u64 d8, d28, #45
vshl.u64 d9, d29, #45
vsri.u64 d8, d28, #19
vsri.u64 d9, d29, #19
vshl.u64 d10, d28, #3
vshl.u64 d11, d29, #3
vsri.u64 d10, d28, #61
vsri.u64 d11, d29, #61
veor d10, d8
veor d11, d9
vshr.u64 d8, d28, #6
vshr.u64 d9, d29, #6
veor d10, d8
veor d11, d9
vadd.i64 d30, d10
vadd.i64 d31, d11
vmov d14, d23
vmov d15, d24
vadd.i64 d30, d14
vadd.i64 d31, d15
vshl.u64 d8, d12, #63
vshl.u64 d9, d13, #63
vsri.u64 d8, d12, #1
vsri.u64 d9, d13, #1
vshl.u64 d10, d12, #56
vshl.u64 d11, d13, #56
vsri.u64 d10, d12, #8
vsri.u64 d11, d13, #8
veor d10, d8
veor d11, d9
vshr.u64 d12, #7
vshr.u64 d13, #7
veor d10, d12
veor d11, d13
vadd.i64 d30, d10
vadd.i64 d31, d11
#endif /* WOLFSSL_ARM_ARCH_NEON_64BIT */
subs r12, r12, #1
bne L_SHA512_transform_neon_len_start
# Round 0
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d4, #50
vsri.u64 d8, d4, #14
vshl.u64 d9, d0, #36
vsri.u64 d9, d0, #28
vshl.u64 d10, d4, #46
vsri.u64 d10, d4, #18
vshl.u64 d11, d0, #30
vsri.u64 d11, d0, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d4, #23
vsri.u64 d10, d4, #41
vshl.u64 d11, d0, #25
vsri.u64 d11, d0, #39
veor d8, d10
veor d9, d11
vadd.i64 d7, d8
vadd.i64 d12, d16
vmov d8, d4
veor d10, d1, d2
vadd.i64 d7, d12
vbsl d8, d5, d6
vbsl d10, d0, d2
vadd.i64 d7, d8
vadd.i64 d10, d9
vadd.i64 d3, d7
vadd.i64 d7, d10
# Round 1
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d3, #50
vsri.u64 d8, d3, #14
vshl.u64 d9, d7, #36
vsri.u64 d9, d7, #28
vshl.u64 d10, d3, #46
vsri.u64 d10, d3, #18
vshl.u64 d11, d7, #30
vsri.u64 d11, d7, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d3, #23
vsri.u64 d10, d3, #41
vshl.u64 d11, d7, #25
vsri.u64 d11, d7, #39
veor d8, d10
veor d9, d11
vadd.i64 d6, d8
vadd.i64 d12, d17
vmov d8, d3
veor d10, d0, d1
vadd.i64 d6, d12
vbsl d8, d4, d5
vbsl d10, d7, d1
vadd.i64 d6, d8
vadd.i64 d10, d9
vadd.i64 d2, d6
vadd.i64 d6, d10
# Round 2
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d2, #50
vsri.u64 d8, d2, #14
vshl.u64 d9, d6, #36
vsri.u64 d9, d6, #28
vshl.u64 d10, d2, #46
vsri.u64 d10, d2, #18
vshl.u64 d11, d6, #30
vsri.u64 d11, d6, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d2, #23
vsri.u64 d10, d2, #41
vshl.u64 d11, d6, #25
vsri.u64 d11, d6, #39
veor d8, d10
veor d9, d11
vadd.i64 d5, d8
vadd.i64 d12, d18
vmov d8, d2
veor d10, d7, d0
vadd.i64 d5, d12
vbsl d8, d3, d4
vbsl d10, d6, d0
vadd.i64 d5, d8
vadd.i64 d10, d9
vadd.i64 d1, d5
vadd.i64 d5, d10
# Round 3
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d1, #50
vsri.u64 d8, d1, #14
vshl.u64 d9, d5, #36
vsri.u64 d9, d5, #28
vshl.u64 d10, d1, #46
vsri.u64 d10, d1, #18
vshl.u64 d11, d5, #30
vsri.u64 d11, d5, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d1, #23
vsri.u64 d10, d1, #41
vshl.u64 d11, d5, #25
vsri.u64 d11, d5, #39
veor d8, d10
veor d9, d11
vadd.i64 d4, d8
vadd.i64 d12, d19
vmov d8, d1
veor d10, d6, d7
vadd.i64 d4, d12
vbsl d8, d2, d3
vbsl d10, d5, d7
vadd.i64 d4, d8
vadd.i64 d10, d9
vadd.i64 d0, d4
vadd.i64 d4, d10
# Round 4
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d0, #50
vsri.u64 d8, d0, #14
vshl.u64 d9, d4, #36
vsri.u64 d9, d4, #28
vshl.u64 d10, d0, #46
vsri.u64 d10, d0, #18
vshl.u64 d11, d4, #30
vsri.u64 d11, d4, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d0, #23
vsri.u64 d10, d0, #41
vshl.u64 d11, d4, #25
vsri.u64 d11, d4, #39
veor d8, d10
veor d9, d11
vadd.i64 d3, d8
vadd.i64 d12, d20
vmov d8, d0
veor d10, d5, d6
vadd.i64 d3, d12
vbsl d8, d1, d2
vbsl d10, d4, d6
vadd.i64 d3, d8
vadd.i64 d10, d9
vadd.i64 d7, d3
vadd.i64 d3, d10
# Round 5
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d7, #50
vsri.u64 d8, d7, #14
vshl.u64 d9, d3, #36
vsri.u64 d9, d3, #28
vshl.u64 d10, d7, #46
vsri.u64 d10, d7, #18
vshl.u64 d11, d3, #30
vsri.u64 d11, d3, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d7, #23
vsri.u64 d10, d7, #41
vshl.u64 d11, d3, #25
vsri.u64 d11, d3, #39
veor d8, d10
veor d9, d11
vadd.i64 d2, d8
vadd.i64 d12, d21
vmov d8, d7
veor d10, d4, d5
vadd.i64 d2, d12
vbsl d8, d0, d1
vbsl d10, d3, d5
vadd.i64 d2, d8
vadd.i64 d10, d9
vadd.i64 d6, d2
vadd.i64 d2, d10
# Round 6
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d6, #50
vsri.u64 d8, d6, #14
vshl.u64 d9, d2, #36
vsri.u64 d9, d2, #28
vshl.u64 d10, d6, #46
vsri.u64 d10, d6, #18
vshl.u64 d11, d2, #30
vsri.u64 d11, d2, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d6, #23
vsri.u64 d10, d6, #41
vshl.u64 d11, d2, #25
vsri.u64 d11, d2, #39
veor d8, d10
veor d9, d11
vadd.i64 d1, d8
vadd.i64 d12, d22
vmov d8, d6
veor d10, d3, d4
vadd.i64 d1, d12
vbsl d8, d7, d0
vbsl d10, d2, d4
vadd.i64 d1, d8
vadd.i64 d10, d9
vadd.i64 d5, d1
vadd.i64 d1, d10
# Round 7
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d5, #50
vsri.u64 d8, d5, #14
vshl.u64 d9, d1, #36
vsri.u64 d9, d1, #28
vshl.u64 d10, d5, #46
vsri.u64 d10, d5, #18
vshl.u64 d11, d1, #30
vsri.u64 d11, d1, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d5, #23
vsri.u64 d10, d5, #41
vshl.u64 d11, d1, #25
vsri.u64 d11, d1, #39
veor d8, d10
veor d9, d11
vadd.i64 d0, d8
vadd.i64 d12, d23
vmov d8, d5
veor d10, d2, d3
vadd.i64 d0, d12
vbsl d8, d6, d7
vbsl d10, d1, d3
vadd.i64 d0, d8
vadd.i64 d10, d9
vadd.i64 d4, d0
vadd.i64 d0, d10
# Round 8
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d4, #50
vsri.u64 d8, d4, #14
vshl.u64 d9, d0, #36
vsri.u64 d9, d0, #28
vshl.u64 d10, d4, #46
vsri.u64 d10, d4, #18
vshl.u64 d11, d0, #30
vsri.u64 d11, d0, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d4, #23
vsri.u64 d10, d4, #41
vshl.u64 d11, d0, #25
vsri.u64 d11, d0, #39
veor d8, d10
veor d9, d11
vadd.i64 d7, d8
vadd.i64 d12, d24
vmov d8, d4
veor d10, d1, d2
vadd.i64 d7, d12
vbsl d8, d5, d6
vbsl d10, d0, d2
vadd.i64 d7, d8
vadd.i64 d10, d9
vadd.i64 d3, d7
vadd.i64 d7, d10
# Round 9
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d3, #50
vsri.u64 d8, d3, #14
vshl.u64 d9, d7, #36
vsri.u64 d9, d7, #28
vshl.u64 d10, d3, #46
vsri.u64 d10, d3, #18
vshl.u64 d11, d7, #30
vsri.u64 d11, d7, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d3, #23
vsri.u64 d10, d3, #41
vshl.u64 d11, d7, #25
vsri.u64 d11, d7, #39
veor d8, d10
veor d9, d11
vadd.i64 d6, d8
vadd.i64 d12, d25
vmov d8, d3
veor d10, d0, d1
vadd.i64 d6, d12
vbsl d8, d4, d5
vbsl d10, d7, d1
vadd.i64 d6, d8
vadd.i64 d10, d9
vadd.i64 d2, d6
vadd.i64 d6, d10
# Round 10
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d2, #50
vsri.u64 d8, d2, #14
vshl.u64 d9, d6, #36
vsri.u64 d9, d6, #28
vshl.u64 d10, d2, #46
vsri.u64 d10, d2, #18
vshl.u64 d11, d6, #30
vsri.u64 d11, d6, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d2, #23
vsri.u64 d10, d2, #41
vshl.u64 d11, d6, #25
vsri.u64 d11, d6, #39
veor d8, d10
veor d9, d11
vadd.i64 d5, d8
vadd.i64 d12, d26
vmov d8, d2
veor d10, d7, d0
vadd.i64 d5, d12
vbsl d8, d3, d4
vbsl d10, d6, d0
vadd.i64 d5, d8
vadd.i64 d10, d9
vadd.i64 d1, d5
vadd.i64 d5, d10
# Round 11
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d1, #50
vsri.u64 d8, d1, #14
vshl.u64 d9, d5, #36
vsri.u64 d9, d5, #28
vshl.u64 d10, d1, #46
vsri.u64 d10, d1, #18
vshl.u64 d11, d5, #30
vsri.u64 d11, d5, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d1, #23
vsri.u64 d10, d1, #41
vshl.u64 d11, d5, #25
vsri.u64 d11, d5, #39
veor d8, d10
veor d9, d11
vadd.i64 d4, d8
vadd.i64 d12, d27
vmov d8, d1
veor d10, d6, d7
vadd.i64 d4, d12
vbsl d8, d2, d3
vbsl d10, d5, d7
vadd.i64 d4, d8
vadd.i64 d10, d9
vadd.i64 d0, d4
vadd.i64 d4, d10
# Round 12
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d0, #50
vsri.u64 d8, d0, #14
vshl.u64 d9, d4, #36
vsri.u64 d9, d4, #28
vshl.u64 d10, d0, #46
vsri.u64 d10, d0, #18
vshl.u64 d11, d4, #30
vsri.u64 d11, d4, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d0, #23
vsri.u64 d10, d0, #41
vshl.u64 d11, d4, #25
vsri.u64 d11, d4, #39
veor d8, d10
veor d9, d11
vadd.i64 d3, d8
vadd.i64 d12, d28
vmov d8, d0
veor d10, d5, d6
vadd.i64 d3, d12
vbsl d8, d1, d2
vbsl d10, d4, d6
vadd.i64 d3, d8
vadd.i64 d10, d9
vadd.i64 d7, d3
vadd.i64 d3, d10
# Round 13
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d7, #50
vsri.u64 d8, d7, #14
vshl.u64 d9, d3, #36
vsri.u64 d9, d3, #28
vshl.u64 d10, d7, #46
vsri.u64 d10, d7, #18
vshl.u64 d11, d3, #30
vsri.u64 d11, d3, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d7, #23
vsri.u64 d10, d7, #41
vshl.u64 d11, d3, #25
vsri.u64 d11, d3, #39
veor d8, d10
veor d9, d11
vadd.i64 d2, d8
vadd.i64 d12, d29
vmov d8, d7
veor d10, d4, d5
vadd.i64 d2, d12
vbsl d8, d0, d1
vbsl d10, d3, d5
vadd.i64 d2, d8
vadd.i64 d10, d9
vadd.i64 d6, d2
vadd.i64 d2, d10
# Round 14
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d6, #50
vsri.u64 d8, d6, #14
vshl.u64 d9, d2, #36
vsri.u64 d9, d2, #28
vshl.u64 d10, d6, #46
vsri.u64 d10, d6, #18
vshl.u64 d11, d2, #30
vsri.u64 d11, d2, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d6, #23
vsri.u64 d10, d6, #41
vshl.u64 d11, d2, #25
vsri.u64 d11, d2, #39
veor d8, d10
veor d9, d11
vadd.i64 d1, d8
vadd.i64 d12, d30
vmov d8, d6
veor d10, d3, d4
vadd.i64 d1, d12
vbsl d8, d7, d0
vbsl d10, d2, d4
vadd.i64 d1, d8
vadd.i64 d10, d9
vadd.i64 d5, d1
vadd.i64 d1, d10
# Round 15
vld1.64 {d12}, [r3:64]!
vshl.u64 d8, d5, #50
vsri.u64 d8, d5, #14
vshl.u64 d9, d1, #36
vsri.u64 d9, d1, #28
vshl.u64 d10, d5, #46
vsri.u64 d10, d5, #18
vshl.u64 d11, d1, #30
vsri.u64 d11, d1, #34
veor d8, d10
veor d9, d11
vshl.u64 d10, d5, #23
vsri.u64 d10, d5, #41
vshl.u64 d11, d1, #25
vsri.u64 d11, d1, #39
veor d8, d10
veor d9, d11
vadd.i64 d0, d8
vadd.i64 d12, d31
vmov d8, d5
veor d10, d2, d3
vadd.i64 d0, d12
vbsl d8, d6, d7
vbsl d10, d1, d3
vadd.i64 d0, d8
vadd.i64 d10, d9
vadd.i64 d4, d0
vadd.i64 d0, d10
# Add in digest from start
vldm.64 r0, {d8-d15}
#ifndef WOLFSSL_ARM_ARCH_NEON_64BIT
vadd.i64 q0, q0, q4
vadd.i64 q1, q1, q5
vadd.i64 q2, q2, q6
vadd.i64 q3, q3, q7
#else
vadd.i64 d0, d0, d8
vadd.i64 d1, d1, d9
vadd.i64 d2, d2, d10
vadd.i64 d3, d3, d11
vadd.i64 d4, d4, d12
vadd.i64 d5, d5, d13
vadd.i64 d6, d6, d14
vadd.i64 d7, d7, d15
#endif /* WOLFSSL_ARM_ARCH_NEON_64BIT */
vstm.64 r0, {d0-d7}
subs r2, r2, #0x80
sub r3, r3, #0x280
bne L_SHA512_transform_neon_len_begin
vpop {d8-d15}
bx lr
.size Transform_Sha512_Len,.-Transform_Sha512_Len
#endif /* !WOLFSSL_ARMASM_NO_NEON */
#endif /* WOLFSSL_SHA512 */
#endif /* !__aarch64__ && __arm__ && !__thumb__ */
#endif /* WOLFSSL_ARMASM */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
#endif /* !WOLFSSL_ARMASM_INLINE */
|
aenu1/aps3e
| 6,374
|
app/src/main/cpp/rpcs3/3rdparty/wolfssl/wolfssl/wolfcrypt/src/port/arm/armv8-sha3-asm.S
|
/* armv8-sha3-asm
*
* Copyright (C) 2006-2023 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif /* HAVE_CONFIG_H */
#include <wolfssl/wolfcrypt/settings.h>
/* Generated using (from wolfssl):
* cd ../scripts
* ruby ./sha3/sha3.rb arm64 ../wolfssl/wolfcrypt/src/port/arm/armv8-sha3-asm.S
*/
#ifdef WOLFSSL_ARMASM
#ifdef __aarch64__
#ifndef WOLFSSL_ARMASM_INLINE
#ifdef WOLFSSL_SHA3
#ifdef WOLFSSL_ARMASM_CRYPTO_SHA3
# Keccak-f[1600] round constants RC[0..23], one 64-bit value per round.
# Each is XORed into lane A[0][0] by the iota step of BlockSha3 below
# (loaded with ld1r, one constant per loop iteration).
#ifndef __APPLE__
.text
.type L_SHA3_transform_crypto_r, %object
.section .rodata
.size L_SHA3_transform_crypto_r, 192
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 3
#else
.p2align 3
#endif /* __APPLE__ */
L_SHA3_transform_crypto_r:
.xword 0x1
.xword 0x8082
.xword 0x800000000000808a
.xword 0x8000000080008000
.xword 0x808b
.xword 0x80000001
.xword 0x8000000080008081
.xword 0x8000000000008009
.xword 0x8a
.xword 0x88
.xword 0x80008009
.xword 0x8000000a
.xword 0x8000808b
.xword 0x800000000000008b
.xword 0x8000000000008089
.xword 0x8000000000008003
.xword 0x8000000000008002
.xword 0x8000000000000080
.xword 0x800a
.xword 0x800000008000000a
.xword 0x8000000080008081
.xword 0x8000000000008080
.xword 0x80000001
.xword 0x8000000080008008
# void BlockSha3(word64* state)
# Apply the 24 rounds of the Keccak-f[1600] permutation, in place, to the
# 25-lane (25 x 64-bit, 200-byte) state pointed to by x0, using the
# Armv8.2-A SHA3 extension instructions EOR3, RAX1, XAR and BCAX.
# In:      x0 = pointer to the state lanes A[0]..A[24]
# Out:     state updated in place; x0 preserved on return
# Clobber: x1, x2, v0-v31 (low halves of v8-v15 saved/restored per AAPCS64)
# Stack:   80 bytes (x29/x30 pair + d8-d15)
#ifndef __APPLE__
.text
.globl BlockSha3
.type BlockSha3,@function
.align 2
BlockSha3:
#else
.section __TEXT,__text
.globl _BlockSha3
.p2align 2
_BlockSha3:
#endif /* __APPLE__ */
stp x29, x30, [sp, #-80]!
add x29, sp, #0
# AAPCS64: the low 64 bits of v8-v15 are callee-saved - preserve them.
stp d8, d9, [x29, #16]
stp d10, d11, [x29, #32]
stp d12, d13, [x29, #48]
stp d14, d15, [x29, #64]
#ifdef __APPLE__
.arch_extension sha3
#endif /* __APPLE__ */
# x1 = address of the 24 round constants.
# NOTE(review): the Apple branch combines ':lo12:' with '@PAGEOFF', which
# is unusual for Mach-O - confirm this assembles with Apple clang.
#ifndef __APPLE__
adrp x1, L_SHA3_transform_crypto_r
add x1, x1, :lo12:L_SHA3_transform_crypto_r
#else
adrp x1, L_SHA3_transform_crypto_r@PAGE
add x1, x1, :lo12:L_SHA3_transform_crypto_r@PAGEOFF
#endif /* __APPLE__ */
# Load the 25 state lanes into element 0 of v0-v24, one lane per register.
ld4 {v0.d, v1.d, v2.d, v3.d}[0], [x0], #32
ld4 {v4.d, v5.d, v6.d, v7.d}[0], [x0], #32
ld4 {v8.d, v9.d, v10.d, v11.d}[0], [x0], #32
ld4 {v12.d, v13.d, v14.d, v15.d}[0], [x0], #32
ld4 {v16.d, v17.d, v18.d, v19.d}[0], [x0], #32
ld4 {v20.d, v21.d, v22.d, v23.d}[0], [x0], #32
ld1 {v24.1d}, [x0]
# Rewind x0 to the start of the state (6 * 32 = 0xc0 bytes consumed above).
sub x0, x0, #0xc0
# x2 = remaining round count.
mov x2, #24
# Start of 24 rounds
L_sha3_crypto_begin:
# Col Mix
# theta, part 1: v31,v27,v28,v29,v30 = column parities C[0..4]
# (XOR of the five lanes in each column).
eor3 v31.16b, v0.16b, v5.16b, v10.16b
eor3 v27.16b, v1.16b, v6.16b, v11.16b
eor3 v28.16b, v2.16b, v7.16b, v12.16b
eor3 v29.16b, v3.16b, v8.16b, v13.16b
eor3 v30.16b, v4.16b, v9.16b, v14.16b
eor3 v31.16b, v31.16b, v15.16b, v20.16b
eor3 v27.16b, v27.16b, v16.16b, v21.16b
eor3 v28.16b, v28.16b, v17.16b, v22.16b
eor3 v29.16b, v29.16b, v18.16b, v23.16b
eor3 v30.16b, v30.16b, v19.16b, v24.16b
# theta, part 2: v25-v29 = D[x] = C[x-1] ^ rotl64(C[x+1], 1).
rax1 v25.2d, v30.2d, v27.2d
rax1 v26.2d, v31.2d, v28.2d
rax1 v27.2d, v27.2d, v29.2d
rax1 v28.2d, v28.2d, v30.2d
rax1 v29.2d, v29.2d, v31.2d
eor v0.16b, v0.16b, v25.16b
# theta apply + rho + pi: each xar XORs the matching D[x] into a lane and
# rotates it into its pi-permuted position (xar rotates right, so an
# immediate of #n realizes rotl64 by 64-n). v30 temporarily carries the
# new lane 10 value, consumed by the Row Mix below.
xar v30.2d, v1.2d, v26.2d, #63
xar v1.2d, v6.2d, v26.2d, #20
xar v6.2d, v9.2d, v29.2d, #44
xar v9.2d, v22.2d, v27.2d, #3
xar v22.2d, v14.2d, v29.2d, #25
xar v14.2d, v20.2d, v25.2d, #46
xar v20.2d, v2.2d, v27.2d, #2
xar v2.2d, v12.2d, v27.2d, #21
xar v12.2d, v13.2d, v28.2d, #39
xar v13.2d, v19.2d, v29.2d, #56
xar v19.2d, v23.2d, v28.2d, #8
xar v23.2d, v15.2d, v25.2d, #23
xar v15.2d, v4.2d, v29.2d, #37
xar v4.2d, v24.2d, v29.2d, #50
xar v24.2d, v21.2d, v26.2d, #62
xar v21.2d, v8.2d, v28.2d, #9
xar v8.2d, v16.2d, v26.2d, #19
xar v16.2d, v5.2d, v25.2d, #28
xar v5.2d, v3.2d, v28.2d, #36
xar v3.2d, v18.2d, v28.2d, #43
xar v18.2d, v17.2d, v27.2d, #49
xar v17.2d, v11.2d, v26.2d, #54
xar v11.2d, v7.2d, v27.2d, #58
xar v7.2d, v10.2d, v25.2d, #61
# Row Mix
# chi: A[x] = B[x] ^ (~B[x+1] & B[x+2]) within each 5-lane row, via bcax.
# v25/v26 snapshot the first two lanes of each row, which are overwritten
# before the last two bcax results of the row need them.
mov v25.16b, v0.16b
mov v26.16b, v1.16b
bcax v0.16b, v25.16b, v2.16b, v26.16b
bcax v1.16b, v26.16b, v3.16b, v2.16b
bcax v2.16b, v2.16b, v4.16b, v3.16b
bcax v3.16b, v3.16b, v25.16b, v4.16b
bcax v4.16b, v4.16b, v26.16b, v25.16b
mov v25.16b, v5.16b
mov v26.16b, v6.16b
bcax v5.16b, v25.16b, v7.16b, v26.16b
bcax v6.16b, v26.16b, v8.16b, v7.16b
bcax v7.16b, v7.16b, v9.16b, v8.16b
bcax v8.16b, v8.16b, v25.16b, v9.16b
bcax v9.16b, v9.16b, v26.16b, v25.16b
# Row 2: lane 10 lives in v30 (from the rho/pi stage above).
mov v26.16b, v11.16b
bcax v10.16b, v30.16b, v12.16b, v26.16b
bcax v11.16b, v26.16b, v13.16b, v12.16b
bcax v12.16b, v12.16b, v14.16b, v13.16b
bcax v13.16b, v13.16b, v30.16b, v14.16b
bcax v14.16b, v14.16b, v26.16b, v30.16b
mov v25.16b, v15.16b
mov v26.16b, v16.16b
bcax v15.16b, v25.16b, v17.16b, v26.16b
bcax v16.16b, v26.16b, v18.16b, v17.16b
bcax v17.16b, v17.16b, v19.16b, v18.16b
bcax v18.16b, v18.16b, v25.16b, v19.16b
bcax v19.16b, v19.16b, v26.16b, v25.16b
mov v25.16b, v20.16b
mov v26.16b, v21.16b
bcax v20.16b, v25.16b, v22.16b, v26.16b
bcax v21.16b, v26.16b, v23.16b, v22.16b
bcax v22.16b, v22.16b, v24.16b, v23.16b
bcax v23.16b, v23.16b, v25.16b, v24.16b
bcax v24.16b, v24.16b, v26.16b, v25.16b
# iota: XOR this round's constant into lane A[0]; advance constant pointer.
ld1r {v30.2d}, [x1], #8
subs x2, x2, #1
eor v0.16b, v0.16b, v30.16b
bne L_sha3_crypto_begin
# Write the permuted state back to memory.
st4 {v0.d, v1.d, v2.d, v3.d}[0], [x0], #32
st4 {v4.d, v5.d, v6.d, v7.d}[0], [x0], #32
st4 {v8.d, v9.d, v10.d, v11.d}[0], [x0], #32
st4 {v12.d, v13.d, v14.d, v15.d}[0], [x0], #32
st4 {v16.d, v17.d, v18.d, v19.d}[0], [x0], #32
st4 {v20.d, v21.d, v22.d, v23.d}[0], [x0], #32
st1 {v24.1d}, [x0]
# Restore callee-saved FP registers and the frame (0x50 = 80 bytes).
ldp d8, d9, [x29, #16]
ldp d10, d11, [x29, #32]
ldp d12, d13, [x29, #48]
ldp d14, d15, [x29, #64]
ldp x29, x30, [sp], #0x50
ret
#ifndef __APPLE__
.size BlockSha3,.-BlockSha3
#endif /* __APPLE__ */
#endif /* WOLFSSL_ARMASM_CRYPTO_SHA3 */
#endif /* WOLFSSL_SHA3 */
#endif /* __aarch64__ */
#endif /* WOLFSSL_ARMASM */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
#endif /* !WOLFSSL_ARMASM_INLINE */
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/UART/UART_TwoBoards_ComDMA/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
;; Cortex-M0 vector table: word 0 holds the initial main stack pointer
;; (end address of CSTACK), word 1 the reset vector, followed by the
;; fixed system exceptions and the STM32F072 peripheral IRQ vectors.
;; The linker places this in .intvec at the boot address.
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
;; Every handler below is declared PUBWEAK, so the application overrides
;; one simply by defining a function of the same name. Each default body
;; branches to itself: an unexpected exception parks the CPU in a tight
;; loop where a debugger can see which vector fired.
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
;; Reset entry: run SystemInit (clock/system setup) first, then jump to
;; the IAR C runtime startup, which initializes data/bss and calls main().
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aenu1/aps3e
| 112,124
|
app/src/main/cpp/rpcs3/3rdparty/wolfssl/wolfssl/wolfcrypt/src/port/arm/armv8-32-aes-asm.S
|
/* armv8-32-aes-asm
*
* Copyright (C) 2006-2023 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
/* Generated using (from wolfssl):
* cd ../scripts
* ruby ./aes/aes.rb arm32 ../wolfssl/wolfcrypt/src/port/arm/armv8-32-aes-asm.S
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif /* HAVE_CONFIG_H */
#include <wolfssl/wolfcrypt/settings.h>
#ifdef WOLFSSL_ARMASM
#if !defined(__aarch64__) && defined(__arm__) && !defined(__thumb__)
#ifndef WOLFSSL_ARMASM_INLINE
#ifndef NO_AES
#ifdef HAVE_AES_DECRYPT
.text
.type L_AES_ARM32_td_data, %object
.size L_AES_ARM32_td_data, 1024
.align 4
L_AES_ARM32_td_data:
.word 0x5051f4a7
.word 0x537e4165
.word 0xc31a17a4
.word 0x963a275e
.word 0xcb3bab6b
.word 0xf11f9d45
.word 0xabacfa58
.word 0x934be303
.word 0x552030fa
.word 0xf6ad766d
.word 0x9188cc76
.word 0x25f5024c
.word 0xfc4fe5d7
.word 0xd7c52acb
.word 0x80263544
.word 0x8fb562a3
.word 0x49deb15a
.word 0x6725ba1b
.word 0x9845ea0e
.word 0xe15dfec0
.word 0x2c32f75
.word 0x12814cf0
.word 0xa38d4697
.word 0xc66bd3f9
.word 0xe7038f5f
.word 0x9515929c
.word 0xebbf6d7a
.word 0xda955259
.word 0x2dd4be83
.word 0xd3587421
.word 0x2949e069
.word 0x448ec9c8
.word 0x6a75c289
.word 0x78f48e79
.word 0x6b99583e
.word 0xdd27b971
.word 0xb6bee14f
.word 0x17f088ad
.word 0x66c920ac
.word 0xb47dce3a
.word 0x1863df4a
.word 0x82e51a31
.word 0x60975133
.word 0x4562537f
.word 0xe0b16477
.word 0x84bb6bae
.word 0x1cfe81a0
.word 0x94f9082b
.word 0x58704868
.word 0x198f45fd
.word 0x8794de6c
.word 0xb7527bf8
.word 0x23ab73d3
.word 0xe2724b02
.word 0x57e31f8f
.word 0x2a6655ab
.word 0x7b2eb28
.word 0x32fb5c2
.word 0x9a86c57b
.word 0xa5d33708
.word 0xf2302887
.word 0xb223bfa5
.word 0xba02036a
.word 0x5ced1682
.word 0x2b8acf1c
.word 0x92a779b4
.word 0xf0f307f2
.word 0xa14e69e2
.word 0xcd65daf4
.word 0xd50605be
.word 0x1fd13462
.word 0x8ac4a6fe
.word 0x9d342e53
.word 0xa0a2f355
.word 0x32058ae1
.word 0x75a4f6eb
.word 0x390b83ec
.word 0xaa4060ef
.word 0x65e719f
.word 0x51bd6e10
.word 0xf93e218a
.word 0x3d96dd06
.word 0xaedd3e05
.word 0x464de6bd
.word 0xb591548d
.word 0x571c45d
.word 0x6f0406d4
.word 0xff605015
.word 0x241998fb
.word 0x97d6bde9
.word 0xcc894043
.word 0x7767d99e
.word 0xbdb0e842
.word 0x8807898b
.word 0x38e7195b
.word 0xdb79c8ee
.word 0x47a17c0a
.word 0xe97c420f
.word 0xc9f8841e
.word 0x0
.word 0x83098086
.word 0x48322bed
.word 0xac1e1170
.word 0x4e6c5a72
.word 0xfbfd0eff
.word 0x560f8538
.word 0x1e3daed5
.word 0x27362d39
.word 0x640a0fd9
.word 0x21685ca6
.word 0xd19b5b54
.word 0x3a24362e
.word 0xb10c0a67
.word 0xf9357e7
.word 0xd2b4ee96
.word 0x9e1b9b91
.word 0x4f80c0c5
.word 0xa261dc20
.word 0x695a774b
.word 0x161c121a
.word 0xae293ba
.word 0xe5c0a02a
.word 0x433c22e0
.word 0x1d121b17
.word 0xb0e090d
.word 0xadf28bc7
.word 0xb92db6a8
.word 0xc8141ea9
.word 0x8557f119
.word 0x4caf7507
.word 0xbbee99dd
.word 0xfda37f60
.word 0x9ff70126
.word 0xbc5c72f5
.word 0xc544663b
.word 0x345bfb7e
.word 0x768b4329
.word 0xdccb23c6
.word 0x68b6edfc
.word 0x63b8e4f1
.word 0xcad731dc
.word 0x10426385
.word 0x40139722
.word 0x2084c611
.word 0x7d854a24
.word 0xf8d2bb3d
.word 0x11aef932
.word 0x6dc729a1
.word 0x4b1d9e2f
.word 0xf3dcb230
.word 0xec0d8652
.word 0xd077c1e3
.word 0x6c2bb316
.word 0x99a970b9
.word 0xfa119448
.word 0x2247e964
.word 0xc4a8fc8c
.word 0x1aa0f03f
.word 0xd8567d2c
.word 0xef223390
.word 0xc787494e
.word 0xc1d938d1
.word 0xfe8ccaa2
.word 0x3698d40b
.word 0xcfa6f581
.word 0x28a57ade
.word 0x26dab78e
.word 0xa43fadbf
.word 0xe42c3a9d
.word 0xd507892
.word 0x9b6a5fcc
.word 0x62547e46
.word 0xc2f68d13
.word 0xe890d8b8
.word 0x5e2e39f7
.word 0xf582c3af
.word 0xbe9f5d80
.word 0x7c69d093
.word 0xa96fd52d
.word 0xb3cf2512
.word 0x3bc8ac99
.word 0xa710187d
.word 0x6ee89c63
.word 0x7bdb3bbb
.word 0x9cd2678
.word 0xf46e5918
.word 0x1ec9ab7
.word 0xa8834f9a
.word 0x65e6956e
.word 0x7eaaffe6
.word 0x821bccf
.word 0xe6ef15e8
.word 0xd9bae79b
.word 0xce4a6f36
.word 0xd4ea9f09
.word 0xd629b07c
.word 0xaf31a4b2
.word 0x312a3f23
.word 0x30c6a594
.word 0xc035a266
.word 0x37744ebc
.word 0xa6fc82ca
.word 0xb0e090d0
.word 0x1533a7d8
.word 0x4af10498
.word 0xf741ecda
.word 0xe7fcd50
.word 0x2f1791f6
.word 0x8d764dd6
.word 0x4d43efb0
.word 0x54ccaa4d
.word 0xdfe49604
.word 0xe39ed1b5
.word 0x1b4c6a88
.word 0xb8c12c1f
.word 0x7f466551
.word 0x49d5eea
.word 0x5d018c35
.word 0x73fa8774
.word 0x2efb0b41
.word 0x5ab3671d
.word 0x5292dbd2
.word 0x33e91056
.word 0x136dd647
.word 0x8c9ad761
.word 0x7a37a10c
.word 0x8e59f814
.word 0x89eb133c
.word 0xeecea927
.word 0x35b761c9
.word 0xede11ce5
.word 0x3c7a47b1
.word 0x599cd2df
.word 0x3f55f273
.word 0x791814ce
.word 0xbf73c737
.word 0xea53f7cd
.word 0x5b5ffdaa
.word 0x14df3d6f
.word 0x867844db
.word 0x81caaff3
.word 0x3eb968c4
.word 0x2c382434
.word 0x5fc2a340
.word 0x72161dc3
.word 0xcbce225
.word 0x8b283c49
.word 0x41ff0d95
.word 0x7139a801
.word 0xde080cb3
.word 0x9cd8b4e4
.word 0x906456c1
.word 0x617bcb84
.word 0x70d532b6
.word 0x74486c5c
.word 0x42d0b857
#endif /* HAVE_AES_DECRYPT */
#if defined(HAVE_AES_DECRYPT) || defined(HAVE_AES_CBC) || defined(HAVE_AESCCM) || defined(HAVE_AESGCM) || defined(WOLFSSL_AES_DIRECT) || defined(WOLFSSL_AES_COUNTER)
.text
.type L_AES_ARM32_te_data, %object
.size L_AES_ARM32_te_data, 1024
.align 4
L_AES_ARM32_te_data:
.word 0xa5c66363
.word 0x84f87c7c
.word 0x99ee7777
.word 0x8df67b7b
.word 0xdfff2f2
.word 0xbdd66b6b
.word 0xb1de6f6f
.word 0x5491c5c5
.word 0x50603030
.word 0x3020101
.word 0xa9ce6767
.word 0x7d562b2b
.word 0x19e7fefe
.word 0x62b5d7d7
.word 0xe64dabab
.word 0x9aec7676
.word 0x458fcaca
.word 0x9d1f8282
.word 0x4089c9c9
.word 0x87fa7d7d
.word 0x15effafa
.word 0xebb25959
.word 0xc98e4747
.word 0xbfbf0f0
.word 0xec41adad
.word 0x67b3d4d4
.word 0xfd5fa2a2
.word 0xea45afaf
.word 0xbf239c9c
.word 0xf753a4a4
.word 0x96e47272
.word 0x5b9bc0c0
.word 0xc275b7b7
.word 0x1ce1fdfd
.word 0xae3d9393
.word 0x6a4c2626
.word 0x5a6c3636
.word 0x417e3f3f
.word 0x2f5f7f7
.word 0x4f83cccc
.word 0x5c683434
.word 0xf451a5a5
.word 0x34d1e5e5
.word 0x8f9f1f1
.word 0x93e27171
.word 0x73abd8d8
.word 0x53623131
.word 0x3f2a1515
.word 0xc080404
.word 0x5295c7c7
.word 0x65462323
.word 0x5e9dc3c3
.word 0x28301818
.word 0xa1379696
.word 0xf0a0505
.word 0xb52f9a9a
.word 0x90e0707
.word 0x36241212
.word 0x9b1b8080
.word 0x3ddfe2e2
.word 0x26cdebeb
.word 0x694e2727
.word 0xcd7fb2b2
.word 0x9fea7575
.word 0x1b120909
.word 0x9e1d8383
.word 0x74582c2c
.word 0x2e341a1a
.word 0x2d361b1b
.word 0xb2dc6e6e
.word 0xeeb45a5a
.word 0xfb5ba0a0
.word 0xf6a45252
.word 0x4d763b3b
.word 0x61b7d6d6
.word 0xce7db3b3
.word 0x7b522929
.word 0x3edde3e3
.word 0x715e2f2f
.word 0x97138484
.word 0xf5a65353
.word 0x68b9d1d1
.word 0x0
.word 0x2cc1eded
.word 0x60402020
.word 0x1fe3fcfc
.word 0xc879b1b1
.word 0xedb65b5b
.word 0xbed46a6a
.word 0x468dcbcb
.word 0xd967bebe
.word 0x4b723939
.word 0xde944a4a
.word 0xd4984c4c
.word 0xe8b05858
.word 0x4a85cfcf
.word 0x6bbbd0d0
.word 0x2ac5efef
.word 0xe54faaaa
.word 0x16edfbfb
.word 0xc5864343
.word 0xd79a4d4d
.word 0x55663333
.word 0x94118585
.word 0xcf8a4545
.word 0x10e9f9f9
.word 0x6040202
.word 0x81fe7f7f
.word 0xf0a05050
.word 0x44783c3c
.word 0xba259f9f
.word 0xe34ba8a8
.word 0xf3a25151
.word 0xfe5da3a3
.word 0xc0804040
.word 0x8a058f8f
.word 0xad3f9292
.word 0xbc219d9d
.word 0x48703838
.word 0x4f1f5f5
.word 0xdf63bcbc
.word 0xc177b6b6
.word 0x75afdada
.word 0x63422121
.word 0x30201010
.word 0x1ae5ffff
.word 0xefdf3f3
.word 0x6dbfd2d2
.word 0x4c81cdcd
.word 0x14180c0c
.word 0x35261313
.word 0x2fc3ecec
.word 0xe1be5f5f
.word 0xa2359797
.word 0xcc884444
.word 0x392e1717
.word 0x5793c4c4
.word 0xf255a7a7
.word 0x82fc7e7e
.word 0x477a3d3d
.word 0xacc86464
.word 0xe7ba5d5d
.word 0x2b321919
.word 0x95e67373
.word 0xa0c06060
.word 0x98198181
.word 0xd19e4f4f
.word 0x7fa3dcdc
.word 0x66442222
.word 0x7e542a2a
.word 0xab3b9090
.word 0x830b8888
.word 0xca8c4646
.word 0x29c7eeee
.word 0xd36bb8b8
.word 0x3c281414
.word 0x79a7dede
.word 0xe2bc5e5e
.word 0x1d160b0b
.word 0x76addbdb
.word 0x3bdbe0e0
.word 0x56643232
.word 0x4e743a3a
.word 0x1e140a0a
.word 0xdb924949
.word 0xa0c0606
.word 0x6c482424
.word 0xe4b85c5c
.word 0x5d9fc2c2
.word 0x6ebdd3d3
.word 0xef43acac
.word 0xa6c46262
.word 0xa8399191
.word 0xa4319595
.word 0x37d3e4e4
.word 0x8bf27979
.word 0x32d5e7e7
.word 0x438bc8c8
.word 0x596e3737
.word 0xb7da6d6d
.word 0x8c018d8d
.word 0x64b1d5d5
.word 0xd29c4e4e
.word 0xe049a9a9
.word 0xb4d86c6c
.word 0xfaac5656
.word 0x7f3f4f4
.word 0x25cfeaea
.word 0xafca6565
.word 0x8ef47a7a
.word 0xe947aeae
.word 0x18100808
.word 0xd56fbaba
.word 0x88f07878
.word 0x6f4a2525
.word 0x725c2e2e
.word 0x24381c1c
.word 0xf157a6a6
.word 0xc773b4b4
.word 0x5197c6c6
.word 0x23cbe8e8
.word 0x7ca1dddd
.word 0x9ce87474
.word 0x213e1f1f
.word 0xdd964b4b
.word 0xdc61bdbd
.word 0x860d8b8b
.word 0x850f8a8a
.word 0x90e07070
.word 0x427c3e3e
.word 0xc471b5b5
.word 0xaacc6666
.word 0xd8904848
.word 0x5060303
.word 0x1f7f6f6
.word 0x121c0e0e
.word 0xa3c26161
.word 0x5f6a3535
.word 0xf9ae5757
.word 0xd069b9b9
.word 0x91178686
.word 0x5899c1c1
.word 0x273a1d1d
.word 0xb9279e9e
.word 0x38d9e1e1
.word 0x13ebf8f8
.word 0xb32b9898
.word 0x33221111
.word 0xbbd26969
.word 0x70a9d9d9
.word 0x89078e8e
.word 0xa7339494
.word 0xb62d9b9b
.word 0x223c1e1e
.word 0x92158787
.word 0x20c9e9e9
.word 0x4987cece
.word 0xffaa5555
.word 0x78502828
.word 0x7aa5dfdf
.word 0x8f038c8c
.word 0xf859a1a1
.word 0x80098989
.word 0x171a0d0d
.word 0xda65bfbf
.word 0x31d7e6e6
.word 0xc6844242
.word 0xb8d06868
.word 0xc3824141
.word 0xb0299999
.word 0x775a2d2d
.word 0x111e0f0f
.word 0xcb7bb0b0
.word 0xfca85454
.word 0xd66dbbbb
.word 0x3a2c1616
#endif /* HAVE_AES_DECRYPT || HAVE_AES_CBC || HAVE_AESCCM || HAVE_AESGCM || WOLFSSL_AES_DIRECT || WOLFSSL_AES_COUNTER */
#ifdef HAVE_AES_DECRYPT
.text
# Pointer literal: a single word holding the address of the inverse
# (decryption) T-table L_AES_ARM32_td_data. Code reaches the table via
# 'adr rX, L_AES_ARM32_td' then 'ldr rX, [rX]' (see AES_invert_key below),
# presumably because the data table lies beyond adr's PC-relative range -
# TODO confirm.
# NOTE(review): .size declares 12 bytes but only one 4-byte word is
# emitted; looks like a generator artifact - confirm before relying on it.
.type L_AES_ARM32_td, %object
.size L_AES_ARM32_td, 12
.align 4
L_AES_ARM32_td:
.word L_AES_ARM32_td_data
#endif /* HAVE_AES_DECRYPT */
#if defined(HAVE_AES_DECRYPT) || defined(HAVE_AES_CBC) || defined(HAVE_AESCCM) || defined(HAVE_AESGCM) || defined(WOLFSSL_AES_DIRECT) || defined(WOLFSSL_AES_COUNTER)
.text
# Pointer literal: a single word holding the address of the forward
# (encryption) T-table L_AES_ARM32_te_data, accessed via
# 'adr rX, L_AES_ARM32_te' / 'ldr rX, [rX]' (see AES_invert_key below).
# NOTE(review): .size declares 12 bytes for a 4-byte object, matching the
# same pattern as L_AES_ARM32_td above - likely a generator artifact.
.type L_AES_ARM32_te, %object
.size L_AES_ARM32_te, 12
.align 4
L_AES_ARM32_te:
.word L_AES_ARM32_te_data
#endif /* HAVE_AES_DECRYPT || HAVE_AES_CBC || HAVE_AESCCM || HAVE_AESGCM || WOLFSSL_AES_DIRECT || WOLFSSL_AES_COUNTER */
#ifdef HAVE_AES_DECRYPT
.text
.align 4
.globl AES_invert_key
.type AES_invert_key, %function
/* AES_invert_key(ks, rounds)
 *   r0 = ks     - key schedule (32-bit words), converted in place
 *   r1 = rounds - number of AES rounds (10, 12 or 14)
 * Converts an encryption key schedule into a decryption key
 * schedule: first swaps round keys end-for-end, then transforms
 * each of the (rounds - 1) middle round keys through a Te byte
 * lookup (S-box byte of the table entry) followed by a Td word
 * lookup, combined with rotates - the usual construction for the
 * equivalent-inverse-cipher key schedule.
 * Clobbers: r2-r12, lr, flags.
 */
AES_invert_key:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
adr r12, L_AES_ARM32_te
ldr r12, [r12]
adr lr, L_AES_ARM32_td
ldr lr, [lr]
add r10, r0, r1, lsl #4
mov r11, r1
/* Swap round keys: first <-> last, working inward, one 4-word
 * round key from each end per iteration (r11 counts down by 2). */
L_AES_invert_key_loop:
ldm r0, {r2, r3, r4, r5}
ldm r10, {r6, r7, r8, r9}
stm r10, {r2, r3, r4, r5}
stm r0!, {r6, r7, r8, r9}
subs r11, r11, #2
sub r10, r10, #16
bne L_AES_invert_key_loop
sub r0, r0, r1, lsl #3
add r0, r0, #16
sub r11, r1, #1
/* Transform the (rounds - 1) middle round keys; each iteration
 * processes the four words r2-r5 of one round key.  For each
 * word: split into bytes (ubfx on ARMv7+, uxtb/shifts earlier),
 * byte-lookup in Te (r12), word-lookup in Td (lr), then fold the
 * four results together with rotates. */
L_AES_invert_key_mix_loop:
ldm r0, {r2, r3, r4, r5}
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r6, r2, #24
lsr r6, r6, #24
#else
uxtb r6, r2
#endif
#else
ubfx r6, r2, #0, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r7, r2, #16
lsr r7, r7, #24
#else
uxtb r7, r2, ror #8
#endif
#else
ubfx r7, r2, #8, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r8, r2, #8
lsr r8, r8, #24
#else
uxtb r8, r2, ror #16
#endif
#else
ubfx r8, r2, #16, #8
#endif
lsr r9, r2, #24
ldrb r6, [r12, r6, lsl #2]
ldrb r7, [r12, r7, lsl #2]
ldrb r8, [r12, r8, lsl #2]
ldrb r9, [r12, r9, lsl #2]
ldr r6, [lr, r6, lsl #2]
ldr r7, [lr, r7, lsl #2]
ldr r8, [lr, r8, lsl #2]
ldr r9, [lr, r9, lsl #2]
eor r8, r8, r6, ror #16
eor r8, r8, r7, ror #8
eor r8, r8, r9, ror #24
str r8, [r0], #4
/* Same transform for the second word (r3). */
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r6, r3, #24
lsr r6, r6, #24
#else
uxtb r6, r3
#endif
#else
ubfx r6, r3, #0, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r7, r3, #16
lsr r7, r7, #24
#else
uxtb r7, r3, ror #8
#endif
#else
ubfx r7, r3, #8, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r8, r3, #8
lsr r8, r8, #24
#else
uxtb r8, r3, ror #16
#endif
#else
ubfx r8, r3, #16, #8
#endif
lsr r9, r3, #24
ldrb r6, [r12, r6, lsl #2]
ldrb r7, [r12, r7, lsl #2]
ldrb r8, [r12, r8, lsl #2]
ldrb r9, [r12, r9, lsl #2]
ldr r6, [lr, r6, lsl #2]
ldr r7, [lr, r7, lsl #2]
ldr r8, [lr, r8, lsl #2]
ldr r9, [lr, r9, lsl #2]
eor r8, r8, r6, ror #16
eor r8, r8, r7, ror #8
eor r8, r8, r9, ror #24
str r8, [r0], #4
/* Same transform for the third word (r4). */
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r6, r4, #24
lsr r6, r6, #24
#else
uxtb r6, r4
#endif
#else
ubfx r6, r4, #0, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r7, r4, #16
lsr r7, r7, #24
#else
uxtb r7, r4, ror #8
#endif
#else
ubfx r7, r4, #8, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r8, r4, #8
lsr r8, r8, #24
#else
uxtb r8, r4, ror #16
#endif
#else
ubfx r8, r4, #16, #8
#endif
lsr r9, r4, #24
ldrb r6, [r12, r6, lsl #2]
ldrb r7, [r12, r7, lsl #2]
ldrb r8, [r12, r8, lsl #2]
ldrb r9, [r12, r9, lsl #2]
ldr r6, [lr, r6, lsl #2]
ldr r7, [lr, r7, lsl #2]
ldr r8, [lr, r8, lsl #2]
ldr r9, [lr, r9, lsl #2]
eor r8, r8, r6, ror #16
eor r8, r8, r7, ror #8
eor r8, r8, r9, ror #24
str r8, [r0], #4
/* Same transform for the fourth word (r5). */
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r6, r5, #24
lsr r6, r6, #24
#else
uxtb r6, r5
#endif
#else
ubfx r6, r5, #0, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r7, r5, #16
lsr r7, r7, #24
#else
uxtb r7, r5, ror #8
#endif
#else
ubfx r7, r5, #8, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r8, r5, #8
lsr r8, r8, #24
#else
uxtb r8, r5, ror #16
#endif
#else
ubfx r8, r5, #16, #8
#endif
lsr r9, r5, #24
ldrb r6, [r12, r6, lsl #2]
ldrb r7, [r12, r7, lsl #2]
ldrb r8, [r12, r8, lsl #2]
ldrb r9, [r12, r9, lsl #2]
ldr r6, [lr, r6, lsl #2]
ldr r7, [lr, r7, lsl #2]
ldr r8, [lr, r8, lsl #2]
ldr r9, [lr, r9, lsl #2]
eor r8, r8, r6, ror #16
eor r8, r8, r7, ror #8
eor r8, r8, r9, ror #24
str r8, [r0], #4
subs r11, r11, #1
bne L_AES_invert_key_mix_loop
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size AES_invert_key,.-AES_invert_key
#endif /* HAVE_AES_DECRYPT */
.text
/* AES key-expansion round constants (rcon): powers of 2 in
 * GF(2^8), one per expansion step, stored in the most-significant
 * byte of each word (big-endian word layout used throughout). */
.type L_AES_ARM32_rcon, %object
.size L_AES_ARM32_rcon, 40
.align 4
L_AES_ARM32_rcon:
.word 0x1000000
.word 0x2000000
.word 0x4000000
.word 0x8000000
.word 0x10000000
.word 0x20000000
.word 0x40000000
.word 0x80000000
.word 0x1b000000
.word 0x36000000
.text
.align 4
.globl AES_set_encrypt_key
.type AES_set_encrypt_key, %function
/* AES_set_encrypt_key(key, len, ks)
 *   r0 = key - key bytes
 *   r1 = len - key length in bits: 0x80 (128), 0xc0 (192);
 *              any other value falls through to the 256-bit path
 *   r2 = ks  - output key schedule (32-bit words)
 * Loads the key as big-endian words (rev on ARMv6+, or the
 * eor/bic/ror byte-swap sequence on older cores), then expands it
 * using Te (r8) for the S-box byte lookups and L_AES_ARM32_rcon
 * (lr, advanced with ldm lr!) for the round constants.
 * Clobbers: r0-r8, r12, lr, flags.
 */
AES_set_encrypt_key:
push {r4, r5, r6, r7, r8, lr}
adr r8, L_AES_ARM32_te
ldr r8, [r8]
adr lr, L_AES_ARM32_rcon
cmp r1, #0x80
beq L_AES_set_encrypt_key_start_128
cmp r1, #0xc0
beq L_AES_set_encrypt_key_start_192
/* 256-bit path: load 8 key words, byte-swap, store as the first
 * two round keys. */
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #8]
ldr r7, [r0, #12]
#else
ldrd r6, r7, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
# REV r4, r4
eor r3, r4, r4, ror #16
bic r3, r3, #0xff0000
ror r4, r4, #8
eor r4, r4, r3, lsr #8
# REV r5, r5
eor r3, r5, r5, ror #16
bic r3, r3, #0xff0000
ror r5, r5, #8
eor r5, r5, r3, lsr #8
# REV r6, r6
eor r3, r6, r6, ror #16
bic r3, r3, #0xff0000
ror r6, r6, #8
eor r6, r6, r3, lsr #8
# REV r7, r7
eor r3, r7, r7, ror #16
bic r3, r3, #0xff0000
ror r7, r7, #8
eor r7, r7, r3, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
stm r2!, {r4, r5, r6, r7}
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #16]
ldr r5, [r0, #20]
#else
ldrd r4, r5, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #24]
ldr r7, [r0, #28]
#else
ldrd r6, r7, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
# REV r4, r4
eor r3, r4, r4, ror #16
bic r3, r3, #0xff0000
ror r4, r4, #8
eor r4, r4, r3, lsr #8
# REV r5, r5
eor r3, r5, r5, ror #16
bic r3, r3, #0xff0000
ror r5, r5, #8
eor r5, r5, r3, lsr #8
# REV r6, r6
eor r3, r6, r6, ror #16
bic r3, r3, #0xff0000
ror r6, r6, #8
eor r6, r6, r3, lsr #8
# REV r7, r7
eor r3, r7, r7, ror #16
bic r3, r3, #0xff0000
ror r7, r7, #8
eor r7, r7, r3, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
stm r2, {r4, r5, r6, r7}
sub r2, r2, #16
mov r12, #6
/* 256-bit expansion: 6 iterations, each producing two round keys:
 * one with RotWord+SubWord+rcon, one with SubWord only. */
L_AES_set_encrypt_key_loop_256:
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r4, r7, #24
lsr r4, r4, #24
#else
uxtb r4, r7
#endif
#else
ubfx r4, r7, #0, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r5, r7, #16
lsr r5, r5, #24
#else
uxtb r5, r7, ror #8
#endif
#else
ubfx r5, r7, #8, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r6, r7, #8
lsr r6, r6, #24
#else
uxtb r6, r7, ror #16
#endif
#else
ubfx r6, r7, #16, #8
#endif
lsr r7, r7, #24
ldrb r4, [r8, r4, lsl #2]
ldrb r5, [r8, r5, lsl #2]
ldrb r6, [r8, r6, lsl #2]
ldrb r7, [r8, r7, lsl #2]
eor r3, r7, r4, lsl #8
eor r3, r3, r5, lsl #16
eor r3, r3, r6, lsl #24
ldm r2!, {r4, r5, r6, r7}
eor r4, r4, r3
ldm lr!, {r3}
eor r4, r4, r3
eor r5, r5, r4
eor r6, r6, r5
eor r7, r7, r6
add r2, r2, #16
stm r2, {r4, r5, r6, r7}
sub r2, r2, #16
mov r3, r7
/* Second half: SubWord (no rotate, no rcon) on the previous last
 * word, then chain into the next four schedule words. */
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r4, r3, #16
lsr r4, r4, #24
#else
uxtb r4, r3, ror #8
#endif
#else
ubfx r4, r3, #8, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r5, r3, #8
lsr r5, r5, #24
#else
uxtb r5, r3, ror #16
#endif
#else
ubfx r5, r3, #16, #8
#endif
lsr r6, r3, #24
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r3, r3, #24
lsr r3, r3, #24
#else
uxtb r3, r3
#endif
#else
ubfx r3, r3, #0, #8
#endif
ldrb r4, [r8, r4, lsl #2]
ldrb r6, [r8, r6, lsl #2]
ldrb r5, [r8, r5, lsl #2]
ldrb r3, [r8, r3, lsl #2]
eor r3, r3, r4, lsl #8
eor r3, r3, r5, lsl #16
eor r3, r3, r6, lsl #24
ldm r2!, {r4, r5, r6, r7}
eor r4, r4, r3
eor r5, r5, r4
eor r6, r6, r5
eor r7, r7, r6
add r2, r2, #16
stm r2, {r4, r5, r6, r7}
sub r2, r2, #16
subs r12, r12, #1
bne L_AES_set_encrypt_key_loop_256
/* Final (7th) rcon step produces the last round key. */
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r4, r7, #24
lsr r4, r4, #24
#else
uxtb r4, r7
#endif
#else
ubfx r4, r7, #0, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r5, r7, #16
lsr r5, r5, #24
#else
uxtb r5, r7, ror #8
#endif
#else
ubfx r5, r7, #8, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r6, r7, #8
lsr r6, r6, #24
#else
uxtb r6, r7, ror #16
#endif
#else
ubfx r6, r7, #16, #8
#endif
lsr r7, r7, #24
ldrb r4, [r8, r4, lsl #2]
ldrb r5, [r8, r5, lsl #2]
ldrb r6, [r8, r6, lsl #2]
ldrb r7, [r8, r7, lsl #2]
eor r3, r7, r4, lsl #8
eor r3, r3, r5, lsl #16
eor r3, r3, r6, lsl #24
ldm r2!, {r4, r5, r6, r7}
eor r4, r4, r3
ldm lr!, {r3}
eor r4, r4, r3
eor r5, r5, r4
eor r6, r6, r5
eor r7, r7, r6
add r2, r2, #16
stm r2, {r4, r5, r6, r7}
sub r2, r2, #16
b L_AES_set_encrypt_key_end
/* 192-bit path: load 6 key words, byte-swap, store, then expand
 * 6 words per iteration (r0/r1 are reused as scratch here). */
L_AES_set_encrypt_key_start_192:
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #8]
ldr r7, [r0, #12]
#else
ldrd r6, r7, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r1, [r0, #20]
ldr r0, [r0, #16]
#else
ldrd r0, r1, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
# REV r4, r4
eor r3, r4, r4, ror #16
bic r3, r3, #0xff0000
ror r4, r4, #8
eor r4, r4, r3, lsr #8
# REV r5, r5
eor r3, r5, r5, ror #16
bic r3, r3, #0xff0000
ror r5, r5, #8
eor r5, r5, r3, lsr #8
# REV r6, r6
eor r3, r6, r6, ror #16
bic r3, r3, #0xff0000
ror r6, r6, #8
eor r6, r6, r3, lsr #8
# REV r7, r7
eor r3, r7, r7, ror #16
bic r3, r3, #0xff0000
ror r7, r7, #8
eor r7, r7, r3, lsr #8
# REV r0, r0
eor r3, r0, r0, ror #16
bic r3, r3, #0xff0000
ror r0, r0, #8
eor r0, r0, r3, lsr #8
# REV r1, r1
eor r3, r1, r1, ror #16
bic r3, r3, #0xff0000
ror r1, r1, #8
eor r1, r1, r3, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
rev r0, r0
rev r1, r1
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
stm r2, {r4, r5, r6, r7}
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r0, [r2, #16]
str r1, [r2, #20]
#else
strd r0, r1, [r2, #16]
#endif
mov r7, r1
mov r12, #7
/* 192-bit expansion: 7 iterations of 6 schedule words each. */
L_AES_set_encrypt_key_loop_192:
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r0, r7, #24
lsr r0, r0, #24
#else
uxtb r0, r7
#endif
#else
ubfx r0, r7, #0, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r1, r7, #16
lsr r1, r1, #24
#else
uxtb r1, r7, ror #8
#endif
#else
ubfx r1, r7, #8, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r4, r7, #8
lsr r4, r4, #24
#else
uxtb r4, r7, ror #16
#endif
#else
ubfx r4, r7, #16, #8
#endif
lsr r7, r7, #24
ldrb r0, [r8, r0, lsl #2]
ldrb r1, [r8, r1, lsl #2]
ldrb r4, [r8, r4, lsl #2]
ldrb r7, [r8, r7, lsl #2]
eor r3, r7, r0, lsl #8
eor r3, r3, r1, lsl #16
eor r3, r3, r4, lsl #24
ldm r2!, {r0, r1, r4, r5, r6, r7}
eor r0, r0, r3
ldm lr!, {r3}
eor r0, r0, r3
eor r1, r1, r0
eor r4, r4, r1
eor r5, r5, r4
eor r6, r6, r5
eor r7, r7, r6
stm r2, {r0, r1, r4, r5, r6, r7}
subs r12, r12, #1
bne L_AES_set_encrypt_key_loop_192
/* Final step: only 4 more words are needed to complete the
 * schedule. */
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r0, r7, #24
lsr r0, r0, #24
#else
uxtb r0, r7
#endif
#else
ubfx r0, r7, #0, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r1, r7, #16
lsr r1, r1, #24
#else
uxtb r1, r7, ror #8
#endif
#else
ubfx r1, r7, #8, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r4, r7, #8
lsr r4, r4, #24
#else
uxtb r4, r7, ror #16
#endif
#else
ubfx r4, r7, #16, #8
#endif
lsr r7, r7, #24
ldrb r0, [r8, r0, lsl #2]
ldrb r1, [r8, r1, lsl #2]
ldrb r4, [r8, r4, lsl #2]
ldrb r7, [r8, r7, lsl #2]
eor r3, r7, r0, lsl #8
eor r3, r3, r1, lsl #16
eor r3, r3, r4, lsl #24
ldm r2!, {r0, r1, r4, r5, r6, r7}
eor r0, r0, r3
ldm lr!, {r3}
eor r0, r0, r3
eor r1, r1, r0
eor r4, r4, r1
eor r5, r5, r4
stm r2, {r0, r1, r4, r5}
b L_AES_set_encrypt_key_end
/* 128-bit path: load 4 key words, byte-swap, store as round key
 * 0, then expand one round key per iteration. */
L_AES_set_encrypt_key_start_128:
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #8]
ldr r7, [r0, #12]
#else
ldrd r6, r7, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
# REV r4, r4
eor r3, r4, r4, ror #16
bic r3, r3, #0xff0000
ror r4, r4, #8
eor r4, r4, r3, lsr #8
# REV r5, r5
eor r3, r5, r5, ror #16
bic r3, r3, #0xff0000
ror r5, r5, #8
eor r5, r5, r3, lsr #8
# REV r6, r6
eor r3, r6, r6, ror #16
bic r3, r3, #0xff0000
ror r6, r6, #8
eor r6, r6, r3, lsr #8
# REV r7, r7
eor r3, r7, r7, ror #16
bic r3, r3, #0xff0000
ror r7, r7, #8
eor r7, r7, r3, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
stm r2, {r4, r5, r6, r7}
mov r12, #10
/* 128-bit expansion: 10 iterations of one round key each. */
L_AES_set_encrypt_key_loop_128:
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r4, r7, #24
lsr r4, r4, #24
#else
uxtb r4, r7
#endif
#else
ubfx r4, r7, #0, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r5, r7, #16
lsr r5, r5, #24
#else
uxtb r5, r7, ror #8
#endif
#else
ubfx r5, r7, #8, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r6, r7, #8
lsr r6, r6, #24
#else
uxtb r6, r7, ror #16
#endif
#else
ubfx r6, r7, #16, #8
#endif
lsr r7, r7, #24
ldrb r4, [r8, r4, lsl #2]
ldrb r5, [r8, r5, lsl #2]
ldrb r6, [r8, r6, lsl #2]
ldrb r7, [r8, r7, lsl #2]
eor r3, r7, r4, lsl #8
eor r3, r3, r5, lsl #16
eor r3, r3, r6, lsl #24
ldm r2!, {r4, r5, r6, r7}
eor r4, r4, r3
ldm lr!, {r3}
eor r4, r4, r3
eor r5, r5, r4
eor r6, r6, r5
eor r7, r7, r6
stm r2, {r4, r5, r6, r7}
subs r12, r12, #1
bne L_AES_set_encrypt_key_loop_128
L_AES_set_encrypt_key_end:
pop {r4, r5, r6, r7, r8, pc}
.size AES_set_encrypt_key,.-AES_set_encrypt_key
.text
.align 4
.globl AES_encrypt_block
.type AES_encrypt_block, %function
/* AES_encrypt_block - core rounds for one 16-byte block.
 * Custom internal register ABI (NOT AAPCS; called only from
 * the wrappers in this file):
 *   r0    = AES encryption table base (L_AES_ARM32_te_data)
 *   r1    = number of double-round loop iterations
 *           (callers pass 4 for AES-128, 5 for AES-192,
 *            6 for AES-256)
 *   r3    = key schedule pointer positioned after round key 0;
 *           advanced (ldm r3!) as round keys are consumed
 *   r4-r7 = state, already XORed with round key 0 by the caller;
 *           also holds the encrypted result on return
 * Clobbers: r2, r8-r11, lr, flags.
 * Each loop pass performs two full rounds using word lookups in
 * the Te table (r4-r7 -> r8-r11, then r8-r11 -> r4-r7); the final
 * round after the loop uses byte loads (ldrb) so that only the
 * S-box byte of each table entry is applied (no MixColumns).
 */
AES_encrypt_block:
push {lr}
L_AES_encrypt_block_nr:
/* First half-round of the pair: build r8-r11 from r4-r7.
 * Byte extraction uses ubfx on ARMv7+, uxtb/shift pairs on
 * older architectures. */
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r8, r5, #8
lsr r8, r8, #24
#else
uxtb r8, r5, ror #16
#endif
#else
ubfx r8, r5, #16, #8
#endif
lsr r11, r4, #24
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r6, #16
lsr lr, lr, #24
#else
uxtb lr, r6, ror #8
#endif
#else
ubfx lr, r6, #8, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r2, r7, #24
lsr r2, r2, #24
#else
uxtb r2, r7
#endif
#else
ubfx r2, r7, #0, #8
#endif
ldr r8, [r0, r8, lsl #2]
ldr r11, [r0, r11, lsl #2]
ldr lr, [r0, lr, lsl #2]
ldr r2, [r0, r2, lsl #2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r9, r6, #8
lsr r9, r9, #24
#else
uxtb r9, r6, ror #16
#endif
#else
ubfx r9, r6, #16, #8
#endif
eor r8, r8, r11, ror #24
lsr r11, r5, #24
eor r8, r8, lr, ror #8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r7, #16
lsr lr, lr, #24
#else
uxtb lr, r7, ror #8
#endif
#else
ubfx lr, r7, #8, #8
#endif
eor r8, r8, r2, ror #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r2, r4, #24
lsr r2, r2, #24
#else
uxtb r2, r4
#endif
#else
ubfx r2, r4, #0, #8
#endif
ldr r9, [r0, r9, lsl #2]
ldr r11, [r0, r11, lsl #2]
ldr lr, [r0, lr, lsl #2]
ldr r2, [r0, r2, lsl #2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r10, r7, #8
lsr r10, r10, #24
#else
uxtb r10, r7, ror #16
#endif
#else
ubfx r10, r7, #16, #8
#endif
eor r9, r9, r11, ror #24
lsr r11, r6, #24
eor r9, r9, lr, ror #8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r4, #16
lsr lr, lr, #24
#else
uxtb lr, r4, ror #8
#endif
#else
ubfx lr, r4, #8, #8
#endif
eor r9, r9, r2, ror #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r2, r5, #24
lsr r2, r2, #24
#else
uxtb r2, r5
#endif
#else
ubfx r2, r5, #0, #8
#endif
ldr r10, [r0, r10, lsl #2]
ldr r11, [r0, r11, lsl #2]
ldr lr, [r0, lr, lsl #2]
ldr r2, [r0, r2, lsl #2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r6, r6, #24
lsr r6, r6, #24
#else
uxtb r6, r6
#endif
#else
ubfx r6, r6, #0, #8
#endif
eor r10, r10, r11, ror #24
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r11, r4, #8
lsr r11, r11, #24
#else
uxtb r11, r4, ror #16
#endif
#else
ubfx r11, r4, #16, #8
#endif
eor r10, r10, lr, ror #8
lsr lr, r7, #24
eor r10, r10, r2, ror #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r2, r5, #16
lsr r2, r2, #24
#else
uxtb r2, r5, ror #8
#endif
#else
ubfx r2, r5, #8, #8
#endif
ldr r6, [r0, r6, lsl #2]
ldr lr, [r0, lr, lsl #2]
ldr r11, [r0, r11, lsl #2]
ldr r2, [r0, r2, lsl #2]
eor lr, lr, r6, ror #24
ldm r3!, {r4, r5, r6, r7}
eor r11, r11, lr, ror #24
eor r11, r11, r2, ror #8
# XOR in Key Schedule
eor r8, r8, r4
eor r9, r9, r5
eor r10, r10, r6
eor r11, r11, r7
/* Second half-round of the pair: build r4-r7 from r8-r11. */
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r4, r9, #8
lsr r4, r4, #24
#else
uxtb r4, r9, ror #16
#endif
#else
ubfx r4, r9, #16, #8
#endif
lsr r7, r8, #24
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r10, #16
lsr lr, lr, #24
#else
uxtb lr, r10, ror #8
#endif
#else
ubfx lr, r10, #8, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r2, r11, #24
lsr r2, r2, #24
#else
uxtb r2, r11
#endif
#else
ubfx r2, r11, #0, #8
#endif
ldr r4, [r0, r4, lsl #2]
ldr r7, [r0, r7, lsl #2]
ldr lr, [r0, lr, lsl #2]
ldr r2, [r0, r2, lsl #2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r5, r10, #8
lsr r5, r5, #24
#else
uxtb r5, r10, ror #16
#endif
#else
ubfx r5, r10, #16, #8
#endif
eor r4, r4, r7, ror #24
lsr r7, r9, #24
eor r4, r4, lr, ror #8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r11, #16
lsr lr, lr, #24
#else
uxtb lr, r11, ror #8
#endif
#else
ubfx lr, r11, #8, #8
#endif
eor r4, r4, r2, ror #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r2, r8, #24
lsr r2, r2, #24
#else
uxtb r2, r8
#endif
#else
ubfx r2, r8, #0, #8
#endif
ldr r5, [r0, r5, lsl #2]
ldr r7, [r0, r7, lsl #2]
ldr lr, [r0, lr, lsl #2]
ldr r2, [r0, r2, lsl #2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r6, r11, #8
lsr r6, r6, #24
#else
uxtb r6, r11, ror #16
#endif
#else
ubfx r6, r11, #16, #8
#endif
eor r5, r5, r7, ror #24
lsr r7, r10, #24
eor r5, r5, lr, ror #8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r8, #16
lsr lr, lr, #24
#else
uxtb lr, r8, ror #8
#endif
#else
ubfx lr, r8, #8, #8
#endif
eor r5, r5, r2, ror #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r2, r9, #24
lsr r2, r2, #24
#else
uxtb r2, r9
#endif
#else
ubfx r2, r9, #0, #8
#endif
ldr r6, [r0, r6, lsl #2]
ldr r7, [r0, r7, lsl #2]
ldr lr, [r0, lr, lsl #2]
ldr r2, [r0, r2, lsl #2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r10, r10, #24
lsr r10, r10, #24
#else
uxtb r10, r10
#endif
#else
ubfx r10, r10, #0, #8
#endif
eor r6, r6, r7, ror #24
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r7, r8, #8
lsr r7, r7, #24
#else
uxtb r7, r8, ror #16
#endif
#else
ubfx r7, r8, #16, #8
#endif
eor r6, r6, lr, ror #8
lsr lr, r11, #24
eor r6, r6, r2, ror #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r2, r9, #16
lsr r2, r2, #24
#else
uxtb r2, r9, ror #8
#endif
#else
ubfx r2, r9, #8, #8
#endif
ldr r10, [r0, r10, lsl #2]
ldr lr, [r0, lr, lsl #2]
ldr r7, [r0, r7, lsl #2]
ldr r2, [r0, r2, lsl #2]
eor lr, lr, r10, ror #24
ldm r3!, {r8, r9, r10, r11}
eor r7, r7, lr, ror #24
eor r7, r7, r2, ror #8
# XOR in Key Schedule
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
subs r1, r1, #1
bne L_AES_encrypt_block_nr
/* Penultimate round (same word-lookup form as inside the loop),
 * leaving the state in r8-r11. */
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r8, r5, #8
lsr r8, r8, #24
#else
uxtb r8, r5, ror #16
#endif
#else
ubfx r8, r5, #16, #8
#endif
lsr r11, r4, #24
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r6, #16
lsr lr, lr, #24
#else
uxtb lr, r6, ror #8
#endif
#else
ubfx lr, r6, #8, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r2, r7, #24
lsr r2, r2, #24
#else
uxtb r2, r7
#endif
#else
ubfx r2, r7, #0, #8
#endif
ldr r8, [r0, r8, lsl #2]
ldr r11, [r0, r11, lsl #2]
ldr lr, [r0, lr, lsl #2]
ldr r2, [r0, r2, lsl #2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r9, r6, #8
lsr r9, r9, #24
#else
uxtb r9, r6, ror #16
#endif
#else
ubfx r9, r6, #16, #8
#endif
eor r8, r8, r11, ror #24
lsr r11, r5, #24
eor r8, r8, lr, ror #8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r7, #16
lsr lr, lr, #24
#else
uxtb lr, r7, ror #8
#endif
#else
ubfx lr, r7, #8, #8
#endif
eor r8, r8, r2, ror #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r2, r4, #24
lsr r2, r2, #24
#else
uxtb r2, r4
#endif
#else
ubfx r2, r4, #0, #8
#endif
ldr r9, [r0, r9, lsl #2]
ldr r11, [r0, r11, lsl #2]
ldr lr, [r0, lr, lsl #2]
ldr r2, [r0, r2, lsl #2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r10, r7, #8
lsr r10, r10, #24
#else
uxtb r10, r7, ror #16
#endif
#else
ubfx r10, r7, #16, #8
#endif
eor r9, r9, r11, ror #24
lsr r11, r6, #24
eor r9, r9, lr, ror #8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r4, #16
lsr lr, lr, #24
#else
uxtb lr, r4, ror #8
#endif
#else
ubfx lr, r4, #8, #8
#endif
eor r9, r9, r2, ror #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r2, r5, #24
lsr r2, r2, #24
#else
uxtb r2, r5
#endif
#else
ubfx r2, r5, #0, #8
#endif
ldr r10, [r0, r10, lsl #2]
ldr r11, [r0, r11, lsl #2]
ldr lr, [r0, lr, lsl #2]
ldr r2, [r0, r2, lsl #2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r6, r6, #24
lsr r6, r6, #24
#else
uxtb r6, r6
#endif
#else
ubfx r6, r6, #0, #8
#endif
eor r10, r10, r11, ror #24
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r11, r4, #8
lsr r11, r11, #24
#else
uxtb r11, r4, ror #16
#endif
#else
ubfx r11, r4, #16, #8
#endif
eor r10, r10, lr, ror #8
lsr lr, r7, #24
eor r10, r10, r2, ror #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r2, r5, #16
lsr r2, r2, #24
#else
uxtb r2, r5, ror #8
#endif
#else
ubfx r2, r5, #8, #8
#endif
ldr r6, [r0, r6, lsl #2]
ldr lr, [r0, lr, lsl #2]
ldr r11, [r0, r11, lsl #2]
ldr r2, [r0, r2, lsl #2]
eor lr, lr, r6, ror #24
ldm r3!, {r4, r5, r6, r7}
eor r11, r11, lr, ror #24
eor r11, r11, r2, ror #8
# XOR in Key Schedule
eor r8, r8, r4
eor r9, r9, r5
eor r10, r10, r6
eor r11, r11, r7
/* Final round: byte (ldrb) lookups apply the S-box only - no
 * MixColumns - then XOR in the last round key; result in r4-r7. */
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r4, r11, #24
lsr r4, r4, #24
#else
uxtb r4, r11
#endif
#else
ubfx r4, r11, #0, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r7, r10, #16
lsr r7, r7, #24
#else
uxtb r7, r10, ror #8
#endif
#else
ubfx r7, r10, #8, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r9, #8
lsr lr, lr, #24
#else
uxtb lr, r9, ror #16
#endif
#else
ubfx lr, r9, #16, #8
#endif
lsr r2, r8, #24
ldrb r4, [r0, r4, lsl #2]
ldrb r7, [r0, r7, lsl #2]
ldrb lr, [r0, lr, lsl #2]
ldrb r2, [r0, r2, lsl #2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r5, r8, #24
lsr r5, r5, #24
#else
uxtb r5, r8
#endif
#else
ubfx r5, r8, #0, #8
#endif
eor r4, r4, r7, lsl #8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r7, r11, #16
lsr r7, r7, #24
#else
uxtb r7, r11, ror #8
#endif
#else
ubfx r7, r11, #8, #8
#endif
eor r4, r4, lr, lsl #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r10, #8
lsr lr, lr, #24
#else
uxtb lr, r10, ror #16
#endif
#else
ubfx lr, r10, #16, #8
#endif
eor r4, r4, r2, lsl #24
lsr r2, r9, #24
ldrb r5, [r0, r5, lsl #2]
ldrb r7, [r0, r7, lsl #2]
ldrb lr, [r0, lr, lsl #2]
ldrb r2, [r0, r2, lsl #2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r6, r9, #24
lsr r6, r6, #24
#else
uxtb r6, r9
#endif
#else
ubfx r6, r9, #0, #8
#endif
eor r5, r5, r7, lsl #8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r7, r8, #16
lsr r7, r7, #24
#else
uxtb r7, r8, ror #8
#endif
#else
ubfx r7, r8, #8, #8
#endif
eor r5, r5, lr, lsl #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r11, #8
lsr lr, lr, #24
#else
uxtb lr, r11, ror #16
#endif
#else
ubfx lr, r11, #16, #8
#endif
eor r5, r5, r2, lsl #24
lsr r2, r10, #24
ldrb r6, [r0, r6, lsl #2]
ldrb r7, [r0, r7, lsl #2]
ldrb lr, [r0, lr, lsl #2]
ldrb r2, [r0, r2, lsl #2]
lsr r11, r11, #24
eor r6, r6, r7, lsl #8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r7, r10, #24
lsr r7, r7, #24
#else
uxtb r7, r10
#endif
#else
ubfx r7, r10, #0, #8
#endif
eor r6, r6, lr, lsl #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r9, #16
lsr lr, lr, #24
#else
uxtb lr, r9, ror #8
#endif
#else
ubfx lr, r9, #8, #8
#endif
eor r6, r6, r2, lsl #24
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r2, r8, #8
lsr r2, r2, #24
#else
uxtb r2, r8, ror #16
#endif
#else
ubfx r2, r8, #16, #8
#endif
ldrb r11, [r0, r11, lsl #2]
ldrb r7, [r0, r7, lsl #2]
ldrb lr, [r0, lr, lsl #2]
ldrb r2, [r0, r2, lsl #2]
eor lr, lr, r11, lsl #16
ldm r3, {r8, r9, r10, r11}
eor r7, r7, lr, lsl #8
eor r7, r7, r2, lsl #16
# XOR in Key Schedule
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
pop {pc}
.size AES_encrypt_block,.-AES_encrypt_block
#if defined(HAVE_AESCCM) || defined(HAVE_AESGCM) || defined(WOLFSSL_AES_DIRECT) || defined(WOLFSSL_AES_COUNTER)
.text
/* Literal-pool entry holding the address of the AES encryption
 * table for the ECB code path (same table data as
 * L_AES_ARM32_te; a local copy keeps it within adr range). */
.type L_AES_ARM32_te_ecb, %object
.size L_AES_ARM32_te_ecb, 12
.align 4
L_AES_ARM32_te_ecb:
.word L_AES_ARM32_te_data
.text
.align 4
.globl AES_ECB_encrypt
.type AES_ECB_encrypt, %function
/* AES_ECB_encrypt(in, out, len, ks, nr)
 *   r0       = in  - input blocks
 *   r1       = out - output blocks
 *   r2       = len - length in bytes (multiple of 16)
 *   r3       = ks  - key schedule
 *   [sp+36]  = nr  - rounds: 10 (128-bit), 12 (192-bit),
 *              otherwise treated as 14 (256-bit)
 * Encrypts len/16 blocks by loading each block big-endian,
 * XORing round key 0, calling AES_encrypt_block, then byte-
 * swapping and storing the result. The key schedule pointer is
 * pushed and re-read from the stack each block because
 * AES_encrypt_block advances r3.
 * Clobbers: r0, r2-r12, lr, flags.
 */
AES_ECB_encrypt:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
mov lr, r0
adr r0, L_AES_ARM32_te_ecb
ldr r0, [r0]
ldr r12, [sp, #36]
push {r3}
cmp r12, #10
beq L_AES_ECB_encrypt_start_block_128
cmp r12, #12
beq L_AES_ECB_encrypt_start_block_192
/* 256-bit (14-round) path: 6 double-round iterations. */
L_AES_ECB_encrypt_loop_block_256:
ldr r4, [lr]
ldr r5, [lr, #4]
ldr r6, [lr, #8]
ldr r7, [lr, #12]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
push {r1, r2, lr}
ldm r3!, {r8, r9, r10, r11}
# Round: 0 - XOR in key schedule
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
mov r1, #6
bl AES_encrypt_block
pop {r1, r2, lr}
ldr r3, [sp]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
str r4, [r1]
str r5, [r1, #4]
str r6, [r1, #8]
str r7, [r1, #12]
subs r2, r2, #16
add lr, lr, #16
add r1, r1, #16
bne L_AES_ECB_encrypt_loop_block_256
b L_AES_ECB_encrypt_end
/* 192-bit (12-round) path: 5 double-round iterations. */
L_AES_ECB_encrypt_start_block_192:
L_AES_ECB_encrypt_loop_block_192:
ldr r4, [lr]
ldr r5, [lr, #4]
ldr r6, [lr, #8]
ldr r7, [lr, #12]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
push {r1, r2, lr}
ldm r3!, {r8, r9, r10, r11}
# Round: 0 - XOR in key schedule
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
mov r1, #5
bl AES_encrypt_block
pop {r1, r2, lr}
ldr r3, [sp]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
str r4, [r1]
str r5, [r1, #4]
str r6, [r1, #8]
str r7, [r1, #12]
subs r2, r2, #16
add lr, lr, #16
add r1, r1, #16
bne L_AES_ECB_encrypt_loop_block_192
b L_AES_ECB_encrypt_end
/* 128-bit (10-round) path: 4 double-round iterations. */
L_AES_ECB_encrypt_start_block_128:
L_AES_ECB_encrypt_loop_block_128:
ldr r4, [lr]
ldr r5, [lr, #4]
ldr r6, [lr, #8]
ldr r7, [lr, #12]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
push {r1, r2, lr}
ldm r3!, {r8, r9, r10, r11}
# Round: 0 - XOR in key schedule
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
mov r1, #4
bl AES_encrypt_block
pop {r1, r2, lr}
ldr r3, [sp]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
str r4, [r1]
str r5, [r1, #4]
str r6, [r1, #8]
str r7, [r1, #12]
subs r2, r2, #16
add lr, lr, #16
add r1, r1, #16
bne L_AES_ECB_encrypt_loop_block_128
L_AES_ECB_encrypt_end:
pop {r3}
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size AES_ECB_encrypt,.-AES_ECB_encrypt
#endif /* HAVE_AESCCM || HAVE_AESGCM || WOLFSSL_AES_DIRECT || WOLFSSL_AES_COUNTER */
#ifdef HAVE_AES_CBC
# Indirection word holding the address of the AES encryption T-table
# (L_AES_ARM32_te_data, defined elsewhere in this file).  Kept close to the
# code so it is reachable with a pc-relative adr; the code loads the table
# address through it (adr + ldr).
.text
.type L_AES_ARM32_te_cbc, %object
.size L_AES_ARM32_te_cbc, 12
.align 4
L_AES_ARM32_te_cbc:
.word L_AES_ARM32_te_data
# AES_CBC_encrypt(in, out, len, ks, nr, iv) -- AAPCS ARM32.
#   r0        = in   : source pointer (len bytes)
#   r1        = out  : destination pointer
#   r2        = len  : byte count; assumed a non-zero multiple of 16
#   r3        = ks   : expanded AES key schedule
#   [sp, #36] = nr   : round count (10 = AES-128, 12 = AES-192, 14 = AES-256)
#   [sp, #40] = iv   : 16-byte IV buffer; updated with the final ciphertext
#                      block (the chaining value) on return
# Calls the internal helper AES_encrypt_block with r0 = T-table, r1 = number
# of inner round-pair iterations, r3 = key schedule, r4-r7 = state.
.text
.align 4
.globl AES_CBC_encrypt
.type AES_CBC_encrypt, %function
AES_CBC_encrypt:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
# r8 = nr, r9 = iv pointer (stacked arguments; offsets include the 36 bytes
# pushed above).
ldr r8, [sp, #36]
ldr r9, [sp, #40]
# lr takes over as the input pointer; r0 becomes the T-table base.
mov lr, r0
adr r0, L_AES_ARM32_te_cbc
ldr r0, [r0]
# r4..r7 = chaining value, initially the IV.
ldm r9, {r4, r5, r6, r7}
# Keep ks and iv pointers on the stack across the block loops:
# [sp] = ks, [sp, #4] = iv.
push {r3, r9}
# Dispatch on key size; fall through for AES-256.
cmp r8, #10
beq L_AES_CBC_encrypt_start_block_128
cmp r8, #12
beq L_AES_CBC_encrypt_start_block_192
L_AES_CBC_encrypt_loop_block_256:
# XOR the next plaintext block into the chaining value.
ldr r8, [lr]
ldr r9, [lr, #4]
ldr r10, [lr, #8]
ldr r11, [lr, #12]
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
push {r1, r2, lr}
# Byte-swap each state word to big-endian.  Pre-ARMv6 has no REV, so it is
# emulated with the eor/bic/ror sequence below.
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
ldm r3!, {r8, r9, r10, r11}
# Round: 0 - XOR in key schedule
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
# r1 = 6 round-pair iterations for AES-256 in AES_encrypt_block.
mov r1, #6
bl AES_encrypt_block
pop {r1, r2, lr}
# Reload the key schedule pointer (saved at [sp]) for the next block.
ldr r3, [sp]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
# Write the ciphertext block; r4..r7 stay live as the chaining value.
str r4, [r1]
str r5, [r1, #4]
str r6, [r1, #8]
str r7, [r1, #12]
subs r2, r2, #16
add lr, lr, #16
add r1, r1, #16
bne L_AES_CBC_encrypt_loop_block_256
b L_AES_CBC_encrypt_end
L_AES_CBC_encrypt_start_block_192:
# AES-192 loop: identical to the 256-bit loop except r1 = 5.
L_AES_CBC_encrypt_loop_block_192:
ldr r8, [lr]
ldr r9, [lr, #4]
ldr r10, [lr, #8]
ldr r11, [lr, #12]
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
push {r1, r2, lr}
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
ldm r3!, {r8, r9, r10, r11}
# Round: 0 - XOR in key schedule
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
mov r1, #5
bl AES_encrypt_block
pop {r1, r2, lr}
ldr r3, [sp]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
str r4, [r1]
str r5, [r1, #4]
str r6, [r1, #8]
str r7, [r1, #12]
subs r2, r2, #16
add lr, lr, #16
add r1, r1, #16
bne L_AES_CBC_encrypt_loop_block_192
b L_AES_CBC_encrypt_end
L_AES_CBC_encrypt_start_block_128:
# AES-128 loop: identical to the 256-bit loop except r1 = 4.
L_AES_CBC_encrypt_loop_block_128:
ldr r8, [lr]
ldr r9, [lr, #4]
ldr r10, [lr, #8]
ldr r11, [lr, #12]
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
push {r1, r2, lr}
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
ldm r3!, {r8, r9, r10, r11}
# Round: 0 - XOR in key schedule
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
mov r1, #4
bl AES_encrypt_block
pop {r1, r2, lr}
ldr r3, [sp]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
str r4, [r1]
str r5, [r1, #4]
str r6, [r1, #8]
str r7, [r1, #12]
subs r2, r2, #16
add lr, lr, #16
add r1, r1, #16
bne L_AES_CBC_encrypt_loop_block_128
L_AES_CBC_encrypt_end:
# Drop saved ks pointer, store the final chaining value back into the
# caller's IV buffer, and return.
pop {r3, r9}
stm r9, {r4, r5, r6, r7}
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size AES_CBC_encrypt,.-AES_CBC_encrypt
#endif /* HAVE_AES_CBC */
#ifdef WOLFSSL_AES_COUNTER
# Indirection word holding the address of the AES encryption T-table
# (L_AES_ARM32_te_data), reachable with a pc-relative adr from the code.
.text
.type L_AES_ARM32_te_ctr, %object
.size L_AES_ARM32_te_ctr, 12
.align 4
L_AES_ARM32_te_ctr:
.word L_AES_ARM32_te_data
# AES_CTR_encrypt(in, out, len, ks, nr, ctr) -- AAPCS ARM32.
#   r0        = in  : source pointer (len bytes)
#   r1        = out : destination pointer
#   r2        = len : byte count; assumed a non-zero multiple of 16
#   r3        = ks  : expanded AES key schedule
#   [sp, #36] = nr  : round count (10/12/14)
#   [sp, #40] = ctr : 16-byte big-endian counter block; incremented per
#                     block and written back (big-endian) on return
# The counter is byte-swapped to native word order once up front so it can
# be incremented with plain adds/adc, and swapped back at the end.
.text
.align 4
.globl AES_CTR_encrypt
.type AES_CTR_encrypt, %function
AES_CTR_encrypt:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
# r12 = nr, r8 = counter pointer (stacked arguments).
ldr r12, [sp, #36]
ldr r8, [sp, #40]
# lr takes over as the input pointer; r0 becomes the T-table base.
mov lr, r0
adr r0, L_AES_ARM32_te_ctr
ldr r0, [r0]
# Load the counter and byte-swap each word to native order (REV, or the
# eor/bic/ror emulation on pre-ARMv6).
ldm r8, {r4, r5, r6, r7}
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
eor r10, r4, r4, ror #16
eor r11, r5, r5, ror #16
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
eor r4, r4, r10, lsr #8
eor r5, r5, r11, lsr #8
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r6, r6, #8
ror r7, r7, #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
# Keep the swapped counter in the caller's buffer while looping.
stm r8, {r4, r5, r6, r7}
# [sp] = ks pointer, [sp, #4] = counter pointer across the loops.
push {r3, r8}
cmp r12, #10
beq L_AES_CTR_encrypt_start_block_128
cmp r12, #12
beq L_AES_CTR_encrypt_start_block_192
L_AES_CTR_encrypt_loop_block_256:
push {r1, r2, lr}
# lr = counter pointer ([sp, #16] after the push above).
ldr lr, [sp, #16]
# Store counter+1 (128-bit increment; r7 is the least-significant word)
# for the next block, then encrypt the current counter in r4..r7.
adds r11, r7, #1
adcs r10, r6, #0
adcs r9, r5, #0
adc r8, r4, #0
stm lr, {r8, r9, r10, r11}
ldm r3!, {r8, r9, r10, r11}
# Round: 0 - XOR in key schedule
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
# r1 = 6 round-pair iterations for AES-256.
mov r1, #6
bl AES_encrypt_block
pop {r1, r2, lr}
# Reload the key schedule pointer for the next block.
ldr r3, [sp]
# Byte-swap the keystream block to big-endian before use.
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
# XOR the keystream with the input block and write the output.
ldr r8, [lr]
ldr r9, [lr, #4]
ldr r10, [lr, #8]
ldr r11, [lr, #12]
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
ldr r8, [sp, #4]
str r4, [r1]
str r5, [r1, #4]
str r6, [r1, #8]
str r7, [r1, #12]
# Reload the (already incremented) counter for the next iteration.
ldm r8, {r4, r5, r6, r7}
subs r2, r2, #16
add lr, lr, #16
add r1, r1, #16
bne L_AES_CTR_encrypt_loop_block_256
b L_AES_CTR_encrypt_end
L_AES_CTR_encrypt_start_block_192:
# AES-192 loop: identical to the 256-bit loop except r1 = 5.
L_AES_CTR_encrypt_loop_block_192:
push {r1, r2, lr}
ldr lr, [sp, #16]
adds r11, r7, #1
adcs r10, r6, #0
adcs r9, r5, #0
adc r8, r4, #0
stm lr, {r8, r9, r10, r11}
ldm r3!, {r8, r9, r10, r11}
# Round: 0 - XOR in key schedule
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
mov r1, #5
bl AES_encrypt_block
pop {r1, r2, lr}
ldr r3, [sp]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
ldr r8, [lr]
ldr r9, [lr, #4]
ldr r10, [lr, #8]
ldr r11, [lr, #12]
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
ldr r8, [sp, #4]
str r4, [r1]
str r5, [r1, #4]
str r6, [r1, #8]
str r7, [r1, #12]
ldm r8, {r4, r5, r6, r7}
subs r2, r2, #16
add lr, lr, #16
add r1, r1, #16
bne L_AES_CTR_encrypt_loop_block_192
b L_AES_CTR_encrypt_end
L_AES_CTR_encrypt_start_block_128:
# AES-128 loop: identical to the 256-bit loop except r1 = 4.
L_AES_CTR_encrypt_loop_block_128:
push {r1, r2, lr}
ldr lr, [sp, #16]
adds r11, r7, #1
adcs r10, r6, #0
adcs r9, r5, #0
adc r8, r4, #0
stm lr, {r8, r9, r10, r11}
ldm r3!, {r8, r9, r10, r11}
# Round: 0 - XOR in key schedule
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
mov r1, #4
bl AES_encrypt_block
pop {r1, r2, lr}
ldr r3, [sp]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
ldr r8, [lr]
ldr r9, [lr, #4]
ldr r10, [lr, #8]
ldr r11, [lr, #12]
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
ldr r8, [sp, #4]
str r4, [r1]
str r5, [r1, #4]
str r6, [r1, #8]
str r7, [r1, #12]
ldm r8, {r4, r5, r6, r7}
subs r2, r2, #16
add lr, lr, #16
add r1, r1, #16
bne L_AES_CTR_encrypt_loop_block_128
L_AES_CTR_encrypt_end:
pop {r3, r8}
# Byte-swap the counter back to big-endian and store it for the caller.
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
eor r10, r4, r4, ror #16
eor r11, r5, r5, ror #16
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
eor r4, r4, r10, lsr #8
eor r5, r5, r11, lsr #8
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r6, r6, #8
ror r7, r7, #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
stm r8, {r4, r5, r6, r7}
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size AES_CTR_encrypt,.-AES_CTR_encrypt
#endif /* WOLFSSL_AES_COUNTER */
#ifdef HAVE_AES_DECRYPT
#if defined(WOLFSSL_AES_DIRECT) || defined(WOLFSSL_AES_COUNTER) || defined(HAVE_AES_CBC)
# AES_decrypt_block: decrypt one 16-byte block held in registers.
# Internal helper with a register-based (non-AAPCS) contract:
#   r0    = inverse-cipher T-table base (L_AES_ARM32_td_data)
#   r1    = inner loop count, nr/2 - 1 (4 for AES-128, 5 for AES-192,
#           6 for AES-256); each iteration performs two decryption rounds
#   r2    = inverse S-box byte table (L_AES_ARM32_td4) for the final round
#   r3    = key schedule pointer, positioned past the round-0 key already
#           applied by the caller; advanced as round keys are consumed
#   r4-r7 = state words in (the caller byte-swaps to big-endian first);
#           replaced with the decrypted state on return
# Clobbers r8-r12 and lr.
# The nested #if ladders pick the cheapest byte-extract for the target
# architecture: ubfx (ARMv7+), uxtb with rotate (ARMv6), or lsl/lsr pairs
# (older cores).
.text
.align 4
.globl AES_decrypt_block
.type AES_decrypt_block, %function
AES_decrypt_block:
push {lr}
L_AES_decrypt_block_nr:
# First half-round of the pair: compute r8..r11 from state r4..r7 via
# four T-table lookups per output word (one per state byte), combining
# with rotates to realize the inverse MixColumns/ShiftRows.
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r8, r7, #8
lsr r8, r8, #24
#else
uxtb r8, r7, ror #16
#endif
#else
ubfx r8, r7, #16, #8
#endif
lsr r11, r4, #24
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r12, r6, #16
lsr r12, r12, #24
#else
uxtb r12, r6, ror #8
#endif
#else
ubfx r12, r6, #8, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r5, #24
lsr lr, lr, #24
#else
uxtb lr, r5
#endif
#else
ubfx lr, r5, #0, #8
#endif
ldr r8, [r0, r8, lsl #2]
ldr r11, [r0, r11, lsl #2]
ldr r12, [r0, r12, lsl #2]
ldr lr, [r0, lr, lsl #2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r9, r4, #8
lsr r9, r9, #24
#else
uxtb r9, r4, ror #16
#endif
#else
ubfx r9, r4, #16, #8
#endif
eor r8, r8, r11, ror #24
lsr r11, r5, #24
eor r8, r8, r12, ror #8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r12, r7, #16
lsr r12, r12, #24
#else
uxtb r12, r7, ror #8
#endif
#else
ubfx r12, r7, #8, #8
#endif
eor r8, r8, lr, ror #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r6, #24
lsr lr, lr, #24
#else
uxtb lr, r6
#endif
#else
ubfx lr, r6, #0, #8
#endif
ldr r9, [r0, r9, lsl #2]
ldr r11, [r0, r11, lsl #2]
ldr r12, [r0, r12, lsl #2]
ldr lr, [r0, lr, lsl #2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r10, r5, #8
lsr r10, r10, #24
#else
uxtb r10, r5, ror #16
#endif
#else
ubfx r10, r5, #16, #8
#endif
eor r9, r9, r11, ror #24
lsr r11, r6, #24
eor r9, r9, r12, ror #8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r12, r4, #16
lsr r12, r12, #24
#else
uxtb r12, r4, ror #8
#endif
#else
ubfx r12, r4, #8, #8
#endif
eor r9, r9, lr, ror #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r7, #24
lsr lr, lr, #24
#else
uxtb lr, r7
#endif
#else
ubfx lr, r7, #0, #8
#endif
ldr r10, [r0, r10, lsl #2]
ldr r11, [r0, r11, lsl #2]
ldr r12, [r0, r12, lsl #2]
ldr lr, [r0, lr, lsl #2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r4, r4, #24
lsr r4, r4, #24
#else
uxtb r4, r4
#endif
#else
ubfx r4, r4, #0, #8
#endif
eor r10, r10, r11, ror #24
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r11, r6, #8
lsr r11, r11, #24
#else
uxtb r11, r6, ror #16
#endif
#else
ubfx r11, r6, #16, #8
#endif
eor r10, r10, r12, ror #8
lsr r12, r7, #24
eor r10, r10, lr, ror #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r5, #16
lsr lr, lr, #24
#else
uxtb lr, r5, ror #8
#endif
#else
ubfx lr, r5, #8, #8
#endif
ldr r4, [r0, r4, lsl #2]
ldr r12, [r0, r12, lsl #2]
ldr r11, [r0, r11, lsl #2]
ldr lr, [r0, lr, lsl #2]
eor r12, r12, r4, ror #24
# Fetch this round's key words (r3 advances by 16 bytes).
ldm r3!, {r4, r5, r6, r7}
eor r11, r11, lr, ror #8
eor r11, r11, r12, ror #24
# XOR in Key Schedule
eor r8, r8, r4
eor r9, r9, r5
eor r10, r10, r6
eor r11, r11, r7
# Second half-round: same transform with source/destination register
# roles swapped, producing the next state back in r4..r7.
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r4, r11, #8
lsr r4, r4, #24
#else
uxtb r4, r11, ror #16
#endif
#else
ubfx r4, r11, #16, #8
#endif
lsr r7, r8, #24
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r12, r10, #16
lsr r12, r12, #24
#else
uxtb r12, r10, ror #8
#endif
#else
ubfx r12, r10, #8, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r9, #24
lsr lr, lr, #24
#else
uxtb lr, r9
#endif
#else
ubfx lr, r9, #0, #8
#endif
ldr r4, [r0, r4, lsl #2]
ldr r7, [r0, r7, lsl #2]
ldr r12, [r0, r12, lsl #2]
ldr lr, [r0, lr, lsl #2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r5, r8, #8
lsr r5, r5, #24
#else
uxtb r5, r8, ror #16
#endif
#else
ubfx r5, r8, #16, #8
#endif
eor r4, r4, r7, ror #24
lsr r7, r9, #24
eor r4, r4, r12, ror #8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r12, r11, #16
lsr r12, r12, #24
#else
uxtb r12, r11, ror #8
#endif
#else
ubfx r12, r11, #8, #8
#endif
eor r4, r4, lr, ror #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r10, #24
lsr lr, lr, #24
#else
uxtb lr, r10
#endif
#else
ubfx lr, r10, #0, #8
#endif
ldr r5, [r0, r5, lsl #2]
ldr r7, [r0, r7, lsl #2]
ldr r12, [r0, r12, lsl #2]
ldr lr, [r0, lr, lsl #2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r6, r9, #8
lsr r6, r6, #24
#else
uxtb r6, r9, ror #16
#endif
#else
ubfx r6, r9, #16, #8
#endif
eor r5, r5, r7, ror #24
lsr r7, r10, #24
eor r5, r5, r12, ror #8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r12, r8, #16
lsr r12, r12, #24
#else
uxtb r12, r8, ror #8
#endif
#else
ubfx r12, r8, #8, #8
#endif
eor r5, r5, lr, ror #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r11, #24
lsr lr, lr, #24
#else
uxtb lr, r11
#endif
#else
ubfx lr, r11, #0, #8
#endif
ldr r6, [r0, r6, lsl #2]
ldr r7, [r0, r7, lsl #2]
ldr r12, [r0, r12, lsl #2]
ldr lr, [r0, lr, lsl #2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r8, r8, #24
lsr r8, r8, #24
#else
uxtb r8, r8
#endif
#else
ubfx r8, r8, #0, #8
#endif
eor r6, r6, r7, ror #24
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r7, r10, #8
lsr r7, r7, #24
#else
uxtb r7, r10, ror #16
#endif
#else
ubfx r7, r10, #16, #8
#endif
eor r6, r6, r12, ror #8
lsr r12, r11, #24
eor r6, r6, lr, ror #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r9, #16
lsr lr, lr, #24
#else
uxtb lr, r9, ror #8
#endif
#else
ubfx lr, r9, #8, #8
#endif
ldr r8, [r0, r8, lsl #2]
ldr r12, [r0, r12, lsl #2]
ldr r7, [r0, r7, lsl #2]
ldr lr, [r0, lr, lsl #2]
eor r12, r12, r8, ror #24
ldm r3!, {r8, r9, r10, r11}
eor r7, r7, lr, ror #8
eor r7, r7, r12, ror #24
# XOR in Key Schedule
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
subs r1, r1, #1
bne L_AES_decrypt_block_nr
# Penultimate round: one more T-table round producing r8..r11.
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r8, r7, #8
lsr r8, r8, #24
#else
uxtb r8, r7, ror #16
#endif
#else
ubfx r8, r7, #16, #8
#endif
lsr r11, r4, #24
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r12, r6, #16
lsr r12, r12, #24
#else
uxtb r12, r6, ror #8
#endif
#else
ubfx r12, r6, #8, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r5, #24
lsr lr, lr, #24
#else
uxtb lr, r5
#endif
#else
ubfx lr, r5, #0, #8
#endif
ldr r8, [r0, r8, lsl #2]
ldr r11, [r0, r11, lsl #2]
ldr r12, [r0, r12, lsl #2]
ldr lr, [r0, lr, lsl #2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r9, r4, #8
lsr r9, r9, #24
#else
uxtb r9, r4, ror #16
#endif
#else
ubfx r9, r4, #16, #8
#endif
eor r8, r8, r11, ror #24
lsr r11, r5, #24
eor r8, r8, r12, ror #8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r12, r7, #16
lsr r12, r12, #24
#else
uxtb r12, r7, ror #8
#endif
#else
ubfx r12, r7, #8, #8
#endif
eor r8, r8, lr, ror #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r6, #24
lsr lr, lr, #24
#else
uxtb lr, r6
#endif
#else
ubfx lr, r6, #0, #8
#endif
ldr r9, [r0, r9, lsl #2]
ldr r11, [r0, r11, lsl #2]
ldr r12, [r0, r12, lsl #2]
ldr lr, [r0, lr, lsl #2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r10, r5, #8
lsr r10, r10, #24
#else
uxtb r10, r5, ror #16
#endif
#else
ubfx r10, r5, #16, #8
#endif
eor r9, r9, r11, ror #24
lsr r11, r6, #24
eor r9, r9, r12, ror #8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r12, r4, #16
lsr r12, r12, #24
#else
uxtb r12, r4, ror #8
#endif
#else
ubfx r12, r4, #8, #8
#endif
eor r9, r9, lr, ror #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r7, #24
lsr lr, lr, #24
#else
uxtb lr, r7
#endif
#else
ubfx lr, r7, #0, #8
#endif
ldr r10, [r0, r10, lsl #2]
ldr r11, [r0, r11, lsl #2]
ldr r12, [r0, r12, lsl #2]
ldr lr, [r0, lr, lsl #2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r4, r4, #24
lsr r4, r4, #24
#else
uxtb r4, r4
#endif
#else
ubfx r4, r4, #0, #8
#endif
eor r10, r10, r11, ror #24
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r11, r6, #8
lsr r11, r11, #24
#else
uxtb r11, r6, ror #16
#endif
#else
ubfx r11, r6, #16, #8
#endif
eor r10, r10, r12, ror #8
lsr r12, r7, #24
eor r10, r10, lr, ror #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r5, #16
lsr lr, lr, #24
#else
uxtb lr, r5, ror #8
#endif
#else
ubfx lr, r5, #8, #8
#endif
ldr r4, [r0, r4, lsl #2]
ldr r12, [r0, r12, lsl #2]
ldr r11, [r0, r11, lsl #2]
ldr lr, [r0, lr, lsl #2]
eor r12, r12, r4, ror #24
ldm r3!, {r4, r5, r6, r7}
eor r11, r11, lr, ror #8
eor r11, r11, r12, ror #24
# XOR in Key Schedule
eor r8, r8, r4
eor r9, r9, r5
eor r10, r10, r6
eor r11, r11, r7
# Final round: per-byte inverse S-box lookups from the td4 table (r2),
# reassembled with shifts, then XOR of the last round key (r3 left
# pointing at it, loaded without writeback).
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r4, r9, #24
lsr r4, r4, #24
#else
uxtb r4, r9
#endif
#else
ubfx r4, r9, #0, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r7, r10, #16
lsr r7, r7, #24
#else
uxtb r7, r10, ror #8
#endif
#else
ubfx r7, r10, #8, #8
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r12, r11, #8
lsr r12, r12, #24
#else
uxtb r12, r11, ror #16
#endif
#else
ubfx r12, r11, #16, #8
#endif
lsr lr, r8, #24
ldrb r4, [r2, r4]
ldrb r7, [r2, r7]
ldrb r12, [r2, r12]
ldrb lr, [r2, lr]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r5, r10, #24
lsr r5, r5, #24
#else
uxtb r5, r10
#endif
#else
ubfx r5, r10, #0, #8
#endif
eor r4, r4, r7, lsl #8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r7, r11, #16
lsr r7, r7, #24
#else
uxtb r7, r11, ror #8
#endif
#else
ubfx r7, r11, #8, #8
#endif
eor r4, r4, r12, lsl #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r12, r8, #8
lsr r12, r12, #24
#else
uxtb r12, r8, ror #16
#endif
#else
ubfx r12, r8, #16, #8
#endif
eor r4, r4, lr, lsl #24
lsr lr, r9, #24
ldrb r7, [r2, r7]
ldrb lr, [r2, lr]
ldrb r5, [r2, r5]
ldrb r12, [r2, r12]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r6, r11, #24
lsr r6, r6, #24
#else
uxtb r6, r11
#endif
#else
ubfx r6, r11, #0, #8
#endif
eor r5, r5, r7, lsl #8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r7, r8, #16
lsr r7, r7, #24
#else
uxtb r7, r8, ror #8
#endif
#else
ubfx r7, r8, #8, #8
#endif
eor r5, r5, r12, lsl #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r12, r9, #8
lsr r12, r12, #24
#else
uxtb r12, r9, ror #16
#endif
#else
ubfx r12, r9, #16, #8
#endif
eor r5, r5, lr, lsl #24
lsr lr, r10, #24
ldrb r7, [r2, r7]
ldrb lr, [r2, lr]
ldrb r6, [r2, r6]
ldrb r12, [r2, r12]
lsr r11, r11, #24
eor r6, r6, r7, lsl #8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r7, r8, #24
lsr r7, r7, #24
#else
uxtb r7, r8
#endif
#else
ubfx r7, r8, #0, #8
#endif
eor r6, r6, r12, lsl #16
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl r12, r9, #16
lsr r12, r12, #24
#else
uxtb r12, r9, ror #8
#endif
#else
ubfx r12, r9, #8, #8
#endif
eor r6, r6, lr, lsl #24
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
lsl lr, r10, #8
lsr lr, lr, #24
#else
uxtb lr, r10, ror #16
#endif
#else
ubfx lr, r10, #16, #8
#endif
ldrb r11, [r2, r11]
ldrb r12, [r2, r12]
ldrb r7, [r2, r7]
ldrb lr, [r2, lr]
eor r12, r12, r11, lsl #16
ldm r3, {r8, r9, r10, r11}
eor r7, r7, r12, lsl #8
eor r7, r7, lr, lsl #16
# XOR in Key Schedule
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
pop {pc}
.size AES_decrypt_block,.-AES_decrypt_block
# Indirection word holding the address of the inverse-cipher T-table
# (L_AES_ARM32_td_data, defined elsewhere in this file); loaded via a
# pc-relative adr + ldr by the decrypt entry points below.
.text
.type L_AES_ARM32_td_ecb, %object
.size L_AES_ARM32_td_ecb, 12
.align 4
L_AES_ARM32_td_ecb:
.word L_AES_ARM32_td_data
# AES inverse S-box (Td4): 256-byte table used by AES_decrypt_block for
# the final round's per-byte substitution (indexed by ciphertext-state
# byte, yields InvSubBytes output).  Values must match FIPS-197.
.text
.type L_AES_ARM32_td4, %object
.size L_AES_ARM32_td4, 256
.align 4
L_AES_ARM32_td4:
.byte 0x52
.byte 0x9
.byte 0x6a
.byte 0xd5
.byte 0x30
.byte 0x36
.byte 0xa5
.byte 0x38
.byte 0xbf
.byte 0x40
.byte 0xa3
.byte 0x9e
.byte 0x81
.byte 0xf3
.byte 0xd7
.byte 0xfb
.byte 0x7c
.byte 0xe3
.byte 0x39
.byte 0x82
.byte 0x9b
.byte 0x2f
.byte 0xff
.byte 0x87
.byte 0x34
.byte 0x8e
.byte 0x43
.byte 0x44
.byte 0xc4
.byte 0xde
.byte 0xe9
.byte 0xcb
.byte 0x54
.byte 0x7b
.byte 0x94
.byte 0x32
.byte 0xa6
.byte 0xc2
.byte 0x23
.byte 0x3d
.byte 0xee
.byte 0x4c
.byte 0x95
.byte 0xb
.byte 0x42
.byte 0xfa
.byte 0xc3
.byte 0x4e
.byte 0x8
.byte 0x2e
.byte 0xa1
.byte 0x66
.byte 0x28
.byte 0xd9
.byte 0x24
.byte 0xb2
.byte 0x76
.byte 0x5b
.byte 0xa2
.byte 0x49
.byte 0x6d
.byte 0x8b
.byte 0xd1
.byte 0x25
.byte 0x72
.byte 0xf8
.byte 0xf6
.byte 0x64
.byte 0x86
.byte 0x68
.byte 0x98
.byte 0x16
.byte 0xd4
.byte 0xa4
.byte 0x5c
.byte 0xcc
.byte 0x5d
.byte 0x65
.byte 0xb6
.byte 0x92
.byte 0x6c
.byte 0x70
.byte 0x48
.byte 0x50
.byte 0xfd
.byte 0xed
.byte 0xb9
.byte 0xda
.byte 0x5e
.byte 0x15
.byte 0x46
.byte 0x57
.byte 0xa7
.byte 0x8d
.byte 0x9d
.byte 0x84
.byte 0x90
.byte 0xd8
.byte 0xab
.byte 0x0
.byte 0x8c
.byte 0xbc
.byte 0xd3
.byte 0xa
.byte 0xf7
.byte 0xe4
.byte 0x58
.byte 0x5
.byte 0xb8
.byte 0xb3
.byte 0x45
.byte 0x6
.byte 0xd0
.byte 0x2c
.byte 0x1e
.byte 0x8f
.byte 0xca
.byte 0x3f
.byte 0xf
.byte 0x2
.byte 0xc1
.byte 0xaf
.byte 0xbd
.byte 0x3
.byte 0x1
.byte 0x13
.byte 0x8a
.byte 0x6b
.byte 0x3a
.byte 0x91
.byte 0x11
.byte 0x41
.byte 0x4f
.byte 0x67
.byte 0xdc
.byte 0xea
.byte 0x97
.byte 0xf2
.byte 0xcf
.byte 0xce
.byte 0xf0
.byte 0xb4
.byte 0xe6
.byte 0x73
.byte 0x96
.byte 0xac
.byte 0x74
.byte 0x22
.byte 0xe7
.byte 0xad
.byte 0x35
.byte 0x85
.byte 0xe2
.byte 0xf9
.byte 0x37
.byte 0xe8
.byte 0x1c
.byte 0x75
.byte 0xdf
.byte 0x6e
.byte 0x47
.byte 0xf1
.byte 0x1a
.byte 0x71
.byte 0x1d
.byte 0x29
.byte 0xc5
.byte 0x89
.byte 0x6f
.byte 0xb7
.byte 0x62
.byte 0xe
.byte 0xaa
.byte 0x18
.byte 0xbe
.byte 0x1b
.byte 0xfc
.byte 0x56
.byte 0x3e
.byte 0x4b
.byte 0xc6
.byte 0xd2
.byte 0x79
.byte 0x20
.byte 0x9a
.byte 0xdb
.byte 0xc0
.byte 0xfe
.byte 0x78
.byte 0xcd
.byte 0x5a
.byte 0xf4
.byte 0x1f
.byte 0xdd
.byte 0xa8
.byte 0x33
.byte 0x88
.byte 0x7
.byte 0xc7
.byte 0x31
.byte 0xb1
.byte 0x12
.byte 0x10
.byte 0x59
.byte 0x27
.byte 0x80
.byte 0xec
.byte 0x5f
.byte 0x60
.byte 0x51
.byte 0x7f
.byte 0xa9
.byte 0x19
.byte 0xb5
.byte 0x4a
.byte 0xd
.byte 0x2d
.byte 0xe5
.byte 0x7a
.byte 0x9f
.byte 0x93
.byte 0xc9
.byte 0x9c
.byte 0xef
.byte 0xa0
.byte 0xe0
.byte 0x3b
.byte 0x4d
.byte 0xae
.byte 0x2a
.byte 0xf5
.byte 0xb0
.byte 0xc8
.byte 0xeb
.byte 0xbb
.byte 0x3c
.byte 0x83
.byte 0x53
.byte 0x99
.byte 0x61
.byte 0x17
.byte 0x2b
.byte 0x4
.byte 0x7e
.byte 0xba
.byte 0x77
.byte 0xd6
.byte 0x26
.byte 0xe1
.byte 0x69
.byte 0x14
.byte 0x63
.byte 0x55
.byte 0x21
.byte 0xc
.byte 0x7d
#if defined(WOLFSSL_AES_DIRECT) || defined(WOLFSSL_AES_COUNTER)
.text
.align 4
.globl AES_ECB_decrypt
.type AES_ECB_decrypt, %function
AES_ECB_decrypt:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
ldr r8, [sp, #36]
mov lr, r0
adr r0, L_AES_ARM32_td_ecb
ldr r0, [r0]
mov r12, r2
adr r2, L_AES_ARM32_td4
cmp r8, #10
beq L_AES_ECB_decrypt_start_block_128
cmp r8, #12
beq L_AES_ECB_decrypt_start_block_192
L_AES_ECB_decrypt_loop_block_256:
ldr r4, [lr]
ldr r5, [lr, #4]
ldr r6, [lr, #8]
ldr r7, [lr, #12]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
push {r1, r3, r12, lr}
ldm r3!, {r8, r9, r10, r11}
# Round: 0 - XOR in key schedule
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
mov r1, #6
bl AES_decrypt_block
pop {r1, r3, r12, lr}
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
str r4, [r1]
str r5, [r1, #4]
str r6, [r1, #8]
str r7, [r1, #12]
subs r12, r12, #16
add lr, lr, #16
add r1, r1, #16
bne L_AES_ECB_decrypt_loop_block_256
b L_AES_ECB_decrypt_end
L_AES_ECB_decrypt_start_block_192:
L_AES_ECB_decrypt_loop_block_192:
ldr r4, [lr]
ldr r5, [lr, #4]
ldr r6, [lr, #8]
ldr r7, [lr, #12]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
push {r1, r3, r12, lr}
ldm r3!, {r8, r9, r10, r11}
# Round: 0 - XOR in key schedule
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
mov r1, #5
bl AES_decrypt_block
pop {r1, r3, r12, lr}
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
str r4, [r1]
str r5, [r1, #4]
str r6, [r1, #8]
str r7, [r1, #12]
subs r12, r12, #16
add lr, lr, #16
add r1, r1, #16
bne L_AES_ECB_decrypt_loop_block_192
b L_AES_ECB_decrypt_end
L_AES_ECB_decrypt_start_block_128:
L_AES_ECB_decrypt_loop_block_128:
ldr r4, [lr]
ldr r5, [lr, #4]
ldr r6, [lr, #8]
ldr r7, [lr, #12]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
push {r1, r3, r12, lr}
ldm r3!, {r8, r9, r10, r11}
# Round: 0 - XOR in key schedule
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
mov r1, #4
bl AES_decrypt_block
pop {r1, r3, r12, lr}
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#else
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
str r4, [r1]
str r5, [r1, #4]
str r6, [r1, #8]
str r7, [r1, #12]
subs r12, r12, #16
add lr, lr, #16
add r1, r1, #16
bne L_AES_ECB_decrypt_loop_block_128
L_AES_ECB_decrypt_end:
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size AES_ECB_decrypt,.-AES_ECB_decrypt
#endif /* WOLFSSL_AES_DIRECT || WOLFSSL_AES_COUNTER */
#ifdef HAVE_AES_CBC
        .text
        .align 4
        .globl AES_CBC_decrypt
        .type AES_CBC_decrypt, %function
        # AES_CBC_decrypt(in, out, len, ks, nr, iv)
        #   r0      = in   (ciphertext; len is a multiple of 16)
        #   r1      = out  (plaintext)
        #   r2      = len  (bytes)
        #   r3      = ks   (decryption key schedule)
        #   [sp+36] = nr   (round count: 10 / 12 / 14, after the 9-reg push)
        #   [sp+40] = iv   (32-byte buffer: iv[0..15] holds the previous
        #                   ciphertext / IV, iv[16..31] is scratch)
        # Two blocks are decrypted per loop iteration; the current ciphertext
        # is stashed alternately in iv[16..31] and iv[0..15] so the previous
        # ciphertext survives AES_decrypt_block for the CBC XOR.
        # Clobbers r0-r12, lr; calls AES_decrypt_block.
AES_CBC_decrypt:
        push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
        ldr r8, [sp, #36]
        ldr r4, [sp, #40]
        mov lr, r0
        adr r0, L_AES_ARM32_td_ecb
        ldr r0, [r0]
        mov r12, r2
        adr r2, L_AES_ARM32_td4
        # Keep the two pointers on the stack: [sp] = ks, [sp+4] = iv.
        push {r3, r4}
        # Dispatch on round count: 10 -> AES-128, 12 -> AES-192, else AES-256.
        cmp r8, #10
        beq L_AES_CBC_decrypt_loop_block_128
        cmp r8, #12
        beq L_AES_CBC_decrypt_loop_block_192
L_AES_CBC_decrypt_loop_block_256:
        # --- first block of the pair (AES-256, 6 "double" rounds) ---
        push {r1, r12, lr}
        # Load one ciphertext block (lr = in).
        ldr r4, [lr]
        ldr r5, [lr, #4]
        ldr r6, [lr, #8]
        ldr r7, [lr, #12]
        # lr = iv buffer ([sp+16] after the push above); stash this
        # ciphertext in iv[16..31] for the NEXT block's CBC XOR.
        ldr lr, [sp, #16]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
        str r4, [lr, #16]
        str r5, [lr, #20]
#else
        strd r4, r5, [lr, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
        str r6, [lr, #24]
        str r7, [lr, #28]
#else
        strd r6, r7, [lr, #24]
#endif
        # Byte-reverse each word (REV is unavailable before ARMv6).
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
        eor r8, r4, r4, ror #16
        eor r9, r5, r5, ror #16
        eor r10, r6, r6, ror #16
        eor r11, r7, r7, ror #16
        bic r8, r8, #0xff0000
        bic r9, r9, #0xff0000
        bic r10, r10, #0xff0000
        bic r11, r11, #0xff0000
        ror r4, r4, #8
        ror r5, r5, #8
        ror r6, r6, #8
        ror r7, r7, #8
        eor r4, r4, r8, lsr #8
        eor r5, r5, r9, lsr #8
        eor r6, r6, r10, lsr #8
        eor r7, r7, r11, lsr #8
#else
        rev r4, r4
        rev r5, r5
        rev r6, r6
        rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
        ldm r3!, {r8, r9, r10, r11}
        # Round: 0 - XOR in key schedule
        eor r4, r4, r8
        eor r5, r5, r9
        eor r6, r6, r10
        eor r7, r7, r11
        mov r1, #6
        bl AES_decrypt_block
        # Reload the iv buffer pointer (r4-r7 now hold the decrypted state).
        ldr lr, [sp, #16]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
        eor r8, r4, r4, ror #16
        eor r9, r5, r5, ror #16
        eor r10, r6, r6, ror #16
        eor r11, r7, r7, ror #16
        bic r8, r8, #0xff0000
        bic r9, r9, #0xff0000
        bic r10, r10, #0xff0000
        bic r11, r11, #0xff0000
        ror r4, r4, #8
        ror r5, r5, #8
        ror r6, r6, #8
        ror r7, r7, #8
        eor r4, r4, r8, lsr #8
        eor r5, r5, r9, lsr #8
        eor r6, r6, r10, lsr #8
        eor r7, r7, r11, lsr #8
#else
        rev r4, r4
        rev r5, r5
        rev r6, r6
        rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
        # CBC XOR with the previous ciphertext / IV kept in iv[0..15].
        ldm lr, {r8, r9, r10, r11}
        pop {r1, r12, lr}
        # Reload the key schedule pointer for the next block.
        ldr r3, [sp]
        eor r4, r4, r8
        eor r5, r5, r9
        eor r6, r6, r10
        eor r7, r7, r11
        str r4, [r1]
        str r5, [r1, #4]
        str r6, [r1, #8]
        str r7, [r1, #12]
        subs r12, r12, #16
        add lr, lr, #16
        add r1, r1, #16
        # Odd block count: the last ciphertext was stashed in iv[16..31].
        beq L_AES_CBC_decrypt_end_odd
        # --- second block of the pair: same flow, but the ciphertext is
        # stashed in iv[0..15] and the previous one read from iv[16..31] ---
        push {r1, r12, lr}
        ldr r4, [lr]
        ldr r5, [lr, #4]
        ldr r6, [lr, #8]
        ldr r7, [lr, #12]
        ldr lr, [sp, #16]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
        str r4, [lr]
        str r5, [lr, #4]
#else
        strd r4, r5, [lr]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
        str r6, [lr, #8]
        str r7, [lr, #12]
#else
        strd r6, r7, [lr, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
        eor r8, r4, r4, ror #16
        eor r9, r5, r5, ror #16
        eor r10, r6, r6, ror #16
        eor r11, r7, r7, ror #16
        bic r8, r8, #0xff0000
        bic r9, r9, #0xff0000
        bic r10, r10, #0xff0000
        bic r11, r11, #0xff0000
        ror r4, r4, #8
        ror r5, r5, #8
        ror r6, r6, #8
        ror r7, r7, #8
        eor r4, r4, r8, lsr #8
        eor r5, r5, r9, lsr #8
        eor r6, r6, r10, lsr #8
        eor r7, r7, r11, lsr #8
#else
        rev r4, r4
        rev r5, r5
        rev r6, r6
        rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
        ldm r3!, {r8, r9, r10, r11}
        # Round: 0 - XOR in key schedule
        eor r4, r4, r8
        eor r5, r5, r9
        eor r6, r6, r10
        eor r7, r7, r11
        mov r1, #6
        bl AES_decrypt_block
        ldr lr, [sp, #16]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
        eor r8, r4, r4, ror #16
        eor r9, r5, r5, ror #16
        eor r10, r6, r6, ror #16
        eor r11, r7, r7, ror #16
        bic r8, r8, #0xff0000
        bic r9, r9, #0xff0000
        bic r10, r10, #0xff0000
        bic r11, r11, #0xff0000
        ror r4, r4, #8
        ror r5, r5, #8
        ror r6, r6, #8
        ror r7, r7, #8
        eor r4, r4, r8, lsr #8
        eor r5, r5, r9, lsr #8
        eor r6, r6, r10, lsr #8
        eor r7, r7, r11, lsr #8
#else
        rev r4, r4
        rev r5, r5
        rev r6, r6
        rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
        # Previous ciphertext (stashed by the first half) from iv[16..31].
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
        ldr r8, [lr, #16]
        ldr r9, [lr, #20]
#else
        ldrd r8, r9, [lr, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
        ldr r10, [lr, #24]
        ldr r11, [lr, #28]
#else
        ldrd r10, r11, [lr, #24]
#endif
        pop {r1, r12, lr}
        ldr r3, [sp]
        eor r4, r4, r8
        eor r5, r5, r9
        eor r6, r6, r10
        eor r7, r7, r11
        str r4, [r1]
        str r5, [r1, #4]
        str r6, [r1, #8]
        str r7, [r1, #12]
        subs r12, r12, #16
        add lr, lr, #16
        add r1, r1, #16
        bne L_AES_CBC_decrypt_loop_block_256
        b L_AES_CBC_decrypt_end
L_AES_CBC_decrypt_loop_block_192:
        # Same structure as the AES-256 path, with 5 "double" rounds (r1 = 5).
        push {r1, r12, lr}
        ldr r4, [lr]
        ldr r5, [lr, #4]
        ldr r6, [lr, #8]
        ldr r7, [lr, #12]
        ldr lr, [sp, #16]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
        str r4, [lr, #16]
        str r5, [lr, #20]
#else
        strd r4, r5, [lr, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
        str r6, [lr, #24]
        str r7, [lr, #28]
#else
        strd r6, r7, [lr, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
        eor r8, r4, r4, ror #16
        eor r9, r5, r5, ror #16
        eor r10, r6, r6, ror #16
        eor r11, r7, r7, ror #16
        bic r8, r8, #0xff0000
        bic r9, r9, #0xff0000
        bic r10, r10, #0xff0000
        bic r11, r11, #0xff0000
        ror r4, r4, #8
        ror r5, r5, #8
        ror r6, r6, #8
        ror r7, r7, #8
        eor r4, r4, r8, lsr #8
        eor r5, r5, r9, lsr #8
        eor r6, r6, r10, lsr #8
        eor r7, r7, r11, lsr #8
#else
        rev r4, r4
        rev r5, r5
        rev r6, r6
        rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
        ldm r3!, {r8, r9, r10, r11}
        # Round: 0 - XOR in key schedule
        eor r4, r4, r8
        eor r5, r5, r9
        eor r6, r6, r10
        eor r7, r7, r11
        mov r1, #5
        bl AES_decrypt_block
        ldr lr, [sp, #16]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
        eor r8, r4, r4, ror #16
        eor r9, r5, r5, ror #16
        eor r10, r6, r6, ror #16
        eor r11, r7, r7, ror #16
        bic r8, r8, #0xff0000
        bic r9, r9, #0xff0000
        bic r10, r10, #0xff0000
        bic r11, r11, #0xff0000
        ror r4, r4, #8
        ror r5, r5, #8
        ror r6, r6, #8
        ror r7, r7, #8
        eor r4, r4, r8, lsr #8
        eor r5, r5, r9, lsr #8
        eor r6, r6, r10, lsr #8
        eor r7, r7, r11, lsr #8
#else
        rev r4, r4
        rev r5, r5
        rev r6, r6
        rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
        ldm lr, {r8, r9, r10, r11}
        pop {r1, r12, lr}
        ldr r3, [sp]
        eor r4, r4, r8
        eor r5, r5, r9
        eor r6, r6, r10
        eor r7, r7, r11
        str r4, [r1]
        str r5, [r1, #4]
        str r6, [r1, #8]
        str r7, [r1, #12]
        subs r12, r12, #16
        add lr, lr, #16
        add r1, r1, #16
        beq L_AES_CBC_decrypt_end_odd
        push {r1, r12, lr}
        ldr r4, [lr]
        ldr r5, [lr, #4]
        ldr r6, [lr, #8]
        ldr r7, [lr, #12]
        ldr lr, [sp, #16]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
        str r4, [lr]
        str r5, [lr, #4]
#else
        strd r4, r5, [lr]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
        str r6, [lr, #8]
        str r7, [lr, #12]
#else
        strd r6, r7, [lr, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
        eor r8, r4, r4, ror #16
        eor r9, r5, r5, ror #16
        eor r10, r6, r6, ror #16
        eor r11, r7, r7, ror #16
        bic r8, r8, #0xff0000
        bic r9, r9, #0xff0000
        bic r10, r10, #0xff0000
        bic r11, r11, #0xff0000
        ror r4, r4, #8
        ror r5, r5, #8
        ror r6, r6, #8
        ror r7, r7, #8
        eor r4, r4, r8, lsr #8
        eor r5, r5, r9, lsr #8
        eor r6, r6, r10, lsr #8
        eor r7, r7, r11, lsr #8
#else
        rev r4, r4
        rev r5, r5
        rev r6, r6
        rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
        ldm r3!, {r8, r9, r10, r11}
        # Round: 0 - XOR in key schedule
        eor r4, r4, r8
        eor r5, r5, r9
        eor r6, r6, r10
        eor r7, r7, r11
        mov r1, #5
        bl AES_decrypt_block
        ldr lr, [sp, #16]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
        eor r8, r4, r4, ror #16
        eor r9, r5, r5, ror #16
        eor r10, r6, r6, ror #16
        eor r11, r7, r7, ror #16
        bic r8, r8, #0xff0000
        bic r9, r9, #0xff0000
        bic r10, r10, #0xff0000
        bic r11, r11, #0xff0000
        ror r4, r4, #8
        ror r5, r5, #8
        ror r6, r6, #8
        ror r7, r7, #8
        eor r4, r4, r8, lsr #8
        eor r5, r5, r9, lsr #8
        eor r6, r6, r10, lsr #8
        eor r7, r7, r11, lsr #8
#else
        rev r4, r4
        rev r5, r5
        rev r6, r6
        rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
        ldr r8, [lr, #16]
        ldr r9, [lr, #20]
#else
        ldrd r8, r9, [lr, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
        ldr r10, [lr, #24]
        ldr r11, [lr, #28]
#else
        ldrd r10, r11, [lr, #24]
#endif
        pop {r1, r12, lr}
        ldr r3, [sp]
        eor r4, r4, r8
        eor r5, r5, r9
        eor r6, r6, r10
        eor r7, r7, r11
        str r4, [r1]
        str r5, [r1, #4]
        str r6, [r1, #8]
        str r7, [r1, #12]
        subs r12, r12, #16
        add lr, lr, #16
        add r1, r1, #16
        bne L_AES_CBC_decrypt_loop_block_192
        b L_AES_CBC_decrypt_end
L_AES_CBC_decrypt_loop_block_128:
        # Same structure as the AES-256 path, with 4 "double" rounds (r1 = 4).
        push {r1, r12, lr}
        ldr r4, [lr]
        ldr r5, [lr, #4]
        ldr r6, [lr, #8]
        ldr r7, [lr, #12]
        ldr lr, [sp, #16]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
        str r4, [lr, #16]
        str r5, [lr, #20]
#else
        strd r4, r5, [lr, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
        str r6, [lr, #24]
        str r7, [lr, #28]
#else
        strd r6, r7, [lr, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
        eor r8, r4, r4, ror #16
        eor r9, r5, r5, ror #16
        eor r10, r6, r6, ror #16
        eor r11, r7, r7, ror #16
        bic r8, r8, #0xff0000
        bic r9, r9, #0xff0000
        bic r10, r10, #0xff0000
        bic r11, r11, #0xff0000
        ror r4, r4, #8
        ror r5, r5, #8
        ror r6, r6, #8
        ror r7, r7, #8
        eor r4, r4, r8, lsr #8
        eor r5, r5, r9, lsr #8
        eor r6, r6, r10, lsr #8
        eor r7, r7, r11, lsr #8
#else
        rev r4, r4
        rev r5, r5
        rev r6, r6
        rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
        ldm r3!, {r8, r9, r10, r11}
        # Round: 0 - XOR in key schedule
        eor r4, r4, r8
        eor r5, r5, r9
        eor r6, r6, r10
        eor r7, r7, r11
        mov r1, #4
        bl AES_decrypt_block
        ldr lr, [sp, #16]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
        eor r8, r4, r4, ror #16
        eor r9, r5, r5, ror #16
        eor r10, r6, r6, ror #16
        eor r11, r7, r7, ror #16
        bic r8, r8, #0xff0000
        bic r9, r9, #0xff0000
        bic r10, r10, #0xff0000
        bic r11, r11, #0xff0000
        ror r4, r4, #8
        ror r5, r5, #8
        ror r6, r6, #8
        ror r7, r7, #8
        eor r4, r4, r8, lsr #8
        eor r5, r5, r9, lsr #8
        eor r6, r6, r10, lsr #8
        eor r7, r7, r11, lsr #8
#else
        rev r4, r4
        rev r5, r5
        rev r6, r6
        rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
        ldm lr, {r8, r9, r10, r11}
        pop {r1, r12, lr}
        ldr r3, [sp]
        eor r4, r4, r8
        eor r5, r5, r9
        eor r6, r6, r10
        eor r7, r7, r11
        str r4, [r1]
        str r5, [r1, #4]
        str r6, [r1, #8]
        str r7, [r1, #12]
        subs r12, r12, #16
        add lr, lr, #16
        add r1, r1, #16
        beq L_AES_CBC_decrypt_end_odd
        push {r1, r12, lr}
        ldr r4, [lr]
        ldr r5, [lr, #4]
        ldr r6, [lr, #8]
        ldr r7, [lr, #12]
        ldr lr, [sp, #16]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
        str r4, [lr]
        str r5, [lr, #4]
#else
        strd r4, r5, [lr]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
        str r6, [lr, #8]
        str r7, [lr, #12]
#else
        strd r6, r7, [lr, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
        eor r8, r4, r4, ror #16
        eor r9, r5, r5, ror #16
        eor r10, r6, r6, ror #16
        eor r11, r7, r7, ror #16
        bic r8, r8, #0xff0000
        bic r9, r9, #0xff0000
        bic r10, r10, #0xff0000
        bic r11, r11, #0xff0000
        ror r4, r4, #8
        ror r5, r5, #8
        ror r6, r6, #8
        ror r7, r7, #8
        eor r4, r4, r8, lsr #8
        eor r5, r5, r9, lsr #8
        eor r6, r6, r10, lsr #8
        eor r7, r7, r11, lsr #8
#else
        rev r4, r4
        rev r5, r5
        rev r6, r6
        rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
        ldm r3!, {r8, r9, r10, r11}
        # Round: 0 - XOR in key schedule
        eor r4, r4, r8
        eor r5, r5, r9
        eor r6, r6, r10
        eor r7, r7, r11
        mov r1, #4
        bl AES_decrypt_block
        ldr lr, [sp, #16]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
        eor r8, r4, r4, ror #16
        eor r9, r5, r5, ror #16
        eor r10, r6, r6, ror #16
        eor r11, r7, r7, ror #16
        bic r8, r8, #0xff0000
        bic r9, r9, #0xff0000
        bic r10, r10, #0xff0000
        bic r11, r11, #0xff0000
        ror r4, r4, #8
        ror r5, r5, #8
        ror r6, r6, #8
        ror r7, r7, #8
        eor r4, r4, r8, lsr #8
        eor r5, r5, r9, lsr #8
        eor r6, r6, r10, lsr #8
        eor r7, r7, r11, lsr #8
#else
        rev r4, r4
        rev r5, r5
        rev r6, r6
        rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
        ldr r8, [lr, #16]
        ldr r9, [lr, #20]
#else
        ldrd r8, r9, [lr, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
        ldr r10, [lr, #24]
        ldr r11, [lr, #28]
#else
        ldrd r10, r11, [lr, #24]
#endif
        pop {r1, r12, lr}
        ldr r3, [sp]
        eor r4, r4, r8
        eor r5, r5, r9
        eor r6, r6, r10
        eor r7, r7, r11
        str r4, [r1]
        str r5, [r1, #4]
        str r6, [r1, #8]
        str r7, [r1, #12]
        subs r12, r12, #16
        add lr, lr, #16
        add r1, r1, #16
        bne L_AES_CBC_decrypt_loop_block_128
        b L_AES_CBC_decrypt_end
L_AES_CBC_decrypt_end_odd:
        # Odd number of blocks: the final ciphertext was stashed in
        # iv[16..31]; copy it down to iv[0..15] so iv holds the next IV.
        ldr r4, [sp, #4]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
        ldr r8, [r4, #16]
        ldr r9, [r4, #20]
#else
        ldrd r8, r9, [r4, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
        ldr r10, [r4, #24]
        ldr r11, [r4, #28]
#else
        ldrd r10, r11, [r4, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
        str r8, [r4]
        str r9, [r4, #4]
#else
        strd r8, r9, [r4]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
        str r10, [r4, #8]
        str r11, [r4, #12]
#else
        strd r10, r11, [r4, #8]
#endif
L_AES_CBC_decrypt_end:
        # Drop the saved ks/iv pointers and restore callee-saved registers.
        pop {r3, r4}
        pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
        .size AES_CBC_decrypt,.-AES_CBC_decrypt
#endif /* HAVE_AES_CBC */
#endif /* WOLFSSL_AES_DIRECT || WOLFSSL_AES_COUNTER || HAVE_AES_CBC */
#endif /* HAVE_AES_DECRYPT */
#ifdef HAVE_AESGCM
        .text
        .type L_GCM_gmult_len_r, %object
        .size L_GCM_gmult_len_r, 64
        .align 4
        # Remainder (reduction) table for the 4-bit table GHASH multiply:
        # entry i is XORed into the top word when the 4 bits `i` are shifted
        # out of the bottom of the 128-bit accumulator.
L_GCM_gmult_len_r:
        .word 0x0
        .word 0x1c200000
        .word 0x38400000
        .word 0x24600000
        .word 0x70800000
        .word 0x6ca00000
        .word 0x48c00000
        .word 0x54e00000
        .word 0xe1000000
        .word 0xfd200000
        .word 0xd9400000
        .word 0xc5600000
        .word 0x91800000
        .word 0x8da00000
        .word 0xa9c00000
        .word 0xb5e00000
        .text
        .align 4
        .globl GCM_gmult_len
        .type GCM_gmult_len, %function
        # GCM_gmult_len(x, m, data, len)
        #   r0 = x    (16-byte GHASH accumulator, big-endian, updated in place)
        #   r1 = m    (16-entry x 16-byte nibble lookup table; built by the GCM
        #              key setup — presumably the shifted multiples of H;
        #              confirm against the table-generation code)
        #   r2 = data (input blocks; len is a multiple of 16)
        #   r3 = len  (bytes)
        # For each block: accumulator ^= block, then a GF(2^128) multiply is
        # performed 4 bits at a time — table lookup on each nibble, 4-bit right
        # shift of the 128-bit value in r8-r11, reduction via L_GCM_gmult_len_r.
        # Clobbers r3-r12, lr.
GCM_gmult_len:
        push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
        adr lr, L_GCM_gmult_len_r
L_GCM_gmult_len_start_block:
        # Save remaining length; r3 is needed as scratch inside the block.
        push {r3}
        # Word 3 (bytes 12..15): XOR in the data word and initialise the
        # product r8-r11 from the table entry for the top nibble.
        ldr r12, [r0, #12]
        ldr r3, [r2, #12]
        eor r12, r12, r3
        lsr r3, r12, #24
        and r3, r3, #15
        add r3, r1, r3, lsl #4
        ldm r3, {r8, r9, r10, r11}
        # Each chunk below: shift the 128-bit product right 4 bits, reduce the
        # nibble shifted out via the remainder table, XOR in the table entry
        # for the next nibble of r12.
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #28
        eor r11, r11, r10, lsl #28
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #16
        eor r11, r11, r10, lsl #28
        and r4, r4, #15
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #20
        eor r11, r11, r10, lsl #28
        and r4, r4, #15
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #8
        eor r11, r11, r10, lsl #28
        and r4, r4, #15
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #12
        eor r11, r11, r10, lsl #28
        and r4, r4, #15
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        and r4, r12, #15
        eor r11, r11, r10, lsl #28
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #4
        eor r11, r11, r10, lsl #28
        and r4, r4, #15
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        # Final shift/reduce for this word (no table entry to add).
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        eor r11, r11, r10, lsl #28
        ldr r3, [lr, r3, lsl #2]
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        # Word 2 (bytes 8..11): XOR in data, accumulate the top-nibble entry.
        ldr r12, [r0, #8]
        ldr r3, [r2, #8]
        eor r12, r12, r3
        lsr r3, r12, #24
        and r3, r3, #15
        add r3, r1, r3, lsl #4
        ldm r3, {r4, r5, r6, r7}
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #28
        eor r11, r11, r10, lsl #28
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #16
        eor r11, r11, r10, lsl #28
        and r4, r4, #15
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #20
        eor r11, r11, r10, lsl #28
        and r4, r4, #15
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #8
        eor r11, r11, r10, lsl #28
        and r4, r4, #15
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #12
        eor r11, r11, r10, lsl #28
        and r4, r4, #15
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        and r4, r12, #15
        eor r11, r11, r10, lsl #28
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #4
        eor r11, r11, r10, lsl #28
        and r4, r4, #15
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        eor r11, r11, r10, lsl #28
        ldr r3, [lr, r3, lsl #2]
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        # Word 1 (bytes 4..7).
        ldr r12, [r0, #4]
        ldr r3, [r2, #4]
        eor r12, r12, r3
        lsr r3, r12, #24
        and r3, r3, #15
        add r3, r1, r3, lsl #4
        ldm r3, {r4, r5, r6, r7}
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #28
        eor r11, r11, r10, lsl #28
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #16
        eor r11, r11, r10, lsl #28
        and r4, r4, #15
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #20
        eor r11, r11, r10, lsl #28
        and r4, r4, #15
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #8
        eor r11, r11, r10, lsl #28
        and r4, r4, #15
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #12
        eor r11, r11, r10, lsl #28
        and r4, r4, #15
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        and r4, r12, #15
        eor r11, r11, r10, lsl #28
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #4
        eor r11, r11, r10, lsl #28
        and r4, r4, #15
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        eor r11, r11, r10, lsl #28
        ldr r3, [lr, r3, lsl #2]
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        # Word 0 (bytes 0..3) — no trailing shift/reduce after this one.
        ldr r12, [r0]
        ldr r3, [r2]
        eor r12, r12, r3
        lsr r3, r12, #24
        and r3, r3, #15
        add r3, r1, r3, lsl #4
        ldm r3, {r4, r5, r6, r7}
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #28
        eor r11, r11, r10, lsl #28
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #16
        eor r11, r11, r10, lsl #28
        and r4, r4, #15
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #20
        eor r11, r11, r10, lsl #28
        and r4, r4, #15
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #8
        eor r11, r11, r10, lsl #28
        and r4, r4, #15
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #12
        eor r11, r11, r10, lsl #28
        and r4, r4, #15
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        and r4, r12, #15
        eor r11, r11, r10, lsl #28
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        lsr r6, r10, #4
        and r3, r11, #15
        lsr r11, r11, #4
        lsr r4, r12, #4
        eor r11, r11, r10, lsl #28
        and r4, r4, #15
        ldr r3, [lr, r3, lsl #2]
        add r4, r1, r4, lsl #4
        eor r10, r6, r9, lsl #28
        lsr r9, r9, #4
        ldm r4, {r4, r5, r6, r7}
        eor r9, r9, r8, lsl #28
        eor r8, r3, r8, lsr #4
        eor r8, r8, r4
        eor r9, r9, r5
        eor r10, r10, r6
        eor r11, r11, r7
        # Byte-reverse the accumulator back to big-endian and store to x.
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
        # REV r8, r8
        eor r3, r8, r8, ror #16
        bic r3, r3, #0xff0000
        ror r8, r8, #8
        eor r8, r8, r3, lsr #8
        # REV r9, r9
        eor r3, r9, r9, ror #16
        bic r3, r3, #0xff0000
        ror r9, r9, #8
        eor r9, r9, r3, lsr #8
        # REV r10, r10
        eor r3, r10, r10, ror #16
        bic r3, r3, #0xff0000
        ror r10, r10, #8
        eor r10, r10, r3, lsr #8
        # REV r11, r11
        eor r3, r11, r11, ror #16
        bic r3, r3, #0xff0000
        ror r11, r11, #8
        eor r11, r11, r3, lsr #8
#else
        rev r8, r8
        rev r9, r9
        rev r10, r10
        rev r11, r11
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
        stm r0, {r8, r9, r10, r11}
        # Advance to the next 16-byte block.
        pop {r3}
        subs r3, r3, #16
        add r2, r2, #16
        bne L_GCM_gmult_len_start_block
        pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
        .size GCM_gmult_len,.-GCM_gmult_len
        .text
        .type L_AES_ARM32_te_gcm, %object
        .size L_AES_ARM32_te_gcm, 12
        .align 4
        # Pointer to the shared AES encryption T-table (loaded into r0 below).
L_AES_ARM32_te_gcm:
        .word L_AES_ARM32_te_data
        .text
        .align 4
        .globl AES_GCM_encrypt
        .type AES_GCM_encrypt, %function
        # AES_GCM_encrypt(in, out, len, ks, nr, ctr)
        #   r0      = in   (plaintext; len is a multiple of 16)
        #   r1      = out  (ciphertext)
        #   r2      = len  (bytes)
        #   r3      = ks   (encryption key schedule)
        #   [sp+36] = nr   (round count: 10 / 12 / 14, after the 9-reg push)
        #   [sp+40] = ctr  (16-byte counter block, big-endian in memory)
        # CTR-mode encryption part of AES-GCM: each block encrypts the counter
        # and XORs it with the plaintext.  Only the low 32-bit word of the
        # counter (r7) is incremented, matching GCM's 32-bit inc32 counter.
        # GHASH is not done here (see GCM_gmult_len).  Clobbers r0-r12, lr;
        # calls AES_encrypt_block; writes the final counter back to ctr.
AES_GCM_encrypt:
        push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
        ldr r12, [sp, #36]
        ldr r8, [sp, #40]
        mov lr, r0
        adr r0, L_AES_ARM32_te_gcm
        ldr r0, [r0]
        # Load the counter block and byte-reverse it into native word order
        # so it can be incremented with a plain add.
        ldm r8, {r4, r5, r6, r7}
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
        eor r10, r4, r4, ror #16
        eor r11, r5, r5, ror #16
        bic r10, r10, #0xff0000
        bic r11, r11, #0xff0000
        ror r4, r4, #8
        ror r5, r5, #8
        eor r4, r4, r10, lsr #8
        eor r5, r5, r11, lsr #8
        eor r10, r6, r6, ror #16
        eor r11, r7, r7, ror #16
        bic r10, r10, #0xff0000
        bic r11, r11, #0xff0000
        ror r6, r6, #8
        ror r7, r7, #8
        eor r6, r6, r10, lsr #8
        eor r7, r7, r11, lsr #8
#else
        rev r4, r4
        rev r5, r5
        rev r6, r6
        rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
        stm r8, {r4, r5, r6, r7}
        # Keep the two pointers on the stack: [sp] = ks, [sp+4] = ctr.
        push {r3, r8}
        # Dispatch on round count: 10 -> AES-128, 12 -> AES-192, else AES-256.
        cmp r12, #10
        beq L_AES_GCM_encrypt_start_block_128
        cmp r12, #12
        beq L_AES_GCM_encrypt_start_block_192
L_AES_GCM_encrypt_loop_block_256:
        push {r1, r2, lr}
        # lr = ctr ([sp+16] after the push above); bump the low counter word
        # and store it so the next iteration reloads the updated block.
        ldr lr, [sp, #16]
        add r7, r7, #1
        ldm r3!, {r8, r9, r10, r11}
        str r7, [lr, #12]
        # Round: 0 - XOR in key schedule
        eor r4, r4, r8
        eor r5, r5, r9
        eor r6, r6, r10
        eor r7, r7, r11
        mov r1, #6
        bl AES_encrypt_block
        pop {r1, r2, lr}
        # Reload the key schedule pointer.
        ldr r3, [sp]
        # Keystream back to big-endian byte order before the XOR.
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
        eor r8, r4, r4, ror #16
        eor r9, r5, r5, ror #16
        eor r10, r6, r6, ror #16
        eor r11, r7, r7, ror #16
        bic r8, r8, #0xff0000
        bic r9, r9, #0xff0000
        bic r10, r10, #0xff0000
        bic r11, r11, #0xff0000
        ror r4, r4, #8
        ror r5, r5, #8
        ror r6, r6, #8
        ror r7, r7, #8
        eor r4, r4, r8, lsr #8
        eor r5, r5, r9, lsr #8
        eor r6, r6, r10, lsr #8
        eor r7, r7, r11, lsr #8
#else
        rev r4, r4
        rev r5, r5
        rev r6, r6
        rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
        # ciphertext = keystream XOR plaintext (lr = in).
        ldr r8, [lr]
        ldr r9, [lr, #4]
        ldr r10, [lr, #8]
        ldr r11, [lr, #12]
        eor r4, r4, r8
        eor r5, r5, r9
        eor r6, r6, r10
        eor r7, r7, r11
        ldr r8, [sp, #4]
        str r4, [r1]
        str r5, [r1, #4]
        str r6, [r1, #8]
        str r7, [r1, #12]
        # Reload the (incremented) counter block for the next iteration.
        ldm r8, {r4, r5, r6, r7}
        subs r2, r2, #16
        add lr, lr, #16
        add r1, r1, #16
        bne L_AES_GCM_encrypt_loop_block_256
        b L_AES_GCM_encrypt_end
L_AES_GCM_encrypt_start_block_192:
L_AES_GCM_encrypt_loop_block_192:
        # Same structure as the AES-256 path, with 5 "double" rounds (r1 = 5).
        push {r1, r2, lr}
        ldr lr, [sp, #16]
        add r7, r7, #1
        ldm r3!, {r8, r9, r10, r11}
        str r7, [lr, #12]
        # Round: 0 - XOR in key schedule
        eor r4, r4, r8
        eor r5, r5, r9
        eor r6, r6, r10
        eor r7, r7, r11
        mov r1, #5
        bl AES_encrypt_block
        pop {r1, r2, lr}
        ldr r3, [sp]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
        eor r8, r4, r4, ror #16
        eor r9, r5, r5, ror #16
        eor r10, r6, r6, ror #16
        eor r11, r7, r7, ror #16
        bic r8, r8, #0xff0000
        bic r9, r9, #0xff0000
        bic r10, r10, #0xff0000
        bic r11, r11, #0xff0000
        ror r4, r4, #8
        ror r5, r5, #8
        ror r6, r6, #8
        ror r7, r7, #8
        eor r4, r4, r8, lsr #8
        eor r5, r5, r9, lsr #8
        eor r6, r6, r10, lsr #8
        eor r7, r7, r11, lsr #8
#else
        rev r4, r4
        rev r5, r5
        rev r6, r6
        rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
        ldr r8, [lr]
        ldr r9, [lr, #4]
        ldr r10, [lr, #8]
        ldr r11, [lr, #12]
        eor r4, r4, r8
        eor r5, r5, r9
        eor r6, r6, r10
        eor r7, r7, r11
        ldr r8, [sp, #4]
        str r4, [r1]
        str r5, [r1, #4]
        str r6, [r1, #8]
        str r7, [r1, #12]
        ldm r8, {r4, r5, r6, r7}
        subs r2, r2, #16
        add lr, lr, #16
        add r1, r1, #16
        bne L_AES_GCM_encrypt_loop_block_192
        b L_AES_GCM_encrypt_end
L_AES_GCM_encrypt_start_block_128:
L_AES_GCM_encrypt_loop_block_128:
        # Same structure as the AES-256 path, with 4 "double" rounds (r1 = 4).
        push {r1, r2, lr}
        ldr lr, [sp, #16]
        add r7, r7, #1
        ldm r3!, {r8, r9, r10, r11}
        str r7, [lr, #12]
        # Round: 0 - XOR in key schedule
        eor r4, r4, r8
        eor r5, r5, r9
        eor r6, r6, r10
        eor r7, r7, r11
        mov r1, #4
        bl AES_encrypt_block
        pop {r1, r2, lr}
        ldr r3, [sp]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
        eor r8, r4, r4, ror #16
        eor r9, r5, r5, ror #16
        eor r10, r6, r6, ror #16
        eor r11, r7, r7, ror #16
        bic r8, r8, #0xff0000
        bic r9, r9, #0xff0000
        bic r10, r10, #0xff0000
        bic r11, r11, #0xff0000
        ror r4, r4, #8
        ror r5, r5, #8
        ror r6, r6, #8
        ror r7, r7, #8
        eor r4, r4, r8, lsr #8
        eor r5, r5, r9, lsr #8
        eor r6, r6, r10, lsr #8
        eor r7, r7, r11, lsr #8
#else
        rev r4, r4
        rev r5, r5
        rev r6, r6
        rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
        ldr r8, [lr]
        ldr r9, [lr, #4]
        ldr r10, [lr, #8]
        ldr r11, [lr, #12]
        eor r4, r4, r8
        eor r5, r5, r9
        eor r6, r6, r10
        eor r7, r7, r11
        ldr r8, [sp, #4]
        str r4, [r1]
        str r5, [r1, #4]
        str r6, [r1, #8]
        str r7, [r1, #12]
        ldm r8, {r4, r5, r6, r7}
        subs r2, r2, #16
        add lr, lr, #16
        add r1, r1, #16
        bne L_AES_GCM_encrypt_loop_block_128
L_AES_GCM_encrypt_end:
        # Write the final counter back to ctr in big-endian byte order.
        pop {r3, r8}
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
        eor r10, r4, r4, ror #16
        eor r11, r5, r5, ror #16
        bic r10, r10, #0xff0000
        bic r11, r11, #0xff0000
        ror r4, r4, #8
        ror r5, r5, #8
        eor r4, r4, r10, lsr #8
        eor r5, r5, r11, lsr #8
        eor r10, r6, r6, ror #16
        eor r11, r7, r7, ror #16
        bic r10, r10, #0xff0000
        bic r11, r11, #0xff0000
        ror r6, r6, #8
        ror r7, r7, #8
        eor r6, r6, r10, lsr #8
        eor r7, r7, r11, lsr #8
#else
        rev r4, r4
        rev r5, r5
        rev r6, r6
        rev r7, r7
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
        stm r8, {r4, r5, r6, r7}
        pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
        .size AES_GCM_encrypt,.-AES_GCM_encrypt
#endif /* HAVE_AESGCM */
#endif /* !NO_AES */
#endif /* !__aarch64__ && __arm__ && !__thumb__ */
#endif /* WOLFSSL_ARMASM */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
#endif /* !WOLFSSL_ARMASM_INLINE */
/* ==== concatenation boundary: next file ====
 * repo: aenu1/aps3e (23,738 bytes)
 * app/src/main/cpp/rpcs3/3rdparty/wolfssl/wolfssl/wolfcrypt/src/port/arm/thumb2-sha3-asm.S
 */
/* thumb2-sha3-asm
*
* Copyright (C) 2006-2023 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
/* Generated using (from wolfssl):
* cd ../scripts
* ruby ./sha3/sha3.rb thumb2 ../wolfssl/wolfcrypt/src/port/arm/thumb2-sha3-asm.S
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif /* HAVE_CONFIG_H */
#include <wolfssl/wolfcrypt/settings.h>
#ifdef WOLFSSL_ARMASM
#if !defined(__aarch64__) && defined(__thumb__)
#ifndef WOLFSSL_ARMASM_INLINE
.thumb
.syntax unified
#ifdef WOLFSSL_SHA3
.text
.type L_sha3_thumb2_rt, %object
.size L_sha3_thumb2_rt, 192
.align 8
L_sha3_thumb2_rt:
.word 0x1
.word 0x0
.word 0x8082
.word 0x0
.word 0x808a
.word 0x80000000
.word 0x80008000
.word 0x80000000
.word 0x808b
.word 0x0
.word 0x80000001
.word 0x0
.word 0x80008081
.word 0x80000000
.word 0x8009
.word 0x80000000
.word 0x8a
.word 0x0
.word 0x88
.word 0x0
.word 0x80008009
.word 0x0
.word 0x8000000a
.word 0x0
.word 0x8000808b
.word 0x0
.word 0x8b
.word 0x80000000
.word 0x8089
.word 0x80000000
.word 0x8003
.word 0x80000000
.word 0x8002
.word 0x80000000
.word 0x80
.word 0x80000000
.word 0x800a
.word 0x0
.word 0x8000000a
.word 0x80000000
.word 0x80008081
.word 0x80000000
.word 0x8080
.word 0x80000000
.word 0x80000001
.word 0x0
.word 0x80008008
.word 0x80000000
.text
.align 4
.globl BlockSha3
.type BlockSha3, %function
BlockSha3:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
SUB sp, sp, #0xcc
ADR r1, L_sha3_thumb2_rt
MOV r2, #0xc
L_sha3_thumb2_begin:
STR r2, [sp, #200]
/* Round even */
/* Calc b[4] */
LDRD r4, r5, [r0, #32]
LDRD r6, r7, [r0, #72]
LDRD r8, r9, [r0, #112]
LDRD r10, r11, [r0, #152]
LDR r12, [r0, #192]
LDR lr, [r0, #196]
EOR r2, r4, r6
EOR r3, r5, r7
EOR r2, r2, r8
EOR r3, r3, r9
EOR r2, r2, r10
EOR r3, r3, r11
EOR r2, r2, r12
EOR r3, r3, lr
STRD r2, r3, [sp, #32]
/* Calc b[1] */
LDRD r4, r5, [r0, #8]
LDRD r6, r7, [r0, #48]
LDRD r8, r9, [r0, #88]
LDRD r10, r11, [r0, #128]
LDR r12, [r0, #168]
LDR lr, [r0, #172]
EOR r4, r4, r6
EOR r5, r5, r7
EOR r4, r4, r8
EOR r5, r5, r9
EOR r4, r4, r10
EOR r5, r5, r11
EOR r4, r4, r12
EOR r5, r5, lr
STRD r4, r5, [sp, #8]
/* Calc t[0] */
EOR r2, r2, r5, LSR #31
EOR r3, r3, r4, LSR #31
EOR r2, r2, r4, LSL #1
EOR r3, r3, r5, LSL #1
/* Calc b[0] and XOR t[0] into s[x*5+0] */
LDRD r4, r5, [r0]
LDRD r6, r7, [r0, #40]
LDRD r8, r9, [r0, #80]
LDRD r10, r11, [r0, #120]
EOR r12, r4, r6
EOR lr, r5, r7
EOR r12, r12, r8
EOR lr, lr, r9
EOR r12, r12, r10
EOR lr, lr, r11
EOR r4, r4, r2
EOR r5, r5, r3
EOR r6, r6, r2
EOR r7, r7, r3
EOR r8, r8, r2
EOR r9, r9, r3
EOR r10, r10, r2
EOR r11, r11, r3
STRD r4, r5, [r0]
STRD r6, r7, [r0, #40]
STRD r8, r9, [r0, #80]
STRD r10, r11, [r0, #120]
LDRD r10, r11, [r0, #160]
EOR r12, r12, r10
EOR lr, lr, r11
EOR r10, r10, r2
EOR r11, r11, r3
STRD r10, r11, [r0, #160]
STR r12, [sp]
STR lr, [sp, #4]
/* Calc b[3] */
LDRD r4, r5, [r0, #24]
LDRD r6, r7, [r0, #64]
LDRD r8, r9, [r0, #104]
LDRD r10, r11, [r0, #144]
LDR r12, [r0, #184]
LDR lr, [r0, #188]
EOR r4, r4, r6
EOR r5, r5, r7
EOR r4, r4, r8
EOR r5, r5, r9
EOR r4, r4, r10
EOR r5, r5, r11
EOR r4, r4, r12
EOR r5, r5, lr
STRD r4, r5, [sp, #24]
/* Calc t[2] */
LDRD r2, r3, [sp, #8]
EOR r2, r2, r5, LSR #31
EOR r3, r3, r4, LSR #31
EOR r2, r2, r4, LSL #1
EOR r3, r3, r5, LSL #1
/* Calc b[2] and XOR t[2] into s[x*5+2] */
LDRD r4, r5, [r0, #16]
LDRD r6, r7, [r0, #56]
LDRD r8, r9, [r0, #96]
LDRD r10, r11, [r0, #136]
EOR r12, r4, r6
EOR lr, r5, r7
EOR r12, r12, r8
EOR lr, lr, r9
EOR r12, r12, r10
EOR lr, lr, r11
EOR r4, r4, r2
EOR r5, r5, r3
EOR r6, r6, r2
EOR r7, r7, r3
EOR r8, r8, r2
EOR r9, r9, r3
EOR r10, r10, r2
EOR r11, r11, r3
STRD r4, r5, [r0, #16]
STRD r6, r7, [r0, #56]
STRD r8, r9, [r0, #96]
STRD r10, r11, [r0, #136]
LDRD r10, r11, [r0, #176]
EOR r12, r12, r10
EOR lr, lr, r11
EOR r10, r10, r2
EOR r11, r11, r3
STRD r10, r11, [r0, #176]
STR r12, [sp, #16]
STR lr, [sp, #20]
/* Calc t[1] */
LDRD r2, r3, [sp]
EOR r2, r2, lr, LSR #31
EOR r3, r3, r12, LSR #31
EOR r2, r2, r12, LSL #1
EOR r3, r3, lr, LSL #1
/* XOR t[1] into s[x*5+1] */
LDRD r4, r5, [r0, #8]
LDRD r6, r7, [r0, #48]
LDRD r8, r9, [r0, #88]
LDRD r10, r11, [r0, #128]
LDR r12, [r0, #168]
LDR lr, [r0, #172]
EOR r4, r4, r2
EOR r5, r5, r3
EOR r6, r6, r2
EOR r7, r7, r3
EOR r8, r8, r2
EOR r9, r9, r3
EOR r10, r10, r2
EOR r11, r11, r3
EOR r12, r12, r2
EOR lr, lr, r3
STRD r4, r5, [r0, #8]
STRD r6, r7, [r0, #48]
STRD r8, r9, [r0, #88]
STRD r10, r11, [r0, #128]
STR r12, [r0, #168]
STR lr, [r0, #172]
/* Calc t[3] */
LDRD r2, r3, [sp, #16]
LDRD r4, r5, [sp, #32]
EOR r2, r2, r5, LSR #31
EOR r3, r3, r4, LSR #31
EOR r2, r2, r4, LSL #1
EOR r3, r3, r5, LSL #1
/* XOR t[3] into s[x*5+3] */
LDRD r4, r5, [r0, #24]
LDRD r6, r7, [r0, #64]
LDRD r8, r9, [r0, #104]
LDRD r10, r11, [r0, #144]
LDR r12, [r0, #184]
LDR lr, [r0, #188]
EOR r4, r4, r2
EOR r5, r5, r3
EOR r6, r6, r2
EOR r7, r7, r3
EOR r8, r8, r2
EOR r9, r9, r3
EOR r10, r10, r2
EOR r11, r11, r3
EOR r12, r12, r2
EOR lr, lr, r3
STRD r4, r5, [r0, #24]
STRD r6, r7, [r0, #64]
STRD r8, r9, [r0, #104]
STRD r10, r11, [r0, #144]
STR r12, [r0, #184]
STR lr, [r0, #188]
/* Calc t[4] */
LDRD r2, r3, [sp, #24]
LDRD r4, r5, [sp]
EOR r2, r2, r5, LSR #31
EOR r3, r3, r4, LSR #31
EOR r2, r2, r4, LSL #1
EOR r3, r3, r5, LSL #1
/* XOR t[4] into s[x*5+4] */
LDRD r4, r5, [r0, #32]
LDRD r6, r7, [r0, #72]
LDRD r8, r9, [r0, #112]
LDRD r10, r11, [r0, #152]
LDR r12, [r0, #192]
LDR lr, [r0, #196]
EOR r4, r4, r2
EOR r5, r5, r3
EOR r6, r6, r2
EOR r7, r7, r3
EOR r8, r8, r2
EOR r9, r9, r3
EOR r10, r10, r2
EOR r11, r11, r3
EOR r12, r12, r2
EOR lr, lr, r3
STRD r4, r5, [r0, #32]
STRD r6, r7, [r0, #72]
STRD r8, r9, [r0, #112]
STRD r10, r11, [r0, #152]
STR r12, [r0, #192]
STR lr, [r0, #196]
/* Row Mix */
/* Row 0 */
LDRD r2, r3, [r0]
LDRD r4, r5, [r0, #48]
LDRD r6, r7, [r0, #96]
LDRD r8, r9, [r0, #144]
LDRD r10, r11, [r0, #192]
/* s[1] <<< 44 */
MOV lr, r4
LSR r12, r5, #20
LSR r4, r4, #20
ORR r4, r4, r5, LSL #12
ORR r5, r12, lr, LSL #12
/* s[2] <<< 43 */
MOV lr, r6
LSR r12, r7, #21
LSR r6, r6, #21
ORR r6, r6, r7, LSL #11
ORR r7, r12, lr, LSL #11
/* s[3] <<< 21 */
LSR r12, r9, #11
LSR lr, r8, #11
ORR r8, r12, r8, LSL #21
ORR r9, lr, r9, LSL #21
/* s[4] <<< 14 */
LSR r12, r11, #18
LSR lr, r10, #18
ORR r10, r12, r10, LSL #14
ORR r11, lr, r11, LSL #14
BIC r12, r8, r6
BIC lr, r9, r7
EOR r12, r12, r4
EOR lr, lr, r5
STR r12, [sp, #8]
STR lr, [sp, #12]
BIC r12, r10, r8
BIC lr, r11, r9
EOR r12, r12, r6
EOR lr, lr, r7
STR r12, [sp, #16]
STR lr, [sp, #20]
BIC r12, r2, r10
BIC lr, r3, r11
EOR r12, r12, r8
EOR lr, lr, r9
STR r12, [sp, #24]
STR lr, [sp, #28]
BIC r12, r4, r2
BIC lr, r5, r3
EOR r12, r12, r10
EOR lr, lr, r11
STR r12, [sp, #32]
STR lr, [sp, #36]
/* Get constant */
LDRD r10, r11, [r1]
ADD r1, r1, #0x8
BIC r12, r6, r4
BIC lr, r7, r5
EOR r12, r12, r2
EOR lr, lr, r3
/* XOR in constant */
EOR r12, r12, r10
EOR lr, lr, r11
STR r12, [sp]
STR lr, [sp, #4]
/* Row 1 */
LDRD r2, r3, [r0, #24]
LDRD r4, r5, [r0, #72]
LDRD r6, r7, [r0, #80]
LDRD r8, r9, [r0, #128]
LDRD r10, r11, [r0, #176]
/* s[0] <<< 28 */
LSR r12, r3, #4
LSR lr, r2, #4
ORR r2, r12, r2, LSL #28
ORR r3, lr, r3, LSL #28
/* s[1] <<< 20 */
LSR r12, r5, #12
LSR lr, r4, #12
ORR r4, r12, r4, LSL #20
ORR r5, lr, r5, LSL #20
/* s[2] <<< 3 */
LSR r12, r7, #29
LSR lr, r6, #29
ORR r6, r12, r6, LSL #3
ORR r7, lr, r7, LSL #3
/* s[3] <<< 45 */
MOV lr, r8
LSR r12, r9, #19
LSR r8, r8, #19
ORR r8, r8, r9, LSL #13
ORR r9, r12, lr, LSL #13
/* s[4] <<< 61 */
MOV lr, r10
LSR r12, r11, #3
LSR r10, r10, #3
ORR r10, r10, r11, LSL #29
ORR r11, r12, lr, LSL #29
BIC r12, r8, r6
BIC lr, r9, r7
EOR r12, r12, r4
EOR lr, lr, r5
STR r12, [sp, #48]
STR lr, [sp, #52]
BIC r12, r10, r8
BIC lr, r11, r9
EOR r12, r12, r6
EOR lr, lr, r7
STR r12, [sp, #56]
STR lr, [sp, #60]
BIC r12, r2, r10
BIC lr, r3, r11
EOR r12, r12, r8
EOR lr, lr, r9
STR r12, [sp, #64]
STR lr, [sp, #68]
BIC r12, r4, r2
BIC lr, r5, r3
EOR r12, r12, r10
EOR lr, lr, r11
STR r12, [sp, #72]
STR lr, [sp, #76]
BIC r12, r6, r4
BIC lr, r7, r5
EOR r12, r12, r2
EOR lr, lr, r3
STR r12, [sp, #40]
STR lr, [sp, #44]
/* Row 2 */
LDRD r2, r3, [r0, #8]
LDRD r4, r5, [r0, #56]
LDRD r6, r7, [r0, #104]
LDRD r8, r9, [r0, #152]
LDRD r10, r11, [r0, #160]
/* s[0] <<< 1 */
LSR r12, r3, #31
LSR lr, r2, #31
ORR r2, r12, r2, LSL #1
ORR r3, lr, r3, LSL #1
/* s[1] <<< 6 */
LSR r12, r5, #26
LSR lr, r4, #26
ORR r4, r12, r4, LSL #6
ORR r5, lr, r5, LSL #6
/* s[2] <<< 25 */
LSR r12, r7, #7
LSR lr, r6, #7
ORR r6, r12, r6, LSL #25
ORR r7, lr, r7, LSL #25
/* s[3] <<< 8 */
LSR r12, r9, #24
LSR lr, r8, #24
ORR r8, r12, r8, LSL #8
ORR r9, lr, r9, LSL #8
/* s[4] <<< 18 */
LSR r12, r11, #14
LSR lr, r10, #14
ORR r10, r12, r10, LSL #18
ORR r11, lr, r11, LSL #18
BIC r12, r8, r6
BIC lr, r9, r7
EOR r12, r12, r4
EOR lr, lr, r5
STR r12, [sp, #88]
STR lr, [sp, #92]
BIC r12, r10, r8
BIC lr, r11, r9
EOR r12, r12, r6
EOR lr, lr, r7
STR r12, [sp, #96]
STR lr, [sp, #100]
BIC r12, r2, r10
BIC lr, r3, r11
EOR r12, r12, r8
EOR lr, lr, r9
STR r12, [sp, #104]
STR lr, [sp, #108]
BIC r12, r4, r2
BIC lr, r5, r3
EOR r12, r12, r10
EOR lr, lr, r11
STR r12, [sp, #112]
STR lr, [sp, #116]
BIC r12, r6, r4
BIC lr, r7, r5
EOR r12, r12, r2
EOR lr, lr, r3
STR r12, [sp, #80]
STR lr, [sp, #84]
/* Row 3 */
LDRD r2, r3, [r0, #32]
LDRD r4, r5, [r0, #40]
LDRD r6, r7, [r0, #88]
LDRD r8, r9, [r0, #136]
LDRD r10, r11, [r0, #184]
/* s[0] <<< 27 */
LSR r12, r3, #5
LSR lr, r2, #5
ORR r2, r12, r2, LSL #27
ORR r3, lr, r3, LSL #27
/* s[1] <<< 36 */
MOV lr, r4
LSR r12, r5, #28
LSR r4, r4, #28
ORR r4, r4, r5, LSL #4
ORR r5, r12, lr, LSL #4
/* s[2] <<< 10 */
LSR r12, r7, #22
LSR lr, r6, #22
ORR r6, r12, r6, LSL #10
ORR r7, lr, r7, LSL #10
/* s[3] <<< 15 */
LSR r12, r9, #17
LSR lr, r8, #17
ORR r8, r12, r8, LSL #15
ORR r9, lr, r9, LSL #15
/* s[4] <<< 56 */
MOV lr, r10
LSR r12, r11, #8
LSR r10, r10, #8
ORR r10, r10, r11, LSL #24
ORR r11, r12, lr, LSL #24
BIC r12, r8, r6
BIC lr, r9, r7
EOR r12, r12, r4
EOR lr, lr, r5
STR r12, [sp, #128]
STR lr, [sp, #132]
BIC r12, r10, r8
BIC lr, r11, r9
EOR r12, r12, r6
EOR lr, lr, r7
STR r12, [sp, #136]
STR lr, [sp, #140]
BIC r12, r2, r10
BIC lr, r3, r11
EOR r12, r12, r8
EOR lr, lr, r9
STR r12, [sp, #144]
STR lr, [sp, #148]
BIC r12, r4, r2
BIC lr, r5, r3
EOR r12, r12, r10
EOR lr, lr, r11
STR r12, [sp, #152]
STR lr, [sp, #156]
BIC r12, r6, r4
BIC lr, r7, r5
EOR r12, r12, r2
EOR lr, lr, r3
STR r12, [sp, #120]
STR lr, [sp, #124]
/* Row 4 */
LDRD r2, r3, [r0, #16]
LDRD r4, r5, [r0, #64]
LDRD r6, r7, [r0, #112]
LDRD r8, r9, [r0, #120]
LDRD r10, r11, [r0, #168]
/* s[0] <<< 62 */
MOV lr, r2
LSR r12, r3, #2
LSR r2, r2, #2
ORR r2, r2, r3, LSL #30
ORR r3, r12, lr, LSL #30
/* s[1] <<< 55 */
MOV lr, r4
LSR r12, r5, #9
LSR r4, r4, #9
ORR r4, r4, r5, LSL #23
ORR r5, r12, lr, LSL #23
/* s[2] <<< 39 */
MOV lr, r6
LSR r12, r7, #25
LSR r6, r6, #25
ORR r6, r6, r7, LSL #7
ORR r7, r12, lr, LSL #7
/* s[3] <<< 41 */
MOV lr, r8
LSR r12, r9, #23
LSR r8, r8, #23
ORR r8, r8, r9, LSL #9
ORR r9, r12, lr, LSL #9
/* s[4] <<< 2 */
LSR r12, r11, #30
LSR lr, r10, #30
ORR r10, r12, r10, LSL #2
ORR r11, lr, r11, LSL #2
BIC r12, r8, r6
BIC lr, r9, r7
EOR r12, r12, r4
EOR lr, lr, r5
STR r12, [sp, #168]
STR lr, [sp, #172]
BIC r12, r10, r8
BIC lr, r11, r9
EOR r12, r12, r6
EOR lr, lr, r7
STR r12, [sp, #176]
STR lr, [sp, #180]
BIC r12, r2, r10
BIC lr, r3, r11
EOR r12, r12, r8
EOR lr, lr, r9
STR r12, [sp, #184]
STR lr, [sp, #188]
BIC r12, r4, r2
BIC lr, r5, r3
EOR r12, r12, r10
EOR lr, lr, r11
STR r12, [sp, #192]
STR lr, [sp, #196]
BIC r12, r6, r4
BIC lr, r7, r5
EOR r12, r12, r2
EOR lr, lr, r3
STR r12, [sp, #160]
STR lr, [sp, #164]
/* Round odd */
/* Calc b[4] */
LDRD r4, r5, [sp, #32]
LDRD r6, r7, [sp, #72]
LDRD r8, r9, [sp, #112]
LDRD r10, r11, [sp, #152]
LDR r12, [sp, #192]
LDR lr, [sp, #196]
EOR r2, r4, r6
EOR r3, r5, r7
EOR r2, r2, r8
EOR r3, r3, r9
EOR r2, r2, r10
EOR r3, r3, r11
EOR r2, r2, r12
EOR r3, r3, lr
STRD r2, r3, [r0, #32]
/* Calc b[1] */
LDRD r4, r5, [sp, #8]
LDRD r6, r7, [sp, #48]
LDRD r8, r9, [sp, #88]
LDRD r10, r11, [sp, #128]
LDR r12, [sp, #168]
LDR lr, [sp, #172]
EOR r4, r4, r6
EOR r5, r5, r7
EOR r4, r4, r8
EOR r5, r5, r9
EOR r4, r4, r10
EOR r5, r5, r11
EOR r4, r4, r12
EOR r5, r5, lr
STRD r4, r5, [r0, #8]
/* Calc t[0] */
EOR r2, r2, r5, LSR #31
EOR r3, r3, r4, LSR #31
EOR r2, r2, r4, LSL #1
EOR r3, r3, r5, LSL #1
/* Calc b[0] and XOR t[0] into s[x*5+0] */
LDRD r4, r5, [sp]
LDRD r6, r7, [sp, #40]
LDRD r8, r9, [sp, #80]
LDRD r10, r11, [sp, #120]
EOR r12, r4, r6
EOR lr, r5, r7
EOR r12, r12, r8
EOR lr, lr, r9
EOR r12, r12, r10
EOR lr, lr, r11
EOR r4, r4, r2
EOR r5, r5, r3
EOR r6, r6, r2
EOR r7, r7, r3
EOR r8, r8, r2
EOR r9, r9, r3
EOR r10, r10, r2
EOR r11, r11, r3
STRD r4, r5, [sp]
STRD r6, r7, [sp, #40]
STRD r8, r9, [sp, #80]
STRD r10, r11, [sp, #120]
LDRD r10, r11, [sp, #160]
EOR r12, r12, r10
EOR lr, lr, r11
EOR r10, r10, r2
EOR r11, r11, r3
STRD r10, r11, [sp, #160]
STR r12, [r0]
STR lr, [r0, #4]
/* Calc b[3] */
LDRD r4, r5, [sp, #24]
LDRD r6, r7, [sp, #64]
LDRD r8, r9, [sp, #104]
LDRD r10, r11, [sp, #144]
LDR r12, [sp, #184]
LDR lr, [sp, #188]
EOR r4, r4, r6
EOR r5, r5, r7
EOR r4, r4, r8
EOR r5, r5, r9
EOR r4, r4, r10
EOR r5, r5, r11
EOR r4, r4, r12
EOR r5, r5, lr
STRD r4, r5, [r0, #24]
/* Calc t[2] */
LDRD r2, r3, [r0, #8]
EOR r2, r2, r5, LSR #31
EOR r3, r3, r4, LSR #31
EOR r2, r2, r4, LSL #1
EOR r3, r3, r5, LSL #1
/* Calc b[2] and XOR t[2] into s[x*5+2] */
LDRD r4, r5, [sp, #16]
LDRD r6, r7, [sp, #56]
LDRD r8, r9, [sp, #96]
LDRD r10, r11, [sp, #136]
EOR r12, r4, r6
EOR lr, r5, r7
EOR r12, r12, r8
EOR lr, lr, r9
EOR r12, r12, r10
EOR lr, lr, r11
EOR r4, r4, r2
EOR r5, r5, r3
EOR r6, r6, r2
EOR r7, r7, r3
EOR r8, r8, r2
EOR r9, r9, r3
EOR r10, r10, r2
EOR r11, r11, r3
STRD r4, r5, [sp, #16]
STRD r6, r7, [sp, #56]
STRD r8, r9, [sp, #96]
STRD r10, r11, [sp, #136]
LDRD r10, r11, [sp, #176]
EOR r12, r12, r10
EOR lr, lr, r11
EOR r10, r10, r2
EOR r11, r11, r3
STRD r10, r11, [sp, #176]
STR r12, [r0, #16]
STR lr, [r0, #20]
/* Calc t[1] */
LDRD r2, r3, [r0]
EOR r2, r2, lr, LSR #31
EOR r3, r3, r12, LSR #31
EOR r2, r2, r12, LSL #1
EOR r3, r3, lr, LSL #1
/* XOR t[1] into s[x*5+1] */
LDRD r4, r5, [sp, #8]
LDRD r6, r7, [sp, #48]
LDRD r8, r9, [sp, #88]
LDRD r10, r11, [sp, #128]
LDR r12, [sp, #168]
LDR lr, [sp, #172]
EOR r4, r4, r2
EOR r5, r5, r3
EOR r6, r6, r2
EOR r7, r7, r3
EOR r8, r8, r2
EOR r9, r9, r3
EOR r10, r10, r2
EOR r11, r11, r3
EOR r12, r12, r2
EOR lr, lr, r3
STRD r4, r5, [sp, #8]
STRD r6, r7, [sp, #48]
STRD r8, r9, [sp, #88]
STRD r10, r11, [sp, #128]
STR r12, [sp, #168]
STR lr, [sp, #172]
/* Calc t[3] */
LDRD r2, r3, [r0, #16]
LDRD r4, r5, [r0, #32]
EOR r2, r2, r5, LSR #31
EOR r3, r3, r4, LSR #31
EOR r2, r2, r4, LSL #1
EOR r3, r3, r5, LSL #1
/* XOR t[3] into s[x*5+3] */
LDRD r4, r5, [sp, #24]
LDRD r6, r7, [sp, #64]
LDRD r8, r9, [sp, #104]
LDRD r10, r11, [sp, #144]
LDR r12, [sp, #184]
LDR lr, [sp, #188]
EOR r4, r4, r2
EOR r5, r5, r3
EOR r6, r6, r2
EOR r7, r7, r3
EOR r8, r8, r2
EOR r9, r9, r3
EOR r10, r10, r2
EOR r11, r11, r3
EOR r12, r12, r2
EOR lr, lr, r3
STRD r4, r5, [sp, #24]
STRD r6, r7, [sp, #64]
STRD r8, r9, [sp, #104]
STRD r10, r11, [sp, #144]
STR r12, [sp, #184]
STR lr, [sp, #188]
/* Calc t[4] */
LDRD r2, r3, [r0, #24]
LDRD r4, r5, [r0]
EOR r2, r2, r5, LSR #31
EOR r3, r3, r4, LSR #31
EOR r2, r2, r4, LSL #1
EOR r3, r3, r5, LSL #1
/* XOR t[4] into s[x*5+4] */
LDRD r4, r5, [sp, #32]
LDRD r6, r7, [sp, #72]
LDRD r8, r9, [sp, #112]
LDRD r10, r11, [sp, #152]
LDR r12, [sp, #192]
LDR lr, [sp, #196]
EOR r4, r4, r2
EOR r5, r5, r3
EOR r6, r6, r2
EOR r7, r7, r3
EOR r8, r8, r2
EOR r9, r9, r3
EOR r10, r10, r2
EOR r11, r11, r3
EOR r12, r12, r2
EOR lr, lr, r3
STRD r4, r5, [sp, #32]
STRD r6, r7, [sp, #72]
STRD r8, r9, [sp, #112]
STRD r10, r11, [sp, #152]
STR r12, [sp, #192]
STR lr, [sp, #196]
/* Row Mix */
/* Row 0 */
LDRD r2, r3, [sp]
LDRD r4, r5, [sp, #48]
LDRD r6, r7, [sp, #96]
LDRD r8, r9, [sp, #144]
LDRD r10, r11, [sp, #192]
/* s[1] <<< 44 */
MOV lr, r4
LSR r12, r5, #20
LSR r4, r4, #20
ORR r4, r4, r5, LSL #12
ORR r5, r12, lr, LSL #12
/* s[2] <<< 43 */
MOV lr, r6
LSR r12, r7, #21
LSR r6, r6, #21
ORR r6, r6, r7, LSL #11
ORR r7, r12, lr, LSL #11
/* s[3] <<< 21 */
LSR r12, r9, #11
LSR lr, r8, #11
ORR r8, r12, r8, LSL #21
ORR r9, lr, r9, LSL #21
/* s[4] <<< 14 */
LSR r12, r11, #18
LSR lr, r10, #18
ORR r10, r12, r10, LSL #14
ORR r11, lr, r11, LSL #14
BIC r12, r8, r6
BIC lr, r9, r7
EOR r12, r12, r4
EOR lr, lr, r5
STR r12, [r0, #8]
STR lr, [r0, #12]
BIC r12, r10, r8
BIC lr, r11, r9
EOR r12, r12, r6
EOR lr, lr, r7
STR r12, [r0, #16]
STR lr, [r0, #20]
BIC r12, r2, r10
BIC lr, r3, r11
EOR r12, r12, r8
EOR lr, lr, r9
STR r12, [r0, #24]
STR lr, [r0, #28]
BIC r12, r4, r2
BIC lr, r5, r3
EOR r12, r12, r10
EOR lr, lr, r11
STR r12, [r0, #32]
STR lr, [r0, #36]
/* Get constant */
LDRD r10, r11, [r1]
ADD r1, r1, #0x8
BIC r12, r6, r4
BIC lr, r7, r5
EOR r12, r12, r2
EOR lr, lr, r3
/* XOR in constant */
EOR r12, r12, r10
EOR lr, lr, r11
STR r12, [r0]
STR lr, [r0, #4]
/* Row 1 */
LDRD r2, r3, [sp, #24]
LDRD r4, r5, [sp, #72]
LDRD r6, r7, [sp, #80]
LDRD r8, r9, [sp, #128]
LDRD r10, r11, [sp, #176]
/* s[0] <<< 28 */
LSR r12, r3, #4
LSR lr, r2, #4
ORR r2, r12, r2, LSL #28
ORR r3, lr, r3, LSL #28
/* s[1] <<< 20 */
LSR r12, r5, #12
LSR lr, r4, #12
ORR r4, r12, r4, LSL #20
ORR r5, lr, r5, LSL #20
/* s[2] <<< 3 */
LSR r12, r7, #29
LSR lr, r6, #29
ORR r6, r12, r6, LSL #3
ORR r7, lr, r7, LSL #3
/* s[3] <<< 45 */
MOV lr, r8
LSR r12, r9, #19
LSR r8, r8, #19
ORR r8, r8, r9, LSL #13
ORR r9, r12, lr, LSL #13
/* s[4] <<< 61 */
MOV lr, r10
LSR r12, r11, #3
LSR r10, r10, #3
ORR r10, r10, r11, LSL #29
ORR r11, r12, lr, LSL #29
BIC r12, r8, r6
BIC lr, r9, r7
EOR r12, r12, r4
EOR lr, lr, r5
STR r12, [r0, #48]
STR lr, [r0, #52]
BIC r12, r10, r8
BIC lr, r11, r9
EOR r12, r12, r6
EOR lr, lr, r7
STR r12, [r0, #56]
STR lr, [r0, #60]
BIC r12, r2, r10
BIC lr, r3, r11
EOR r12, r12, r8
EOR lr, lr, r9
STR r12, [r0, #64]
STR lr, [r0, #68]
BIC r12, r4, r2
BIC lr, r5, r3
EOR r12, r12, r10
EOR lr, lr, r11
STR r12, [r0, #72]
STR lr, [r0, #76]
BIC r12, r6, r4
BIC lr, r7, r5
EOR r12, r12, r2
EOR lr, lr, r3
STR r12, [r0, #40]
STR lr, [r0, #44]
/* Row 2 */
LDRD r2, r3, [sp, #8]
LDRD r4, r5, [sp, #56]
LDRD r6, r7, [sp, #104]
LDRD r8, r9, [sp, #152]
LDRD r10, r11, [sp, #160]
/* s[0] <<< 1 */
LSR r12, r3, #31
LSR lr, r2, #31
ORR r2, r12, r2, LSL #1
ORR r3, lr, r3, LSL #1
/* s[1] <<< 6 */
LSR r12, r5, #26
LSR lr, r4, #26
ORR r4, r12, r4, LSL #6
ORR r5, lr, r5, LSL #6
/* s[2] <<< 25 */
LSR r12, r7, #7
LSR lr, r6, #7
ORR r6, r12, r6, LSL #25
ORR r7, lr, r7, LSL #25
/* s[3] <<< 8 */
LSR r12, r9, #24
LSR lr, r8, #24
ORR r8, r12, r8, LSL #8
ORR r9, lr, r9, LSL #8
/* s[4] <<< 18 */
LSR r12, r11, #14
LSR lr, r10, #14
ORR r10, r12, r10, LSL #18
ORR r11, lr, r11, LSL #18
BIC r12, r8, r6
BIC lr, r9, r7
EOR r12, r12, r4
EOR lr, lr, r5
STR r12, [r0, #88]
STR lr, [r0, #92]
BIC r12, r10, r8
BIC lr, r11, r9
EOR r12, r12, r6
EOR lr, lr, r7
STR r12, [r0, #96]
STR lr, [r0, #100]
BIC r12, r2, r10
BIC lr, r3, r11
EOR r12, r12, r8
EOR lr, lr, r9
STR r12, [r0, #104]
STR lr, [r0, #108]
BIC r12, r4, r2
BIC lr, r5, r3
EOR r12, r12, r10
EOR lr, lr, r11
STR r12, [r0, #112]
STR lr, [r0, #116]
BIC r12, r6, r4
BIC lr, r7, r5
EOR r12, r12, r2
EOR lr, lr, r3
STR r12, [r0, #80]
STR lr, [r0, #84]
/* Row 3 */
LDRD r2, r3, [sp, #32]
LDRD r4, r5, [sp, #40]
LDRD r6, r7, [sp, #88]
LDRD r8, r9, [sp, #136]
LDRD r10, r11, [sp, #184]
/* s[0] <<< 27 */
LSR r12, r3, #5
LSR lr, r2, #5
ORR r2, r12, r2, LSL #27
ORR r3, lr, r3, LSL #27
/* s[1] <<< 36 */
MOV lr, r4
LSR r12, r5, #28
LSR r4, r4, #28
ORR r4, r4, r5, LSL #4
ORR r5, r12, lr, LSL #4
/* s[2] <<< 10 */
LSR r12, r7, #22
LSR lr, r6, #22
ORR r6, r12, r6, LSL #10
ORR r7, lr, r7, LSL #10
/* s[3] <<< 15 */
LSR r12, r9, #17
LSR lr, r8, #17
ORR r8, r12, r8, LSL #15
ORR r9, lr, r9, LSL #15
/* s[4] <<< 56 */
MOV lr, r10
LSR r12, r11, #8
LSR r10, r10, #8
ORR r10, r10, r11, LSL #24
ORR r11, r12, lr, LSL #24
BIC r12, r8, r6
BIC lr, r9, r7
EOR r12, r12, r4
EOR lr, lr, r5
STR r12, [r0, #128]
STR lr, [r0, #132]
BIC r12, r10, r8
BIC lr, r11, r9
EOR r12, r12, r6
EOR lr, lr, r7
STR r12, [r0, #136]
STR lr, [r0, #140]
BIC r12, r2, r10
BIC lr, r3, r11
EOR r12, r12, r8
EOR lr, lr, r9
STR r12, [r0, #144]
STR lr, [r0, #148]
BIC r12, r4, r2
BIC lr, r5, r3
EOR r12, r12, r10
EOR lr, lr, r11
STR r12, [r0, #152]
STR lr, [r0, #156]
BIC r12, r6, r4
BIC lr, r7, r5
EOR r12, r12, r2
EOR lr, lr, r3
STR r12, [r0, #120]
STR lr, [r0, #124]
/* Row 4 */
LDRD r2, r3, [sp, #16]
LDRD r4, r5, [sp, #64]
LDRD r6, r7, [sp, #112]
LDRD r8, r9, [sp, #120]
LDRD r10, r11, [sp, #168]
/* s[0] <<< 62 */
MOV lr, r2
LSR r12, r3, #2
LSR r2, r2, #2
ORR r2, r2, r3, LSL #30
ORR r3, r12, lr, LSL #30
/* s[1] <<< 55 */
MOV lr, r4
LSR r12, r5, #9
LSR r4, r4, #9
ORR r4, r4, r5, LSL #23
ORR r5, r12, lr, LSL #23
/* s[2] <<< 39 */
MOV lr, r6
LSR r12, r7, #25
LSR r6, r6, #25
ORR r6, r6, r7, LSL #7
ORR r7, r12, lr, LSL #7
/* s[3] <<< 41 */
MOV lr, r8
LSR r12, r9, #23
LSR r8, r8, #23
ORR r8, r8, r9, LSL #9
ORR r9, r12, lr, LSL #9
/* s[4] <<< 2 */
LSR r12, r11, #30
LSR lr, r10, #30
ORR r10, r12, r10, LSL #2
ORR r11, lr, r11, LSL #2
BIC r12, r8, r6
BIC lr, r9, r7
EOR r12, r12, r4
EOR lr, lr, r5
STR r12, [r0, #168]
STR lr, [r0, #172]
BIC r12, r10, r8
BIC lr, r11, r9
EOR r12, r12, r6
EOR lr, lr, r7
STR r12, [r0, #176]
STR lr, [r0, #180]
BIC r12, r2, r10
BIC lr, r3, r11
EOR r12, r12, r8
EOR lr, lr, r9
STR r12, [r0, #184]
STR lr, [r0, #188]
BIC r12, r4, r2
BIC lr, r5, r3
EOR r12, r12, r10
EOR lr, lr, r11
STR r12, [r0, #192]
STR lr, [r0, #196]
BIC r12, r6, r4
BIC lr, r7, r5
EOR r12, r12, r2
EOR lr, lr, r3
STR r12, [r0, #160]
STR lr, [r0, #164]
LDR r2, [sp, #200]
SUBS r2, r2, #0x1
#ifdef __GNUC__
BNE L_sha3_thumb2_begin
#else
BNE.W L_sha3_thumb2_begin
#endif
ADD sp, sp, #0xcc
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 1505 */
.size BlockSha3,.-BlockSha3
#endif /* WOLFSSL_SHA3 */
#endif /* !__aarch64__ && __thumb__ */
#endif /* WOLFSSL_ARMASM */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
#endif /* !WOLFSSL_ARMASM_INLINE */
/* ==== concatenation boundary: next file ====
 * repo: aenu1/aps3e (42,102 bytes)
 * app/src/main/cpp/rpcs3/3rdparty/wolfssl/wolfssl/wolfcrypt/src/port/arm/armv8-sha512-asm.S
 */
/* armv8-sha512-asm
*
* Copyright (C) 2006-2023 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif /* HAVE_CONFIG_H */
#include <wolfssl/wolfcrypt/settings.h>
/* Generated using (from wolfssl):
* cd ../scripts
* ruby ./sha2/sha512.rb arm64 ../wolfssl/wolfcrypt/src/port/arm/armv8-sha512-asm.S
*/
#ifdef WOLFSSL_ARMASM
#ifdef __aarch64__
#ifndef WOLFSSL_ARMASM_INLINE
#ifdef WOLFSSL_SHA512
#ifndef WOLFSSL_ARMASM_CRYPTO_SHA512
#ifndef __APPLE__
.text
.type L_SHA512_transform_neon_len_k, %object
.section .rodata
.size L_SHA512_transform_neon_len_k, 640
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 3
#else
.p2align 3
#endif /* __APPLE__ */
L_SHA512_transform_neon_len_k:
.xword 0x428a2f98d728ae22
.xword 0x7137449123ef65cd
.xword 0xb5c0fbcfec4d3b2f
.xword 0xe9b5dba58189dbbc
.xword 0x3956c25bf348b538
.xword 0x59f111f1b605d019
.xword 0x923f82a4af194f9b
.xword 0xab1c5ed5da6d8118
.xword 0xd807aa98a3030242
.xword 0x12835b0145706fbe
.xword 0x243185be4ee4b28c
.xword 0x550c7dc3d5ffb4e2
.xword 0x72be5d74f27b896f
.xword 0x80deb1fe3b1696b1
.xword 0x9bdc06a725c71235
.xword 0xc19bf174cf692694
.xword 0xe49b69c19ef14ad2
.xword 0xefbe4786384f25e3
.xword 0xfc19dc68b8cd5b5
.xword 0x240ca1cc77ac9c65
.xword 0x2de92c6f592b0275
.xword 0x4a7484aa6ea6e483
.xword 0x5cb0a9dcbd41fbd4
.xword 0x76f988da831153b5
.xword 0x983e5152ee66dfab
.xword 0xa831c66d2db43210
.xword 0xb00327c898fb213f
.xword 0xbf597fc7beef0ee4
.xword 0xc6e00bf33da88fc2
.xword 0xd5a79147930aa725
.xword 0x6ca6351e003826f
.xword 0x142929670a0e6e70
.xword 0x27b70a8546d22ffc
.xword 0x2e1b21385c26c926
.xword 0x4d2c6dfc5ac42aed
.xword 0x53380d139d95b3df
.xword 0x650a73548baf63de
.xword 0x766a0abb3c77b2a8
.xword 0x81c2c92e47edaee6
.xword 0x92722c851482353b
.xword 0xa2bfe8a14cf10364
.xword 0xa81a664bbc423001
.xword 0xc24b8b70d0f89791
.xword 0xc76c51a30654be30
.xword 0xd192e819d6ef5218
.xword 0xd69906245565a910
.xword 0xf40e35855771202a
.xword 0x106aa07032bbd1b8
.xword 0x19a4c116b8d2d0c8
.xword 0x1e376c085141ab53
.xword 0x2748774cdf8eeb99
.xword 0x34b0bcb5e19b48a8
.xword 0x391c0cb3c5c95a63
.xword 0x4ed8aa4ae3418acb
.xword 0x5b9cca4f7763e373
.xword 0x682e6ff3d6b2b8a3
.xword 0x748f82ee5defb2fc
.xword 0x78a5636f43172f60
.xword 0x84c87814a1f0ab72
.xword 0x8cc702081a6439ec
.xword 0x90befffa23631e28
.xword 0xa4506cebde82bde9
.xword 0xbef9a3f7b2c67915
.xword 0xc67178f2e372532b
.xword 0xca273eceea26619c
.xword 0xd186b8c721c0c207
.xword 0xeada7dd6cde0eb1e
.xword 0xf57d4f7fee6ed178
.xword 0x6f067aa72176fba
.xword 0xa637dc5a2c898a6
.xword 0x113f9804bef90dae
.xword 0x1b710b35131c471b
.xword 0x28db77f523047d84
.xword 0x32caab7b40c72493
.xword 0x3c9ebe0a15c9bebc
.xword 0x431d67c49c100d4c
.xword 0x4cc5d4becb3e42b6
.xword 0x597f299cfc657e2a
.xword 0x5fcb6fab3ad6faec
.xword 0x6c44198c4a475817
#ifndef __APPLE__
.text
.type L_SHA512_transform_neon_len_ror8, %object
.section .rodata
.size L_SHA512_transform_neon_len_ror8, 16
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 4
#else
.p2align 4
#endif /* __APPLE__ */
L_SHA512_transform_neon_len_ror8:
.xword 0x7060504030201, 0x80f0e0d0c0b0a09
# -----------------------------------------------------------------------
# Transform_Sha512_Len_neon — SHA-512 compression over full 128-byte
# blocks, scalar rounds with NEON-assisted message schedule.
# ABI: AAPCS64.
# In:  x0 = pointer to digest state, 8 x 64-bit words (read and updated)
#      x1 = pointer to input data (big-endian 128-byte blocks)
#      w2 = byte length; assumed a non-zero multiple of 128 — TODO confirm
#           against the C caller before relying on this.
# Register roles inside the loop:
#   x4..x11  = working variables a..h
#   x19..x26 = copy of the digest added back in at end of each block
#   x3       = pointer walking the K constant table (reloaded per block)
#   v0..v7   = 16-word message schedule W
#   v11      = TBL mask (L_SHA512_transform_neon_len_ror8) giving a
#              per-64-bit-lane rotate right by 8 bits
#   x12..x17 = round temporaries; x27 = round-group counter
# Callee-saved x17/x19..x27 and d8..d11 are preserved on the stack frame.
# -----------------------------------------------------------------------
#ifndef __APPLE__
.text
.globl Transform_Sha512_Len_neon
.type Transform_Sha512_Len_neon,@function
.align 2
Transform_Sha512_Len_neon:
#else
.section __TEXT,__text
.globl _Transform_Sha512_Len_neon
.p2align 2
_Transform_Sha512_Len_neon:
#endif /* __APPLE__ */
stp x29, x30, [sp, #-128]!
add x29, sp, #0
str x17, [x29, #16]
str x19, [x29, #24]
stp x20, x21, [x29, #32]
stp x22, x23, [x29, #48]
stp x24, x25, [x29, #64]
stp x26, x27, [x29, #80]
stp d8, d9, [x29, #96]
stp d10, d11, [x29, #112]
#ifndef __APPLE__
adrp x3, L_SHA512_transform_neon_len_k
add x3, x3, :lo12:L_SHA512_transform_neon_len_k
#else
adrp x3, L_SHA512_transform_neon_len_k@PAGE
add x3, x3, :lo12:L_SHA512_transform_neon_len_k@PAGEOFF
#endif /* __APPLE__ */
#ifndef __APPLE__
adrp x27, L_SHA512_transform_neon_len_ror8
add x27, x27, :lo12:L_SHA512_transform_neon_len_ror8
#else
adrp x27, L_SHA512_transform_neon_len_ror8@PAGE
add x27, x27, :lo12:L_SHA512_transform_neon_len_ror8@PAGEOFF
#endif /* __APPLE__ */
# v11 = byte-permutation mask for the ror-8 TBL trick (kept live all loop).
ld1 {v11.16b}, [x27]
# Load digest into working vars
ldp x4, x5, [x0]
ldp x6, x7, [x0, #16]
ldp x8, x9, [x0, #32]
ldp x10, x11, [x0, #48]
# Start of loop processing a block
L_sha512_len_neon_begin:
# Load W
# Copy digest to add in at end
ld1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x1], #0x40
mov x19, x4
ld1 {v4.2d, v5.2d, v6.2d, v7.2d}, [x1], #0x40
mov x20, x5
rev64 v0.16b, v0.16b
mov x21, x6
rev64 v1.16b, v1.16b
mov x22, x7
rev64 v2.16b, v2.16b
mov x23, x8
rev64 v3.16b, v3.16b
mov x24, x9
rev64 v4.16b, v4.16b
mov x25, x10
rev64 v5.16b, v5.16b
mov x26, x11
rev64 v6.16b, v6.16b
rev64 v7.16b, v7.16b
# Pre-calc: b ^ c
eor x16, x5, x6
mov x27, #4
# Start of 16 rounds
# Executed 4 times (x27 counts down): rounds 0..63. Even rounds are
# scalar-only; odd rounds interleave the NEON message-schedule update
# (sigma0/sigma1 of W) with the scalar compression to hide latency.
L_sha512_len_neon_start:
# Round 0
mov x13, v0.d[0]
ldr x15, [x3], #8
ror x12, x8, #14
ror x14, x4, #28
eor x12, x12, x8, ror 18
eor x14, x14, x4, ror 34
eor x12, x12, x8, ror 41
eor x14, x14, x4, ror 39
add x11, x11, x12
eor x17, x4, x5
eor x12, x9, x10
and x16, x17, x16
and x12, x12, x8
add x11, x11, x13
eor x12, x12, x10
add x11, x11, x15
eor x16, x16, x5
add x11, x11, x12
add x14, x14, x16
add x7, x7, x11
add x11, x11, x14
# Round 1
mov x13, v0.d[1]
ldr x15, [x3], #8
ext v10.16b, v0.16b, v1.16b, #8
ror x12, x7, #14
shl v8.2d, v7.2d, #45
ror x14, x11, #28
sri v8.2d, v7.2d, #19
eor x12, x12, x7, ror 18
shl v9.2d, v7.2d, #3
eor x14, x14, x11, ror 34
sri v9.2d, v7.2d, #61
eor x12, x12, x7, ror 41
eor v9.16b, v9.16b, v8.16b
eor x14, x14, x11, ror 39
ushr v8.2d, v7.2d, #6
add x10, x10, x12
eor v9.16b, v9.16b, v8.16b
eor x16, x11, x4
add v0.2d, v0.2d, v9.2d
eor x12, x8, x9
ext v9.16b, v4.16b, v5.16b, #8
and x17, x16, x17
add v0.2d, v0.2d, v9.2d
and x12, x12, x7
shl v8.2d, v10.2d, #63
add x10, x10, x13
sri v8.2d, v10.2d, #1
eor x12, x12, x9
tbl v9.16b, {v10.16b}, v11.16b
add x10, x10, x15
eor v9.16b, v9.16b, v8.16b
eor x17, x17, x4
ushr v10.2d, v10.2d, #7
add x10, x10, x12
eor v9.16b, v9.16b, v10.16b
add x14, x14, x17
add v0.2d, v0.2d, v9.2d
add x6, x6, x10
add x10, x10, x14
# Round 2
mov x13, v1.d[0]
ldr x15, [x3], #8
ror x12, x6, #14
ror x14, x10, #28
eor x12, x12, x6, ror 18
eor x14, x14, x10, ror 34
eor x12, x12, x6, ror 41
eor x14, x14, x10, ror 39
add x9, x9, x12
eor x17, x10, x11
eor x12, x7, x8
and x16, x17, x16
and x12, x12, x6
add x9, x9, x13
eor x12, x12, x8
add x9, x9, x15
eor x16, x16, x11
add x9, x9, x12
add x14, x14, x16
add x5, x5, x9
add x9, x9, x14
# Round 3
mov x13, v1.d[1]
ldr x15, [x3], #8
ext v10.16b, v1.16b, v2.16b, #8
ror x12, x5, #14
shl v8.2d, v0.2d, #45
ror x14, x9, #28
sri v8.2d, v0.2d, #19
eor x12, x12, x5, ror 18
shl v9.2d, v0.2d, #3
eor x14, x14, x9, ror 34
sri v9.2d, v0.2d, #61
eor x12, x12, x5, ror 41
eor v9.16b, v9.16b, v8.16b
eor x14, x14, x9, ror 39
ushr v8.2d, v0.2d, #6
add x8, x8, x12
eor v9.16b, v9.16b, v8.16b
eor x16, x9, x10
add v1.2d, v1.2d, v9.2d
eor x12, x6, x7
ext v9.16b, v5.16b, v6.16b, #8
and x17, x16, x17
add v1.2d, v1.2d, v9.2d
and x12, x12, x5
shl v8.2d, v10.2d, #63
add x8, x8, x13
sri v8.2d, v10.2d, #1
eor x12, x12, x7
tbl v9.16b, {v10.16b}, v11.16b
add x8, x8, x15
eor v9.16b, v9.16b, v8.16b
eor x17, x17, x10
ushr v10.2d, v10.2d, #7
add x8, x8, x12
eor v9.16b, v9.16b, v10.16b
add x14, x14, x17
add v1.2d, v1.2d, v9.2d
add x4, x4, x8
add x8, x8, x14
# Round 4
mov x13, v2.d[0]
ldr x15, [x3], #8
ror x12, x4, #14
ror x14, x8, #28
eor x12, x12, x4, ror 18
eor x14, x14, x8, ror 34
eor x12, x12, x4, ror 41
eor x14, x14, x8, ror 39
add x7, x7, x12
eor x17, x8, x9
eor x12, x5, x6
and x16, x17, x16
and x12, x12, x4
add x7, x7, x13
eor x12, x12, x6
add x7, x7, x15
eor x16, x16, x9
add x7, x7, x12
add x14, x14, x16
add x11, x11, x7
add x7, x7, x14
# Round 5
mov x13, v2.d[1]
ldr x15, [x3], #8
ext v10.16b, v2.16b, v3.16b, #8
ror x12, x11, #14
shl v8.2d, v1.2d, #45
ror x14, x7, #28
sri v8.2d, v1.2d, #19
eor x12, x12, x11, ror 18
shl v9.2d, v1.2d, #3
eor x14, x14, x7, ror 34
sri v9.2d, v1.2d, #61
eor x12, x12, x11, ror 41
eor v9.16b, v9.16b, v8.16b
eor x14, x14, x7, ror 39
ushr v8.2d, v1.2d, #6
add x6, x6, x12
eor v9.16b, v9.16b, v8.16b
eor x16, x7, x8
add v2.2d, v2.2d, v9.2d
eor x12, x4, x5
ext v9.16b, v6.16b, v7.16b, #8
and x17, x16, x17
add v2.2d, v2.2d, v9.2d
and x12, x12, x11
shl v8.2d, v10.2d, #63
add x6, x6, x13
sri v8.2d, v10.2d, #1
eor x12, x12, x5
tbl v9.16b, {v10.16b}, v11.16b
add x6, x6, x15
eor v9.16b, v9.16b, v8.16b
eor x17, x17, x8
ushr v10.2d, v10.2d, #7
add x6, x6, x12
eor v9.16b, v9.16b, v10.16b
add x14, x14, x17
add v2.2d, v2.2d, v9.2d
add x10, x10, x6
add x6, x6, x14
# Round 6
mov x13, v3.d[0]
ldr x15, [x3], #8
ror x12, x10, #14
ror x14, x6, #28
eor x12, x12, x10, ror 18
eor x14, x14, x6, ror 34
eor x12, x12, x10, ror 41
eor x14, x14, x6, ror 39
add x5, x5, x12
eor x17, x6, x7
eor x12, x11, x4
and x16, x17, x16
and x12, x12, x10
add x5, x5, x13
eor x12, x12, x4
add x5, x5, x15
eor x16, x16, x7
add x5, x5, x12
add x14, x14, x16
add x9, x9, x5
add x5, x5, x14
# Round 7
mov x13, v3.d[1]
ldr x15, [x3], #8
ext v10.16b, v3.16b, v4.16b, #8
ror x12, x9, #14
shl v8.2d, v2.2d, #45
ror x14, x5, #28
sri v8.2d, v2.2d, #19
eor x12, x12, x9, ror 18
shl v9.2d, v2.2d, #3
eor x14, x14, x5, ror 34
sri v9.2d, v2.2d, #61
eor x12, x12, x9, ror 41
eor v9.16b, v9.16b, v8.16b
eor x14, x14, x5, ror 39
ushr v8.2d, v2.2d, #6
add x4, x4, x12
eor v9.16b, v9.16b, v8.16b
eor x16, x5, x6
add v3.2d, v3.2d, v9.2d
eor x12, x10, x11
ext v9.16b, v7.16b, v0.16b, #8
and x17, x16, x17
add v3.2d, v3.2d, v9.2d
and x12, x12, x9
shl v8.2d, v10.2d, #63
add x4, x4, x13
sri v8.2d, v10.2d, #1
eor x12, x12, x11
tbl v9.16b, {v10.16b}, v11.16b
add x4, x4, x15
eor v9.16b, v9.16b, v8.16b
eor x17, x17, x6
ushr v10.2d, v10.2d, #7
add x4, x4, x12
eor v9.16b, v9.16b, v10.16b
add x14, x14, x17
add v3.2d, v3.2d, v9.2d
add x8, x8, x4
add x4, x4, x14
# Round 8
mov x13, v4.d[0]
ldr x15, [x3], #8
ror x12, x8, #14
ror x14, x4, #28
eor x12, x12, x8, ror 18
eor x14, x14, x4, ror 34
eor x12, x12, x8, ror 41
eor x14, x14, x4, ror 39
add x11, x11, x12
eor x17, x4, x5
eor x12, x9, x10
and x16, x17, x16
and x12, x12, x8
add x11, x11, x13
eor x12, x12, x10
add x11, x11, x15
eor x16, x16, x5
add x11, x11, x12
add x14, x14, x16
add x7, x7, x11
add x11, x11, x14
# Round 9
mov x13, v4.d[1]
ldr x15, [x3], #8
ext v10.16b, v4.16b, v5.16b, #8
ror x12, x7, #14
shl v8.2d, v3.2d, #45
ror x14, x11, #28
sri v8.2d, v3.2d, #19
eor x12, x12, x7, ror 18
shl v9.2d, v3.2d, #3
eor x14, x14, x11, ror 34
sri v9.2d, v3.2d, #61
eor x12, x12, x7, ror 41
eor v9.16b, v9.16b, v8.16b
eor x14, x14, x11, ror 39
ushr v8.2d, v3.2d, #6
add x10, x10, x12
eor v9.16b, v9.16b, v8.16b
eor x16, x11, x4
add v4.2d, v4.2d, v9.2d
eor x12, x8, x9
ext v9.16b, v0.16b, v1.16b, #8
and x17, x16, x17
add v4.2d, v4.2d, v9.2d
and x12, x12, x7
shl v8.2d, v10.2d, #63
add x10, x10, x13
sri v8.2d, v10.2d, #1
eor x12, x12, x9
tbl v9.16b, {v10.16b}, v11.16b
add x10, x10, x15
eor v9.16b, v9.16b, v8.16b
eor x17, x17, x4
ushr v10.2d, v10.2d, #7
add x10, x10, x12
eor v9.16b, v9.16b, v10.16b
add x14, x14, x17
add v4.2d, v4.2d, v9.2d
add x6, x6, x10
add x10, x10, x14
# Round 10
mov x13, v5.d[0]
ldr x15, [x3], #8
ror x12, x6, #14
ror x14, x10, #28
eor x12, x12, x6, ror 18
eor x14, x14, x10, ror 34
eor x12, x12, x6, ror 41
eor x14, x14, x10, ror 39
add x9, x9, x12
eor x17, x10, x11
eor x12, x7, x8
and x16, x17, x16
and x12, x12, x6
add x9, x9, x13
eor x12, x12, x8
add x9, x9, x15
eor x16, x16, x11
add x9, x9, x12
add x14, x14, x16
add x5, x5, x9
add x9, x9, x14
# Round 11
mov x13, v5.d[1]
ldr x15, [x3], #8
ext v10.16b, v5.16b, v6.16b, #8
ror x12, x5, #14
shl v8.2d, v4.2d, #45
ror x14, x9, #28
sri v8.2d, v4.2d, #19
eor x12, x12, x5, ror 18
shl v9.2d, v4.2d, #3
eor x14, x14, x9, ror 34
sri v9.2d, v4.2d, #61
eor x12, x12, x5, ror 41
eor v9.16b, v9.16b, v8.16b
eor x14, x14, x9, ror 39
ushr v8.2d, v4.2d, #6
add x8, x8, x12
eor v9.16b, v9.16b, v8.16b
eor x16, x9, x10
add v5.2d, v5.2d, v9.2d
eor x12, x6, x7
ext v9.16b, v1.16b, v2.16b, #8
and x17, x16, x17
add v5.2d, v5.2d, v9.2d
and x12, x12, x5
shl v8.2d, v10.2d, #63
add x8, x8, x13
sri v8.2d, v10.2d, #1
eor x12, x12, x7
tbl v9.16b, {v10.16b}, v11.16b
add x8, x8, x15
eor v9.16b, v9.16b, v8.16b
eor x17, x17, x10
ushr v10.2d, v10.2d, #7
add x8, x8, x12
eor v9.16b, v9.16b, v10.16b
add x14, x14, x17
add v5.2d, v5.2d, v9.2d
add x4, x4, x8
add x8, x8, x14
# Round 12
mov x13, v6.d[0]
ldr x15, [x3], #8
ror x12, x4, #14
ror x14, x8, #28
eor x12, x12, x4, ror 18
eor x14, x14, x8, ror 34
eor x12, x12, x4, ror 41
eor x14, x14, x8, ror 39
add x7, x7, x12
eor x17, x8, x9
eor x12, x5, x6
and x16, x17, x16
and x12, x12, x4
add x7, x7, x13
eor x12, x12, x6
add x7, x7, x15
eor x16, x16, x9
add x7, x7, x12
add x14, x14, x16
add x11, x11, x7
add x7, x7, x14
# Round 13
mov x13, v6.d[1]
ldr x15, [x3], #8
ext v10.16b, v6.16b, v7.16b, #8
ror x12, x11, #14
shl v8.2d, v5.2d, #45
ror x14, x7, #28
sri v8.2d, v5.2d, #19
eor x12, x12, x11, ror 18
shl v9.2d, v5.2d, #3
eor x14, x14, x7, ror 34
sri v9.2d, v5.2d, #61
eor x12, x12, x11, ror 41
eor v9.16b, v9.16b, v8.16b
eor x14, x14, x7, ror 39
ushr v8.2d, v5.2d, #6
add x6, x6, x12
eor v9.16b, v9.16b, v8.16b
eor x16, x7, x8
add v6.2d, v6.2d, v9.2d
eor x12, x4, x5
ext v9.16b, v2.16b, v3.16b, #8
and x17, x16, x17
add v6.2d, v6.2d, v9.2d
and x12, x12, x11
shl v8.2d, v10.2d, #63
add x6, x6, x13
sri v8.2d, v10.2d, #1
eor x12, x12, x5
tbl v9.16b, {v10.16b}, v11.16b
add x6, x6, x15
eor v9.16b, v9.16b, v8.16b
eor x17, x17, x8
ushr v10.2d, v10.2d, #7
add x6, x6, x12
eor v9.16b, v9.16b, v10.16b
add x14, x14, x17
add v6.2d, v6.2d, v9.2d
add x10, x10, x6
add x6, x6, x14
# Round 14
mov x13, v7.d[0]
ldr x15, [x3], #8
ror x12, x10, #14
ror x14, x6, #28
eor x12, x12, x10, ror 18
eor x14, x14, x6, ror 34
eor x12, x12, x10, ror 41
eor x14, x14, x6, ror 39
add x5, x5, x12
eor x17, x6, x7
eor x12, x11, x4
and x16, x17, x16
and x12, x12, x10
add x5, x5, x13
eor x12, x12, x4
add x5, x5, x15
eor x16, x16, x7
add x5, x5, x12
add x14, x14, x16
add x9, x9, x5
add x5, x5, x14
# Round 15
mov x13, v7.d[1]
ldr x15, [x3], #8
ext v10.16b, v7.16b, v0.16b, #8
ror x12, x9, #14
shl v8.2d, v6.2d, #45
ror x14, x5, #28
sri v8.2d, v6.2d, #19
eor x12, x12, x9, ror 18
shl v9.2d, v6.2d, #3
eor x14, x14, x5, ror 34
sri v9.2d, v6.2d, #61
eor x12, x12, x9, ror 41
eor v9.16b, v9.16b, v8.16b
eor x14, x14, x5, ror 39
ushr v8.2d, v6.2d, #6
add x4, x4, x12
eor v9.16b, v9.16b, v8.16b
eor x16, x5, x6
add v7.2d, v7.2d, v9.2d
eor x12, x10, x11
ext v9.16b, v3.16b, v4.16b, #8
and x17, x16, x17
add v7.2d, v7.2d, v9.2d
and x12, x12, x9
shl v8.2d, v10.2d, #63
add x4, x4, x13
sri v8.2d, v10.2d, #1
eor x12, x12, x11
tbl v9.16b, {v10.16b}, v11.16b
add x4, x4, x15
eor v9.16b, v9.16b, v8.16b
eor x17, x17, x6
ushr v10.2d, v10.2d, #7
add x4, x4, x12
eor v9.16b, v9.16b, v10.16b
add x14, x14, x17
add v7.2d, v7.2d, v9.2d
add x8, x8, x4
add x4, x4, x14
subs x27, x27, #1
bne L_sha512_len_neon_start
# Final 16 rounds (64..79): scalar-only, no further schedule updates.
# Round 0
mov x13, v0.d[0]
ldr x15, [x3], #8
ror x12, x8, #14
ror x14, x4, #28
eor x12, x12, x8, ror 18
eor x14, x14, x4, ror 34
eor x12, x12, x8, ror 41
eor x14, x14, x4, ror 39
add x11, x11, x12
eor x17, x4, x5
eor x12, x9, x10
and x16, x17, x16
and x12, x12, x8
add x11, x11, x13
eor x12, x12, x10
add x11, x11, x15
eor x16, x16, x5
add x11, x11, x12
add x14, x14, x16
add x7, x7, x11
add x11, x11, x14
# Round 1
mov x13, v0.d[1]
ldr x15, [x3], #8
ror x12, x7, #14
ror x14, x11, #28
eor x12, x12, x7, ror 18
eor x14, x14, x11, ror 34
eor x12, x12, x7, ror 41
eor x14, x14, x11, ror 39
add x10, x10, x12
eor x16, x11, x4
eor x12, x8, x9
and x17, x16, x17
and x12, x12, x7
add x10, x10, x13
eor x12, x12, x9
add x10, x10, x15
eor x17, x17, x4
add x10, x10, x12
add x14, x14, x17
add x6, x6, x10
add x10, x10, x14
# Round 2
mov x13, v1.d[0]
ldr x15, [x3], #8
ror x12, x6, #14
ror x14, x10, #28
eor x12, x12, x6, ror 18
eor x14, x14, x10, ror 34
eor x12, x12, x6, ror 41
eor x14, x14, x10, ror 39
add x9, x9, x12
eor x17, x10, x11
eor x12, x7, x8
and x16, x17, x16
and x12, x12, x6
add x9, x9, x13
eor x12, x12, x8
add x9, x9, x15
eor x16, x16, x11
add x9, x9, x12
add x14, x14, x16
add x5, x5, x9
add x9, x9, x14
# Round 3
mov x13, v1.d[1]
ldr x15, [x3], #8
ror x12, x5, #14
ror x14, x9, #28
eor x12, x12, x5, ror 18
eor x14, x14, x9, ror 34
eor x12, x12, x5, ror 41
eor x14, x14, x9, ror 39
add x8, x8, x12
eor x16, x9, x10
eor x12, x6, x7
and x17, x16, x17
and x12, x12, x5
add x8, x8, x13
eor x12, x12, x7
add x8, x8, x15
eor x17, x17, x10
add x8, x8, x12
add x14, x14, x17
add x4, x4, x8
add x8, x8, x14
# Round 4
mov x13, v2.d[0]
ldr x15, [x3], #8
ror x12, x4, #14
ror x14, x8, #28
eor x12, x12, x4, ror 18
eor x14, x14, x8, ror 34
eor x12, x12, x4, ror 41
eor x14, x14, x8, ror 39
add x7, x7, x12
eor x17, x8, x9
eor x12, x5, x6
and x16, x17, x16
and x12, x12, x4
add x7, x7, x13
eor x12, x12, x6
add x7, x7, x15
eor x16, x16, x9
add x7, x7, x12
add x14, x14, x16
add x11, x11, x7
add x7, x7, x14
# Round 5
mov x13, v2.d[1]
ldr x15, [x3], #8
ror x12, x11, #14
ror x14, x7, #28
eor x12, x12, x11, ror 18
eor x14, x14, x7, ror 34
eor x12, x12, x11, ror 41
eor x14, x14, x7, ror 39
add x6, x6, x12
eor x16, x7, x8
eor x12, x4, x5
and x17, x16, x17
and x12, x12, x11
add x6, x6, x13
eor x12, x12, x5
add x6, x6, x15
eor x17, x17, x8
add x6, x6, x12
add x14, x14, x17
add x10, x10, x6
add x6, x6, x14
# Round 6
mov x13, v3.d[0]
ldr x15, [x3], #8
ror x12, x10, #14
ror x14, x6, #28
eor x12, x12, x10, ror 18
eor x14, x14, x6, ror 34
eor x12, x12, x10, ror 41
eor x14, x14, x6, ror 39
add x5, x5, x12
eor x17, x6, x7
eor x12, x11, x4
and x16, x17, x16
and x12, x12, x10
add x5, x5, x13
eor x12, x12, x4
add x5, x5, x15
eor x16, x16, x7
add x5, x5, x12
add x14, x14, x16
add x9, x9, x5
add x5, x5, x14
# Round 7
mov x13, v3.d[1]
ldr x15, [x3], #8
ror x12, x9, #14
ror x14, x5, #28
eor x12, x12, x9, ror 18
eor x14, x14, x5, ror 34
eor x12, x12, x9, ror 41
eor x14, x14, x5, ror 39
add x4, x4, x12
eor x16, x5, x6
eor x12, x10, x11
and x17, x16, x17
and x12, x12, x9
add x4, x4, x13
eor x12, x12, x11
add x4, x4, x15
eor x17, x17, x6
add x4, x4, x12
add x14, x14, x17
add x8, x8, x4
add x4, x4, x14
# Round 8
mov x13, v4.d[0]
ldr x15, [x3], #8
ror x12, x8, #14
ror x14, x4, #28
eor x12, x12, x8, ror 18
eor x14, x14, x4, ror 34
eor x12, x12, x8, ror 41
eor x14, x14, x4, ror 39
add x11, x11, x12
eor x17, x4, x5
eor x12, x9, x10
and x16, x17, x16
and x12, x12, x8
add x11, x11, x13
eor x12, x12, x10
add x11, x11, x15
eor x16, x16, x5
add x11, x11, x12
add x14, x14, x16
add x7, x7, x11
add x11, x11, x14
# Round 9
mov x13, v4.d[1]
ldr x15, [x3], #8
ror x12, x7, #14
ror x14, x11, #28
eor x12, x12, x7, ror 18
eor x14, x14, x11, ror 34
eor x12, x12, x7, ror 41
eor x14, x14, x11, ror 39
add x10, x10, x12
eor x16, x11, x4
eor x12, x8, x9
and x17, x16, x17
and x12, x12, x7
add x10, x10, x13
eor x12, x12, x9
add x10, x10, x15
eor x17, x17, x4
add x10, x10, x12
add x14, x14, x17
add x6, x6, x10
add x10, x10, x14
# Round 10
mov x13, v5.d[0]
ldr x15, [x3], #8
ror x12, x6, #14
ror x14, x10, #28
eor x12, x12, x6, ror 18
eor x14, x14, x10, ror 34
eor x12, x12, x6, ror 41
eor x14, x14, x10, ror 39
add x9, x9, x12
eor x17, x10, x11
eor x12, x7, x8
and x16, x17, x16
and x12, x12, x6
add x9, x9, x13
eor x12, x12, x8
add x9, x9, x15
eor x16, x16, x11
add x9, x9, x12
add x14, x14, x16
add x5, x5, x9
add x9, x9, x14
# Round 11
mov x13, v5.d[1]
ldr x15, [x3], #8
ror x12, x5, #14
ror x14, x9, #28
eor x12, x12, x5, ror 18
eor x14, x14, x9, ror 34
eor x12, x12, x5, ror 41
eor x14, x14, x9, ror 39
add x8, x8, x12
eor x16, x9, x10
eor x12, x6, x7
and x17, x16, x17
and x12, x12, x5
add x8, x8, x13
eor x12, x12, x7
add x8, x8, x15
eor x17, x17, x10
add x8, x8, x12
add x14, x14, x17
add x4, x4, x8
add x8, x8, x14
# Round 12
mov x13, v6.d[0]
ldr x15, [x3], #8
ror x12, x4, #14
ror x14, x8, #28
eor x12, x12, x4, ror 18
eor x14, x14, x8, ror 34
eor x12, x12, x4, ror 41
eor x14, x14, x8, ror 39
add x7, x7, x12
eor x17, x8, x9
eor x12, x5, x6
and x16, x17, x16
and x12, x12, x4
add x7, x7, x13
eor x12, x12, x6
add x7, x7, x15
eor x16, x16, x9
add x7, x7, x12
add x14, x14, x16
add x11, x11, x7
add x7, x7, x14
# Round 13
mov x13, v6.d[1]
ldr x15, [x3], #8
ror x12, x11, #14
ror x14, x7, #28
eor x12, x12, x11, ror 18
eor x14, x14, x7, ror 34
eor x12, x12, x11, ror 41
eor x14, x14, x7, ror 39
add x6, x6, x12
eor x16, x7, x8
eor x12, x4, x5
and x17, x16, x17
and x12, x12, x11
add x6, x6, x13
eor x12, x12, x5
add x6, x6, x15
eor x17, x17, x8
add x6, x6, x12
add x14, x14, x17
add x10, x10, x6
add x6, x6, x14
# Round 14
mov x13, v7.d[0]
ldr x15, [x3], #8
ror x12, x10, #14
ror x14, x6, #28
eor x12, x12, x10, ror 18
eor x14, x14, x6, ror 34
eor x12, x12, x10, ror 41
eor x14, x14, x6, ror 39
add x5, x5, x12
eor x17, x6, x7
eor x12, x11, x4
and x16, x17, x16
and x12, x12, x10
add x5, x5, x13
eor x12, x12, x4
add x5, x5, x15
eor x16, x16, x7
add x5, x5, x12
add x14, x14, x16
add x9, x9, x5
add x5, x5, x14
# Round 15
mov x13, v7.d[1]
ldr x15, [x3], #8
ror x12, x9, #14
ror x14, x5, #28
eor x12, x12, x9, ror 18
eor x14, x14, x5, ror 34
eor x12, x12, x9, ror 41
eor x14, x14, x5, ror 39
add x4, x4, x12
eor x16, x5, x6
eor x12, x10, x11
and x17, x16, x17
and x12, x12, x9
add x4, x4, x13
eor x12, x12, x11
add x4, x4, x15
eor x17, x17, x6
add x4, x4, x12
add x14, x14, x17
add x8, x8, x4
add x4, x4, x14
# Add the pre-block digest copy back into the working variables.
add x11, x11, x26
add x10, x10, x25
add x9, x9, x24
add x8, x8, x23
add x7, x7, x22
add x6, x6, x21
add x5, x5, x20
add x4, x4, x19
#ifndef __APPLE__
adrp x3, L_SHA512_transform_neon_len_k
add x3, x3, :lo12:L_SHA512_transform_neon_len_k
#else
adrp x3, L_SHA512_transform_neon_len_k@PAGE
add x3, x3, :lo12:L_SHA512_transform_neon_len_k@PAGEOFF
#endif /* __APPLE__ */
subs w2, w2, #0x80
bne L_sha512_len_neon_begin
# Store the final digest and restore callee-saved registers.
stp x4, x5, [x0]
stp x6, x7, [x0, #16]
stp x8, x9, [x0, #32]
stp x10, x11, [x0, #48]
ldr x17, [x29, #16]
ldr x19, [x29, #24]
ldp x20, x21, [x29, #32]
ldp x22, x23, [x29, #48]
ldp x24, x25, [x29, #64]
ldp x26, x27, [x29, #80]
ldp d8, d9, [x29, #96]
ldp d10, d11, [x29, #112]
ldp x29, x30, [sp], #0x80
ret
#ifndef __APPLE__
.size Transform_Sha512_Len_neon,.-Transform_Sha512_Len_neon
#endif /* __APPLE__ */
#else
#ifndef __APPLE__
.text
.type L_SHA512_transform_crypto_len_k, %object
.section .rodata
.size L_SHA512_transform_crypto_len_k, 640
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 3
#else
.p2align 3
#endif /* __APPLE__ */
# SHA-512 round constants K[0..79] (FIPS 180-4), 80 x 64-bit words.
# Leading zeros are dropped by the generator (e.g. 0xfc19dc68b8cd5b5 is
# K[18] = 0x0fc19dc68b8cd5b5).
L_SHA512_transform_crypto_len_k:
.xword 0x428a2f98d728ae22
.xword 0x7137449123ef65cd
.xword 0xb5c0fbcfec4d3b2f
.xword 0xe9b5dba58189dbbc
.xword 0x3956c25bf348b538
.xword 0x59f111f1b605d019
.xword 0x923f82a4af194f9b
.xword 0xab1c5ed5da6d8118
.xword 0xd807aa98a3030242
.xword 0x12835b0145706fbe
.xword 0x243185be4ee4b28c
.xword 0x550c7dc3d5ffb4e2
.xword 0x72be5d74f27b896f
.xword 0x80deb1fe3b1696b1
.xword 0x9bdc06a725c71235
.xword 0xc19bf174cf692694
.xword 0xe49b69c19ef14ad2
.xword 0xefbe4786384f25e3
.xword 0xfc19dc68b8cd5b5
.xword 0x240ca1cc77ac9c65
.xword 0x2de92c6f592b0275
.xword 0x4a7484aa6ea6e483
.xword 0x5cb0a9dcbd41fbd4
.xword 0x76f988da831153b5
.xword 0x983e5152ee66dfab
.xword 0xa831c66d2db43210
.xword 0xb00327c898fb213f
.xword 0xbf597fc7beef0ee4
.xword 0xc6e00bf33da88fc2
.xword 0xd5a79147930aa725
.xword 0x6ca6351e003826f
.xword 0x142929670a0e6e70
.xword 0x27b70a8546d22ffc
.xword 0x2e1b21385c26c926
.xword 0x4d2c6dfc5ac42aed
.xword 0x53380d139d95b3df
.xword 0x650a73548baf63de
.xword 0x766a0abb3c77b2a8
.xword 0x81c2c92e47edaee6
.xword 0x92722c851482353b
.xword 0xa2bfe8a14cf10364
.xword 0xa81a664bbc423001
.xword 0xc24b8b70d0f89791
.xword 0xc76c51a30654be30
.xword 0xd192e819d6ef5218
.xword 0xd69906245565a910
.xword 0xf40e35855771202a
.xword 0x106aa07032bbd1b8
.xword 0x19a4c116b8d2d0c8
.xword 0x1e376c085141ab53
.xword 0x2748774cdf8eeb99
.xword 0x34b0bcb5e19b48a8
.xword 0x391c0cb3c5c95a63
.xword 0x4ed8aa4ae3418acb
.xword 0x5b9cca4f7763e373
.xword 0x682e6ff3d6b2b8a3
.xword 0x748f82ee5defb2fc
.xword 0x78a5636f43172f60
.xword 0x84c87814a1f0ab72
.xword 0x8cc702081a6439ec
.xword 0x90befffa23631e28
.xword 0xa4506cebde82bde9
.xword 0xbef9a3f7b2c67915
.xword 0xc67178f2e372532b
.xword 0xca273eceea26619c
.xword 0xd186b8c721c0c207
.xword 0xeada7dd6cde0eb1e
.xword 0xf57d4f7fee6ed178
.xword 0x6f067aa72176fba
.xword 0xa637dc5a2c898a6
.xword 0x113f9804bef90dae
.xword 0x1b710b35131c471b
.xword 0x28db77f523047d84
.xword 0x32caab7b40c72493
.xword 0x3c9ebe0a15c9bebc
.xword 0x431d67c49c100d4c
.xword 0x4cc5d4becb3e42b6
.xword 0x597f299cfc657e2a
.xword 0x5fcb6fab3ad6faec
.xword 0x6c44198c4a475817
# -----------------------------------------------------------------------
# Transform_Sha512_Len_crypto — SHA-512 compression over full 128-byte
# blocks using the ARMv8.2 SHA-512 crypto extensions
# (sha512h / sha512h2 / sha512su0 / sha512su1).
# ABI: AAPCS64.
# In:  x0 = pointer to digest state, 8 x 64-bit words (read and updated)
#      x1 = pointer to input data (big-endian 128-byte blocks)
#      w2 = byte length; assumed a non-zero multiple of 128 — TODO confirm
#           against the C caller before relying on this.
# Register roles:
#   v24..v27 = working digest (two 64-bit hash words per register)
#   v28..v31 = digest copy added back in at the end of each block
#   v8..v15  = K[0..15], loaded once and kept live (callee-saved, so the
#              low 64 bits are spilled to the frame)
#   v16..v19 = streamed K constants for the remaining rounds (via x3)
#   v0..v7   = 16-word message schedule W; v20..v23 = round temporaries
# -----------------------------------------------------------------------
#ifndef __APPLE__
.text
.globl Transform_Sha512_Len_crypto
.type Transform_Sha512_Len_crypto,@function
.align 2
Transform_Sha512_Len_crypto:
#else
.section __TEXT,__text
.globl _Transform_Sha512_Len_crypto
.p2align 2
_Transform_Sha512_Len_crypto:
#endif /* __APPLE__ */
stp x29, x30, [sp, #-80]!
add x29, sp, #0
stp d8, d9, [x29, #16]
stp d10, d11, [x29, #32]
stp d12, d13, [x29, #48]
stp d14, d15, [x29, #64]
#ifdef __APPLE__
.arch_extension sha3
#endif /* __APPLE__ */
#ifndef __APPLE__
adrp x4, L_SHA512_transform_crypto_len_k
add x4, x4, :lo12:L_SHA512_transform_crypto_len_k
#else
adrp x4, L_SHA512_transform_crypto_len_k@PAGE
add x4, x4, :lo12:L_SHA512_transform_crypto_len_k@PAGEOFF
#endif /* __APPLE__ */
# Load first 16 64-bit words of K permanently
ld1 {v8.2d, v9.2d, v10.2d, v11.2d}, [x4], #0x40
ld1 {v12.2d, v13.2d, v14.2d, v15.2d}, [x4], #0x40
# Load digest into working vars
ld1 {v24.2d, v25.2d, v26.2d, v27.2d}, [x0]
# Start of loop processing a block
L_sha512_len_crypto_begin:
# x3 walks K[16..79]; x4 stays at K[16] for the next block.
mov x3, x4
# Load W
ld1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x1], #0x40
ld1 {v4.2d, v5.2d, v6.2d, v7.2d}, [x1], #0x40
rev64 v0.16b, v0.16b
rev64 v1.16b, v1.16b
rev64 v2.16b, v2.16b
rev64 v3.16b, v3.16b
rev64 v4.16b, v4.16b
rev64 v5.16b, v5.16b
rev64 v6.16b, v6.16b
rev64 v7.16b, v7.16b
# Copy digest to add in at end
mov v28.16b, v24.16b
mov v29.16b, v25.16b
mov v30.16b, v26.16b
mov v31.16b, v27.16b
# Start of 16 rounds
# Each "Round n" below processes two SHA-512 rounds (one W pair).
# Rounds 0..7 use the resident K[0..15]; rounds 8+ also run
# sha512su0/sha512su1 to extend the message schedule in place.
# Round 0
add v20.2d, v0.2d, v8.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v26.16b, v27.16b, #8
ext v22.16b, v25.16b, v26.16b, #8
add v27.2d, v27.2d, v20.2d
sha512h q27, q21, v22.2d
add v23.2d, v25.2d, v27.2d
sha512h2 q27, q25, v24.2d
# Round 1
add v20.2d, v1.2d, v9.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v23.16b, v26.16b, #8
ext v22.16b, v24.16b, v23.16b, #8
add v26.2d, v26.2d, v20.2d
sha512h q26, q21, v22.2d
add v25.2d, v24.2d, v26.2d
sha512h2 q26, q24, v27.2d
# Round 2
add v20.2d, v2.2d, v10.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v25.16b, v23.16b, #8
ext v22.16b, v27.16b, v25.16b, #8
add v23.2d, v23.2d, v20.2d
sha512h q23, q21, v22.2d
add v24.2d, v27.2d, v23.2d
sha512h2 q23, q27, v26.2d
# Round 3
add v20.2d, v3.2d, v11.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v24.16b, v25.16b, #8
ext v22.16b, v26.16b, v24.16b, #8
add v25.2d, v25.2d, v20.2d
sha512h q25, q21, v22.2d
add v27.2d, v26.2d, v25.2d
sha512h2 q25, q26, v23.2d
# Round 4
add v20.2d, v4.2d, v12.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v27.16b, v24.16b, #8
ext v22.16b, v23.16b, v27.16b, #8
add v24.2d, v24.2d, v20.2d
sha512h q24, q21, v22.2d
add v26.2d, v23.2d, v24.2d
sha512h2 q24, q23, v25.2d
# Round 5
add v20.2d, v5.2d, v13.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v26.16b, v27.16b, #8
ext v22.16b, v25.16b, v26.16b, #8
add v27.2d, v27.2d, v20.2d
sha512h q27, q21, v22.2d
add v23.2d, v25.2d, v27.2d
sha512h2 q27, q25, v24.2d
# Round 6
add v20.2d, v6.2d, v14.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v23.16b, v26.16b, #8
ext v22.16b, v24.16b, v23.16b, #8
add v26.2d, v26.2d, v20.2d
sha512h q26, q21, v22.2d
add v25.2d, v24.2d, v26.2d
sha512h2 q26, q24, v27.2d
# Round 7
add v20.2d, v7.2d, v15.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v25.16b, v23.16b, #8
ext v22.16b, v27.16b, v25.16b, #8
add v23.2d, v23.2d, v20.2d
sha512h q23, q21, v22.2d
add v24.2d, v27.2d, v23.2d
sha512h2 q23, q27, v26.2d
# Load next 8 64-bit words of K
ld1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x3], #0x40
# Round 8
sha512su0 v0.2d, v1.2d
ext v21.16b, v4.16b, v5.16b, #8
sha512su1 v0.2d, v7.2d, v21.2d
add v20.2d, v0.2d, v16.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v24.16b, v25.16b, #8
ext v22.16b, v26.16b, v24.16b, #8
add v25.2d, v25.2d, v20.2d
sha512h q25, q21, v22.2d
add v27.2d, v26.2d, v25.2d
sha512h2 q25, q26, v23.2d
# Round 9
sha512su0 v1.2d, v2.2d
ext v21.16b, v5.16b, v6.16b, #8
sha512su1 v1.2d, v0.2d, v21.2d
add v20.2d, v1.2d, v17.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v27.16b, v24.16b, #8
ext v22.16b, v23.16b, v27.16b, #8
add v24.2d, v24.2d, v20.2d
sha512h q24, q21, v22.2d
add v26.2d, v23.2d, v24.2d
sha512h2 q24, q23, v25.2d
# Round 10
sha512su0 v2.2d, v3.2d
ext v21.16b, v6.16b, v7.16b, #8
sha512su1 v2.2d, v1.2d, v21.2d
add v20.2d, v2.2d, v18.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v26.16b, v27.16b, #8
ext v22.16b, v25.16b, v26.16b, #8
add v27.2d, v27.2d, v20.2d
sha512h q27, q21, v22.2d
add v23.2d, v25.2d, v27.2d
sha512h2 q27, q25, v24.2d
# Round 11
sha512su0 v3.2d, v4.2d
ext v21.16b, v7.16b, v0.16b, #8
sha512su1 v3.2d, v2.2d, v21.2d
add v20.2d, v3.2d, v19.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v23.16b, v26.16b, #8
ext v22.16b, v24.16b, v23.16b, #8
add v26.2d, v26.2d, v20.2d
sha512h q26, q21, v22.2d
add v25.2d, v24.2d, v26.2d
sha512h2 q26, q24, v27.2d
# Load next 8 64-bit words of K
ld1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x3], #0x40
# Round 12
sha512su0 v4.2d, v5.2d
ext v21.16b, v0.16b, v1.16b, #8
sha512su1 v4.2d, v3.2d, v21.2d
add v20.2d, v4.2d, v16.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v25.16b, v23.16b, #8
ext v22.16b, v27.16b, v25.16b, #8
add v23.2d, v23.2d, v20.2d
sha512h q23, q21, v22.2d
add v24.2d, v27.2d, v23.2d
sha512h2 q23, q27, v26.2d
# Round 13
sha512su0 v5.2d, v6.2d
ext v21.16b, v1.16b, v2.16b, #8
sha512su1 v5.2d, v4.2d, v21.2d
add v20.2d, v5.2d, v17.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v24.16b, v25.16b, #8
ext v22.16b, v26.16b, v24.16b, #8
add v25.2d, v25.2d, v20.2d
sha512h q25, q21, v22.2d
add v27.2d, v26.2d, v25.2d
sha512h2 q25, q26, v23.2d
# Round 14
sha512su0 v6.2d, v7.2d
ext v21.16b, v2.16b, v3.16b, #8
sha512su1 v6.2d, v5.2d, v21.2d
add v20.2d, v6.2d, v18.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v27.16b, v24.16b, #8
ext v22.16b, v23.16b, v27.16b, #8
add v24.2d, v24.2d, v20.2d
sha512h q24, q21, v22.2d
add v26.2d, v23.2d, v24.2d
sha512h2 q24, q23, v25.2d
# Round 15
sha512su0 v7.2d, v0.2d
ext v21.16b, v3.16b, v4.16b, #8
sha512su1 v7.2d, v6.2d, v21.2d
add v20.2d, v7.2d, v19.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v26.16b, v27.16b, #8
ext v22.16b, v25.16b, v26.16b, #8
add v27.2d, v27.2d, v20.2d
sha512h q27, q21, v22.2d
add v23.2d, v25.2d, v27.2d
sha512h2 q27, q25, v24.2d
# Load next 8 64-bit words of K
ld1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x3], #0x40
# Round 16
sha512su0 v0.2d, v1.2d
ext v21.16b, v4.16b, v5.16b, #8
sha512su1 v0.2d, v7.2d, v21.2d
add v20.2d, v0.2d, v16.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v23.16b, v26.16b, #8
ext v22.16b, v24.16b, v23.16b, #8
add v26.2d, v26.2d, v20.2d
sha512h q26, q21, v22.2d
add v25.2d, v24.2d, v26.2d
sha512h2 q26, q24, v27.2d
# Round 17
sha512su0 v1.2d, v2.2d
ext v21.16b, v5.16b, v6.16b, #8
sha512su1 v1.2d, v0.2d, v21.2d
add v20.2d, v1.2d, v17.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v25.16b, v23.16b, #8
ext v22.16b, v27.16b, v25.16b, #8
add v23.2d, v23.2d, v20.2d
sha512h q23, q21, v22.2d
add v24.2d, v27.2d, v23.2d
sha512h2 q23, q27, v26.2d
# Round 18
sha512su0 v2.2d, v3.2d
ext v21.16b, v6.16b, v7.16b, #8
sha512su1 v2.2d, v1.2d, v21.2d
add v20.2d, v2.2d, v18.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v24.16b, v25.16b, #8
ext v22.16b, v26.16b, v24.16b, #8
add v25.2d, v25.2d, v20.2d
sha512h q25, q21, v22.2d
add v27.2d, v26.2d, v25.2d
sha512h2 q25, q26, v23.2d
# Round 19
sha512su0 v3.2d, v4.2d
ext v21.16b, v7.16b, v0.16b, #8
sha512su1 v3.2d, v2.2d, v21.2d
add v20.2d, v3.2d, v19.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v27.16b, v24.16b, #8
ext v22.16b, v23.16b, v27.16b, #8
add v24.2d, v24.2d, v20.2d
sha512h q24, q21, v22.2d
add v26.2d, v23.2d, v24.2d
sha512h2 q24, q23, v25.2d
# Load next 8 64-bit words of K
ld1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x3], #0x40
# Round 20
sha512su0 v4.2d, v5.2d
ext v21.16b, v0.16b, v1.16b, #8
sha512su1 v4.2d, v3.2d, v21.2d
add v20.2d, v4.2d, v16.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v26.16b, v27.16b, #8
ext v22.16b, v25.16b, v26.16b, #8
add v27.2d, v27.2d, v20.2d
sha512h q27, q21, v22.2d
add v23.2d, v25.2d, v27.2d
sha512h2 q27, q25, v24.2d
# Round 21
sha512su0 v5.2d, v6.2d
ext v21.16b, v1.16b, v2.16b, #8
sha512su1 v5.2d, v4.2d, v21.2d
add v20.2d, v5.2d, v17.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v23.16b, v26.16b, #8
ext v22.16b, v24.16b, v23.16b, #8
add v26.2d, v26.2d, v20.2d
sha512h q26, q21, v22.2d
add v25.2d, v24.2d, v26.2d
sha512h2 q26, q24, v27.2d
# Round 22
sha512su0 v6.2d, v7.2d
ext v21.16b, v2.16b, v3.16b, #8
sha512su1 v6.2d, v5.2d, v21.2d
add v20.2d, v6.2d, v18.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v25.16b, v23.16b, #8
ext v22.16b, v27.16b, v25.16b, #8
add v23.2d, v23.2d, v20.2d
sha512h q23, q21, v22.2d
add v24.2d, v27.2d, v23.2d
sha512h2 q23, q27, v26.2d
# Round 23
sha512su0 v7.2d, v0.2d
ext v21.16b, v3.16b, v4.16b, #8
sha512su1 v7.2d, v6.2d, v21.2d
add v20.2d, v7.2d, v19.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v24.16b, v25.16b, #8
ext v22.16b, v26.16b, v24.16b, #8
add v25.2d, v25.2d, v20.2d
sha512h q25, q21, v22.2d
add v27.2d, v26.2d, v25.2d
sha512h2 q25, q26, v23.2d
# Load next 8 64-bit words of K
ld1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x3], #0x40
# Round 24
sha512su0 v0.2d, v1.2d
ext v21.16b, v4.16b, v5.16b, #8
sha512su1 v0.2d, v7.2d, v21.2d
add v20.2d, v0.2d, v16.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v27.16b, v24.16b, #8
ext v22.16b, v23.16b, v27.16b, #8
add v24.2d, v24.2d, v20.2d
sha512h q24, q21, v22.2d
add v26.2d, v23.2d, v24.2d
sha512h2 q24, q23, v25.2d
# Round 25
sha512su0 v1.2d, v2.2d
ext v21.16b, v5.16b, v6.16b, #8
sha512su1 v1.2d, v0.2d, v21.2d
add v20.2d, v1.2d, v17.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v26.16b, v27.16b, #8
ext v22.16b, v25.16b, v26.16b, #8
add v27.2d, v27.2d, v20.2d
sha512h q27, q21, v22.2d
add v23.2d, v25.2d, v27.2d
sha512h2 q27, q25, v24.2d
# Round 26
sha512su0 v2.2d, v3.2d
ext v21.16b, v6.16b, v7.16b, #8
sha512su1 v2.2d, v1.2d, v21.2d
add v20.2d, v2.2d, v18.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v23.16b, v26.16b, #8
ext v22.16b, v24.16b, v23.16b, #8
add v26.2d, v26.2d, v20.2d
sha512h q26, q21, v22.2d
add v25.2d, v24.2d, v26.2d
sha512h2 q26, q24, v27.2d
# Round 27
sha512su0 v3.2d, v4.2d
ext v21.16b, v7.16b, v0.16b, #8
sha512su1 v3.2d, v2.2d, v21.2d
add v20.2d, v3.2d, v19.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v25.16b, v23.16b, #8
ext v22.16b, v27.16b, v25.16b, #8
add v23.2d, v23.2d, v20.2d
sha512h q23, q21, v22.2d
add v24.2d, v27.2d, v23.2d
sha512h2 q23, q27, v26.2d
# Load next 8 64-bit words of K
ld1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x3], #0x40
# Round 28
sha512su0 v4.2d, v5.2d
ext v21.16b, v0.16b, v1.16b, #8
sha512su1 v4.2d, v3.2d, v21.2d
add v20.2d, v4.2d, v16.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v24.16b, v25.16b, #8
ext v22.16b, v26.16b, v24.16b, #8
add v25.2d, v25.2d, v20.2d
sha512h q25, q21, v22.2d
add v27.2d, v26.2d, v25.2d
sha512h2 q25, q26, v23.2d
# Round 29
sha512su0 v5.2d, v6.2d
ext v21.16b, v1.16b, v2.16b, #8
sha512su1 v5.2d, v4.2d, v21.2d
add v20.2d, v5.2d, v17.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v27.16b, v24.16b, #8
ext v22.16b, v23.16b, v27.16b, #8
add v24.2d, v24.2d, v20.2d
sha512h q24, q21, v22.2d
add v26.2d, v23.2d, v24.2d
sha512h2 q24, q23, v25.2d
# Round 30
sha512su0 v6.2d, v7.2d
ext v21.16b, v2.16b, v3.16b, #8
sha512su1 v6.2d, v5.2d, v21.2d
add v20.2d, v6.2d, v18.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v26.16b, v27.16b, #8
ext v22.16b, v25.16b, v26.16b, #8
add v27.2d, v27.2d, v20.2d
sha512h q27, q21, v22.2d
add v23.2d, v25.2d, v27.2d
sha512h2 q27, q25, v24.2d
# Round 31
sha512su0 v7.2d, v0.2d
ext v21.16b, v3.16b, v4.16b, #8
sha512su1 v7.2d, v6.2d, v21.2d
add v20.2d, v7.2d, v19.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v23.16b, v26.16b, #8
ext v22.16b, v24.16b, v23.16b, #8
add v26.2d, v26.2d, v20.2d
sha512h q26, q21, v22.2d
add v25.2d, v24.2d, v26.2d
sha512h2 q26, q24, v27.2d
# Load next 8 64-bit words of K
ld1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x3], #0x40
# Round 32
sha512su0 v0.2d, v1.2d
ext v21.16b, v4.16b, v5.16b, #8
sha512su1 v0.2d, v7.2d, v21.2d
add v20.2d, v0.2d, v16.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v25.16b, v23.16b, #8
ext v22.16b, v27.16b, v25.16b, #8
add v23.2d, v23.2d, v20.2d
sha512h q23, q21, v22.2d
add v24.2d, v27.2d, v23.2d
sha512h2 q23, q27, v26.2d
# Round 33
sha512su0 v1.2d, v2.2d
ext v21.16b, v5.16b, v6.16b, #8
sha512su1 v1.2d, v0.2d, v21.2d
add v20.2d, v1.2d, v17.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v24.16b, v25.16b, #8
ext v22.16b, v26.16b, v24.16b, #8
add v25.2d, v25.2d, v20.2d
sha512h q25, q21, v22.2d
add v27.2d, v26.2d, v25.2d
sha512h2 q25, q26, v23.2d
# Round 34
sha512su0 v2.2d, v3.2d
ext v21.16b, v6.16b, v7.16b, #8
sha512su1 v2.2d, v1.2d, v21.2d
add v20.2d, v2.2d, v18.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v27.16b, v24.16b, #8
ext v22.16b, v23.16b, v27.16b, #8
add v24.2d, v24.2d, v20.2d
sha512h q24, q21, v22.2d
add v26.2d, v23.2d, v24.2d
sha512h2 q24, q23, v25.2d
# Round 35
sha512su0 v3.2d, v4.2d
ext v21.16b, v7.16b, v0.16b, #8
sha512su1 v3.2d, v2.2d, v21.2d
add v20.2d, v3.2d, v19.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v26.16b, v27.16b, #8
ext v22.16b, v25.16b, v26.16b, #8
add v27.2d, v27.2d, v20.2d
sha512h q27, q21, v22.2d
add v23.2d, v25.2d, v27.2d
sha512h2 q27, q25, v24.2d
# Load next 8 64-bit words of K
ld1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x3], #0x40
# Round 36
sha512su0 v4.2d, v5.2d
ext v21.16b, v0.16b, v1.16b, #8
sha512su1 v4.2d, v3.2d, v21.2d
add v20.2d, v4.2d, v16.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v23.16b, v26.16b, #8
ext v22.16b, v24.16b, v23.16b, #8
add v26.2d, v26.2d, v20.2d
sha512h q26, q21, v22.2d
add v25.2d, v24.2d, v26.2d
sha512h2 q26, q24, v27.2d
# Round 37
sha512su0 v5.2d, v6.2d
ext v21.16b, v1.16b, v2.16b, #8
sha512su1 v5.2d, v4.2d, v21.2d
add v20.2d, v5.2d, v17.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v25.16b, v23.16b, #8
ext v22.16b, v27.16b, v25.16b, #8
add v23.2d, v23.2d, v20.2d
sha512h q23, q21, v22.2d
add v24.2d, v27.2d, v23.2d
sha512h2 q23, q27, v26.2d
# Round 38
sha512su0 v6.2d, v7.2d
ext v21.16b, v2.16b, v3.16b, #8
sha512su1 v6.2d, v5.2d, v21.2d
add v20.2d, v6.2d, v18.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v24.16b, v25.16b, #8
ext v22.16b, v26.16b, v24.16b, #8
add v25.2d, v25.2d, v20.2d
sha512h q25, q21, v22.2d
add v27.2d, v26.2d, v25.2d
sha512h2 q25, q26, v23.2d
# Round 39
sha512su0 v7.2d, v0.2d
ext v21.16b, v3.16b, v4.16b, #8
sha512su1 v7.2d, v6.2d, v21.2d
add v20.2d, v7.2d, v19.2d
ext v20.16b, v20.16b, v20.16b, #8
ext v21.16b, v27.16b, v24.16b, #8
ext v22.16b, v23.16b, v27.16b, #8
add v24.2d, v24.2d, v20.2d
sha512h q24, q21, v22.2d
add v26.2d, v23.2d, v24.2d
sha512h2 q24, q23, v25.2d
# Add the pre-block digest copy back into the working digest.
add v27.2d, v27.2d, v31.2d
add v26.2d, v26.2d, v30.2d
add v25.2d, v25.2d, v29.2d
add v24.2d, v24.2d, v28.2d
subs w2, w2, #0x80
bne L_sha512_len_crypto_begin
# Store digest back
st1 {v24.2d, v25.2d, v26.2d, v27.2d}, [x0]
ldp d8, d9, [x29, #16]
ldp d10, d11, [x29, #32]
ldp d12, d13, [x29, #48]
ldp d14, d15, [x29, #64]
ldp x29, x30, [sp], #0x50
ret
#ifndef __APPLE__
.size Transform_Sha512_Len_crypto,.-Transform_Sha512_Len_crypto
#endif /* __APPLE__ */
#endif /* WOLFSSL_ARMASM_CRYPTO_SHA512 */
#endif /* WOLFSSL_SHA512 */
#endif /* __aarch64__ */
#endif /* WOLFSSL_ARMASM */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
#endif /* !WOLFSSL_ARMASM_INLINE */
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/UART/UART_WakeUpFromStop/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Stack: 1 KB (0x400 bytes), uninitialised (NOINIT), 2^3 = 8-byte aligned.
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp ; top of stack (Cortex-M stack grows downwards from here)
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Heap: 512 bytes (0x200); __heap_base/__heap_limit bound it for the C library.
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8 ; assert 8-byte stack alignment (AAPCS requirement)
THUMB ; all code in this file is Thumb
; Vector Table Mapped to Address 0 at Reset
; Vector table; the RESET area is located at address 0 by the linker.
; Layout: initial SP, 15 Cortex-M0 system exception vectors, then the
; 32 STM32F072 peripheral interrupt vectors.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC (touch sensing controller)
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors ; table size in bytes
AREA |.text|, CODE, READONLY
; Reset handler routine
; Reset entry point: run SystemInit() (system/clock setup, defined in C),
; then branch to the C library entry __main, which initialises the data
; sections and eventually calls main().  Never returns.
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit ; call SystemInit() via register
BLX R0
LDR R0, =__main ; tail-jump into C library startup
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Weak NMI handler: spin forever so a debugger can inspect the state.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
; Weak HardFault handler: spin forever (label continued with '\').
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
; Weak SVCall handler: spin forever until overridden by the application.
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
; Weak PendSV handler: spin forever until overridden by the application.
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
; Weak SysTick handler: spin forever until overridden by the application.
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Default_Handler: single weak target shared by every peripheral IRQ.
; All the labels below alias the same infinite loop ("B ."), preserving
; the system state for a debugger.  An application overrides any of them
; simply by defining a non-weak function of the same name.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
; All IRQ labels fall through to the shared infinite loop below.
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB the C library manages stack/heap itself and only needs the
; three boundary symbols exported.  With the full C library, it calls
; __user_initial_stackheap to obtain the region boundaries:
;   R0 = heap base, R1 = stack top, R2 = heap limit, R3 = stack base.
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/UART/UART_WakeUpFromStop/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/* Reset entry point: set SP from the linker symbol _estack, copy the
 * .data initialisers from flash to SRAM, zero .bss, run SystemInit()
 * and the C runtime constructors, then call main(). Never returns. */
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0 /* r1 = byte offset into .data */
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* load initialiser word from flash */
str r3, [r0, r1] /* store into SRAM .data (r0 = _sdata) */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit /* loop while _sdata + offset < _edata */
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss /* loop while r2 < _ebss */
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever /* trap here if main() ever returns */
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
/* Fallback for any unexpected interrupt: spin forever, preserving the
 * halted system state for examination by a debugger. */
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/* Cortex-M0 vector table: initial SP, 15 system exception vectors, then
 * the 32 STM32F072 peripheral interrupt vectors.  The .size directive is
 * placed AFTER the table: ".-g_pfnVectors" is evaluated at the point of
 * the directive, so emitting it before the label (as the original did)
 * records a bogus object size of 0 in the symbol table. */
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aenu1/aps3e
| 68,016
|
app/src/main/cpp/rpcs3/3rdparty/wolfssl/wolfssl/wolfcrypt/src/port/arm/thumb2-aes-asm.S
|
/* thumb2-aes-asm
*
* Copyright (C) 2006-2023 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
/* Generated using (from wolfssl):
* cd ../scripts
* ruby ./aes/aes.rb thumb2 ../wolfssl/wolfcrypt/src/port/arm/thumb2-aes-asm.S
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif /* HAVE_CONFIG_H */
#include <wolfssl/wolfcrypt/settings.h>
#ifdef WOLFSSL_ARMASM
#if !defined(__aarch64__) && defined(__thumb__)
#ifndef WOLFSSL_ARMASM_INLINE
.thumb
.syntax unified
#ifndef NO_AES
#ifdef HAVE_AES_DECRYPT
.text
.type L_AES_Thumb2_td_data, %object
.size L_AES_Thumb2_td_data, 1024
.align 4
L_AES_Thumb2_td_data:
.word 0x5051f4a7
.word 0x537e4165
.word 0xc31a17a4
.word 0x963a275e
.word 0xcb3bab6b
.word 0xf11f9d45
.word 0xabacfa58
.word 0x934be303
.word 0x552030fa
.word 0xf6ad766d
.word 0x9188cc76
.word 0x25f5024c
.word 0xfc4fe5d7
.word 0xd7c52acb
.word 0x80263544
.word 0x8fb562a3
.word 0x49deb15a
.word 0x6725ba1b
.word 0x9845ea0e
.word 0xe15dfec0
.word 0x2c32f75
.word 0x12814cf0
.word 0xa38d4697
.word 0xc66bd3f9
.word 0xe7038f5f
.word 0x9515929c
.word 0xebbf6d7a
.word 0xda955259
.word 0x2dd4be83
.word 0xd3587421
.word 0x2949e069
.word 0x448ec9c8
.word 0x6a75c289
.word 0x78f48e79
.word 0x6b99583e
.word 0xdd27b971
.word 0xb6bee14f
.word 0x17f088ad
.word 0x66c920ac
.word 0xb47dce3a
.word 0x1863df4a
.word 0x82e51a31
.word 0x60975133
.word 0x4562537f
.word 0xe0b16477
.word 0x84bb6bae
.word 0x1cfe81a0
.word 0x94f9082b
.word 0x58704868
.word 0x198f45fd
.word 0x8794de6c
.word 0xb7527bf8
.word 0x23ab73d3
.word 0xe2724b02
.word 0x57e31f8f
.word 0x2a6655ab
.word 0x7b2eb28
.word 0x32fb5c2
.word 0x9a86c57b
.word 0xa5d33708
.word 0xf2302887
.word 0xb223bfa5
.word 0xba02036a
.word 0x5ced1682
.word 0x2b8acf1c
.word 0x92a779b4
.word 0xf0f307f2
.word 0xa14e69e2
.word 0xcd65daf4
.word 0xd50605be
.word 0x1fd13462
.word 0x8ac4a6fe
.word 0x9d342e53
.word 0xa0a2f355
.word 0x32058ae1
.word 0x75a4f6eb
.word 0x390b83ec
.word 0xaa4060ef
.word 0x65e719f
.word 0x51bd6e10
.word 0xf93e218a
.word 0x3d96dd06
.word 0xaedd3e05
.word 0x464de6bd
.word 0xb591548d
.word 0x571c45d
.word 0x6f0406d4
.word 0xff605015
.word 0x241998fb
.word 0x97d6bde9
.word 0xcc894043
.word 0x7767d99e
.word 0xbdb0e842
.word 0x8807898b
.word 0x38e7195b
.word 0xdb79c8ee
.word 0x47a17c0a
.word 0xe97c420f
.word 0xc9f8841e
.word 0x0
.word 0x83098086
.word 0x48322bed
.word 0xac1e1170
.word 0x4e6c5a72
.word 0xfbfd0eff
.word 0x560f8538
.word 0x1e3daed5
.word 0x27362d39
.word 0x640a0fd9
.word 0x21685ca6
.word 0xd19b5b54
.word 0x3a24362e
.word 0xb10c0a67
.word 0xf9357e7
.word 0xd2b4ee96
.word 0x9e1b9b91
.word 0x4f80c0c5
.word 0xa261dc20
.word 0x695a774b
.word 0x161c121a
.word 0xae293ba
.word 0xe5c0a02a
.word 0x433c22e0
.word 0x1d121b17
.word 0xb0e090d
.word 0xadf28bc7
.word 0xb92db6a8
.word 0xc8141ea9
.word 0x8557f119
.word 0x4caf7507
.word 0xbbee99dd
.word 0xfda37f60
.word 0x9ff70126
.word 0xbc5c72f5
.word 0xc544663b
.word 0x345bfb7e
.word 0x768b4329
.word 0xdccb23c6
.word 0x68b6edfc
.word 0x63b8e4f1
.word 0xcad731dc
.word 0x10426385
.word 0x40139722
.word 0x2084c611
.word 0x7d854a24
.word 0xf8d2bb3d
.word 0x11aef932
.word 0x6dc729a1
.word 0x4b1d9e2f
.word 0xf3dcb230
.word 0xec0d8652
.word 0xd077c1e3
.word 0x6c2bb316
.word 0x99a970b9
.word 0xfa119448
.word 0x2247e964
.word 0xc4a8fc8c
.word 0x1aa0f03f
.word 0xd8567d2c
.word 0xef223390
.word 0xc787494e
.word 0xc1d938d1
.word 0xfe8ccaa2
.word 0x3698d40b
.word 0xcfa6f581
.word 0x28a57ade
.word 0x26dab78e
.word 0xa43fadbf
.word 0xe42c3a9d
.word 0xd507892
.word 0x9b6a5fcc
.word 0x62547e46
.word 0xc2f68d13
.word 0xe890d8b8
.word 0x5e2e39f7
.word 0xf582c3af
.word 0xbe9f5d80
.word 0x7c69d093
.word 0xa96fd52d
.word 0xb3cf2512
.word 0x3bc8ac99
.word 0xa710187d
.word 0x6ee89c63
.word 0x7bdb3bbb
.word 0x9cd2678
.word 0xf46e5918
.word 0x1ec9ab7
.word 0xa8834f9a
.word 0x65e6956e
.word 0x7eaaffe6
.word 0x821bccf
.word 0xe6ef15e8
.word 0xd9bae79b
.word 0xce4a6f36
.word 0xd4ea9f09
.word 0xd629b07c
.word 0xaf31a4b2
.word 0x312a3f23
.word 0x30c6a594
.word 0xc035a266
.word 0x37744ebc
.word 0xa6fc82ca
.word 0xb0e090d0
.word 0x1533a7d8
.word 0x4af10498
.word 0xf741ecda
.word 0xe7fcd50
.word 0x2f1791f6
.word 0x8d764dd6
.word 0x4d43efb0
.word 0x54ccaa4d
.word 0xdfe49604
.word 0xe39ed1b5
.word 0x1b4c6a88
.word 0xb8c12c1f
.word 0x7f466551
.word 0x49d5eea
.word 0x5d018c35
.word 0x73fa8774
.word 0x2efb0b41
.word 0x5ab3671d
.word 0x5292dbd2
.word 0x33e91056
.word 0x136dd647
.word 0x8c9ad761
.word 0x7a37a10c
.word 0x8e59f814
.word 0x89eb133c
.word 0xeecea927
.word 0x35b761c9
.word 0xede11ce5
.word 0x3c7a47b1
.word 0x599cd2df
.word 0x3f55f273
.word 0x791814ce
.word 0xbf73c737
.word 0xea53f7cd
.word 0x5b5ffdaa
.word 0x14df3d6f
.word 0x867844db
.word 0x81caaff3
.word 0x3eb968c4
.word 0x2c382434
.word 0x5fc2a340
.word 0x72161dc3
.word 0xcbce225
.word 0x8b283c49
.word 0x41ff0d95
.word 0x7139a801
.word 0xde080cb3
.word 0x9cd8b4e4
.word 0x906456c1
.word 0x617bcb84
.word 0x70d532b6
.word 0x74486c5c
.word 0x42d0b857
#endif /* HAVE_AES_DECRYPT */
#if defined(HAVE_AES_DECRYPT) || defined(HAVE_AES_CBC) || defined(HAVE_AESCCM) || defined(HAVE_AESGCM) || defined(WOLFSSL_AES_DIRECT) || defined(WOLFSSL_AES_COUNTER)
.text
.type L_AES_Thumb2_te_data, %object
.size L_AES_Thumb2_te_data, 1024
.align 4
L_AES_Thumb2_te_data:
.word 0xa5c66363
.word 0x84f87c7c
.word 0x99ee7777
.word 0x8df67b7b
.word 0xdfff2f2
.word 0xbdd66b6b
.word 0xb1de6f6f
.word 0x5491c5c5
.word 0x50603030
.word 0x3020101
.word 0xa9ce6767
.word 0x7d562b2b
.word 0x19e7fefe
.word 0x62b5d7d7
.word 0xe64dabab
.word 0x9aec7676
.word 0x458fcaca
.word 0x9d1f8282
.word 0x4089c9c9
.word 0x87fa7d7d
.word 0x15effafa
.word 0xebb25959
.word 0xc98e4747
.word 0xbfbf0f0
.word 0xec41adad
.word 0x67b3d4d4
.word 0xfd5fa2a2
.word 0xea45afaf
.word 0xbf239c9c
.word 0xf753a4a4
.word 0x96e47272
.word 0x5b9bc0c0
.word 0xc275b7b7
.word 0x1ce1fdfd
.word 0xae3d9393
.word 0x6a4c2626
.word 0x5a6c3636
.word 0x417e3f3f
.word 0x2f5f7f7
.word 0x4f83cccc
.word 0x5c683434
.word 0xf451a5a5
.word 0x34d1e5e5
.word 0x8f9f1f1
.word 0x93e27171
.word 0x73abd8d8
.word 0x53623131
.word 0x3f2a1515
.word 0xc080404
.word 0x5295c7c7
.word 0x65462323
.word 0x5e9dc3c3
.word 0x28301818
.word 0xa1379696
.word 0xf0a0505
.word 0xb52f9a9a
.word 0x90e0707
.word 0x36241212
.word 0x9b1b8080
.word 0x3ddfe2e2
.word 0x26cdebeb
.word 0x694e2727
.word 0xcd7fb2b2
.word 0x9fea7575
.word 0x1b120909
.word 0x9e1d8383
.word 0x74582c2c
.word 0x2e341a1a
.word 0x2d361b1b
.word 0xb2dc6e6e
.word 0xeeb45a5a
.word 0xfb5ba0a0
.word 0xf6a45252
.word 0x4d763b3b
.word 0x61b7d6d6
.word 0xce7db3b3
.word 0x7b522929
.word 0x3edde3e3
.word 0x715e2f2f
.word 0x97138484
.word 0xf5a65353
.word 0x68b9d1d1
.word 0x0
.word 0x2cc1eded
.word 0x60402020
.word 0x1fe3fcfc
.word 0xc879b1b1
.word 0xedb65b5b
.word 0xbed46a6a
.word 0x468dcbcb
.word 0xd967bebe
.word 0x4b723939
.word 0xde944a4a
.word 0xd4984c4c
.word 0xe8b05858
.word 0x4a85cfcf
.word 0x6bbbd0d0
.word 0x2ac5efef
.word 0xe54faaaa
.word 0x16edfbfb
.word 0xc5864343
.word 0xd79a4d4d
.word 0x55663333
.word 0x94118585
.word 0xcf8a4545
.word 0x10e9f9f9
.word 0x6040202
.word 0x81fe7f7f
.word 0xf0a05050
.word 0x44783c3c
.word 0xba259f9f
.word 0xe34ba8a8
.word 0xf3a25151
.word 0xfe5da3a3
.word 0xc0804040
.word 0x8a058f8f
.word 0xad3f9292
.word 0xbc219d9d
.word 0x48703838
.word 0x4f1f5f5
.word 0xdf63bcbc
.word 0xc177b6b6
.word 0x75afdada
.word 0x63422121
.word 0x30201010
.word 0x1ae5ffff
.word 0xefdf3f3
.word 0x6dbfd2d2
.word 0x4c81cdcd
.word 0x14180c0c
.word 0x35261313
.word 0x2fc3ecec
.word 0xe1be5f5f
.word 0xa2359797
.word 0xcc884444
.word 0x392e1717
.word 0x5793c4c4
.word 0xf255a7a7
.word 0x82fc7e7e
.word 0x477a3d3d
.word 0xacc86464
.word 0xe7ba5d5d
.word 0x2b321919
.word 0x95e67373
.word 0xa0c06060
.word 0x98198181
.word 0xd19e4f4f
.word 0x7fa3dcdc
.word 0x66442222
.word 0x7e542a2a
.word 0xab3b9090
.word 0x830b8888
.word 0xca8c4646
.word 0x29c7eeee
.word 0xd36bb8b8
.word 0x3c281414
.word 0x79a7dede
.word 0xe2bc5e5e
.word 0x1d160b0b
.word 0x76addbdb
.word 0x3bdbe0e0
.word 0x56643232
.word 0x4e743a3a
.word 0x1e140a0a
.word 0xdb924949
.word 0xa0c0606
.word 0x6c482424
.word 0xe4b85c5c
.word 0x5d9fc2c2
.word 0x6ebdd3d3
.word 0xef43acac
.word 0xa6c46262
.word 0xa8399191
.word 0xa4319595
.word 0x37d3e4e4
.word 0x8bf27979
.word 0x32d5e7e7
.word 0x438bc8c8
.word 0x596e3737
.word 0xb7da6d6d
.word 0x8c018d8d
.word 0x64b1d5d5
.word 0xd29c4e4e
.word 0xe049a9a9
.word 0xb4d86c6c
.word 0xfaac5656
.word 0x7f3f4f4
.word 0x25cfeaea
.word 0xafca6565
.word 0x8ef47a7a
.word 0xe947aeae
.word 0x18100808
.word 0xd56fbaba
.word 0x88f07878
.word 0x6f4a2525
.word 0x725c2e2e
.word 0x24381c1c
.word 0xf157a6a6
.word 0xc773b4b4
.word 0x5197c6c6
.word 0x23cbe8e8
.word 0x7ca1dddd
.word 0x9ce87474
.word 0x213e1f1f
.word 0xdd964b4b
.word 0xdc61bdbd
.word 0x860d8b8b
.word 0x850f8a8a
.word 0x90e07070
.word 0x427c3e3e
.word 0xc471b5b5
.word 0xaacc6666
.word 0xd8904848
.word 0x5060303
.word 0x1f7f6f6
.word 0x121c0e0e
.word 0xa3c26161
.word 0x5f6a3535
.word 0xf9ae5757
.word 0xd069b9b9
.word 0x91178686
.word 0x5899c1c1
.word 0x273a1d1d
.word 0xb9279e9e
.word 0x38d9e1e1
.word 0x13ebf8f8
.word 0xb32b9898
.word 0x33221111
.word 0xbbd26969
.word 0x70a9d9d9
.word 0x89078e8e
.word 0xa7339494
.word 0xb62d9b9b
.word 0x223c1e1e
.word 0x92158787
.word 0x20c9e9e9
.word 0x4987cece
.word 0xffaa5555
.word 0x78502828
.word 0x7aa5dfdf
.word 0x8f038c8c
.word 0xf859a1a1
.word 0x80098989
.word 0x171a0d0d
.word 0xda65bfbf
.word 0x31d7e6e6
.word 0xc6844242
.word 0xb8d06868
.word 0xc3824141
.word 0xb0299999
.word 0x775a2d2d
.word 0x111e0f0f
.word 0xcb7bb0b0
.word 0xfca85454
.word 0xd66dbbbb
.word 0x3a2c1616
#endif /* HAVE_AES_DECRYPT || HAVE_AES_CBC || HAVE_AESCCM || HAVE_AESGCM || WOLFSSL_AES_DIRECT || WOLFSSL_AES_COUNTER */
#ifdef HAVE_AES_DECRYPT
.text
.type L_AES_Thumb2_td, %object
/* Literal word holding the address of the Td (decryption) table; code
 * loads it PC-relative with "LDR rX, L_AES_Thumb2_td".
 * NOTE(review): .size declares 12 bytes but only one 4-byte word is
 * emitted -- value comes from the generator script; confirm intent. */
.size L_AES_Thumb2_td, 12
.align 4
L_AES_Thumb2_td:
.word L_AES_Thumb2_td_data
#endif /* HAVE_AES_DECRYPT */
#if defined(HAVE_AES_DECRYPT) || defined(HAVE_AES_CBC) || defined(HAVE_AESCCM) || defined(HAVE_AESGCM) || defined(WOLFSSL_AES_DIRECT) || defined(WOLFSSL_AES_COUNTER)
.text
.type L_AES_Thumb2_te, %object
/* Literal word holding the address of the Te (encryption) table; code
 * loads it PC-relative with "LDR rX, L_AES_Thumb2_te".
 * NOTE(review): .size declares 12 bytes but only one 4-byte word is
 * emitted -- value comes from the generator script; confirm intent. */
.size L_AES_Thumb2_te, 12
.align 4
L_AES_Thumb2_te:
.word L_AES_Thumb2_te_data
#endif /* HAVE_AES_DECRYPT || HAVE_AES_CBC || HAVE_AESCCM || HAVE_AESGCM || WOLFSSL_AES_DIRECT || WOLFSSL_AES_COUNTER */
#ifdef HAVE_AES_DECRYPT
.text
.align 4
.globl AES_invert_key
.type AES_invert_key, %function
/* void AES_invert_key(uint32_t* ks (r0), int rounds (r1))
 * Convert an encryption key schedule into a decryption key schedule in
 * place: first reverse the order of the 16-byte round keys, then apply
 * InvMixColumns to every round key except the first and the last.
 * The low byte of each Te entry is the forward S-box value, so
 * Td0[Sbox[b]] yields the InvMixColumns contribution of byte b.
 * Clobbers r2-r12, lr, flags. */
AES_invert_key:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
LDR r12, L_AES_Thumb2_te
LDR lr, L_AES_Thumb2_td
/* r10 -> last round key (ks + rounds*16); r11 counts swaps. */
ADD r10, r0, r1, LSL #4
MOV r11, r1
/* Swap 16-byte round keys pairwise from both ends to reverse order. */
L_AES_invert_key_loop:
LDM r0, {r2, r3, r4, r5}
LDM r10, {r6, r7, r8, r9}
STM r10, {r2, r3, r4, r5}
STM r0!, {r6, r7, r8, r9}
SUBS r11, r11, #0x2
SUB r10, r10, #0x10
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_AES_invert_key_loop
#else
BNE.N L_AES_invert_key_loop
#endif
/* Rewind r0 (it advanced rounds/2 keys = rounds*8 bytes), then skip the
 * first round key; r11 = rounds-1 keys get InvMixColumns, leaving the
 * final round key untouched as well. */
SUB r0, r0, r1, LSL #3
ADD r0, r0, #0x10
SUB r11, r1, #0x1
L_AES_invert_key_mix_loop:
LDM r0, {r2, r3, r4, r5}
/* Word 0: split into bytes, S-box via Te low byte (LDRB), then fetch
 * Td0[Sbox[b]] and XOR the four rotated columns = InvMixColumns(word). */
UBFX r6, r2, #0, #8
UBFX r7, r2, #8, #8
UBFX r8, r2, #16, #8
LSR r9, r2, #24
LDRB r6, [r12, r6, LSL #2]
LDRB r7, [r12, r7, LSL #2]
LDRB r8, [r12, r8, LSL #2]
LDRB r9, [r12, r9, LSL #2]
LDR r6, [lr, r6, LSL #2]
LDR r7, [lr, r7, LSL #2]
LDR r8, [lr, r8, LSL #2]
LDR r9, [lr, r9, LSL #2]
EOR r8, r8, r6, ROR #16
EOR r8, r8, r7, ROR #8
EOR r8, r8, r9, ROR #24
STR r8, [r0], #4
/* Word 1: same transform. */
UBFX r6, r3, #0, #8
UBFX r7, r3, #8, #8
UBFX r8, r3, #16, #8
LSR r9, r3, #24
LDRB r6, [r12, r6, LSL #2]
LDRB r7, [r12, r7, LSL #2]
LDRB r8, [r12, r8, LSL #2]
LDRB r9, [r12, r9, LSL #2]
LDR r6, [lr, r6, LSL #2]
LDR r7, [lr, r7, LSL #2]
LDR r8, [lr, r8, LSL #2]
LDR r9, [lr, r9, LSL #2]
EOR r8, r8, r6, ROR #16
EOR r8, r8, r7, ROR #8
EOR r8, r8, r9, ROR #24
STR r8, [r0], #4
/* Word 2: same transform. */
UBFX r6, r4, #0, #8
UBFX r7, r4, #8, #8
UBFX r8, r4, #16, #8
LSR r9, r4, #24
LDRB r6, [r12, r6, LSL #2]
LDRB r7, [r12, r7, LSL #2]
LDRB r8, [r12, r8, LSL #2]
LDRB r9, [r12, r9, LSL #2]
LDR r6, [lr, r6, LSL #2]
LDR r7, [lr, r7, LSL #2]
LDR r8, [lr, r8, LSL #2]
LDR r9, [lr, r9, LSL #2]
EOR r8, r8, r6, ROR #16
EOR r8, r8, r7, ROR #8
EOR r8, r8, r9, ROR #24
STR r8, [r0], #4
/* Word 3: same transform. */
UBFX r6, r5, #0, #8
UBFX r7, r5, #8, #8
UBFX r8, r5, #16, #8
LSR r9, r5, #24
LDRB r6, [r12, r6, LSL #2]
LDRB r7, [r12, r7, LSL #2]
LDRB r8, [r12, r8, LSL #2]
LDRB r9, [r12, r9, LSL #2]
LDR r6, [lr, r6, LSL #2]
LDR r7, [lr, r7, LSL #2]
LDR r8, [lr, r8, LSL #2]
LDR r9, [lr, r9, LSL #2]
EOR r8, r8, r6, ROR #16
EOR r8, r8, r7, ROR #8
EOR r8, r8, r9, ROR #24
STR r8, [r0], #4
SUBS r11, r11, #0x1
#ifdef __GNUC__
BNE L_AES_invert_key_mix_loop
#else
BNE.W L_AES_invert_key_mix_loop
#endif
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 165 */
.size AES_invert_key,.-AES_invert_key
#endif /* HAVE_AES_DECRYPT */
.text
.type L_AES_Thumb2_rcon, %object
.size L_AES_Thumb2_rcon, 40
.align 4
/* AES key-schedule round constants (rcon): the 10 values
 * 0x01,0x02,0x04,...,0x80,0x1b,0x36, each stored in the high byte. */
L_AES_Thumb2_rcon:
.word 0x1000000
.word 0x2000000
.word 0x4000000
.word 0x8000000
.word 0x10000000
.word 0x20000000
.word 0x40000000
.word 0x80000000
.word 0x1b000000
.word 0x36000000
.text
.align 4
.globl AES_set_encrypt_key
.type AES_set_encrypt_key, %function
/* Expand an AES key into the encryption round-key schedule.
 * In:  r0 = key bytes, r1 = key length in bits (0x80/0xc0 tested; anything
 *      else falls through to the 256-bit path), r2 = output schedule.
 * Uses r10 = te table pointer, lr = rcon table pointer.
 * Key words are byte-reversed with REV (input is big-endian). */
AES_set_encrypt_key:
PUSH {r4, r5, r6, r7, r8, r9, r10, lr}
LDR r10, L_AES_Thumb2_te
ADR lr, L_AES_Thumb2_rcon
CMP r1, #0x80
#ifdef __GNUC__
BEQ L_AES_set_encrypt_key_start_128
#else
BEQ.W L_AES_set_encrypt_key_start_128
#endif
CMP r1, #0xc0
#ifdef __GNUC__
BEQ L_AES_set_encrypt_key_start_192
#else
BEQ.W L_AES_set_encrypt_key_start_192
#endif
/* 256-bit key: copy all eight key words into the schedule first. */
LDR r4, [r0]
LDR r5, [r0, #4]
LDR r6, [r0, #8]
LDR r7, [r0, #12]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
STM r2!, {r4, r5, r6, r7}
LDR r4, [r0, #16]
LDR r5, [r0, #20]
LDR r6, [r0, #24]
LDR r7, [r0, #28]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
STM r2, {r4, r5, r6, r7}
SUB r2, r2, #0x10
MOV r12, #0x6
/* Each iteration derives eight schedule words: one rcon/SubWord group and
 * one plain SubWord group (the AES-256 expansion rule). r12 = iterations. */
L_AES_set_encrypt_key_loop_256:
UBFX r4, r7, #0, #8
UBFX r5, r7, #8, #8
UBFX r6, r7, #16, #8
LSR r7, r7, #24
LDRB r4, [r10, r4, LSL #2]
LDRB r5, [r10, r5, LSL #2]
LDRB r6, [r10, r6, LSL #2]
LDRB r7, [r10, r7, LSL #2]
/* r3 = SubWord(RotWord(prev)) built from the S-box bytes above. */
EOR r3, r7, r4, LSL #8
EOR r3, r3, r5, LSL #16
EOR r3, r3, r6, LSL #24
LDM r2!, {r4, r5, r6, r7}
EOR r4, r4, r3
LDM lr!, {r3}
EOR r4, r4, r3
EOR r5, r5, r4
EOR r6, r6, r5
EOR r7, r7, r6
ADD r2, r2, #0x10
STM r2, {r4, r5, r6, r7}
SUB r2, r2, #0x10
MOV r3, r7
UBFX r4, r3, #8, #8
UBFX r5, r3, #16, #8
LSR r6, r3, #24
UBFX r3, r3, #0, #8
LDRB r4, [r10, r4, LSL #2]
LDRB r6, [r10, r6, LSL #2]
LDRB r5, [r10, r5, LSL #2]
LDRB r3, [r10, r3, LSL #2]
EOR r3, r3, r4, LSL #8
EOR r3, r3, r5, LSL #16
EOR r3, r3, r6, LSL #24
LDM r2!, {r4, r5, r6, r7}
EOR r4, r4, r3
EOR r5, r5, r4
EOR r6, r6, r5
EOR r7, r7, r6
ADD r2, r2, #0x10
STM r2, {r4, r5, r6, r7}
SUB r2, r2, #0x10
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_AES_set_encrypt_key_loop_256
#else
BNE.N L_AES_set_encrypt_key_loop_256
#endif
/* Final partial expansion step: only four more words are needed. */
UBFX r4, r7, #0, #8
UBFX r5, r7, #8, #8
UBFX r6, r7, #16, #8
LSR r7, r7, #24
LDRB r4, [r10, r4, LSL #2]
LDRB r5, [r10, r5, LSL #2]
LDRB r6, [r10, r6, LSL #2]
LDRB r7, [r10, r7, LSL #2]
EOR r3, r7, r4, LSL #8
EOR r3, r3, r5, LSL #16
EOR r3, r3, r6, LSL #24
LDM r2!, {r4, r5, r6, r7}
EOR r4, r4, r3
LDM lr!, {r3}
EOR r4, r4, r3
EOR r5, r5, r4
EOR r6, r6, r5
EOR r7, r7, r6
ADD r2, r2, #0x10
STM r2, {r4, r5, r6, r7}
SUB r2, r2, #0x10
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
B L_AES_set_encrypt_key_end
#else
B.N L_AES_set_encrypt_key_end
#endif
/* 192-bit key: six words per expansion group. */
L_AES_set_encrypt_key_start_192:
LDR r4, [r0]
LDR r5, [r0, #4]
LDR r6, [r0, #8]
LDR r7, [r0, #12]
LDR r8, [r0, #16]
LDR r9, [r0, #20]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
REV r8, r8
REV r9, r9
STM r2, {r4, r5, r6, r7}
STRD r8, r9, [r2, #16]
MOV r7, r9
MOV r12, #0x7
L_AES_set_encrypt_key_loop_192:
UBFX r4, r9, #0, #8
UBFX r5, r9, #8, #8
UBFX r6, r9, #16, #8
LSR r9, r9, #24
LDRB r4, [r10, r4, LSL #2]
LDRB r5, [r10, r5, LSL #2]
LDRB r6, [r10, r6, LSL #2]
LDRB r9, [r10, r9, LSL #2]
EOR r3, r9, r4, LSL #8
EOR r3, r3, r5, LSL #16
EOR r3, r3, r6, LSL #24
LDM r2!, {r4, r5, r6, r7, r8, r9}
EOR r4, r4, r3
LDM lr!, {r3}
EOR r4, r4, r3
EOR r5, r5, r4
EOR r6, r6, r5
EOR r7, r7, r6
EOR r8, r8, r7
EOR r9, r9, r8
STM r2, {r4, r5, r6, r7, r8, r9}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_AES_set_encrypt_key_loop_192
#else
BNE.N L_AES_set_encrypt_key_loop_192
#endif
/* Final 192-bit step: only four more schedule words are stored. */
UBFX r4, r9, #0, #8
UBFX r5, r9, #8, #8
UBFX r6, r9, #16, #8
LSR r9, r9, #24
LDRB r4, [r10, r4, LSL #2]
LDRB r5, [r10, r5, LSL #2]
LDRB r6, [r10, r6, LSL #2]
LDRB r9, [r10, r9, LSL #2]
EOR r3, r9, r4, LSL #8
EOR r3, r3, r5, LSL #16
EOR r3, r3, r6, LSL #24
LDM r2!, {r4, r5, r6, r7, r8, r9}
EOR r4, r4, r3
LDM lr!, {r3}
EOR r4, r4, r3
EOR r5, r5, r4
EOR r6, r6, r5
EOR r7, r7, r6
STM r2, {r4, r5, r6, r7}
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
B L_AES_set_encrypt_key_end
#else
B.N L_AES_set_encrypt_key_end
#endif
/* 128-bit key: four words per expansion group, ten groups. */
L_AES_set_encrypt_key_start_128:
LDR r4, [r0]
LDR r5, [r0, #4]
LDR r6, [r0, #8]
LDR r7, [r0, #12]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
STM r2, {r4, r5, r6, r7}
MOV r12, #0xa
L_AES_set_encrypt_key_loop_128:
UBFX r4, r7, #0, #8
UBFX r5, r7, #8, #8
UBFX r6, r7, #16, #8
LSR r7, r7, #24
LDRB r4, [r10, r4, LSL #2]
LDRB r5, [r10, r5, LSL #2]
LDRB r6, [r10, r6, LSL #2]
LDRB r7, [r10, r7, LSL #2]
EOR r3, r7, r4, LSL #8
EOR r3, r3, r5, LSL #16
EOR r3, r3, r6, LSL #24
LDM r2!, {r4, r5, r6, r7}
EOR r4, r4, r3
LDM lr!, {r3}
EOR r4, r4, r3
EOR r5, r5, r4
EOR r6, r6, r5
EOR r7, r7, r6
STM r2, {r4, r5, r6, r7}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_AES_set_encrypt_key_loop_128
#else
BNE.N L_AES_set_encrypt_key_loop_128
#endif
L_AES_set_encrypt_key_end:
POP {r4, r5, r6, r7, r8, r9, r10, pc}
/* Cycle Count = 340 */
.size AES_set_encrypt_key,.-AES_set_encrypt_key
.text
.align 4
.globl AES_encrypt_block
.type AES_encrypt_block, %function
/* Encrypt one AES block held in r4-r7 (internal helper, custom convention).
 * In:  r0 = te lookup table, r1 = number of loop iterations (each iteration
 *      performs two rounds; callers pass 4/5/6 for 10/12/14 rounds),
 *      r3 = round-key pointer, already advanced past round 0 by the caller.
 * Out: r4-r7 = encrypted state. Clobbers r2, r8-r11, lr; r3 is advanced.
 * The round-0 AddRoundKey is done by the caller before the BL. */
AES_encrypt_block:
PUSH {lr}
L_AES_encrypt_block_nr:
/* First of two rounds: state r4-r7 -> r8-r11 via table lookups;
 * ROR merges implement the per-column byte rotation of the te entries. */
UBFX r8, r5, #16, #8
LSR r11, r4, #24
UBFX lr, r6, #8, #8
UBFX r2, r7, #0, #8
LDR r8, [r0, r8, LSL #2]
LDR r11, [r0, r11, LSL #2]
LDR lr, [r0, lr, LSL #2]
LDR r2, [r0, r2, LSL #2]
UBFX r9, r6, #16, #8
EOR r8, r8, r11, ROR #24
LSR r11, r5, #24
EOR r8, r8, lr, ROR #8
UBFX lr, r7, #8, #8
EOR r8, r8, r2, ROR #16
UBFX r2, r4, #0, #8
LDR r9, [r0, r9, LSL #2]
LDR r11, [r0, r11, LSL #2]
LDR lr, [r0, lr, LSL #2]
LDR r2, [r0, r2, LSL #2]
UBFX r10, r7, #16, #8
EOR r9, r9, r11, ROR #24
LSR r11, r6, #24
EOR r9, r9, lr, ROR #8
UBFX lr, r4, #8, #8
EOR r9, r9, r2, ROR #16
UBFX r2, r5, #0, #8
LDR r10, [r0, r10, LSL #2]
LDR r11, [r0, r11, LSL #2]
LDR lr, [r0, lr, LSL #2]
LDR r2, [r0, r2, LSL #2]
UBFX r6, r6, #0, #8
EOR r10, r10, r11, ROR #24
UBFX r11, r4, #16, #8
EOR r10, r10, lr, ROR #8
LSR lr, r7, #24
EOR r10, r10, r2, ROR #16
UBFX r2, r5, #8, #8
LDR r6, [r0, r6, LSL #2]
LDR lr, [r0, lr, LSL #2]
LDR r11, [r0, r11, LSL #2]
LDR r2, [r0, r2, LSL #2]
EOR lr, lr, r6, ROR #24
LDM r3!, {r4, r5, r6, r7}
EOR r11, r11, lr, ROR #24
EOR r11, r11, r2, ROR #8
/* XOR in Key Schedule */
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
/* Second round of the pair: state r8-r11 -> r4-r7. */
UBFX r4, r9, #16, #8
LSR r7, r8, #24
UBFX lr, r10, #8, #8
UBFX r2, r11, #0, #8
LDR r4, [r0, r4, LSL #2]
LDR r7, [r0, r7, LSL #2]
LDR lr, [r0, lr, LSL #2]
LDR r2, [r0, r2, LSL #2]
UBFX r5, r10, #16, #8
EOR r4, r4, r7, ROR #24
LSR r7, r9, #24
EOR r4, r4, lr, ROR #8
UBFX lr, r11, #8, #8
EOR r4, r4, r2, ROR #16
UBFX r2, r8, #0, #8
LDR r5, [r0, r5, LSL #2]
LDR r7, [r0, r7, LSL #2]
LDR lr, [r0, lr, LSL #2]
LDR r2, [r0, r2, LSL #2]
UBFX r6, r11, #16, #8
EOR r5, r5, r7, ROR #24
LSR r7, r10, #24
EOR r5, r5, lr, ROR #8
UBFX lr, r8, #8, #8
EOR r5, r5, r2, ROR #16
UBFX r2, r9, #0, #8
LDR r6, [r0, r6, LSL #2]
LDR r7, [r0, r7, LSL #2]
LDR lr, [r0, lr, LSL #2]
LDR r2, [r0, r2, LSL #2]
UBFX r10, r10, #0, #8
EOR r6, r6, r7, ROR #24
UBFX r7, r8, #16, #8
EOR r6, r6, lr, ROR #8
LSR lr, r11, #24
EOR r6, r6, r2, ROR #16
UBFX r2, r9, #8, #8
LDR r10, [r0, r10, LSL #2]
LDR lr, [r0, lr, LSL #2]
LDR r7, [r0, r7, LSL #2]
LDR r2, [r0, r2, LSL #2]
EOR lr, lr, r10, ROR #24
LDM r3!, {r8, r9, r10, r11}
EOR r7, r7, lr, ROR #24
EOR r7, r7, r2, ROR #8
/* XOR in Key Schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
SUBS r1, r1, #0x1
#ifdef __GNUC__
BNE L_AES_encrypt_block_nr
#else
BNE.W L_AES_encrypt_block_nr
#endif
/* Penultimate round (same pattern as the loop body, r4-r7 -> r8-r11). */
UBFX r8, r5, #16, #8
LSR r11, r4, #24
UBFX lr, r6, #8, #8
UBFX r2, r7, #0, #8
LDR r8, [r0, r8, LSL #2]
LDR r11, [r0, r11, LSL #2]
LDR lr, [r0, lr, LSL #2]
LDR r2, [r0, r2, LSL #2]
UBFX r9, r6, #16, #8
EOR r8, r8, r11, ROR #24
LSR r11, r5, #24
EOR r8, r8, lr, ROR #8
UBFX lr, r7, #8, #8
EOR r8, r8, r2, ROR #16
UBFX r2, r4, #0, #8
LDR r9, [r0, r9, LSL #2]
LDR r11, [r0, r11, LSL #2]
LDR lr, [r0, lr, LSL #2]
LDR r2, [r0, r2, LSL #2]
UBFX r10, r7, #16, #8
EOR r9, r9, r11, ROR #24
LSR r11, r6, #24
EOR r9, r9, lr, ROR #8
UBFX lr, r4, #8, #8
EOR r9, r9, r2, ROR #16
UBFX r2, r5, #0, #8
LDR r10, [r0, r10, LSL #2]
LDR r11, [r0, r11, LSL #2]
LDR lr, [r0, lr, LSL #2]
LDR r2, [r0, r2, LSL #2]
UBFX r6, r6, #0, #8
EOR r10, r10, r11, ROR #24
UBFX r11, r4, #16, #8
EOR r10, r10, lr, ROR #8
LSR lr, r7, #24
EOR r10, r10, r2, ROR #16
UBFX r2, r5, #8, #8
LDR r6, [r0, r6, LSL #2]
LDR lr, [r0, lr, LSL #2]
LDR r11, [r0, r11, LSL #2]
LDR r2, [r0, r2, LSL #2]
EOR lr, lr, r6, ROR #24
LDM r3!, {r4, r5, r6, r7}
EOR r11, r11, lr, ROR #24
EOR r11, r11, r2, ROR #8
/* XOR in Key Schedule */
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
/* Last round: SubBytes only (LDRB picks the S-box byte from each
 * 4-byte te entry), no MixColumns, then final AddRoundKey. */
UBFX r4, r11, #0, #8
UBFX r7, r10, #8, #8
UBFX lr, r9, #16, #8
LSR r2, r8, #24
LDRB r4, [r0, r4, LSL #2]
LDRB r7, [r0, r7, LSL #2]
LDRB lr, [r0, lr, LSL #2]
LDRB r2, [r0, r2, LSL #2]
UBFX r5, r8, #0, #8
EOR r4, r4, r7, LSL #8
UBFX r7, r11, #8, #8
EOR r4, r4, lr, LSL #16
UBFX lr, r10, #16, #8
EOR r4, r4, r2, LSL #24
LSR r2, r9, #24
LDRB r5, [r0, r5, LSL #2]
LDRB r7, [r0, r7, LSL #2]
LDRB lr, [r0, lr, LSL #2]
LDRB r2, [r0, r2, LSL #2]
UBFX r6, r9, #0, #8
EOR r5, r5, r7, LSL #8
UBFX r7, r8, #8, #8
EOR r5, r5, lr, LSL #16
UBFX lr, r11, #16, #8
EOR r5, r5, r2, LSL #24
LSR r2, r10, #24
LDRB r6, [r0, r6, LSL #2]
LDRB r7, [r0, r7, LSL #2]
LDRB lr, [r0, lr, LSL #2]
LDRB r2, [r0, r2, LSL #2]
LSR r11, r11, #24
EOR r6, r6, r7, LSL #8
UBFX r7, r10, #0, #8
EOR r6, r6, lr, LSL #16
UBFX lr, r9, #8, #8
EOR r6, r6, r2, LSL #24
UBFX r2, r8, #16, #8
LDRB r11, [r0, r11, LSL #2]
LDRB r7, [r0, r7, LSL #2]
LDRB lr, [r0, lr, LSL #2]
LDRB r2, [r0, r2, LSL #2]
EOR lr, lr, r11, LSL #16
LDM r3, {r8, r9, r10, r11}
EOR r7, r7, lr, LSL #8
EOR r7, r7, r2, LSL #16
/* XOR in Key Schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
POP {pc}
/* Cycle Count = 285 */
.size AES_encrypt_block,.-AES_encrypt_block
#if defined(HAVE_AES_CBC) || defined(HAVE_AESCCM) || defined(HAVE_AESGCM) || defined(WOLFSSL_AES_DIRECT) || defined(WOLFSSL_AES_COUNTER)
.text
.type L_AES_Thumb2_te_ecb, %object
.size L_AES_Thumb2_te_ecb, 12
.align 4
/* Literal-pool word holding the address of the encryption lookup table
 * (L_AES_Thumb2_te_data), loaded with "LDR r0, L_AES_Thumb2_te_ecb".
 * NOTE(review): declared .size is 12 although only one word is emitted --
 * generator artifact; confirm against the wolfSSL asm generator. */
L_AES_Thumb2_te_ecb:
.word L_AES_Thumb2_te_data
#endif /* HAVE_AES_CBC || HAVE_AESCCM || HAVE_AESGCM || WOLFSSL_AES_DIRECT || WOLFSSL_AES_COUNTER */
#if defined(HAVE_AESCCM) || defined(HAVE_AESGCM) || defined(WOLFSSL_AES_DIRECT) || defined(WOLFSSL_AES_COUNTER)
.text
.align 4
.globl AES_ECB_encrypt
.type AES_ECB_encrypt, %function
/* ECB-encrypt a buffer of whole 16-byte blocks.
 * In:  r0 = input, r1 = output, r2 = length in bytes (multiple of 16),
 *      r3 = round-key schedule, [sp+36] on entry = round count (10/12/14).
 * Dispatches on the round count; the fall-through path is 14 rounds.
 * The saved r3 at [sp] restores the schedule pointer each block. */
AES_ECB_encrypt:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
MOV lr, r0
LDR r0, L_AES_Thumb2_te_ecb
LDR r12, [sp, #36]
PUSH {r3}
CMP r12, #0xa
#ifdef __GNUC__
BEQ L_AES_ECB_encrypt_start_block_128
#else
BEQ.W L_AES_ECB_encrypt_start_block_128
#endif
CMP r12, #0xc
#ifdef __GNUC__
BEQ L_AES_ECB_encrypt_start_block_192
#else
BEQ.W L_AES_ECB_encrypt_start_block_192
#endif
L_AES_ECB_encrypt_loop_block_256:
/* Load one block, byte-swap to internal order, apply round 0 key,
 * then run the shared block cipher core. */
LDR r4, [lr]
LDR r5, [lr, #4]
LDR r6, [lr, #8]
LDR r7, [lr, #12]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
PUSH {r1, r2, lr}
LDM r3!, {r8, r9, r10, r11}
/* Round: 0 - XOR in key schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
MOV r1, #0x6
BL AES_encrypt_block
POP {r1, r2, lr}
LDR r3, [sp]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
STR r4, [r1]
STR r5, [r1, #4]
STR r6, [r1, #8]
STR r7, [r1, #12]
SUBS r2, r2, #0x10
ADD lr, lr, #0x10
ADD r1, r1, #0x10
#ifdef __GNUC__
BNE L_AES_ECB_encrypt_loop_block_256
#else
BNE.W L_AES_ECB_encrypt_loop_block_256
#endif
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
B L_AES_ECB_encrypt_end
#else
B.N L_AES_ECB_encrypt_end
#endif
L_AES_ECB_encrypt_start_block_192:
L_AES_ECB_encrypt_loop_block_192:
LDR r4, [lr]
LDR r5, [lr, #4]
LDR r6, [lr, #8]
LDR r7, [lr, #12]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
PUSH {r1, r2, lr}
LDM r3!, {r8, r9, r10, r11}
/* Round: 0 - XOR in key schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
MOV r1, #0x5
BL AES_encrypt_block
POP {r1, r2, lr}
LDR r3, [sp]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
STR r4, [r1]
STR r5, [r1, #4]
STR r6, [r1, #8]
STR r7, [r1, #12]
SUBS r2, r2, #0x10
ADD lr, lr, #0x10
ADD r1, r1, #0x10
#ifdef __GNUC__
BNE L_AES_ECB_encrypt_loop_block_192
#else
BNE.W L_AES_ECB_encrypt_loop_block_192
#endif
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
B L_AES_ECB_encrypt_end
#else
B.N L_AES_ECB_encrypt_end
#endif
L_AES_ECB_encrypt_start_block_128:
L_AES_ECB_encrypt_loop_block_128:
LDR r4, [lr]
LDR r5, [lr, #4]
LDR r6, [lr, #8]
LDR r7, [lr, #12]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
PUSH {r1, r2, lr}
LDM r3!, {r8, r9, r10, r11}
/* Round: 0 - XOR in key schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
MOV r1, #0x4
BL AES_encrypt_block
POP {r1, r2, lr}
LDR r3, [sp]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
STR r4, [r1]
STR r5, [r1, #4]
STR r6, [r1, #8]
STR r7, [r1, #12]
SUBS r2, r2, #0x10
ADD lr, lr, #0x10
ADD r1, r1, #0x10
#ifdef __GNUC__
BNE L_AES_ECB_encrypt_loop_block_128
#else
BNE.W L_AES_ECB_encrypt_loop_block_128
#endif
L_AES_ECB_encrypt_end:
POP {r3}
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 212 */
.size AES_ECB_encrypt,.-AES_ECB_encrypt
#endif /* HAVE_AESCCM || HAVE_AESGCM || WOLFSSL_AES_DIRECT || WOLFSSL_AES_COUNTER */
#ifdef HAVE_AES_CBC
.text
.align 4
.globl AES_CBC_encrypt
.type AES_CBC_encrypt, %function
/* CBC-encrypt a buffer of whole 16-byte blocks.
 * In:  r0 = input, r1 = output, r2 = length in bytes (multiple of 16),
 *      r3 = round-key schedule, [sp+36] = round count, [sp+40] = IV buffer.
 * The running IV lives in r4-r7 across iterations (each ciphertext becomes
 * the next IV) and is written back to the IV buffer on exit. */
AES_CBC_encrypt:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
LDR r8, [sp, #36]
LDR r9, [sp, #40]
MOV lr, r0
LDR r0, L_AES_Thumb2_te_ecb
LDM r9, {r4, r5, r6, r7}
PUSH {r3, r9}
CMP r8, #0xa
#ifdef __GNUC__
BEQ L_AES_CBC_encrypt_start_block_128
#else
BEQ.W L_AES_CBC_encrypt_start_block_128
#endif
CMP r8, #0xc
#ifdef __GNUC__
BEQ L_AES_CBC_encrypt_start_block_192
#else
BEQ.W L_AES_CBC_encrypt_start_block_192
#endif
L_AES_CBC_encrypt_loop_block_256:
/* XOR the plaintext block into the running IV, then encrypt. */
LDR r8, [lr]
LDR r9, [lr, #4]
LDR r10, [lr, #8]
LDR r11, [lr, #12]
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
PUSH {r1, r2, lr}
LDM r3!, {r8, r9, r10, r11}
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
/* Round: 0 - XOR in key schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
MOV r1, #0x6
BL AES_encrypt_block
POP {r1, r2, lr}
LDR r3, [sp]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
STR r4, [r1]
STR r5, [r1, #4]
STR r6, [r1, #8]
STR r7, [r1, #12]
SUBS r2, r2, #0x10
ADD lr, lr, #0x10
ADD r1, r1, #0x10
#ifdef __GNUC__
BNE L_AES_CBC_encrypt_loop_block_256
#else
BNE.W L_AES_CBC_encrypt_loop_block_256
#endif
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
B L_AES_CBC_encrypt_end
#else
B.N L_AES_CBC_encrypt_end
#endif
L_AES_CBC_encrypt_start_block_192:
L_AES_CBC_encrypt_loop_block_192:
LDR r8, [lr]
LDR r9, [lr, #4]
LDR r10, [lr, #8]
LDR r11, [lr, #12]
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
PUSH {r1, r2, lr}
LDM r3!, {r8, r9, r10, r11}
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
/* Round: 0 - XOR in key schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
MOV r1, #0x5
BL AES_encrypt_block
POP {r1, r2, lr}
LDR r3, [sp]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
STR r4, [r1]
STR r5, [r1, #4]
STR r6, [r1, #8]
STR r7, [r1, #12]
SUBS r2, r2, #0x10
ADD lr, lr, #0x10
ADD r1, r1, #0x10
#ifdef __GNUC__
BNE L_AES_CBC_encrypt_loop_block_192
#else
BNE.W L_AES_CBC_encrypt_loop_block_192
#endif
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
B L_AES_CBC_encrypt_end
#else
B.N L_AES_CBC_encrypt_end
#endif
L_AES_CBC_encrypt_start_block_128:
L_AES_CBC_encrypt_loop_block_128:
LDR r8, [lr]
LDR r9, [lr, #4]
LDR r10, [lr, #8]
LDR r11, [lr, #12]
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
PUSH {r1, r2, lr}
LDM r3!, {r8, r9, r10, r11}
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
/* Round: 0 - XOR in key schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
MOV r1, #0x4
BL AES_encrypt_block
POP {r1, r2, lr}
LDR r3, [sp]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
STR r4, [r1]
STR r5, [r1, #4]
STR r6, [r1, #8]
STR r7, [r1, #12]
SUBS r2, r2, #0x10
ADD lr, lr, #0x10
ADD r1, r1, #0x10
#ifdef __GNUC__
BNE L_AES_CBC_encrypt_loop_block_128
#else
BNE.W L_AES_CBC_encrypt_loop_block_128
#endif
L_AES_CBC_encrypt_end:
/* Persist the final ciphertext block as the updated IV. */
POP {r3, r9}
STM r9, {r4, r5, r6, r7}
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 238 */
.size AES_CBC_encrypt,.-AES_CBC_encrypt
#endif /* HAVE_AES_CBC */
#ifdef WOLFSSL_AES_COUNTER
.text
.align 4
.globl AES_CTR_encrypt
.type AES_CTR_encrypt, %function
/* CTR-mode encrypt/decrypt a buffer of whole 16-byte blocks.
 * In:  r0 = input, r1 = output, r2 = length in bytes (multiple of 16),
 *      r3 = round-key schedule, [sp+36] = round count, [sp+40] = counter.
 * The counter is byte-swapped once on entry, incremented as a 128-bit
 * big-endian integer per block, and swapped back and stored on exit. */
AES_CTR_encrypt:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
LDR r12, [sp, #36]
LDR r8, [sp, #40]
MOV lr, r0
LDR r0, L_AES_Thumb2_te_ecb
LDM r8, {r4, r5, r6, r7}
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
STM r8, {r4, r5, r6, r7}
PUSH {r3, r8}
CMP r12, #0xa
#ifdef __GNUC__
BEQ L_AES_CTR_encrypt_start_block_128
#else
BEQ.W L_AES_CTR_encrypt_start_block_128
#endif
CMP r12, #0xc
#ifdef __GNUC__
BEQ L_AES_CTR_encrypt_start_block_192
#else
BEQ.W L_AES_CTR_encrypt_start_block_192
#endif
L_AES_CTR_encrypt_loop_block_256:
PUSH {r1, r2, lr}
LDR lr, [sp, #16]
/* Store counter+1 for the next block; encrypt the current value. */
ADDS r11, r7, #0x1
ADCS r10, r6, #0x0
ADCS r9, r5, #0x0
ADC r8, r4, #0x0
STM lr, {r8, r9, r10, r11}
LDM r3!, {r8, r9, r10, r11}
/* Round: 0 - XOR in key schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
MOV r1, #0x6
BL AES_encrypt_block
POP {r1, r2, lr}
LDR r3, [sp]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
/* XOR keystream with the input block. */
LDR r8, [lr]
LDR r9, [lr, #4]
LDR r10, [lr, #8]
LDR r11, [lr, #12]
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
LDR r8, [sp, #4]
STR r4, [r1]
STR r5, [r1, #4]
STR r6, [r1, #8]
STR r7, [r1, #12]
LDM r8, {r4, r5, r6, r7}
SUBS r2, r2, #0x10
ADD lr, lr, #0x10
ADD r1, r1, #0x10
#ifdef __GNUC__
BNE L_AES_CTR_encrypt_loop_block_256
#else
BNE.W L_AES_CTR_encrypt_loop_block_256
#endif
#ifdef __GNUC__
B L_AES_CTR_encrypt_end
#else
B.W L_AES_CTR_encrypt_end
#endif
L_AES_CTR_encrypt_start_block_192:
L_AES_CTR_encrypt_loop_block_192:
PUSH {r1, r2, lr}
LDR lr, [sp, #16]
ADDS r11, r7, #0x1
ADCS r10, r6, #0x0
ADCS r9, r5, #0x0
ADC r8, r4, #0x0
STM lr, {r8, r9, r10, r11}
LDM r3!, {r8, r9, r10, r11}
/* Round: 0 - XOR in key schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
MOV r1, #0x5
BL AES_encrypt_block
POP {r1, r2, lr}
LDR r3, [sp]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
LDR r8, [lr]
LDR r9, [lr, #4]
LDR r10, [lr, #8]
LDR r11, [lr, #12]
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
LDR r8, [sp, #4]
STR r4, [r1]
STR r5, [r1, #4]
STR r6, [r1, #8]
STR r7, [r1, #12]
LDM r8, {r4, r5, r6, r7}
SUBS r2, r2, #0x10
ADD lr, lr, #0x10
ADD r1, r1, #0x10
#ifdef __GNUC__
BNE L_AES_CTR_encrypt_loop_block_192
#else
BNE.W L_AES_CTR_encrypt_loop_block_192
#endif
#ifdef __GNUC__
B L_AES_CTR_encrypt_end
#else
B.W L_AES_CTR_encrypt_end
#endif
L_AES_CTR_encrypt_start_block_128:
L_AES_CTR_encrypt_loop_block_128:
PUSH {r1, r2, lr}
LDR lr, [sp, #16]
ADDS r11, r7, #0x1
ADCS r10, r6, #0x0
ADCS r9, r5, #0x0
ADC r8, r4, #0x0
STM lr, {r8, r9, r10, r11}
LDM r3!, {r8, r9, r10, r11}
/* Round: 0 - XOR in key schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
MOV r1, #0x4
BL AES_encrypt_block
POP {r1, r2, lr}
LDR r3, [sp]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
LDR r8, [lr]
LDR r9, [lr, #4]
LDR r10, [lr, #8]
LDR r11, [lr, #12]
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
LDR r8, [sp, #4]
STR r4, [r1]
STR r5, [r1, #4]
STR r6, [r1, #8]
STR r7, [r1, #12]
LDM r8, {r4, r5, r6, r7}
SUBS r2, r2, #0x10
ADD lr, lr, #0x10
ADD r1, r1, #0x10
#ifdef __GNUC__
BNE L_AES_CTR_encrypt_loop_block_128
#else
BNE.W L_AES_CTR_encrypt_loop_block_128
#endif
L_AES_CTR_encrypt_end:
/* Swap the counter back to big-endian byte order and store it. */
POP {r3, r8}
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
STM r8, {r4, r5, r6, r7}
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 293 */
.size AES_CTR_encrypt,.-AES_CTR_encrypt
#endif /* WOLFSSL_AES_COUNTER */
#ifdef HAVE_AES_DECRYPT
#if defined(WOLFSSL_AES_DIRECT) || defined(WOLFSSL_AES_COUNTER) || defined(HAVE_AES_CBC)
.text
.align 4
.globl AES_decrypt_block
.type AES_decrypt_block, %function
/* Decrypt one AES block held in r4-r7 (internal helper, custom convention).
 * In:  r0 = td lookup table, r1 = loop iterations (two rounds each; callers
 *      pass 4/5/6 for 10/12/14 rounds), r2 = td4 inverse S-box byte table,
 *      r3 = round-key pointer past round 0 (caller applies round 0).
 * Out: r4-r7 = decrypted state. Clobbers r8-r12, lr; r3 is advanced. */
AES_decrypt_block:
PUSH {lr}
L_AES_decrypt_block_nr:
/* First of two inverse rounds: state r4-r7 -> r8-r11 via td lookups. */
UBFX r8, r7, #16, #8
LSR r11, r4, #24
UBFX r12, r6, #8, #8
UBFX lr, r5, #0, #8
LDR r8, [r0, r8, LSL #2]
LDR r11, [r0, r11, LSL #2]
LDR r12, [r0, r12, LSL #2]
LDR lr, [r0, lr, LSL #2]
UBFX r9, r4, #16, #8
EOR r8, r8, r11, ROR #24
LSR r11, r5, #24
EOR r8, r8, r12, ROR #8
UBFX r12, r7, #8, #8
EOR r8, r8, lr, ROR #16
UBFX lr, r6, #0, #8
LDR r9, [r0, r9, LSL #2]
LDR r11, [r0, r11, LSL #2]
LDR r12, [r0, r12, LSL #2]
LDR lr, [r0, lr, LSL #2]
UBFX r10, r5, #16, #8
EOR r9, r9, r11, ROR #24
LSR r11, r6, #24
EOR r9, r9, r12, ROR #8
UBFX r12, r4, #8, #8
EOR r9, r9, lr, ROR #16
UBFX lr, r7, #0, #8
LDR r10, [r0, r10, LSL #2]
LDR r11, [r0, r11, LSL #2]
LDR r12, [r0, r12, LSL #2]
LDR lr, [r0, lr, LSL #2]
UBFX r4, r4, #0, #8
EOR r10, r10, r11, ROR #24
UBFX r11, r6, #16, #8
EOR r10, r10, r12, ROR #8
LSR r12, r7, #24
EOR r10, r10, lr, ROR #16
UBFX lr, r5, #8, #8
LDR r4, [r0, r4, LSL #2]
LDR r12, [r0, r12, LSL #2]
LDR r11, [r0, r11, LSL #2]
LDR lr, [r0, lr, LSL #2]
EOR r12, r12, r4, ROR #24
LDM r3!, {r4, r5, r6, r7}
EOR r11, r11, lr, ROR #8
EOR r11, r11, r12, ROR #24
/* XOR in Key Schedule */
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
/* Second inverse round of the pair: r8-r11 -> r4-r7. */
UBFX r4, r11, #16, #8
LSR r7, r8, #24
UBFX r12, r10, #8, #8
UBFX lr, r9, #0, #8
LDR r4, [r0, r4, LSL #2]
LDR r7, [r0, r7, LSL #2]
LDR r12, [r0, r12, LSL #2]
LDR lr, [r0, lr, LSL #2]
UBFX r5, r8, #16, #8
EOR r4, r4, r7, ROR #24
LSR r7, r9, #24
EOR r4, r4, r12, ROR #8
UBFX r12, r11, #8, #8
EOR r4, r4, lr, ROR #16
UBFX lr, r10, #0, #8
LDR r5, [r0, r5, LSL #2]
LDR r7, [r0, r7, LSL #2]
LDR r12, [r0, r12, LSL #2]
LDR lr, [r0, lr, LSL #2]
UBFX r6, r9, #16, #8
EOR r5, r5, r7, ROR #24
LSR r7, r10, #24
EOR r5, r5, r12, ROR #8
UBFX r12, r8, #8, #8
EOR r5, r5, lr, ROR #16
UBFX lr, r11, #0, #8
LDR r6, [r0, r6, LSL #2]
LDR r7, [r0, r7, LSL #2]
LDR r12, [r0, r12, LSL #2]
LDR lr, [r0, lr, LSL #2]
UBFX r8, r8, #0, #8
EOR r6, r6, r7, ROR #24
UBFX r7, r10, #16, #8
EOR r6, r6, r12, ROR #8
LSR r12, r11, #24
EOR r6, r6, lr, ROR #16
UBFX lr, r9, #8, #8
LDR r8, [r0, r8, LSL #2]
LDR r12, [r0, r12, LSL #2]
LDR r7, [r0, r7, LSL #2]
LDR lr, [r0, lr, LSL #2]
EOR r12, r12, r8, ROR #24
LDM r3!, {r8, r9, r10, r11}
EOR r7, r7, lr, ROR #8
EOR r7, r7, r12, ROR #24
/* XOR in Key Schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
SUBS r1, r1, #0x1
#ifdef __GNUC__
BNE L_AES_decrypt_block_nr
#else
BNE.W L_AES_decrypt_block_nr
#endif
/* Penultimate inverse round (same pattern as the loop body). */
UBFX r8, r7, #16, #8
LSR r11, r4, #24
UBFX r12, r6, #8, #8
UBFX lr, r5, #0, #8
LDR r8, [r0, r8, LSL #2]
LDR r11, [r0, r11, LSL #2]
LDR r12, [r0, r12, LSL #2]
LDR lr, [r0, lr, LSL #2]
UBFX r9, r4, #16, #8
EOR r8, r8, r11, ROR #24
LSR r11, r5, #24
EOR r8, r8, r12, ROR #8
UBFX r12, r7, #8, #8
EOR r8, r8, lr, ROR #16
UBFX lr, r6, #0, #8
LDR r9, [r0, r9, LSL #2]
LDR r11, [r0, r11, LSL #2]
LDR r12, [r0, r12, LSL #2]
LDR lr, [r0, lr, LSL #2]
UBFX r10, r5, #16, #8
EOR r9, r9, r11, ROR #24
LSR r11, r6, #24
EOR r9, r9, r12, ROR #8
UBFX r12, r4, #8, #8
EOR r9, r9, lr, ROR #16
UBFX lr, r7, #0, #8
LDR r10, [r0, r10, LSL #2]
LDR r11, [r0, r11, LSL #2]
LDR r12, [r0, r12, LSL #2]
LDR lr, [r0, lr, LSL #2]
UBFX r4, r4, #0, #8
EOR r10, r10, r11, ROR #24
UBFX r11, r6, #16, #8
EOR r10, r10, r12, ROR #8
LSR r12, r7, #24
EOR r10, r10, lr, ROR #16
UBFX lr, r5, #8, #8
LDR r4, [r0, r4, LSL #2]
LDR r12, [r0, r12, LSL #2]
LDR r11, [r0, r11, LSL #2]
LDR lr, [r0, lr, LSL #2]
EOR r12, r12, r4, ROR #24
LDM r3!, {r4, r5, r6, r7}
EOR r11, r11, lr, ROR #8
EOR r11, r11, r12, ROR #24
/* XOR in Key Schedule */
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
/* Last round: InvSubBytes via the td4 byte table (r2), no InvMixColumns,
 * then the final AddRoundKey. */
UBFX r4, r9, #0, #8
UBFX r7, r10, #8, #8
UBFX r12, r11, #16, #8
LSR lr, r8, #24
LDRB r4, [r2, r4]
LDRB r7, [r2, r7]
LDRB r12, [r2, r12]
LDRB lr, [r2, lr]
UBFX r5, r10, #0, #8
EOR r4, r4, r7, LSL #8
UBFX r7, r11, #8, #8
EOR r4, r4, r12, LSL #16
UBFX r12, r8, #16, #8
EOR r4, r4, lr, LSL #24
LSR lr, r9, #24
LDRB r7, [r2, r7]
LDRB lr, [r2, lr]
LDRB r5, [r2, r5]
LDRB r12, [r2, r12]
UBFX r6, r11, #0, #8
EOR r5, r5, r7, LSL #8
UBFX r7, r8, #8, #8
EOR r5, r5, r12, LSL #16
UBFX r12, r9, #16, #8
EOR r5, r5, lr, LSL #24
LSR lr, r10, #24
LDRB r7, [r2, r7]
LDRB lr, [r2, lr]
LDRB r6, [r2, r6]
LDRB r12, [r2, r12]
LSR r11, r11, #24
EOR r6, r6, r7, LSL #8
UBFX r7, r8, #0, #8
EOR r6, r6, r12, LSL #16
UBFX r12, r9, #8, #8
EOR r6, r6, lr, LSL #24
UBFX lr, r10, #16, #8
LDRB r11, [r2, r11]
LDRB r12, [r2, r12]
LDRB r7, [r2, r7]
LDRB lr, [r2, lr]
EOR r12, r12, r11, LSL #16
LDM r3, {r8, r9, r10, r11}
EOR r7, r7, r12, LSL #8
EOR r7, r7, lr, LSL #16
/* XOR in Key Schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
POP {pc}
/* Cycle Count = 285 */
.size AES_decrypt_block,.-AES_decrypt_block
.text
.type L_AES_Thumb2_td_ecb, %object
.size L_AES_Thumb2_td_ecb, 12
.align 4
/* Literal-pool word holding the address of the decryption lookup table
 * (L_AES_Thumb2_td_data); loaded with "LDR r0, L_AES_Thumb2_td_ecb". */
L_AES_Thumb2_td_ecb:
.word L_AES_Thumb2_td_data
.text
.type L_AES_Thumb2_td4, %object
.size L_AES_Thumb2_td4, 256
.align 4
/* AES inverse S-box as a plain 256-entry byte table; indexed directly
 * (LDRB [r2, rX]) in the final round of AES_decrypt_block. */
L_AES_Thumb2_td4:
.byte 0x52
.byte 0x9
.byte 0x6a
.byte 0xd5
.byte 0x30
.byte 0x36
.byte 0xa5
.byte 0x38
.byte 0xbf
.byte 0x40
.byte 0xa3
.byte 0x9e
.byte 0x81
.byte 0xf3
.byte 0xd7
.byte 0xfb
.byte 0x7c
.byte 0xe3
.byte 0x39
.byte 0x82
.byte 0x9b
.byte 0x2f
.byte 0xff
.byte 0x87
.byte 0x34
.byte 0x8e
.byte 0x43
.byte 0x44
.byte 0xc4
.byte 0xde
.byte 0xe9
.byte 0xcb
.byte 0x54
.byte 0x7b
.byte 0x94
.byte 0x32
.byte 0xa6
.byte 0xc2
.byte 0x23
.byte 0x3d
.byte 0xee
.byte 0x4c
.byte 0x95
.byte 0xb
.byte 0x42
.byte 0xfa
.byte 0xc3
.byte 0x4e
.byte 0x8
.byte 0x2e
.byte 0xa1
.byte 0x66
.byte 0x28
.byte 0xd9
.byte 0x24
.byte 0xb2
.byte 0x76
.byte 0x5b
.byte 0xa2
.byte 0x49
.byte 0x6d
.byte 0x8b
.byte 0xd1
.byte 0x25
.byte 0x72
.byte 0xf8
.byte 0xf6
.byte 0x64
.byte 0x86
.byte 0x68
.byte 0x98
.byte 0x16
.byte 0xd4
.byte 0xa4
.byte 0x5c
.byte 0xcc
.byte 0x5d
.byte 0x65
.byte 0xb6
.byte 0x92
.byte 0x6c
.byte 0x70
.byte 0x48
.byte 0x50
.byte 0xfd
.byte 0xed
.byte 0xb9
.byte 0xda
.byte 0x5e
.byte 0x15
.byte 0x46
.byte 0x57
.byte 0xa7
.byte 0x8d
.byte 0x9d
.byte 0x84
.byte 0x90
.byte 0xd8
.byte 0xab
.byte 0x0
.byte 0x8c
.byte 0xbc
.byte 0xd3
.byte 0xa
.byte 0xf7
.byte 0xe4
.byte 0x58
.byte 0x5
.byte 0xb8
.byte 0xb3
.byte 0x45
.byte 0x6
.byte 0xd0
.byte 0x2c
.byte 0x1e
.byte 0x8f
.byte 0xca
.byte 0x3f
.byte 0xf
.byte 0x2
.byte 0xc1
.byte 0xaf
.byte 0xbd
.byte 0x3
.byte 0x1
.byte 0x13
.byte 0x8a
.byte 0x6b
.byte 0x3a
.byte 0x91
.byte 0x11
.byte 0x41
.byte 0x4f
.byte 0x67
.byte 0xdc
.byte 0xea
.byte 0x97
.byte 0xf2
.byte 0xcf
.byte 0xce
.byte 0xf0
.byte 0xb4
.byte 0xe6
.byte 0x73
.byte 0x96
.byte 0xac
.byte 0x74
.byte 0x22
.byte 0xe7
.byte 0xad
.byte 0x35
.byte 0x85
.byte 0xe2
.byte 0xf9
.byte 0x37
.byte 0xe8
.byte 0x1c
.byte 0x75
.byte 0xdf
.byte 0x6e
.byte 0x47
.byte 0xf1
.byte 0x1a
.byte 0x71
.byte 0x1d
.byte 0x29
.byte 0xc5
.byte 0x89
.byte 0x6f
.byte 0xb7
.byte 0x62
.byte 0xe
.byte 0xaa
.byte 0x18
.byte 0xbe
.byte 0x1b
.byte 0xfc
.byte 0x56
.byte 0x3e
.byte 0x4b
.byte 0xc6
.byte 0xd2
.byte 0x79
.byte 0x20
.byte 0x9a
.byte 0xdb
.byte 0xc0
.byte 0xfe
.byte 0x78
.byte 0xcd
.byte 0x5a
.byte 0xf4
.byte 0x1f
.byte 0xdd
.byte 0xa8
.byte 0x33
.byte 0x88
.byte 0x7
.byte 0xc7
.byte 0x31
.byte 0xb1
.byte 0x12
.byte 0x10
.byte 0x59
.byte 0x27
.byte 0x80
.byte 0xec
.byte 0x5f
.byte 0x60
.byte 0x51
.byte 0x7f
.byte 0xa9
.byte 0x19
.byte 0xb5
.byte 0x4a
.byte 0xd
.byte 0x2d
.byte 0xe5
.byte 0x7a
.byte 0x9f
.byte 0x93
.byte 0xc9
.byte 0x9c
.byte 0xef
.byte 0xa0
.byte 0xe0
.byte 0x3b
.byte 0x4d
.byte 0xae
.byte 0x2a
.byte 0xf5
.byte 0xb0
.byte 0xc8
.byte 0xeb
.byte 0xbb
.byte 0x3c
.byte 0x83
.byte 0x53
.byte 0x99
.byte 0x61
.byte 0x17
.byte 0x2b
.byte 0x4
.byte 0x7e
.byte 0xba
.byte 0x77
.byte 0xd6
.byte 0x26
.byte 0xe1
.byte 0x69
.byte 0x14
.byte 0x63
.byte 0x55
.byte 0x21
.byte 0xc
.byte 0x7d
#if defined(WOLFSSL_AES_DIRECT) || defined(WOLFSSL_AES_COUNTER)
.text
.align 4
.globl AES_ECB_decrypt
.type AES_ECB_decrypt, %function
/* ECB-decrypt a buffer of whole 16-byte blocks.
 * In:  r0 = input, r1 = output, r2 = length in bytes (multiple of 16),
 *      r3 = round-key schedule, [sp+36] = round count (10/12/14).
 * Length is moved to r12 so r2 can hold the td4 inverse S-box pointer
 * required by AES_decrypt_block. */
AES_ECB_decrypt:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
LDR r8, [sp, #36]
MOV lr, r0
LDR r0, L_AES_Thumb2_td_ecb
MOV r12, r2
ADR r2, L_AES_Thumb2_td4
CMP r8, #0xa
#ifdef __GNUC__
BEQ L_AES_ECB_decrypt_start_block_128
#else
BEQ.W L_AES_ECB_decrypt_start_block_128
#endif
CMP r8, #0xc
#ifdef __GNUC__
BEQ L_AES_ECB_decrypt_start_block_192
#else
BEQ.W L_AES_ECB_decrypt_start_block_192
#endif
L_AES_ECB_decrypt_loop_block_256:
LDR r4, [lr]
LDR r5, [lr, #4]
LDR r6, [lr, #8]
LDR r7, [lr, #12]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
PUSH {r1, r3, r12, lr}
LDM r3!, {r8, r9, r10, r11}
/* Round: 0 - XOR in key schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
MOV r1, #0x6
BL AES_decrypt_block
POP {r1, r3, r12, lr}
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
STR r4, [r1]
STR r5, [r1, #4]
STR r6, [r1, #8]
STR r7, [r1, #12]
SUBS r12, r12, #0x10
ADD lr, lr, #0x10
ADD r1, r1, #0x10
#ifdef __GNUC__
BNE L_AES_ECB_decrypt_loop_block_256
#else
BNE.W L_AES_ECB_decrypt_loop_block_256
#endif
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
B L_AES_ECB_decrypt_end
#else
B.N L_AES_ECB_decrypt_end
#endif
L_AES_ECB_decrypt_start_block_192:
L_AES_ECB_decrypt_loop_block_192:
LDR r4, [lr]
LDR r5, [lr, #4]
LDR r6, [lr, #8]
LDR r7, [lr, #12]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
PUSH {r1, r3, r12, lr}
LDM r3!, {r8, r9, r10, r11}
/* Round: 0 - XOR in key schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
MOV r1, #0x5
BL AES_decrypt_block
POP {r1, r3, r12, lr}
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
STR r4, [r1]
STR r5, [r1, #4]
STR r6, [r1, #8]
STR r7, [r1, #12]
SUBS r12, r12, #0x10
ADD lr, lr, #0x10
ADD r1, r1, #0x10
#ifdef __GNUC__
BNE L_AES_ECB_decrypt_loop_block_192
#else
BNE.W L_AES_ECB_decrypt_loop_block_192
#endif
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
B L_AES_ECB_decrypt_end
#else
B.N L_AES_ECB_decrypt_end
#endif
L_AES_ECB_decrypt_start_block_128:
L_AES_ECB_decrypt_loop_block_128:
LDR r4, [lr]
LDR r5, [lr, #4]
LDR r6, [lr, #8]
LDR r7, [lr, #12]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
PUSH {r1, r3, r12, lr}
LDM r3!, {r8, r9, r10, r11}
/* Round: 0 - XOR in key schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
MOV r1, #0x4
BL AES_decrypt_block
POP {r1, r3, r12, lr}
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
STR r4, [r1]
STR r5, [r1, #4]
STR r6, [r1, #8]
STR r7, [r1, #12]
SUBS r12, r12, #0x10
ADD lr, lr, #0x10
ADD r1, r1, #0x10
#ifdef __GNUC__
BNE L_AES_ECB_decrypt_loop_block_128
#else
BNE.W L_AES_ECB_decrypt_loop_block_128
#endif
L_AES_ECB_decrypt_end:
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 210 */
.size AES_ECB_decrypt,.-AES_ECB_decrypt
#endif /* WOLFSSL_AES_DIRECT || WOLFSSL_AES_COUNTER */
#ifdef HAVE_AES_CBC
.text
.align 4
.globl AES_CBC_decrypt
.type AES_CBC_decrypt, %function
AES_CBC_decrypt:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
LDR r8, [sp, #36]
LDR r4, [sp, #40]
MOV lr, r0
LDR r0, L_AES_Thumb2_td_ecb
MOV r12, r2
ADR r2, L_AES_Thumb2_td4
PUSH {r3, r4}
CMP r8, #0xa
#ifdef __GNUC__
BEQ L_AES_CBC_decrypt_loop_block_128
#else
BEQ.W L_AES_CBC_decrypt_loop_block_128
#endif
CMP r8, #0xc
#ifdef __GNUC__
BEQ L_AES_CBC_decrypt_loop_block_192
#else
BEQ.W L_AES_CBC_decrypt_loop_block_192
#endif
L_AES_CBC_decrypt_loop_block_256:
PUSH {r1, r12, lr}
LDR r4, [lr]
LDR r5, [lr, #4]
LDR r6, [lr, #8]
LDR r7, [lr, #12]
LDR lr, [sp, #16]
STRD r4, r5, [lr, #16]
STRD r6, r7, [lr, #24]
LDM r3!, {r8, r9, r10, r11}
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
/* Round: 0 - XOR in key schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
MOV r1, #0x6
BL AES_decrypt_block
LDR lr, [sp, #16]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
LDM lr, {r8, r9, r10, r11}
POP {r1, r12, lr}
LDR r3, [sp]
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
STR r4, [r1]
STR r5, [r1, #4]
STR r6, [r1, #8]
STR r7, [r1, #12]
SUBS r12, r12, #0x10
ADD lr, lr, #0x10
ADD r1, r1, #0x10
#ifdef __GNUC__
BEQ L_AES_CBC_decrypt_end_odd
#else
BEQ.W L_AES_CBC_decrypt_end_odd
#endif
PUSH {r1, r12, lr}
LDR r4, [lr]
LDR r5, [lr, #4]
LDR r6, [lr, #8]
LDR r7, [lr, #12]
LDR lr, [sp, #16]
STRD r4, r5, [lr]
STRD r6, r7, [lr, #8]
LDM r3!, {r8, r9, r10, r11}
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
/* Round: 0 - XOR in key schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
MOV r1, #0x6
BL AES_decrypt_block
LDR lr, [sp, #16]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
LDRD r8, r9, [lr, #16]
LDRD r10, r11, [lr, #24]
POP {r1, r12, lr}
LDR r3, [sp]
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
STR r4, [r1]
STR r5, [r1, #4]
STR r6, [r1, #8]
STR r7, [r1, #12]
SUBS r12, r12, #0x10
ADD lr, lr, #0x10
ADD r1, r1, #0x10
#ifdef __GNUC__
BNE L_AES_CBC_decrypt_loop_block_256
#else
BNE.W L_AES_CBC_decrypt_loop_block_256
#endif
#ifdef __GNUC__
B L_AES_CBC_decrypt_end
#else
B.W L_AES_CBC_decrypt_end
#endif
L_AES_CBC_decrypt_loop_block_192:
PUSH {r1, r12, lr}
LDR r4, [lr]
LDR r5, [lr, #4]
LDR r6, [lr, #8]
LDR r7, [lr, #12]
LDR lr, [sp, #16]
STRD r4, r5, [lr, #16]
STRD r6, r7, [lr, #24]
LDM r3!, {r8, r9, r10, r11}
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
/* Round: 0 - XOR in key schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
MOV r1, #0x5
BL AES_decrypt_block
LDR lr, [sp, #16]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
LDM lr, {r8, r9, r10, r11}
POP {r1, r12, lr}
LDR r3, [sp]
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
STR r4, [r1]
STR r5, [r1, #4]
STR r6, [r1, #8]
STR r7, [r1, #12]
SUBS r12, r12, #0x10
ADD lr, lr, #0x10
ADD r1, r1, #0x10
#ifdef __GNUC__
BEQ L_AES_CBC_decrypt_end_odd
#else
BEQ.W L_AES_CBC_decrypt_end_odd
#endif
PUSH {r1, r12, lr}
LDR r4, [lr]
LDR r5, [lr, #4]
LDR r6, [lr, #8]
LDR r7, [lr, #12]
LDR lr, [sp, #16]
STRD r4, r5, [lr]
STRD r6, r7, [lr, #8]
LDM r3!, {r8, r9, r10, r11}
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
/* Round: 0 - XOR in key schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
MOV r1, #0x5
BL AES_decrypt_block
LDR lr, [sp, #16]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
LDRD r8, r9, [lr, #16]
LDRD r10, r11, [lr, #24]
POP {r1, r12, lr}
LDR r3, [sp]
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
STR r4, [r1]
STR r5, [r1, #4]
STR r6, [r1, #8]
STR r7, [r1, #12]
SUBS r12, r12, #0x10
ADD lr, lr, #0x10
ADD r1, r1, #0x10
#ifdef __GNUC__
BNE L_AES_CBC_decrypt_loop_block_192
#else
BNE.W L_AES_CBC_decrypt_loop_block_192
#endif
#ifdef __GNUC__
B L_AES_CBC_decrypt_end
#else
B.W L_AES_CBC_decrypt_end
#endif
L_AES_CBC_decrypt_loop_block_128:
PUSH {r1, r12, lr}
LDR r4, [lr]
LDR r5, [lr, #4]
LDR r6, [lr, #8]
LDR r7, [lr, #12]
LDR lr, [sp, #16]
STRD r4, r5, [lr, #16]
STRD r6, r7, [lr, #24]
LDM r3!, {r8, r9, r10, r11}
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
/* Round: 0 - XOR in key schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
MOV r1, #0x4
BL AES_decrypt_block
LDR lr, [sp, #16]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
LDM lr, {r8, r9, r10, r11}
POP {r1, r12, lr}
LDR r3, [sp]
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
STR r4, [r1]
STR r5, [r1, #4]
STR r6, [r1, #8]
STR r7, [r1, #12]
SUBS r12, r12, #0x10
ADD lr, lr, #0x10
ADD r1, r1, #0x10
#ifdef __GNUC__
BEQ L_AES_CBC_decrypt_end_odd
#else
BEQ.W L_AES_CBC_decrypt_end_odd
#endif
PUSH {r1, r12, lr}
LDR r4, [lr]
LDR r5, [lr, #4]
LDR r6, [lr, #8]
LDR r7, [lr, #12]
LDR lr, [sp, #16]
STRD r4, r5, [lr]
STRD r6, r7, [lr, #8]
LDM r3!, {r8, r9, r10, r11}
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
/* Round: 0 - XOR in key schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
MOV r1, #0x4
BL AES_decrypt_block
LDR lr, [sp, #16]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
LDRD r8, r9, [lr, #16]
LDRD r10, r11, [lr, #24]
POP {r1, r12, lr}
LDR r3, [sp]
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
STR r4, [r1]
STR r5, [r1, #4]
STR r6, [r1, #8]
STR r7, [r1, #12]
SUBS r12, r12, #0x10
ADD lr, lr, #0x10
ADD r1, r1, #0x10
#ifdef __GNUC__
BNE L_AES_CBC_decrypt_loop_block_128
#else
BNE.W L_AES_CBC_decrypt_loop_block_128
#endif
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
B L_AES_CBC_decrypt_end
#else
B.N L_AES_CBC_decrypt_end
#endif
L_AES_CBC_decrypt_end_odd:
LDR r4, [sp, #4]
LDRD r8, r9, [r4, #16]
LDRD r10, r11, [r4, #24]
STRD r8, r9, [r4]
STRD r10, r11, [r4, #8]
L_AES_CBC_decrypt_end:
POP {r3, r4}
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 518 */
.size AES_CBC_decrypt,.-AES_CBC_decrypt
#endif /* HAVE_AES_CBC */
#endif /* WOLFSSL_AES_DIRECT || WOLFSSL_AES_COUNTER || HAVE_AES_CBC */
#endif /* HAVE_AES_DECRYPT */
#ifdef HAVE_AESGCM
.text
.type L_GCM_gmult_len_r, %object
.size L_GCM_gmult_len_r, 64
.align 4
/* GHASH 4-bit reduction table: entry i is the GF(2^128) reduction term
 * folded into the top word when the 128-bit accumulator is shifted right
 * by 4 bits and the 4 bits shifted out have value i.  Entries are GF(2)
 * (XOR) linear: entry[i^j] == entry[i] ^ entry[j], base value 0x1c200000. */
L_GCM_gmult_len_r:
.word 0x0
.word 0x1c200000
.word 0x38400000
.word 0x24600000
.word 0x70800000
.word 0x6ca00000
.word 0x48c00000
.word 0x54e00000
.word 0xe1000000
.word 0xfd200000
.word 0xd9400000
.word 0xc5600000
.word 0x91800000
.word 0x8da00000
.word 0xa9c00000
.word 0xb5e00000
.text
.align 4
.globl GCM_gmult_len
.type GCM_gmult_len, %function
/* GHASH over a run of 16-byte blocks using the 4-bit table method.
 * In:   r0 = 16-byte GHASH accumulator X (read and updated in place)
 *       r1 = pre-computed table of multiples of H, 16 entries of 16 bytes,
 *            indexed by a 4-bit nibble (entry address = r1 + nibble*16)
 *       r2 = input data to authenticate
 *       r3 = length in bytes — presumably a non-zero multiple of 16
 *            (the loop subtracts 16 and exits only on zero) — TODO confirm
 * Work: r4-r11 = 128-bit running product, r12 = current accumulator word,
 *       lr = address of the L_GCM_gmult_len_r reduction table
 * AAPCS: r4-r11 saved/restored; returns by popping into pc.
 * Per word of X (offsets 12, 8, 4, 0) the code consumes 8 nibbles; each
 * 17-instruction stanza shifts the product right 4 bits (reducing via the
 * r-table lookup on the bits shifted out) and XORs in the table entry for
 * the next nibble.  Nibble order within a word: bits 27:24, 31:28, 19:16,
 * 23:20, 11:8, 15:12, 3:0, 7:4. */
GCM_gmult_len:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
ADR lr, L_GCM_gmult_len_r
/* One iteration per 16-byte input block. */
L_GCM_gmult_len_start_block:
PUSH {r3}
/* Word at offset 12: XOR accumulator word with data word, then start the
 * product directly from the table entry for bits 27:24. */
LDR r12, [r0, #12]
LDR r3, [r2, #12]
EOR r12, r12, r3
LSR r3, r12, #24
AND r3, r3, #0xf
ADD r3, r1, r3, LSL #4
LDM r3, {r8, r9, r10, r11}
/* Shift product right 4 (reduce via r-table) and XOR in entry for bits 31:28. */
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #28
EOR r11, r11, r10, LSL #28
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #16
EOR r11, r11, r10, LSL #28
AND r4, r4, #0xf
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #20
EOR r11, r11, r10, LSL #28
AND r4, r4, #0xf
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #8
EOR r11, r11, r10, LSL #28
AND r4, r4, #0xf
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #12
EOR r11, r11, r10, LSL #28
AND r4, r4, #0xf
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
AND r4, r12, #0xf
EOR r11, r11, r10, LSL #28
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #4
EOR r11, r11, r10, LSL #28
AND r4, r4, #0xf
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
/* Final 4-bit shift for this word: reduce only, no table-entry XOR. */
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
EOR r11, r11, r10, LSL #28
LDR r3, [lr, r3, LSL #2]
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
/* Word at offset 8: fold data word in, XOR first table entry into product. */
LDR r12, [r0, #8]
LDR r3, [r2, #8]
EOR r12, r12, r3
LSR r3, r12, #24
AND r3, r3, #0xf
ADD r3, r1, r3, LSL #4
LDM r3, {r4, r5, r6, r7}
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #28
EOR r11, r11, r10, LSL #28
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #16
EOR r11, r11, r10, LSL #28
AND r4, r4, #0xf
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #20
EOR r11, r11, r10, LSL #28
AND r4, r4, #0xf
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #8
EOR r11, r11, r10, LSL #28
AND r4, r4, #0xf
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #12
EOR r11, r11, r10, LSL #28
AND r4, r4, #0xf
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
AND r4, r12, #0xf
EOR r11, r11, r10, LSL #28
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #4
EOR r11, r11, r10, LSL #28
AND r4, r4, #0xf
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
/* Final reduction-only shift for this word. */
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
EOR r11, r11, r10, LSL #28
LDR r3, [lr, r3, LSL #2]
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
/* Word at offset 4. */
LDR r12, [r0, #4]
LDR r3, [r2, #4]
EOR r12, r12, r3
LSR r3, r12, #24
AND r3, r3, #0xf
ADD r3, r1, r3, LSL #4
LDM r3, {r4, r5, r6, r7}
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #28
EOR r11, r11, r10, LSL #28
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #16
EOR r11, r11, r10, LSL #28
AND r4, r4, #0xf
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #20
EOR r11, r11, r10, LSL #28
AND r4, r4, #0xf
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #8
EOR r11, r11, r10, LSL #28
AND r4, r4, #0xf
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #12
EOR r11, r11, r10, LSL #28
AND r4, r4, #0xf
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
AND r4, r12, #0xf
EOR r11, r11, r10, LSL #28
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #4
EOR r11, r11, r10, LSL #28
AND r4, r4, #0xf
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
/* Final reduction-only shift for this word. */
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
EOR r11, r11, r10, LSL #28
LDR r3, [lr, r3, LSL #2]
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
/* Word at offset 0 (last word: no trailing shift afterwards). */
LDR r12, [r0]
LDR r3, [r2]
EOR r12, r12, r3
LSR r3, r12, #24
AND r3, r3, #0xf
ADD r3, r1, r3, LSL #4
LDM r3, {r4, r5, r6, r7}
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #28
EOR r11, r11, r10, LSL #28
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #16
EOR r11, r11, r10, LSL #28
AND r4, r4, #0xf
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #20
EOR r11, r11, r10, LSL #28
AND r4, r4, #0xf
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #8
EOR r11, r11, r10, LSL #28
AND r4, r4, #0xf
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #12
EOR r11, r11, r10, LSL #28
AND r4, r4, #0xf
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
AND r4, r12, #0xf
EOR r11, r11, r10, LSL #28
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
LSR r6, r10, #4
AND r3, r11, #0xf
LSR r11, r11, #4
LSR r4, r12, #4
EOR r11, r11, r10, LSL #28
AND r4, r4, #0xf
LDR r3, [lr, r3, LSL #2]
ADD r4, r1, r4, LSL #4
EOR r10, r6, r9, LSL #28
LSR r9, r9, #4
LDM r4, {r4, r5, r6, r7}
EOR r9, r9, r8, LSL #28
EOR r8, r3, r8, LSR #4
EOR r8, r8, r4
EOR r9, r9, r5
EOR r10, r10, r6
EOR r11, r11, r7
/* Byte-swap the product and store it back as the new accumulator. */
REV r8, r8
REV r9, r9
REV r10, r10
REV r11, r11
STM r0, {r8, r9, r10, r11}
POP {r3}
/* Advance to the next 16-byte input block; r3 counts remaining bytes. */
SUBS r3, r3, #0x10
ADD r2, r2, #0x10
#ifdef __GNUC__
BNE L_GCM_gmult_len_start_block
#else
BNE.W L_GCM_gmult_len_start_block
#endif
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 742 */
.size GCM_gmult_len,.-GCM_gmult_len
.text
.type L_AES_Thumb2_te_gcm, %object
/* NOTE(review): object is a single 4-byte pointer but .size says 12 —
 * harmless metadata, yet it looks like a generator artifact; confirm
 * against upstream before changing. */
.size L_AES_Thumb2_te_gcm, 12
.align 4
/* Literal-pool pointer to the AES encryption T-table, loaded by
 * AES_GCM_encrypt via "LDR r0, L_AES_Thumb2_te_gcm". */
L_AES_Thumb2_te_gcm:
.word L_AES_Thumb2_te_data
.text
.align 4
.globl AES_GCM_encrypt
.type AES_GCM_encrypt, %function
/* AES-GCM counter-mode encryption of whole 16-byte blocks.
 * In:   r0 = input (plaintext), r1 = output (ciphertext),
 *       r2 = length in bytes — presumably a non-zero multiple of 16
 *            (loop exits only when the SUBS hits zero) — TODO confirm
 *       r3 = expanded key schedule,
 *       [sp+36] = number of rounds (10/12/14),
 *       [sp+40] = 16-byte counter block, updated in place.
 * The counter block is byte-swapped to host order on entry and swapped
 * back on exit; only its last 32-bit word (r7) is incremented per block,
 * matching GCM's 32-bit counter increment.
 * Calls AES_encrypt_block with r1 = (rounds - 2) / 2 as the inner loop
 * count (6 for AES-256, 5 for AES-192, 4 for AES-128).
 * Clobbers r12; r4-r11 preserved via push/pop. */
AES_GCM_encrypt:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
LDR r12, [sp, #36]
LDR r8, [sp, #40]
MOV lr, r0
LDR r0, L_AES_Thumb2_te_gcm
/* Load counter block and convert to host byte order in memory. */
LDM r8, {r4, r5, r6, r7}
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
STM r8, {r4, r5, r6, r7}
/* Keep key-schedule base and counter pointer on the stack:
 * [sp] = ks, [sp+4] = ctr (offsets shift by 12 inside the loops). */
PUSH {r3, r8}
CMP r12, #0xa
#ifdef __GNUC__
BEQ L_AES_GCM_encrypt_start_block_128
#else
BEQ.W L_AES_GCM_encrypt_start_block_128
#endif
CMP r12, #0xc
#ifdef __GNUC__
BEQ L_AES_GCM_encrypt_start_block_192
#else
BEQ.W L_AES_GCM_encrypt_start_block_192
#endif
/* AES-256 path: one counter block encrypted and XORed per iteration. */
L_AES_GCM_encrypt_loop_block_256:
PUSH {r1, r2, lr}
LDR lr, [sp, #16]
ADD r7, r7, #0x1
LDM r3!, {r8, r9, r10, r11}
STR r7, [lr, #12]
/* Round: 0 - XOR in key schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
MOV r1, #0x6
BL AES_encrypt_block
POP {r1, r2, lr}
LDR r3, [sp]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
/* XOR keystream with the plaintext block at lr (input pointer). */
LDR r8, [lr]
LDR r9, [lr, #4]
LDR r10, [lr, #8]
LDR r11, [lr, #12]
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
LDR r8, [sp, #4]
STR r4, [r1]
STR r5, [r1, #4]
STR r6, [r1, #8]
STR r7, [r1, #12]
/* Reload the (incremented) counter block for the next iteration. */
LDM r8, {r4, r5, r6, r7}
SUBS r2, r2, #0x10
ADD lr, lr, #0x10
ADD r1, r1, #0x10
#ifdef __GNUC__
BNE L_AES_GCM_encrypt_loop_block_256
#else
BNE.W L_AES_GCM_encrypt_loop_block_256
#endif
#ifdef __GNUC__
B L_AES_GCM_encrypt_end
#else
B.W L_AES_GCM_encrypt_end
#endif
/* AES-192 path. */
L_AES_GCM_encrypt_start_block_192:
L_AES_GCM_encrypt_loop_block_192:
PUSH {r1, r2, lr}
LDR lr, [sp, #16]
ADD r7, r7, #0x1
LDM r3!, {r8, r9, r10, r11}
STR r7, [lr, #12]
/* Round: 0 - XOR in key schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
MOV r1, #0x5
BL AES_encrypt_block
POP {r1, r2, lr}
LDR r3, [sp]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
LDR r8, [lr]
LDR r9, [lr, #4]
LDR r10, [lr, #8]
LDR r11, [lr, #12]
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
LDR r8, [sp, #4]
STR r4, [r1]
STR r5, [r1, #4]
STR r6, [r1, #8]
STR r7, [r1, #12]
LDM r8, {r4, r5, r6, r7}
SUBS r2, r2, #0x10
ADD lr, lr, #0x10
ADD r1, r1, #0x10
#ifdef __GNUC__
BNE L_AES_GCM_encrypt_loop_block_192
#else
BNE.W L_AES_GCM_encrypt_loop_block_192
#endif
#ifdef __GNUC__
B L_AES_GCM_encrypt_end
#else
B.W L_AES_GCM_encrypt_end
#endif
/* AES-128 path. */
L_AES_GCM_encrypt_start_block_128:
L_AES_GCM_encrypt_loop_block_128:
PUSH {r1, r2, lr}
LDR lr, [sp, #16]
ADD r7, r7, #0x1
LDM r3!, {r8, r9, r10, r11}
STR r7, [lr, #12]
/* Round: 0 - XOR in key schedule */
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
MOV r1, #0x4
BL AES_encrypt_block
POP {r1, r2, lr}
LDR r3, [sp]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
LDR r8, [lr]
LDR r9, [lr, #4]
LDR r10, [lr, #8]
LDR r11, [lr, #12]
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r10
EOR r7, r7, r11
LDR r8, [sp, #4]
STR r4, [r1]
STR r5, [r1, #4]
STR r6, [r1, #8]
STR r7, [r1, #12]
LDM r8, {r4, r5, r6, r7}
SUBS r2, r2, #0x10
ADD lr, lr, #0x10
ADD r1, r1, #0x10
#ifdef __GNUC__
BNE L_AES_GCM_encrypt_loop_block_128
#else
BNE.W L_AES_GCM_encrypt_loop_block_128
#endif
L_AES_GCM_encrypt_end:
POP {r3, r8}
/* Restore the counter block to big-endian byte order in memory. */
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
STM r8, {r4, r5, r6, r7}
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 275 */
.size AES_GCM_encrypt,.-AES_GCM_encrypt
#endif /* HAVE_AESGCM */
#endif /* !NO_AES */
#endif /* !__aarch64__ && __thumb__ */
#endif /* WOLFSSL_ARMASM */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
#endif /* !WOLFSSL_ARMASM_INLINE */
/* NOTE(review): dataset-concatenation artifact. The marker lines that stood
 * here ("|", "aenu1/aps3e", "| 49,102", and a file path) are not assembly and
 * would not assemble. Everything below originally belonged to a separate
 * source file:
 *   app/src/main/cpp/rpcs3/3rdparty/wolfssl/wolfssl/wolfcrypt/src/port/arm/armv8-32-sha3-asm.S
 *   (repo aenu1/aps3e, 49,102 bytes)
 * The two files should be split back apart. */
/* armv8-32-sha3-asm
*
* Copyright (C) 2006-2023 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
/* Generated using (from wolfssl):
* cd ../scripts
* ruby ./sha3/sha3.rb arm32 ../wolfssl/wolfcrypt/src/port/arm/armv8-32-sha3-asm.S
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif /* HAVE_CONFIG_H */
#include <wolfssl/wolfcrypt/settings.h>
#ifdef WOLFSSL_ARMASM
#if !defined(__aarch64__) && defined(__arm__) && !defined(__thumb__)
#ifndef WOLFSSL_ARMASM_INLINE
.text
.type L_sha3_arm2_neon_rt, %object
.size L_sha3_arm2_neon_rt, 192
.align 4
# The 24 Keccak-f[1600] round constants (FIPS 202), stored as
# (low 32 bits, high 32 bits) pairs for 64-bit little-endian loads.
# Consumed one per round by the NEON BlockSha3 via "vld1.8 {d30}, [r1]!".
L_sha3_arm2_neon_rt:
.word 0x1
.word 0x0
.word 0x8082
.word 0x0
.word 0x808a
.word 0x80000000
.word 0x80008000
.word 0x80000000
.word 0x808b
.word 0x0
.word 0x80000001
.word 0x0
.word 0x80008081
.word 0x80000000
.word 0x8009
.word 0x80000000
.word 0x8a
.word 0x0
.word 0x88
.word 0x0
.word 0x80008009
.word 0x0
.word 0x8000000a
.word 0x0
.word 0x8000808b
.word 0x0
.word 0x8b
.word 0x80000000
.word 0x8089
.word 0x80000000
.word 0x8003
.word 0x80000000
.word 0x8002
.word 0x80000000
.word 0x80
.word 0x80000000
.word 0x800a
.word 0x0
.word 0x8000000a
.word 0x80000000
.word 0x80008081
.word 0x80000000
.word 0x8080
.word 0x80000000
.word 0x80000001
.word 0x0
.word 0x80008008
.word 0x80000000
.text
.type L_sha3_arm2_rt, %object
.size L_sha3_arm2_rt, 192
.align 4
# Same 24 Keccak-f[1600] round constants as L_sha3_arm2_neon_rt, duplicated
# for the scalar (WOLFSSL_ARMASM_NO_NEON) BlockSha3 so each variant keeps its
# own ADR-reachable copy. Layout: (low 32 bits, high 32 bits) per constant.
L_sha3_arm2_rt:
.word 0x1
.word 0x0
.word 0x8082
.word 0x0
.word 0x808a
.word 0x80000000
.word 0x80008000
.word 0x80000000
.word 0x808b
.word 0x0
.word 0x80000001
.word 0x0
.word 0x80008081
.word 0x80000000
.word 0x8009
.word 0x80000000
.word 0x8a
.word 0x0
.word 0x88
.word 0x0
.word 0x80008009
.word 0x0
.word 0x8000000a
.word 0x0
.word 0x8000808b
.word 0x0
.word 0x8b
.word 0x80000000
.word 0x8089
.word 0x80000000
.word 0x8003
.word 0x80000000
.word 0x8002
.word 0x80000000
.word 0x80
.word 0x80000000
.word 0x800a
.word 0x0
.word 0x8000000a
.word 0x80000000
.word 0x80008081
.word 0x80000000
.word 0x8080
.word 0x80000000
.word 0x80000001
.word 0x0
.word 0x80008008
.word 0x80000000
#ifndef WOLFSSL_ARMASM_NO_NEON
.text
.align 4
.globl BlockSha3
.type BlockSha3, %function
# NEON Keccak-f[1600] permutation: 24 rounds over the 25-lane 64-bit state
# at r0 (state updated in place).  Lanes s[0..24] live in d0..d24 for the
# whole function; d25-d31 are temporaries, r1 walks the round-constant
# table, r2 counts rounds, r3 points at a 16-byte scratch buffer on the
# stack (holds b[4] and b[0] across the theta step).
# AAPCS: d8-d15 are callee-saved and preserved via vpush/vpop.
BlockSha3:
vpush {d8-d15}
sub sp, sp, #16
adr r1, L_sha3_arm2_neon_rt
mov r2, #24
mov r3, sp
# Load all 25 lanes into d0-d24.
vld1.8 {d0-d3}, [r0]!
vld1.8 {d4-d7}, [r0]!
vld1.8 {d8-d11}, [r0]!
vld1.8 {d12-d15}, [r0]!
vld1.8 {d16-d19}, [r0]!
vld1.8 {d20-d23}, [r0]!
vld1.8 {d24}, [r0]
sub r0, r0, #0xc0
L_sha3_arm32_neon_begin:
# Calc b[0..4]
# Theta column parities: d26=b[0], d27=b[1], d28=b[2], d29=b[3], d25=b[4].
veor d26, d0, d5
veor d27, d1, d6
veor d28, d2, d7
veor d29, d3, d8
veor d25, d4, d9
veor d26, d26, d10
veor d27, d27, d11
veor d28, d28, d12
veor d29, d29, d13
veor d25, d25, d14
veor d26, d26, d15
veor d27, d27, d16
veor d28, d28, d17
veor d29, d29, d18
veor d25, d25, d19
veor d26, d26, d20
veor d27, d27, d21
veor d28, d28, d22
veor d29, d29, d23
veor d25, d25, d24
# Spill b[4] and b[0] so they survive being overwritten by t[0]/t[1].
vst1.8 {d25, d26}, [r3]
# Calc t[0..4] and XOR into s[i*5..i*5+4]
# t[0]
vshr.u64 d30, d27, #63
vshl.u64 d31, d27, #1
veor d25, d25, d30
veor d25, d25, d31
# t[1]
vshr.u64 d30, d28, #63
vshl.u64 d31, d28, #1
veor d26, d26, d30
veor d26, d26, d31
# t[2]
vshr.u64 d30, d29, #63
vshl.u64 d31, d29, #1
veor d27, d27, d30
veor d27, d27, d31
# t[3]
vldr.8 d31, [r3]
vshr.u64 d30, d31, #63
vshl.u64 d31, d31, #1
veor d28, d28, d30
veor d28, d28, d31
# t[4]
vldr.8 d31, [r3, #8]
vshr.u64 d30, d31, #63
vshl.u64 d31, d31, #1
veor d29, d29, d30
veor d29, d29, d31
# NOTE(review): r3 is rewound by 16 here, but the vst1 above carries no
# writeback (!) in this copy, so r3 would drift below sp each round —
# confirm against upstream wolfSSL whether the vst1 should be "[r3]!".
sub r3, r3, #16
# Rho/pi: rotate each lane by its offset while permuting lane positions.
veor d0, d0, d25
# s[1] => s[10] (tmp)
veor d30, d1, d26
vshr.u64 d31, d30, #63
vshl.u64 d30, d30, #1
veor d30, d30, d31
# s[6] => s[1]
veor d1, d6, d26
vshr.u64 d31, d1, #20
vshl.u64 d1, d1, #44
veor d1, d1, d31
# s[9] => s[6]
veor d6, d9, d29
vshr.u64 d31, d6, #44
vshl.u64 d6, d6, #20
veor d6, d6, d31
# s[22] => s[9]
veor d9, d22, d27
vshr.u64 d31, d9, #3
vshl.u64 d9, d9, #61
veor d9, d9, d31
# s[14] => s[22]
veor d22, d14, d29
vshr.u64 d31, d22, #25
vshl.u64 d22, d22, #39
veor d22, d22, d31
# s[20] => s[14]
veor d14, d20, d25
vshr.u64 d31, d14, #46
vshl.u64 d14, d14, #18
veor d14, d14, d31
# s[2] => s[20]
veor d20, d2, d27
vshr.u64 d31, d20, #2
vshl.u64 d20, d20, #62
veor d20, d20, d31
# s[12] => s[2]
veor d2, d12, d27
vshr.u64 d31, d2, #21
vshl.u64 d2, d2, #43
veor d2, d2, d31
# s[13] => s[12]
veor d12, d13, d28
vshr.u64 d31, d12, #39
vshl.u64 d12, d12, #25
veor d12, d12, d31
# s[19] => s[13]
veor d13, d19, d29
vshr.u64 d31, d13, #56
vshl.u64 d13, d13, #8
veor d13, d13, d31
# s[23] => s[19]
veor d19, d23, d28
vshr.u64 d31, d19, #8
vshl.u64 d19, d19, #56
veor d19, d19, d31
# s[15] => s[23]
veor d23, d15, d25
vshr.u64 d31, d23, #23
vshl.u64 d23, d23, #41
veor d23, d23, d31
# s[4] => s[15]
veor d15, d4, d29
vshr.u64 d31, d15, #37
vshl.u64 d15, d15, #27
veor d15, d15, d31
# s[24] => s[4]
veor d4, d24, d29
vshr.u64 d31, d4, #50
vshl.u64 d4, d4, #14
veor d4, d4, d31
# s[21] => s[24]
veor d24, d21, d26
vshr.u64 d31, d24, #62
vshl.u64 d24, d24, #2
veor d24, d24, d31
# s[8] => s[21]
veor d21, d8, d28
vshr.u64 d31, d21, #9
vshl.u64 d21, d21, #55
veor d21, d21, d31
# s[16] => s[8]
veor d8, d16, d26
vshr.u64 d31, d8, #19
vshl.u64 d8, d8, #45
veor d8, d8, d31
# s[5] => s[16]
veor d16, d5, d25
vshr.u64 d31, d16, #28
vshl.u64 d16, d16, #36
veor d16, d16, d31
# s[3] => s[5]
veor d5, d3, d28
vshr.u64 d31, d5, #36
vshl.u64 d5, d5, #28
veor d5, d5, d31
# s[18] => s[3]
veor d3, d18, d28
vshr.u64 d31, d3, #43
vshl.u64 d3, d3, #21
veor d3, d3, d31
# s[17] => s[18]
veor d18, d17, d27
vshr.u64 d31, d18, #49
vshl.u64 d18, d18, #15
veor d18, d18, d31
# s[11] => s[17]
veor d17, d11, d26
vshr.u64 d31, d17, #54
vshl.u64 d17, d17, #10
veor d17, d17, d31
# s[7] => s[11]
veor d11, d7, d27
vshr.u64 d31, d11, #58
vshl.u64 d11, d11, #6
veor d11, d11, d31
# s[10] => s[7]
veor d7, d10, d25
vshr.u64 d31, d7, #61
vshl.u64 d7, d7, #3
veor d7, d7, d31
# Row Mix
# Chi per row: s[x] ^= ~s[x+1] & s[x+2] (d25/d26 snapshot the first two
# lanes of each row; d30 still holds the displaced s[10] for row 2).
vmov d25, d0
vmov d26, d1
vbic d31, d2, d26
veor d0, d25, d31
vbic d31, d3, d2
veor d1, d26, d31
vbic d31, d4, d3
veor d2, d2, d31
vbic d31, d25, d4
veor d3, d3, d31
vbic d31, d26, d25
veor d4, d4, d31
vmov d25, d5
vmov d26, d6
vbic d31, d7, d26
veor d5, d25, d31
vbic d31, d8, d7
veor d6, d26, d31
vbic d31, d9, d8
veor d7, d7, d31
vbic d31, d25, d9
veor d8, d8, d31
vbic d31, d26, d25
veor d9, d9, d31
vmov d26, d11
vbic d31, d12, d26
veor d10, d30, d31
vbic d31, d13, d12
veor d11, d26, d31
vbic d31, d14, d13
veor d12, d12, d31
vbic d31, d30, d14
veor d13, d13, d31
vbic d31, d26, d30
veor d14, d14, d31
vmov d25, d15
vmov d26, d16
vbic d31, d17, d26
veor d15, d25, d31
vbic d31, d18, d17
veor d16, d26, d31
vbic d31, d19, d18
veor d17, d17, d31
vbic d31, d25, d19
veor d18, d18, d31
vbic d31, d26, d25
veor d19, d19, d31
vmov d25, d20
vmov d26, d21
vbic d31, d22, d26
veor d20, d25, d31
vbic d31, d23, d22
veor d21, d26, d31
vbic d31, d24, d23
veor d22, d22, d31
vbic d31, d25, d24
veor d23, d23, d31
vbic d31, d26, d25
veor d24, d24, d31
# Iota: XOR the round constant into lane 0; loop for all 24 rounds.
vld1.8 {d30}, [r1]!
subs r2, r2, #1
veor d0, d0, d30
bne L_sha3_arm32_neon_begin
# Write the permuted state back to memory.
vst1.8 {d0-d3}, [r0]!
vst1.8 {d4-d7}, [r0]!
vst1.8 {d8-d11}, [r0]!
vst1.8 {d12-d15}, [r0]!
vst1.8 {d16-d19}, [r0]!
vst1.8 {d20-d23}, [r0]!
vst1.8 {d24}, [r0]
add sp, sp, #16
vpop {d8-d15}
bx lr
.size BlockSha3,.-BlockSha3
#endif /* WOLFSSL_ARMASM_NO_NEON */
#ifdef WOLFSSL_ARMASM_NO_NEON
.text
.align 4
.globl BlockSha3
.type BlockSha3, %function
BlockSha3:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
sub sp, sp, #0xcc
adr r1, L_sha3_arm2_rt
mov r2, #12
L_sha3_arm32_begin:
str r2, [sp, #200]
# Round even
# Calc b[4]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #32]
ldr r5, [r0, #36]
#else
ldrd r4, r5, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #72]
ldr r7, [r0, #76]
#else
ldrd r6, r7, [r0, #72]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #112]
ldr r9, [r0, #116]
#else
ldrd r8, r9, [r0, #112]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r0, #152]
ldr r11, [r0, #156]
#else
ldrd r10, r11, [r0, #152]
#endif
ldr r12, [r0, #192]
ldr lr, [r0, #196]
eor r2, r4, r6
eor r3, r5, r7
eor r2, r2, r8
eor r3, r3, r9
eor r2, r2, r10
eor r3, r3, r11
eor r2, r2, r12
eor r3, r3, lr
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r2, [sp, #32]
str r3, [sp, #36]
#else
strd r2, r3, [sp, #32]
#endif
# Calc b[1]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #8]
ldr r5, [r0, #12]
#else
ldrd r4, r5, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #48]
ldr r7, [r0, #52]
#else
ldrd r6, r7, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #88]
ldr r9, [r0, #92]
#else
ldrd r8, r9, [r0, #88]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r0, #128]
ldr r11, [r0, #132]
#else
ldrd r10, r11, [r0, #128]
#endif
ldr r12, [r0, #168]
ldr lr, [r0, #172]
eor r4, r4, r6
eor r5, r5, r7
eor r4, r4, r8
eor r5, r5, r9
eor r4, r4, r10
eor r5, r5, r11
eor r4, r4, r12
eor r5, r5, lr
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #8]
str r5, [sp, #12]
#else
strd r4, r5, [sp, #8]
#endif
# Calc t[0]
eor r2, r2, r5, lsr #31
eor r3, r3, r4, lsr #31
eor r2, r2, r4, lsl #1
eor r3, r3, r5, lsl #1
# Calc b[0] and XOR t[0] into s[x*5+0]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #40]
ldr r7, [r0, #44]
#else
ldrd r6, r7, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #80]
ldr r9, [r0, #84]
#else
ldrd r8, r9, [r0, #80]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r0, #120]
ldr r11, [r0, #124]
#else
ldrd r10, r11, [r0, #120]
#endif
eor r12, r4, r6
eor lr, r5, r7
eor r12, r12, r8
eor lr, lr, r9
eor r12, r12, r10
eor lr, lr, r11
eor r4, r4, r2
eor r5, r5, r3
eor r6, r6, r2
eor r7, r7, r3
eor r8, r8, r2
eor r9, r9, r3
eor r10, r10, r2
eor r11, r11, r3
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0]
str r5, [r0, #4]
#else
strd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #40]
str r7, [r0, #44]
#else
strd r6, r7, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #80]
str r9, [r0, #84]
#else
strd r8, r9, [r0, #80]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r10, [r0, #120]
str r11, [r0, #124]
#else
strd r10, r11, [r0, #120]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r0, #160]
ldr r11, [r0, #164]
#else
ldrd r10, r11, [r0, #160]
#endif
eor r12, r12, r10
eor lr, lr, r11
eor r10, r10, r2
eor r11, r11, r3
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r10, [r0, #160]
str r11, [r0, #164]
#else
strd r10, r11, [r0, #160]
#endif
str r12, [sp]
str lr, [sp, #4]
# Calc b[3]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #24]
ldr r5, [r0, #28]
#else
ldrd r4, r5, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #64]
ldr r7, [r0, #68]
#else
ldrd r6, r7, [r0, #64]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #104]
ldr r9, [r0, #108]
#else
ldrd r8, r9, [r0, #104]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r0, #144]
ldr r11, [r0, #148]
#else
ldrd r10, r11, [r0, #144]
#endif
ldr r12, [r0, #184]
ldr lr, [r0, #188]
eor r4, r4, r6
eor r5, r5, r7
eor r4, r4, r8
eor r5, r5, r9
eor r4, r4, r10
eor r5, r5, r11
eor r4, r4, r12
eor r5, r5, lr
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #24]
str r5, [sp, #28]
#else
strd r4, r5, [sp, #24]
#endif
# Calc t[2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r2, [sp, #8]
ldr r3, [sp, #12]
#else
ldrd r2, r3, [sp, #8]
#endif
eor r2, r2, r5, lsr #31
eor r3, r3, r4, lsr #31
eor r2, r2, r4, lsl #1
eor r3, r3, r5, lsl #1
# Calc b[2] and XOR t[2] into s[x*5+2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #16]
ldr r5, [r0, #20]
#else
ldrd r4, r5, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #56]
ldr r7, [r0, #60]
#else
ldrd r6, r7, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #96]
ldr r9, [r0, #100]
#else
ldrd r8, r9, [r0, #96]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r0, #136]
ldr r11, [r0, #140]
#else
ldrd r10, r11, [r0, #136]
#endif
eor r12, r4, r6
eor lr, r5, r7
eor r12, r12, r8
eor lr, lr, r9
eor r12, r12, r10
eor lr, lr, r11
eor r4, r4, r2
eor r5, r5, r3
eor r6, r6, r2
eor r7, r7, r3
eor r8, r8, r2
eor r9, r9, r3
eor r10, r10, r2
eor r11, r11, r3
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #16]
str r5, [r0, #20]
#else
strd r4, r5, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #56]
str r7, [r0, #60]
#else
strd r6, r7, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #96]
str r9, [r0, #100]
#else
strd r8, r9, [r0, #96]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r10, [r0, #136]
str r11, [r0, #140]
#else
strd r10, r11, [r0, #136]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r0, #176]
ldr r11, [r0, #180]
#else
ldrd r10, r11, [r0, #176]
#endif
eor r12, r12, r10
eor lr, lr, r11
eor r10, r10, r2
eor r11, r11, r3
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r10, [r0, #176]
str r11, [r0, #180]
#else
strd r10, r11, [r0, #176]
#endif
str r12, [sp, #16]
str lr, [sp, #20]
# Calc t[1]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r2, [sp]
ldr r3, [sp, #4]
#else
ldrd r2, r3, [sp]
#endif
eor r2, r2, lr, lsr #31
eor r3, r3, r12, lsr #31
eor r2, r2, r12, lsl #1
eor r3, r3, lr, lsl #1
# XOR t[1] into s[x*5+1]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #8]
ldr r5, [r0, #12]
#else
ldrd r4, r5, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #48]
ldr r7, [r0, #52]
#else
ldrd r6, r7, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #88]
ldr r9, [r0, #92]
#else
ldrd r8, r9, [r0, #88]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r0, #128]
ldr r11, [r0, #132]
#else
ldrd r10, r11, [r0, #128]
#endif
ldr r12, [r0, #168]
ldr lr, [r0, #172]
eor r4, r4, r2
eor r5, r5, r3
eor r6, r6, r2
eor r7, r7, r3
eor r8, r8, r2
eor r9, r9, r3
eor r10, r10, r2
eor r11, r11, r3
eor r12, r12, r2
eor lr, lr, r3
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #8]
str r5, [r0, #12]
#else
strd r4, r5, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #48]
str r7, [r0, #52]
#else
strd r6, r7, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #88]
str r9, [r0, #92]
#else
strd r8, r9, [r0, #88]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r10, [r0, #128]
str r11, [r0, #132]
#else
strd r10, r11, [r0, #128]
#endif
str r12, [r0, #168]
str lr, [r0, #172]
# Calc t[3]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r2, [sp, #16]
ldr r3, [sp, #20]
#else
ldrd r2, r3, [sp, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #32]
ldr r5, [sp, #36]
#else
ldrd r4, r5, [sp, #32]
#endif
eor r2, r2, r5, lsr #31
eor r3, r3, r4, lsr #31
eor r2, r2, r4, lsl #1
eor r3, r3, r5, lsl #1
# XOR t[3] into s[x*5+3]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #24]
ldr r5, [r0, #28]
#else
ldrd r4, r5, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #64]
ldr r7, [r0, #68]
#else
ldrd r6, r7, [r0, #64]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #104]
ldr r9, [r0, #108]
#else
ldrd r8, r9, [r0, #104]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r0, #144]
ldr r11, [r0, #148]
#else
ldrd r10, r11, [r0, #144]
#endif
ldr r12, [r0, #184]
ldr lr, [r0, #188]
eor r4, r4, r2
eor r5, r5, r3
eor r6, r6, r2
eor r7, r7, r3
eor r8, r8, r2
eor r9, r9, r3
eor r10, r10, r2
eor r11, r11, r3
eor r12, r12, r2
eor lr, lr, r3
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #24]
str r5, [r0, #28]
#else
strd r4, r5, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #64]
str r7, [r0, #68]
#else
strd r6, r7, [r0, #64]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #104]
str r9, [r0, #108]
#else
strd r8, r9, [r0, #104]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r10, [r0, #144]
str r11, [r0, #148]
#else
strd r10, r11, [r0, #144]
#endif
str r12, [r0, #184]
str lr, [r0, #188]
# Calc t[4]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r2, [sp, #24]
ldr r3, [sp, #28]
#else
ldrd r2, r3, [sp, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp]
ldr r5, [sp, #4]
#else
ldrd r4, r5, [sp]
#endif
eor r2, r2, r5, lsr #31
eor r3, r3, r4, lsr #31
eor r2, r2, r4, lsl #1
eor r3, r3, r5, lsl #1
# XOR t[4] into s[x*5+4]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #32]
ldr r5, [r0, #36]
#else
ldrd r4, r5, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #72]
ldr r7, [r0, #76]
#else
ldrd r6, r7, [r0, #72]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #112]
ldr r9, [r0, #116]
#else
ldrd r8, r9, [r0, #112]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r0, #152]
ldr r11, [r0, #156]
#else
ldrd r10, r11, [r0, #152]
#endif
ldr r12, [r0, #192]
ldr lr, [r0, #196]
eor r4, r4, r2
eor r5, r5, r3
eor r6, r6, r2
eor r7, r7, r3
eor r8, r8, r2
eor r9, r9, r3
eor r10, r10, r2
eor r11, r11, r3
eor r12, r12, r2
eor lr, lr, r3
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #32]
str r5, [r0, #36]
#else
strd r4, r5, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #72]
str r7, [r0, #76]
#else
strd r6, r7, [r0, #72]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r0, #112]
str r9, [r0, #116]
#else
strd r8, r9, [r0, #112]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r10, [r0, #152]
str r11, [r0, #156]
#else
strd r10, r11, [r0, #152]
#endif
str r12, [r0, #192]
str lr, [r0, #196]
# Row Mix
# Row 0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r2, [r0]
ldr r3, [r0, #4]
#else
ldrd r2, r3, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #48]
ldr r5, [r0, #52]
#else
ldrd r4, r5, [r0, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #96]
ldr r7, [r0, #100]
#else
ldrd r6, r7, [r0, #96]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #144]
ldr r9, [r0, #148]
#else
ldrd r8, r9, [r0, #144]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r0, #192]
ldr r11, [r0, #196]
#else
ldrd r10, r11, [r0, #192]
#endif
# s[1] <<< 44
mov lr, r4
lsr r12, r5, #20
lsr r4, r4, #20
orr r4, r4, r5, lsl #12
orr r5, r12, lr, lsl #12
# s[2] <<< 43
mov lr, r6
lsr r12, r7, #21
lsr r6, r6, #21
orr r6, r6, r7, lsl #11
orr r7, r12, lr, lsl #11
# s[3] <<< 21
lsr r12, r9, #11
lsr lr, r8, #11
orr r8, r12, r8, lsl #21
orr r9, lr, r9, lsl #21
# s[4] <<< 14
lsr r12, r11, #18
lsr lr, r10, #18
orr r10, r12, r10, lsl #14
orr r11, lr, r11, lsl #14
bic r12, r8, r6
bic lr, r9, r7
eor r12, r12, r4
eor lr, lr, r5
str r12, [sp, #8]
str lr, [sp, #12]
bic r12, r10, r8
bic lr, r11, r9
eor r12, r12, r6
eor lr, lr, r7
str r12, [sp, #16]
str lr, [sp, #20]
bic r12, r2, r10
bic lr, r3, r11
eor r12, r12, r8
eor lr, lr, r9
str r12, [sp, #24]
str lr, [sp, #28]
bic r12, r4, r2
bic lr, r5, r3
eor r12, r12, r10
eor lr, lr, r11
str r12, [sp, #32]
str lr, [sp, #36]
# Get constant
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1]
ldr r11, [r1, #4]
#else
ldrd r10, r11, [r1]
#endif
add r1, r1, #8
bic r12, r6, r4
bic lr, r7, r5
eor r12, r12, r2
eor lr, lr, r3
# XOR in constant
eor r12, r12, r10
eor lr, lr, r11
str r12, [sp]
str lr, [sp, #4]
# Row 1
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r2, [r0, #24]
ldr r3, [r0, #28]
#else
ldrd r2, r3, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #72]
ldr r5, [r0, #76]
#else
ldrd r4, r5, [r0, #72]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #80]
ldr r7, [r0, #84]
#else
ldrd r6, r7, [r0, #80]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #128]
ldr r9, [r0, #132]
#else
ldrd r8, r9, [r0, #128]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r0, #176]
ldr r11, [r0, #180]
#else
ldrd r10, r11, [r0, #176]
#endif
# s[0] <<< 28
lsr r12, r3, #4
lsr lr, r2, #4
orr r2, r12, r2, lsl #28
orr r3, lr, r3, lsl #28
# s[1] <<< 20
lsr r12, r5, #12
lsr lr, r4, #12
orr r4, r12, r4, lsl #20
orr r5, lr, r5, lsl #20
# s[2] <<< 3
lsr r12, r7, #29
lsr lr, r6, #29
orr r6, r12, r6, lsl #3
orr r7, lr, r7, lsl #3
# s[3] <<< 45
mov lr, r8
lsr r12, r9, #19
lsr r8, r8, #19
orr r8, r8, r9, lsl #13
orr r9, r12, lr, lsl #13
# s[4] <<< 61
mov lr, r10
lsr r12, r11, #3
lsr r10, r10, #3
orr r10, r10, r11, lsl #29
orr r11, r12, lr, lsl #29
bic r12, r8, r6
bic lr, r9, r7
eor r12, r12, r4
eor lr, lr, r5
str r12, [sp, #48]
str lr, [sp, #52]
bic r12, r10, r8
bic lr, r11, r9
eor r12, r12, r6
eor lr, lr, r7
str r12, [sp, #56]
str lr, [sp, #60]
bic r12, r2, r10
bic lr, r3, r11
eor r12, r12, r8
eor lr, lr, r9
str r12, [sp, #64]
str lr, [sp, #68]
bic r12, r4, r2
bic lr, r5, r3
eor r12, r12, r10
eor lr, lr, r11
str r12, [sp, #72]
str lr, [sp, #76]
bic r12, r6, r4
bic lr, r7, r5
eor r12, r12, r2
eor lr, lr, r3
str r12, [sp, #40]
str lr, [sp, #44]
# Row 2
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r2, [r0, #8]
ldr r3, [r0, #12]
#else
ldrd r2, r3, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #56]
ldr r5, [r0, #60]
#else
ldrd r4, r5, [r0, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #104]
ldr r7, [r0, #108]
#else
ldrd r6, r7, [r0, #104]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #152]
ldr r9, [r0, #156]
#else
ldrd r8, r9, [r0, #152]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r0, #160]
ldr r11, [r0, #164]
#else
ldrd r10, r11, [r0, #160]
#endif
# s[0] <<< 1
lsr r12, r3, #31
lsr lr, r2, #31
orr r2, r12, r2, lsl #1
orr r3, lr, r3, lsl #1
# s[1] <<< 6
lsr r12, r5, #26
lsr lr, r4, #26
orr r4, r12, r4, lsl #6
orr r5, lr, r5, lsl #6
# s[2] <<< 25
lsr r12, r7, #7
lsr lr, r6, #7
orr r6, r12, r6, lsl #25
orr r7, lr, r7, lsl #25
# s[3] <<< 8
lsr r12, r9, #24
lsr lr, r8, #24
orr r8, r12, r8, lsl #8
orr r9, lr, r9, lsl #8
# s[4] <<< 18
lsr r12, r11, #14
lsr lr, r10, #14
orr r10, r12, r10, lsl #18
orr r11, lr, r11, lsl #18
bic r12, r8, r6
bic lr, r9, r7
eor r12, r12, r4
eor lr, lr, r5
str r12, [sp, #88]
str lr, [sp, #92]
bic r12, r10, r8
bic lr, r11, r9
eor r12, r12, r6
eor lr, lr, r7
str r12, [sp, #96]
str lr, [sp, #100]
bic r12, r2, r10
bic lr, r3, r11
eor r12, r12, r8
eor lr, lr, r9
str r12, [sp, #104]
str lr, [sp, #108]
bic r12, r4, r2
bic lr, r5, r3
eor r12, r12, r10
eor lr, lr, r11
str r12, [sp, #112]
str lr, [sp, #116]
bic r12, r6, r4
bic lr, r7, r5
eor r12, r12, r2
eor lr, lr, r3
str r12, [sp, #80]
str lr, [sp, #84]
# Row 3
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r2, [r0, #32]
ldr r3, [r0, #36]
#else
ldrd r2, r3, [r0, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #40]
ldr r5, [r0, #44]
#else
ldrd r4, r5, [r0, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #88]
ldr r7, [r0, #92]
#else
ldrd r6, r7, [r0, #88]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #136]
ldr r9, [r0, #140]
#else
ldrd r8, r9, [r0, #136]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r0, #184]
ldr r11, [r0, #188]
#else
ldrd r10, r11, [r0, #184]
#endif
# s[0] <<< 27
lsr r12, r3, #5
lsr lr, r2, #5
orr r2, r12, r2, lsl #27
orr r3, lr, r3, lsl #27
# s[1] <<< 36
mov lr, r4
lsr r12, r5, #28
lsr r4, r4, #28
orr r4, r4, r5, lsl #4
orr r5, r12, lr, lsl #4
# s[2] <<< 10
lsr r12, r7, #22
lsr lr, r6, #22
orr r6, r12, r6, lsl #10
orr r7, lr, r7, lsl #10
# s[3] <<< 15
lsr r12, r9, #17
lsr lr, r8, #17
orr r8, r12, r8, lsl #15
orr r9, lr, r9, lsl #15
# s[4] <<< 56
mov lr, r10
lsr r12, r11, #8
lsr r10, r10, #8
orr r10, r10, r11, lsl #24
orr r11, r12, lr, lsl #24
bic r12, r8, r6
bic lr, r9, r7
eor r12, r12, r4
eor lr, lr, r5
str r12, [sp, #128]
str lr, [sp, #132]
bic r12, r10, r8
bic lr, r11, r9
eor r12, r12, r6
eor lr, lr, r7
str r12, [sp, #136]
str lr, [sp, #140]
bic r12, r2, r10
bic lr, r3, r11
eor r12, r12, r8
eor lr, lr, r9
str r12, [sp, #144]
str lr, [sp, #148]
bic r12, r4, r2
bic lr, r5, r3
eor r12, r12, r10
eor lr, lr, r11
str r12, [sp, #152]
str lr, [sp, #156]
bic r12, r6, r4
bic lr, r7, r5
eor r12, r12, r2
eor lr, lr, r3
str r12, [sp, #120]
str lr, [sp, #124]
# Row 4
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r2, [r0, #16]
ldr r3, [r0, #20]
#else
ldrd r2, r3, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #64]
ldr r5, [r0, #68]
#else
ldrd r4, r5, [r0, #64]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #112]
ldr r7, [r0, #116]
#else
ldrd r6, r7, [r0, #112]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #120]
ldr r9, [r0, #124]
#else
ldrd r8, r9, [r0, #120]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r0, #168]
ldr r11, [r0, #172]
#else
ldrd r10, r11, [r0, #168]
#endif
# s[0] <<< 62
mov lr, r2
lsr r12, r3, #2
lsr r2, r2, #2
orr r2, r2, r3, lsl #30
orr r3, r12, lr, lsl #30
# s[1] <<< 55
mov lr, r4
lsr r12, r5, #9
lsr r4, r4, #9
orr r4, r4, r5, lsl #23
orr r5, r12, lr, lsl #23
# s[2] <<< 39
mov lr, r6
lsr r12, r7, #25
lsr r6, r6, #25
orr r6, r6, r7, lsl #7
orr r7, r12, lr, lsl #7
# s[3] <<< 41
mov lr, r8
lsr r12, r9, #23
lsr r8, r8, #23
orr r8, r8, r9, lsl #9
orr r9, r12, lr, lsl #9
# s[4] <<< 2
lsr r12, r11, #30
lsr lr, r10, #30
orr r10, r12, r10, lsl #2
orr r11, lr, r11, lsl #2
bic r12, r8, r6
bic lr, r9, r7
eor r12, r12, r4
eor lr, lr, r5
str r12, [sp, #168]
str lr, [sp, #172]
bic r12, r10, r8
bic lr, r11, r9
eor r12, r12, r6
eor lr, lr, r7
str r12, [sp, #176]
str lr, [sp, #180]
bic r12, r2, r10
bic lr, r3, r11
eor r12, r12, r8
eor lr, lr, r9
str r12, [sp, #184]
str lr, [sp, #188]
bic r12, r4, r2
bic lr, r5, r3
eor r12, r12, r10
eor lr, lr, r11
str r12, [sp, #192]
str lr, [sp, #196]
bic r12, r6, r4
bic lr, r7, r5
eor r12, r12, r2
eor lr, lr, r3
str r12, [sp, #160]
str lr, [sp, #164]
# Round odd
# Calc b[4]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #32]
ldr r5, [sp, #36]
#else
ldrd r4, r5, [sp, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [sp, #72]
ldr r7, [sp, #76]
#else
ldrd r6, r7, [sp, #72]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #112]
ldr r9, [sp, #116]
#else
ldrd r8, r9, [sp, #112]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [sp, #152]
ldr r11, [sp, #156]
#else
ldrd r10, r11, [sp, #152]
#endif
ldr r12, [sp, #192]
ldr lr, [sp, #196]
eor r2, r4, r6
eor r3, r5, r7
eor r2, r2, r8
eor r3, r3, r9
eor r2, r2, r10
eor r3, r3, r11
eor r2, r2, r12
eor r3, r3, lr
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r2, [r0, #32]
str r3, [r0, #36]
#else
strd r2, r3, [r0, #32]
#endif
# Calc b[1]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #8]
ldr r5, [sp, #12]
#else
ldrd r4, r5, [sp, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [sp, #48]
ldr r7, [sp, #52]
#else
ldrd r6, r7, [sp, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #88]
ldr r9, [sp, #92]
#else
ldrd r8, r9, [sp, #88]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [sp, #128]
ldr r11, [sp, #132]
#else
ldrd r10, r11, [sp, #128]
#endif
ldr r12, [sp, #168]
ldr lr, [sp, #172]
eor r4, r4, r6
eor r5, r5, r7
eor r4, r4, r8
eor r5, r5, r9
eor r4, r4, r10
eor r5, r5, r11
eor r4, r4, r12
eor r5, r5, lr
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #8]
str r5, [r0, #12]
#else
strd r4, r5, [r0, #8]
#endif
# Calc t[0]
eor r2, r2, r5, lsr #31
eor r3, r3, r4, lsr #31
eor r2, r2, r4, lsl #1
eor r3, r3, r5, lsl #1
# Calc b[0] and XOR t[0] into s[x*5+0]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp]
ldr r5, [sp, #4]
#else
ldrd r4, r5, [sp]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [sp, #40]
ldr r7, [sp, #44]
#else
ldrd r6, r7, [sp, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #80]
ldr r9, [sp, #84]
#else
ldrd r8, r9, [sp, #80]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [sp, #120]
ldr r11, [sp, #124]
#else
ldrd r10, r11, [sp, #120]
#endif
eor r12, r4, r6
eor lr, r5, r7
eor r12, r12, r8
eor lr, lr, r9
eor r12, r12, r10
eor lr, lr, r11
eor r4, r4, r2
eor r5, r5, r3
eor r6, r6, r2
eor r7, r7, r3
eor r8, r8, r2
eor r9, r9, r3
eor r10, r10, r2
eor r11, r11, r3
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp]
str r5, [sp, #4]
#else
strd r4, r5, [sp]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [sp, #40]
str r7, [sp, #44]
#else
strd r6, r7, [sp, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [sp, #80]
str r9, [sp, #84]
#else
strd r8, r9, [sp, #80]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r10, [sp, #120]
str r11, [sp, #124]
#else
strd r10, r11, [sp, #120]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [sp, #160]
ldr r11, [sp, #164]
#else
ldrd r10, r11, [sp, #160]
#endif
eor r12, r12, r10
eor lr, lr, r11
eor r10, r10, r2
eor r11, r11, r3
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r10, [sp, #160]
str r11, [sp, #164]
#else
strd r10, r11, [sp, #160]
#endif
str r12, [r0]
str lr, [r0, #4]
# Calc b[3]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #24]
ldr r5, [sp, #28]
#else
ldrd r4, r5, [sp, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [sp, #64]
ldr r7, [sp, #68]
#else
ldrd r6, r7, [sp, #64]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #104]
ldr r9, [sp, #108]
#else
ldrd r8, r9, [sp, #104]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [sp, #144]
ldr r11, [sp, #148]
#else
ldrd r10, r11, [sp, #144]
#endif
ldr r12, [sp, #184]
ldr lr, [sp, #188]
eor r4, r4, r6
eor r5, r5, r7
eor r4, r4, r8
eor r5, r5, r9
eor r4, r4, r10
eor r5, r5, r11
eor r4, r4, r12
eor r5, r5, lr
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #24]
str r5, [r0, #28]
#else
strd r4, r5, [r0, #24]
#endif
# Calc t[2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r2, [r0, #8]
ldr r3, [r0, #12]
#else
ldrd r2, r3, [r0, #8]
#endif
eor r2, r2, r5, lsr #31
eor r3, r3, r4, lsr #31
eor r2, r2, r4, lsl #1
eor r3, r3, r5, lsl #1
# Calc b[2] and XOR t[2] into s[x*5+2]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #16]
ldr r5, [sp, #20]
#else
ldrd r4, r5, [sp, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [sp, #56]
ldr r7, [sp, #60]
#else
ldrd r6, r7, [sp, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #96]
ldr r9, [sp, #100]
#else
ldrd r8, r9, [sp, #96]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [sp, #136]
ldr r11, [sp, #140]
#else
ldrd r10, r11, [sp, #136]
#endif
eor r12, r4, r6
eor lr, r5, r7
eor r12, r12, r8
eor lr, lr, r9
eor r12, r12, r10
eor lr, lr, r11
eor r4, r4, r2
eor r5, r5, r3
eor r6, r6, r2
eor r7, r7, r3
eor r8, r8, r2
eor r9, r9, r3
eor r10, r10, r2
eor r11, r11, r3
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #16]
str r5, [sp, #20]
#else
strd r4, r5, [sp, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [sp, #56]
str r7, [sp, #60]
#else
strd r6, r7, [sp, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [sp, #96]
str r9, [sp, #100]
#else
strd r8, r9, [sp, #96]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r10, [sp, #136]
str r11, [sp, #140]
#else
strd r10, r11, [sp, #136]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [sp, #176]
ldr r11, [sp, #180]
#else
ldrd r10, r11, [sp, #176]
#endif
eor r12, r12, r10
eor lr, lr, r11
eor r10, r10, r2
eor r11, r11, r3
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r10, [sp, #176]
str r11, [sp, #180]
#else
strd r10, r11, [sp, #176]
#endif
str r12, [r0, #16]
str lr, [r0, #20]
# Calc t[1]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r2, [r0]
ldr r3, [r0, #4]
#else
ldrd r2, r3, [r0]
#endif
eor r2, r2, lr, lsr #31
eor r3, r3, r12, lsr #31
eor r2, r2, r12, lsl #1
eor r3, r3, lr, lsl #1
# XOR t[1] into s[x*5+1]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #8]
ldr r5, [sp, #12]
#else
ldrd r4, r5, [sp, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [sp, #48]
ldr r7, [sp, #52]
#else
ldrd r6, r7, [sp, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #88]
ldr r9, [sp, #92]
#else
ldrd r8, r9, [sp, #88]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [sp, #128]
ldr r11, [sp, #132]
#else
ldrd r10, r11, [sp, #128]
#endif
ldr r12, [sp, #168]
ldr lr, [sp, #172]
eor r4, r4, r2
eor r5, r5, r3
eor r6, r6, r2
eor r7, r7, r3
eor r8, r8, r2
eor r9, r9, r3
eor r10, r10, r2
eor r11, r11, r3
eor r12, r12, r2
eor lr, lr, r3
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #8]
str r5, [sp, #12]
#else
strd r4, r5, [sp, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [sp, #48]
str r7, [sp, #52]
#else
strd r6, r7, [sp, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [sp, #88]
str r9, [sp, #92]
#else
strd r8, r9, [sp, #88]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r10, [sp, #128]
str r11, [sp, #132]
#else
strd r10, r11, [sp, #128]
#endif
str r12, [sp, #168]
str lr, [sp, #172]
# Calc t[3]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r2, [r0, #16]
ldr r3, [r0, #20]
#else
ldrd r2, r3, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #32]
ldr r5, [r0, #36]
#else
ldrd r4, r5, [r0, #32]
#endif
eor r2, r2, r5, lsr #31
eor r3, r3, r4, lsr #31
eor r2, r2, r4, lsl #1
eor r3, r3, r5, lsl #1
# XOR t[3] into s[x*5+3]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #24]
ldr r5, [sp, #28]
#else
ldrd r4, r5, [sp, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [sp, #64]
ldr r7, [sp, #68]
#else
ldrd r6, r7, [sp, #64]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #104]
ldr r9, [sp, #108]
#else
ldrd r8, r9, [sp, #104]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [sp, #144]
ldr r11, [sp, #148]
#else
ldrd r10, r11, [sp, #144]
#endif
ldr r12, [sp, #184]
ldr lr, [sp, #188]
eor r4, r4, r2
eor r5, r5, r3
eor r6, r6, r2
eor r7, r7, r3
eor r8, r8, r2
eor r9, r9, r3
eor r10, r10, r2
eor r11, r11, r3
eor r12, r12, r2
eor lr, lr, r3
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #24]
str r5, [sp, #28]
#else
strd r4, r5, [sp, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [sp, #64]
str r7, [sp, #68]
#else
strd r6, r7, [sp, #64]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [sp, #104]
str r9, [sp, #108]
#else
strd r8, r9, [sp, #104]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r10, [sp, #144]
str r11, [sp, #148]
#else
strd r10, r11, [sp, #144]
#endif
str r12, [sp, #184]
str lr, [sp, #188]
# Calc t[4]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r2, [r0, #24]
ldr r3, [r0, #28]
#else
ldrd r2, r3, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
eor r2, r2, r5, lsr #31
eor r3, r3, r4, lsr #31
eor r2, r2, r4, lsl #1
eor r3, r3, r5, lsl #1
# XOR t[4] into s[x*5+4]
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #32]
ldr r5, [sp, #36]
#else
ldrd r4, r5, [sp, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [sp, #72]
ldr r7, [sp, #76]
#else
ldrd r6, r7, [sp, #72]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #112]
ldr r9, [sp, #116]
#else
ldrd r8, r9, [sp, #112]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [sp, #152]
ldr r11, [sp, #156]
#else
ldrd r10, r11, [sp, #152]
#endif
ldr r12, [sp, #192]
ldr lr, [sp, #196]
eor r4, r4, r2
eor r5, r5, r3
eor r6, r6, r2
eor r7, r7, r3
eor r8, r8, r2
eor r9, r9, r3
eor r10, r10, r2
eor r11, r11, r3
eor r12, r12, r2
eor lr, lr, r3
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #32]
str r5, [sp, #36]
#else
strd r4, r5, [sp, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [sp, #72]
str r7, [sp, #76]
#else
strd r6, r7, [sp, #72]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [sp, #112]
str r9, [sp, #116]
#else
strd r8, r9, [sp, #112]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r10, [sp, #152]
str r11, [sp, #156]
#else
strd r10, r11, [sp, #152]
#endif
str r12, [sp, #192]
str lr, [sp, #196]
# Row Mix
# Row 0
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r2, [sp]
ldr r3, [sp, #4]
#else
ldrd r2, r3, [sp]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #48]
ldr r5, [sp, #52]
#else
ldrd r4, r5, [sp, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [sp, #96]
ldr r7, [sp, #100]
#else
ldrd r6, r7, [sp, #96]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #144]
ldr r9, [sp, #148]
#else
ldrd r8, r9, [sp, #144]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [sp, #192]
ldr r11, [sp, #196]
#else
ldrd r10, r11, [sp, #192]
#endif
# s[1] <<< 44
mov lr, r4
lsr r12, r5, #20
lsr r4, r4, #20
orr r4, r4, r5, lsl #12
orr r5, r12, lr, lsl #12
# s[2] <<< 43
mov lr, r6
lsr r12, r7, #21
lsr r6, r6, #21
orr r6, r6, r7, lsl #11
orr r7, r12, lr, lsl #11
# s[3] <<< 21
lsr r12, r9, #11
lsr lr, r8, #11
orr r8, r12, r8, lsl #21
orr r9, lr, r9, lsl #21
# s[4] <<< 14
lsr r12, r11, #18
lsr lr, r10, #18
orr r10, r12, r10, lsl #14
orr r11, lr, r11, lsl #14
bic r12, r8, r6
bic lr, r9, r7
eor r12, r12, r4
eor lr, lr, r5
str r12, [r0, #8]
str lr, [r0, #12]
bic r12, r10, r8
bic lr, r11, r9
eor r12, r12, r6
eor lr, lr, r7
str r12, [r0, #16]
str lr, [r0, #20]
bic r12, r2, r10
bic lr, r3, r11
eor r12, r12, r8
eor lr, lr, r9
str r12, [r0, #24]
str lr, [r0, #28]
bic r12, r4, r2
bic lr, r5, r3
eor r12, r12, r10
eor lr, lr, r11
str r12, [r0, #32]
str lr, [r0, #36]
# Get constant
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r1]
ldr r11, [r1, #4]
#else
ldrd r10, r11, [r1]
#endif
add r1, r1, #8
bic r12, r6, r4
bic lr, r7, r5
eor r12, r12, r2
eor lr, lr, r3
# XOR in constant
eor r12, r12, r10
eor lr, lr, r11
str r12, [r0]
str lr, [r0, #4]
# Row 1
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r2, [sp, #24]
ldr r3, [sp, #28]
#else
ldrd r2, r3, [sp, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #72]
ldr r5, [sp, #76]
#else
ldrd r4, r5, [sp, #72]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [sp, #80]
ldr r7, [sp, #84]
#else
ldrd r6, r7, [sp, #80]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #128]
ldr r9, [sp, #132]
#else
ldrd r8, r9, [sp, #128]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [sp, #176]
ldr r11, [sp, #180]
#else
ldrd r10, r11, [sp, #176]
#endif
# s[0] <<< 28
lsr r12, r3, #4
lsr lr, r2, #4
orr r2, r12, r2, lsl #28
orr r3, lr, r3, lsl #28
# s[1] <<< 20
lsr r12, r5, #12
lsr lr, r4, #12
orr r4, r12, r4, lsl #20
orr r5, lr, r5, lsl #20
# s[2] <<< 3
lsr r12, r7, #29
lsr lr, r6, #29
orr r6, r12, r6, lsl #3
orr r7, lr, r7, lsl #3
# s[3] <<< 45
mov lr, r8
lsr r12, r9, #19
lsr r8, r8, #19
orr r8, r8, r9, lsl #13
orr r9, r12, lr, lsl #13
# s[4] <<< 61
mov lr, r10
lsr r12, r11, #3
lsr r10, r10, #3
orr r10, r10, r11, lsl #29
orr r11, r12, lr, lsl #29
bic r12, r8, r6
bic lr, r9, r7
eor r12, r12, r4
eor lr, lr, r5
str r12, [r0, #48]
str lr, [r0, #52]
bic r12, r10, r8
bic lr, r11, r9
eor r12, r12, r6
eor lr, lr, r7
str r12, [r0, #56]
str lr, [r0, #60]
bic r12, r2, r10
bic lr, r3, r11
eor r12, r12, r8
eor lr, lr, r9
str r12, [r0, #64]
str lr, [r0, #68]
bic r12, r4, r2
bic lr, r5, r3
eor r12, r12, r10
eor lr, lr, r11
str r12, [r0, #72]
str lr, [r0, #76]
bic r12, r6, r4
bic lr, r7, r5
eor r12, r12, r2
eor lr, lr, r3
str r12, [r0, #40]
str lr, [r0, #44]
# Row 2
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r2, [sp, #8]
ldr r3, [sp, #12]
#else
ldrd r2, r3, [sp, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #56]
ldr r5, [sp, #60]
#else
ldrd r4, r5, [sp, #56]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [sp, #104]
ldr r7, [sp, #108]
#else
ldrd r6, r7, [sp, #104]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #152]
ldr r9, [sp, #156]
#else
ldrd r8, r9, [sp, #152]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [sp, #160]
ldr r11, [sp, #164]
#else
ldrd r10, r11, [sp, #160]
#endif
# s[0] <<< 1
lsr r12, r3, #31
lsr lr, r2, #31
orr r2, r12, r2, lsl #1
orr r3, lr, r3, lsl #1
# s[1] <<< 6
lsr r12, r5, #26
lsr lr, r4, #26
orr r4, r12, r4, lsl #6
orr r5, lr, r5, lsl #6
# s[2] <<< 25
lsr r12, r7, #7
lsr lr, r6, #7
orr r6, r12, r6, lsl #25
orr r7, lr, r7, lsl #25
# s[3] <<< 8
lsr r12, r9, #24
lsr lr, r8, #24
orr r8, r12, r8, lsl #8
orr r9, lr, r9, lsl #8
# s[4] <<< 18
lsr r12, r11, #14
lsr lr, r10, #14
orr r10, r12, r10, lsl #18
orr r11, lr, r11, lsl #18
bic r12, r8, r6
bic lr, r9, r7
eor r12, r12, r4
eor lr, lr, r5
str r12, [r0, #88]
str lr, [r0, #92]
bic r12, r10, r8
bic lr, r11, r9
eor r12, r12, r6
eor lr, lr, r7
str r12, [r0, #96]
str lr, [r0, #100]
bic r12, r2, r10
bic lr, r3, r11
eor r12, r12, r8
eor lr, lr, r9
str r12, [r0, #104]
str lr, [r0, #108]
bic r12, r4, r2
bic lr, r5, r3
eor r12, r12, r10
eor lr, lr, r11
str r12, [r0, #112]
str lr, [r0, #116]
bic r12, r6, r4
bic lr, r7, r5
eor r12, r12, r2
eor lr, lr, r3
str r12, [r0, #80]
str lr, [r0, #84]
# Row 3
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r2, [sp, #32]
ldr r3, [sp, #36]
#else
ldrd r2, r3, [sp, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #40]
ldr r5, [sp, #44]
#else
ldrd r4, r5, [sp, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [sp, #88]
ldr r7, [sp, #92]
#else
ldrd r6, r7, [sp, #88]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #136]
ldr r9, [sp, #140]
#else
ldrd r8, r9, [sp, #136]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [sp, #184]
ldr r11, [sp, #188]
#else
ldrd r10, r11, [sp, #184]
#endif
# s[0] <<< 27
lsr r12, r3, #5
lsr lr, r2, #5
orr r2, r12, r2, lsl #27
orr r3, lr, r3, lsl #27
# s[1] <<< 36
mov lr, r4
lsr r12, r5, #28
lsr r4, r4, #28
orr r4, r4, r5, lsl #4
orr r5, r12, lr, lsl #4
# s[2] <<< 10
lsr r12, r7, #22
lsr lr, r6, #22
orr r6, r12, r6, lsl #10
orr r7, lr, r7, lsl #10
# s[3] <<< 15
lsr r12, r9, #17
lsr lr, r8, #17
orr r8, r12, r8, lsl #15
orr r9, lr, r9, lsl #15
# s[4] <<< 56
mov lr, r10
lsr r12, r11, #8
lsr r10, r10, #8
orr r10, r10, r11, lsl #24
orr r11, r12, lr, lsl #24
bic r12, r8, r6
bic lr, r9, r7
eor r12, r12, r4
eor lr, lr, r5
str r12, [r0, #128]
str lr, [r0, #132]
bic r12, r10, r8
bic lr, r11, r9
eor r12, r12, r6
eor lr, lr, r7
str r12, [r0, #136]
str lr, [r0, #140]
bic r12, r2, r10
bic lr, r3, r11
eor r12, r12, r8
eor lr, lr, r9
str r12, [r0, #144]
str lr, [r0, #148]
bic r12, r4, r2
bic lr, r5, r3
eor r12, r12, r10
eor lr, lr, r11
str r12, [r0, #152]
str lr, [r0, #156]
bic r12, r6, r4
bic lr, r7, r5
eor r12, r12, r2
eor lr, lr, r3
str r12, [r0, #120]
str lr, [r0, #124]
# Row 4
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r2, [sp, #16]
ldr r3, [sp, #20]
#else
ldrd r2, r3, [sp, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [sp, #64]
ldr r5, [sp, #68]
#else
ldrd r4, r5, [sp, #64]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [sp, #112]
ldr r7, [sp, #116]
#else
ldrd r6, r7, [sp, #112]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #120]
ldr r9, [sp, #124]
#else
ldrd r8, r9, [sp, #120]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [sp, #168]
ldr r11, [sp, #172]
#else
ldrd r10, r11, [sp, #168]
#endif
# s[0] <<< 62
mov lr, r2
lsr r12, r3, #2
lsr r2, r2, #2
orr r2, r2, r3, lsl #30
orr r3, r12, lr, lsl #30
# s[1] <<< 55
mov lr, r4
lsr r12, r5, #9
lsr r4, r4, #9
orr r4, r4, r5, lsl #23
orr r5, r12, lr, lsl #23
# s[2] <<< 39
mov lr, r6
lsr r12, r7, #25
lsr r6, r6, #25
orr r6, r6, r7, lsl #7
orr r7, r12, lr, lsl #7
# s[3] <<< 41
mov lr, r8
lsr r12, r9, #23
lsr r8, r8, #23
orr r8, r8, r9, lsl #9
orr r9, r12, lr, lsl #9
# s[4] <<< 2
lsr r12, r11, #30
lsr lr, r10, #30
orr r10, r12, r10, lsl #2
orr r11, lr, r11, lsl #2
bic r12, r8, r6
bic lr, r9, r7
eor r12, r12, r4
eor lr, lr, r5
str r12, [r0, #168]
str lr, [r0, #172]
bic r12, r10, r8
bic lr, r11, r9
eor r12, r12, r6
eor lr, lr, r7
str r12, [r0, #176]
str lr, [r0, #180]
bic r12, r2, r10
bic lr, r3, r11
eor r12, r12, r8
eor lr, lr, r9
str r12, [r0, #184]
str lr, [r0, #188]
bic r12, r4, r2
bic lr, r5, r3
eor r12, r12, r10
eor lr, lr, r11
str r12, [r0, #192]
str lr, [r0, #196]
bic r12, r6, r4
bic lr, r7, r5
eor r12, r12, r2
eor lr, lr, r3
str r12, [r0, #160]
str lr, [r0, #164]
ldr r2, [sp, #200]
subs r2, r2, #1
bne L_sha3_arm32_begin
add sp, sp, #0xcc
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size BlockSha3,.-BlockSha3
#endif /* WOLFSSL_ARMASM_NO_NEON */
#endif /* !__aarch64__ && __arm__ && !__thumb__ */
#endif /* WOLFSSL_ARMASM */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
#endif /* !WOLFSSL_ARMASM_INLINE */
/* NOTE(review): extraction artifact removed here — repository metadata
 * (repo id, file size, path) had been fused into the file content. A
 * second source file, wolfcrypt/src/port/arm/thumb2-curve25519.S, is
 * concatenated below this point. */
/* thumb2-curve25519
*
* Copyright (C) 2006-2023 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
/* Generated using (from wolfssl):
* cd ../scripts
* ruby ./x25519/x25519.rb thumb2 ../wolfssl/wolfcrypt/src/port/arm/thumb2-curve25519.S
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif /* HAVE_CONFIG_H */
#include <wolfssl/wolfcrypt/settings.h>
#ifdef WOLFSSL_ARMASM
#if !defined(__aarch64__) && defined(__thumb__)
#ifndef WOLFSSL_ARMASM_INLINE
.thumb
.syntax unified
#if defined(HAVE_CURVE25519) || defined(HAVE_ED25519)
#if !defined(CURVE25519_SMALL) || !defined(ED25519_SMALL)
.text
.align 4
.globl fe_init
.type fe_init, %function
/* void fe_init(void)
 * No-op: this Curve25519 field implementation needs no runtime setup. */
fe_init:
BX lr
/* Cycle Count = 4 */
.size fe_init,.-fe_init
.text
.align 4
.globl fe_add_sub_op
.type fe_add_sub_op, %function
/* Compute the sum and the difference of two field elements in one pass.
 * In:  r2 = a, r3 = b  (pointers to 8 x 32-bit little-endian limbs)
 * Out: [r0] = a + b mod 2^255-19, [r1] = a - b mod 2^255-19
 * The two carry chains are interleaved: the add carry is parked in r12
 * and the sub borrow in lr while the other chain runs; `SUBS reg, reg, #1`
 * re-materializes the parked flag into the C flag before resuming. */
fe_add_sub_op:
PUSH {lr}
/* Add-Sub */
LDRD r4, r5, [r2]
LDRD r6, r7, [r3]
/* Add */
ADDS r8, r4, r6
MOV r12, #0x0
ADCS r9, r5, r7
ADC r12, r12, #0x0
STRD r8, r9, [r0]
/* Sub */
SUBS r10, r4, r6
SBCS r11, r5, r7
STRD r10, r11, [r1]
LDRD r4, r5, [r2, #8]
LDRD r6, r7, [r3, #8]
/* Sub */
SBCS r10, r4, r6
MOV lr, #0x0
SBCS r11, r5, r7
/* Park the sub borrow (C flag) in lr. */
ADC lr, lr, #0x0
STRD r10, r11, [r1, #8]
/* Add */
/* Restore the parked add carry from r12 into the C flag. */
SUBS r12, r12, #0x1
ADCS r8, r4, r6
ADCS r9, r5, r7
STRD r8, r9, [r0, #8]
LDRD r4, r5, [r2, #16]
LDRD r6, r7, [r3, #16]
/* Add */
ADCS r8, r4, r6
MOV r12, #0x0
ADCS r9, r5, r7
ADC r12, r12, #0x0
STRD r8, r9, [r0, #16]
/* Sub */
/* Restore the parked sub borrow from lr into the C flag. */
SUBS lr, lr, #0x1
SBCS r10, r4, r6
SBCS r11, r5, r7
STRD r10, r11, [r1, #16]
LDRD r4, r5, [r2, #24]
LDRD r6, r7, [r3, #24]
/* Sub */
SBCS r10, r4, r6
SBC r11, r5, r7
/* Add */
SUBS r12, r12, #0x1
ADCS r8, r4, r6
MOV r12, #0x0
ADCS r9, r5, r7
ADC r12, r12, #0x0
/* Multiply -modulus by overflow */
/* r3 = (carry out of bit 256) * 2 + bit 255; one reduction step adds
 * 19 * r3 because 2^255 = 19 mod p. */
LSL r3, r12, #1
MOV r12, #0x13
ORR r3, r3, r9, LSR #31
MUL r12, r3, r12
/* Add -x*modulus (if overflow) */
LDRD r4, r5, [r0]
LDRD r6, r7, [r0, #8]
ADDS r4, r4, r12
ADCS r5, r5, #0x0
ADCS r6, r6, #0x0
ADCS r7, r7, #0x0
STRD r4, r5, [r0]
STRD r6, r7, [r0, #8]
LDRD r4, r5, [r0, #16]
ADCS r4, r4, #0x0
ADCS r5, r5, #0x0
STRD r4, r5, [r0, #16]
/* Clear bit 255 of the sum (it was folded into the 19*r3 term). */
BFC r9, #31, #1
ADCS r8, r8, #0x0
ADC r9, r9, #0x0
STRD r8, r9, [r0, #24]
/* Add -modulus on underflow */
/* lr = 19 if the subtraction went negative (top word sign bit), else 0. */
MOV lr, #0x13
AND lr, lr, r11, ASR #31
LDM r1, {r4, r5, r6, r7, r8, r9}
SUBS r4, r4, lr
SBCS r5, r5, #0x0
SBCS r6, r6, #0x0
SBCS r7, r7, #0x0
SBCS r8, r8, #0x0
SBCS r9, r9, #0x0
BFC r11, #31, #1
SBCS r10, r10, #0x0
SBC r11, r11, #0x0
STM r1, {r4, r5, r6, r7, r8, r9, r10, r11}
/* Done Add-Sub */
POP {pc}
/* Cycle Count = 134 */
.size fe_add_sub_op,.-fe_add_sub_op
.text
.align 4
.globl fe_sub_op
.type fe_sub_op, %function
/* [r0] = [r1] - [r2] mod 2^255-19 (8 x 32-bit limbs each).
 * Operand pointers r1/r2 are advanced by the post-incrementing LDMs.
 * A borrow is folded back by conditionally subtracting 19 and clearing
 * bit 255, so the result stays below 2^255. */
fe_sub_op:
PUSH {lr}
/* Sub */
LDM r2!, {r6, r7, r8, r9, r10, r11, r12, lr}
LDM r1!, {r2, r3, r4, r5}
SUBS r6, r2, r6
SBCS r7, r3, r7
SBCS r8, r4, r8
SBCS r9, r5, r9
LDM r1!, {r2, r3, r4, r5}
SBCS r10, r2, r10
SBCS r11, r3, r11
SBCS r12, r4, r12
SBC lr, r5, lr
/* r2 = 19 if the result went negative (sign of top word), else 0. */
MOV r2, #0x13
AND r2, r2, lr, ASR #31
SUBS r6, r6, r2
SBCS r7, r7, #0x0
SBCS r8, r8, #0x0
SBCS r9, r9, #0x0
SBCS r10, r10, #0x0
SBCS r11, r11, #0x0
/* Drop bit 255: it was compensated by the 19 subtracted above. */
BFC lr, #31, #1
SBCS r12, r12, #0x0
SBC lr, lr, #0x0
STM r0, {r6, r7, r8, r9, r10, r11, r12, lr}
/* Done Sub */
POP {pc}
/* Cycle Count = 51 */
.size fe_sub_op,.-fe_sub_op
.text
.align 4
.globl fe_sub
.type fe_sub, %function
/* fe_sub(fe r, const fe a, const fe b): r = a - b mod 2^255-19.
 * Public wrapper: saves the callee-saved registers clobbered by
 * fe_sub_op, then delegates to it. */
fe_sub:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
BL fe_sub_op
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 24 */
.size fe_sub,.-fe_sub
.text
.align 4
.globl fe_add_op
.type fe_add_op, %function
/* [r0] = [r1] + [r2] mod 2^255-19 (8 x 32-bit limbs each).
 * Operand pointers r1/r2 are advanced by the post-incrementing LDMs.
 * Overflow past bit 254 is folded back by conditionally adding 19 and
 * clearing bit 255 (2^255 = 19 mod p). */
fe_add_op:
PUSH {lr}
/* Add */
LDM r2!, {r6, r7, r8, r9, r10, r11, r12, lr}
LDM r1!, {r2, r3, r4, r5}
ADDS r6, r2, r6
ADCS r7, r3, r7
ADCS r8, r4, r8
ADCS r9, r5, r9
LDM r1!, {r2, r3, r4, r5}
ADCS r10, r2, r10
ADCS r11, r3, r11
ADCS r12, r4, r12
ADC lr, r5, lr
/* r2 = 19 if bit 255 of the raw sum is set, else 0. */
MOV r2, #0x13
AND r2, r2, lr, ASR #31
ADDS r6, r6, r2
ADCS r7, r7, #0x0
ADCS r8, r8, #0x0
ADCS r9, r9, #0x0
ADCS r10, r10, #0x0
ADCS r11, r11, #0x0
/* Clear bit 255: it was compensated by the 19 added above. */
BFC lr, #31, #1
ADCS r12, r12, #0x0
ADC lr, lr, #0x0
STM r0, {r6, r7, r8, r9, r10, r11, r12, lr}
/* Done Add */
POP {pc}
/* Cycle Count = 51 */
.size fe_add_op,.-fe_add_op
.text
.align 4
.globl fe_add
.type fe_add, %function
/* fe_add(fe r, const fe a, const fe b): r = a + b mod 2^255-19.
 * Public wrapper: saves the callee-saved registers clobbered by
 * fe_add_op, then delegates to it. */
fe_add:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
BL fe_add_op
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 24 */
.size fe_add,.-fe_add
#ifdef HAVE_ED25519
.text
.align 4
.globl fe_frombytes
.type fe_frombytes, %function
/* fe_frombytes(fe r, const byte in[32]): load a little-endian 32-byte
 * string into a field element, masking off bit 255 (the top bit is not
 * part of the 255-bit field value). */
fe_frombytes:
PUSH {r4, r5, r6, r7, r8, r9, lr}
LDR r2, [r1]
LDR r3, [r1, #4]
LDR r4, [r1, #8]
LDR r5, [r1, #12]
LDR r6, [r1, #16]
LDR r7, [r1, #20]
LDR r8, [r1, #24]
LDR r9, [r1, #28]
/* Clear bit 255 of the encoding. */
BFC r9, #31, #1
STR r2, [r0]
STR r3, [r0, #4]
STR r4, [r0, #8]
STR r5, [r0, #12]
STR r6, [r0, #16]
STR r7, [r0, #20]
STR r8, [r0, #24]
STR r9, [r0, #28]
POP {r4, r5, r6, r7, r8, r9, pc}
/* Cycle Count = 49 */
.size fe_frombytes,.-fe_frombytes
.text
.align 4
.globl fe_tobytes
.type fe_tobytes, %function
/* fe_tobytes(byte out[32], const fe a): serialize a field element to a
 * canonical little-endian 32-byte string.
 * Canonicalization: compute the top word of a + 19; if bit 255 is set
 * then a >= p, so add 19 and drop bit 255 — i.e. store a - p. */
fe_tobytes:
PUSH {r4, r5, r6, r7, r8, r9, r10, lr}
LDM r1, {r2, r3, r4, r5, r6, r7, r8, r9}
/* r10 = top word of a + 19 (only the final carry word is kept). */
ADDS r10, r2, #0x13
ADCS r10, r3, #0x0
ADCS r10, r4, #0x0
ADCS r10, r5, #0x0
ADCS r10, r6, #0x0
ADCS r10, r7, #0x0
ADCS r10, r8, #0x0
ADC r10, r9, #0x0
/* r10 = 19 if a + 19 reached bit 255 (a >= p), else 0. */
ASR r10, r10, #31
AND r10, r10, #0x13
ADDS r2, r2, r10
ADCS r3, r3, #0x0
ADCS r4, r4, #0x0
ADCS r5, r5, #0x0
ADCS r6, r6, #0x0
ADCS r7, r7, #0x0
ADCS r8, r8, #0x0
ADC r9, r9, #0x0
BFC r9, #31, #1
STR r2, [r0]
STR r3, [r0, #4]
STR r4, [r0, #8]
STR r5, [r0, #12]
STR r6, [r0, #16]
STR r7, [r0, #20]
STR r8, [r0, #24]
STR r9, [r0, #28]
POP {r4, r5, r6, r7, r8, r9, r10, pc}
/* Cycle Count = 62 */
.size fe_tobytes,.-fe_tobytes
.text
.align 4
.globl fe_1
.type fe_1, %function
/* fe_1(fe r): set the field element at r0 to 1 (limb 0 = 1, rest 0). */
fe_1:
PUSH {r4, r5, r6, r7, r8, r9, lr}
/* Set one */
MOV r2, #0x1
MOV r3, #0x0
MOV r4, #0x0
MOV r5, #0x0
MOV r6, #0x0
MOV r7, #0x0
MOV r8, #0x0
MOV r9, #0x0
STM r0, {r2, r3, r4, r5, r6, r7, r8, r9}
POP {r4, r5, r6, r7, r8, r9, pc}
/* Cycle Count = 33 */
.size fe_1,.-fe_1
.text
.align 4
.globl fe_0
.type fe_0, %function
/* fe_0(fe r): set the field element at r0 to 0 (all 8 limbs zero). */
fe_0:
PUSH {r4, r5, r6, r7, r8, r9, lr}
/* Set zero */
MOV r2, #0x0
MOV r3, #0x0
MOV r4, #0x0
MOV r5, #0x0
MOV r6, #0x0
MOV r7, #0x0
MOV r8, #0x0
MOV r9, #0x0
STM r0, {r2, r3, r4, r5, r6, r7, r8, r9}
POP {r4, r5, r6, r7, r8, r9, pc}
/* Cycle Count = 33 */
.size fe_0,.-fe_0
.text
.align 4
.globl fe_copy
.type fe_copy, %function
/* fe_copy(fe r, const fe a): copy the 32-byte field element at r1 to r0,
 * 8 bytes at a time via LDRD/STRD. */
fe_copy:
PUSH {r4, r5, lr}
/* Copy */
LDRD r2, r3, [r1]
LDRD r4, r5, [r1, #8]
STRD r2, r3, [r0]
STRD r4, r5, [r0, #8]
LDRD r2, r3, [r1, #16]
LDRD r4, r5, [r1, #24]
STRD r2, r3, [r0, #16]
STRD r4, r5, [r0, #24]
POP {r4, r5, pc}
/* Cycle Count = 32 */
.size fe_copy,.-fe_copy
.text
.align 4
.globl fe_neg
.type fe_neg, %function
/* fe_neg(fe r, const fe a): r = -a mod 2^255-19, computed as p - a with
 * p's limbs {0xffffffed, 0xffffffff x 6, 0x7fffffff}.
 * Assumes a < p so no extra reduction is needed — TODO confirm against
 * callers. */
fe_neg:
PUSH {r4, r5, r6, r7, lr}
MVN r7, #0x0
MVN r6, #0x12
LDM r1!, {r2, r3, r4, r5}
/* Low half: (0xffffffed, 0xffffffff, ...) - a with borrow chain. */
SUBS r2, r6, r2
SBCS r3, r7, r3
SBCS r4, r7, r4
SBCS r5, r7, r5
STM r0!, {r2, r3, r4, r5}
/* Top limb of p is 0x7fffffff. */
MVN r6, #0x80000000
LDM r1!, {r2, r3, r4, r5}
SBCS r2, r7, r2
SBCS r3, r7, r3
SBCS r4, r7, r4
SBC r5, r6, r5
STM r0!, {r2, r3, r4, r5}
POP {r4, r5, r6, r7, pc}
/* Cycle Count = 43 */
.size fe_neg,.-fe_neg
.text
.align 4
.globl fe_isnonzero
.type fe_isnonzero, %function
/* fe_isnonzero(const fe a): return nonzero in r0 iff a != 0 mod p.
 * First canonicalizes (same add-19/propagate/mask-bit-255 trick as
 * fe_tobytes), then ORs all eight limbs together. */
fe_isnonzero:
PUSH {r4, r5, r6, r7, r8, r9, r10, lr}
LDM r0, {r2, r3, r4, r5, r6, r7, r8, r9}
/* r1 = top word of a + 19. */
ADDS r1, r2, #0x13
ADCS r1, r3, #0x0
ADCS r1, r4, #0x0
ADCS r1, r5, #0x0
ADCS r1, r6, #0x0
ADCS r1, r7, #0x0
ADCS r1, r8, #0x0
ADC r1, r9, #0x0
/* r1 = 19 if a >= p, else 0; add it and clear bit 255 to canonicalize. */
ASR r1, r1, #31
AND r1, r1, #0x13
ADDS r2, r2, r1
ADCS r3, r3, #0x0
ADCS r4, r4, #0x0
ADCS r5, r5, #0x0
ADCS r6, r6, #0x0
ADCS r7, r7, #0x0
ADCS r8, r8, #0x0
ADC r9, r9, #0x0
BFC r9, #31, #1
/* OR-reduce all limbs into r0. */
ORR r2, r2, r3
ORR r4, r4, r5
ORR r6, r6, r7
ORR r8, r8, r9
ORR r4, r4, r6
ORR r2, r2, r8
ORR r0, r2, r4
POP {r4, r5, r6, r7, r8, r9, r10, pc}
/* Cycle Count = 53 */
.size fe_isnonzero,.-fe_isnonzero
.text
.align 4
.globl fe_isnegative
.type fe_isnegative, %function
/* fe_isnegative(const fe a): return the low bit (parity) of the canonical
 * form of a. r1 becomes the top word of a + 19; if its bit 31 is set,
 * a >= p and reduction by the odd modulus p flips the parity, hence the
 * final EOR with that bit. */
fe_isnegative:
PUSH {r4, r5, lr}
LDM r0!, {r2, r3, r4, r5}
ADDS r1, r2, #0x13
ADCS r1, r3, #0x0
ADCS r1, r4, #0x0
ADCS r1, r5, #0x0
LDM r0, {r2, r3, r4, r5}
ADCS r1, r2, #0x0
ADCS r1, r3, #0x0
ADCS r1, r4, #0x0
/* Re-fetch limb 0 (r0 was advanced by 16 by the LDM above). */
LDR r2, [r0, #-16]
ADC r1, r5, #0x0
AND r0, r2, #0x1
LSR r1, r1, #31
EOR r0, r0, r1
POP {r4, r5, pc}
/* Cycle Count = 31 */
.size fe_isnegative,.-fe_isnegative
#if defined(HAVE_ED25519_MAKE_KEY) || defined(HAVE_ED25519_SIGN)
#ifndef WC_NO_CACHE_RESISTANT
.text
.align 4
.globl fe_cmov_table
.type fe_cmov_table, %function
/* Constant-time (cache-attack resistant) lookup of a precomputed-point
 * table entry.
 * In:  r1 = table base: 8 entries of 0x60 bytes, each holding three
 *          32-byte field elements at entry offsets 0, 32 and 64
 *          (presumably y+x, y-x, 2dxy of an Ed25519 precomp point —
 *          verify against callers),
 *      r2 = signed index b (sign-extended byte; |b| in 0..8).
 * Out: [r0] = selected entry; index 0 yields the identity (1, 1, 0).
 * Every entry is read and merged under a mask, so the memory access
 * pattern is independent of b. For negative b the first two elements
 * are swapped and the third is replaced by p - t (negation mod
 * 2^255-19). Register pressure forces four passes, each handling one
 * 64-bit word pair of all three elements; the negation borrow is
 * carried between passes in lr. */
fe_cmov_table:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
SXTB r2, r2
/* r12 = |b|: fold out the sign via r3 = sign bit replicated. */
SBFX r3, r2, #7, #1
EOR r12, r2, r3
SUB r12, r12, r3
/* ---- Pass 1: word pair 0; accumulators start at identity (1, 1, 0). */
MOV r4, #0x1
MOV r5, #0x0
MOV r6, #0x1
MOV r7, #0x0
MOV r8, #0x0
MOV r9, #0x0
/* Entry 1: r3 = (0x80000000 ror (32-1)) ror |b|, asr 31
 *        = all-ones iff |b| == 1, else 0; same scheme for entries 2..8. */
MOV r3, #0x80000000
ROR r3, r3, #31
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #32]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #64]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 2 */
MOV r3, #0x80000000
ROR r3, r3, #30
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #32]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #64]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 3 */
MOV r3, #0x80000000
ROR r3, r3, #29
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #32]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #64]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 4 */
MOV r3, #0x80000000
ROR r3, r3, #28
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #32]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #64]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 5 */
MOV r3, #0x80000000
ROR r3, r3, #27
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #32]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #64]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 6 */
MOV r3, #0x80000000
ROR r3, r3, #26
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #32]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #64]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 7 */
MOV r3, #0x80000000
ROR r3, r3, #25
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #32]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #64]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 8 */
MOV r3, #0x80000000
ROR r3, r3, #24
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #32]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #64]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
/* Rewind r1 to the table base (7 * 0x60 = 0x2a0). */
SUB r1, r1, #0x2a0
/* r10:r11 = low word pair of p - t (p limbs 0xffffffed, 0xffffffff);
 * borrow is saved in lr for the next pass. */
MVN r10, #0x12
MVN r11, #0x0
SUBS r10, r10, r8
SBCS r11, r11, r9
SBC lr, lr, lr
/* If b < 0 (r12 = sign mask): swap elements 0/1 and take the negated
 * third element. */
ASR r12, r2, #31
EOR r3, r4, r6
AND r3, r3, r12
EOR r4, r4, r3
EOR r6, r6, r3
EOR r3, r5, r7
AND r3, r3, r12
EOR r5, r5, r3
EOR r7, r7, r3
EOR r10, r10, r8
AND r10, r10, r12
EOR r8, r8, r10
EOR r11, r11, r9
AND r11, r11, r12
EOR r9, r9, r11
STRD r4, r5, [r0]
STRD r6, r7, [r0, #32]
STRD r8, r9, [r0, #64]
/* ---- Pass 2: word pair 1 (entry offsets +8/+40/+72); identity words
 * are zero here. */
SBFX r3, r2, #7, #1
EOR r12, r2, r3
SUB r12, r12, r3
MOV r4, #0x0
MOV r5, #0x0
MOV r6, #0x0
MOV r7, #0x0
MOV r8, #0x0
MOV r9, #0x0
/* Entry 1 */
MOV r3, #0x80000000
ROR r3, r3, #31
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #8]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #40]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #72]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 2 */
MOV r3, #0x80000000
ROR r3, r3, #30
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #8]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #40]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #72]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 3 */
MOV r3, #0x80000000
ROR r3, r3, #29
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #8]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #40]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #72]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 4 */
MOV r3, #0x80000000
ROR r3, r3, #28
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #8]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #40]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #72]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 5 */
MOV r3, #0x80000000
ROR r3, r3, #27
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #8]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #40]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #72]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 6 */
MOV r3, #0x80000000
ROR r3, r3, #26
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #8]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #40]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #72]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 7 */
MOV r3, #0x80000000
ROR r3, r3, #25
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #8]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #40]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #72]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 8 */
MOV r3, #0x80000000
ROR r3, r3, #24
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #8]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #40]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #72]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
SUB r1, r1, #0x2a0
/* Continue p - t on the next word pair: RSBS re-materializes the saved
 * borrow (lr) into the C flag. */
MVN r10, #0x0
MVN r11, #0x0
RSBS lr, lr, #0x0
SBCS r10, r10, r8
SBCS r11, r11, r9
SBC lr, lr, lr
ASR r12, r2, #31
EOR r3, r4, r6
AND r3, r3, r12
EOR r4, r4, r3
EOR r6, r6, r3
EOR r3, r5, r7
AND r3, r3, r12
EOR r5, r5, r3
EOR r7, r7, r3
EOR r10, r10, r8
AND r10, r10, r12
EOR r8, r8, r10
EOR r11, r11, r9
AND r11, r11, r12
EOR r9, r9, r11
STRD r4, r5, [r0, #8]
STRD r6, r7, [r0, #40]
STRD r8, r9, [r0, #72]
/* ---- Pass 3: word pair 2 (entry offsets +16/+48/+80). */
SBFX r3, r2, #7, #1
EOR r12, r2, r3
SUB r12, r12, r3
MOV r4, #0x0
MOV r5, #0x0
MOV r6, #0x0
MOV r7, #0x0
MOV r8, #0x0
MOV r9, #0x0
/* Entry 1 */
MOV r3, #0x80000000
ROR r3, r3, #31
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #16]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #48]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #80]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 2 */
MOV r3, #0x80000000
ROR r3, r3, #30
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #16]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #48]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #80]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 3 */
MOV r3, #0x80000000
ROR r3, r3, #29
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #16]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #48]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #80]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 4 */
MOV r3, #0x80000000
ROR r3, r3, #28
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #16]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #48]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #80]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 5 */
MOV r3, #0x80000000
ROR r3, r3, #27
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #16]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #48]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #80]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 6 */
MOV r3, #0x80000000
ROR r3, r3, #26
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #16]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #48]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #80]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 7 */
MOV r3, #0x80000000
ROR r3, r3, #25
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #16]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #48]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #80]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 8 */
MOV r3, #0x80000000
ROR r3, r3, #24
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #16]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #48]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #80]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
SUB r1, r1, #0x2a0
/* Continue p - t (word pair 2); borrow in from lr, borrow out to lr. */
MVN r10, #0x0
MVN r11, #0x0
RSBS lr, lr, #0x0
SBCS r10, r10, r8
SBCS r11, r11, r9
SBC lr, lr, lr
ASR r12, r2, #31
EOR r3, r4, r6
AND r3, r3, r12
EOR r4, r4, r3
EOR r6, r6, r3
EOR r3, r5, r7
AND r3, r3, r12
EOR r5, r5, r3
EOR r7, r7, r3
EOR r10, r10, r8
AND r10, r10, r12
EOR r8, r8, r10
EOR r11, r11, r9
AND r11, r11, r12
EOR r9, r9, r11
STRD r4, r5, [r0, #16]
STRD r6, r7, [r0, #48]
STRD r8, r9, [r0, #80]
/* ---- Pass 4: word pair 3 (entry offsets +24/+56/+88). */
SBFX r3, r2, #7, #1
EOR r12, r2, r3
SUB r12, r12, r3
MOV r4, #0x0
MOV r5, #0x0
MOV r6, #0x0
MOV r7, #0x0
MOV r8, #0x0
MOV r9, #0x0
/* Entry 1 */
MOV r3, #0x80000000
ROR r3, r3, #31
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #24]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #56]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #88]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 2 */
MOV r3, #0x80000000
ROR r3, r3, #30
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #24]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #56]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #88]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 3 */
MOV r3, #0x80000000
ROR r3, r3, #29
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #24]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #56]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #88]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 4 */
MOV r3, #0x80000000
ROR r3, r3, #28
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #24]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #56]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #88]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 5 */
MOV r3, #0x80000000
ROR r3, r3, #27
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #24]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #56]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #88]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 6 */
MOV r3, #0x80000000
ROR r3, r3, #26
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #24]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #56]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #88]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 7 */
MOV r3, #0x80000000
ROR r3, r3, #25
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #24]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #56]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #88]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
ADD r1, r1, #0x60
/* Entry 8 */
MOV r3, #0x80000000
ROR r3, r3, #24
ROR r3, r3, r12
ASR r3, r3, #31
LDRD r10, r11, [r1, #24]
EOR r10, r10, r4
EOR r11, r11, r5
AND r10, r10, r3
AND r11, r11, r3
EOR r4, r4, r10
EOR r5, r5, r11
LDRD r10, r11, [r1, #56]
EOR r10, r10, r6
EOR r11, r11, r7
AND r10, r10, r3
AND r11, r11, r3
EOR r6, r6, r10
EOR r7, r7, r11
LDRD r10, r11, [r1, #88]
EOR r10, r10, r8
EOR r11, r11, r9
AND r10, r10, r3
AND r11, r11, r3
EOR r8, r8, r10
EOR r9, r9, r11
SUB r1, r1, #0x2a0
/* Final word pair of p - t: top limb of p is 0x7fffffff. */
MVN r10, #0x0
MVN r11, #0x80000000
RSBS lr, lr, #0x0
SBCS r10, r10, r8
SBC r11, r11, r9
ASR r12, r2, #31
EOR r3, r4, r6
AND r3, r3, r12
EOR r4, r4, r3
EOR r6, r6, r3
EOR r3, r5, r7
AND r3, r3, r12
EOR r5, r5, r3
EOR r7, r7, r3
EOR r10, r10, r8
AND r10, r10, r12
EOR r8, r8, r10
EOR r11, r11, r9
AND r11, r11, r12
EOR r9, r9, r11
STRD r4, r5, [r0, #24]
STRD r6, r7, [r0, #56]
STRD r8, r9, [r0, #88]
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 1195 */
.size fe_cmov_table,.-fe_cmov_table
#else
.text
.align 4
.globl fe_cmov_table
.type fe_cmov_table, %function
/* Table lookup of a precomputed-point entry — NOT cache-attack
 * resistant: only the addressed entry is loaded from memory.
 * In:  r1 = table base (8 entries, 0x60 bytes each: three 32-byte field
 *          elements), r2 = signed index b (sign-extended byte,
 *          |b| in 0..8).
 * Out: [r0] = selected entry; index 0 yields the identity (1, 1, 0).
 * lr = 0 when |b| == 0, else all-ones; r3 = sign mask of b (all-ones
 * when b < 0). For negative b the first two elements swap destination
 * slots and the third element is replaced by p - t. */
fe_cmov_table:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
SXTB r2, r2
/* r2 = |b|, r3 = sign mask. */
SBFX r3, r2, #7, #1
EOR r2, r2, r3
SUB r2, r2, r3
/* lr = (|b| == 0) ? 0 : 0xffffffff  (CLZ(0) = 32, so bit 31 is set
 * after << 26 only in the zero case). */
CLZ lr, r2
LSL lr, lr, #26
ASR lr, lr, #31
MVN lr, lr
/* r2 = byte offset of entry |b|-1 (0 when b == 0; that load is then
 * masked to zero below). */
ADD r2, r2, lr
MOV r12, #0x60
MUL r2, r2, r12
ADD r1, r1, r2
LDM r1!, {r4, r5, r6, r7, r8, r9, r10, r11}
AND r4, r4, lr
AND r5, r5, lr
AND r6, r6, lr
AND r7, r7, lr
AND r8, r8, lr
AND r9, r9, lr
AND r10, r10, lr
AND r11, r11, lr
/* When |b| == 0 (lr = 0) this adds 1 to the zeroed element. */
MVN r12, lr
SUB r4, r4, r12
/* Store to r0 or r0+0x20 depending on sign: swaps elements 0 and 1. */
MOV r12, #0x20
AND r12, r12, r3
ADD r0, r0, r12
STM r0, {r4, r5, r6, r7, r8, r9, r10, r11}
SUB r0, r0, r12
LDM r1!, {r4, r5, r6, r7, r8, r9, r10, r11}
AND r4, r4, lr
AND r5, r5, lr
AND r6, r6, lr
AND r7, r7, lr
AND r8, r8, lr
AND r9, r9, lr
AND r10, r10, lr
AND r11, r11, lr
MVN r12, lr
SUB r4, r4, r12
MOV r12, #0x20
BIC r12, r12, r3
ADD r0, r0, r12
STM r0, {r4, r5, r6, r7, r8, r9, r10, r11}
SUB r0, r0, r12
ADD r0, r0, #0x40
/* Third element: select t or p - t by the sign mask r3
 * (p limbs: 0xffffffed, 0xffffffff x 6, 0x7fffffff). */
LDM r1!, {r4, r5, r6, r7}
MVN r12, #0x12
SUBS r8, r12, r4
SBCS r9, r3, r5
SBCS r10, r3, r6
SBCS r11, r3, r7
BIC r4, r4, r3
BIC r5, r5, r3
BIC r6, r6, r3
BIC r7, r7, r3
AND r8, r8, r3
AND r9, r9, r3
AND r10, r10, r3
AND r11, r11, r3
ORR r4, r4, r8
ORR r5, r5, r9
ORR r6, r6, r10
ORR r7, r7, r11
AND r4, r4, lr
AND r5, r5, lr
AND r6, r6, lr
AND r7, r7, lr
STM r0!, {r4, r5, r6, r7}
LDM r1!, {r4, r5, r6, r7}
MVN r12, #0x80000000
SBCS r8, r3, r4
SBCS r9, r3, r5
SBCS r10, r3, r6
SBC r11, r12, r7
BIC r4, r4, r3
BIC r5, r5, r3
BIC r6, r6, r3
BIC r7, r7, r3
AND r8, r8, r3
AND r9, r9, r3
AND r10, r10, r3
AND r11, r11, r3
ORR r4, r4, r8
ORR r5, r5, r9
ORR r6, r6, r10
ORR r7, r7, r11
AND r4, r4, lr
AND r5, r5, lr
AND r6, r6, lr
AND r7, r7, lr
STM r0!, {r4, r5, r6, r7}
/* Restore r1 to the table base. */
SUB r1, r1, r2
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 160 */
.size fe_cmov_table,.-fe_cmov_table
#endif /* WC_NO_CACHE_RESISTANT */
#endif /* HAVE_ED25519_MAKE_KEY || HAVE_ED25519_SIGN */
#endif /* HAVE_ED25519 */
#ifdef WOLFSSL_SP_NO_UMAAL
/* fe_mul_op -- multiply two field elements mod 2^255-19 (no-UMAAL build).
 * r0 = result (8 x 32-bit little-endian limbs)
 * r1 = A (8 limbs), r2 = B (8 limbs)
 * Internal calling convention: r4-r11 are clobbered; callers (fe_mul etc.)
 * save them. Schoolbook 8x8 multiply accumulated with UMULL/UMLAL, low
 * half spilled to the stack, then the 512-bit product is reduced using
 * 2^256 == 38 (0x26) mod p and the final bit 255 folded in via *19 (0x13).
 */
.text
.align 4
.globl fe_mul_op
.type fe_mul_op, %function
fe_mul_op:
PUSH {lr}
SUB sp, sp, #0x28
STR r0, [sp, #36]
/* r0 = constant 0 for carry propagation below */
MOV r0, #0x0
LDR r12, [r1]
/* A[0] * B[0] */
LDR lr, [r2]
UMULL r3, r4, r12, lr
/* A[0] * B[2] */
LDR lr, [r2, #8]
UMULL r5, r6, r12, lr
/* A[0] * B[4] */
LDR lr, [r2, #16]
UMULL r7, r8, r12, lr
/* A[0] * B[6] */
LDR lr, [r2, #24]
UMULL r9, r10, r12, lr
STR r3, [sp]
/* A[0] * B[1] */
LDR lr, [r2, #4]
MOV r11, r0
UMLAL r4, r11, r12, lr
ADDS r5, r5, r11
/* A[0] * B[3] */
LDR lr, [r2, #12]
ADCS r6, r6, #0x0
ADC r11, r0, #0x0
UMLAL r6, r11, r12, lr
ADDS r7, r7, r11
/* A[0] * B[5] */
LDR lr, [r2, #20]
ADCS r8, r8, #0x0
ADC r11, r0, #0x0
UMLAL r8, r11, r12, lr
ADDS r9, r9, r11
/* A[0] * B[7] */
LDR lr, [r2, #28]
ADCS r10, r10, #0x0
ADC r3, r0, #0x0
UMLAL r10, r3, r12, lr
/* A[1] * B[0] */
LDR r12, [r1, #4]
LDR lr, [r2]
MOV r11, #0x0
UMLAL r4, r11, r12, lr
STR r4, [sp, #4]
ADDS r5, r5, r11
/* A[1] * B[1] */
LDR lr, [r2, #4]
ADC r11, r0, #0x0
UMLAL r5, r11, r12, lr
ADDS r6, r6, r11
/* A[1] * B[2] */
LDR lr, [r2, #8]
ADC r11, r0, #0x0
UMLAL r6, r11, r12, lr
ADDS r7, r7, r11
/* A[1] * B[3] */
LDR lr, [r2, #12]
ADC r11, r0, #0x0
UMLAL r7, r11, r12, lr
ADDS r8, r8, r11
/* A[1] * B[4] */
LDR lr, [r2, #16]
ADC r11, r0, #0x0
UMLAL r8, r11, r12, lr
ADDS r9, r9, r11
/* A[1] * B[5] */
LDR lr, [r2, #20]
ADC r11, r0, #0x0
UMLAL r9, r11, r12, lr
ADDS r10, r10, r11
/* A[1] * B[6] */
LDR lr, [r2, #24]
ADC r11, r0, #0x0
UMLAL r10, r11, r12, lr
ADDS r3, r3, r11
/* A[1] * B[7] */
LDR lr, [r2, #28]
ADC r4, r0, #0x0
UMLAL r3, r4, r12, lr
/* A[2] * B[0] */
LDR r12, [r1, #8]
LDR lr, [r2]
MOV r11, #0x0
UMLAL r5, r11, r12, lr
STR r5, [sp, #8]
ADDS r6, r6, r11
/* A[2] * B[1] */
LDR lr, [r2, #4]
ADC r11, r0, #0x0
UMLAL r6, r11, r12, lr
ADDS r7, r7, r11
/* A[2] * B[2] */
LDR lr, [r2, #8]
ADC r11, r0, #0x0
UMLAL r7, r11, r12, lr
ADDS r8, r8, r11
/* A[2] * B[3] */
LDR lr, [r2, #12]
ADC r11, r0, #0x0
UMLAL r8, r11, r12, lr
ADDS r9, r9, r11
/* A[2] * B[4] */
LDR lr, [r2, #16]
ADC r11, r0, #0x0
UMLAL r9, r11, r12, lr
ADDS r10, r10, r11
/* A[2] * B[5] */
LDR lr, [r2, #20]
ADC r11, r0, #0x0
UMLAL r10, r11, r12, lr
ADDS r3, r3, r11
/* A[2] * B[6] */
LDR lr, [r2, #24]
ADC r11, r0, #0x0
UMLAL r3, r11, r12, lr
ADDS r4, r4, r11
/* A[2] * B[7] */
LDR lr, [r2, #28]
ADC r5, r0, #0x0
UMLAL r4, r5, r12, lr
/* A[3] * B[0] */
LDR r12, [r1, #12]
LDR lr, [r2]
MOV r11, #0x0
UMLAL r6, r11, r12, lr
STR r6, [sp, #12]
ADDS r7, r7, r11
/* A[3] * B[1] */
LDR lr, [r2, #4]
ADC r11, r0, #0x0
UMLAL r7, r11, r12, lr
ADDS r8, r8, r11
/* A[3] * B[2] */
LDR lr, [r2, #8]
ADC r11, r0, #0x0
UMLAL r8, r11, r12, lr
ADDS r9, r9, r11
/* A[3] * B[3] */
LDR lr, [r2, #12]
ADC r11, r0, #0x0
UMLAL r9, r11, r12, lr
ADDS r10, r10, r11
/* A[3] * B[4] */
LDR lr, [r2, #16]
ADC r11, r0, #0x0
UMLAL r10, r11, r12, lr
ADDS r3, r3, r11
/* A[3] * B[5] */
LDR lr, [r2, #20]
ADC r11, r0, #0x0
UMLAL r3, r11, r12, lr
ADDS r4, r4, r11
/* A[3] * B[6] */
LDR lr, [r2, #24]
ADC r11, r0, #0x0
UMLAL r4, r11, r12, lr
ADDS r5, r5, r11
/* A[3] * B[7] */
LDR lr, [r2, #28]
ADC r6, r0, #0x0
UMLAL r5, r6, r12, lr
/* A[4] * B[0] */
LDR r12, [r1, #16]
LDR lr, [r2]
MOV r11, #0x0
UMLAL r7, r11, r12, lr
STR r7, [sp, #16]
ADDS r8, r8, r11
/* A[4] * B[1] */
LDR lr, [r2, #4]
ADC r11, r0, #0x0
UMLAL r8, r11, r12, lr
ADDS r9, r9, r11
/* A[4] * B[2] */
LDR lr, [r2, #8]
ADC r11, r0, #0x0
UMLAL r9, r11, r12, lr
ADDS r10, r10, r11
/* A[4] * B[3] */
LDR lr, [r2, #12]
ADC r11, r0, #0x0
UMLAL r10, r11, r12, lr
ADDS r3, r3, r11
/* A[4] * B[4] */
LDR lr, [r2, #16]
ADC r11, r0, #0x0
UMLAL r3, r11, r12, lr
ADDS r4, r4, r11
/* A[4] * B[5] */
LDR lr, [r2, #20]
ADC r11, r0, #0x0
UMLAL r4, r11, r12, lr
ADDS r5, r5, r11
/* A[4] * B[6] */
LDR lr, [r2, #24]
ADC r11, r0, #0x0
UMLAL r5, r11, r12, lr
ADDS r6, r6, r11
/* A[4] * B[7] */
LDR lr, [r2, #28]
ADC r7, r0, #0x0
UMLAL r6, r7, r12, lr
/* A[5] * B[0] */
LDR r12, [r1, #20]
LDR lr, [r2]
MOV r11, #0x0
UMLAL r8, r11, r12, lr
STR r8, [sp, #20]
ADDS r9, r9, r11
/* A[5] * B[1] */
LDR lr, [r2, #4]
ADC r11, r0, #0x0
UMLAL r9, r11, r12, lr
ADDS r10, r10, r11
/* A[5] * B[2] */
LDR lr, [r2, #8]
ADC r11, r0, #0x0
UMLAL r10, r11, r12, lr
ADDS r3, r3, r11
/* A[5] * B[3] */
LDR lr, [r2, #12]
ADC r11, r0, #0x0
UMLAL r3, r11, r12, lr
ADDS r4, r4, r11
/* A[5] * B[4] */
LDR lr, [r2, #16]
ADC r11, r0, #0x0
UMLAL r4, r11, r12, lr
ADDS r5, r5, r11
/* A[5] * B[5] */
LDR lr, [r2, #20]
ADC r11, r0, #0x0
UMLAL r5, r11, r12, lr
ADDS r6, r6, r11
/* A[5] * B[6] */
LDR lr, [r2, #24]
ADC r11, r0, #0x0
UMLAL r6, r11, r12, lr
ADDS r7, r7, r11
/* A[5] * B[7] */
LDR lr, [r2, #28]
ADC r8, r0, #0x0
UMLAL r7, r8, r12, lr
/* A[6] * B[0] */
LDR r12, [r1, #24]
LDR lr, [r2]
MOV r11, #0x0
UMLAL r9, r11, r12, lr
STR r9, [sp, #24]
ADDS r10, r10, r11
/* A[6] * B[1] */
LDR lr, [r2, #4]
ADC r11, r0, #0x0
UMLAL r10, r11, r12, lr
ADDS r3, r3, r11
/* A[6] * B[2] */
LDR lr, [r2, #8]
ADC r11, r0, #0x0
UMLAL r3, r11, r12, lr
ADDS r4, r4, r11
/* A[6] * B[3] */
LDR lr, [r2, #12]
ADC r11, r0, #0x0
UMLAL r4, r11, r12, lr
ADDS r5, r5, r11
/* A[6] * B[4] */
LDR lr, [r2, #16]
ADC r11, r0, #0x0
UMLAL r5, r11, r12, lr
ADDS r6, r6, r11
/* A[6] * B[5] */
LDR lr, [r2, #20]
ADC r11, r0, #0x0
UMLAL r6, r11, r12, lr
ADDS r7, r7, r11
/* A[6] * B[6] */
LDR lr, [r2, #24]
ADC r11, r0, #0x0
UMLAL r7, r11, r12, lr
ADDS r8, r8, r11
/* A[6] * B[7] */
LDR lr, [r2, #28]
ADC r9, r0, #0x0
UMLAL r8, r9, r12, lr
/* A[7] * B[0] */
LDR r12, [r1, #28]
LDR lr, [r2]
MOV r11, #0x0
UMLAL r10, r11, r12, lr
STR r10, [sp, #28]
ADDS r3, r3, r11
/* A[7] * B[1] */
LDR lr, [r2, #4]
ADC r11, r0, #0x0
UMLAL r3, r11, r12, lr
ADDS r4, r4, r11
/* A[7] * B[2] */
LDR lr, [r2, #8]
ADC r11, r0, #0x0
UMLAL r4, r11, r12, lr
ADDS r5, r5, r11
/* A[7] * B[3] */
LDR lr, [r2, #12]
ADC r11, r0, #0x0
UMLAL r5, r11, r12, lr
ADDS r6, r6, r11
/* A[7] * B[4] */
LDR lr, [r2, #16]
ADC r11, r0, #0x0
UMLAL r6, r11, r12, lr
ADDS r7, r7, r11
/* A[7] * B[5] */
LDR lr, [r2, #20]
ADC r11, r0, #0x0
UMLAL r7, r11, r12, lr
ADDS r8, r8, r11
/* A[7] * B[6] */
LDR lr, [r2, #24]
ADC r11, r0, #0x0
UMLAL r8, r11, r12, lr
ADDS r9, r9, r11
/* A[7] * B[7] */
LDR lr, [r2, #28]
ADC r10, r0, #0x0
UMLAL r9, r10, r12, lr
/* Reduce: high limbs (r3..r10) folded into low limbs (on stack) via
 * 2^256 == 38 mod p; the carry above bit 255 is folded back via *19. */
LDR r2, [sp, #28]
MOV lr, sp
MOV r12, #0x26
UMULL r10, r11, r10, r12
ADDS r10, r10, r2
ADC r11, r11, #0x0
MOV r12, #0x13
LSL r11, r11, #1
ORR r11, r11, r10, LSR #31
MUL r11, r11, r12
LDM lr!, {r1, r2}
MOV r12, #0x26
ADDS r1, r1, r11
ADC r11, r0, #0x0
UMLAL r1, r11, r3, r12
ADDS r2, r2, r11
ADC r11, r0, #0x0
UMLAL r2, r11, r4, r12
LDM lr!, {r3, r4}
ADDS r3, r3, r11
ADC r11, r0, #0x0
UMLAL r3, r11, r5, r12
ADDS r4, r4, r11
ADC r11, r0, #0x0
UMLAL r4, r11, r6, r12
LDM lr!, {r5, r6}
ADDS r5, r5, r11
ADC r11, r0, #0x0
UMLAL r5, r11, r7, r12
ADDS r6, r6, r11
ADC r11, r0, #0x0
UMLAL r6, r11, r8, r12
LDM lr!, {r7, r8}
ADDS r7, r7, r11
ADC r11, r0, #0x0
UMLAL r7, r11, r9, r12
/* Clear bit 255 (already folded in above) */
BFC r10, #31, #1
ADDS r8, r10, r11
/* Store */
LDR r0, [sp, #36]
STM r0, {r1, r2, r3, r4, r5, r6, r7, r8}
ADD sp, sp, #0x28
POP {pc}
/* Cycle Count = 406 */
.size fe_mul_op,.-fe_mul_op
#else
/* fe_mul_op -- multiply two field elements mod 2^255-19 (UMAAL build).
 * r0 = result (8 x 32-bit little-endian limbs)
 * r1 = A (8 limbs), r2 = B (8 limbs)
 * Internal calling convention: r4-r11 are clobbered; callers (fe_mul etc.)
 * save them. Uses UMAAL (a + b + x*y) to fuse multiply and double-carry
 * accumulation; low product limbs are spilled to the stack and popped
 * during the reduction, which folds the high half in via 2^256 == 38
 * mod p (0x26) and the final bit 255 via *19 (0x13).
 */
.text
.align 4
.globl fe_mul_op
.type fe_mul_op, %function
fe_mul_op:
PUSH {lr}
SUB sp, sp, #0x2c
/* Save result and A pointers; A/B are processed 4 limbs at a time */
STRD r0, r1, [sp, #36]
MOV lr, r2
LDM r1, {r0, r1, r2, r3}
LDM lr!, {r4, r5, r6}
UMULL r10, r11, r0, r4
UMULL r12, r7, r1, r4
UMAAL r11, r12, r0, r5
UMULL r8, r9, r2, r4
UMAAL r12, r8, r1, r5
UMAAL r12, r7, r0, r6
UMAAL r8, r9, r3, r4
STM sp, {r10, r11, r12}
UMAAL r7, r8, r2, r5
LDM lr!, {r4}
UMULL r10, r11, r1, r6
UMAAL r8, r9, r2, r6
UMAAL r7, r10, r0, r4
UMAAL r8, r11, r3, r5
STR r7, [sp, #12]
UMAAL r8, r10, r1, r4
UMAAL r9, r11, r3, r6
UMAAL r9, r10, r2, r4
UMAAL r10, r11, r3, r4
LDM lr, {r4, r5, r6, r7}
MOV r12, #0x0
UMLAL r8, r12, r0, r4
UMAAL r9, r12, r1, r4
UMAAL r10, r12, r2, r4
UMAAL r11, r12, r3, r4
MOV r4, #0x0
UMLAL r9, r4, r0, r5
UMAAL r10, r4, r1, r5
UMAAL r11, r4, r2, r5
UMAAL r12, r4, r3, r5
MOV r5, #0x0
UMLAL r10, r5, r0, r6
UMAAL r11, r5, r1, r6
UMAAL r12, r5, r2, r6
UMAAL r4, r5, r3, r6
MOV r6, #0x0
UMLAL r11, r6, r0, r7
/* Switch to the high 4 limbs of A; rewind B pointer */
LDR r0, [sp, #40]
UMAAL r12, r6, r1, r7
ADD r0, r0, #0x10
UMAAL r4, r6, r2, r7
SUB lr, lr, #0x10
UMAAL r5, r6, r3, r7
LDM r0, {r0, r1, r2, r3}
STR r6, [sp, #32]
LDM lr!, {r6}
MOV r7, #0x0
UMLAL r8, r7, r0, r6
UMAAL r9, r7, r1, r6
STR r8, [sp, #16]
UMAAL r10, r7, r2, r6
UMAAL r11, r7, r3, r6
LDM lr!, {r6}
MOV r8, #0x0
UMLAL r9, r8, r0, r6
UMAAL r10, r8, r1, r6
STR r9, [sp, #20]
UMAAL r11, r8, r2, r6
UMAAL r12, r8, r3, r6
LDM lr!, {r6}
MOV r9, #0x0
UMLAL r10, r9, r0, r6
UMAAL r11, r9, r1, r6
STR r10, [sp, #24]
UMAAL r12, r9, r2, r6
UMAAL r4, r9, r3, r6
LDM lr!, {r6}
MOV r10, #0x0
UMLAL r11, r10, r0, r6
UMAAL r12, r10, r1, r6
STR r11, [sp, #28]
UMAAL r4, r10, r2, r6
UMAAL r5, r10, r3, r6
LDM lr!, {r11}
UMAAL r12, r7, r0, r11
UMAAL r4, r7, r1, r11
LDR r6, [sp, #32]
UMAAL r5, r7, r2, r11
UMAAL r6, r7, r3, r11
LDM lr!, {r11}
UMAAL r4, r8, r0, r11
UMAAL r5, r8, r1, r11
UMAAL r6, r8, r2, r11
UMAAL r7, r8, r3, r11
LDM lr, {r11, lr}
UMAAL r5, r9, r0, r11
UMAAL r6, r10, r0, lr
UMAAL r6, r9, r1, r11
UMAAL r7, r10, r1, lr
UMAAL r7, r9, r2, r11
UMAAL r8, r10, r2, lr
UMAAL r8, r9, r3, r11
UMAAL r9, r10, r3, lr
/* Reduce: note 0x25 (= 37) here because UMAAL adds limb[7] back in as
 * the accumulator, giving the required *38 plus the original limb. */
LDR r0, [sp, #28]
MOV lr, #0x25
UMAAL r10, r0, r10, lr
MOV lr, #0x13
LSL r0, r0, #1
ORR r0, r0, r10, LSR #31
MUL r11, r0, lr
POP {r0, r1, r2}
MOV lr, #0x26
UMAAL r0, r11, r12, lr
UMAAL r1, r11, r4, lr
UMAAL r2, r11, r5, lr
POP {r3, r4, r5}
UMAAL r3, r11, r6, lr
UMAAL r4, r11, r7, lr
UMAAL r5, r11, r8, lr
POP {r6}
/* Clear bit 255 (already folded in above) */
BFC r10, #31, #1
UMAAL r6, r11, r9, lr
ADD r7, r10, r11
LDR lr, [sp, #8]
/* Store */
STM lr, {r0, r1, r2, r3, r4, r5, r6, r7}
ADD sp, sp, #0x10
POP {pc}
/* Cycle Count = 239 */
.size fe_mul_op,.-fe_mul_op
#endif /* WOLFSSL_SP_NO_UMAAL */
/* fe_mul -- public AAPCS wrapper for fe_mul_op.
 * r0 = result, r1 = A, r2 = B (8 x 32-bit limbs each, mod 2^255-19).
 * fe_mul_op clobbers r4-r11, so they are saved/restored here.
 */
.text
.align 4
.globl fe_mul
.type fe_mul, %function
fe_mul:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
BL fe_mul_op
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 24 */
.size fe_mul,.-fe_mul
#ifdef WOLFSSL_SP_NO_UMAAL
/* fe_sq_op -- square a field element mod 2^255-19 (no-UMAAL build).
 * r0 = result (8 x 32-bit limbs), r1 = A (8 limbs)
 * Internal calling convention: r4-r11 are clobbered; callers (fe_sq etc.)
 * save them. Computes the off-diagonal products A[i]*A[j] (i<j), doubles
 * them, adds the diagonal squares A[i]^2, then reduces the 512-bit result
 * using 2^256 == 38 mod p and folds bit 255 back via *19.
 */
.text
.align 4
.globl fe_sq_op
.type fe_sq_op, %function
fe_sq_op:
PUSH {lr}
SUB sp, sp, #0x44
STR r0, [sp, #64]
/* Square */
MOV r0, #0x0
LDR r12, [r1]
/* A[0] * A[1] */
LDR lr, [r1, #4]
UMULL r4, r5, r12, lr
/* A[0] * A[3] */
LDR lr, [r1, #12]
UMULL r6, r7, r12, lr
/* A[0] * A[5] */
LDR lr, [r1, #20]
UMULL r8, r9, r12, lr
/* A[0] * A[7] */
LDR lr, [r1, #28]
UMULL r10, r3, r12, lr
/* A[0] * A[2] */
LDR lr, [r1, #8]
MOV r11, #0x0
UMLAL r5, r11, r12, lr
ADDS r6, r6, r11
/* A[0] * A[4] */
LDR lr, [r1, #16]
ADCS r7, r7, #0x0
ADC r11, r0, #0x0
UMLAL r7, r11, r12, lr
ADDS r8, r8, r11
/* A[0] * A[6] */
LDR lr, [r1, #24]
ADCS r9, r9, #0x0
ADC r11, r0, #0x0
UMLAL r9, r11, r12, lr
ADDS r10, r10, r11
ADCS r3, r3, #0x0
STR r4, [sp, #4]
STR r5, [sp, #8]
/* A[1] * A[2] */
LDR r12, [r1, #4]
LDR lr, [r1, #8]
MOV r11, #0x0
UMLAL r6, r11, r12, lr
STR r6, [sp, #12]
ADDS r7, r7, r11
/* A[1] * A[3] */
LDR lr, [r1, #12]
ADC r11, r0, #0x0
UMLAL r7, r11, r12, lr
STR r7, [sp, #16]
ADDS r8, r8, r11
/* A[1] * A[4] */
LDR lr, [r1, #16]
ADC r11, r0, #0x0
UMLAL r8, r11, r12, lr
ADDS r9, r9, r11
/* A[1] * A[5] */
LDR lr, [r1, #20]
ADC r11, r0, #0x0
UMLAL r9, r11, r12, lr
ADDS r10, r10, r11
/* A[1] * A[6] */
LDR lr, [r1, #24]
ADC r11, r0, #0x0
UMLAL r10, r11, r12, lr
ADDS r3, r3, r11
/* A[1] * A[7] */
LDR lr, [r1, #28]
ADC r4, r0, #0x0
UMLAL r3, r4, r12, lr
/* A[2] * A[3] */
LDR r12, [r1, #8]
LDR lr, [r1, #12]
MOV r11, #0x0
UMLAL r8, r11, r12, lr
STR r8, [sp, #20]
ADDS r9, r9, r11
/* A[2] * A[4] */
LDR lr, [r1, #16]
ADC r11, r0, #0x0
UMLAL r9, r11, r12, lr
STR r9, [sp, #24]
ADDS r10, r10, r11
/* A[2] * A[5] */
LDR lr, [r1, #20]
ADC r11, r0, #0x0
UMLAL r10, r11, r12, lr
ADDS r3, r3, r11
/* A[2] * A[6] */
LDR lr, [r1, #24]
ADC r11, r0, #0x0
UMLAL r3, r11, r12, lr
ADDS r4, r4, r11
/* A[2] * A[7] */
LDR lr, [r1, #28]
ADC r5, r0, #0x0
UMLAL r4, r5, r12, lr
/* A[3] * A[4] */
LDR r12, [r1, #12]
LDR lr, [r1, #16]
MOV r11, #0x0
UMLAL r10, r11, r12, lr
STR r10, [sp, #28]
ADDS r3, r3, r11
/* A[3] * A[5] */
LDR lr, [r1, #20]
ADC r11, r0, #0x0
UMLAL r3, r11, r12, lr
ADDS r4, r4, r11
/* A[3] * A[6] */
LDR lr, [r1, #24]
ADC r11, r0, #0x0
UMLAL r4, r11, r12, lr
ADDS r5, r5, r11
/* A[3] * A[7] */
LDR lr, [r1, #28]
ADC r6, r0, #0x0
UMLAL r5, r6, r12, lr
/* A[4] * A[5] */
LDR r12, [r1, #16]
LDR lr, [r1, #20]
MOV r11, #0x0
UMLAL r4, r11, r12, lr
ADDS r5, r5, r11
/* A[4] * A[6] */
LDR lr, [r1, #24]
ADC r11, r0, #0x0
UMLAL r5, r11, r12, lr
ADDS r6, r6, r11
/* A[4] * A[7] */
LDR lr, [r1, #28]
ADC r7, r0, #0x0
UMLAL r6, r7, r12, lr
/* A[5] * A[6] */
LDR r12, [r1, #20]
LDR lr, [r1, #24]
MOV r11, #0x0
UMLAL r6, r11, r12, lr
ADDS r7, r7, r11
/* A[5] * A[7] */
LDR lr, [r1, #28]
ADC r8, r0, #0x0
UMLAL r7, r8, r12, lr
/* A[6] * A[7] */
LDR r12, [r1, #24]
LDR lr, [r1, #28]
MOV r9, #0x0
UMLAL r8, r9, r12, lr
ADD lr, sp, #0x20
STM lr, {r3, r4, r5, r6, r7, r8, r9}
/* Double the off-diagonal sum (limbs 1..14 on the stack) */
ADD lr, sp, #0x4
LDM lr, {r4, r5, r6, r7, r8, r9, r10}
ADDS r4, r4, r4
ADCS r5, r5, r5
ADCS r6, r6, r6
ADCS r7, r7, r7
ADCS r8, r8, r8
ADCS r9, r9, r9
ADCS r10, r10, r10
STM lr!, {r4, r5, r6, r7, r8, r9, r10}
LDM lr, {r3, r4, r5, r6, r7, r8, r9}
ADCS r3, r3, r3
ADCS r4, r4, r4
ADCS r5, r5, r5
ADCS r6, r6, r6
ADCS r7, r7, r7
ADCS r8, r8, r8
ADCS r9, r9, r9
ADC r10, r0, #0x0
STM lr, {r3, r4, r5, r6, r7, r8, r9, r10}
/* Add the diagonal squares A[i]^2 */
ADD lr, sp, #0x4
LDM lr, {r4, r5, r6, r7, r8, r9, r10}
MOV lr, sp
/* A[0] * A[0] */
LDR r12, [r1]
UMULL r3, r11, r12, r12
ADDS r4, r4, r11
/* A[1] * A[1] */
LDR r12, [r1, #4]
ADCS r5, r5, #0x0
ADC r11, r0, #0x0
UMLAL r5, r11, r12, r12
ADDS r6, r6, r11
/* A[2] * A[2] */
LDR r12, [r1, #8]
ADCS r7, r7, #0x0
ADC r11, r0, #0x0
UMLAL r7, r11, r12, r12
ADDS r8, r8, r11
/* A[3] * A[3] */
LDR r12, [r1, #12]
ADCS r9, r9, #0x0
ADC r11, r0, #0x0
UMLAL r9, r11, r12, r12
ADDS r10, r10, r11
STM lr!, {r3, r4, r5, r6, r7, r8, r9, r10}
LDM lr, {r3, r4, r5, r6, r7, r8, r9, r10}
/* A[4] * A[4] */
LDR r12, [r1, #16]
ADCS r3, r3, #0x0
ADC r11, r0, #0x0
UMLAL r3, r11, r12, r12
ADDS r4, r4, r11
/* A[5] * A[5] */
LDR r12, [r1, #20]
ADCS r5, r5, #0x0
ADC r11, r0, #0x0
UMLAL r5, r11, r12, r12
ADDS r6, r6, r11
/* A[6] * A[6] */
LDR r12, [r1, #24]
ADCS r7, r7, #0x0
ADC r11, r0, #0x0
UMLAL r7, r11, r12, r12
ADDS r8, r8, r11
/* A[7] * A[7] */
LDR r12, [r1, #28]
ADCS r9, r9, #0x0
ADC r10, r10, #0x0
UMLAL r9, r10, r12, r12
/* Reduce: fold high limbs in via 2^256 == 38 mod p, then bit 255 via *19 */
LDR r2, [sp, #28]
MOV lr, sp
MOV r12, #0x26
UMULL r10, r11, r10, r12
ADDS r10, r10, r2
ADC r11, r11, #0x0
MOV r12, #0x13
LSL r11, r11, #1
ORR r11, r11, r10, LSR #31
MUL r11, r11, r12
LDM lr!, {r1, r2}
MOV r12, #0x26
ADDS r1, r1, r11
ADC r11, r0, #0x0
UMLAL r1, r11, r3, r12
ADDS r2, r2, r11
ADC r11, r0, #0x0
UMLAL r2, r11, r4, r12
LDM lr!, {r3, r4}
ADDS r3, r3, r11
ADC r11, r0, #0x0
UMLAL r3, r11, r5, r12
ADDS r4, r4, r11
ADC r11, r0, #0x0
UMLAL r4, r11, r6, r12
LDM lr!, {r5, r6}
ADDS r5, r5, r11
ADC r11, r0, #0x0
UMLAL r5, r11, r7, r12
ADDS r6, r6, r11
ADC r11, r0, #0x0
UMLAL r6, r11, r8, r12
LDM lr!, {r7, r8}
ADDS r7, r7, r11
ADC r11, r0, #0x0
UMLAL r7, r11, r9, r12
/* Clear bit 255 (already folded in above) */
BFC r10, #31, #1
ADDS r8, r10, r11
/* Store */
LDR r0, [sp, #64]
STM r0, {r1, r2, r3, r4, r5, r6, r7, r8}
ADD sp, sp, #0x44
POP {pc}
/* Cycle Count = 355 */
.size fe_sq_op,.-fe_sq_op
#else
/* fe_sq_op -- square a field element mod 2^255-19 (UMAAL build).
 * r0 = result (8 x 32-bit limbs), r1 = A (8 limbs)
 * Internal calling convention: r4-r11 are clobbered; callers (fe_sq etc.)
 * save them. Off-diagonal products are doubled in-flight (ADCS reg,reg)
 * and diagonal squares merged with UMAAL; the low half lives on the stack
 * and is popped during the reduction (2^256 == 38 mod p, bit 255 -> *19).
 */
.text
.align 4
.globl fe_sq_op
.type fe_sq_op, %function
fe_sq_op:
PUSH {lr}
SUB sp, sp, #0x20
STR r0, [sp, #28]
LDM r1, {r0, r1, r2, r3, r4, r5, r6, r7}
/* Square */
UMULL r9, r10, r0, r0
UMULL r11, r12, r0, r1
ADDS r11, r11, r11
MOV lr, #0x0
UMAAL r10, r11, lr, lr
STM sp, {r9, r10}
MOV r8, lr
UMAAL r8, r12, r0, r2
ADCS r8, r8, r8
UMAAL r8, r11, r1, r1
UMULL r9, r10, r0, r3
UMAAL r9, r12, r1, r2
ADCS r9, r9, r9
UMAAL r9, r11, lr, lr
STRD r8, r9, [sp, #8]
MOV r9, lr
UMAAL r9, r10, r0, r4
UMAAL r9, r12, r1, r3
ADCS r9, r9, r9
UMAAL r9, r11, r2, r2
STR r9, [sp, #16]
UMULL r9, r8, r0, r5
UMAAL r9, r12, r1, r4
UMAAL r9, r10, r2, r3
ADCS r9, r9, r9
UMAAL r9, r11, lr, lr
STR r9, [sp, #20]
MOV r9, lr
UMAAL r9, r8, r0, r6
UMAAL r9, r12, r1, r5
UMAAL r9, r10, r2, r4
ADCS r9, r9, r9
UMAAL r9, r11, r3, r3
STR r9, [sp, #24]
UMULL r0, r9, r0, r7
UMAAL r0, r8, r1, r6
UMAAL r0, r12, r2, r5
UMAAL r0, r10, r3, r4
ADCS r0, r0, r0
UMAAL r0, r11, lr, lr
/* R[7] = r0 */
UMAAL r9, r8, r1, r7
UMAAL r9, r10, r2, r6
UMAAL r12, r9, r3, r5
ADCS r12, r12, r12
UMAAL r12, r11, r4, r4
/* R[8] = r12 */
UMAAL r9, r8, r2, r7
UMAAL r10, r9, r3, r6
MOV r2, lr
UMAAL r10, r2, r4, r5
ADCS r10, r10, r10
UMAAL r11, r10, lr, lr
/* R[9] = r11 */
UMAAL r2, r8, r3, r7
UMAAL r2, r9, r4, r6
ADCS r3, r2, r2
UMAAL r10, r3, r5, r5
/* R[10] = r10 */
MOV r1, lr
UMAAL r1, r8, r4, r7
UMAAL r1, r9, r5, r6
ADCS r4, r1, r1
UMAAL r3, r4, lr, lr
/* R[11] = r3 */
UMAAL r8, r9, r5, r7
ADCS r8, r8, r8
UMAAL r4, r8, r6, r6
/* R[12] = r4 */
MOV r5, lr
UMAAL r5, r9, r6, r7
ADCS r5, r5, r5
UMAAL r8, r5, lr, lr
/* R[13] = r8 */
ADCS r9, r9, r9
UMAAL r9, r5, r7, r7
ADCS r7, r5, lr
/* R[14] = r9 */
/* R[15] = r7 */
/* Reduce: 0x25 (= 37) because UMAAL adds R[15] back as accumulator,
 * giving the net *38; bit 255 is then folded back via *19 (0x13). */
MOV r6, #0x25
UMAAL r7, r0, r7, r6
MOV r6, #0x13
LSL r0, r0, #1
ORR r0, r0, r7, LSR #31
MUL lr, r0, r6
POP {r0, r1}
MOV r6, #0x26
UMAAL r0, lr, r12, r6
UMAAL r1, lr, r11, r6
MOV r12, r3
MOV r11, r4
POP {r2, r3, r4}
UMAAL r2, lr, r10, r6
UMAAL r3, lr, r12, r6
UMAAL r4, lr, r11, r6
MOV r12, r6
POP {r5, r6}
UMAAL r5, lr, r8, r12
/* Clear bit 255 (already folded in above) */
BFC r7, #31, #1
UMAAL r6, lr, r9, r12
ADD r7, r7, lr
POP {lr}
/* Store */
STM lr, {r0, r1, r2, r3, r4, r5, r6, r7}
POP {pc}
/* Cycle Count = 179 */
.size fe_sq_op,.-fe_sq_op
#endif /* WOLFSSL_SP_NO_UMAAL */
/* fe_sq -- public AAPCS wrapper for fe_sq_op.
 * r0 = result, r1 = A (8 x 32-bit limbs, mod 2^255-19).
 * fe_sq_op clobbers r4-r11, so they are saved/restored here.
 */
.text
.align 4
.globl fe_sq
.type fe_sq, %function
fe_sq:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
BL fe_sq_op
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 24 */
.size fe_sq,.-fe_sq
#ifdef HAVE_CURVE25519
#ifdef WOLFSSL_SP_NO_UMAAL
/* fe_mul121666 -- multiply a field element by 121666 mod 2^255-19
 * (no-UMAAL build). 121666 = 0x1db42 is the (A+2)/4 constant used in
 * the X25519 Montgomery ladder step.
 * r0 = result (8 x 32-bit limbs), r1 = A (8 limbs)
 */
.text
.align 4
.globl fe_mul121666
.type fe_mul121666, %function
fe_mul121666:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
/* Multiply by 121666 */
LDM r1, {r2, r3, r4, r5, r6, r7, r8, r9}
/* r12 = 0x1db42 = 121666 */
MOV r12, #0xdb42
MOVT r12, #0x1
UMULL r2, r10, r2, r12
UMULL r3, r11, r3, r12
ADDS r3, r3, r10
ADC r11, r11, #0x0
UMULL r4, r10, r4, r12
ADDS r4, r4, r11
ADC r10, r10, #0x0
UMULL r5, r11, r5, r12
ADDS r5, r5, r10
ADC r11, r11, #0x0
UMULL r6, r10, r6, r12
ADDS r6, r6, r11
ADC r10, r10, #0x0
UMULL r7, r11, r7, r12
ADDS r7, r7, r10
ADC r11, r11, #0x0
UMULL r8, r10, r8, r12
ADDS r8, r8, r11
ADC r10, r10, #0x0
UMULL r9, r11, r9, r12
ADDS r9, r9, r10
/* Fold overflow above bit 255 back in via *19 (2^255 == 19 mod p) */
MOV r12, #0x13
ADC r11, r11, #0x0
LSL r11, r11, #1
ORR r11, r11, r9, LSR #31
MUL r11, r11, r12
ADDS r2, r2, r11
ADCS r3, r3, #0x0
ADCS r4, r4, #0x0
ADCS r5, r5, #0x0
ADCS r6, r6, #0x0
ADCS r7, r7, #0x0
/* Clear bit 255 (already folded in above) */
BFC r9, #31, #1
ADCS r8, r8, #0x0
ADC r9, r9, #0x0
STM r0, {r2, r3, r4, r5, r6, r7, r8, r9}
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 75 */
.size fe_mul121666,.-fe_mul121666
#else
/* fe_mul121666 -- multiply a field element by 121666 mod 2^255-19
 * (UMAAL build). 121666 = 0x1db42 is the (A+2)/4 constant used in the
 * X25519 Montgomery ladder step.
 * r0 = result (8 x 32-bit limbs), r1 = A (8 limbs)
 */
.text
.align 4
.globl fe_mul121666
.type fe_mul121666, %function
fe_mul121666:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
/* Multiply by 121666 */
LDM r1, {r2, r3, r4, r5, r6, r7, r8, r9}
/* r11 = 0x1db42 = 121666; r10 = 121665 because UMAAL adds the limb
 * itself back as an accumulator (limb + limb*121665 = limb*121666) */
MOV r11, #0xdb42
MOVT r11, #0x1
UMULL r2, r12, r2, r11
SUB r10, r11, #0x1
UMAAL r3, r12, r3, r10
UMAAL r4, r12, r4, r10
UMAAL r5, r12, r5, r10
UMAAL r6, r12, r6, r10
UMAAL r7, r12, r7, r10
UMAAL r8, r12, r8, r10
/* Fold overflow above bit 255 back in via *19 (2^255 == 19 mod p) */
MOV r11, #0x13
UMAAL r9, r12, r9, r10
LSL r12, r12, #1
ORR r12, r12, r9, LSR #31
MUL r12, r12, r11
ADDS r2, r2, r12
ADCS r3, r3, #0x0
ADCS r4, r4, #0x0
ADCS r5, r5, #0x0
ADCS r6, r6, #0x0
ADCS r7, r7, #0x0
/* Clear bit 255 (already folded in above) */
BFC r9, #31, #1
ADCS r8, r8, #0x0
ADC r9, r9, #0x0
STM r0, {r2, r3, r4, r5, r6, r7, r8, r9}
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 69 */
.size fe_mul121666,.-fe_mul121666
#endif /* WOLFSSL_SP_NO_UMAAL */
#ifndef WC_NO_CACHE_RESISTANT
/* curve25519 -- X25519 scalar multiplication (cache-resistant variant).
 * r0 = output u-coordinate (32 bytes)
 * r1 = scalar n (32 bytes, little-endian words; assumed pre-clamped by
 *      the caller -- TODO confirm against the C wrapper)
 * r2 = input point u-coordinate (32 bytes)
 * Returns 0 in r0.
 * Montgomery ladder over bits 254..0 of the scalar, processed as 8 words
 * (word index counts down from 0x1c) x bits (30..0, then 31..0). Working
 * field elements live on the stack; projective/Z values are conditionally
 * swapped with a branch-free mask so memory access order is independent
 * of the scalar. Finishes with a Fermat inversion (p-2 addition chain)
 * and a final multiply.
 */
.text
.align 4
.globl curve25519
.type curve25519, %function
curve25519:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
SUB sp, sp, #0xbc
/* [sp,#160]=result ptr, [sp,#164]=scalar ptr, [sp,#168]=point ptr,
 * [sp,#172]=previous swap bit */
STR r0, [sp, #160]
STR r1, [sp, #164]
STR r2, [sp, #168]
MOV r1, #0x0
STR r1, [sp, #172]
/* x2 = 1 (in result buffer), z2 = 0 (at sp), x3 = 1 (sp+0x20),
 * z3 = input u (sp+0x40) -- ladder initial state */
MOV r4, #0x1
MOV r5, #0x0
MOV r6, #0x0
MOV r7, #0x0
MOV r8, #0x0
MOV r9, #0x0
MOV r10, #0x0
MOV r11, #0x0
STM r0, {r4, r5, r6, r7, r8, r9, r10, r11}
ADD r3, sp, #0x20
STM r3, {r4, r5, r6, r7, r8, r9, r10, r11}
MOV r4, #0x0
MOV r3, sp
STM r3, {r4, r5, r6, r7, r8, r9, r10, r11}
ADD r3, sp, #0x40
/* Copy */
LDM r2, {r4, r5, r6, r7, r8, r9, r10, r11}
STM r3, {r4, r5, r6, r7, r8, r9, r10, r11}
/* Start at word 0x1c (= byte offset 28, top word), bit 30 (bit 254 of
 * the scalar); [sp,#180]=bit index, [sp,#176]=word byte offset */
MOV r1, #0x1e
STR r1, [sp, #180]
MOV r2, #0x1c
STR r2, [sp, #176]
L_curve25519_words:
L_curve25519_bits:
/* b = current scalar bit; swap = b XOR previous b */
LDR r1, [sp, #164]
LDR r2, [r1, r2]
LDR r1, [sp, #180]
LSR r2, r2, r1
AND r2, r2, #0x1
STR r2, [sp, #184]
LDR r1, [sp, #172]
EOR r1, r1, r2
STR r1, [sp, #172]
LDR r0, [sp, #160]
/* Conditional Swap of x2 (result buffer) and z3 (sp+0x40), two words
 * at a time, using mask r1 = -swap (branch-free, constant memory order) */
RSB r1, r1, #0x0
MOV r3, r0
ADD r12, sp, #0x40
LDM r3, {r4, r5}
LDM r12, {r6, r7}
EOR r8, r4, r6
EOR r9, r5, r7
AND r8, r8, r1
AND r9, r9, r1
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r8
EOR r7, r7, r9
STM r3!, {r4, r5}
STM r12!, {r6, r7}
LDM r3, {r4, r5}
LDM r12, {r6, r7}
EOR r8, r4, r6
EOR r9, r5, r7
AND r8, r8, r1
AND r9, r9, r1
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r8
EOR r7, r7, r9
STM r3!, {r4, r5}
STM r12!, {r6, r7}
LDM r3, {r4, r5}
LDM r12, {r6, r7}
EOR r8, r4, r6
EOR r9, r5, r7
AND r8, r8, r1
AND r9, r9, r1
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r8
EOR r7, r7, r9
STM r3!, {r4, r5}
STM r12!, {r6, r7}
LDM r3, {r4, r5}
LDM r12, {r6, r7}
EOR r8, r4, r6
EOR r9, r5, r7
AND r8, r8, r1
AND r9, r9, r1
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r8
EOR r7, r7, r9
STM r3!, {r4, r5}
STM r12!, {r6, r7}
LDR r1, [sp, #172]
/* Conditional Swap of z2 (sp) and x3 (sp+0x20) with the same mask */
RSB r1, r1, #0x0
MOV r3, sp
ADD r12, sp, #0x20
LDM r3, {r4, r5}
LDM r12, {r6, r7}
EOR r8, r4, r6
EOR r9, r5, r7
AND r8, r8, r1
AND r9, r9, r1
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r8
EOR r7, r7, r9
STM r3!, {r4, r5}
STM r12!, {r6, r7}
LDM r3, {r4, r5}
LDM r12, {r6, r7}
EOR r8, r4, r6
EOR r9, r5, r7
AND r8, r8, r1
AND r9, r9, r1
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r8
EOR r7, r7, r9
STM r3!, {r4, r5}
STM r12!, {r6, r7}
LDM r3, {r4, r5}
LDM r12, {r6, r7}
EOR r8, r4, r6
EOR r9, r5, r7
AND r8, r8, r1
AND r9, r9, r1
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r8
EOR r7, r7, r9
STM r3!, {r4, r5}
STM r12!, {r6, r7}
LDM r3, {r4, r5}
LDM r12, {r6, r7}
EOR r8, r4, r6
EOR r9, r5, r7
AND r8, r8, r1
AND r9, r9, r1
EOR r4, r4, r8
EOR r5, r5, r9
EOR r6, r6, r8
EOR r7, r7, r9
STM r3!, {r4, r5}
STM r12!, {r6, r7}
LDR r1, [sp, #184]
STR r1, [sp, #172]
/* Ladder step: standard Montgomery differential add/double using
 * fe_add_sub_op / fe_mul_op / fe_sq_op / fe_mul121666 on the stack
 * temporaries (sp+0x60, sp+0x80 are scratch) */
MOV r3, sp
LDR r2, [sp, #160]
ADD r1, sp, #0x80
LDR r0, [sp, #160]
BL fe_add_sub_op
ADD r3, sp, #0x20
ADD r2, sp, #0x40
ADD r1, sp, #0x60
MOV r0, sp
BL fe_add_sub_op
LDR r2, [sp, #160]
ADD r1, sp, #0x60
ADD r0, sp, #0x20
BL fe_mul_op
ADD r2, sp, #0x80
MOV r1, sp
MOV r0, sp
BL fe_mul_op
ADD r1, sp, #0x80
ADD r0, sp, #0x80
BL fe_sq_op
LDR r1, [sp, #160]
ADD r0, sp, #0x60
BL fe_sq_op
MOV r3, sp
ADD r2, sp, #0x20
MOV r1, sp
ADD r0, sp, #0x40
BL fe_add_sub_op
ADD r2, sp, #0x80
ADD r1, sp, #0x60
LDR r0, [sp, #160]
BL fe_mul_op
ADD r2, sp, #0x80
ADD r1, sp, #0x60
ADD r0, sp, #0x60
BL fe_sub_op
MOV r1, sp
MOV r0, sp
BL fe_sq_op
ADD r1, sp, #0x60
ADD r0, sp, #0x20
BL fe_mul121666
ADD r1, sp, #0x40
ADD r0, sp, #0x40
BL fe_sq_op
ADD r2, sp, #0x20
ADD r1, sp, #0x80
ADD r0, sp, #0x80
BL fe_add_op
MOV r2, sp
LDR r1, [sp, #168]
ADD r0, sp, #0x20
BL fe_mul_op
ADD r2, sp, #0x80
ADD r1, sp, #0x60
MOV r0, sp
BL fe_mul_op
/* Next bit; after bit 0 of a word, move to the previous word */
LDR r2, [sp, #176]
LDR r1, [sp, #180]
SUBS r1, r1, #0x1
STR r1, [sp, #180]
#ifdef __GNUC__
BGE L_curve25519_bits
#else
BGE.W L_curve25519_bits
#endif
MOV r1, #0x1f
STR r1, [sp, #180]
SUBS r2, r2, #0x4
STR r2, [sp, #176]
#ifdef __GNUC__
BGE L_curve25519_words
#else
BGE.W L_curve25519_words
#endif
/* Invert z2 via Fermat: z2^(p-2) using the standard 25519 addition
 * chain (square runs of 1,2,4,9,19,10,49,99,50,5 with multiplies) */
ADD r1, sp, #0x0
ADD r0, sp, #0x20
BL fe_sq_op
ADD r1, sp, #0x20
ADD r0, sp, #0x40
BL fe_sq_op
ADD r1, sp, #0x40
ADD r0, sp, #0x40
BL fe_sq_op
ADD r2, sp, #0x40
ADD r1, sp, #0x0
ADD r0, sp, #0x40
BL fe_mul_op
ADD r2, sp, #0x40
ADD r1, sp, #0x20
ADD r0, sp, #0x20
BL fe_mul_op
ADD r1, sp, #0x20
ADD r0, sp, #0x60
BL fe_sq_op
ADD r2, sp, #0x60
ADD r1, sp, #0x40
ADD r0, sp, #0x40
BL fe_mul_op
ADD r1, sp, #0x40
ADD r0, sp, #0x60
BL fe_sq_op
MOV r12, #0x4
L_curve25519_inv_1:
ADD r1, sp, #0x60
ADD r0, sp, #0x60
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_curve25519_inv_1
#else
BNE.N L_curve25519_inv_1
#endif
ADD r2, sp, #0x40
ADD r1, sp, #0x60
ADD r0, sp, #0x40
BL fe_mul_op
ADD r1, sp, #0x40
ADD r0, sp, #0x60
BL fe_sq_op
MOV r12, #0x9
L_curve25519_inv_2:
ADD r1, sp, #0x60
ADD r0, sp, #0x60
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_curve25519_inv_2
#else
BNE.N L_curve25519_inv_2
#endif
ADD r2, sp, #0x40
ADD r1, sp, #0x60
ADD r0, sp, #0x60
BL fe_mul_op
ADD r1, sp, #0x60
ADD r0, sp, #0x80
BL fe_sq_op
MOV r12, #0x13
L_curve25519_inv_3:
ADD r1, sp, #0x80
ADD r0, sp, #0x80
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_curve25519_inv_3
#else
BNE.N L_curve25519_inv_3
#endif
ADD r2, sp, #0x60
ADD r1, sp, #0x80
ADD r0, sp, #0x60
BL fe_mul_op
MOV r12, #0xa
L_curve25519_inv_4:
ADD r1, sp, #0x60
ADD r0, sp, #0x60
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_curve25519_inv_4
#else
BNE.N L_curve25519_inv_4
#endif
ADD r2, sp, #0x40
ADD r1, sp, #0x60
ADD r0, sp, #0x40
BL fe_mul_op
ADD r1, sp, #0x40
ADD r0, sp, #0x60
BL fe_sq_op
MOV r12, #0x31
L_curve25519_inv_5:
ADD r1, sp, #0x60
ADD r0, sp, #0x60
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_curve25519_inv_5
#else
BNE.N L_curve25519_inv_5
#endif
ADD r2, sp, #0x40
ADD r1, sp, #0x60
ADD r0, sp, #0x60
BL fe_mul_op
ADD r1, sp, #0x60
ADD r0, sp, #0x80
BL fe_sq_op
MOV r12, #0x63
L_curve25519_inv_6:
ADD r1, sp, #0x80
ADD r0, sp, #0x80
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_curve25519_inv_6
#else
BNE.N L_curve25519_inv_6
#endif
ADD r2, sp, #0x60
ADD r1, sp, #0x80
ADD r0, sp, #0x60
BL fe_mul_op
MOV r12, #0x32
L_curve25519_inv_7:
ADD r1, sp, #0x60
ADD r0, sp, #0x60
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_curve25519_inv_7
#else
BNE.N L_curve25519_inv_7
#endif
ADD r2, sp, #0x40
ADD r1, sp, #0x60
ADD r0, sp, #0x40
BL fe_mul_op
MOV r12, #0x5
L_curve25519_inv_8:
ADD r1, sp, #0x40
ADD r0, sp, #0x40
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_curve25519_inv_8
#else
BNE.N L_curve25519_inv_8
#endif
ADD r2, sp, #0x20
ADD r1, sp, #0x40
ADD r0, sp, #0x0
BL fe_mul_op
/* result = x2 * z2^-1 */
MOV r2, sp
LDR r1, [sp, #160]
LDR r0, [sp, #160]
BL fe_mul_op
MOV r0, #0x0
ADD sp, sp, #0xbc
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 682 */
.size curve25519,.-curve25519
#else
/* curve25519 -- X25519 scalar multiplication (non-cache-resistant,
 * WC_NO_CACHE_RESISTANT variant).
 * r0 = output u-coordinate (32 bytes)
 * r1 = scalar n (32 bytes, little-endian words; assumed pre-clamped by
 *      the caller -- TODO confirm against the C wrapper)
 * r2 = input point u-coordinate (32 bytes)
 * Returns 0 in r0.
 * Montgomery ladder over bits 254..0 (single counter 0xfe down to 0).
 * Instead of swapping field-element buffers, this variant keeps the four
 * working pointers in a table at sp+0xb0 and conditionally swaps the
 * POINTERS with a branch-free mask. Ends with Fermat inversion, a final
 * multiply, and a canonical reduction below 2^255-19.
 */
.text
.align 4
.globl curve25519
.type curve25519, %function
curve25519:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
SUB sp, sp, #0xc0
/* [sp,#176]=result/x2 ptr, [sp,#160]=scalar ptr, [sp,#172]=point ptr */
STR r0, [sp, #176]
STR r1, [sp, #160]
STR r2, [sp, #172]
/* Pointer table at sp+0xb0 (offsets 176..188): x2, z2 (sp), z3
 * (sp+0x40), x3 (sp+0x20) -- these four words are what gets swapped */
ADD r5, sp, #0x40
ADD r4, sp, #0x20
STR sp, [sp, #184]
STR r5, [sp, #180]
STR r4, [sp, #188]
MOV r1, #0x0
STR r1, [sp, #164]
/* x2 = 1, x3 = 1, z2 = 0, z3 = input u -- ladder initial state */
MOV r4, #0x1
MOV r5, #0x0
MOV r6, #0x0
MOV r7, #0x0
MOV r8, #0x0
MOV r9, #0x0
MOV r10, #0x0
MOV r11, #0x0
STM r0, {r4, r5, r6, r7, r8, r9, r10, r11}
ADD r3, sp, #0x20
STM r3, {r4, r5, r6, r7, r8, r9, r10, r11}
MOV r4, #0x0
MOV r3, sp
STM r3, {r4, r5, r6, r7, r8, r9, r10, r11}
ADD r3, sp, #0x40
/* Copy */
LDM r2, {r4, r5, r6, r7, r8, r9, r10, r11}
STM r3, {r4, r5, r6, r7, r8, r9, r10, r11}
/* Bit counter starts at 254 */
MOV r2, #0xfe
L_curve25519_bits:
STR r2, [sp, #168]
/* Extract scalar bit (word = bit/32, shift bit into bit 31); r1 =
 * arithmetic sign mask of (previous-bit-word XOR current) -> swap mask */
LDR r1, [sp, #160]
AND r4, r2, #0x1f
LSR r2, r2, #5
LDR r2, [r1, r2, LSL #2]
RSB r4, r4, #0x1f
LSL r2, r2, r4
LDR r1, [sp, #164]
EOR r1, r1, r2
ASR r1, r1, #31
STR r2, [sp, #164]
/* Conditional Swap of the pointer table entries (x2<->x3, z2<->z3) */
ADD r11, sp, #0xb0
LDM r11, {r4, r5, r6, r7}
EOR r8, r4, r5
EOR r9, r6, r7
AND r8, r8, r1
AND r9, r9, r1
EOR r4, r4, r8
EOR r5, r5, r8
EOR r6, r6, r9
EOR r7, r7, r9
STM r11, {r4, r5, r6, r7}
/* Ladder step: Montgomery differential add/double via the pointer
 * table; sp+0x60 and sp+0x80 are scratch field elements */
LDR r3, [sp, #184]
LDR r2, [sp, #176]
ADD r1, sp, #0x80
LDR r0, [sp, #176]
BL fe_add_sub_op
LDR r3, [sp, #188]
LDR r2, [sp, #180]
ADD r1, sp, #0x60
LDR r0, [sp, #184]
BL fe_add_sub_op
LDR r2, [sp, #176]
ADD r1, sp, #0x60
LDR r0, [sp, #188]
BL fe_mul_op
ADD r2, sp, #0x80
LDR r1, [sp, #184]
LDR r0, [sp, #184]
BL fe_mul_op
ADD r1, sp, #0x80
ADD r0, sp, #0x60
BL fe_sq_op
LDR r1, [sp, #176]
ADD r0, sp, #0x80
BL fe_sq_op
LDR r3, [sp, #184]
LDR r2, [sp, #188]
LDR r1, [sp, #184]
LDR r0, [sp, #180]
BL fe_add_sub_op
ADD r2, sp, #0x60
ADD r1, sp, #0x80
LDR r0, [sp, #176]
BL fe_mul_op
ADD r2, sp, #0x60
ADD r1, sp, #0x80
ADD r0, sp, #0x80
BL fe_sub_op
LDR r1, [sp, #184]
LDR r0, [sp, #184]
BL fe_sq_op
ADD r1, sp, #0x80
LDR r0, [sp, #188]
BL fe_mul121666
LDR r1, [sp, #180]
LDR r0, [sp, #180]
BL fe_sq_op
LDR r2, [sp, #188]
ADD r1, sp, #0x60
ADD r0, sp, #0x60
BL fe_add_op
LDR r2, [sp, #184]
LDR r1, [sp, #172]
LDR r0, [sp, #188]
BL fe_mul_op
ADD r2, sp, #0x60
ADD r1, sp, #0x80
LDR r0, [sp, #184]
BL fe_mul_op
LDR r2, [sp, #168]
SUBS r2, r2, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BGE L_curve25519_bits
#else
BGE.N L_curve25519_bits
#endif
/* Cycle Count: 171 */
/* Copy final z2 (wherever the pointer table left it) down to sp for
 * the inversion below */
LDR r1, [sp, #184]
/* Copy */
LDM r1, {r4, r5, r6, r7, r8, r9, r10, r11}
STM sp, {r4, r5, r6, r7, r8, r9, r10, r11}
/* Invert z2 via Fermat: z2^(p-2) using the standard 25519 addition
 * chain (square runs of 1,2,4,9,19,10,49,99,50,5 with multiplies) */
ADD r1, sp, #0x0
ADD r0, sp, #0x20
BL fe_sq_op
ADD r1, sp, #0x20
ADD r0, sp, #0x40
BL fe_sq_op
ADD r1, sp, #0x40
ADD r0, sp, #0x40
BL fe_sq_op
ADD r2, sp, #0x40
ADD r1, sp, #0x0
ADD r0, sp, #0x40
BL fe_mul_op
ADD r2, sp, #0x40
ADD r1, sp, #0x20
ADD r0, sp, #0x20
BL fe_mul_op
ADD r1, sp, #0x20
ADD r0, sp, #0x60
BL fe_sq_op
ADD r2, sp, #0x60
ADD r1, sp, #0x40
ADD r0, sp, #0x40
BL fe_mul_op
ADD r1, sp, #0x40
ADD r0, sp, #0x60
BL fe_sq_op
MOV r12, #0x4
L_curve25519_inv_1:
ADD r1, sp, #0x60
ADD r0, sp, #0x60
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_curve25519_inv_1
#else
BNE.N L_curve25519_inv_1
#endif
ADD r2, sp, #0x40
ADD r1, sp, #0x60
ADD r0, sp, #0x40
BL fe_mul_op
ADD r1, sp, #0x40
ADD r0, sp, #0x60
BL fe_sq_op
MOV r12, #0x9
L_curve25519_inv_2:
ADD r1, sp, #0x60
ADD r0, sp, #0x60
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_curve25519_inv_2
#else
BNE.N L_curve25519_inv_2
#endif
ADD r2, sp, #0x40
ADD r1, sp, #0x60
ADD r0, sp, #0x60
BL fe_mul_op
ADD r1, sp, #0x60
ADD r0, sp, #0x80
BL fe_sq_op
MOV r12, #0x13
L_curve25519_inv_3:
ADD r1, sp, #0x80
ADD r0, sp, #0x80
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_curve25519_inv_3
#else
BNE.N L_curve25519_inv_3
#endif
ADD r2, sp, #0x60
ADD r1, sp, #0x80
ADD r0, sp, #0x60
BL fe_mul_op
MOV r12, #0xa
L_curve25519_inv_4:
ADD r1, sp, #0x60
ADD r0, sp, #0x60
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_curve25519_inv_4
#else
BNE.N L_curve25519_inv_4
#endif
ADD r2, sp, #0x40
ADD r1, sp, #0x60
ADD r0, sp, #0x40
BL fe_mul_op
ADD r1, sp, #0x40
ADD r0, sp, #0x60
BL fe_sq_op
MOV r12, #0x31
L_curve25519_inv_5:
ADD r1, sp, #0x60
ADD r0, sp, #0x60
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_curve25519_inv_5
#else
BNE.N L_curve25519_inv_5
#endif
ADD r2, sp, #0x40
ADD r1, sp, #0x60
ADD r0, sp, #0x60
BL fe_mul_op
ADD r1, sp, #0x60
ADD r0, sp, #0x80
BL fe_sq_op
MOV r12, #0x63
L_curve25519_inv_6:
ADD r1, sp, #0x80
ADD r0, sp, #0x80
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_curve25519_inv_6
#else
BNE.N L_curve25519_inv_6
#endif
ADD r2, sp, #0x60
ADD r1, sp, #0x80
ADD r0, sp, #0x60
BL fe_mul_op
MOV r12, #0x32
L_curve25519_inv_7:
ADD r1, sp, #0x60
ADD r0, sp, #0x60
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_curve25519_inv_7
#else
BNE.N L_curve25519_inv_7
#endif
ADD r2, sp, #0x40
ADD r1, sp, #0x60
ADD r0, sp, #0x40
BL fe_mul_op
MOV r12, #0x5
L_curve25519_inv_8:
ADD r1, sp, #0x40
ADD r0, sp, #0x40
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_curve25519_inv_8
#else
BNE.N L_curve25519_inv_8
#endif
ADD r2, sp, #0x20
ADD r1, sp, #0x40
ADD r0, sp, #0x0
BL fe_mul_op
/* result = x2 * z2^-1 */
LDR r2, [sp, #184]
LDR r1, [sp, #176]
LDR r0, [sp, #176]
BL fe_mul_op
/* Ensure result is less than modulus: conditionally add 19 (propagates
 * as subtracting p) when bit 255 would be set, then clear bit 255 */
LDR r0, [sp, #176]
LDM r0, {r4, r5, r6, r7, r8, r9, r10, r11}
MOV r2, #0x13
AND r2, r2, r11, ASR #31
ADDS r4, r4, r2
ADCS r5, r5, #0x0
ADCS r6, r6, #0x0
ADCS r7, r7, #0x0
ADCS r8, r8, #0x0
ADCS r9, r9, #0x0
BFC r11, #31, #1
ADCS r10, r10, #0x0
ADC r11, r11, #0x0
STM r0, {r4, r5, r6, r7, r8, r9, r10, r11}
MOV r0, #0x0
ADD sp, sp, #0xc0
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 589 */
.size curve25519,.-curve25519
#endif /* WC_NO_CACHE_RESISTANT */
#endif /* HAVE_CURVE25519 */
#ifdef HAVE_ED25519
.text
.align 4
.globl fe_invert
.type fe_invert, %function
/* void fe_invert(fe r, const fe a)
 * In:  r0 = r (32-byte little-endian field element, result)
 *      r1 = a (32-byte input)
 * Out: r = inverse of a in GF(2^255-19), computed with a fixed
 *      square-and-multiply chain (square counts 1,4,9,19,10,49,99,50,5
 *      in the loops below; standard a^(p-2) inversion ladder -- the
 *      exponent chain matches the curve25519 chain above).
 * Frame: 0x88 bytes. [sp,#128] = saved r, [sp,#132] = saved a.
 * Temporaries (32 bytes each): t0 = sp, t1 = sp+0x20, t2 = sp+0x40,
 * t3 = sp+0x60. All arithmetic is delegated to fe_sq_op (r0=dst,
 * r1=src) and fe_mul_op (r0=dst, r1=src1, r2=src2).
 * In the counted loops, r12 holds the iteration count and is saved
 * with PUSH/POP around the call because fe_sq_op does not preserve it.
 */
fe_invert:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
SUB sp, sp, #0x88
/* Invert */
STR r0, [sp, #128]
STR r1, [sp, #132]
LDR r1, [sp, #132]
MOV r0, sp
BL fe_sq_op
MOV r1, sp
ADD r0, sp, #0x20
BL fe_sq_op
ADD r1, sp, #0x20
ADD r0, sp, #0x20
BL fe_sq_op
ADD r2, sp, #0x20
LDR r1, [sp, #132]
ADD r0, sp, #0x20
BL fe_mul_op
ADD r2, sp, #0x20
MOV r1, sp
MOV r0, sp
BL fe_mul_op
MOV r1, sp
ADD r0, sp, #0x40
BL fe_sq_op
ADD r2, sp, #0x40
ADD r1, sp, #0x20
ADD r0, sp, #0x20
BL fe_mul_op
ADD r1, sp, #0x20
ADD r0, sp, #0x40
BL fe_sq_op
/* 4 further squarings of t2 (sp+0x40). */
MOV r12, #0x4
L_fe_invert1:
ADD r1, sp, #0x40
ADD r0, sp, #0x40
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_fe_invert1
#else
BNE.N L_fe_invert1
#endif
ADD r2, sp, #0x20
ADD r1, sp, #0x40
ADD r0, sp, #0x20
BL fe_mul_op
ADD r1, sp, #0x20
ADD r0, sp, #0x40
BL fe_sq_op
/* 9 further squarings of t2. */
MOV r12, #0x9
L_fe_invert2:
ADD r1, sp, #0x40
ADD r0, sp, #0x40
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_fe_invert2
#else
BNE.N L_fe_invert2
#endif
ADD r2, sp, #0x20
ADD r1, sp, #0x40
ADD r0, sp, #0x40
BL fe_mul_op
ADD r1, sp, #0x40
ADD r0, sp, #0x60
BL fe_sq_op
/* 19 further squarings of t3 (sp+0x60). */
MOV r12, #0x13
L_fe_invert3:
ADD r1, sp, #0x60
ADD r0, sp, #0x60
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_fe_invert3
#else
BNE.N L_fe_invert3
#endif
ADD r2, sp, #0x40
ADD r1, sp, #0x60
ADD r0, sp, #0x40
BL fe_mul_op
/* 10 further squarings of t2. */
MOV r12, #0xa
L_fe_invert4:
ADD r1, sp, #0x40
ADD r0, sp, #0x40
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_fe_invert4
#else
BNE.N L_fe_invert4
#endif
ADD r2, sp, #0x20
ADD r1, sp, #0x40
ADD r0, sp, #0x20
BL fe_mul_op
ADD r1, sp, #0x20
ADD r0, sp, #0x40
BL fe_sq_op
/* 49 further squarings of t2. */
MOV r12, #0x31
L_fe_invert5:
ADD r1, sp, #0x40
ADD r0, sp, #0x40
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_fe_invert5
#else
BNE.N L_fe_invert5
#endif
ADD r2, sp, #0x20
ADD r1, sp, #0x40
ADD r0, sp, #0x40
BL fe_mul_op
ADD r1, sp, #0x40
ADD r0, sp, #0x60
BL fe_sq_op
/* 99 further squarings of t3. */
MOV r12, #0x63
L_fe_invert6:
ADD r1, sp, #0x60
ADD r0, sp, #0x60
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_fe_invert6
#else
BNE.N L_fe_invert6
#endif
ADD r2, sp, #0x40
ADD r1, sp, #0x60
ADD r0, sp, #0x40
BL fe_mul_op
/* 50 further squarings of t2. */
MOV r12, #0x32
L_fe_invert7:
ADD r1, sp, #0x40
ADD r0, sp, #0x40
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_fe_invert7
#else
BNE.N L_fe_invert7
#endif
ADD r2, sp, #0x20
ADD r1, sp, #0x40
ADD r0, sp, #0x20
BL fe_mul_op
/* 5 further squarings of t1 (sp+0x20). */
MOV r12, #0x5
L_fe_invert8:
ADD r1, sp, #0x20
ADD r0, sp, #0x20
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_fe_invert8
#else
BNE.N L_fe_invert8
#endif
/* Final multiply writes straight into the caller's result buffer. */
MOV r2, sp
ADD r1, sp, #0x20
LDR r0, [sp, #128]
BL fe_mul_op
/* Restore the original argument registers before returning. */
LDR r1, [sp, #132]
LDR r0, [sp, #128]
ADD sp, sp, #0x88
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 292 */
.size fe_invert,.-fe_invert
#ifdef WOLFSSL_SP_NO_UMAAL
.text
.align 4
.globl fe_sq2
.type fe_sq2, %function
/* void fe_sq2(fe r, const fe a)
 * UMULL/UMLAL-only variant (WOLFSSL_SP_NO_UMAAL).
 * In:  r0 = r, r1 = a (8 x 32-bit words).  Out: r = 2*a^2 mod 2^255-19.
 * Strategy: schoolbook squaring -- accumulate all off-diagonal products
 * A[i]*A[j] (i<j), double them, add the diagonal A[i]*A[i] terms to get
 * the full 512-bit square, then fold the high 256 bits back with the
 * constants 38 (0x26) and 19 (0x13), and finally double the result.
 * r0 is cleared early and kept at zero so "ADC rX, r0, #0" can capture
 * a carry into a fresh accumulator word.
 * Frame: 0x44 bytes; [sp,#64] = saved r; sp..sp+0x3c = product words.
 */
fe_sq2:
PUSH {lr}
SUB sp, sp, #0x44
STR r0, [sp, #64]
/* Square * 2 */
MOV r0, #0x0
LDR r12, [r1]
/* A[0] * A[1] */
LDR lr, [r1, #4]
UMULL r4, r5, r12, lr
/* A[0] * A[3] */
LDR lr, [r1, #12]
UMULL r6, r7, r12, lr
/* A[0] * A[5] */
LDR lr, [r1, #20]
UMULL r8, r9, r12, lr
/* A[0] * A[7] */
LDR lr, [r1, #28]
UMULL r10, r3, r12, lr
/* A[0] * A[2] */
LDR lr, [r1, #8]
MOV r11, #0x0
UMLAL r5, r11, r12, lr
ADDS r6, r6, r11
/* A[0] * A[4] */
LDR lr, [r1, #16]
ADCS r7, r7, #0x0
ADC r11, r0, #0x0
UMLAL r7, r11, r12, lr
ADDS r8, r8, r11
/* A[0] * A[6] */
LDR lr, [r1, #24]
ADCS r9, r9, #0x0
ADC r11, r0, #0x0
UMLAL r9, r11, r12, lr
ADDS r10, r10, r11
ADCS r3, r3, #0x0
STR r4, [sp, #4]
STR r5, [sp, #8]
/* A[1] * A[2] */
LDR r12, [r1, #4]
LDR lr, [r1, #8]
MOV r11, #0x0
UMLAL r6, r11, r12, lr
STR r6, [sp, #12]
ADDS r7, r7, r11
/* A[1] * A[3] */
LDR lr, [r1, #12]
ADC r11, r0, #0x0
UMLAL r7, r11, r12, lr
STR r7, [sp, #16]
ADDS r8, r8, r11
/* A[1] * A[4] */
LDR lr, [r1, #16]
ADC r11, r0, #0x0
UMLAL r8, r11, r12, lr
ADDS r9, r9, r11
/* A[1] * A[5] */
LDR lr, [r1, #20]
ADC r11, r0, #0x0
UMLAL r9, r11, r12, lr
ADDS r10, r10, r11
/* A[1] * A[6] */
LDR lr, [r1, #24]
ADC r11, r0, #0x0
UMLAL r10, r11, r12, lr
ADDS r3, r3, r11
/* A[1] * A[7] */
LDR lr, [r1, #28]
ADC r4, r0, #0x0
UMLAL r3, r4, r12, lr
/* A[2] * A[3] */
LDR r12, [r1, #8]
LDR lr, [r1, #12]
MOV r11, #0x0
UMLAL r8, r11, r12, lr
STR r8, [sp, #20]
ADDS r9, r9, r11
/* A[2] * A[4] */
LDR lr, [r1, #16]
ADC r11, r0, #0x0
UMLAL r9, r11, r12, lr
STR r9, [sp, #24]
ADDS r10, r10, r11
/* A[2] * A[5] */
LDR lr, [r1, #20]
ADC r11, r0, #0x0
UMLAL r10, r11, r12, lr
ADDS r3, r3, r11
/* A[2] * A[6] */
LDR lr, [r1, #24]
ADC r11, r0, #0x0
UMLAL r3, r11, r12, lr
ADDS r4, r4, r11
/* A[2] * A[7] */
LDR lr, [r1, #28]
ADC r5, r0, #0x0
UMLAL r4, r5, r12, lr
/* A[3] * A[4] */
LDR r12, [r1, #12]
LDR lr, [r1, #16]
MOV r11, #0x0
UMLAL r10, r11, r12, lr
STR r10, [sp, #28]
ADDS r3, r3, r11
/* A[3] * A[5] */
LDR lr, [r1, #20]
ADC r11, r0, #0x0
UMLAL r3, r11, r12, lr
ADDS r4, r4, r11
/* A[3] * A[6] */
LDR lr, [r1, #24]
ADC r11, r0, #0x0
UMLAL r4, r11, r12, lr
ADDS r5, r5, r11
/* A[3] * A[7] */
LDR lr, [r1, #28]
ADC r6, r0, #0x0
UMLAL r5, r6, r12, lr
/* A[4] * A[5] */
LDR r12, [r1, #16]
LDR lr, [r1, #20]
MOV r11, #0x0
UMLAL r4, r11, r12, lr
ADDS r5, r5, r11
/* A[4] * A[6] */
LDR lr, [r1, #24]
ADC r11, r0, #0x0
UMLAL r5, r11, r12, lr
ADDS r6, r6, r11
/* A[4] * A[7] */
LDR lr, [r1, #28]
ADC r7, r0, #0x0
UMLAL r6, r7, r12, lr
/* A[5] * A[6] */
LDR r12, [r1, #20]
LDR lr, [r1, #24]
MOV r11, #0x0
UMLAL r6, r11, r12, lr
ADDS r7, r7, r11
/* A[5] * A[7] */
LDR lr, [r1, #28]
ADC r8, r0, #0x0
UMLAL r7, r8, r12, lr
/* A[6] * A[7] */
LDR r12, [r1, #24]
LDR lr, [r1, #28]
MOV r9, #0x0
UMLAL r8, r9, r12, lr
/* Save high off-diagonal words, then double all off-diagonal terms
 * in place (words 1..15; word 0 stays zero until the diagonal pass). */
ADD lr, sp, #0x20
STM lr, {r3, r4, r5, r6, r7, r8, r9}
ADD lr, sp, #0x4
LDM lr, {r4, r5, r6, r7, r8, r9, r10}
ADDS r4, r4, r4
ADCS r5, r5, r5
ADCS r6, r6, r6
ADCS r7, r7, r7
ADCS r8, r8, r8
ADCS r9, r9, r9
ADCS r10, r10, r10
STM lr!, {r4, r5, r6, r7, r8, r9, r10}
LDM lr, {r3, r4, r5, r6, r7, r8, r9}
ADCS r3, r3, r3
ADCS r4, r4, r4
ADCS r5, r5, r5
ADCS r6, r6, r6
ADCS r7, r7, r7
ADCS r8, r8, r8
ADCS r9, r9, r9
ADC r10, r0, #0x0
STM lr, {r3, r4, r5, r6, r7, r8, r9, r10}
/* Add the diagonal terms A[i]*A[i] into the doubled cross products. */
ADD lr, sp, #0x4
LDM lr, {r4, r5, r6, r7, r8, r9, r10}
MOV lr, sp
/* A[0] * A[0] */
LDR r12, [r1]
UMULL r3, r11, r12, r12
ADDS r4, r4, r11
/* A[1] * A[1] */
LDR r12, [r1, #4]
ADCS r5, r5, #0x0
ADC r11, r0, #0x0
UMLAL r5, r11, r12, r12
ADDS r6, r6, r11
/* A[2] * A[2] */
LDR r12, [r1, #8]
ADCS r7, r7, #0x0
ADC r11, r0, #0x0
UMLAL r7, r11, r12, r12
ADDS r8, r8, r11
/* A[3] * A[3] */
LDR r12, [r1, #12]
ADCS r9, r9, #0x0
ADC r11, r0, #0x0
UMLAL r9, r11, r12, r12
ADDS r10, r10, r11
STM lr!, {r3, r4, r5, r6, r7, r8, r9, r10}
LDM lr, {r3, r4, r5, r6, r7, r8, r9, r10}
/* A[4] * A[4] */
LDR r12, [r1, #16]
ADCS r3, r3, #0x0
ADC r11, r0, #0x0
UMLAL r3, r11, r12, r12
ADDS r4, r4, r11
/* A[5] * A[5] */
LDR r12, [r1, #20]
ADCS r5, r5, #0x0
ADC r11, r0, #0x0
UMLAL r5, r11, r12, r12
ADDS r6, r6, r11
/* A[6] * A[6] */
LDR r12, [r1, #24]
ADCS r7, r7, #0x0
ADC r11, r0, #0x0
UMLAL r7, r11, r12, r12
ADDS r8, r8, r11
/* A[7] * A[7] */
LDR r12, [r1, #28]
ADCS r9, r9, #0x0
ADC r10, r10, #0x0
UMLAL r9, r10, r12, r12
/* Reduce */
LDR r2, [sp, #28]
MOV lr, sp
MOV r12, #0x26
UMULL r10, r11, r10, r12
ADDS r10, r10, r2
ADC r11, r11, #0x0
MOV r12, #0x13
LSL r11, r11, #1
ORR r11, r11, r10, LSR #31
MUL r11, r11, r12
LDM lr!, {r1, r2}
MOV r12, #0x26
ADDS r1, r1, r11
ADC r11, r0, #0x0
UMLAL r1, r11, r3, r12
ADDS r2, r2, r11
ADC r11, r0, #0x0
UMLAL r2, r11, r4, r12
LDM lr!, {r3, r4}
ADDS r3, r3, r11
ADC r11, r0, #0x0
UMLAL r3, r11, r5, r12
ADDS r4, r4, r11
ADC r11, r0, #0x0
UMLAL r4, r11, r6, r12
LDM lr!, {r5, r6}
ADDS r5, r5, r11
ADC r11, r0, #0x0
UMLAL r5, r11, r7, r12
ADDS r6, r6, r11
ADC r11, r0, #0x0
UMLAL r6, r11, r8, r12
LDM lr!, {r7, r8}
ADDS r7, r7, r11
ADC r11, r0, #0x0
UMLAL r7, r11, r9, r12
BFC r10, #31, #1
ADDS r8, r10, r11
/* Reduce if top bit set */
MOV r12, #0x13
AND r11, r12, r8, ASR #31
ADDS r1, r1, r11
ADCS r2, r2, #0x0
ADCS r3, r3, #0x0
ADCS r4, r4, #0x0
ADCS r5, r5, #0x0
ADCS r6, r6, #0x0
BFC r8, #31, #1
ADCS r7, r7, #0x0
ADC r8, r8, #0x0
/* Double */
ADDS r1, r1, r1
ADCS r2, r2, r2
ADCS r3, r3, r3
ADCS r4, r4, r4
ADCS r5, r5, r5
ADCS r6, r6, r6
ADCS r7, r7, r7
ADC r8, r8, r8
/* Reduce if top bit set */
MOV r12, #0x13
AND r11, r12, r8, ASR #31
ADDS r1, r1, r11
ADCS r2, r2, #0x0
ADCS r3, r3, #0x0
ADCS r4, r4, #0x0
ADCS r5, r5, #0x0
ADCS r6, r6, #0x0
BFC r8, #31, #1
ADCS r7, r7, #0x0
ADC r8, r8, #0x0
/* Store */
LDR r0, [sp, #64]
STM r0, {r1, r2, r3, r4, r5, r6, r7, r8}
ADD sp, sp, #0x44
POP {pc}
/* Cycle Count = 385 */
.size fe_sq2,.-fe_sq2
#else
.text
.align 4
.globl fe_sq2
.type fe_sq2, %function
/* void fe_sq2(fe r, const fe a)
 * UMAAL variant (used when the core supports UMAAL).
 * In:  r0 = r, r1 = a.  Out: r = 2*a^2 mod 2^255-19.
 * All 8 input words are kept in r0-r7 for the whole multiply; low
 * product words are spilled to the stack and retrieved later with POP.
 * Note the stack discipline: STRD saves the r/a pointers at sp+28/32,
 * the product words occupy sp..sp+24, and the POP sequences below walk
 * up through the product words until the final "POP {r12, lr}" recovers
 * the saved pointers.  lr is held at zero as a UMAAL zero operand.
 * Reduction folds the high half with 0x25/0x13/0x26 (37, 19, 38),
 * then the result is doubled and conditionally reduced again.
 */
fe_sq2:
PUSH {lr}
SUB sp, sp, #0x24
STRD r0, r1, [sp, #28]
LDM r1, {r0, r1, r2, r3, r4, r5, r6, r7}
/* Square * 2 */
UMULL r9, r10, r0, r0
UMULL r11, r12, r0, r1
ADDS r11, r11, r11
MOV lr, #0x0
UMAAL r10, r11, lr, lr
STM sp, {r9, r10}
MOV r8, lr
UMAAL r8, r12, r0, r2
ADCS r8, r8, r8
UMAAL r8, r11, r1, r1
UMULL r9, r10, r0, r3
UMAAL r9, r12, r1, r2
ADCS r9, r9, r9
UMAAL r9, r11, lr, lr
STRD r8, r9, [sp, #8]
MOV r9, lr
UMAAL r9, r10, r0, r4
UMAAL r9, r12, r1, r3
ADCS r9, r9, r9
UMAAL r9, r11, r2, r2
STR r9, [sp, #16]
UMULL r9, r8, r0, r5
UMAAL r9, r12, r1, r4
UMAAL r9, r10, r2, r3
ADCS r9, r9, r9
UMAAL r9, r11, lr, lr
STR r9, [sp, #20]
MOV r9, lr
UMAAL r9, r8, r0, r6
UMAAL r9, r12, r1, r5
UMAAL r9, r10, r2, r4
ADCS r9, r9, r9
UMAAL r9, r11, r3, r3
STR r9, [sp, #24]
UMULL r0, r9, r0, r7
UMAAL r0, r8, r1, r6
UMAAL r0, r12, r2, r5
UMAAL r0, r10, r3, r4
ADCS r0, r0, r0
UMAAL r0, r11, lr, lr
/* R[7] = r0 */
UMAAL r9, r8, r1, r7
UMAAL r9, r10, r2, r6
UMAAL r12, r9, r3, r5
ADCS r12, r12, r12
UMAAL r12, r11, r4, r4
/* R[8] = r12 */
UMAAL r9, r8, r2, r7
UMAAL r10, r9, r3, r6
MOV r2, lr
UMAAL r10, r2, r4, r5
ADCS r10, r10, r10
UMAAL r11, r10, lr, lr
/* R[9] = r11 */
UMAAL r2, r8, r3, r7
UMAAL r2, r9, r4, r6
ADCS r3, r2, r2
UMAAL r10, r3, r5, r5
/* R[10] = r10 */
MOV r1, lr
UMAAL r1, r8, r4, r7
UMAAL r1, r9, r5, r6
ADCS r4, r1, r1
UMAAL r3, r4, lr, lr
/* R[11] = r3 */
UMAAL r8, r9, r5, r7
ADCS r8, r8, r8
UMAAL r4, r8, r6, r6
/* R[12] = r4 */
MOV r5, lr
UMAAL r5, r9, r6, r7
ADCS r5, r5, r5
UMAAL r8, r5, lr, lr
/* R[13] = r8 */
ADCS r9, r9, r9
UMAAL r9, r5, r7, r7
ADCS r7, r5, lr
/* R[14] = r9 */
/* R[15] = r7 */
/* Reduce */
MOV r6, #0x25
UMAAL r7, r0, r7, r6
MOV r6, #0x13
LSL r0, r0, #1
ORR r0, r0, r7, LSR #31
MUL lr, r0, r6
/* POPs below consume the spilled low product words R[0..6]. */
POP {r0, r1}
MOV r6, #0x26
UMAAL r0, lr, r12, r6
UMAAL r1, lr, r11, r6
MOV r12, r3
MOV r11, r4
POP {r2, r3, r4}
UMAAL r2, lr, r10, r6
UMAAL r3, lr, r12, r6
UMAAL r4, lr, r11, r6
MOV r12, r6
POP {r5, r6}
UMAAL r5, lr, r8, r12
BFC r7, #31, #1
UMAAL r6, lr, r9, r12
ADD r7, r7, lr
/* Reduce if top bit set */
MOV r11, #0x13
AND r12, r11, r7, ASR #31
ADDS r0, r0, r12
ADCS r1, r1, #0x0
ADCS r2, r2, #0x0
ADCS r3, r3, #0x0
ADCS r4, r4, #0x0
ADCS r5, r5, #0x0
BFC r7, #31, #1
ADCS r6, r6, #0x0
ADC r7, r7, #0x0
/* Double */
ADDS r0, r0, r0
ADCS r1, r1, r1
ADCS r2, r2, r2
ADCS r3, r3, r3
ADCS r4, r4, r4
ADCS r5, r5, r5
ADCS r6, r6, r6
ADC r7, r7, r7
/* Reduce if top bit set */
MOV r11, #0x13
AND r12, r11, r7, ASR #31
ADDS r0, r0, r12
ADCS r1, r1, #0x0
ADCS r2, r2, #0x0
ADCS r3, r3, #0x0
ADCS r4, r4, #0x0
ADCS r5, r5, #0x0
BFC r7, #31, #1
ADCS r6, r6, #0x0
ADC r7, r7, #0x0
/* Recover saved r (into r12) and a (into lr) pointers. */
POP {r12, lr}
/* Store */
STM r12, {r0, r1, r2, r3, r4, r5, r6, r7}
MOV r0, r12
MOV r1, lr
POP {pc}
/* Cycle Count = 213 */
.size fe_sq2,.-fe_sq2
#endif /* WOLFSSL_SP_NO_UMAAL */
.text
.align 4
.globl fe_pow22523
.type fe_pow22523, %function
/* void fe_pow22523(fe r, const fe a)
 * In:  r0 = r, r1 = a.
 * Out: r = a raised to the power the name denotes ((p-5)/8 = 2^252 - 3
 *      for p = 2^255-19 -- the chain mirrors fe_invert with a shorter
 *      tail; TODO confirm against the C reference).
 * Frame: 0x68 bytes. [sp,#96] = saved r, [sp,#100] = saved a.
 * Temporaries: t0 = sp, t1 = sp+0x20, t2 = sp+0x40 (32 bytes each).
 * In the counted loops r12 is the iteration count, preserved with
 * PUSH/POP around the fe_sq_op call.
 */
fe_pow22523:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
SUB sp, sp, #0x68
/* pow22523 */
STR r0, [sp, #96]
STR r1, [sp, #100]
LDR r1, [sp, #100]
MOV r0, sp
BL fe_sq_op
MOV r1, sp
ADD r0, sp, #0x20
BL fe_sq_op
ADD r1, sp, #0x20
ADD r0, sp, #0x20
BL fe_sq_op
ADD r2, sp, #0x20
LDR r1, [sp, #100]
ADD r0, sp, #0x20
BL fe_mul_op
ADD r2, sp, #0x20
MOV r1, sp
MOV r0, sp
BL fe_mul_op
MOV r1, sp
MOV r0, sp
BL fe_sq_op
MOV r2, sp
ADD r1, sp, #0x20
MOV r0, sp
BL fe_mul_op
MOV r1, sp
ADD r0, sp, #0x20
BL fe_sq_op
/* 4 further squarings of t1. */
MOV r12, #0x4
L_fe_pow22523_1:
ADD r1, sp, #0x20
ADD r0, sp, #0x20
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_fe_pow22523_1
#else
BNE.N L_fe_pow22523_1
#endif
MOV r2, sp
ADD r1, sp, #0x20
MOV r0, sp
BL fe_mul_op
MOV r1, sp
ADD r0, sp, #0x20
BL fe_sq_op
/* 9 further squarings of t1. */
MOV r12, #0x9
L_fe_pow22523_2:
ADD r1, sp, #0x20
ADD r0, sp, #0x20
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_fe_pow22523_2
#else
BNE.N L_fe_pow22523_2
#endif
MOV r2, sp
ADD r1, sp, #0x20
ADD r0, sp, #0x20
BL fe_mul_op
ADD r1, sp, #0x20
ADD r0, sp, #0x40
BL fe_sq_op
/* 19 further squarings of t2. */
MOV r12, #0x13
L_fe_pow22523_3:
ADD r1, sp, #0x40
ADD r0, sp, #0x40
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_fe_pow22523_3
#else
BNE.N L_fe_pow22523_3
#endif
ADD r2, sp, #0x20
ADD r1, sp, #0x40
ADD r0, sp, #0x20
BL fe_mul_op
/* 10 further squarings of t1. */
MOV r12, #0xa
L_fe_pow22523_4:
ADD r1, sp, #0x20
ADD r0, sp, #0x20
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_fe_pow22523_4
#else
BNE.N L_fe_pow22523_4
#endif
MOV r2, sp
ADD r1, sp, #0x20
MOV r0, sp
BL fe_mul_op
MOV r1, sp
ADD r0, sp, #0x20
BL fe_sq_op
/* 49 further squarings of t1. */
MOV r12, #0x31
L_fe_pow22523_5:
ADD r1, sp, #0x20
ADD r0, sp, #0x20
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_fe_pow22523_5
#else
BNE.N L_fe_pow22523_5
#endif
MOV r2, sp
ADD r1, sp, #0x20
ADD r0, sp, #0x20
BL fe_mul_op
ADD r1, sp, #0x20
ADD r0, sp, #0x40
BL fe_sq_op
/* 99 further squarings of t2. */
MOV r12, #0x63
L_fe_pow22523_6:
ADD r1, sp, #0x40
ADD r0, sp, #0x40
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_fe_pow22523_6
#else
BNE.N L_fe_pow22523_6
#endif
ADD r2, sp, #0x20
ADD r1, sp, #0x40
ADD r0, sp, #0x20
BL fe_mul_op
/* 50 further squarings of t1. */
MOV r12, #0x32
L_fe_pow22523_7:
ADD r1, sp, #0x20
ADD r0, sp, #0x20
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_fe_pow22523_7
#else
BNE.N L_fe_pow22523_7
#endif
MOV r2, sp
ADD r1, sp, #0x20
MOV r0, sp
BL fe_mul_op
/* 2 final squarings of t0. */
MOV r12, #0x2
L_fe_pow22523_8:
MOV r1, sp
MOV r0, sp
PUSH {r12}
BL fe_sq_op
POP {r12}
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
BNE L_fe_pow22523_8
#else
BNE.N L_fe_pow22523_8
#endif
/* Final multiply by the original input, into the caller's buffer. */
LDR r2, [sp, #100]
MOV r1, sp
LDR r0, [sp, #96]
BL fe_mul_op
/* Restore argument registers before returning. */
LDR r1, [sp, #100]
LDR r0, [sp, #96]
ADD sp, sp, #0x68
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 293 */
.size fe_pow22523,.-fe_pow22523
.text
.align 4
.globl ge_p1p1_to_p2
.type ge_p1p1_to_p2, %function
/* void ge_p1p1_to_p2(ge_p2 r, const ge_p1p1 p)
 * In: r0 = r, r1 = p.  Point coordinates are 32-byte field elements at
 * byte offsets 0x00, 0x20, 0x40, 0x60 (X, Y, Z, T layout -- TODO
 * confirm against the C struct).  Three fe_mul_op calls:
 *   r[0x00] = p[0x00] * p[0x60]
 *   r[0x20] = p[0x20] * p[0x40]
 *   r[0x40] = p[0x40] * p[0x60]
 * r0/r1 are saved at [sp]/[sp,#4] because fe_mul_op clobbers them.
 */
ge_p1p1_to_p2:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
SUB sp, sp, #0x8
STR r0, [sp]
STR r1, [sp, #4]
ADD r2, r1, #0x60
BL fe_mul_op
LDR r0, [sp]
LDR r1, [sp, #4]
ADD r2, r1, #0x40
ADD r1, r1, #0x20
ADD r0, r0, #0x20
BL fe_mul_op
LDR r0, [sp]
LDR r1, [sp, #4]
ADD r2, r1, #0x60
ADD r1, r1, #0x40
ADD r0, r0, #0x40
BL fe_mul_op
ADD sp, sp, #0x8
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 53 */
.size ge_p1p1_to_p2,.-ge_p1p1_to_p2
.text
.align 4
.globl ge_p1p1_to_p3
.type ge_p1p1_to_p3, %function
/* void ge_p1p1_to_p3(ge_p3 r, const ge_p1p1 p)
 * In: r0 = r, r1 = p.  Same as ge_p1p1_to_p2 plus a fourth product for
 * the extended coordinate:
 *   r[0x00] = p[0x00] * p[0x60]
 *   r[0x20] = p[0x20] * p[0x40]
 *   r[0x40] = p[0x40] * p[0x60]
 *   r[0x60] = p[0x00] * p[0x20]
 * r0/r1 are reloaded from [sp]/[sp,#4] before each call since
 * fe_mul_op clobbers them.
 */
ge_p1p1_to_p3:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
SUB sp, sp, #0x8
STR r0, [sp]
STR r1, [sp, #4]
ADD r2, r1, #0x60
BL fe_mul_op
LDR r0, [sp]
LDR r1, [sp, #4]
ADD r2, r1, #0x40
ADD r1, r1, #0x20
ADD r0, r0, #0x20
BL fe_mul_op
LDR r0, [sp]
LDR r1, [sp, #4]
ADD r2, r1, #0x60
ADD r1, r1, #0x40
ADD r0, r0, #0x40
BL fe_mul_op
LDR r0, [sp]
LDR r1, [sp, #4]
ADD r2, r1, #0x20
ADD r0, r0, #0x60
BL fe_mul_op
ADD sp, sp, #0x8
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 63 */
.size ge_p1p1_to_p3,.-ge_p1p1_to_p3
.text
.align 4
.globl ge_p2_dbl
.type ge_p2_dbl, %function
/* void ge_p2_dbl(ge_p1p1 r, const ge_p2 p)
 * In: r0 = r, r1 = p.  Point doubling into P1P1 form.  Coordinates are
 * 32-byte field elements at offsets 0x00/0x20/0x40(/0x60).
 * The sequence below relies on the fe_* helpers leaving r0 equal to
 * their destination pointer on return (note the ADD/SUB r0 address
 * arithmetic between calls instead of reloads).
 */
ge_p2_dbl:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
SUB sp, sp, #0x8
STR r0, [sp]
STR r1, [sp, #4]
BL fe_sq_op
LDR r0, [sp]
LDR r1, [sp, #4]
ADD r1, r1, #0x20
ADD r0, r0, #0x40
BL fe_sq_op
LDR r0, [sp]
LDR r1, [sp, #4]
ADD r2, r1, #0x20
ADD r0, r0, #0x20
BL fe_add_op
MOV r1, r0
ADD r0, r0, #0x40
BL fe_sq_op
LDR r0, [sp]
MOV r3, r0
ADD r2, r0, #0x40
ADD r1, r0, #0x40
ADD r0, r0, #0x20
BL fe_add_sub_op
MOV r2, r0
ADD r1, r0, #0x40
SUB r0, r0, #0x20
BL fe_sub_op
LDR r1, [sp, #4]
ADD r1, r1, #0x40
ADD r0, r0, #0x60
BL fe_sq2
SUB r2, r0, #0x20
MOV r1, r0
BL fe_sub_op
ADD sp, sp, #0x8
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 87 */
.size ge_p2_dbl,.-ge_p2_dbl
.text
.align 4
.globl ge_madd
.type ge_madd, %function
/* void ge_madd(ge_p1p1 r, const ge_p3 p, const ge_precomp q)
 * In: r0 = r, r1 = p, r2 = q.  Mixed addition (q in precomputed form).
 * Pointers are spilled at [sp]/[sp,#4]/[sp,#8] across the fe_* calls.
 * The inline "Double" section computes 2*p->Z (p+0x40) modulo
 * 2^255-19: the carry out and bit 255 are folded back by multiplying
 * by 19 (0x13), with BFC clearing bit 31 of the top word.
 */
ge_madd:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
SUB sp, sp, #0xc
STR r0, [sp]
STR r1, [sp, #4]
STR r2, [sp, #8]
MOV r2, r1
ADD r1, r1, #0x20
BL fe_add_op
LDR r1, [sp, #4]
MOV r2, r1
ADD r1, r1, #0x20
ADD r0, r0, #0x20
BL fe_sub_op
LDR r2, [sp, #8]
SUB r1, r0, #0x20
ADD r0, r0, #0x20
BL fe_mul_op
LDR r0, [sp]
LDR r2, [sp, #8]
ADD r2, r2, #0x20
ADD r1, r0, #0x20
ADD r0, r0, #0x20
BL fe_mul_op
LDR r0, [sp]
LDR r1, [sp, #8]
LDR r2, [sp, #4]
ADD r2, r2, #0x60
ADD r1, r1, #0x40
ADD r0, r0, #0x60
BL fe_mul_op
LDR r0, [sp]
ADD r3, r0, #0x20
ADD r2, r0, #0x40
MOV r1, r0
ADD r0, r0, #0x20
BL fe_add_sub_op
LDR r1, [sp, #4]
ADD r1, r1, #0x40
ADD r0, r0, #0x20
/* Double */
LDM r1, {r4, r5, r6, r7, r8, r9, r10, r11}
ADDS r4, r4, r4
ADCS r5, r5, r5
ADCS r6, r6, r6
ADCS r7, r7, r7
ADCS r8, r8, r8
ADCS r9, r9, r9
ADCS r10, r10, r10
MOV lr, #0x0
ADCS r11, r11, r11
ADC lr, lr, #0x0
/* Fold (carry:bit255) back in: r12 = overflow * 19. */
MOV r12, #0x13
LSL lr, lr, #1
ORR lr, lr, r11, LSR #31
MUL r12, lr, r12
ADDS r4, r4, r12
ADCS r5, r5, #0x0
ADCS r6, r6, #0x0
ADCS r7, r7, #0x0
ADCS r8, r8, #0x0
ADCS r9, r9, #0x0
BFC r11, #31, #1
ADCS r10, r10, #0x0
ADC r11, r11, #0x0
STM r0, {r4, r5, r6, r7, r8, r9, r10, r11}
/* Done Double */
ADD r3, r0, #0x20
ADD r1, r0, #0x20
BL fe_add_sub_op
ADD sp, sp, #0xc
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 136 */
.size ge_madd,.-ge_madd
.text
.align 4
.globl ge_msub
.type ge_msub, %function
/* void ge_msub(ge_p1p1 r, const ge_p3 p, const ge_precomp q)
 * In: r0 = r, r1 = p, r2 = q.  Mixed subtraction; identical structure
 * to ge_madd except q's two precomp multipliers are consumed in the
 * opposite order (q+0x20 first, then q+0x00) and the final
 * fe_add_sub_op has its destinations swapped.
 * The inline "Double" section computes 2*p->Z mod 2^255-19 with the
 * overflow folded back via multiplication by 19 (0x13).
 */
ge_msub:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
SUB sp, sp, #0xc
STR r0, [sp]
STR r1, [sp, #4]
STR r2, [sp, #8]
MOV r2, r1
ADD r1, r1, #0x20
BL fe_add_op
LDR r1, [sp, #4]
MOV r2, r1
ADD r1, r1, #0x20
ADD r0, r0, #0x20
BL fe_sub_op
LDR r2, [sp, #8]
ADD r2, r2, #0x20
SUB r1, r0, #0x20
ADD r0, r0, #0x20
BL fe_mul_op
LDR r0, [sp]
LDR r2, [sp, #8]
ADD r1, r0, #0x20
ADD r0, r0, #0x20
BL fe_mul_op
LDR r0, [sp]
LDR r1, [sp, #8]
LDR r2, [sp, #4]
ADD r2, r2, #0x60
ADD r1, r1, #0x40
ADD r0, r0, #0x60
BL fe_mul_op
LDR r0, [sp]
ADD r3, r0, #0x20
ADD r2, r0, #0x40
MOV r1, r0
ADD r0, r0, #0x20
BL fe_add_sub_op
LDR r1, [sp, #4]
ADD r1, r1, #0x40
ADD r0, r0, #0x20
/* Double */
LDM r1, {r4, r5, r6, r7, r8, r9, r10, r11}
ADDS r4, r4, r4
ADCS r5, r5, r5
ADCS r6, r6, r6
ADCS r7, r7, r7
ADCS r8, r8, r8
ADCS r9, r9, r9
ADCS r10, r10, r10
MOV lr, #0x0
ADCS r11, r11, r11
ADC lr, lr, #0x0
/* Fold (carry:bit255) back in: r12 = overflow * 19. */
MOV r12, #0x13
LSL lr, lr, #1
ORR lr, lr, r11, LSR #31
MUL r12, lr, r12
ADDS r4, r4, r12
ADCS r5, r5, #0x0
ADCS r6, r6, #0x0
ADCS r7, r7, #0x0
ADCS r8, r8, #0x0
ADCS r9, r9, #0x0
BFC r11, #31, #1
ADCS r10, r10, #0x0
ADC r11, r11, #0x0
STM r0, {r4, r5, r6, r7, r8, r9, r10, r11}
/* Done Double */
ADD r3, r0, #0x20
MOV r1, r0
ADD r0, r0, #0x20
BL fe_add_sub_op
ADD sp, sp, #0xc
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 137 */
.size ge_msub,.-ge_msub
.text
.align 4
.globl ge_add
.type ge_add, %function
/* void ge_add(ge_p1p1 r, const ge_p3 p, const ge_cached q)
 * In: r0 = r, r1 = p, r2 = q.  Full point addition.
 * Pointers are spilled at [sp]/[sp,#4]/[sp,#8]; sp+0xc holds a
 * temporary field element for the doubled Z product.
 * The inline "Double" section computes 2*r[0x00] into sp+0xc, folding
 * the (carry:bit255) overflow back in by multiplying by 19 (0x13).
 */
ge_add:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
SUB sp, sp, #0x2c
STR r0, [sp]
STR r1, [sp, #4]
STR r2, [sp, #8]
MOV r3, r1
ADD r2, r1, #0x20
ADD r1, r0, #0x20
BL fe_add_sub_op
LDR r2, [sp, #8]
MOV r1, r0
ADD r0, r0, #0x40
BL fe_mul_op
LDR r0, [sp]
LDR r2, [sp, #8]
ADD r2, r2, #0x20
ADD r1, r0, #0x20
ADD r0, r0, #0x20
BL fe_mul_op
LDR r0, [sp]
LDR r1, [sp, #8]
LDR r2, [sp, #4]
ADD r2, r2, #0x60
ADD r1, r1, #0x60
ADD r0, r0, #0x60
BL fe_mul_op
LDR r0, [sp]
LDR r1, [sp, #4]
LDR r2, [sp, #8]
ADD r2, r2, #0x40
ADD r1, r1, #0x40
BL fe_mul_op
LDR r1, [sp]
ADD r0, sp, #0xc
/* Double */
LDM r1, {r4, r5, r6, r7, r8, r9, r10, r11}
ADDS r4, r4, r4
ADCS r5, r5, r5
ADCS r6, r6, r6
ADCS r7, r7, r7
ADCS r8, r8, r8
ADCS r9, r9, r9
ADCS r10, r10, r10
MOV lr, #0x0
ADCS r11, r11, r11
ADC lr, lr, #0x0
MOV r12, #0x13
LSL lr, lr, #1
ORR lr, lr, r11, LSR #31
MUL r12, lr, r12
ADDS r4, r4, r12
ADCS r5, r5, #0x0
ADCS r6, r6, #0x0
ADCS r7, r7, #0x0
ADCS r8, r8, #0x0
ADCS r9, r9, #0x0
BFC r11, #31, #1
ADCS r10, r10, #0x0
ADC r11, r11, #0x0
STM r0, {r4, r5, r6, r7, r8, r9, r10, r11}
/* Done Double */
ADD r3, r1, #0x20
ADD r2, r1, #0x40
ADD r0, r1, #0x20
BL fe_add_sub_op
ADD r3, r0, #0x40
ADD r2, sp, #0xc
ADD r1, r0, #0x40
ADD r0, r0, #0x20
BL fe_add_sub_op
ADD sp, sp, #0x2c
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 138 */
.size ge_add,.-ge_add
.text
.align 4
.globl ge_sub
.type ge_sub, %function
/* void ge_sub(ge_p1p1 r, const ge_p3 p, const ge_cached q)
 * In: r0 = r, r1 = p, r2 = q.  Full point subtraction; same structure
 * as ge_add with q's two cached multipliers consumed in the opposite
 * order (q+0x20 first) and the final fe_add_sub_op destinations
 * swapped.  sp+0xc holds the doubled temporary; the inline "Double"
 * folds the overflow back via multiplication by 19 (0x13).
 */
ge_sub:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
SUB sp, sp, #0x2c
STR r0, [sp]
STR r1, [sp, #4]
STR r2, [sp, #8]
MOV r3, r1
ADD r2, r1, #0x20
ADD r1, r0, #0x20
BL fe_add_sub_op
LDR r2, [sp, #8]
ADD r2, r2, #0x20
MOV r1, r0
ADD r0, r0, #0x40
BL fe_mul_op
LDR r0, [sp]
LDR r2, [sp, #8]
ADD r1, r0, #0x20
ADD r0, r0, #0x20
BL fe_mul_op
LDR r0, [sp]
LDR r1, [sp, #8]
LDR r2, [sp, #4]
ADD r2, r2, #0x60
ADD r1, r1, #0x60
ADD r0, r0, #0x60
BL fe_mul_op
LDR r0, [sp]
LDR r1, [sp, #4]
LDR r2, [sp, #8]
ADD r2, r2, #0x40
ADD r1, r1, #0x40
BL fe_mul_op
LDR r1, [sp]
ADD r0, sp, #0xc
/* Double */
LDM r1, {r4, r5, r6, r7, r8, r9, r10, r11}
ADDS r4, r4, r4
ADCS r5, r5, r5
ADCS r6, r6, r6
ADCS r7, r7, r7
ADCS r8, r8, r8
ADCS r9, r9, r9
ADCS r10, r10, r10
MOV lr, #0x0
ADCS r11, r11, r11
ADC lr, lr, #0x0
MOV r12, #0x13
LSL lr, lr, #1
ORR lr, lr, r11, LSR #31
MUL r12, lr, r12
ADDS r4, r4, r12
ADCS r5, r5, #0x0
ADCS r6, r6, #0x0
ADCS r7, r7, #0x0
ADCS r8, r8, #0x0
ADCS r9, r9, #0x0
BFC r11, #31, #1
ADCS r10, r10, #0x0
ADC r11, r11, #0x0
STM r0, {r4, r5, r6, r7, r8, r9, r10, r11}
/* Done Double */
ADD r3, r1, #0x20
ADD r2, r1, #0x40
ADD r0, r1, #0x20
BL fe_add_sub_op
ADD r3, r0, #0x40
ADD r2, sp, #0xc
ADD r1, r0, #0x20
ADD r0, r0, #0x40
BL fe_add_sub_op
ADD sp, sp, #0x2c
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 138 */
.size ge_sub,.-ge_sub
#ifdef WOLFSSL_SP_NO_UMAAL
.text
.align 4
.globl sc_reduce
.type sc_reduce, %function
/* void sc_reduce(byte* s)
 * UMLAL-only variant (WOLFSSL_SP_NO_UMAAL).
 * In: r0 = s, a 64-byte (512-bit) little-endian value; reduced in
 * place modulo the ed25519 group order L (the MOVW/MOVT constant
 * pairs 0xa30a2c13, 0xa7ed9ce5, 0x5d086329, 0xeb210621 are the
 * low words of -L mod 2^256; the "* -5cf5d3ed" style comments below
 * name the negated order words -- see RFC 8032).
 * Structure:
 *   1. Extract bits 252-511 into r2-r9 (shift whole value left 4 so
 *      each word aligns on the 2^252 boundary); lr = bits 504-511.
 *   2. Multiply those top bits by the order words and subtract from
 *      the low half (accumulated at sp..sp+0x34).
 *   3. Conditionally add the order (shifted to bit 125) based on the
 *      sign produced by the word-aligned subtraction.
 *   4. Repeat with the remaining bits 252-376, then a final
 *      conditional add of L selected by the borrow mask in r1.
 * Frame: 0x38 bytes; [sp,#52] = saved s pointer.
 */
sc_reduce:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
SUB sp, sp, #0x38
STR r0, [sp, #52]
/* Load bits 252-511 */
ADD r0, r0, #0x1c
LDM r0, {r1, r2, r3, r4, r5, r6, r7, r8, r9}
LSR lr, r9, #24
LSL r9, r9, #4
ORR r9, r9, r8, LSR #28
LSL r8, r8, #4
ORR r8, r8, r7, LSR #28
LSL r7, r7, #4
ORR r7, r7, r6, LSR #28
LSL r6, r6, #4
ORR r6, r6, r5, LSR #28
LSL r5, r5, #4
ORR r5, r5, r4, LSR #28
LSL r4, r4, #4
ORR r4, r4, r3, LSR #28
LSL r3, r3, #4
ORR r3, r3, r2, LSR #28
LSL r2, r2, #4
ORR r2, r2, r1, LSR #28
BFC r9, #28, #4
SUB r0, r0, #0x1c
/* Add order times bits 504..511 */
MOV r10, #0x2c13
MOVT r10, #0xa30a
MOV r11, #0x9ce5
MOVT r11, #0xa7ed
MOV r1, #0x0
UMLAL r2, r1, r10, lr
ADDS r3, r3, r1
MOV r1, #0x0
ADC r1, r1, #0x0
UMLAL r3, r1, r11, lr
MOV r10, #0x6329
MOVT r10, #0x5d08
MOV r11, #0x621
MOVT r11, #0xeb21
ADDS r4, r4, r1
MOV r1, #0x0
ADC r1, r1, #0x0
UMLAL r4, r1, r10, lr
ADDS r5, r5, r1
MOV r1, #0x0
ADC r1, r1, #0x0
UMLAL r5, r1, r11, lr
ADDS r6, r6, r1
ADCS r7, r7, #0x0
ADCS r8, r8, #0x0
ADC r9, r9, #0x0
SUBS r6, r6, lr
SBCS r7, r7, #0x0
SBCS r8, r8, #0x0
SBC r9, r9, #0x0
/* Sub product of top 8 words and order */
/* Pass 1: accumulate (low words of s) + top*word0 into sp.. */
MOV r12, sp
MOV r1, #0x2c13
MOVT r1, #0xa30a
MOV lr, #0x0
LDM r0!, {r10, r11}
UMLAL r10, lr, r2, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r3, r1
STM r12!, {r10, r11}
LDM r0!, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r4, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r5, r1
STM r12!, {r10, r11}
LDM r0!, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r6, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r7, r1
STM r12!, {r10, r11}
LDM r0!, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r8, r1
BFC r11, #28, #4
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r9, r1
STM r12!, {r10, r11, lr}
SUB r0, r0, #0x10
SUB r12, r12, #0x20
/* Pass 2: add top*word1, offset one word up the accumulator. */
MOV r1, #0x9ce5
MOVT r1, #0xa7ed
MOV lr, #0x0
LDM r12, {r10, r11}
UMLAL r10, lr, r2, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r3, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r4, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r5, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r6, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r7, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r8, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r9, r1
STM r12!, {r10, r11, lr}
SUB r12, r12, #0x20
/* Pass 3: add top*word2. */
MOV r1, #0x6329
MOVT r1, #0x5d08
MOV lr, #0x0
LDM r12, {r10, r11}
UMLAL r10, lr, r2, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r3, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r4, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r5, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r6, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r7, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r8, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r9, r1
STM r12!, {r10, r11, lr}
SUB r12, r12, #0x20
/* Pass 4: add top*word3. */
MOV r1, #0x621
MOVT r1, #0xeb21
MOV lr, #0x0
LDM r12, {r10, r11}
UMLAL r10, lr, r2, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r3, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r4, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r5, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r6, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r7, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r8, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r9, r1
STM r12!, {r10, r11, lr}
SUB r12, r12, #0x20
/* Subtract at 4 * 32 */
LDM r12, {r10, r11}
SUBS r10, r10, r2
SBCS r11, r11, r3
STM r12!, {r10, r11}
LDM r12, {r10, r11}
SBCS r10, r10, r4
SBCS r11, r11, r5
STM r12!, {r10, r11}
LDM r12, {r10, r11}
SBCS r10, r10, r6
SBCS r11, r11, r7
STM r12!, {r10, r11}
LDM r12, {r10, r11}
SBCS r10, r10, r8
SBC r11, r11, r9
STM r12!, {r10, r11}
SUB r12, r12, #0x24
/* lr = all-ones mask when the result went negative. */
ASR lr, r11, #25
/* Conditionally subtract order starting at bit 125 */
MOV r1, #0xa0000000
MOV r2, #0xba7d
MOVT r2, #0x4b9e
MOV r3, #0x4c63
MOVT r3, #0xcb02
MOV r4, #0xf39a
MOVT r4, #0xd45e
MOV r5, #0xdf3b
MOVT r5, #0x29b
MOV r9, #0x2000000
AND r1, r1, lr
AND r2, r2, lr
AND r3, r3, lr
AND r4, r4, lr
AND r5, r5, lr
AND r9, r9, lr
LDM r12, {r10, r11}
ADDS r10, r10, r1
ADCS r11, r11, r2
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADCS r10, r10, r3
ADCS r11, r11, r4
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADCS r10, r10, r5
ADCS r11, r11, #0x0
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADCS r10, r10, #0x0
ADCS r11, r11, #0x0
STM r12!, {r10, r11}
LDM r12, {r10}
ADCS r10, r10, #0x0
STM r12!, {r10}
SUB r0, r0, #0x10
MOV r12, sp
/* Load bits 252-376 */
ADD r12, r12, #0x1c
LDM r12, {r1, r2, r3, r4, r5}
LSL r5, r5, #4
ORR r5, r5, r4, LSR #28
LSL r4, r4, #4
ORR r4, r4, r3, LSR #28
LSL r3, r3, #4
ORR r3, r3, r2, LSR #28
LSL r2, r2, #4
ORR r2, r2, r1, LSR #28
BFC r5, #29, #3
SUB r12, r12, #0x1c
/* Sub product of top 4 words and order */
MOV r0, sp
/* * -5cf5d3ed */
MOV r1, #0x2c13
MOVT r1, #0xa30a
MOV lr, #0x0
LDM r0, {r6, r7, r8, r9}
UMLAL r6, lr, r2, r1
ADDS r7, r7, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r7, lr, r3, r1
ADDS r8, r8, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r8, lr, r4, r1
ADDS r9, r9, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r9, lr, r5, r1
STM r0, {r6, r7, r8, r9}
ADD r0, r0, #0x4
/* * -5812631b */
MOV r1, #0x9ce5
MOVT r1, #0xa7ed
MOV r10, #0x0
LDM r0, {r6, r7, r8, r9}
UMLAL r6, r10, r2, r1
ADDS r7, r7, r10
MOV r10, #0x0
ADC r10, r10, #0x0
UMLAL r7, r10, r3, r1
ADDS r8, r8, r10
MOV r10, #0x0
ADC r10, r10, #0x0
UMLAL r8, r10, r4, r1
ADDS r9, r9, r10
MOV r10, #0x0
ADC r10, r10, #0x0
UMLAL r9, r10, r5, r1
STM r0, {r6, r7, r8, r9}
ADD r0, r0, #0x4
/* * -a2f79cd7 */
MOV r1, #0x6329
MOVT r1, #0x5d08
MOV r11, #0x0
LDM r0, {r6, r7, r8, r9}
UMLAL r6, r11, r2, r1
ADDS r7, r7, r11
MOV r11, #0x0
ADC r11, r11, #0x0
UMLAL r7, r11, r3, r1
ADDS r8, r8, r11
MOV r11, #0x0
ADC r11, r11, #0x0
UMLAL r8, r11, r4, r1
ADDS r9, r9, r11
MOV r11, #0x0
ADC r11, r11, #0x0
UMLAL r9, r11, r5, r1
STM r0, {r6, r7, r8, r9}
ADD r0, r0, #0x4
/* * -14def9df */
MOV r1, #0x621
MOVT r1, #0xeb21
MOV r12, #0x0
LDM r0, {r6, r7, r8, r9}
UMLAL r6, r12, r2, r1
ADDS r7, r7, r12
MOV r12, #0x0
ADC r12, r12, #0x0
UMLAL r7, r12, r3, r1
ADDS r8, r8, r12
MOV r12, #0x0
ADC r12, r12, #0x0
UMLAL r8, r12, r4, r1
ADDS r9, r9, r12
MOV r12, #0x0
ADC r12, r12, #0x0
UMLAL r9, r12, r5, r1
STM r0, {r6, r7, r8, r9}
ADD r0, r0, #0x4
/* Add overflows at 4 * 32 */
LDM r0, {r6, r7, r8, r9}
BFC r9, #28, #4
ADDS r6, r6, lr
ADCS r7, r7, r10
ADCS r8, r8, r11
ADC r9, r9, r12
/* Subtract top at 4 * 32 */
SUBS r6, r6, r2
SBCS r7, r7, r3
SBCS r8, r8, r4
SBCS r9, r9, r5
/* r1 = all-ones borrow mask (SBC of a register with itself). */
SBC r1, r1, r1
SUB r0, r0, #0x10
LDM r0, {r2, r3, r4, r5}
/* Conditionally add the order (low words 0x5cf5d3ed, 0x5812631a,
 * 0xa2f79cd6, 0x14def9de) under the borrow mask in r1. */
MOV r10, #0xd3ed
MOVT r10, #0x5cf5
MOV r11, #0x631a
MOVT r11, #0x5812
MOV r12, #0x9cd6
MOVT r12, #0xa2f7
MOV lr, #0xf9de
MOVT lr, #0x14de
AND r10, r10, r1
AND r11, r11, r1
AND r12, r12, r1
AND lr, lr, r1
ADDS r2, r2, r10
ADCS r3, r3, r11
ADCS r4, r4, r12
ADCS r5, r5, lr
ADCS r6, r6, #0x0
ADCS r7, r7, #0x0
AND r1, r1, #0x10000000
ADCS r8, r8, #0x0
ADC r9, r9, r1
BFC r9, #28, #4
/* Store result */
LDR r0, [sp, #52]
STM r0, {r2, r3, r4, r5, r6, r7, r8, r9}
ADD sp, sp, #0x38
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 588 */
.size sc_reduce,.-sc_reduce
#else
.text
.align 4
.globl sc_reduce
.type sc_reduce, %function
/* sc_reduce(s)
 * Reduce the 512-bit value at s (sixteen little-endian 32-bit words)
 * modulo the Ed25519 group order
 *     L = 2^252 + 0x14def9dea2f79cd65812631a5cf5d3ed
 * and write the 256-bit result back over the first eight words of s.
 * This is the UMAAL variant (built when WOLFSSL_SP_NO_UMAAL is unset).
 * In:       r0 = s (read/write)
 * Clobbers: r1-r3, r12, lr, flags; r4-r11 saved/restored via push/pop.
 * Stack:    0x38 bytes of scratch; the s pointer is kept at [sp, #52].
 * NOTE(review): machine-generated, branch-free (constant-time) code.
 * The instruction order carries the ADDS/ADCS/UMAAL carry chains -
 * do not reorder or "clean up" individual instructions. */
sc_reduce:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
SUB sp, sp, #0x38
STR r0, [sp, #52]
/* Load bits 252-511 */
ADD r0, r0, #0x1c
LDM r0, {r1, r2, r3, r4, r5, r6, r7, r8, r9}
LSR lr, r9, #24
LSL r9, r9, #4
ORR r9, r9, r8, LSR #28
LSL r8, r8, #4
ORR r8, r8, r7, LSR #28
LSL r7, r7, #4
ORR r7, r7, r6, LSR #28
LSL r6, r6, #4
ORR r6, r6, r5, LSR #28
LSL r5, r5, #4
ORR r5, r5, r4, LSR #28
LSL r4, r4, #4
ORR r4, r4, r3, LSR #28
LSL r3, r3, #4
ORR r3, r3, r2, LSR #28
LSL r2, r2, #4
ORR r2, r2, r1, LSR #28
BFC r9, #28, #4
SUB r0, r0, #0x1c
/* Add order times bits 504..511 */
/* 0xa30a2c13, 0xa7ed9ce5, 0x5d086329, 0xeb210621 are the low words of
 * the two's complement of L (see the "* -5cf5d3ed" etc. notes below). */
MOV r10, #0x2c13
MOVT r10, #0xa30a
MOV r11, #0x9ce5
MOVT r11, #0xa7ed
MOV r1, #0x0
UMLAL r2, r1, r10, lr
UMAAL r3, r1, r11, lr
MOV r10, #0x6329
MOVT r10, #0x5d08
MOV r11, #0x621
MOVT r11, #0xeb21
UMAAL r4, r1, r10, lr
UMAAL r5, r1, r11, lr
ADDS r6, r6, r1
ADCS r7, r7, #0x0
ADCS r8, r8, #0x0
ADC r9, r9, #0x0
SUBS r6, r6, lr
SBCS r7, r7, #0x0
SBCS r8, r8, #0x0
SBC r9, r9, #0x0
/* Sub product of top 8 words and order */
/* Result accumulates in the scratch area at sp (r12 walks it). */
MOV r12, sp
MOV r1, #0x2c13
MOVT r1, #0xa30a
MOV lr, #0x0
LDM r0!, {r10, r11}
UMLAL r10, lr, r2, r1
UMAAL r11, lr, r3, r1
STM r12!, {r10, r11}
LDM r0!, {r10, r11}
UMAAL r10, lr, r4, r1
UMAAL r11, lr, r5, r1
STM r12!, {r10, r11}
LDM r0!, {r10, r11}
UMAAL r10, lr, r6, r1
UMAAL r11, lr, r7, r1
STM r12!, {r10, r11}
LDM r0!, {r10, r11}
UMAAL r10, lr, r8, r1
BFC r11, #28, #4
UMAAL r11, lr, r9, r1
STM r12!, {r10, r11, lr}
SUB r0, r0, #0x10
SUB r12, r12, #0x20
MOV r1, #0x9ce5
MOVT r1, #0xa7ed
MOV lr, #0x0
LDM r12, {r10, r11}
UMLAL r10, lr, r2, r1
UMAAL r11, lr, r3, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
UMAAL r10, lr, r4, r1
UMAAL r11, lr, r5, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
UMAAL r10, lr, r6, r1
UMAAL r11, lr, r7, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
UMAAL r10, lr, r8, r1
UMAAL r11, lr, r9, r1
STM r12!, {r10, r11, lr}
SUB r12, r12, #0x20
MOV r1, #0x6329
MOVT r1, #0x5d08
MOV lr, #0x0
LDM r12, {r10, r11}
UMLAL r10, lr, r2, r1
UMAAL r11, lr, r3, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
UMAAL r10, lr, r4, r1
UMAAL r11, lr, r5, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
UMAAL r10, lr, r6, r1
UMAAL r11, lr, r7, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
UMAAL r10, lr, r8, r1
UMAAL r11, lr, r9, r1
STM r12!, {r10, r11, lr}
SUB r12, r12, #0x20
MOV r1, #0x621
MOVT r1, #0xeb21
MOV lr, #0x0
LDM r12, {r10, r11}
UMLAL r10, lr, r2, r1
UMAAL r11, lr, r3, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
UMAAL r10, lr, r4, r1
UMAAL r11, lr, r5, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
UMAAL r10, lr, r6, r1
UMAAL r11, lr, r7, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
UMAAL r10, lr, r8, r1
UMAAL r11, lr, r9, r1
STM r12!, {r10, r11, lr}
SUB r12, r12, #0x20
/* Subtract at 4 * 32 */
LDM r12, {r10, r11}
SUBS r10, r10, r2
SBCS r11, r11, r3
STM r12!, {r10, r11}
LDM r12, {r10, r11}
SBCS r10, r10, r4
SBCS r11, r11, r5
STM r12!, {r10, r11}
LDM r12, {r10, r11}
SBCS r10, r10, r6
SBCS r11, r11, r7
STM r12!, {r10, r11}
LDM r12, {r10, r11}
SBCS r10, r10, r8
SBC r11, r11, r9
STM r12!, {r10, r11}
SUB r12, r12, #0x24
/* lr = all-ones if the intermediate went negative, else 0 (sign mask). */
ASR lr, r11, #25
/* Conditionally subtract order starting at bit 125 */
MOV r1, #0xa0000000
MOV r2, #0xba7d
MOVT r2, #0x4b9e
MOV r3, #0x4c63
MOVT r3, #0xcb02
MOV r4, #0xf39a
MOVT r4, #0xd45e
MOV r5, #0xdf3b
MOVT r5, #0x29b
MOV r9, #0x2000000
AND r1, r1, lr
AND r2, r2, lr
AND r3, r3, lr
AND r4, r4, lr
AND r5, r5, lr
AND r9, r9, lr
LDM r12, {r10, r11}
ADDS r10, r10, r1
ADCS r11, r11, r2
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADCS r10, r10, r3
ADCS r11, r11, r4
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADCS r10, r10, r5
ADCS r11, r11, #0x0
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADCS r10, r10, #0x0
ADCS r11, r11, #0x0
STM r12!, {r10, r11}
LDM r12, {r10}
ADCS r10, r10, #0x0
STM r12!, {r10}
SUB r0, r0, #0x10
MOV r12, sp
/* Load bits 252-376 */
ADD r12, r12, #0x1c
LDM r12, {r1, r2, r3, r4, r5}
LSL r5, r5, #4
ORR r5, r5, r4, LSR #28
LSL r4, r4, #4
ORR r4, r4, r3, LSR #28
LSL r3, r3, #4
ORR r3, r3, r2, LSR #28
LSL r2, r2, #4
ORR r2, r2, r1, LSR #28
BFC r5, #29, #3
SUB r12, r12, #0x1c
/* Sub product of top 4 words and order */
MOV r0, sp
/* * -5cf5d3ed */
MOV r1, #0x2c13
MOVT r1, #0xa30a
MOV lr, #0x0
LDM r0, {r6, r7, r8, r9}
UMLAL r6, lr, r2, r1
UMAAL r7, lr, r3, r1
UMAAL r8, lr, r4, r1
UMAAL r9, lr, r5, r1
STM r0, {r6, r7, r8, r9}
ADD r0, r0, #0x4
/* * -5812631b */
MOV r1, #0x9ce5
MOVT r1, #0xa7ed
MOV r10, #0x0
LDM r0, {r6, r7, r8, r9}
UMLAL r6, r10, r2, r1
UMAAL r7, r10, r3, r1
UMAAL r8, r10, r4, r1
UMAAL r9, r10, r5, r1
STM r0, {r6, r7, r8, r9}
ADD r0, r0, #0x4
/* * -a2f79cd7 */
MOV r1, #0x6329
MOVT r1, #0x5d08
MOV r11, #0x0
LDM r0, {r6, r7, r8, r9}
UMLAL r6, r11, r2, r1
UMAAL r7, r11, r3, r1
UMAAL r8, r11, r4, r1
UMAAL r9, r11, r5, r1
STM r0, {r6, r7, r8, r9}
ADD r0, r0, #0x4
/* * -14def9df */
MOV r1, #0x621
MOVT r1, #0xeb21
MOV r12, #0x0
LDM r0, {r6, r7, r8, r9}
UMLAL r6, r12, r2, r1
UMAAL r7, r12, r3, r1
UMAAL r8, r12, r4, r1
UMAAL r9, r12, r5, r1
STM r0, {r6, r7, r8, r9}
ADD r0, r0, #0x4
/* Add overflows at 4 * 32 */
LDM r0, {r6, r7, r8, r9}
BFC r9, #28, #4
ADDS r6, r6, lr
ADCS r7, r7, r10
ADCS r8, r8, r11
ADC r9, r9, r12
/* Subtract top at 4 * 32 */
SUBS r6, r6, r2
SBCS r7, r7, r3
SBCS r8, r8, r4
SBCS r9, r9, r5
/* r1 = borrow mask: all-ones if the subtraction underflowed, else 0. */
SBC r1, r1, r1
SUB r0, r0, #0x10
LDM r0, {r2, r3, r4, r5}
/* Masked add of L (0x5cf5d3ed, 0x5812631a, 0xa2f79cd6, 0x14def9de, top
 * bit 2^252) to fold the conditional correction back in. */
MOV r10, #0xd3ed
MOVT r10, #0x5cf5
MOV r11, #0x631a
MOVT r11, #0x5812
MOV r12, #0x9cd6
MOVT r12, #0xa2f7
MOV lr, #0xf9de
MOVT lr, #0x14de
AND r10, r10, r1
AND r11, r11, r1
AND r12, r12, r1
AND lr, lr, r1
ADDS r2, r2, r10
ADCS r3, r3, r11
ADCS r4, r4, r12
ADCS r5, r5, lr
ADCS r6, r6, #0x0
ADCS r7, r7, #0x0
AND r1, r1, #0x10000000
ADCS r8, r8, #0x0
ADC r9, r9, r1
BFC r9, #28, #4
/* Store result */
LDR r0, [sp, #52]
STM r0, {r2, r3, r4, r5, r6, r7, r8, r9}
ADD sp, sp, #0x38
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 502 */
.size sc_reduce,.-sc_reduce
#endif /* WOLFSSL_SP_NO_UMAAL */
#ifdef HAVE_ED25519_SIGN
#ifdef WOLFSSL_SP_NO_UMAAL
.text
.align 4
.globl sc_muladd
.type sc_muladd, %function
/* sc_muladd(r, a, b, c)
 * Compute r = (a * b + c) mod L, where L is the Ed25519 group order
 *     L = 2^252 + 0x14def9dea2f79cd65812631a5cf5d3ed,
 * for eight-word (256-bit) little-endian scalars a, b, c.
 * Variant built when WOLFSSL_SP_NO_UMAAL is defined (UMLAL + explicit
 * carry propagation instead of UMAAL).
 * In:       r0 = r (out), r1 = a, r2 = b, r3 = c
 * Saved at: [sp, #68] = r, [sp, #72] = a, [sp, #76] = c (via the STM
 *           below); the 512-bit product is built in the sp scratch area.
 * Clobbers: r1-r3, r12, lr, flags; r4-r11 saved/restored via push/pop.
 * NOTE(review): machine-generated, branch-free (constant-time) code.
 * The instruction order carries the carry chains - do not reorder. */
sc_muladd:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
SUB sp, sp, #0x50
ADD lr, sp, #0x44
STM lr, {r0, r1, r3}
MOV r0, #0x0
LDR r12, [r1]
/* Schoolbook 8x8-word multiply of a (r1) by b (r2); partial sums are
 * spilled to [sp] as each low word completes. */
/* A[0] * B[0] */
LDR lr, [r2]
UMULL r3, r4, r12, lr
/* A[0] * B[2] */
LDR lr, [r2, #8]
UMULL r5, r6, r12, lr
/* A[0] * B[4] */
LDR lr, [r2, #16]
UMULL r7, r8, r12, lr
/* A[0] * B[6] */
LDR lr, [r2, #24]
UMULL r9, r10, r12, lr
STR r3, [sp]
/* A[0] * B[1] */
LDR lr, [r2, #4]
MOV r11, r0
UMLAL r4, r11, r12, lr
ADDS r5, r5, r11
/* A[0] * B[3] */
LDR lr, [r2, #12]
ADCS r6, r6, #0x0
ADC r11, r0, #0x0
UMLAL r6, r11, r12, lr
ADDS r7, r7, r11
/* A[0] * B[5] */
LDR lr, [r2, #20]
ADCS r8, r8, #0x0
ADC r11, r0, #0x0
UMLAL r8, r11, r12, lr
ADDS r9, r9, r11
/* A[0] * B[7] */
LDR lr, [r2, #28]
ADCS r10, r10, #0x0
ADC r3, r0, #0x0
UMLAL r10, r3, r12, lr
/* A[1] * B[0] */
LDR r12, [r1, #4]
LDR lr, [r2]
MOV r11, #0x0
UMLAL r4, r11, r12, lr
STR r4, [sp, #4]
ADDS r5, r5, r11
/* A[1] * B[1] */
LDR lr, [r2, #4]
ADC r11, r0, #0x0
UMLAL r5, r11, r12, lr
ADDS r6, r6, r11
/* A[1] * B[2] */
LDR lr, [r2, #8]
ADC r11, r0, #0x0
UMLAL r6, r11, r12, lr
ADDS r7, r7, r11
/* A[1] * B[3] */
LDR lr, [r2, #12]
ADC r11, r0, #0x0
UMLAL r7, r11, r12, lr
ADDS r8, r8, r11
/* A[1] * B[4] */
LDR lr, [r2, #16]
ADC r11, r0, #0x0
UMLAL r8, r11, r12, lr
ADDS r9, r9, r11
/* A[1] * B[5] */
LDR lr, [r2, #20]
ADC r11, r0, #0x0
UMLAL r9, r11, r12, lr
ADDS r10, r10, r11
/* A[1] * B[6] */
LDR lr, [r2, #24]
ADC r11, r0, #0x0
UMLAL r10, r11, r12, lr
ADDS r3, r3, r11
/* A[1] * B[7] */
LDR lr, [r2, #28]
ADC r4, r0, #0x0
UMLAL r3, r4, r12, lr
/* A[2] * B[0] */
LDR r12, [r1, #8]
LDR lr, [r2]
MOV r11, #0x0
UMLAL r5, r11, r12, lr
STR r5, [sp, #8]
ADDS r6, r6, r11
/* A[2] * B[1] */
LDR lr, [r2, #4]
ADC r11, r0, #0x0
UMLAL r6, r11, r12, lr
ADDS r7, r7, r11
/* A[2] * B[2] */
LDR lr, [r2, #8]
ADC r11, r0, #0x0
UMLAL r7, r11, r12, lr
ADDS r8, r8, r11
/* A[2] * B[3] */
LDR lr, [r2, #12]
ADC r11, r0, #0x0
UMLAL r8, r11, r12, lr
ADDS r9, r9, r11
/* A[2] * B[4] */
LDR lr, [r2, #16]
ADC r11, r0, #0x0
UMLAL r9, r11, r12, lr
ADDS r10, r10, r11
/* A[2] * B[5] */
LDR lr, [r2, #20]
ADC r11, r0, #0x0
UMLAL r10, r11, r12, lr
ADDS r3, r3, r11
/* A[2] * B[6] */
LDR lr, [r2, #24]
ADC r11, r0, #0x0
UMLAL r3, r11, r12, lr
ADDS r4, r4, r11
/* A[2] * B[7] */
LDR lr, [r2, #28]
ADC r5, r0, #0x0
UMLAL r4, r5, r12, lr
/* A[3] * B[0] */
LDR r12, [r1, #12]
LDR lr, [r2]
MOV r11, #0x0
UMLAL r6, r11, r12, lr
STR r6, [sp, #12]
ADDS r7, r7, r11
/* A[3] * B[1] */
LDR lr, [r2, #4]
ADC r11, r0, #0x0
UMLAL r7, r11, r12, lr
ADDS r8, r8, r11
/* A[3] * B[2] */
LDR lr, [r2, #8]
ADC r11, r0, #0x0
UMLAL r8, r11, r12, lr
ADDS r9, r9, r11
/* A[3] * B[3] */
LDR lr, [r2, #12]
ADC r11, r0, #0x0
UMLAL r9, r11, r12, lr
ADDS r10, r10, r11
/* A[3] * B[4] */
LDR lr, [r2, #16]
ADC r11, r0, #0x0
UMLAL r10, r11, r12, lr
ADDS r3, r3, r11
/* A[3] * B[5] */
LDR lr, [r2, #20]
ADC r11, r0, #0x0
UMLAL r3, r11, r12, lr
ADDS r4, r4, r11
/* A[3] * B[6] */
LDR lr, [r2, #24]
ADC r11, r0, #0x0
UMLAL r4, r11, r12, lr
ADDS r5, r5, r11
/* A[3] * B[7] */
LDR lr, [r2, #28]
ADC r6, r0, #0x0
UMLAL r5, r6, r12, lr
/* A[4] * B[0] */
LDR r12, [r1, #16]
LDR lr, [r2]
MOV r11, #0x0
UMLAL r7, r11, r12, lr
STR r7, [sp, #16]
ADDS r8, r8, r11
/* A[4] * B[1] */
LDR lr, [r2, #4]
ADC r11, r0, #0x0
UMLAL r8, r11, r12, lr
ADDS r9, r9, r11
/* A[4] * B[2] */
LDR lr, [r2, #8]
ADC r11, r0, #0x0
UMLAL r9, r11, r12, lr
ADDS r10, r10, r11
/* A[4] * B[3] */
LDR lr, [r2, #12]
ADC r11, r0, #0x0
UMLAL r10, r11, r12, lr
ADDS r3, r3, r11
/* A[4] * B[4] */
LDR lr, [r2, #16]
ADC r11, r0, #0x0
UMLAL r3, r11, r12, lr
ADDS r4, r4, r11
/* A[4] * B[5] */
LDR lr, [r2, #20]
ADC r11, r0, #0x0
UMLAL r4, r11, r12, lr
ADDS r5, r5, r11
/* A[4] * B[6] */
LDR lr, [r2, #24]
ADC r11, r0, #0x0
UMLAL r5, r11, r12, lr
ADDS r6, r6, r11
/* A[4] * B[7] */
LDR lr, [r2, #28]
ADC r7, r0, #0x0
UMLAL r6, r7, r12, lr
/* A[5] * B[0] */
LDR r12, [r1, #20]
LDR lr, [r2]
MOV r11, #0x0
UMLAL r8, r11, r12, lr
STR r8, [sp, #20]
ADDS r9, r9, r11
/* A[5] * B[1] */
LDR lr, [r2, #4]
ADC r11, r0, #0x0
UMLAL r9, r11, r12, lr
ADDS r10, r10, r11
/* A[5] * B[2] */
LDR lr, [r2, #8]
ADC r11, r0, #0x0
UMLAL r10, r11, r12, lr
ADDS r3, r3, r11
/* A[5] * B[3] */
LDR lr, [r2, #12]
ADC r11, r0, #0x0
UMLAL r3, r11, r12, lr
ADDS r4, r4, r11
/* A[5] * B[4] */
LDR lr, [r2, #16]
ADC r11, r0, #0x0
UMLAL r4, r11, r12, lr
ADDS r5, r5, r11
/* A[5] * B[5] */
LDR lr, [r2, #20]
ADC r11, r0, #0x0
UMLAL r5, r11, r12, lr
ADDS r6, r6, r11
/* A[5] * B[6] */
LDR lr, [r2, #24]
ADC r11, r0, #0x0
UMLAL r6, r11, r12, lr
ADDS r7, r7, r11
/* A[5] * B[7] */
LDR lr, [r2, #28]
ADC r8, r0, #0x0
UMLAL r7, r8, r12, lr
/* A[6] * B[0] */
LDR r12, [r1, #24]
LDR lr, [r2]
MOV r11, #0x0
UMLAL r9, r11, r12, lr
STR r9, [sp, #24]
ADDS r10, r10, r11
/* A[6] * B[1] */
LDR lr, [r2, #4]
ADC r11, r0, #0x0
UMLAL r10, r11, r12, lr
ADDS r3, r3, r11
/* A[6] * B[2] */
LDR lr, [r2, #8]
ADC r11, r0, #0x0
UMLAL r3, r11, r12, lr
ADDS r4, r4, r11
/* A[6] * B[3] */
LDR lr, [r2, #12]
ADC r11, r0, #0x0
UMLAL r4, r11, r12, lr
ADDS r5, r5, r11
/* A[6] * B[4] */
LDR lr, [r2, #16]
ADC r11, r0, #0x0
UMLAL r5, r11, r12, lr
ADDS r6, r6, r11
/* A[6] * B[5] */
LDR lr, [r2, #20]
ADC r11, r0, #0x0
UMLAL r6, r11, r12, lr
ADDS r7, r7, r11
/* A[6] * B[6] */
LDR lr, [r2, #24]
ADC r11, r0, #0x0
UMLAL r7, r11, r12, lr
ADDS r8, r8, r11
/* A[6] * B[7] */
LDR lr, [r2, #28]
ADC r9, r0, #0x0
UMLAL r8, r9, r12, lr
/* A[7] * B[0] */
LDR r12, [r1, #28]
LDR lr, [r2]
MOV r11, #0x0
UMLAL r10, r11, r12, lr
STR r10, [sp, #28]
ADDS r3, r3, r11
/* A[7] * B[1] */
LDR lr, [r2, #4]
ADC r11, r0, #0x0
UMLAL r3, r11, r12, lr
ADDS r4, r4, r11
/* A[7] * B[2] */
LDR lr, [r2, #8]
ADC r11, r0, #0x0
UMLAL r4, r11, r12, lr
ADDS r5, r5, r11
/* A[7] * B[3] */
LDR lr, [r2, #12]
ADC r11, r0, #0x0
UMLAL r5, r11, r12, lr
ADDS r6, r6, r11
/* A[7] * B[4] */
LDR lr, [r2, #16]
ADC r11, r0, #0x0
UMLAL r6, r11, r12, lr
ADDS r7, r7, r11
/* A[7] * B[5] */
LDR lr, [r2, #20]
ADC r11, r0, #0x0
UMLAL r7, r11, r12, lr
ADDS r8, r8, r11
/* A[7] * B[6] */
LDR lr, [r2, #24]
ADC r11, r0, #0x0
UMLAL r8, r11, r12, lr
ADDS r9, r9, r11
/* A[7] * B[7] */
LDR lr, [r2, #28]
ADC r10, r0, #0x0
UMLAL r9, r10, r12, lr
ADD lr, sp, #0x20
STM lr, {r3, r4, r5, r6, r7, r8, r9, r10}
MOV r0, sp
/* Add c to a * b */
LDR lr, [sp, #76]
LDM r0, {r2, r3, r4, r5, r6, r7, r8, r9}
LDM lr!, {r1, r10, r11, r12}
ADDS r2, r2, r1
ADCS r3, r3, r10
ADCS r4, r4, r11
ADCS r5, r5, r12
LDM lr!, {r1, r10, r11, r12}
ADCS r6, r6, r1
ADCS r7, r7, r10
ADCS r8, r8, r11
ADCS r9, r9, r12
MOV r1, r9
STM r0!, {r2, r3, r4, r5, r6, r7, r8, r9}
LDM r0, {r2, r3, r4, r5, r6, r7, r8, r9}
ADCS r2, r2, #0x0
ADCS r3, r3, #0x0
ADCS r4, r4, #0x0
ADCS r5, r5, #0x0
ADCS r6, r6, #0x0
ADCS r7, r7, #0x0
ADCS r8, r8, #0x0
ADC r9, r9, #0x0
SUB r0, r0, #0x20
/* Reduction mod L from here on (mirrors sc_reduce). */
/* Get 252..503 and 504..507 */
LSR lr, r9, #24
LSL r9, r9, #4
ORR r9, r9, r8, LSR #28
LSL r8, r8, #4
ORR r8, r8, r7, LSR #28
LSL r7, r7, #4
ORR r7, r7, r6, LSR #28
LSL r6, r6, #4
ORR r6, r6, r5, LSR #28
LSL r5, r5, #4
ORR r5, r5, r4, LSR #28
LSL r4, r4, #4
ORR r4, r4, r3, LSR #28
LSL r3, r3, #4
ORR r3, r3, r2, LSR #28
LSL r2, r2, #4
ORR r2, r2, r1, LSR #28
BFC r9, #28, #4
/* Add order times bits 504..507 */
MOV r10, #0x2c13
MOVT r10, #0xa30a
MOV r11, #0x9ce5
MOVT r11, #0xa7ed
MOV r1, #0x0
UMLAL r2, r1, r10, lr
ADDS r3, r3, r1
MOV r1, #0x0
ADC r1, r1, #0x0
UMLAL r3, r1, r11, lr
MOV r10, #0x6329
MOVT r10, #0x5d08
MOV r11, #0x621
MOVT r11, #0xeb21
ADDS r4, r4, r1
MOV r1, #0x0
ADC r1, r1, #0x0
UMLAL r4, r1, r10, lr
ADDS r5, r5, r1
MOV r1, #0x0
ADC r1, r1, #0x0
UMLAL r5, r1, r11, lr
ADDS r6, r6, r1
ADCS r7, r7, #0x0
ADCS r8, r8, #0x0
ADC r9, r9, #0x0
SUBS r6, r6, lr
SBCS r7, r7, #0x0
SBCS r8, r8, #0x0
SBC r9, r9, #0x0
/* Sub product of top 8 words and order */
MOV r12, sp
MOV r1, #0x2c13
MOVT r1, #0xa30a
MOV lr, #0x0
LDM r0!, {r10, r11}
UMLAL r10, lr, r2, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r3, r1
STM r12!, {r10, r11}
LDM r0!, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r4, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r5, r1
STM r12!, {r10, r11}
LDM r0!, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r6, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r7, r1
STM r12!, {r10, r11}
LDM r0!, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r8, r1
BFC r11, #28, #4
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r9, r1
STM r12!, {r10, r11, lr}
SUB r0, r0, #0x10
SUB r12, r12, #0x20
MOV r1, #0x9ce5
MOVT r1, #0xa7ed
MOV lr, #0x0
LDM r12, {r10, r11}
UMLAL r10, lr, r2, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r3, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r4, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r5, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r6, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r7, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r8, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r9, r1
STM r12!, {r10, r11, lr}
SUB r12, r12, #0x20
MOV r1, #0x6329
MOVT r1, #0x5d08
MOV lr, #0x0
LDM r12, {r10, r11}
UMLAL r10, lr, r2, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r3, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r4, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r5, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r6, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r7, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r8, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r9, r1
STM r12!, {r10, r11, lr}
SUB r12, r12, #0x20
MOV r1, #0x621
MOVT r1, #0xeb21
MOV lr, #0x0
LDM r12, {r10, r11}
UMLAL r10, lr, r2, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r3, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r4, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r5, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r6, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r7, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADDS r10, r10, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r10, lr, r8, r1
ADDS r11, r11, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r11, lr, r9, r1
STM r12!, {r10, r11, lr}
SUB r12, r12, #0x20
/* Subtract at 4 * 32 */
LDM r12, {r10, r11}
SUBS r10, r10, r2
SBCS r11, r11, r3
STM r12!, {r10, r11}
LDM r12, {r10, r11}
SBCS r10, r10, r4
SBCS r11, r11, r5
STM r12!, {r10, r11}
LDM r12, {r10, r11}
SBCS r10, r10, r6
SBCS r11, r11, r7
STM r12!, {r10, r11}
LDM r12, {r10, r11}
SBCS r10, r10, r8
SBC r11, r11, r9
STM r12!, {r10, r11}
SUB r12, r12, #0x24
ASR lr, r11, #25
/* Conditionally subtract order starting at bit 125 */
MOV r1, #0xa0000000
MOV r2, #0xba7d
MOVT r2, #0x4b9e
MOV r3, #0x4c63
MOVT r3, #0xcb02
MOV r4, #0xf39a
MOVT r4, #0xd45e
MOV r5, #0xdf3b
MOVT r5, #0x29b
MOV r9, #0x2000000
AND r1, r1, lr
AND r2, r2, lr
AND r3, r3, lr
AND r4, r4, lr
AND r5, r5, lr
AND r9, r9, lr
LDM r12, {r10, r11}
ADDS r10, r10, r1
ADCS r11, r11, r2
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADCS r10, r10, r3
ADCS r11, r11, r4
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADCS r10, r10, r5
ADCS r11, r11, #0x0
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADCS r10, r10, #0x0
ADCS r11, r11, #0x0
STM r12!, {r10, r11}
LDM r12, {r10}
ADCS r10, r10, #0x0
STM r12!, {r10}
SUB r0, r0, #0x10
MOV r12, sp
/* Load bits 252-376 */
ADD r12, r12, #0x1c
LDM r12, {r1, r2, r3, r4, r5}
LSL r5, r5, #4
ORR r5, r5, r4, LSR #28
LSL r4, r4, #4
ORR r4, r4, r3, LSR #28
LSL r3, r3, #4
ORR r3, r3, r2, LSR #28
LSL r2, r2, #4
ORR r2, r2, r1, LSR #28
BFC r5, #29, #3
SUB r12, r12, #0x1c
/* Sub product of top 4 words and order */
MOV r0, sp
/* * -5cf5d3ed */
MOV r1, #0x2c13
MOVT r1, #0xa30a
MOV lr, #0x0
LDM r0, {r6, r7, r8, r9}
UMLAL r6, lr, r2, r1
ADDS r7, r7, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r7, lr, r3, r1
ADDS r8, r8, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r8, lr, r4, r1
ADDS r9, r9, lr
MOV lr, #0x0
ADC lr, lr, #0x0
UMLAL r9, lr, r5, r1
STM r0, {r6, r7, r8, r9}
ADD r0, r0, #0x4
/* * -5812631b */
MOV r1, #0x9ce5
MOVT r1, #0xa7ed
MOV r10, #0x0
LDM r0, {r6, r7, r8, r9}
UMLAL r6, r10, r2, r1
ADDS r7, r7, r10
MOV r10, #0x0
ADC r10, r10, #0x0
UMLAL r7, r10, r3, r1
ADDS r8, r8, r10
MOV r10, #0x0
ADC r10, r10, #0x0
UMLAL r8, r10, r4, r1
ADDS r9, r9, r10
MOV r10, #0x0
ADC r10, r10, #0x0
UMLAL r9, r10, r5, r1
STM r0, {r6, r7, r8, r9}
ADD r0, r0, #0x4
/* * -a2f79cd7 */
MOV r1, #0x6329
MOVT r1, #0x5d08
MOV r11, #0x0
LDM r0, {r6, r7, r8, r9}
UMLAL r6, r11, r2, r1
ADDS r7, r7, r11
MOV r11, #0x0
ADC r11, r11, #0x0
UMLAL r7, r11, r3, r1
ADDS r8, r8, r11
MOV r11, #0x0
ADC r11, r11, #0x0
UMLAL r8, r11, r4, r1
ADDS r9, r9, r11
MOV r11, #0x0
ADC r11, r11, #0x0
UMLAL r9, r11, r5, r1
STM r0, {r6, r7, r8, r9}
ADD r0, r0, #0x4
/* * -14def9df */
MOV r1, #0x621
MOVT r1, #0xeb21
MOV r12, #0x0
LDM r0, {r6, r7, r8, r9}
UMLAL r6, r12, r2, r1
ADDS r7, r7, r12
MOV r12, #0x0
ADC r12, r12, #0x0
UMLAL r7, r12, r3, r1
ADDS r8, r8, r12
MOV r12, #0x0
ADC r12, r12, #0x0
UMLAL r8, r12, r4, r1
ADDS r9, r9, r12
MOV r12, #0x0
ADC r12, r12, #0x0
UMLAL r9, r12, r5, r1
STM r0, {r6, r7, r8, r9}
ADD r0, r0, #0x4
/* Add overflows at 4 * 32 */
LDM r0, {r6, r7, r8, r9}
BFC r9, #28, #4
ADDS r6, r6, lr
ADCS r7, r7, r10
ADCS r8, r8, r11
ADC r9, r9, r12
/* Subtract top at 4 * 32 */
SUBS r6, r6, r2
SBCS r7, r7, r3
SBCS r8, r8, r4
SBCS r9, r9, r5
SBC r1, r1, r1
SUB r0, r0, #0x10
LDM r0, {r2, r3, r4, r5}
MOV r10, #0xd3ed
MOVT r10, #0x5cf5
MOV r11, #0x631a
MOVT r11, #0x5812
MOV r12, #0x9cd6
MOVT r12, #0xa2f7
MOV lr, #0xf9de
MOVT lr, #0x14de
AND r10, r10, r1
AND r11, r11, r1
AND r12, r12, r1
AND lr, lr, r1
ADDS r2, r2, r10
ADCS r3, r3, r11
ADCS r4, r4, r12
ADCS r5, r5, lr
ADCS r6, r6, #0x0
ADCS r7, r7, #0x0
AND r1, r1, #0x10000000
ADCS r8, r8, #0x0
ADC r9, r9, r1
BFC r9, #28, #4
LDR r0, [sp, #68]
/* Store result */
STR r2, [r0]
STR r3, [r0, #4]
STR r4, [r0, #8]
STR r5, [r0, #12]
STR r6, [r0, #16]
STR r7, [r0, #20]
STR r8, [r0, #24]
STR r9, [r0, #28]
ADD sp, sp, #0x50
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 994 */
.size sc_muladd,.-sc_muladd
#else
.text
.align 4
.globl sc_muladd
.type sc_muladd, %function
/* sc_muladd(r, a, b, c)
 * Compute r = (a * b + c) mod L, where L is the Ed25519 group order
 *     L = 2^252 + 0x14def9dea2f79cd65812631a5cf5d3ed,
 * for eight-word (256-bit) little-endian scalars a, b, c.
 * UMAAL variant (built when WOLFSSL_SP_NO_UMAAL is not defined); the
 * 8x8-word product is computed in two 4-word column passes, reloading
 * the high half of a from [sp, #72] for the second pass.
 * In:       r0 = r (out), r1 = a, r2 = b, r3 = c
 * Saved at: [sp, #68] = r, [sp, #72] = a, [sp, #76] = c (STM below).
 * Clobbers: r1-r3, r12, lr, flags; r4-r11 saved/restored via push/pop.
 * NOTE(review): machine-generated, branch-free (constant-time) code;
 * the UMAAL accumulator chaining encodes the carries - do not reorder. */
sc_muladd:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
SUB sp, sp, #0x50
ADD lr, sp, #0x44
STM lr, {r0, r1, r3}
MOV lr, r2
LDM r1, {r0, r1, r2, r3}
LDM lr!, {r4, r5, r6}
UMULL r10, r11, r0, r4
UMULL r12, r7, r1, r4
UMAAL r11, r12, r0, r5
UMULL r8, r9, r2, r4
UMAAL r12, r8, r1, r5
UMAAL r12, r7, r0, r6
UMAAL r8, r9, r3, r4
STM sp, {r10, r11, r12}
UMAAL r7, r8, r2, r5
LDM lr!, {r4}
UMULL r10, r11, r1, r6
UMAAL r8, r9, r2, r6
UMAAL r7, r10, r0, r4
UMAAL r8, r11, r3, r5
STR r7, [sp, #12]
UMAAL r8, r10, r1, r4
UMAAL r9, r11, r3, r6
UMAAL r9, r10, r2, r4
UMAAL r10, r11, r3, r4
LDM lr, {r4, r5, r6, r7}
MOV r12, #0x0
UMLAL r8, r12, r0, r4
UMAAL r9, r12, r1, r4
UMAAL r10, r12, r2, r4
UMAAL r11, r12, r3, r4
MOV r4, #0x0
UMLAL r9, r4, r0, r5
UMAAL r10, r4, r1, r5
UMAAL r11, r4, r2, r5
UMAAL r12, r4, r3, r5
MOV r5, #0x0
UMLAL r10, r5, r0, r6
UMAAL r11, r5, r1, r6
UMAAL r12, r5, r2, r6
UMAAL r4, r5, r3, r6
MOV r6, #0x0
UMLAL r11, r6, r0, r7
LDR r0, [sp, #72]
UMAAL r12, r6, r1, r7
ADD r0, r0, #0x10
UMAAL r4, r6, r2, r7
SUB lr, lr, #0x10
UMAAL r5, r6, r3, r7
/* Second pass: r0-r3 = high four words of a, lr rewound over b. */
LDM r0, {r0, r1, r2, r3}
STR r6, [sp, #64]
LDM lr!, {r6}
MOV r7, #0x0
UMLAL r8, r7, r0, r6
UMAAL r9, r7, r1, r6
STR r8, [sp, #16]
UMAAL r10, r7, r2, r6
UMAAL r11, r7, r3, r6
LDM lr!, {r6}
MOV r8, #0x0
UMLAL r9, r8, r0, r6
UMAAL r10, r8, r1, r6
STR r9, [sp, #20]
UMAAL r11, r8, r2, r6
UMAAL r12, r8, r3, r6
LDM lr!, {r6}
MOV r9, #0x0
UMLAL r10, r9, r0, r6
UMAAL r11, r9, r1, r6
STR r10, [sp, #24]
UMAAL r12, r9, r2, r6
UMAAL r4, r9, r3, r6
LDM lr!, {r6}
MOV r10, #0x0
UMLAL r11, r10, r0, r6
UMAAL r12, r10, r1, r6
STR r11, [sp, #28]
UMAAL r4, r10, r2, r6
UMAAL r5, r10, r3, r6
LDM lr!, {r11}
UMAAL r12, r7, r0, r11
UMAAL r4, r7, r1, r11
LDR r6, [sp, #64]
UMAAL r5, r7, r2, r11
UMAAL r6, r7, r3, r11
LDM lr!, {r11}
UMAAL r4, r8, r0, r11
UMAAL r5, r8, r1, r11
UMAAL r6, r8, r2, r11
UMAAL r7, r8, r3, r11
LDM lr, {r11, lr}
UMAAL r5, r9, r0, r11
UMAAL r6, r10, r0, lr
UMAAL r6, r9, r1, r11
UMAAL r7, r10, r1, lr
UMAAL r7, r9, r2, r11
UMAAL r8, r10, r2, lr
UMAAL r8, r9, r3, r11
UMAAL r9, r10, r3, lr
MOV r3, r12
ADD lr, sp, #0x20
STM lr, {r3, r4, r5, r6, r7, r8, r9, r10}
MOV r0, sp
/* Add c to a * b */
LDR lr, [sp, #76]
LDM r0, {r2, r3, r4, r5, r6, r7, r8, r9}
LDM lr!, {r1, r10, r11, r12}
ADDS r2, r2, r1
ADCS r3, r3, r10
ADCS r4, r4, r11
ADCS r5, r5, r12
LDM lr!, {r1, r10, r11, r12}
ADCS r6, r6, r1
ADCS r7, r7, r10
ADCS r8, r8, r11
ADCS r9, r9, r12
MOV r1, r9
STM r0!, {r2, r3, r4, r5, r6, r7, r8, r9}
LDM r0, {r2, r3, r4, r5, r6, r7, r8, r9}
ADCS r2, r2, #0x0
ADCS r3, r3, #0x0
ADCS r4, r4, #0x0
ADCS r5, r5, #0x0
ADCS r6, r6, #0x0
ADCS r7, r7, #0x0
ADCS r8, r8, #0x0
ADC r9, r9, #0x0
SUB r0, r0, #0x20
/* Reduction mod L from here on (mirrors sc_reduce). */
/* Get 252..503 and 504..507 */
LSR lr, r9, #24
LSL r9, r9, #4
ORR r9, r9, r8, LSR #28
LSL r8, r8, #4
ORR r8, r8, r7, LSR #28
LSL r7, r7, #4
ORR r7, r7, r6, LSR #28
LSL r6, r6, #4
ORR r6, r6, r5, LSR #28
LSL r5, r5, #4
ORR r5, r5, r4, LSR #28
LSL r4, r4, #4
ORR r4, r4, r3, LSR #28
LSL r3, r3, #4
ORR r3, r3, r2, LSR #28
LSL r2, r2, #4
ORR r2, r2, r1, LSR #28
BFC r9, #28, #4
/* Add order times bits 504..507 */
MOV r10, #0x2c13
MOVT r10, #0xa30a
MOV r11, #0x9ce5
MOVT r11, #0xa7ed
MOV r1, #0x0
UMLAL r2, r1, r10, lr
UMAAL r3, r1, r11, lr
MOV r10, #0x6329
MOVT r10, #0x5d08
MOV r11, #0x621
MOVT r11, #0xeb21
UMAAL r4, r1, r10, lr
UMAAL r5, r1, r11, lr
ADDS r6, r6, r1
ADCS r7, r7, #0x0
ADCS r8, r8, #0x0
ADC r9, r9, #0x0
SUBS r6, r6, lr
SBCS r7, r7, #0x0
SBCS r8, r8, #0x0
SBC r9, r9, #0x0
/* Sub product of top 8 words and order */
MOV r12, sp
MOV r1, #0x2c13
MOVT r1, #0xa30a
MOV lr, #0x0
LDM r0!, {r10, r11}
UMLAL r10, lr, r2, r1
UMAAL r11, lr, r3, r1
STM r12!, {r10, r11}
LDM r0!, {r10, r11}
UMAAL r10, lr, r4, r1
UMAAL r11, lr, r5, r1
STM r12!, {r10, r11}
LDM r0!, {r10, r11}
UMAAL r10, lr, r6, r1
UMAAL r11, lr, r7, r1
STM r12!, {r10, r11}
LDM r0!, {r10, r11}
UMAAL r10, lr, r8, r1
BFC r11, #28, #4
UMAAL r11, lr, r9, r1
STM r12!, {r10, r11, lr}
SUB r0, r0, #0x10
SUB r12, r12, #0x20
MOV r1, #0x9ce5
MOVT r1, #0xa7ed
MOV lr, #0x0
LDM r12, {r10, r11}
UMLAL r10, lr, r2, r1
UMAAL r11, lr, r3, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
UMAAL r10, lr, r4, r1
UMAAL r11, lr, r5, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
UMAAL r10, lr, r6, r1
UMAAL r11, lr, r7, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
UMAAL r10, lr, r8, r1
UMAAL r11, lr, r9, r1
STM r12!, {r10, r11, lr}
SUB r12, r12, #0x20
MOV r1, #0x6329
MOVT r1, #0x5d08
MOV lr, #0x0
LDM r12, {r10, r11}
UMLAL r10, lr, r2, r1
UMAAL r11, lr, r3, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
UMAAL r10, lr, r4, r1
UMAAL r11, lr, r5, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
UMAAL r10, lr, r6, r1
UMAAL r11, lr, r7, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
UMAAL r10, lr, r8, r1
UMAAL r11, lr, r9, r1
STM r12!, {r10, r11, lr}
SUB r12, r12, #0x20
MOV r1, #0x621
MOVT r1, #0xeb21
MOV lr, #0x0
LDM r12, {r10, r11}
UMLAL r10, lr, r2, r1
UMAAL r11, lr, r3, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
UMAAL r10, lr, r4, r1
UMAAL r11, lr, r5, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
UMAAL r10, lr, r6, r1
UMAAL r11, lr, r7, r1
STM r12!, {r10, r11}
LDM r12, {r10, r11}
UMAAL r10, lr, r8, r1
UMAAL r11, lr, r9, r1
STM r12!, {r10, r11, lr}
SUB r12, r12, #0x20
/* Subtract at 4 * 32 */
LDM r12, {r10, r11}
SUBS r10, r10, r2
SBCS r11, r11, r3
STM r12!, {r10, r11}
LDM r12, {r10, r11}
SBCS r10, r10, r4
SBCS r11, r11, r5
STM r12!, {r10, r11}
LDM r12, {r10, r11}
SBCS r10, r10, r6
SBCS r11, r11, r7
STM r12!, {r10, r11}
LDM r12, {r10, r11}
SBCS r10, r10, r8
SBC r11, r11, r9
STM r12!, {r10, r11}
SUB r12, r12, #0x24
ASR lr, r11, #25
/* Conditionally subtract order starting at bit 125 */
MOV r1, #0xa0000000
MOV r2, #0xba7d
MOVT r2, #0x4b9e
MOV r3, #0x4c63
MOVT r3, #0xcb02
MOV r4, #0xf39a
MOVT r4, #0xd45e
MOV r5, #0xdf3b
MOVT r5, #0x29b
MOV r9, #0x2000000
AND r1, r1, lr
AND r2, r2, lr
AND r3, r3, lr
AND r4, r4, lr
AND r5, r5, lr
AND r9, r9, lr
LDM r12, {r10, r11}
ADDS r10, r10, r1
ADCS r11, r11, r2
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADCS r10, r10, r3
ADCS r11, r11, r4
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADCS r10, r10, r5
ADCS r11, r11, #0x0
STM r12!, {r10, r11}
LDM r12, {r10, r11}
ADCS r10, r10, #0x0
ADCS r11, r11, #0x0
STM r12!, {r10, r11}
LDM r12, {r10}
ADCS r10, r10, #0x0
STM r12!, {r10}
SUB r0, r0, #0x10
MOV r12, sp
/* Load bits 252-376 */
ADD r12, r12, #0x1c
LDM r12, {r1, r2, r3, r4, r5}
LSL r5, r5, #4
ORR r5, r5, r4, LSR #28
LSL r4, r4, #4
ORR r4, r4, r3, LSR #28
LSL r3, r3, #4
ORR r3, r3, r2, LSR #28
LSL r2, r2, #4
ORR r2, r2, r1, LSR #28
BFC r5, #29, #3
SUB r12, r12, #0x1c
/* Sub product of top 4 words and order */
MOV r0, sp
/* * -5cf5d3ed */
MOV r1, #0x2c13
MOVT r1, #0xa30a
MOV lr, #0x0
LDM r0, {r6, r7, r8, r9}
UMLAL r6, lr, r2, r1
UMAAL r7, lr, r3, r1
UMAAL r8, lr, r4, r1
UMAAL r9, lr, r5, r1
STM r0, {r6, r7, r8, r9}
ADD r0, r0, #0x4
/* * -5812631b */
MOV r1, #0x9ce5
MOVT r1, #0xa7ed
MOV r10, #0x0
LDM r0, {r6, r7, r8, r9}
UMLAL r6, r10, r2, r1
UMAAL r7, r10, r3, r1
UMAAL r8, r10, r4, r1
UMAAL r9, r10, r5, r1
STM r0, {r6, r7, r8, r9}
ADD r0, r0, #0x4
/* * -a2f79cd7 */
MOV r1, #0x6329
MOVT r1, #0x5d08
MOV r11, #0x0
LDM r0, {r6, r7, r8, r9}
UMLAL r6, r11, r2, r1
UMAAL r7, r11, r3, r1
UMAAL r8, r11, r4, r1
UMAAL r9, r11, r5, r1
STM r0, {r6, r7, r8, r9}
ADD r0, r0, #0x4
/* * -14def9df */
MOV r1, #0x621
MOVT r1, #0xeb21
MOV r12, #0x0
LDM r0, {r6, r7, r8, r9}
UMLAL r6, r12, r2, r1
UMAAL r7, r12, r3, r1
UMAAL r8, r12, r4, r1
UMAAL r9, r12, r5, r1
STM r0, {r6, r7, r8, r9}
ADD r0, r0, #0x4
/* Add overflows at 4 * 32 */
LDM r0, {r6, r7, r8, r9}
BFC r9, #28, #4
ADDS r6, r6, lr
ADCS r7, r7, r10
ADCS r8, r8, r11
ADC r9, r9, r12
/* Subtract top at 4 * 32 */
SUBS r6, r6, r2
SBCS r7, r7, r3
SBCS r8, r8, r4
SBCS r9, r9, r5
SBC r1, r1, r1
SUB r0, r0, #0x10
LDM r0, {r2, r3, r4, r5}
MOV r10, #0xd3ed
MOVT r10, #0x5cf5
MOV r11, #0x631a
MOVT r11, #0x5812
MOV r12, #0x9cd6
MOVT r12, #0xa2f7
MOV lr, #0xf9de
MOVT lr, #0x14de
AND r10, r10, r1
AND r11, r11, r1
AND r12, r12, r1
AND lr, lr, r1
ADDS r2, r2, r10
ADCS r3, r3, r11
ADCS r4, r4, r12
ADCS r5, r5, lr
ADCS r6, r6, #0x0
ADCS r7, r7, #0x0
AND r1, r1, #0x10000000
ADCS r8, r8, #0x0
ADC r9, r9, r1
BFC r9, #28, #4
LDR r0, [sp, #68]
/* Store result */
STR r2, [r0]
STR r3, [r0, #4]
STR r4, [r0, #8]
STR r5, [r0, #12]
STR r6, [r0, #16]
STR r7, [r0, #20]
STR r8, [r0, #24]
STR r9, [r0, #28]
ADD sp, sp, #0x50
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 752 */
.size sc_muladd,.-sc_muladd
#endif /* WOLFSSL_SP_NO_UMAAL */
#endif /* HAVE_ED25519_SIGN */
#endif /* HAVE_ED25519 */
#endif /* !CURVE25519_SMALL || !ED25519_SMALL */
#endif /* HAVE_CURVE25519 || HAVE_ED25519 */
#endif /* !__aarch64__ && __thumb__ */
#endif /* WOLFSSL_ARMASM */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
#endif /* !WOLFSSL_ARMASM_INLINE */
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/UART/UART_WakeUpFromStop/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
; Cortex-M0 startup module: vector table plus weak default handlers.
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
; Vector table layout: word 0 = initial SP (end of CSTACK), word 1 = reset
; vector, then the Cortex-M0 system exceptions, then the device interrupts.
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;; Each handler below is PUBWEAK, so an application-defined handler of the
;; same name overrides it; the defaults simply branch to themselves (spin).
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
; Call SystemInit() first, then hand off to the IAR C runtime entry
; (__iar_program_start), which initializes data/bss and calls main().
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aenu1/aps3e
| 164,818
|
app/src/main/cpp/rpcs3/3rdparty/wolfssl/wolfssl/wolfcrypt/src/port/arm/armv8-curve25519.S
|
/* armv8-curve25519
*
* Copyright (C) 2006-2023 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif /* HAVE_CONFIG_H */
#include <wolfssl/wolfcrypt/settings.h>
/* Generated using (from wolfssl):
* cd ../scripts
* ruby ./x25519/x25519.rb arm64 ../wolfssl/wolfcrypt/src/port/arm/armv8-curve25519.S
*/
#ifdef WOLFSSL_ARMASM
#ifdef __aarch64__
#ifndef WOLFSSL_ARMASM_INLINE
#if defined(HAVE_CURVE25519) || defined(HAVE_ED25519)
#if !defined(CURVE25519_SMALL) || !defined(ED25519_SMALL)
/* void fe_init(void)
 * No-op: this field-element implementation needs no runtime setup; the
 * function exists so all backends share the same API. */
#ifndef __APPLE__
.text
.globl fe_init
.type fe_init,@function
.align 2
fe_init:
#else
.section __TEXT,__text
.globl _fe_init
.p2align 2
_fe_init:
#endif /* __APPLE__ */
ret
#ifndef __APPLE__
.size fe_init,.-fe_init
#endif /* __APPLE__ */
#ifdef HAVE_ED25519
/* void fe_frombytes(fe out, const u8 in[32])
 * x0 = out, x1 = in.
 * Load 32 little-endian bytes as four 64-bit limbs and clear bit 255 so
 * the result lies in [0, 2^255). */
#ifndef __APPLE__
.text
.globl fe_frombytes
.type fe_frombytes,@function
.align 2
fe_frombytes:
#else
.section __TEXT,__text
.globl _fe_frombytes
.p2align 2
_fe_frombytes:
#endif /* __APPLE__ */
ldp x2, x3, [x1]
ldp x4, x5, [x1, #16]
# Drop the top bit of the top limb (value taken mod 2^255)
and x5, x5, #0x7fffffffffffffff
stp x2, x3, [x0]
stp x4, x5, [x0, #16]
ret
#ifndef __APPLE__
.size fe_frombytes,.-fe_frombytes
#endif /* __APPLE__ */
/* void fe_tobytes(u8 out[32], const fe in)
 * x0 = out, x1 = in.
 * Store the canonical (fully reduced mod 2^255-19) 32-byte encoding.
 * The trial addition of 19 exposes, in bit 255 of the sum, whether the
 * value is >= p; that bit selects a conditional +19 before masking. */
#ifndef __APPLE__
.text
.globl fe_tobytes
.type fe_tobytes,@function
.align 2
fe_tobytes:
#else
.section __TEXT,__text
.globl _fe_tobytes
.p2align 2
_fe_tobytes:
#endif /* __APPLE__ */
mov x7, #19
ldp x2, x3, [x1]
ldp x4, x5, [x1, #16]
# Trial add of 19 to detect value >= 2^255-19
adds x6, x2, x7
adcs x6, x3, xzr
adcs x6, x4, xzr
adc x6, x5, xzr
# x6 = 19 if bit 255 of (in + 19) set, else 0 (branch-free select)
and x6, x7, x6, asr 63
adds x2, x2, x6
adcs x3, x3, xzr
adcs x4, x4, xzr
adc x5, x5, xzr
and x5, x5, #0x7fffffffffffffff
stp x2, x3, [x0]
stp x4, x5, [x0, #16]
ret
#ifndef __APPLE__
.size fe_tobytes,.-fe_tobytes
#endif /* __APPLE__ */
/* void fe_1(fe out)
 * x0 = out. Set the field element to the constant 1 (limbs 1,0,0,0). */
#ifndef __APPLE__
.text
.globl fe_1
.type fe_1,@function
.align 2
fe_1:
#else
.section __TEXT,__text
.globl _fe_1
.p2align 2
_fe_1:
#endif /* __APPLE__ */
# Set one
mov x1, #1
stp x1, xzr, [x0]
stp xzr, xzr, [x0, #16]
ret
#ifndef __APPLE__
.size fe_1,.-fe_1
#endif /* __APPLE__ */
/* void fe_0(fe out)
 * x0 = out. Set the field element to 0 (all four limbs zero). */
#ifndef __APPLE__
.text
.globl fe_0
.type fe_0,@function
.align 2
fe_0:
#else
.section __TEXT,__text
.globl _fe_0
.p2align 2
_fe_0:
#endif /* __APPLE__ */
# Set zero
stp xzr, xzr, [x0]
stp xzr, xzr, [x0, #16]
ret
#ifndef __APPLE__
.size fe_0,.-fe_0
#endif /* __APPLE__ */
/* void fe_copy(fe out, const fe in)
 * x0 = out, x1 = in. Copy all four 64-bit limbs. */
#ifndef __APPLE__
.text
.globl fe_copy
.type fe_copy,@function
.align 2
fe_copy:
#else
.section __TEXT,__text
.globl _fe_copy
.p2align 2
_fe_copy:
#endif /* __APPLE__ */
# Copy
ldp x2, x3, [x1]
ldp x4, x5, [x1, #16]
stp x2, x3, [x0]
stp x4, x5, [x0, #16]
ret
#ifndef __APPLE__
.size fe_copy,.-fe_copy
#endif /* __APPLE__ */
/* void fe_sub(fe r, const fe a, const fe b)
 * x0 = r, x1 = a, x2 = b. r = a - b (mod 2^255-19), branch-free.
 * A 256-bit subtract is done first; the borrow and bit 255 are combined
 * into a mask that conditionally adds the modulus back in. */
#ifndef __APPLE__
.text
.globl fe_sub
.type fe_sub,@function
.align 2
fe_sub:
#else
.section __TEXT,__text
.globl _fe_sub
.p2align 2
_fe_sub:
#endif /* __APPLE__ */
# Sub
ldp x3, x4, [x1]
ldp x5, x6, [x1, #16]
ldp x7, x8, [x2]
ldp x9, x10, [x2, #16]
subs x3, x3, x7
sbcs x4, x4, x8
sbcs x5, x5, x9
sbcs x6, x6, x10
# x11 = all-ones when the subtract borrowed (carry clear)
csetm x11, cc
mov x12, #-19
# Mask the modulus
extr x11, x11, x6, #63
mul x12, x11, x12
# Add modulus (if underflow)
subs x3, x3, x12
sbcs x4, x4, xzr
and x6, x6, #0x7fffffffffffffff
sbcs x5, x5, xzr
sbc x6, x6, xzr
stp x3, x4, [x0]
stp x5, x6, [x0, #16]
ret
#ifndef __APPLE__
.size fe_sub,.-fe_sub
#endif /* __APPLE__ */
/* void fe_add(fe r, const fe a, const fe b)
 * x0 = r, x1 = a, x2 = b. r = a + b (mod 2^255-19), branch-free.
 * After the 256-bit add, the carry-out and bit 255 select a multiple of
 * 19 to fold back in, mirroring fe_sub's correction step. */
#ifndef __APPLE__
.text
.globl fe_add
.type fe_add,@function
.align 2
fe_add:
#else
.section __TEXT,__text
.globl _fe_add
.p2align 2
_fe_add:
#endif /* __APPLE__ */
# Add
ldp x3, x4, [x1]
ldp x5, x6, [x1, #16]
ldp x7, x8, [x2]
ldp x9, x10, [x2, #16]
adds x3, x3, x7
adcs x4, x4, x8
adcs x5, x5, x9
adcs x6, x6, x10
# x11 = 1 when the add carried out of 256 bits
cset x11, cs
mov x12, #19
# Mask the modulus
extr x11, x11, x6, #63
mul x12, x11, x12
# Sub modulus (if overflow)
adds x3, x3, x12
adcs x4, x4, xzr
and x6, x6, #0x7fffffffffffffff
adcs x5, x5, xzr
adc x6, x6, xzr
stp x3, x4, [x0]
stp x5, x6, [x0, #16]
ret
#ifndef __APPLE__
.size fe_add,.-fe_add
#endif /* __APPLE__ */
/* void fe_neg(fe r, const fe a)
 * x0 = r, x1 = a. r = p - a where p = 2^255-19; the four constants below
 * (-19, -1, -1, 0x7fff...f) are exactly the limbs of p. */
#ifndef __APPLE__
.text
.globl fe_neg
.type fe_neg,@function
.align 2
fe_neg:
#else
.section __TEXT,__text
.globl _fe_neg
.p2align 2
_fe_neg:
#endif /* __APPLE__ */
ldp x2, x3, [x1]
ldp x4, x5, [x1, #16]
# Limbs of p = 2^255-19
mov x6, #-19
mov x7, #-1
mov x8, #-1
mov x9, #0x7fffffffffffffff
subs x6, x6, x2
sbcs x7, x7, x3
sbcs x8, x8, x4
sbc x9, x9, x5
stp x6, x7, [x0]
stp x8, x9, [x0, #16]
ret
#ifndef __APPLE__
.size fe_neg,.-fe_neg
#endif /* __APPLE__ */
/* int fe_isnonzero(const fe a)
 * x0 = a on entry; on return x0 == 0 iff a ≡ 0 (mod 2^255-19).
 * Canonicalizes exactly like fe_tobytes, then ORs the four reduced limbs
 * together so any nonzero bit survives into the result. */
#ifndef __APPLE__
.text
.globl fe_isnonzero
.type fe_isnonzero,@function
.align 2
fe_isnonzero:
#else
.section __TEXT,__text
.globl _fe_isnonzero
.p2align 2
_fe_isnonzero:
#endif /* __APPLE__ */
mov x6, #19
ldp x1, x2, [x0]
ldp x3, x4, [x0, #16]
# Canonical reduction (same trick as fe_tobytes)
adds x5, x1, x6
adcs x5, x2, xzr
adcs x5, x3, xzr
adc x5, x4, xzr
and x5, x6, x5, asr 63
adds x1, x1, x5
adcs x2, x2, xzr
adcs x3, x3, xzr
adc x4, x4, xzr
and x4, x4, #0x7fffffffffffffff
# Combine limbs: result is zero only if every limb is zero
orr x0, x1, x2
orr x3, x3, x4
orr x0, x0, x3
ret
#ifndef __APPLE__
.size fe_isnonzero,.-fe_isnonzero
#endif /* __APPLE__ */
/* int fe_isnegative(const fe a)
 * x0 = a on entry; returns the low bit of the canonical encoding of a.
 * Instead of fully reducing, it XORs a[0]&1 with bit 255 of (a + 19),
 * which is the bit that decides whether canonicalization would add 19
 * (flipping the parity). */
#ifndef __APPLE__
.text
.globl fe_isnegative
.type fe_isnegative,@function
.align 2
fe_isnegative:
#else
.section __TEXT,__text
.globl _fe_isnegative
.p2align 2
_fe_isnegative:
#endif /* __APPLE__ */
mov x6, #19
ldp x1, x2, [x0]
ldp x3, x4, [x0, #16]
# x5 top bit = would-reduce flag (bit 255 of a + 19)
adds x5, x1, x6
adcs x5, x2, xzr
adcs x5, x3, xzr
adc x5, x4, xzr
and x0, x1, #1
eor x0, x0, x5, lsr 63
ret
#ifndef __APPLE__
.size fe_isnegative,.-fe_isnegative
#endif /* __APPLE__ */
/* void fe_cmov_table(fe* r, fe* base, signed char b)
 * x0 = r (3 field elements), x1 = table of 8 entries (3 fe each, 96 B),
 * w2 = signed index b with |b| in 0..8.
 * Branch-free table lookup: every one of the 8 entries is loaded and a
 * csel chain keeps only the one matching |b| (|b| == 0 keeps the identity
 * defaults 1/1/0 set up below). When b < 0 the first two elements are
 * swapped and the third replaced by its negation (p - value), again via
 * csel only — no data-dependent branches or addresses beyond the fixed
 * sequential scan of the table. */
#ifndef __APPLE__
.text
.globl fe_cmov_table
.type fe_cmov_table,@function
.align 2
fe_cmov_table:
#else
.section __TEXT,__text
.globl _fe_cmov_table
.p2align 2
_fe_cmov_table:
#endif /* __APPLE__ */
stp x29, x30, [sp, #-128]!
add x29, sp, #0
str x17, [x29, #40]
str x19, [x29, #48]
stp x20, x21, [x29, #56]
stp x22, x23, [x29, #72]
stp x24, x25, [x29, #88]
stp x26, x27, [x29, #104]
str x28, [x29, #120]
str x0, [x29, #16]
# x0 = |b|, x3 = sign mask of b (branch-free absolute value)
sxtb x2, w2
sbfx x3, x2, #7, #1
eor x0, x2, x3
sub x0, x0, x3
# Defaults for |b| == 0: (1, 1, 0) — the identity entry
mov x4, #1
mov x5, xzr
mov x6, xzr
mov x7, xzr
mov x8, #1
mov x9, xzr
mov x10, xzr
mov x11, xzr
mov x12, xzr
mov x13, xzr
mov x14, xzr
mov x15, xzr
cmp x0, #1
ldp x16, x17, [x1]
ldp x19, x20, [x1, #16]
ldp x21, x22, [x1, #32]
ldp x23, x24, [x1, #48]
ldp x25, x26, [x1, #64]
ldp x27, x28, [x1, #80]
csel x4, x16, x4, eq
csel x5, x17, x5, eq
csel x6, x19, x6, eq
csel x7, x20, x7, eq
csel x8, x21, x8, eq
csel x9, x22, x9, eq
csel x10, x23, x10, eq
csel x11, x24, x11, eq
csel x12, x25, x12, eq
csel x13, x26, x13, eq
csel x14, x27, x14, eq
csel x15, x28, x15, eq
cmp x0, #2
ldp x16, x17, [x1, #96]
ldp x19, x20, [x1, #112]
ldp x21, x22, [x1, #128]
ldp x23, x24, [x1, #144]
ldp x25, x26, [x1, #160]
ldp x27, x28, [x1, #176]
csel x4, x16, x4, eq
csel x5, x17, x5, eq
csel x6, x19, x6, eq
csel x7, x20, x7, eq
csel x8, x21, x8, eq
csel x9, x22, x9, eq
csel x10, x23, x10, eq
csel x11, x24, x11, eq
csel x12, x25, x12, eq
csel x13, x26, x13, eq
csel x14, x27, x14, eq
csel x15, x28, x15, eq
cmp x0, #3
ldp x16, x17, [x1, #192]
ldp x19, x20, [x1, #208]
ldp x21, x22, [x1, #224]
ldp x23, x24, [x1, #240]
ldp x25, x26, [x1, #256]
ldp x27, x28, [x1, #272]
csel x4, x16, x4, eq
csel x5, x17, x5, eq
csel x6, x19, x6, eq
csel x7, x20, x7, eq
csel x8, x21, x8, eq
csel x9, x22, x9, eq
csel x10, x23, x10, eq
csel x11, x24, x11, eq
csel x12, x25, x12, eq
csel x13, x26, x13, eq
csel x14, x27, x14, eq
csel x15, x28, x15, eq
cmp x0, #4
ldp x16, x17, [x1, #288]
ldp x19, x20, [x1, #304]
ldp x21, x22, [x1, #320]
ldp x23, x24, [x1, #336]
ldp x25, x26, [x1, #352]
ldp x27, x28, [x1, #368]
csel x4, x16, x4, eq
csel x5, x17, x5, eq
csel x6, x19, x6, eq
csel x7, x20, x7, eq
csel x8, x21, x8, eq
csel x9, x22, x9, eq
csel x10, x23, x10, eq
csel x11, x24, x11, eq
csel x12, x25, x12, eq
csel x13, x26, x13, eq
csel x14, x27, x14, eq
csel x15, x28, x15, eq
# Advance base pointer: entries 5..8 use ldp immediate offsets (max 504)
add x1, x1, #0x180
cmp x0, #5
ldp x16, x17, [x1]
ldp x19, x20, [x1, #16]
ldp x21, x22, [x1, #32]
ldp x23, x24, [x1, #48]
ldp x25, x26, [x1, #64]
ldp x27, x28, [x1, #80]
csel x4, x16, x4, eq
csel x5, x17, x5, eq
csel x6, x19, x6, eq
csel x7, x20, x7, eq
csel x8, x21, x8, eq
csel x9, x22, x9, eq
csel x10, x23, x10, eq
csel x11, x24, x11, eq
csel x12, x25, x12, eq
csel x13, x26, x13, eq
csel x14, x27, x14, eq
csel x15, x28, x15, eq
cmp x0, #6
ldp x16, x17, [x1, #96]
ldp x19, x20, [x1, #112]
ldp x21, x22, [x1, #128]
ldp x23, x24, [x1, #144]
ldp x25, x26, [x1, #160]
ldp x27, x28, [x1, #176]
csel x4, x16, x4, eq
csel x5, x17, x5, eq
csel x6, x19, x6, eq
csel x7, x20, x7, eq
csel x8, x21, x8, eq
csel x9, x22, x9, eq
csel x10, x23, x10, eq
csel x11, x24, x11, eq
csel x12, x25, x12, eq
csel x13, x26, x13, eq
csel x14, x27, x14, eq
csel x15, x28, x15, eq
cmp x0, #7
ldp x16, x17, [x1, #192]
ldp x19, x20, [x1, #208]
ldp x21, x22, [x1, #224]
ldp x23, x24, [x1, #240]
ldp x25, x26, [x1, #256]
ldp x27, x28, [x1, #272]
csel x4, x16, x4, eq
csel x5, x17, x5, eq
csel x6, x19, x6, eq
csel x7, x20, x7, eq
csel x8, x21, x8, eq
csel x9, x22, x9, eq
csel x10, x23, x10, eq
csel x11, x24, x11, eq
csel x12, x25, x12, eq
csel x13, x26, x13, eq
csel x14, x27, x14, eq
csel x15, x28, x15, eq
cmp x0, #8
ldp x16, x17, [x1, #288]
ldp x19, x20, [x1, #304]
ldp x21, x22, [x1, #320]
ldp x23, x24, [x1, #336]
ldp x25, x26, [x1, #352]
ldp x27, x28, [x1, #368]
csel x4, x16, x4, eq
csel x5, x17, x5, eq
csel x6, x19, x6, eq
csel x7, x20, x7, eq
csel x8, x21, x8, eq
csel x9, x22, x9, eq
csel x10, x23, x10, eq
csel x11, x24, x11, eq
csel x12, x25, x12, eq
csel x13, x26, x13, eq
csel x14, x27, x14, eq
csel x15, x28, x15, eq
# Compute p - third element (limbs of p as in fe_neg)
mov x16, #-19
mov x17, #-1
mov x19, #-1
mov x20, #0x7fffffffffffffff
subs x16, x16, x12
sbcs x17, x17, x13
sbcs x19, x19, x14
sbc x20, x20, x15
# If b < 0: swap first two elements, take negated third element
cmp x2, #0
mov x3, x4
csel x4, x8, x4, lt
csel x8, x3, x8, lt
mov x3, x5
csel x5, x9, x5, lt
csel x9, x3, x9, lt
mov x3, x6
csel x6, x10, x6, lt
csel x10, x3, x10, lt
mov x3, x7
csel x7, x11, x7, lt
csel x11, x3, x11, lt
csel x12, x16, x12, lt
csel x13, x17, x13, lt
csel x14, x19, x14, lt
csel x15, x20, x15, lt
ldr x0, [x29, #16]
stp x4, x5, [x0]
stp x6, x7, [x0, #16]
stp x8, x9, [x0, #32]
stp x10, x11, [x0, #48]
stp x12, x13, [x0, #64]
stp x14, x15, [x0, #80]
ldr x17, [x29, #40]
ldr x19, [x29, #48]
ldp x20, x21, [x29, #56]
ldp x22, x23, [x29, #72]
ldp x24, x25, [x29, #88]
ldp x26, x27, [x29, #104]
ldr x28, [x29, #120]
ldp x29, x30, [sp], #0x80
ret
#ifndef __APPLE__
.size fe_cmov_table,.-fe_cmov_table
#endif /* __APPLE__ */
#endif /* HAVE_ED25519 */
/* void fe_mul(fe r, const fe a, const fe b)
 * x0 = r, x1 = a, x2 = b. r = a * b (mod 2^255-19).
 * Schoolbook 4x4 64-bit multiply producing a 512-bit product in x6..x13,
 * then the high 256 bits are folded back via *38 (since 2^256 ≡ 38 mod p)
 * and a final conditional +19 leaves the result in [0, 2^255). */
#ifndef __APPLE__
.text
.globl fe_mul
.type fe_mul,@function
.align 2
fe_mul:
#else
.section __TEXT,__text
.globl _fe_mul
.p2align 2
_fe_mul:
#endif /* __APPLE__ */
stp x29, x30, [sp, #-64]!
add x29, sp, #0
str x17, [x29, #24]
str x19, [x29, #32]
stp x20, x21, [x29, #40]
str x22, [x29, #56]
# Multiply
ldp x14, x15, [x1]
ldp x16, x17, [x1, #16]
ldp x19, x20, [x2]
ldp x21, x22, [x2, #16]
# A[0] * B[0]
umulh x7, x14, x19
mul x6, x14, x19
# A[2] * B[0]
umulh x9, x16, x19
mul x8, x16, x19
# A[1] * B[0]
mul x3, x15, x19
adds x7, x7, x3
umulh x4, x15, x19
adcs x8, x8, x4
# A[1] * B[3]
umulh x11, x15, x22
adc x9, x9, xzr
mul x10, x15, x22
# A[0] * B[1]
mul x3, x14, x20
adds x7, x7, x3
umulh x4, x14, x20
adcs x8, x8, x4
# A[2] * B[1]
mul x3, x16, x20
adcs x9, x9, x3
umulh x4, x16, x20
adcs x10, x10, x4
adc x11, x11, xzr
# A[1] * B[2]
mul x3, x15, x21
adds x9, x9, x3
umulh x4, x15, x21
adcs x10, x10, x4
adcs x11, x11, xzr
adc x12, xzr, xzr
# A[0] * B[2]
mul x3, x14, x21
adds x8, x8, x3
umulh x4, x14, x21
adcs x9, x9, x4
adcs x10, x10, xzr
adcs x11, x11, xzr
adc x12, x12, xzr
# A[1] * B[1]
mul x3, x15, x20
adds x8, x8, x3
umulh x4, x15, x20
adcs x9, x9, x4
# A[3] * B[1]
mul x3, x17, x20
adcs x10, x10, x3
umulh x4, x17, x20
adcs x11, x11, x4
adc x12, x12, xzr
# A[2] * B[2]
mul x3, x16, x21
adds x10, x10, x3
umulh x4, x16, x21
adcs x11, x11, x4
# A[3] * B[3]
mul x3, x17, x22
adcs x12, x12, x3
umulh x13, x17, x22
adc x13, x13, xzr
# A[0] * B[3]
mul x3, x14, x22
adds x9, x9, x3
umulh x4, x14, x22
adcs x10, x10, x4
# A[2] * B[3]
mul x3, x16, x22
adcs x11, x11, x3
umulh x4, x16, x22
adcs x12, x12, x4
adc x13, x13, xzr
# A[3] * B[0]
mul x3, x17, x19
adds x9, x9, x3
umulh x4, x17, x19
adcs x10, x10, x4
# A[3] * B[2]
mul x3, x17, x21
adcs x11, x11, x3
umulh x4, x17, x21
adcs x12, x12, x4
adc x13, x13, xzr
# Reduce: fold the high 256 bits (x10..x13) back in multiplied by 38
mov x3, #38
mul x4, x3, x13
adds x9, x9, x4
umulh x5, x3, x13
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x9, #63
mul x5, x5, x3
and x9, x9, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x10
adds x6, x6, x4
umulh x10, x3, x10
mul x4, x3, x11
adcs x7, x7, x4
umulh x11, x3, x11
mul x4, x3, x12
adcs x8, x8, x4
umulh x12, x3, x12
adc x9, x9, xzr
# Add high product results in
adds x6, x6, x5
adcs x7, x7, x10
adcs x8, x8, x11
adc x9, x9, x12
# Reduce if top bit set
mov x3, #19
and x4, x3, x9, asr 63
adds x6, x6, x4
adcs x7, x7, xzr
and x9, x9, #0x7fffffffffffffff
adcs x8, x8, xzr
adc x9, x9, xzr
# Store
stp x6, x7, [x0]
stp x8, x9, [x0, #16]
ldr x17, [x29, #24]
ldr x19, [x29, #32]
ldp x20, x21, [x29, #40]
ldr x22, [x29, #56]
ldp x29, x30, [sp], #0x40
ret
#ifndef __APPLE__
.size fe_mul,.-fe_mul
#endif /* __APPLE__ */
/* void fe_sq(fe r, const fe a)
 * x0 = r, x1 = a. r = a^2 (mod 2^255-19).
 * Leaf squaring: the off-diagonal products A[i]*A[j] (i<j) are summed
 * then doubled, the diagonal squares A[i]^2 are added, and the 512-bit
 * result is reduced with the same *38 / +19 folding used by fe_mul. */
#ifndef __APPLE__
.text
.globl fe_sq
.type fe_sq,@function
.align 2
fe_sq:
#else
.section __TEXT,__text
.globl _fe_sq
.p2align 2
_fe_sq:
#endif /* __APPLE__ */
# Square
ldp x13, x14, [x1]
ldp x15, x16, [x1, #16]
# A[0] * A[1]
umulh x7, x13, x14
mul x6, x13, x14
# A[0] * A[3]
umulh x9, x13, x16
mul x8, x13, x16
# A[0] * A[2]
mul x2, x13, x15
adds x7, x7, x2
umulh x3, x13, x15
adcs x8, x8, x3
# A[1] * A[3]
mul x2, x14, x16
adcs x9, x9, x2
umulh x10, x14, x16
adc x10, x10, xzr
# A[1] * A[2]
mul x2, x14, x15
adds x8, x8, x2
umulh x3, x14, x15
adcs x9, x9, x3
# A[2] * A[3]
mul x2, x15, x16
adcs x10, x10, x2
umulh x11, x15, x16
adc x11, x11, xzr
# Double
adds x6, x6, x6
adcs x7, x7, x7
adcs x8, x8, x8
adcs x9, x9, x9
adcs x10, x10, x10
adcs x11, x11, x11
adc x12, xzr, xzr
# A[0] * A[0]
umulh x3, x13, x13
mul x5, x13, x13
# A[1] * A[1]
mul x2, x14, x14
adds x6, x6, x3
umulh x3, x14, x14
adcs x7, x7, x2
# A[2] * A[2]
mul x2, x15, x15
adcs x8, x8, x3
umulh x3, x15, x15
adcs x9, x9, x2
# A[3] * A[3]
mul x2, x16, x16
adcs x10, x10, x3
umulh x3, x16, x16
adcs x11, x11, x2
adc x12, x12, x3
# Reduce: fold the high 256 bits back in multiplied by 38
mov x2, #38
mul x3, x2, x12
adds x8, x8, x3
umulh x4, x2, x12
adc x4, x4, xzr
mov x2, #19
extr x4, x4, x8, #63
mul x4, x4, x2
and x8, x8, #0x7fffffffffffffff
mov x2, #38
mul x3, x2, x9
adds x5, x5, x3
umulh x9, x2, x9
mul x3, x2, x10
adcs x6, x6, x3
umulh x10, x2, x10
mul x3, x2, x11
adcs x7, x7, x3
umulh x11, x2, x11
adc x8, x8, xzr
# Add high product results in
adds x5, x5, x4
adcs x6, x6, x9
adcs x7, x7, x10
adc x8, x8, x11
# Reduce if top bit set
mov x2, #19
and x3, x2, x8, asr 63
adds x5, x5, x3
adcs x6, x6, xzr
and x8, x8, #0x7fffffffffffffff
adcs x7, x7, xzr
adc x8, x8, xzr
# Store
stp x5, x6, [x0]
stp x7, x8, [x0, #16]
ret
#ifndef __APPLE__
.size fe_sq,.-fe_sq
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl fe_invert
.type fe_invert,@function
.align 2
fe_invert:
#else
.section __TEXT,__text
.globl _fe_invert
.p2align 2
_fe_invert:
#endif /* __APPLE__ */
stp x29, x30, [sp, #-176]!
add x29, sp, #0
str x17, [x29, #160]
str x20, [x29, #168]
# Invert
str x0, [x29, #144]
str x1, [x29, #152]
add x0, x29, #16
#ifndef NDEBUG
ldr x1, [x29, #152]
#endif /* !NDEBUG */
#ifndef __APPLE__
bl fe_sq
#else
bl _fe_sq
#endif /* __APPLE__ */
add x0, x29, #48
add x1, x29, #16
#ifndef __APPLE__
bl fe_sq
#else
bl _fe_sq
#endif /* __APPLE__ */
#ifndef NDEBUG
add x0, x29, #48
#endif /* !NDEBUG */
add x1, x29, #48
#ifndef __APPLE__
bl fe_sq
#else
bl _fe_sq
#endif /* __APPLE__ */
#ifndef NDEBUG
add x0, x29, #48
#endif /* !NDEBUG */
ldr x1, [x29, #152]
add x2, x29, #48
#ifndef __APPLE__
bl fe_mul
#else
bl _fe_mul
#endif /* __APPLE__ */
add x0, x29, #16
add x1, x29, #16
add x2, x29, #48
#ifndef __APPLE__
bl fe_mul
#else
bl _fe_mul
#endif /* __APPLE__ */
add x0, x29, #0x50
#ifndef NDEBUG
add x1, x29, #16
#endif /* !NDEBUG */
#ifndef __APPLE__
bl fe_sq
#else
bl _fe_sq
#endif /* __APPLE__ */
add x0, x29, #48
add x1, x29, #48
add x2, x29, #0x50
#ifndef __APPLE__
bl fe_mul
#else
bl _fe_mul
#endif /* __APPLE__ */
# Loop: 5 times
mov x20, #5
ldp x6, x7, [x29, #48]
ldp x8, x9, [x29, #64]
L_fe_invert1:
# Square
# A[0] * A[1]
umulh x12, x6, x7
mul x11, x6, x7
# A[0] * A[3]
umulh x14, x6, x9
mul x13, x6, x9
# A[0] * A[2]
mul x3, x6, x8
adds x12, x12, x3
umulh x4, x6, x8
adcs x13, x13, x4
# A[1] * A[3]
mul x3, x7, x9
adcs x14, x14, x3
umulh x15, x7, x9
adc x15, x15, xzr
# A[1] * A[2]
mul x3, x7, x8
adds x13, x13, x3
umulh x4, x7, x8
adcs x14, x14, x4
# A[2] * A[3]
mul x3, x8, x9
adcs x15, x15, x3
umulh x16, x8, x9
adc x16, x16, xzr
# Double
adds x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adc x17, xzr, xzr
# A[0] * A[0]
umulh x4, x6, x6
mul x10, x6, x6
# A[1] * A[1]
mul x3, x7, x7
adds x11, x11, x4
umulh x4, x7, x7
adcs x12, x12, x3
# A[2] * A[2]
mul x3, x8, x8
adcs x13, x13, x4
umulh x4, x8, x8
adcs x14, x14, x3
# A[3] * A[3]
mul x3, x9, x9
adcs x15, x15, x4
umulh x4, x9, x9
adcs x16, x16, x3
adc x17, x17, x4
# Reduce
mov x3, #38
mul x4, x3, x17
adds x13, x13, x4
umulh x5, x3, x17
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x13, #63
mul x5, x5, x3
and x13, x13, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x14
adds x10, x10, x4
umulh x14, x3, x14
mul x4, x3, x15
adcs x11, x11, x4
umulh x15, x3, x15
mul x4, x3, x16
adcs x12, x12, x4
umulh x16, x3, x16
adc x13, x13, xzr
# Add high product results in
adds x6, x10, x5
adcs x7, x11, x14
adcs x8, x12, x15
adc x9, x13, x16
subs x20, x20, #1
bne L_fe_invert1
# Store
stp x6, x7, [x29, #80]
stp x8, x9, [x29, #96]
#ifndef NDEBUG
add x0, x29, #48
#endif /* !NDEBUG */
add x1, x29, #0x50
add x2, x29, #48
#ifndef __APPLE__
bl fe_mul
#else
bl _fe_mul
#endif /* __APPLE__ */
# Loop: 10 times
mov x20, #10
ldp x6, x7, [x29, #48]
ldp x8, x9, [x29, #64]
L_fe_invert2:
# Square
# A[0] * A[1]
umulh x12, x6, x7
mul x11, x6, x7
# A[0] * A[3]
umulh x14, x6, x9
mul x13, x6, x9
# A[0] * A[2]
mul x3, x6, x8
adds x12, x12, x3
umulh x4, x6, x8
adcs x13, x13, x4
# A[1] * A[3]
mul x3, x7, x9
adcs x14, x14, x3
umulh x15, x7, x9
adc x15, x15, xzr
# A[1] * A[2]
mul x3, x7, x8
adds x13, x13, x3
umulh x4, x7, x8
adcs x14, x14, x4
# A[2] * A[3]
mul x3, x8, x9
adcs x15, x15, x3
umulh x16, x8, x9
adc x16, x16, xzr
# Double
adds x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adc x17, xzr, xzr
# A[0] * A[0]
umulh x4, x6, x6
mul x10, x6, x6
# A[1] * A[1]
mul x3, x7, x7
adds x11, x11, x4
umulh x4, x7, x7
adcs x12, x12, x3
# A[2] * A[2]
mul x3, x8, x8
adcs x13, x13, x4
umulh x4, x8, x8
adcs x14, x14, x3
# A[3] * A[3]
mul x3, x9, x9
adcs x15, x15, x4
umulh x4, x9, x9
adcs x16, x16, x3
adc x17, x17, x4
# Reduce
mov x3, #38
mul x4, x3, x17
adds x13, x13, x4
umulh x5, x3, x17
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x13, #63
mul x5, x5, x3
and x13, x13, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x14
adds x10, x10, x4
umulh x14, x3, x14
mul x4, x3, x15
adcs x11, x11, x4
umulh x15, x3, x15
mul x4, x3, x16
adcs x12, x12, x4
umulh x16, x3, x16
adc x13, x13, xzr
# Add high product results in
adds x6, x10, x5
adcs x7, x11, x14
adcs x8, x12, x15
adc x9, x13, x16
subs x20, x20, #1
bne L_fe_invert2
# Store
stp x6, x7, [x29, #80]
stp x8, x9, [x29, #96]
add x0, x29, #0x50
#ifndef NDEBUG
add x1, x29, #0x50
#endif /* !NDEBUG */
add x2, x29, #48
#ifndef __APPLE__
bl fe_mul
#else
bl _fe_mul
#endif /* __APPLE__ */
# Loop: 20 times
mov x20, #20
ldp x6, x7, [x29, #80]
ldp x8, x9, [x29, #96]
L_fe_invert3:
# Square
# A[0] * A[1]
umulh x12, x6, x7
mul x11, x6, x7
# A[0] * A[3]
umulh x14, x6, x9
mul x13, x6, x9
# A[0] * A[2]
mul x3, x6, x8
adds x12, x12, x3
umulh x4, x6, x8
adcs x13, x13, x4
# A[1] * A[3]
mul x3, x7, x9
adcs x14, x14, x3
umulh x15, x7, x9
adc x15, x15, xzr
# A[1] * A[2]
mul x3, x7, x8
adds x13, x13, x3
umulh x4, x7, x8
adcs x14, x14, x4
# A[2] * A[3]
mul x3, x8, x9
adcs x15, x15, x3
umulh x16, x8, x9
adc x16, x16, xzr
# Double
adds x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adc x17, xzr, xzr
# A[0] * A[0]
umulh x4, x6, x6
mul x10, x6, x6
# A[1] * A[1]
mul x3, x7, x7
adds x11, x11, x4
umulh x4, x7, x7
adcs x12, x12, x3
# A[2] * A[2]
mul x3, x8, x8
adcs x13, x13, x4
umulh x4, x8, x8
adcs x14, x14, x3
# A[3] * A[3]
mul x3, x9, x9
adcs x15, x15, x4
umulh x4, x9, x9
adcs x16, x16, x3
adc x17, x17, x4
# Reduce
mov x3, #38
mul x4, x3, x17
adds x13, x13, x4
umulh x5, x3, x17
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x13, #63
mul x5, x5, x3
and x13, x13, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x14
adds x10, x10, x4
umulh x14, x3, x14
mul x4, x3, x15
adcs x11, x11, x4
umulh x15, x3, x15
mul x4, x3, x16
adcs x12, x12, x4
umulh x16, x3, x16
adc x13, x13, xzr
# Add high product results in
adds x6, x10, x5
adcs x7, x11, x14
adcs x8, x12, x15
adc x9, x13, x16
subs x20, x20, #1
bne L_fe_invert3
# Store
stp x6, x7, [x29, #112]
stp x8, x9, [x29, #128]
#ifndef NDEBUG
add x0, x29, #0x50
#endif /* !NDEBUG */
add x1, x29, #0x70
add x2, x29, #0x50
#ifndef __APPLE__
bl fe_mul
#else
bl _fe_mul
#endif /* __APPLE__ */
# Loop: 10 times
mov x20, #10
ldp x6, x7, [x29, #80]
ldp x8, x9, [x29, #96]
L_fe_invert4:
# Square
# A[0] * A[1]
umulh x12, x6, x7
mul x11, x6, x7
# A[0] * A[3]
umulh x14, x6, x9
mul x13, x6, x9
# A[0] * A[2]
mul x3, x6, x8
adds x12, x12, x3
umulh x4, x6, x8
adcs x13, x13, x4
# A[1] * A[3]
mul x3, x7, x9
adcs x14, x14, x3
umulh x15, x7, x9
adc x15, x15, xzr
# A[1] * A[2]
mul x3, x7, x8
adds x13, x13, x3
umulh x4, x7, x8
adcs x14, x14, x4
# A[2] * A[3]
mul x3, x8, x9
adcs x15, x15, x3
umulh x16, x8, x9
adc x16, x16, xzr
# Double
adds x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adc x17, xzr, xzr
# A[0] * A[0]
umulh x4, x6, x6
mul x10, x6, x6
# A[1] * A[1]
mul x3, x7, x7
adds x11, x11, x4
umulh x4, x7, x7
adcs x12, x12, x3
# A[2] * A[2]
mul x3, x8, x8
adcs x13, x13, x4
umulh x4, x8, x8
adcs x14, x14, x3
# A[3] * A[3]
mul x3, x9, x9
adcs x15, x15, x4
umulh x4, x9, x9
adcs x16, x16, x3
adc x17, x17, x4
# Reduce
mov x3, #38
mul x4, x3, x17
adds x13, x13, x4
umulh x5, x3, x17
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x13, #63
mul x5, x5, x3
and x13, x13, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x14
adds x10, x10, x4
umulh x14, x3, x14
mul x4, x3, x15
adcs x11, x11, x4
umulh x15, x3, x15
mul x4, x3, x16
adcs x12, x12, x4
umulh x16, x3, x16
adc x13, x13, xzr
# Add high product results in
adds x6, x10, x5
adcs x7, x11, x14
adcs x8, x12, x15
adc x9, x13, x16
subs x20, x20, #1
bne L_fe_invert4
# Store
stp x6, x7, [x29, #80]
stp x8, x9, [x29, #96]
add x0, x29, #48
add x1, x29, #0x50
add x2, x29, #48
#ifndef __APPLE__
bl fe_mul
#else
bl _fe_mul
#endif /* __APPLE__ */
# Loop: 50 times
mov x20, #50
ldp x6, x7, [x29, #48]
ldp x8, x9, [x29, #64]
L_fe_invert5:
# Square
# A[0] * A[1]
umulh x12, x6, x7
mul x11, x6, x7
# A[0] * A[3]
umulh x14, x6, x9
mul x13, x6, x9
# A[0] * A[2]
mul x3, x6, x8
adds x12, x12, x3
umulh x4, x6, x8
adcs x13, x13, x4
# A[1] * A[3]
mul x3, x7, x9
adcs x14, x14, x3
umulh x15, x7, x9
adc x15, x15, xzr
# A[1] * A[2]
mul x3, x7, x8
adds x13, x13, x3
umulh x4, x7, x8
adcs x14, x14, x4
# A[2] * A[3]
mul x3, x8, x9
adcs x15, x15, x3
umulh x16, x8, x9
adc x16, x16, xzr
# Double
adds x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adc x17, xzr, xzr
# A[0] * A[0]
umulh x4, x6, x6
mul x10, x6, x6
# A[1] * A[1]
mul x3, x7, x7
adds x11, x11, x4
umulh x4, x7, x7
adcs x12, x12, x3
# A[2] * A[2]
mul x3, x8, x8
adcs x13, x13, x4
umulh x4, x8, x8
adcs x14, x14, x3
# A[3] * A[3]
mul x3, x9, x9
adcs x15, x15, x4
umulh x4, x9, x9
adcs x16, x16, x3
adc x17, x17, x4
# Reduce
# Modular reduction for p = 2^255 - 19, so 2^256 = 38 (mod p) and
# 2^255 = 19 (mod p). The 512-bit square result lives in x10..x17.
# Fold the highest limb (x17, weight 2^448 = 38 * 2^192) into limb 3.
mov x3, #38
mul x4, x3, x17
adds x13, x13, x4
umulh x5, x3, x17
adc x5, x5, xzr
# Fold bit 255 of the running value (and the carry word x5) down via *19,
# masking limb 3 back to 63 bits.
mov x3, #19
extr x5, x5, x13, #63
mul x5, x5, x3
and x13, x13, #0x7fffffffffffffff
# Fold limbs 4..6 (x14..x16) into limbs 0..2 via *38; the umulh halves are
# deferred and added in below.
mov x3, #38
mul x4, x3, x14
adds x10, x10, x4
umulh x14, x3, x14
mul x4, x3, x15
adcs x11, x11, x4
umulh x15, x3, x15
mul x4, x3, x16
adcs x12, x12, x4
umulh x16, x3, x16
adc x13, x13, xzr
# Add high product results in
adds x6, x10, x5
adcs x7, x11, x14
adcs x8, x12, x15
adc x9, x13, x16
subs x20, x20, #1
bne L_fe_invert5
# Store
stp x6, x7, [x29, #80]
stp x8, x9, [x29, #96]
add x0, x29, #0x50
#ifndef NDEBUG
add x1, x29, #0x50
#endif /* !NDEBUG */
add x2, x29, #48
#ifndef __APPLE__
bl fe_mul
#else
bl _fe_mul
#endif /* __APPLE__ */
# Loop: 100 times
mov x20, #0x64
ldp x6, x7, [x29, #80]
ldp x8, x9, [x29, #96]
L_fe_invert6:
# Square
# A[0] * A[1]
umulh x12, x6, x7
mul x11, x6, x7
# A[0] * A[3]
umulh x14, x6, x9
mul x13, x6, x9
# A[0] * A[2]
mul x3, x6, x8
adds x12, x12, x3
umulh x4, x6, x8
adcs x13, x13, x4
# A[1] * A[3]
mul x3, x7, x9
adcs x14, x14, x3
umulh x15, x7, x9
adc x15, x15, xzr
# A[1] * A[2]
mul x3, x7, x8
adds x13, x13, x3
umulh x4, x7, x8
adcs x14, x14, x4
# A[2] * A[3]
mul x3, x8, x9
adcs x15, x15, x3
umulh x16, x8, x9
adc x16, x16, xzr
# Double
adds x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adc x17, xzr, xzr
# A[0] * A[0]
umulh x4, x6, x6
mul x10, x6, x6
# A[1] * A[1]
mul x3, x7, x7
adds x11, x11, x4
umulh x4, x7, x7
adcs x12, x12, x3
# A[2] * A[2]
mul x3, x8, x8
adcs x13, x13, x4
umulh x4, x8, x8
adcs x14, x14, x3
# A[3] * A[3]
mul x3, x9, x9
adcs x15, x15, x4
umulh x4, x9, x9
adcs x16, x16, x3
adc x17, x17, x4
# Reduce
mov x3, #38
mul x4, x3, x17
adds x13, x13, x4
umulh x5, x3, x17
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x13, #63
mul x5, x5, x3
and x13, x13, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x14
adds x10, x10, x4
umulh x14, x3, x14
mul x4, x3, x15
adcs x11, x11, x4
umulh x15, x3, x15
mul x4, x3, x16
adcs x12, x12, x4
umulh x16, x3, x16
adc x13, x13, xzr
# Add high product results in
adds x6, x10, x5
adcs x7, x11, x14
adcs x8, x12, x15
adc x9, x13, x16
subs x20, x20, #1
bne L_fe_invert6
# Store
stp x6, x7, [x29, #112]
stp x8, x9, [x29, #128]
#ifndef NDEBUG
add x0, x29, #0x50
#endif /* !NDEBUG */
add x1, x29, #0x70
add x2, x29, #0x50
#ifndef __APPLE__
bl fe_mul
#else
bl _fe_mul
#endif /* __APPLE__ */
# Loop: 50 times
mov x20, #50
ldp x6, x7, [x29, #80]
ldp x8, x9, [x29, #96]
L_fe_invert7:
# Square
# A[0] * A[1]
umulh x12, x6, x7
mul x11, x6, x7
# A[0] * A[3]
umulh x14, x6, x9
mul x13, x6, x9
# A[0] * A[2]
mul x3, x6, x8
adds x12, x12, x3
umulh x4, x6, x8
adcs x13, x13, x4
# A[1] * A[3]
mul x3, x7, x9
adcs x14, x14, x3
umulh x15, x7, x9
adc x15, x15, xzr
# A[1] * A[2]
mul x3, x7, x8
adds x13, x13, x3
umulh x4, x7, x8
adcs x14, x14, x4
# A[2] * A[3]
mul x3, x8, x9
adcs x15, x15, x3
umulh x16, x8, x9
adc x16, x16, xzr
# Double
adds x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adc x17, xzr, xzr
# A[0] * A[0]
umulh x4, x6, x6
mul x10, x6, x6
# A[1] * A[1]
mul x3, x7, x7
adds x11, x11, x4
umulh x4, x7, x7
adcs x12, x12, x3
# A[2] * A[2]
mul x3, x8, x8
adcs x13, x13, x4
umulh x4, x8, x8
adcs x14, x14, x3
# A[3] * A[3]
mul x3, x9, x9
adcs x15, x15, x4
umulh x4, x9, x9
adcs x16, x16, x3
adc x17, x17, x4
# Reduce
mov x3, #38
mul x4, x3, x17
adds x13, x13, x4
umulh x5, x3, x17
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x13, #63
mul x5, x5, x3
and x13, x13, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x14
adds x10, x10, x4
umulh x14, x3, x14
mul x4, x3, x15
adcs x11, x11, x4
umulh x15, x3, x15
mul x4, x3, x16
adcs x12, x12, x4
umulh x16, x3, x16
adc x13, x13, xzr
# Add high product results in
adds x6, x10, x5
adcs x7, x11, x14
adcs x8, x12, x15
adc x9, x13, x16
subs x20, x20, #1
bne L_fe_invert7
# Store
stp x6, x7, [x29, #80]
stp x8, x9, [x29, #96]
add x0, x29, #48
add x1, x29, #0x50
add x2, x29, #48
#ifndef __APPLE__
bl fe_mul
#else
bl _fe_mul
#endif /* __APPLE__ */
# Loop: 5 times
mov x20, #5
ldp x6, x7, [x29, #48]
ldp x8, x9, [x29, #64]
L_fe_invert8:
# Square
# A[0] * A[1]
umulh x12, x6, x7
mul x11, x6, x7
# A[0] * A[3]
umulh x14, x6, x9
mul x13, x6, x9
# A[0] * A[2]
mul x3, x6, x8
adds x12, x12, x3
umulh x4, x6, x8
adcs x13, x13, x4
# A[1] * A[3]
mul x3, x7, x9
adcs x14, x14, x3
umulh x15, x7, x9
adc x15, x15, xzr
# A[1] * A[2]
mul x3, x7, x8
adds x13, x13, x3
umulh x4, x7, x8
adcs x14, x14, x4
# A[2] * A[3]
mul x3, x8, x9
adcs x15, x15, x3
umulh x16, x8, x9
adc x16, x16, xzr
# Double
adds x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adc x17, xzr, xzr
# A[0] * A[0]
umulh x4, x6, x6
mul x10, x6, x6
# A[1] * A[1]
mul x3, x7, x7
adds x11, x11, x4
umulh x4, x7, x7
adcs x12, x12, x3
# A[2] * A[2]
mul x3, x8, x8
adcs x13, x13, x4
umulh x4, x8, x8
adcs x14, x14, x3
# A[3] * A[3]
mul x3, x9, x9
adcs x15, x15, x4
umulh x4, x9, x9
adcs x16, x16, x3
adc x17, x17, x4
# Reduce
mov x3, #38
mul x4, x3, x17
adds x13, x13, x4
umulh x5, x3, x17
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x13, #63
mul x5, x5, x3
and x13, x13, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x14
adds x10, x10, x4
umulh x14, x3, x14
mul x4, x3, x15
adcs x11, x11, x4
umulh x15, x3, x15
mul x4, x3, x16
adcs x12, x12, x4
umulh x16, x3, x16
adc x13, x13, xzr
# Add high product results in
adds x6, x10, x5
adcs x7, x11, x14
adcs x8, x12, x15
adc x9, x13, x16
subs x20, x20, #1
bne L_fe_invert8
# Store
stp x6, x7, [x29, #48]
stp x8, x9, [x29, #64]
ldr x0, [x29, #144]
add x1, x29, #48
add x2, x29, #16
#ifndef __APPLE__
bl fe_mul
#else
bl _fe_mul
#endif /* __APPLE__ */
ldr x17, [x29, #160]
ldr x20, [x29, #168]
ldp x29, x30, [sp], #0xb0
ret
#ifndef __APPLE__
.size fe_invert,.-fe_invert
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl curve25519
.type curve25519,@function
.align 2
curve25519:
#else
.section __TEXT,__text
.globl _curve25519
.p2align 2
_curve25519:
#endif /* __APPLE__ */
# Allocate a 288-byte frame and establish x29 as the frame pointer.
stp x29, x30, [sp, #-288]!
add x29, sp, #0
# Spill registers this function uses and restores on exit: x19-x28 are
# AAPCS64 callee-saved; x17 is also preserved here (restored by the
# epilogue) - NOTE(review): x17 is caller-saved under AAPCS64, so this
# extra save is presumably for platform-specific reasons; confirm.
str x17, [x29, #200]
str x19, [x29, #208]
stp x20, x21, [x29, #216]
stp x22, x23, [x29, #232]
stp x24, x25, [x29, #248]
stp x26, x27, [x29, #264]
str x28, [x29, #280]
# x23 tracks the previously processed scalar bit (swap accumulator);
# start with 0 so the first iteration swaps iff bit 254 is set.
mov x23, xzr
# Save arguments that must survive helper calls:
# x0 = output field element, x2 = input point u-coordinate.
# (The scalar is read from [x1] inside the bit loop below.)
str x0, [x29, #176]
str x2, [x29, #184]
# Load the base-point u-coordinate into x6..x9 (four 64-bit limbs).
ldp x6, x7, [x2]
ldp x8, x9, [x2, #16]
# Initialise the ladder value at [x0] to the field element 1.
mov x10, #1
mov x11, xzr
mov x12, xzr
mov x13, xzr
stp x10, x11, [x0]
stp x12, x13, [x0, #16]
# Set zero
stp xzr, xzr, [x29, #16]
stp xzr, xzr, [x29, #32]
# Montgomery ladder: process scalar bits from bit 254 down to 0.
mov x24, #0xfe
L_curve25519_bits:
# Extract scalar bit x24: word index = bit / 64, shift amount = bit % 64.
# Only bit 0 of x5 is meaningful after the shift; higher bits are ignored
# by the "lsl 63" below.
lsr x3, x24, #6
and x4, x24, #63
ldr x5, [x1, x3, LSL 3]
lsr x5, x5, x4
# XOR with the previous bit (kept in x23): bit 0 of x23 is now 1 exactly
# when the current bit differs from the previous one, i.e. a swap of the
# two ladder points is required.
eor x23, x23, x5
# Conditional Swap
# Branch-free (constant-time) swap: 0 - (x23 << 63) is non-zero (NE) iff
# bit 0 of x23 is set, so the csel pairs below exchange the two values
# only when the scalar bit changed.
subs xzr, xzr, x23, lsl 63
ldp x25, x26, [x29, #16]
ldp x27, x28, [x29, #32]
csel x19, x25, x10, ne
csel x25, x10, x25, ne
csel x20, x26, x11, ne
csel x26, x11, x26, ne
csel x21, x27, x12, ne
csel x27, x12, x27, ne
csel x22, x28, x13, ne
csel x28, x13, x28, ne
# Conditional Swap
# Same flag-setting trick again (flags were clobbered by the csel loads'
# surrounding code path? no - subs is repeated because the first swap's
# condition must be re-established for this second pair of values).
subs xzr, xzr, x23, lsl 63
ldp x10, x11, [x0]
ldp x12, x13, [x0, #16]
csel x14, x10, x6, ne
csel x10, x6, x10, ne
csel x15, x11, x7, ne
csel x11, x7, x11, ne
csel x16, x12, x8, ne
csel x12, x8, x12, ne
csel x17, x13, x9, ne
csel x13, x9, x13, ne
# Remember this bit so the next iteration swaps only on a change.
mov x23, x5
# Add
adds x6, x10, x25
adcs x7, x11, x26
adcs x8, x12, x27
adcs x9, x13, x28
cset x5, cs
mov x3, #19
extr x5, x5, x9, #63
mul x3, x5, x3
# Sub modulus (if overflow)
adds x6, x6, x3
adcs x7, x7, xzr
and x9, x9, #0x7fffffffffffffff
adcs x8, x8, xzr
adc x9, x9, xzr
# Sub
subs x25, x10, x25
sbcs x26, x11, x26
sbcs x27, x12, x27
sbcs x28, x13, x28
csetm x5, cc
mov x3, #-19
extr x5, x5, x28, #63
mul x3, x5, x3
# Add modulus (if underflow)
subs x25, x25, x3
sbcs x26, x26, xzr
and x28, x28, #0x7fffffffffffffff
sbcs x27, x27, xzr
sbc x28, x28, xzr
stp x25, x26, [x29, #80]
stp x27, x28, [x29, #96]
# Add
adds x10, x14, x19
adcs x11, x15, x20
adcs x12, x16, x21
adcs x13, x17, x22
cset x5, cs
mov x3, #19
extr x5, x5, x13, #63
mul x3, x5, x3
# Sub modulus (if overflow)
adds x10, x10, x3
adcs x11, x11, xzr
and x13, x13, #0x7fffffffffffffff
adcs x12, x12, xzr
adc x13, x13, xzr
# Sub
subs x14, x14, x19
sbcs x15, x15, x20
sbcs x16, x16, x21
sbcs x17, x17, x22
csetm x5, cc
mov x3, #-19
extr x5, x5, x17, #63
mul x3, x5, x3
# Add modulus (if underflow)
subs x14, x14, x3
sbcs x15, x15, xzr
and x17, x17, #0x7fffffffffffffff
sbcs x16, x16, xzr
sbc x17, x17, xzr
# Multiply
# A[0] * B[0]
umulh x20, x14, x6
mul x19, x14, x6
# A[2] * B[0]
umulh x22, x16, x6
mul x21, x16, x6
# A[1] * B[0]
mul x3, x15, x6
adds x20, x20, x3
umulh x4, x15, x6
adcs x21, x21, x4
# A[1] * B[3]
umulh x26, x15, x9
adc x22, x22, xzr
mul x25, x15, x9
# A[0] * B[1]
mul x3, x14, x7
adds x20, x20, x3
umulh x4, x14, x7
adcs x21, x21, x4
# A[2] * B[1]
mul x3, x16, x7
adcs x22, x22, x3
umulh x4, x16, x7
adcs x25, x25, x4
adc x26, x26, xzr
# A[1] * B[2]
mul x3, x15, x8
adds x22, x22, x3
umulh x4, x15, x8
adcs x25, x25, x4
adcs x26, x26, xzr
adc x27, xzr, xzr
# A[0] * B[2]
mul x3, x14, x8
adds x21, x21, x3
umulh x4, x14, x8
adcs x22, x22, x4
adcs x25, x25, xzr
adcs x26, x26, xzr
adc x27, x27, xzr
# A[1] * B[1]
mul x3, x15, x7
adds x21, x21, x3
umulh x4, x15, x7
adcs x22, x22, x4
# A[3] * B[1]
mul x3, x17, x7
adcs x25, x25, x3
umulh x4, x17, x7
adcs x26, x26, x4
adc x27, x27, xzr
# A[2] * B[2]
mul x3, x16, x8
adds x25, x25, x3
umulh x4, x16, x8
adcs x26, x26, x4
# A[3] * B[3]
mul x3, x17, x9
adcs x27, x27, x3
umulh x28, x17, x9
adc x28, x28, xzr
# A[0] * B[3]
mul x3, x14, x9
adds x22, x22, x3
umulh x4, x14, x9
adcs x25, x25, x4
# A[2] * B[3]
mul x3, x16, x9
adcs x26, x26, x3
umulh x4, x16, x9
adcs x27, x27, x4
adc x28, x28, xzr
# A[3] * B[0]
mul x3, x17, x6
adds x22, x22, x3
umulh x4, x17, x6
adcs x25, x25, x4
# A[3] * B[2]
mul x3, x17, x8
adcs x26, x26, x3
umulh x4, x17, x8
adcs x27, x27, x4
adc x28, x28, xzr
# Reduce
mov x3, #38
mul x4, x3, x28
adds x22, x22, x4
umulh x5, x3, x28
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x22, #63
mul x5, x5, x3
and x22, x22, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x25
adds x19, x19, x4
umulh x25, x3, x25
mul x4, x3, x26
adcs x20, x20, x4
umulh x26, x3, x26
mul x4, x3, x27
adcs x21, x21, x4
umulh x27, x3, x27
adc x22, x22, xzr
# Add high product results in
adds x19, x19, x5
adcs x20, x20, x25
adcs x21, x21, x26
adc x22, x22, x27
# Store
stp x19, x20, [x29, #48]
stp x21, x22, [x29, #64]
# Multiply
ldp x25, x26, [x29, #80]
ldp x27, x28, [x29, #96]
# A[0] * B[0]
umulh x20, x10, x25
mul x19, x10, x25
# A[2] * B[0]
umulh x22, x12, x25
mul x21, x12, x25
# A[1] * B[0]
mul x3, x11, x25
adds x20, x20, x3
umulh x4, x11, x25
adcs x21, x21, x4
# A[1] * B[3]
umulh x15, x11, x28
adc x22, x22, xzr
mul x14, x11, x28
# A[0] * B[1]
mul x3, x10, x26
adds x20, x20, x3
umulh x4, x10, x26
adcs x21, x21, x4
# A[2] * B[1]
mul x3, x12, x26
adcs x22, x22, x3
umulh x4, x12, x26
adcs x14, x14, x4
adc x15, x15, xzr
# A[1] * B[2]
mul x3, x11, x27
adds x22, x22, x3
umulh x4, x11, x27
adcs x14, x14, x4
adcs x15, x15, xzr
adc x16, xzr, xzr
# A[0] * B[2]
mul x3, x10, x27
adds x21, x21, x3
umulh x4, x10, x27
adcs x22, x22, x4
adcs x14, x14, xzr
adcs x15, x15, xzr
adc x16, x16, xzr
# A[1] * B[1]
mul x3, x11, x26
adds x21, x21, x3
umulh x4, x11, x26
adcs x22, x22, x4
# A[3] * B[1]
mul x3, x13, x26
adcs x14, x14, x3
umulh x4, x13, x26
adcs x15, x15, x4
adc x16, x16, xzr
# A[2] * B[2]
mul x3, x12, x27
adds x14, x14, x3
umulh x4, x12, x27
adcs x15, x15, x4
# A[3] * B[3]
mul x3, x13, x28
adcs x16, x16, x3
umulh x17, x13, x28
adc x17, x17, xzr
# A[0] * B[3]
mul x3, x10, x28
adds x22, x22, x3
umulh x4, x10, x28
adcs x14, x14, x4
# A[2] * B[3]
mul x3, x12, x28
adcs x15, x15, x3
umulh x4, x12, x28
adcs x16, x16, x4
adc x17, x17, xzr
# A[3] * B[0]
mul x3, x13, x25
adds x22, x22, x3
umulh x4, x13, x25
adcs x14, x14, x4
# A[3] * B[2]
mul x3, x13, x27
adcs x15, x15, x3
umulh x4, x13, x27
adcs x16, x16, x4
adc x17, x17, xzr
# Reduce
mov x3, #38
mul x4, x3, x17
adds x22, x22, x4
umulh x5, x3, x17
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x22, #63
mul x5, x5, x3
and x22, x22, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x14
adds x19, x19, x4
umulh x14, x3, x14
mul x4, x3, x15
adcs x20, x20, x4
umulh x15, x3, x15
mul x4, x3, x16
adcs x21, x21, x4
umulh x16, x3, x16
adc x22, x22, xzr
# Add high product results in
adds x19, x19, x5
adcs x20, x20, x14
adcs x21, x21, x15
adc x22, x22, x16
# Square
# A[0] * A[1]
umulh x12, x25, x26
mul x11, x25, x26
# A[0] * A[3]
umulh x14, x25, x28
mul x13, x25, x28
# A[0] * A[2]
mul x3, x25, x27
adds x12, x12, x3
umulh x4, x25, x27
adcs x13, x13, x4
# A[1] * A[3]
mul x3, x26, x28
adcs x14, x14, x3
umulh x15, x26, x28
adc x15, x15, xzr
# A[1] * A[2]
mul x3, x26, x27
adds x13, x13, x3
umulh x4, x26, x27
adcs x14, x14, x4
# A[2] * A[3]
mul x3, x27, x28
adcs x15, x15, x3
umulh x16, x27, x28
adc x16, x16, xzr
# Double
adds x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adc x17, xzr, xzr
# A[0] * A[0]
umulh x4, x25, x25
mul x10, x25, x25
# A[1] * A[1]
mul x3, x26, x26
adds x11, x11, x4
umulh x4, x26, x26
adcs x12, x12, x3
# A[2] * A[2]
mul x3, x27, x27
adcs x13, x13, x4
umulh x4, x27, x27
adcs x14, x14, x3
# A[3] * A[3]
mul x3, x28, x28
adcs x15, x15, x4
umulh x4, x28, x28
adcs x16, x16, x3
adc x17, x17, x4
# Reduce
mov x3, #38
mul x4, x3, x17
adds x13, x13, x4
umulh x5, x3, x17
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x13, #63
mul x5, x5, x3
and x13, x13, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x14
adds x10, x10, x4
umulh x14, x3, x14
mul x4, x3, x15
adcs x11, x11, x4
umulh x15, x3, x15
mul x4, x3, x16
adcs x12, x12, x4
umulh x16, x3, x16
adc x13, x13, xzr
# Add high product results in
adds x10, x10, x5
adcs x11, x11, x14
adcs x12, x12, x15
adc x13, x13, x16
# Square
# A[0] * A[1]
umulh x16, x6, x7
mul x15, x6, x7
# A[0] * A[3]
umulh x25, x6, x9
mul x17, x6, x9
# A[0] * A[2]
mul x3, x6, x8
adds x16, x16, x3
umulh x4, x6, x8
adcs x17, x17, x4
# A[1] * A[3]
mul x3, x7, x9
adcs x25, x25, x3
umulh x26, x7, x9
adc x26, x26, xzr
# A[1] * A[2]
mul x3, x7, x8
adds x17, x17, x3
umulh x4, x7, x8
adcs x25, x25, x4
# A[2] * A[3]
mul x3, x8, x9
adcs x26, x26, x3
umulh x27, x8, x9
adc x27, x27, xzr
# Double
adds x15, x15, x15
adcs x16, x16, x16
adcs x17, x17, x17
adcs x25, x25, x25
adcs x26, x26, x26
adcs x27, x27, x27
adc x28, xzr, xzr
# A[0] * A[0]
umulh x4, x6, x6
mul x14, x6, x6
# A[1] * A[1]
mul x3, x7, x7
adds x15, x15, x4
umulh x4, x7, x7
adcs x16, x16, x3
# A[2] * A[2]
mul x3, x8, x8
adcs x17, x17, x4
umulh x4, x8, x8
adcs x25, x25, x3
# A[3] * A[3]
mul x3, x9, x9
adcs x26, x26, x4
umulh x4, x9, x9
adcs x27, x27, x3
adc x28, x28, x4
# Reduce
mov x3, #38
mul x4, x3, x28
adds x17, x17, x4
umulh x5, x3, x28
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x17, #63
mul x5, x5, x3
and x17, x17, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x25
adds x14, x14, x4
umulh x25, x3, x25
mul x4, x3, x26
adcs x15, x15, x4
umulh x26, x3, x26
mul x4, x3, x27
adcs x16, x16, x4
umulh x27, x3, x27
adc x17, x17, xzr
# Add high product results in
adds x14, x14, x5
adcs x15, x15, x25
adcs x16, x16, x26
adc x17, x17, x27
# Multiply
# A[0] * B[0]
umulh x7, x14, x10
mul x6, x14, x10
# A[2] * B[0]
umulh x9, x16, x10
mul x8, x16, x10
# A[1] * B[0]
mul x3, x15, x10
adds x7, x7, x3
umulh x4, x15, x10
adcs x8, x8, x4
# A[1] * B[3]
umulh x26, x15, x13
adc x9, x9, xzr
mul x25, x15, x13
# A[0] * B[1]
mul x3, x14, x11
adds x7, x7, x3
umulh x4, x14, x11
adcs x8, x8, x4
# A[2] * B[1]
mul x3, x16, x11
adcs x9, x9, x3
umulh x4, x16, x11
adcs x25, x25, x4
adc x26, x26, xzr
# A[1] * B[2]
mul x3, x15, x12
adds x9, x9, x3
umulh x4, x15, x12
adcs x25, x25, x4
adcs x26, x26, xzr
adc x27, xzr, xzr
# A[0] * B[2]
mul x3, x14, x12
adds x8, x8, x3
umulh x4, x14, x12
adcs x9, x9, x4
adcs x25, x25, xzr
adcs x26, x26, xzr
adc x27, x27, xzr
# A[1] * B[1]
mul x3, x15, x11
adds x8, x8, x3
umulh x4, x15, x11
adcs x9, x9, x4
# A[3] * B[1]
mul x3, x17, x11
adcs x25, x25, x3
umulh x4, x17, x11
adcs x26, x26, x4
adc x27, x27, xzr
# A[2] * B[2]
mul x3, x16, x12
adds x25, x25, x3
umulh x4, x16, x12
adcs x26, x26, x4
# A[3] * B[3]
mul x3, x17, x13
adcs x27, x27, x3
umulh x28, x17, x13
adc x28, x28, xzr
# A[0] * B[3]
mul x3, x14, x13
adds x9, x9, x3
umulh x4, x14, x13
adcs x25, x25, x4
# A[2] * B[3]
mul x3, x16, x13
adcs x26, x26, x3
umulh x4, x16, x13
adcs x27, x27, x4
adc x28, x28, xzr
# A[3] * B[0]
mul x3, x17, x10
adds x9, x9, x3
umulh x4, x17, x10
adcs x25, x25, x4
# A[3] * B[2]
mul x3, x17, x12
adcs x26, x26, x3
umulh x4, x17, x12
adcs x27, x27, x4
adc x28, x28, xzr
# Reduce
mov x3, #38
mul x4, x3, x28
adds x9, x9, x4
umulh x5, x3, x28
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x9, #63
mul x5, x5, x3
and x9, x9, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x25
adds x6, x6, x4
umulh x25, x3, x25
mul x4, x3, x26
adcs x7, x7, x4
umulh x26, x3, x26
mul x4, x3, x27
adcs x8, x8, x4
umulh x27, x3, x27
adc x9, x9, xzr
# Add high product results in
adds x6, x6, x5
adcs x7, x7, x25
adcs x8, x8, x26
adc x9, x9, x27
# Store
stp x6, x7, [x0]
stp x8, x9, [x0, #16]
# Sub
subs x14, x14, x10
sbcs x15, x15, x11
sbcs x16, x16, x12
sbcs x17, x17, x13
csetm x5, cc
mov x3, #-19
# Mask the modulus
extr x5, x5, x17, #63
mul x3, x5, x3
# Add modulus (if underflow)
subs x14, x14, x3
sbcs x15, x15, xzr
and x17, x17, #0x7fffffffffffffff
sbcs x16, x16, xzr
sbc x17, x17, xzr
# Multiply by 121666
# x5 = 0x1db42 = 121666 = (A + 2) / 4 for curve25519 (A = 486662), the
# a24-style constant used in the Montgomery-ladder doubling formula.
mov x5, #0xdb42
movk x5, #1, lsl 16
# Schoolbook multiply of the 4-limb value x14..x17 by the 17-bit constant;
# product accumulates in x6..x9 with top carry in x4.
mul x6, x14, x5
umulh x7, x14, x5
mul x3, x15, x5
umulh x8, x15, x5
adds x7, x7, x3
adc x8, x8, xzr
mul x3, x16, x5
umulh x9, x16, x5
adds x8, x8, x3
adc x9, x9, xzr
mul x3, x17, x5
umulh x4, x17, x5
adds x9, x9, x3
adc x4, x4, xzr
# Reduce mod p = 2^255 - 19: fold everything above bit 255 back in via *19
# and mask limb 3 to 63 bits.
mov x5, #19
extr x4, x4, x9, #63
mul x4, x4, x5
adds x6, x6, x4
adcs x7, x7, xzr
and x9, x9, #0x7fffffffffffffff
adcs x8, x8, xzr
adc x9, x9, xzr
# Add
adds x10, x10, x6
adcs x11, x11, x7
adcs x12, x12, x8
adcs x13, x13, x9
cset x5, cs
mov x3, #19
# Mask the modulus
extr x5, x5, x13, #63
mul x3, x5, x3
# Sub modulus (if overflow)
adds x10, x10, x3
adcs x11, x11, xzr
and x13, x13, #0x7fffffffffffffff
adcs x12, x12, xzr
adc x13, x13, xzr
# Multiply
# A[0] * B[0]
umulh x7, x14, x10
mul x6, x14, x10
# A[2] * B[0]
umulh x9, x16, x10
mul x8, x16, x10
# A[1] * B[0]
mul x3, x15, x10
adds x7, x7, x3
umulh x4, x15, x10
adcs x8, x8, x4
# A[1] * B[3]
umulh x26, x15, x13
adc x9, x9, xzr
mul x25, x15, x13
# A[0] * B[1]
mul x3, x14, x11
adds x7, x7, x3
umulh x4, x14, x11
adcs x8, x8, x4
# A[2] * B[1]
mul x3, x16, x11
adcs x9, x9, x3
umulh x4, x16, x11
adcs x25, x25, x4
adc x26, x26, xzr
# A[1] * B[2]
mul x3, x15, x12
adds x9, x9, x3
umulh x4, x15, x12
adcs x25, x25, x4
adcs x26, x26, xzr
adc x27, xzr, xzr
# A[0] * B[2]
mul x3, x14, x12
adds x8, x8, x3
umulh x4, x14, x12
adcs x9, x9, x4
adcs x25, x25, xzr
adcs x26, x26, xzr
adc x27, x27, xzr
# A[1] * B[1]
mul x3, x15, x11
adds x8, x8, x3
umulh x4, x15, x11
adcs x9, x9, x4
# A[3] * B[1]
mul x3, x17, x11
adcs x25, x25, x3
umulh x4, x17, x11
adcs x26, x26, x4
adc x27, x27, xzr
# A[2] * B[2]
mul x3, x16, x12
adds x25, x25, x3
umulh x4, x16, x12
adcs x26, x26, x4
# A[3] * B[3]
mul x3, x17, x13
adcs x27, x27, x3
umulh x28, x17, x13
adc x28, x28, xzr
# A[0] * B[3]
mul x3, x14, x13
adds x9, x9, x3
umulh x4, x14, x13
adcs x25, x25, x4
# A[2] * B[3]
mul x3, x16, x13
adcs x26, x26, x3
umulh x4, x16, x13
adcs x27, x27, x4
adc x28, x28, xzr
# A[3] * B[0]
mul x3, x17, x10
adds x9, x9, x3
umulh x4, x17, x10
adcs x25, x25, x4
# A[3] * B[2]
mul x3, x17, x12
adcs x26, x26, x3
umulh x4, x17, x12
adcs x27, x27, x4
adc x28, x28, xzr
# Reduce
mov x3, #38
mul x4, x3, x28
adds x9, x9, x4
umulh x5, x3, x28
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x9, #63
mul x5, x5, x3
and x9, x9, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x25
adds x6, x6, x4
umulh x25, x3, x25
mul x4, x3, x26
adcs x7, x7, x4
umulh x26, x3, x26
mul x4, x3, x27
adcs x8, x8, x4
umulh x27, x3, x27
adc x9, x9, xzr
# Add high product results in
adds x6, x6, x5
adcs x7, x7, x25
adcs x8, x8, x26
adc x9, x9, x27
# Store
stp x6, x7, [x29, #16]
stp x8, x9, [x29, #32]
# Add
ldp x25, x26, [x29, #48]
ldp x27, x28, [x29, #64]
adds x10, x25, x19
adcs x11, x26, x20
adcs x12, x27, x21
adcs x13, x28, x22
cset x5, cs
mov x3, #19
extr x5, x5, x13, #63
mul x3, x5, x3
# Sub modulus (if overflow)
adds x10, x10, x3
adcs x11, x11, xzr
and x13, x13, #0x7fffffffffffffff
adcs x12, x12, xzr
adc x13, x13, xzr
# Sub
subs x19, x25, x19
sbcs x20, x26, x20
sbcs x21, x27, x21
sbcs x22, x28, x22
csetm x5, cc
mov x3, #-19
extr x5, x5, x22, #63
mul x3, x5, x3
# Add modulus (if underflow)
subs x19, x19, x3
sbcs x20, x20, xzr
and x22, x22, #0x7fffffffffffffff
sbcs x21, x21, xzr
sbc x22, x22, xzr
# Square
# A[0] * A[1]
umulh x8, x10, x11
mul x7, x10, x11
# A[0] * A[3]
umulh x25, x10, x13
mul x9, x10, x13
# A[0] * A[2]
mul x3, x10, x12
adds x8, x8, x3
umulh x4, x10, x12
adcs x9, x9, x4
# A[1] * A[3]
mul x3, x11, x13
adcs x25, x25, x3
umulh x26, x11, x13
adc x26, x26, xzr
# A[1] * A[2]
mul x3, x11, x12
adds x9, x9, x3
umulh x4, x11, x12
adcs x25, x25, x4
# A[2] * A[3]
mul x3, x12, x13
adcs x26, x26, x3
umulh x27, x12, x13
adc x27, x27, xzr
# Double
adds x7, x7, x7
adcs x8, x8, x8
adcs x9, x9, x9
adcs x25, x25, x25
adcs x26, x26, x26
adcs x27, x27, x27
adc x28, xzr, xzr
# A[0] * A[0]
umulh x4, x10, x10
mul x6, x10, x10
# A[1] * A[1]
mul x3, x11, x11
adds x7, x7, x4
umulh x4, x11, x11
adcs x8, x8, x3
# A[2] * A[2]
mul x3, x12, x12
adcs x9, x9, x4
umulh x4, x12, x12
adcs x25, x25, x3
# A[3] * A[3]
mul x3, x13, x13
adcs x26, x26, x4
umulh x4, x13, x13
adcs x27, x27, x3
adc x28, x28, x4
# Reduce
mov x3, #38
mul x4, x3, x28
adds x9, x9, x4
umulh x5, x3, x28
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x9, #63
mul x5, x5, x3
and x9, x9, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x25
adds x6, x6, x4
umulh x25, x3, x25
mul x4, x3, x26
adcs x7, x7, x4
umulh x26, x3, x26
mul x4, x3, x27
adcs x8, x8, x4
umulh x27, x3, x27
adc x9, x9, xzr
# Add high product results in
adds x6, x6, x5
adcs x7, x7, x25
adcs x8, x8, x26
adc x9, x9, x27
# Square
# A[0] * A[1]
umulh x16, x19, x20
mul x15, x19, x20
# A[0] * A[3]
umulh x25, x19, x22
mul x17, x19, x22
# A[0] * A[2]
mul x3, x19, x21
adds x16, x16, x3
umulh x4, x19, x21
adcs x17, x17, x4
# A[1] * A[3]
mul x3, x20, x22
adcs x25, x25, x3
umulh x26, x20, x22
adc x26, x26, xzr
# A[1] * A[2]
mul x3, x20, x21
adds x17, x17, x3
umulh x4, x20, x21
adcs x25, x25, x4
# A[2] * A[3]
mul x3, x21, x22
adcs x26, x26, x3
umulh x27, x21, x22
adc x27, x27, xzr
# Double
adds x15, x15, x15
adcs x16, x16, x16
adcs x17, x17, x17
adcs x25, x25, x25
adcs x26, x26, x26
adcs x27, x27, x27
adc x28, xzr, xzr
# A[0] * A[0]
umulh x4, x19, x19
mul x14, x19, x19
# A[1] * A[1]
mul x3, x20, x20
adds x15, x15, x4
umulh x4, x20, x20
adcs x16, x16, x3
# A[2] * A[2]
mul x3, x21, x21
adcs x17, x17, x4
umulh x4, x21, x21
adcs x25, x25, x3
# A[3] * A[3]
mul x3, x22, x22
adcs x26, x26, x4
umulh x4, x22, x22
adcs x27, x27, x3
adc x28, x28, x4
# Reduce
mov x3, #38
mul x4, x3, x28
adds x17, x17, x4
umulh x5, x3, x28
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x17, #63
mul x5, x5, x3
and x17, x17, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x25
adds x14, x14, x4
umulh x25, x3, x25
mul x4, x3, x26
adcs x15, x15, x4
umulh x26, x3, x26
mul x4, x3, x27
adcs x16, x16, x4
umulh x27, x3, x27
adc x17, x17, xzr
# Add high product results in
adds x14, x14, x5
adcs x15, x15, x25
adcs x16, x16, x26
adc x17, x17, x27
# Multiply
ldp x19, x20, [x2]
ldp x21, x22, [x2, #16]
# A[0] * B[0]
umulh x11, x19, x14
mul x10, x19, x14
# A[2] * B[0]
umulh x13, x21, x14
mul x12, x21, x14
# A[1] * B[0]
mul x3, x20, x14
adds x11, x11, x3
umulh x4, x20, x14
adcs x12, x12, x4
# A[1] * B[3]
umulh x26, x20, x17
adc x13, x13, xzr
mul x25, x20, x17
# A[0] * B[1]
mul x3, x19, x15
adds x11, x11, x3
umulh x4, x19, x15
adcs x12, x12, x4
# A[2] * B[1]
mul x3, x21, x15
adcs x13, x13, x3
umulh x4, x21, x15
adcs x25, x25, x4
adc x26, x26, xzr
# A[1] * B[2]
mul x3, x20, x16
adds x13, x13, x3
umulh x4, x20, x16
adcs x25, x25, x4
adcs x26, x26, xzr
adc x27, xzr, xzr
# A[0] * B[2]
mul x3, x19, x16
adds x12, x12, x3
umulh x4, x19, x16
adcs x13, x13, x4
adcs x25, x25, xzr
adcs x26, x26, xzr
adc x27, x27, xzr
# A[1] * B[1]
mul x3, x20, x15
adds x12, x12, x3
umulh x4, x20, x15
adcs x13, x13, x4
# A[3] * B[1]
mul x3, x22, x15
adcs x25, x25, x3
umulh x4, x22, x15
adcs x26, x26, x4
adc x27, x27, xzr
# A[2] * B[2]
mul x3, x21, x16
adds x25, x25, x3
umulh x4, x21, x16
adcs x26, x26, x4
# A[3] * B[3]
mul x3, x22, x17
adcs x27, x27, x3
umulh x28, x22, x17
adc x28, x28, xzr
# A[0] * B[3]
mul x3, x19, x17
adds x13, x13, x3
umulh x4, x19, x17
adcs x25, x25, x4
# A[2] * B[3]
mul x3, x21, x17
adcs x26, x26, x3
umulh x4, x21, x17
adcs x27, x27, x4
adc x28, x28, xzr
# A[3] * B[0]
mul x3, x22, x14
adds x13, x13, x3
umulh x4, x22, x14
adcs x25, x25, x4
# A[3] * B[2]
mul x3, x22, x16
adcs x26, x26, x3
umulh x4, x22, x16
adcs x27, x27, x4
adc x28, x28, xzr
# Reduce
mov x3, #38
mul x4, x3, x28
adds x13, x13, x4
umulh x5, x3, x28
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x13, #63
mul x5, x5, x3
and x13, x13, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x25
adds x10, x10, x4
umulh x25, x3, x25
mul x4, x3, x26
adcs x11, x11, x4
umulh x26, x3, x26
mul x4, x3, x27
adcs x12, x12, x4
umulh x27, x3, x27
adc x13, x13, xzr
# Add high product results in
adds x10, x10, x5
adcs x11, x11, x25
adcs x12, x12, x26
adc x13, x13, x27
subs x24, x24, #1
bge L_curve25519_bits
# Invert
add x0, x29, #48
add x1, x29, #16
#ifndef __APPLE__
bl fe_sq
#else
bl _fe_sq
#endif /* __APPLE__ */
add x0, x29, #0x50
add x1, x29, #48
#ifndef __APPLE__
bl fe_sq
#else
bl _fe_sq
#endif /* __APPLE__ */
#ifndef NDEBUG
add x0, x29, #0x50
#endif /* !NDEBUG */
add x1, x29, #0x50
#ifndef __APPLE__
bl fe_sq
#else
bl _fe_sq
#endif /* __APPLE__ */
#ifndef NDEBUG
add x0, x29, #0x50
#endif /* !NDEBUG */
add x1, x29, #16
add x2, x29, #0x50
#ifndef __APPLE__
bl fe_mul
#else
bl _fe_mul
#endif /* __APPLE__ */
add x0, x29, #48
add x1, x29, #48
add x2, x29, #0x50
#ifndef __APPLE__
bl fe_mul
#else
bl _fe_mul
#endif /* __APPLE__ */
add x0, x29, #0x70
#ifndef NDEBUG
add x1, x29, #48
#endif /* !NDEBUG */
#ifndef __APPLE__
bl fe_sq
#else
bl _fe_sq
#endif /* __APPLE__ */
add x0, x29, #0x50
add x1, x29, #0x50
add x2, x29, #0x70
#ifndef __APPLE__
bl fe_mul
#else
bl _fe_mul
#endif /* __APPLE__ */
# Loop: 5 times
mov x24, #5
ldp x6, x7, [x29, #80]
ldp x8, x9, [x29, #96]
L_curve25519_inv_1:
# Square
# A[0] * A[1]
umulh x12, x6, x7
mul x11, x6, x7
# A[0] * A[3]
umulh x14, x6, x9
mul x13, x6, x9
# A[0] * A[2]
mul x3, x6, x8
adds x12, x12, x3
umulh x4, x6, x8
adcs x13, x13, x4
# A[1] * A[3]
mul x3, x7, x9
adcs x14, x14, x3
umulh x15, x7, x9
adc x15, x15, xzr
# A[1] * A[2]
mul x3, x7, x8
adds x13, x13, x3
umulh x4, x7, x8
adcs x14, x14, x4
# A[2] * A[3]
mul x3, x8, x9
adcs x15, x15, x3
umulh x16, x8, x9
adc x16, x16, xzr
# Double
adds x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adc x17, xzr, xzr
# A[0] * A[0]
umulh x4, x6, x6
mul x10, x6, x6
# A[1] * A[1]
mul x3, x7, x7
adds x11, x11, x4
umulh x4, x7, x7
adcs x12, x12, x3
# A[2] * A[2]
mul x3, x8, x8
adcs x13, x13, x4
umulh x4, x8, x8
adcs x14, x14, x3
# A[3] * A[3]
mul x3, x9, x9
adcs x15, x15, x4
umulh x4, x9, x9
adcs x16, x16, x3
adc x17, x17, x4
# Reduce
mov x3, #38
mul x4, x3, x17
adds x13, x13, x4
umulh x5, x3, x17
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x13, #63
mul x5, x5, x3
and x13, x13, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x14
adds x10, x10, x4
umulh x14, x3, x14
mul x4, x3, x15
adcs x11, x11, x4
umulh x15, x3, x15
mul x4, x3, x16
adcs x12, x12, x4
umulh x16, x3, x16
adc x13, x13, xzr
# Add high product results in
adds x6, x10, x5
adcs x7, x11, x14
adcs x8, x12, x15
adc x9, x13, x16
subs x24, x24, #1
bne L_curve25519_inv_1
# Store
stp x6, x7, [x29, #112]
stp x8, x9, [x29, #128]
#ifndef NDEBUG
add x0, x29, #0x50
#endif /* !NDEBUG */
add x1, x29, #0x70
add x2, x29, #0x50
#ifndef __APPLE__
bl fe_mul
#else
bl _fe_mul
#endif /* __APPLE__ */
# Loop: 10 times
mov x24, #10
ldp x6, x7, [x29, #80]
ldp x8, x9, [x29, #96]
L_curve25519_inv_2:
# Square
# A[0] * A[1]
umulh x12, x6, x7
mul x11, x6, x7
# A[0] * A[3]
umulh x14, x6, x9
mul x13, x6, x9
# A[0] * A[2]
mul x3, x6, x8
adds x12, x12, x3
umulh x4, x6, x8
adcs x13, x13, x4
# A[1] * A[3]
mul x3, x7, x9
adcs x14, x14, x3
umulh x15, x7, x9
adc x15, x15, xzr
# A[1] * A[2]
mul x3, x7, x8
adds x13, x13, x3
umulh x4, x7, x8
adcs x14, x14, x4
# A[2] * A[3]
mul x3, x8, x9
adcs x15, x15, x3
umulh x16, x8, x9
adc x16, x16, xzr
# Double
adds x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adc x17, xzr, xzr
# A[0] * A[0]
umulh x4, x6, x6
mul x10, x6, x6
# A[1] * A[1]
mul x3, x7, x7
adds x11, x11, x4
umulh x4, x7, x7
adcs x12, x12, x3
# A[2] * A[2]
mul x3, x8, x8
adcs x13, x13, x4
umulh x4, x8, x8
adcs x14, x14, x3
# A[3] * A[3]
mul x3, x9, x9
adcs x15, x15, x4
umulh x4, x9, x9
adcs x16, x16, x3
adc x17, x17, x4
# Reduce
mov x3, #38
mul x4, x3, x17
adds x13, x13, x4
umulh x5, x3, x17
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x13, #63
mul x5, x5, x3
and x13, x13, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x14
adds x10, x10, x4
umulh x14, x3, x14
mul x4, x3, x15
adcs x11, x11, x4
umulh x15, x3, x15
mul x4, x3, x16
adcs x12, x12, x4
umulh x16, x3, x16
adc x13, x13, xzr
# Add high product results in
adds x6, x10, x5
adcs x7, x11, x14
adcs x8, x12, x15
adc x9, x13, x16
subs x24, x24, #1
bne L_curve25519_inv_2
# Store
stp x6, x7, [x29, #112]
stp x8, x9, [x29, #128]
add x0, x29, #0x70
#ifndef NDEBUG
add x1, x29, #0x70
#endif /* !NDEBUG */
add x2, x29, #0x50
#ifndef __APPLE__
bl fe_mul
#else
bl _fe_mul
#endif /* __APPLE__ */
# Loop: 20 times
mov x24, #20
ldp x6, x7, [x29, #112]
ldp x8, x9, [x29, #128]
L_curve25519_inv_3:
# Square
# A[0] * A[1]
umulh x12, x6, x7
mul x11, x6, x7
# A[0] * A[3]
umulh x14, x6, x9
mul x13, x6, x9
# A[0] * A[2]
mul x3, x6, x8
adds x12, x12, x3
umulh x4, x6, x8
adcs x13, x13, x4
# A[1] * A[3]
mul x3, x7, x9
adcs x14, x14, x3
umulh x15, x7, x9
adc x15, x15, xzr
# A[1] * A[2]
mul x3, x7, x8
adds x13, x13, x3
umulh x4, x7, x8
adcs x14, x14, x4
# A[2] * A[3]
mul x3, x8, x9
adcs x15, x15, x3
umulh x16, x8, x9
adc x16, x16, xzr
# Double
adds x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adc x17, xzr, xzr
# A[0] * A[0]
umulh x4, x6, x6
mul x10, x6, x6
# A[1] * A[1]
mul x3, x7, x7
adds x11, x11, x4
umulh x4, x7, x7
adcs x12, x12, x3
# A[2] * A[2]
mul x3, x8, x8
adcs x13, x13, x4
umulh x4, x8, x8
adcs x14, x14, x3
# A[3] * A[3]
mul x3, x9, x9
adcs x15, x15, x4
umulh x4, x9, x9
adcs x16, x16, x3
adc x17, x17, x4
# Reduce
mov x3, #38
mul x4, x3, x17
adds x13, x13, x4
umulh x5, x3, x17
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x13, #63
mul x5, x5, x3
and x13, x13, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x14
adds x10, x10, x4
umulh x14, x3, x14
mul x4, x3, x15
adcs x11, x11, x4
umulh x15, x3, x15
mul x4, x3, x16
adcs x12, x12, x4
umulh x16, x3, x16
adc x13, x13, xzr
# Add high product results in
adds x6, x10, x5
adcs x7, x11, x14
adcs x8, x12, x15
adc x9, x13, x16
subs x24, x24, #1
bne L_curve25519_inv_3
# Store
stp x6, x7, [x29, #144]
stp x8, x9, [x29, #160]
#ifndef NDEBUG
add x0, x29, #0x70
#endif /* !NDEBUG */
add x1, x29, #0x90
add x2, x29, #0x70
#ifndef __APPLE__
bl fe_mul
#else
bl _fe_mul
#endif /* __APPLE__ */
# Loop: 10 times
mov x24, #10
ldp x6, x7, [x29, #112]
ldp x8, x9, [x29, #128]
L_curve25519_inv_4:
# Square
# A[0] * A[1]
umulh x12, x6, x7
mul x11, x6, x7
# A[0] * A[3]
umulh x14, x6, x9
mul x13, x6, x9
# A[0] * A[2]
mul x3, x6, x8
adds x12, x12, x3
umulh x4, x6, x8
adcs x13, x13, x4
# A[1] * A[3]
mul x3, x7, x9
adcs x14, x14, x3
umulh x15, x7, x9
adc x15, x15, xzr
# A[1] * A[2]
mul x3, x7, x8
adds x13, x13, x3
umulh x4, x7, x8
adcs x14, x14, x4
# A[2] * A[3]
mul x3, x8, x9
adcs x15, x15, x3
umulh x16, x8, x9
adc x16, x16, xzr
# Double
adds x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adc x17, xzr, xzr
# A[0] * A[0]
umulh x4, x6, x6
mul x10, x6, x6
# A[1] * A[1]
mul x3, x7, x7
adds x11, x11, x4
umulh x4, x7, x7
adcs x12, x12, x3
# A[2] * A[2]
mul x3, x8, x8
adcs x13, x13, x4
umulh x4, x8, x8
adcs x14, x14, x3
# A[3] * A[3]
mul x3, x9, x9
adcs x15, x15, x4
umulh x4, x9, x9
adcs x16, x16, x3
adc x17, x17, x4
# Reduce
mov x3, #38
mul x4, x3, x17
adds x13, x13, x4
umulh x5, x3, x17
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x13, #63
mul x5, x5, x3
and x13, x13, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x14
adds x10, x10, x4
umulh x14, x3, x14
mul x4, x3, x15
adcs x11, x11, x4
umulh x15, x3, x15
mul x4, x3, x16
adcs x12, x12, x4
umulh x16, x3, x16
adc x13, x13, xzr
# Add high product results in
adds x6, x10, x5
adcs x7, x11, x14
adcs x8, x12, x15
adc x9, x13, x16
subs x24, x24, #1
bne L_curve25519_inv_4
# Store
stp x6, x7, [x29, #112]
stp x8, x9, [x29, #128]
add x0, x29, #0x50
add x1, x29, #0x70
add x2, x29, #0x50
#ifndef __APPLE__
bl fe_mul
#else
bl _fe_mul
#endif /* __APPLE__ */
# Loop: 50 times
mov x24, #50
ldp x6, x7, [x29, #80]
ldp x8, x9, [x29, #96]
L_curve25519_inv_5:
# Square
# A[0] * A[1]
umulh x12, x6, x7
mul x11, x6, x7
# A[0] * A[3]
umulh x14, x6, x9
mul x13, x6, x9
# A[0] * A[2]
mul x3, x6, x8
adds x12, x12, x3
umulh x4, x6, x8
adcs x13, x13, x4
# A[1] * A[3]
mul x3, x7, x9
adcs x14, x14, x3
umulh x15, x7, x9
adc x15, x15, xzr
# A[1] * A[2]
mul x3, x7, x8
adds x13, x13, x3
umulh x4, x7, x8
adcs x14, x14, x4
# A[2] * A[3]
mul x3, x8, x9
adcs x15, x15, x3
umulh x16, x8, x9
adc x16, x16, xzr
# Double
adds x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adc x17, xzr, xzr
# A[0] * A[0]
umulh x4, x6, x6
mul x10, x6, x6
# A[1] * A[1]
mul x3, x7, x7
adds x11, x11, x4
umulh x4, x7, x7
adcs x12, x12, x3
# A[2] * A[2]
mul x3, x8, x8
adcs x13, x13, x4
umulh x4, x8, x8
adcs x14, x14, x3
# A[3] * A[3]
mul x3, x9, x9
adcs x15, x15, x4
umulh x4, x9, x9
adcs x16, x16, x3
adc x17, x17, x4
# Reduce
mov x3, #38
mul x4, x3, x17
adds x13, x13, x4
umulh x5, x3, x17
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x13, #63
mul x5, x5, x3
and x13, x13, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x14
adds x10, x10, x4
umulh x14, x3, x14
mul x4, x3, x15
adcs x11, x11, x4
umulh x15, x3, x15
mul x4, x3, x16
adcs x12, x12, x4
umulh x16, x3, x16
adc x13, x13, xzr
# Add high product results in
adds x6, x10, x5
adcs x7, x11, x14
adcs x8, x12, x15
adc x9, x13, x16
subs x24, x24, #1
bne L_curve25519_inv_5
# Store
stp x6, x7, [x29, #112]
stp x8, x9, [x29, #128]
add x0, x29, #0x70
#ifndef NDEBUG
add x1, x29, #0x70
#endif /* !NDEBUG */
add x2, x29, #0x50
#ifndef __APPLE__
bl fe_mul
#else
bl _fe_mul
#endif /* __APPLE__ */
# Loop: 100 times
mov x24, #0x64
ldp x6, x7, [x29, #112]
ldp x8, x9, [x29, #128]
L_curve25519_inv_6:
# Square
# A[0] * A[1]
umulh x12, x6, x7
mul x11, x6, x7
# A[0] * A[3]
umulh x14, x6, x9
mul x13, x6, x9
# A[0] * A[2]
mul x3, x6, x8
adds x12, x12, x3
umulh x4, x6, x8
adcs x13, x13, x4
# A[1] * A[3]
mul x3, x7, x9
adcs x14, x14, x3
umulh x15, x7, x9
adc x15, x15, xzr
# A[1] * A[2]
mul x3, x7, x8
adds x13, x13, x3
umulh x4, x7, x8
adcs x14, x14, x4
# A[2] * A[3]
mul x3, x8, x9
adcs x15, x15, x3
umulh x16, x8, x9
adc x16, x16, xzr
# Double
adds x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adc x17, xzr, xzr
# A[0] * A[0]
umulh x4, x6, x6
mul x10, x6, x6
# A[1] * A[1]
mul x3, x7, x7
adds x11, x11, x4
umulh x4, x7, x7
adcs x12, x12, x3
# A[2] * A[2]
mul x3, x8, x8
adcs x13, x13, x4
umulh x4, x8, x8
adcs x14, x14, x3
# A[3] * A[3]
mul x3, x9, x9
adcs x15, x15, x4
umulh x4, x9, x9
adcs x16, x16, x3
adc x17, x17, x4
# Reduce
mov x3, #38
mul x4, x3, x17
adds x13, x13, x4
umulh x5, x3, x17
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x13, #63
mul x5, x5, x3
and x13, x13, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x14
adds x10, x10, x4
umulh x14, x3, x14
mul x4, x3, x15
adcs x11, x11, x4
umulh x15, x3, x15
mul x4, x3, x16
adcs x12, x12, x4
umulh x16, x3, x16
adc x13, x13, xzr
# Add high product results in
adds x6, x10, x5
adcs x7, x11, x14
adcs x8, x12, x15
adc x9, x13, x16
subs x24, x24, #1
bne L_curve25519_inv_6
# Store
stp x6, x7, [x29, #144]
stp x8, x9, [x29, #160]
#ifndef NDEBUG
add x0, x29, #0x70
#endif /* !NDEBUG */
add x1, x29, #0x90
add x2, x29, #0x70
#ifndef __APPLE__
bl fe_mul
#else
bl _fe_mul
#endif /* __APPLE__ */
# Loop: 50 times
mov x24, #50
ldp x6, x7, [x29, #112]
ldp x8, x9, [x29, #128]
L_curve25519_inv_7:
# Square
# A[0] * A[1]
umulh x12, x6, x7
mul x11, x6, x7
# A[0] * A[3]
umulh x14, x6, x9
mul x13, x6, x9
# A[0] * A[2]
mul x3, x6, x8
adds x12, x12, x3
umulh x4, x6, x8
adcs x13, x13, x4
# A[1] * A[3]
mul x3, x7, x9
adcs x14, x14, x3
umulh x15, x7, x9
adc x15, x15, xzr
# A[1] * A[2]
mul x3, x7, x8
adds x13, x13, x3
umulh x4, x7, x8
adcs x14, x14, x4
# A[2] * A[3]
mul x3, x8, x9
adcs x15, x15, x3
umulh x16, x8, x9
adc x16, x16, xzr
# Double
adds x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adc x17, xzr, xzr
# A[0] * A[0]
umulh x4, x6, x6
mul x10, x6, x6
# A[1] * A[1]
mul x3, x7, x7
adds x11, x11, x4
umulh x4, x7, x7
adcs x12, x12, x3
# A[2] * A[2]
mul x3, x8, x8
adcs x13, x13, x4
umulh x4, x8, x8
adcs x14, x14, x3
# A[3] * A[3]
mul x3, x9, x9
adcs x15, x15, x4
umulh x4, x9, x9
adcs x16, x16, x3
adc x17, x17, x4
# Reduce
mov x3, #38
mul x4, x3, x17
adds x13, x13, x4
umulh x5, x3, x17
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x13, #63
mul x5, x5, x3
and x13, x13, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x14
adds x10, x10, x4
umulh x14, x3, x14
mul x4, x3, x15
adcs x11, x11, x4
umulh x15, x3, x15
mul x4, x3, x16
adcs x12, x12, x4
umulh x16, x3, x16
adc x13, x13, xzr
# Add high product results in
adds x6, x10, x5
adcs x7, x11, x14
adcs x8, x12, x15
adc x9, x13, x16
subs x24, x24, #1
bne L_curve25519_inv_7
# Store
stp x6, x7, [x29, #112]
stp x8, x9, [x29, #128]
add x0, x29, #0x50
add x1, x29, #0x70
add x2, x29, #0x50
#ifndef __APPLE__
bl fe_mul
#else
bl _fe_mul
#endif /* __APPLE__ */
# Loop: 5 times
mov x24, #5
ldp x6, x7, [x29, #80]
ldp x8, x9, [x29, #96]
L_curve25519_inv_8:
# Square
# A[0] * A[1]
umulh x12, x6, x7
mul x11, x6, x7
# A[0] * A[3]
umulh x14, x6, x9
mul x13, x6, x9
# A[0] * A[2]
mul x3, x6, x8
adds x12, x12, x3
umulh x4, x6, x8
adcs x13, x13, x4
# A[1] * A[3]
mul x3, x7, x9
adcs x14, x14, x3
umulh x15, x7, x9
adc x15, x15, xzr
# A[1] * A[2]
mul x3, x7, x8
adds x13, x13, x3
umulh x4, x7, x8
adcs x14, x14, x4
# A[2] * A[3]
mul x3, x8, x9
adcs x15, x15, x3
umulh x16, x8, x9
adc x16, x16, xzr
# Double
adds x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adc x17, xzr, xzr
# A[0] * A[0]
umulh x4, x6, x6
mul x10, x6, x6
# A[1] * A[1]
mul x3, x7, x7
adds x11, x11, x4
umulh x4, x7, x7
adcs x12, x12, x3
# A[2] * A[2]
mul x3, x8, x8
adcs x13, x13, x4
umulh x4, x8, x8
adcs x14, x14, x3
# A[3] * A[3]
mul x3, x9, x9
adcs x15, x15, x4
umulh x4, x9, x9
adcs x16, x16, x3
adc x17, x17, x4
# Reduce
mov x3, #38
mul x4, x3, x17
adds x13, x13, x4
umulh x5, x3, x17
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x13, #63
mul x5, x5, x3
and x13, x13, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x14
adds x10, x10, x4
umulh x14, x3, x14
mul x4, x3, x15
adcs x11, x11, x4
umulh x15, x3, x15
mul x4, x3, x16
adcs x12, x12, x4
umulh x16, x3, x16
adc x13, x13, xzr
# Add high product results in
adds x6, x10, x5
adcs x7, x11, x14
adcs x8, x12, x15
adc x9, x13, x16
subs x24, x24, #1
bne L_curve25519_inv_8
# Store
stp x6, x7, [x29, #80]
stp x8, x9, [x29, #96]
add x0, x29, #16
add x1, x29, #0x50
add x2, x29, #48
#ifndef __APPLE__
bl fe_mul
#else
bl _fe_mul
#endif /* __APPLE__ */
ldr x0, [x29, #176]
# Multiply
ldp x6, x7, [x0]
ldp x8, x9, [x0, #16]
ldp x10, x11, [x29, #16]
ldp x12, x13, [x29, #32]
# A[0] * B[0]
umulh x15, x6, x10
mul x14, x6, x10
# A[2] * B[0]
umulh x17, x8, x10
mul x16, x8, x10
# A[1] * B[0]
mul x3, x7, x10
adds x15, x15, x3
umulh x4, x7, x10
adcs x16, x16, x4
# A[1] * B[3]
umulh x20, x7, x13
adc x17, x17, xzr
mul x19, x7, x13
# A[0] * B[1]
mul x3, x6, x11
adds x15, x15, x3
umulh x4, x6, x11
adcs x16, x16, x4
# A[2] * B[1]
mul x3, x8, x11
adcs x17, x17, x3
umulh x4, x8, x11
adcs x19, x19, x4
adc x20, x20, xzr
# A[1] * B[2]
mul x3, x7, x12
adds x17, x17, x3
umulh x4, x7, x12
adcs x19, x19, x4
adcs x20, x20, xzr
adc x21, xzr, xzr
# A[0] * B[2]
mul x3, x6, x12
adds x16, x16, x3
umulh x4, x6, x12
adcs x17, x17, x4
adcs x19, x19, xzr
adcs x20, x20, xzr
adc x21, x21, xzr
# A[1] * B[1]
mul x3, x7, x11
adds x16, x16, x3
umulh x4, x7, x11
adcs x17, x17, x4
# A[3] * B[1]
mul x3, x9, x11
adcs x19, x19, x3
umulh x4, x9, x11
adcs x20, x20, x4
adc x21, x21, xzr
# A[2] * B[2]
mul x3, x8, x12
adds x19, x19, x3
umulh x4, x8, x12
adcs x20, x20, x4
# A[3] * B[3]
mul x3, x9, x13
adcs x21, x21, x3
umulh x22, x9, x13
adc x22, x22, xzr
# A[0] * B[3]
mul x3, x6, x13
adds x17, x17, x3
umulh x4, x6, x13
adcs x19, x19, x4
# A[2] * B[3]
mul x3, x8, x13
adcs x20, x20, x3
umulh x4, x8, x13
adcs x21, x21, x4
adc x22, x22, xzr
# A[3] * B[0]
mul x3, x9, x10
adds x17, x17, x3
umulh x4, x9, x10
adcs x19, x19, x4
# A[3] * B[2]
mul x3, x9, x12
adcs x20, x20, x3
umulh x4, x9, x12
adcs x21, x21, x4
adc x22, x22, xzr
# Reduce
mov x3, #38
mul x4, x3, x22
adds x17, x17, x4
umulh x5, x3, x22
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x17, #63
mul x5, x5, x3
and x17, x17, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x19
adds x14, x14, x4
umulh x19, x3, x19
mul x4, x3, x20
adcs x15, x15, x4
umulh x20, x3, x20
mul x4, x3, x21
adcs x16, x16, x4
umulh x21, x3, x21
adc x17, x17, xzr
# Add high product results in
adds x14, x14, x5
adcs x15, x15, x19
adcs x16, x16, x20
adc x17, x17, x21
# Reduce if top bit set
mov x3, #19
and x4, x3, x17, asr 63
adds x14, x14, x4
adcs x15, x15, xzr
and x17, x17, #0x7fffffffffffffff
adcs x16, x16, xzr
adc x17, x17, xzr
adds x4, x14, x3
adcs x4, x15, xzr
adcs x4, x16, xzr
adc x4, x17, xzr
and x4, x3, x4, asr 63
adds x14, x14, x4
adcs x15, x15, xzr
mov x4, #0x7fffffffffffffff
adcs x16, x16, xzr
adc x17, x17, xzr
and x17, x17, x4
# Store
stp x14, x15, [x0]
stp x16, x17, [x0, #16]
mov x0, xzr
ldr x17, [x29, #200]
ldr x19, [x29, #208]
ldp x20, x21, [x29, #216]
ldp x22, x23, [x29, #232]
ldp x24, x25, [x29, #248]
ldp x26, x27, [x29, #264]
ldr x28, [x29, #280]
ldp x29, x30, [sp], #0x120
ret
#ifndef __APPLE__
.size curve25519,.-curve25519
#endif /* __APPLE__ */
#ifdef HAVE_ED25519
#-----------------------------------------------------------------------
# void fe_pow22523(fe out, const fe z)
#   Ed25519 field exponentiation helper. Raises z to the fixed power
#   used by the "pow22523" step of the reference Ed25519 implementation
#   (presumably z^(2^252 - 3) mod p, p = 2^255 - 19 -- the exponent
#   ladder below matches the standard ref10 sequence of squarings and
#   multiplies; TODO confirm against the C fe_pow22523).
#
# ABI:   AAPCS64.  In: x0 = out (4x 64-bit limbs), x1 = z (4 limbs).
# Frame: 144 (0x90) bytes.  x17 and x23 are saved/restored across the
#        function (x23 is used as the loop counter below and is
#        callee-saved; x17 is saved alongside for safety).
# Stack layout (relative to x29):
#   [#16..#47]  t0 temporary field element
#   [#48..#79]  t1 temporary field element
#   [#80..#111] t2 temporary field element
#   [#112]      saved out pointer,  [#120] saved z pointer
# Calls the sibling routines fe_sq / fe_mul for the non-repeated steps;
# the long runs of squarings are inlined as loops to avoid call
# overhead.  Under NDEBUG builds, argument registers already holding the
# right value are not reloaded (the #ifndef NDEBUG reloads are
# redundant-but-safe debug aids).
#-----------------------------------------------------------------------
#ifndef __APPLE__
.text
.globl fe_pow22523
.type fe_pow22523,@function
.align 2
fe_pow22523:
#else
.section __TEXT,__text
.globl _fe_pow22523
.p2align 2
_fe_pow22523:
#endif /* __APPLE__ */
    stp x29, x30, [sp, #-144]!
    add x29, sp, #0
    str x17, [x29, #128]
    str x23, [x29, #136]
    # pow22523
    str x0, [x29, #112]
    str x1, [x29, #120]
    add x0, x29, #16
#ifndef NDEBUG
    ldr x1, [x29, #120]
#endif /* !NDEBUG */
#ifndef __APPLE__
    bl fe_sq
#else
    bl _fe_sq
#endif /* __APPLE__ */
    add x0, x29, #48
    add x1, x29, #16
#ifndef __APPLE__
    bl fe_sq
#else
    bl _fe_sq
#endif /* __APPLE__ */
#ifndef NDEBUG
    add x0, x29, #48
#endif /* !NDEBUG */
    add x1, x29, #48
#ifndef __APPLE__
    bl fe_sq
#else
    bl _fe_sq
#endif /* __APPLE__ */
#ifndef NDEBUG
    add x0, x29, #48
#endif /* !NDEBUG */
    ldr x1, [x29, #120]
    add x2, x29, #48
#ifndef __APPLE__
    bl fe_mul
#else
    bl _fe_mul
#endif /* __APPLE__ */
    add x0, x29, #16
    add x1, x29, #16
    add x2, x29, #48
#ifndef __APPLE__
    bl fe_mul
#else
    bl _fe_mul
#endif /* __APPLE__ */
#ifndef NDEBUG
    add x0, x29, #16
#endif /* !NDEBUG */
#ifndef NDEBUG
    add x1, x29, #16
#endif /* !NDEBUG */
#ifndef __APPLE__
    bl fe_sq
#else
    bl _fe_sq
#endif /* __APPLE__ */
#ifndef NDEBUG
    add x0, x29, #16
#endif /* !NDEBUG */
    add x1, x29, #48
    add x2, x29, #16
#ifndef __APPLE__
    bl fe_mul
#else
    bl _fe_mul
#endif /* __APPLE__ */
    # Loop: 5 times
    # Inlined repeated squaring: x6:x9 holds the 4-limb value; each pass
    # computes its square and reduces mod 2^255-19, keeping the result
    # in registers across iterations (stored back only after the loop).
    mov x23, #5
    ldp x6, x7, [x29, #16]
    ldp x8, x9, [x29, #32]
L_fe_pow22523_1:
    # Square
    # A[0] * A[1]
    umulh x12, x6, x7
    mul x11, x6, x7
    # A[0] * A[3]
    umulh x14, x6, x9
    mul x13, x6, x9
    # A[0] * A[2]
    mul x3, x6, x8
    adds x12, x12, x3
    umulh x4, x6, x8
    adcs x13, x13, x4
    # A[1] * A[3]
    mul x3, x7, x9
    adcs x14, x14, x3
    umulh x15, x7, x9
    adc x15, x15, xzr
    # A[1] * A[2]
    mul x3, x7, x8
    adds x13, x13, x3
    umulh x4, x7, x8
    adcs x14, x14, x4
    # A[2] * A[3]
    mul x3, x8, x9
    adcs x15, x15, x3
    umulh x16, x8, x9
    adc x16, x16, xzr
    # Double
    adds x11, x11, x11
    adcs x12, x12, x12
    adcs x13, x13, x13
    adcs x14, x14, x14
    adcs x15, x15, x15
    adcs x16, x16, x16
    adc x17, xzr, xzr
    # A[0] * A[0]
    umulh x4, x6, x6
    mul x10, x6, x6
    # A[1] * A[1]
    mul x3, x7, x7
    adds x11, x11, x4
    umulh x4, x7, x7
    adcs x12, x12, x3
    # A[2] * A[2]
    mul x3, x8, x8
    adcs x13, x13, x4
    umulh x4, x8, x8
    adcs x14, x14, x3
    # A[3] * A[3]
    mul x3, x9, x9
    adcs x15, x15, x4
    umulh x4, x9, x9
    adcs x16, x16, x3
    adc x17, x17, x4
    # Reduce
    # Fold the high limbs back into the low four: 2^256 == 38 (mod p)
    # and 2^255 == 19 (mod p), hence the multipliers 38 and 19 and the
    # top-bit mask 0x7fff... below.  Result is a partially reduced
    # value < 2^256 (full canonicalization is not needed mid-ladder).
    mov x3, #38
    mul x4, x3, x17
    adds x13, x13, x4
    umulh x5, x3, x17
    adc x5, x5, xzr
    mov x3, #19
    extr x5, x5, x13, #63
    mul x5, x5, x3
    and x13, x13, #0x7fffffffffffffff
    mov x3, #38
    mul x4, x3, x14
    adds x10, x10, x4
    umulh x14, x3, x14
    mul x4, x3, x15
    adcs x11, x11, x4
    umulh x15, x3, x15
    mul x4, x3, x16
    adcs x12, x12, x4
    umulh x16, x3, x16
    adc x13, x13, xzr
    # Add high product results in
    adds x6, x10, x5
    adcs x7, x11, x14
    adcs x8, x12, x15
    adc x9, x13, x16
    subs x23, x23, #1
    bne L_fe_pow22523_1
    # Store
    stp x6, x7, [x29, #48]
    stp x8, x9, [x29, #64]
#ifndef NDEBUG
    add x0, x29, #16
#endif /* !NDEBUG */
#ifndef NDEBUG
    add x1, x29, #48
#endif /* !NDEBUG */
    add x2, x29, #16
#ifndef __APPLE__
    bl fe_mul
#else
    bl _fe_mul
#endif /* __APPLE__ */
    # Loop: 10 times
    mov x23, #10
    ldp x6, x7, [x29, #16]
    ldp x8, x9, [x29, #32]
L_fe_pow22523_2:
    # Square
    # A[0] * A[1]
    umulh x12, x6, x7
    mul x11, x6, x7
    # A[0] * A[3]
    umulh x14, x6, x9
    mul x13, x6, x9
    # A[0] * A[2]
    mul x3, x6, x8
    adds x12, x12, x3
    umulh x4, x6, x8
    adcs x13, x13, x4
    # A[1] * A[3]
    mul x3, x7, x9
    adcs x14, x14, x3
    umulh x15, x7, x9
    adc x15, x15, xzr
    # A[1] * A[2]
    mul x3, x7, x8
    adds x13, x13, x3
    umulh x4, x7, x8
    adcs x14, x14, x4
    # A[2] * A[3]
    mul x3, x8, x9
    adcs x15, x15, x3
    umulh x16, x8, x9
    adc x16, x16, xzr
    # Double
    adds x11, x11, x11
    adcs x12, x12, x12
    adcs x13, x13, x13
    adcs x14, x14, x14
    adcs x15, x15, x15
    adcs x16, x16, x16
    adc x17, xzr, xzr
    # A[0] * A[0]
    umulh x4, x6, x6
    mul x10, x6, x6
    # A[1] * A[1]
    mul x3, x7, x7
    adds x11, x11, x4
    umulh x4, x7, x7
    adcs x12, x12, x3
    # A[2] * A[2]
    mul x3, x8, x8
    adcs x13, x13, x4
    umulh x4, x8, x8
    adcs x14, x14, x3
    # A[3] * A[3]
    mul x3, x9, x9
    adcs x15, x15, x4
    umulh x4, x9, x9
    adcs x16, x16, x3
    adc x17, x17, x4
    # Reduce
    mov x3, #38
    mul x4, x3, x17
    adds x13, x13, x4
    umulh x5, x3, x17
    adc x5, x5, xzr
    mov x3, #19
    extr x5, x5, x13, #63
    mul x5, x5, x3
    and x13, x13, #0x7fffffffffffffff
    mov x3, #38
    mul x4, x3, x14
    adds x10, x10, x4
    umulh x14, x3, x14
    mul x4, x3, x15
    adcs x11, x11, x4
    umulh x15, x3, x15
    mul x4, x3, x16
    adcs x12, x12, x4
    umulh x16, x3, x16
    adc x13, x13, xzr
    # Add high product results in
    adds x6, x10, x5
    adcs x7, x11, x14
    adcs x8, x12, x15
    adc x9, x13, x16
    subs x23, x23, #1
    bne L_fe_pow22523_2
    # Store
    stp x6, x7, [x29, #48]
    stp x8, x9, [x29, #64]
    add x0, x29, #48
#ifndef NDEBUG
    add x1, x29, #48
#endif /* !NDEBUG */
    add x2, x29, #16
#ifndef __APPLE__
    bl fe_mul
#else
    bl _fe_mul
#endif /* __APPLE__ */
    # Loop: 20 times
    mov x23, #20
    ldp x6, x7, [x29, #48]
    ldp x8, x9, [x29, #64]
L_fe_pow22523_3:
    # Square
    # A[0] * A[1]
    umulh x12, x6, x7
    mul x11, x6, x7
    # A[0] * A[3]
    umulh x14, x6, x9
    mul x13, x6, x9
    # A[0] * A[2]
    mul x3, x6, x8
    adds x12, x12, x3
    umulh x4, x6, x8
    adcs x13, x13, x4
    # A[1] * A[3]
    mul x3, x7, x9
    adcs x14, x14, x3
    umulh x15, x7, x9
    adc x15, x15, xzr
    # A[1] * A[2]
    mul x3, x7, x8
    adds x13, x13, x3
    umulh x4, x7, x8
    adcs x14, x14, x4
    # A[2] * A[3]
    mul x3, x8, x9
    adcs x15, x15, x3
    umulh x16, x8, x9
    adc x16, x16, xzr
    # Double
    adds x11, x11, x11
    adcs x12, x12, x12
    adcs x13, x13, x13
    adcs x14, x14, x14
    adcs x15, x15, x15
    adcs x16, x16, x16
    adc x17, xzr, xzr
    # A[0] * A[0]
    umulh x4, x6, x6
    mul x10, x6, x6
    # A[1] * A[1]
    mul x3, x7, x7
    adds x11, x11, x4
    umulh x4, x7, x7
    adcs x12, x12, x3
    # A[2] * A[2]
    mul x3, x8, x8
    adcs x13, x13, x4
    umulh x4, x8, x8
    adcs x14, x14, x3
    # A[3] * A[3]
    mul x3, x9, x9
    adcs x15, x15, x4
    umulh x4, x9, x9
    adcs x16, x16, x3
    adc x17, x17, x4
    # Reduce
    mov x3, #38
    mul x4, x3, x17
    adds x13, x13, x4
    umulh x5, x3, x17
    adc x5, x5, xzr
    mov x3, #19
    extr x5, x5, x13, #63
    mul x5, x5, x3
    and x13, x13, #0x7fffffffffffffff
    mov x3, #38
    mul x4, x3, x14
    adds x10, x10, x4
    umulh x14, x3, x14
    mul x4, x3, x15
    adcs x11, x11, x4
    umulh x15, x3, x15
    mul x4, x3, x16
    adcs x12, x12, x4
    umulh x16, x3, x16
    adc x13, x13, xzr
    # Add high product results in
    adds x6, x10, x5
    adcs x7, x11, x14
    adcs x8, x12, x15
    adc x9, x13, x16
    subs x23, x23, #1
    bne L_fe_pow22523_3
    # Store
    stp x6, x7, [x29, #80]
    stp x8, x9, [x29, #96]
#ifndef NDEBUG
    add x0, x29, #48
#endif /* !NDEBUG */
    add x1, x29, #0x50
    add x2, x29, #48
#ifndef __APPLE__
    bl fe_mul
#else
    bl _fe_mul
#endif /* __APPLE__ */
    # Loop: 10 times
    mov x23, #10
    ldp x6, x7, [x29, #48]
    ldp x8, x9, [x29, #64]
L_fe_pow22523_4:
    # Square
    # A[0] * A[1]
    umulh x12, x6, x7
    mul x11, x6, x7
    # A[0] * A[3]
    umulh x14, x6, x9
    mul x13, x6, x9
    # A[0] * A[2]
    mul x3, x6, x8
    adds x12, x12, x3
    umulh x4, x6, x8
    adcs x13, x13, x4
    # A[1] * A[3]
    mul x3, x7, x9
    adcs x14, x14, x3
    umulh x15, x7, x9
    adc x15, x15, xzr
    # A[1] * A[2]
    mul x3, x7, x8
    adds x13, x13, x3
    umulh x4, x7, x8
    adcs x14, x14, x4
    # A[2] * A[3]
    mul x3, x8, x9
    adcs x15, x15, x3
    umulh x16, x8, x9
    adc x16, x16, xzr
    # Double
    adds x11, x11, x11
    adcs x12, x12, x12
    adcs x13, x13, x13
    adcs x14, x14, x14
    adcs x15, x15, x15
    adcs x16, x16, x16
    adc x17, xzr, xzr
    # A[0] * A[0]
    umulh x4, x6, x6
    mul x10, x6, x6
    # A[1] * A[1]
    mul x3, x7, x7
    adds x11, x11, x4
    umulh x4, x7, x7
    adcs x12, x12, x3
    # A[2] * A[2]
    mul x3, x8, x8
    adcs x13, x13, x4
    umulh x4, x8, x8
    adcs x14, x14, x3
    # A[3] * A[3]
    mul x3, x9, x9
    adcs x15, x15, x4
    umulh x4, x9, x9
    adcs x16, x16, x3
    adc x17, x17, x4
    # Reduce
    mov x3, #38
    mul x4, x3, x17
    adds x13, x13, x4
    umulh x5, x3, x17
    adc x5, x5, xzr
    mov x3, #19
    extr x5, x5, x13, #63
    mul x5, x5, x3
    and x13, x13, #0x7fffffffffffffff
    mov x3, #38
    mul x4, x3, x14
    adds x10, x10, x4
    umulh x14, x3, x14
    mul x4, x3, x15
    adcs x11, x11, x4
    umulh x15, x3, x15
    mul x4, x3, x16
    adcs x12, x12, x4
    umulh x16, x3, x16
    adc x13, x13, xzr
    # Add high product results in
    adds x6, x10, x5
    adcs x7, x11, x14
    adcs x8, x12, x15
    adc x9, x13, x16
    subs x23, x23, #1
    bne L_fe_pow22523_4
    # Store
    stp x6, x7, [x29, #48]
    stp x8, x9, [x29, #64]
    add x0, x29, #16
    add x1, x29, #48
    add x2, x29, #16
#ifndef __APPLE__
    bl fe_mul
#else
    bl _fe_mul
#endif /* __APPLE__ */
    # Loop: 50 times
    mov x23, #50
    ldp x6, x7, [x29, #16]
    ldp x8, x9, [x29, #32]
L_fe_pow22523_5:
    # Square
    # A[0] * A[1]
    umulh x12, x6, x7
    mul x11, x6, x7
    # A[0] * A[3]
    umulh x14, x6, x9
    mul x13, x6, x9
    # A[0] * A[2]
    mul x3, x6, x8
    adds x12, x12, x3
    umulh x4, x6, x8
    adcs x13, x13, x4
    # A[1] * A[3]
    mul x3, x7, x9
    adcs x14, x14, x3
    umulh x15, x7, x9
    adc x15, x15, xzr
    # A[1] * A[2]
    mul x3, x7, x8
    adds x13, x13, x3
    umulh x4, x7, x8
    adcs x14, x14, x4
    # A[2] * A[3]
    mul x3, x8, x9
    adcs x15, x15, x3
    umulh x16, x8, x9
    adc x16, x16, xzr
    # Double
    adds x11, x11, x11
    adcs x12, x12, x12
    adcs x13, x13, x13
    adcs x14, x14, x14
    adcs x15, x15, x15
    adcs x16, x16, x16
    adc x17, xzr, xzr
    # A[0] * A[0]
    umulh x4, x6, x6
    mul x10, x6, x6
    # A[1] * A[1]
    mul x3, x7, x7
    adds x11, x11, x4
    umulh x4, x7, x7
    adcs x12, x12, x3
    # A[2] * A[2]
    mul x3, x8, x8
    adcs x13, x13, x4
    umulh x4, x8, x8
    adcs x14, x14, x3
    # A[3] * A[3]
    mul x3, x9, x9
    adcs x15, x15, x4
    umulh x4, x9, x9
    adcs x16, x16, x3
    adc x17, x17, x4
    # Reduce
    mov x3, #38
    mul x4, x3, x17
    adds x13, x13, x4
    umulh x5, x3, x17
    adc x5, x5, xzr
    mov x3, #19
    extr x5, x5, x13, #63
    mul x5, x5, x3
    and x13, x13, #0x7fffffffffffffff
    mov x3, #38
    mul x4, x3, x14
    adds x10, x10, x4
    umulh x14, x3, x14
    mul x4, x3, x15
    adcs x11, x11, x4
    umulh x15, x3, x15
    mul x4, x3, x16
    adcs x12, x12, x4
    umulh x16, x3, x16
    adc x13, x13, xzr
    # Add high product results in
    adds x6, x10, x5
    adcs x7, x11, x14
    adcs x8, x12, x15
    adc x9, x13, x16
    subs x23, x23, #1
    bne L_fe_pow22523_5
    # Store
    stp x6, x7, [x29, #48]
    stp x8, x9, [x29, #64]
    add x0, x29, #48
#ifndef NDEBUG
    add x1, x29, #48
#endif /* !NDEBUG */
    add x2, x29, #16
#ifndef __APPLE__
    bl fe_mul
#else
    bl _fe_mul
#endif /* __APPLE__ */
    # Loop: 100 times
    mov x23, #0x64
    ldp x6, x7, [x29, #48]
    ldp x8, x9, [x29, #64]
L_fe_pow22523_6:
    # Square
    # A[0] * A[1]
    umulh x12, x6, x7
    mul x11, x6, x7
    # A[0] * A[3]
    umulh x14, x6, x9
    mul x13, x6, x9
    # A[0] * A[2]
    mul x3, x6, x8
    adds x12, x12, x3
    umulh x4, x6, x8
    adcs x13, x13, x4
    # A[1] * A[3]
    mul x3, x7, x9
    adcs x14, x14, x3
    umulh x15, x7, x9
    adc x15, x15, xzr
    # A[1] * A[2]
    mul x3, x7, x8
    adds x13, x13, x3
    umulh x4, x7, x8
    adcs x14, x14, x4
    # A[2] * A[3]
    mul x3, x8, x9
    adcs x15, x15, x3
    umulh x16, x8, x9
    adc x16, x16, xzr
    # Double
    adds x11, x11, x11
    adcs x12, x12, x12
    adcs x13, x13, x13
    adcs x14, x14, x14
    adcs x15, x15, x15
    adcs x16, x16, x16
    adc x17, xzr, xzr
    # A[0] * A[0]
    umulh x4, x6, x6
    mul x10, x6, x6
    # A[1] * A[1]
    mul x3, x7, x7
    adds x11, x11, x4
    umulh x4, x7, x7
    adcs x12, x12, x3
    # A[2] * A[2]
    mul x3, x8, x8
    adcs x13, x13, x4
    umulh x4, x8, x8
    adcs x14, x14, x3
    # A[3] * A[3]
    mul x3, x9, x9
    adcs x15, x15, x4
    umulh x4, x9, x9
    adcs x16, x16, x3
    adc x17, x17, x4
    # Reduce
    mov x3, #38
    mul x4, x3, x17
    adds x13, x13, x4
    umulh x5, x3, x17
    adc x5, x5, xzr
    mov x3, #19
    extr x5, x5, x13, #63
    mul x5, x5, x3
    and x13, x13, #0x7fffffffffffffff
    mov x3, #38
    mul x4, x3, x14
    adds x10, x10, x4
    umulh x14, x3, x14
    mul x4, x3, x15
    adcs x11, x11, x4
    umulh x15, x3, x15
    mul x4, x3, x16
    adcs x12, x12, x4
    umulh x16, x3, x16
    adc x13, x13, xzr
    # Add high product results in
    adds x6, x10, x5
    adcs x7, x11, x14
    adcs x8, x12, x15
    adc x9, x13, x16
    subs x23, x23, #1
    bne L_fe_pow22523_6
    # Store
    stp x6, x7, [x29, #80]
    stp x8, x9, [x29, #96]
#ifndef NDEBUG
    add x0, x29, #48
#endif /* !NDEBUG */
    add x1, x29, #0x50
    add x2, x29, #48
#ifndef __APPLE__
    bl fe_mul
#else
    bl _fe_mul
#endif /* __APPLE__ */
    # Loop: 50 times
    mov x23, #50
    ldp x6, x7, [x29, #48]
    ldp x8, x9, [x29, #64]
L_fe_pow22523_7:
    # Square
    # A[0] * A[1]
    umulh x12, x6, x7
    mul x11, x6, x7
    # A[0] * A[3]
    umulh x14, x6, x9
    mul x13, x6, x9
    # A[0] * A[2]
    mul x3, x6, x8
    adds x12, x12, x3
    umulh x4, x6, x8
    adcs x13, x13, x4
    # A[1] * A[3]
    mul x3, x7, x9
    adcs x14, x14, x3
    umulh x15, x7, x9
    adc x15, x15, xzr
    # A[1] * A[2]
    mul x3, x7, x8
    adds x13, x13, x3
    umulh x4, x7, x8
    adcs x14, x14, x4
    # A[2] * A[3]
    mul x3, x8, x9
    adcs x15, x15, x3
    umulh x16, x8, x9
    adc x16, x16, xzr
    # Double
    adds x11, x11, x11
    adcs x12, x12, x12
    adcs x13, x13, x13
    adcs x14, x14, x14
    adcs x15, x15, x15
    adcs x16, x16, x16
    adc x17, xzr, xzr
    # A[0] * A[0]
    umulh x4, x6, x6
    mul x10, x6, x6
    # A[1] * A[1]
    mul x3, x7, x7
    adds x11, x11, x4
    umulh x4, x7, x7
    adcs x12, x12, x3
    # A[2] * A[2]
    mul x3, x8, x8
    adcs x13, x13, x4
    umulh x4, x8, x8
    adcs x14, x14, x3
    # A[3] * A[3]
    mul x3, x9, x9
    adcs x15, x15, x4
    umulh x4, x9, x9
    adcs x16, x16, x3
    adc x17, x17, x4
    # Reduce
    mov x3, #38
    mul x4, x3, x17
    adds x13, x13, x4
    umulh x5, x3, x17
    adc x5, x5, xzr
    mov x3, #19
    extr x5, x5, x13, #63
    mul x5, x5, x3
    and x13, x13, #0x7fffffffffffffff
    mov x3, #38
    mul x4, x3, x14
    adds x10, x10, x4
    umulh x14, x3, x14
    mul x4, x3, x15
    adcs x11, x11, x4
    umulh x15, x3, x15
    mul x4, x3, x16
    adcs x12, x12, x4
    umulh x16, x3, x16
    adc x13, x13, xzr
    # Add high product results in
    adds x6, x10, x5
    adcs x7, x11, x14
    adcs x8, x12, x15
    adc x9, x13, x16
    subs x23, x23, #1
    bne L_fe_pow22523_7
    # Store
    stp x6, x7, [x29, #48]
    stp x8, x9, [x29, #64]
    add x0, x29, #16
    add x1, x29, #48
    add x2, x29, #16
#ifndef __APPLE__
    bl fe_mul
#else
    bl _fe_mul
#endif /* __APPLE__ */
#ifndef NDEBUG
    add x0, x29, #16
#endif /* !NDEBUG */
    add x1, x29, #16
#ifndef __APPLE__
    bl fe_sq
#else
    bl _fe_sq
#endif /* __APPLE__ */
    # Second fe_sq reuses x0/x1 left from the call above (t0 in place).
#ifndef __APPLE__
    bl fe_sq
#else
    bl _fe_sq
#endif /* __APPLE__ */
    # Final multiply writes the result to the caller's out buffer.
    ldr x0, [x29, #112]
#ifndef NDEBUG
    add x1, x29, #16
#endif /* !NDEBUG */
    ldr x2, [x29, #120]
#ifndef __APPLE__
    bl fe_mul
#else
    bl _fe_mul
#endif /* __APPLE__ */
    ldr x17, [x29, #128]
    ldr x23, [x29, #136]
    ldp x29, x30, [sp], #0x90
    ret
#ifndef __APPLE__
.size fe_pow22523,.-fe_pow22523
#endif /* __APPLE__ */
#-----------------------------------------------------------------------
# void ge_p1p1_to_p2(ge_p2 *r, const ge_p1p1 *p)
#   Converts an Ed25519 group element from the completed (P1P1)
#   representation to the projective (P2) representation with three
#   field multiplications.  From the pointer arithmetic below, the
#   element layout is 4x 64-bit limbs per coordinate at byte offsets
#   0x00, 0x20, 0x40, 0x60 (presumably X, Y, Z, T in that order --
#   TODO confirm against the ge_p1p1/ge_p2 struct definitions):
#     r+0x00 = (p+0x60) * (p+0x00)   e.g. r->X = p->T * p->X
#     r+0x40 = (p+0x60) * (p+0x40)   e.g. r->Z = p->T * p->Z
#     r+0x20 = (p+0x20) * (p+0x40)   e.g. r->Y = p->Y * p->Z
#
# ABI:   AAPCS64.  In: x0 = r, x1 = p.  Frame: 80 (0x50) bytes.
# Saves/restores x19-x22 (callee-saved, used as product accumulators)
# and x17 (saved alongside for safety).  Each product is reduced
# mod 2^255-19 using 38 = 2*19 (2^256 == 38 mod p) and 19
# (2^255 == 19 mod p); results are partially reduced (< 2^256), not
# fully canonicalized.
# The A operand limbs stay in x10..x13 and the B operand limbs in
# x6..x9 between multiplies, so only the operand that changes is
# reloaded before the second and third products.
#-----------------------------------------------------------------------
#ifndef __APPLE__
.text
.globl ge_p1p1_to_p2
.type ge_p1p1_to_p2,@function
.align 2
ge_p1p1_to_p2:
#else
.section __TEXT,__text
.globl _ge_p1p1_to_p2
.p2align 2
_ge_p1p1_to_p2:
#endif /* __APPLE__ */
    stp x29, x30, [sp, #-80]!
    add x29, sp, #0
    str x17, [x29, #40]
    str x19, [x29, #48]
    stp x20, x21, [x29, #56]
    str x22, [x29, #72]
    str x0, [x29, #16]
    str x1, [x29, #24]
    mov x2, x1
    add x1, x1, #0x60
    # Multiply
    # Product 1: r+0x00 = A(p+0x60) * B(p+0x00)
    ldp x10, x11, [x1]
    ldp x12, x13, [x1, #16]
    ldp x6, x7, [x2]
    ldp x8, x9, [x2, #16]
    # A[0] * B[0]
    umulh x15, x10, x6
    mul x14, x10, x6
    # A[2] * B[0]
    umulh x17, x12, x6
    mul x16, x12, x6
    # A[1] * B[0]
    mul x3, x11, x6
    adds x15, x15, x3
    umulh x4, x11, x6
    adcs x16, x16, x4
    # A[1] * B[3]
    umulh x20, x11, x9
    adc x17, x17, xzr
    mul x19, x11, x9
    # A[0] * B[1]
    mul x3, x10, x7
    adds x15, x15, x3
    umulh x4, x10, x7
    adcs x16, x16, x4
    # A[2] * B[1]
    mul x3, x12, x7
    adcs x17, x17, x3
    umulh x4, x12, x7
    adcs x19, x19, x4
    adc x20, x20, xzr
    # A[1] * B[2]
    mul x3, x11, x8
    adds x17, x17, x3
    umulh x4, x11, x8
    adcs x19, x19, x4
    adcs x20, x20, xzr
    adc x21, xzr, xzr
    # A[0] * B[2]
    mul x3, x10, x8
    adds x16, x16, x3
    umulh x4, x10, x8
    adcs x17, x17, x4
    adcs x19, x19, xzr
    adcs x20, x20, xzr
    adc x21, x21, xzr
    # A[1] * B[1]
    mul x3, x11, x7
    adds x16, x16, x3
    umulh x4, x11, x7
    adcs x17, x17, x4
    # A[3] * B[1]
    mul x3, x13, x7
    adcs x19, x19, x3
    umulh x4, x13, x7
    adcs x20, x20, x4
    adc x21, x21, xzr
    # A[2] * B[2]
    mul x3, x12, x8
    adds x19, x19, x3
    umulh x4, x12, x8
    adcs x20, x20, x4
    # A[3] * B[3]
    mul x3, x13, x9
    adcs x21, x21, x3
    umulh x22, x13, x9
    adc x22, x22, xzr
    # A[0] * B[3]
    mul x3, x10, x9
    adds x17, x17, x3
    umulh x4, x10, x9
    adcs x19, x19, x4
    # A[2] * B[3]
    mul x3, x12, x9
    adcs x20, x20, x3
    umulh x4, x12, x9
    adcs x21, x21, x4
    adc x22, x22, xzr
    # A[3] * B[0]
    mul x3, x13, x6
    adds x17, x17, x3
    umulh x4, x13, x6
    adcs x19, x19, x4
    # A[3] * B[2]
    mul x3, x13, x8
    adcs x20, x20, x3
    umulh x4, x13, x8
    adcs x21, x21, x4
    adc x22, x22, xzr
    # Reduce
    mov x3, #38
    mul x4, x3, x22
    adds x17, x17, x4
    umulh x5, x3, x22
    adc x5, x5, xzr
    mov x3, #19
    extr x5, x5, x17, #63
    mul x5, x5, x3
    and x17, x17, #0x7fffffffffffffff
    mov x3, #38
    mul x4, x3, x19
    adds x14, x14, x4
    umulh x19, x3, x19
    mul x4, x3, x20
    adcs x15, x15, x4
    umulh x20, x3, x20
    mul x4, x3, x21
    adcs x16, x16, x4
    umulh x21, x3, x21
    adc x17, x17, xzr
    # Add high product results in
    adds x14, x14, x5
    adcs x15, x15, x19
    adcs x16, x16, x20
    adc x17, x17, x21
    # Store
    stp x14, x15, [x0]
    stp x16, x17, [x0, #16]
    sub x2, x1, #32
    add x0, x0, #0x40
    # Multiply
    # Product 2: r+0x40 = A(p+0x60, still in x10..x13) * B(p+0x40)
    ldp x6, x7, [x2]
    ldp x8, x9, [x2, #16]
    # A[0] * B[0]
    umulh x15, x10, x6
    mul x14, x10, x6
    # A[2] * B[0]
    umulh x17, x12, x6
    mul x16, x12, x6
    # A[1] * B[0]
    mul x3, x11, x6
    adds x15, x15, x3
    umulh x4, x11, x6
    adcs x16, x16, x4
    # A[1] * B[3]
    umulh x20, x11, x9
    adc x17, x17, xzr
    mul x19, x11, x9
    # A[0] * B[1]
    mul x3, x10, x7
    adds x15, x15, x3
    umulh x4, x10, x7
    adcs x16, x16, x4
    # A[2] * B[1]
    mul x3, x12, x7
    adcs x17, x17, x3
    umulh x4, x12, x7
    adcs x19, x19, x4
    adc x20, x20, xzr
    # A[1] * B[2]
    mul x3, x11, x8
    adds x17, x17, x3
    umulh x4, x11, x8
    adcs x19, x19, x4
    adcs x20, x20, xzr
    adc x21, xzr, xzr
    # A[0] * B[2]
    mul x3, x10, x8
    adds x16, x16, x3
    umulh x4, x10, x8
    adcs x17, x17, x4
    adcs x19, x19, xzr
    adcs x20, x20, xzr
    adc x21, x21, xzr
    # A[1] * B[1]
    mul x3, x11, x7
    adds x16, x16, x3
    umulh x4, x11, x7
    adcs x17, x17, x4
    # A[3] * B[1]
    mul x3, x13, x7
    adcs x19, x19, x3
    umulh x4, x13, x7
    adcs x20, x20, x4
    adc x21, x21, xzr
    # A[2] * B[2]
    mul x3, x12, x8
    adds x19, x19, x3
    umulh x4, x12, x8
    adcs x20, x20, x4
    # A[3] * B[3]
    mul x3, x13, x9
    adcs x21, x21, x3
    umulh x22, x13, x9
    adc x22, x22, xzr
    # A[0] * B[3]
    mul x3, x10, x9
    adds x17, x17, x3
    umulh x4, x10, x9
    adcs x19, x19, x4
    # A[2] * B[3]
    mul x3, x12, x9
    adcs x20, x20, x3
    umulh x4, x12, x9
    adcs x21, x21, x4
    adc x22, x22, xzr
    # A[3] * B[0]
    mul x3, x13, x6
    adds x17, x17, x3
    umulh x4, x13, x6
    adcs x19, x19, x4
    # A[3] * B[2]
    mul x3, x13, x8
    adcs x20, x20, x3
    umulh x4, x13, x8
    adcs x21, x21, x4
    adc x22, x22, xzr
    # Reduce
    mov x3, #38
    mul x4, x3, x22
    adds x17, x17, x4
    umulh x5, x3, x22
    adc x5, x5, xzr
    mov x3, #19
    extr x5, x5, x17, #63
    mul x5, x5, x3
    and x17, x17, #0x7fffffffffffffff
    mov x3, #38
    mul x4, x3, x19
    adds x14, x14, x4
    umulh x19, x3, x19
    mul x4, x3, x20
    adcs x15, x15, x4
    umulh x20, x3, x20
    mul x4, x3, x21
    adcs x16, x16, x4
    umulh x21, x3, x21
    adc x17, x17, xzr
    # Add high product results in
    adds x14, x14, x5
    adcs x15, x15, x19
    adcs x16, x16, x20
    adc x17, x17, x21
    # Store
    stp x14, x15, [x0]
    stp x16, x17, [x0, #16]
    sub x1, x1, #0x40
    sub x0, x0, #32
    # Multiply
    # Product 3: r+0x20 = A(p+0x20) * B(p+0x40, still in x6..x9)
    ldp x10, x11, [x1]
    ldp x12, x13, [x1, #16]
    # A[0] * B[0]
    umulh x15, x10, x6
    mul x14, x10, x6
    # A[2] * B[0]
    umulh x17, x12, x6
    mul x16, x12, x6
    # A[1] * B[0]
    mul x3, x11, x6
    adds x15, x15, x3
    umulh x4, x11, x6
    adcs x16, x16, x4
    # A[1] * B[3]
    umulh x20, x11, x9
    adc x17, x17, xzr
    mul x19, x11, x9
    # A[0] * B[1]
    mul x3, x10, x7
    adds x15, x15, x3
    umulh x4, x10, x7
    adcs x16, x16, x4
    # A[2] * B[1]
    mul x3, x12, x7
    adcs x17, x17, x3
    umulh x4, x12, x7
    adcs x19, x19, x4
    adc x20, x20, xzr
    # A[1] * B[2]
    mul x3, x11, x8
    adds x17, x17, x3
    umulh x4, x11, x8
    adcs x19, x19, x4
    adcs x20, x20, xzr
    adc x21, xzr, xzr
    # A[0] * B[2]
    mul x3, x10, x8
    adds x16, x16, x3
    umulh x4, x10, x8
    adcs x17, x17, x4
    adcs x19, x19, xzr
    adcs x20, x20, xzr
    adc x21, x21, xzr
    # A[1] * B[1]
    mul x3, x11, x7
    adds x16, x16, x3
    umulh x4, x11, x7
    adcs x17, x17, x4
    # A[3] * B[1]
    mul x3, x13, x7
    adcs x19, x19, x3
    umulh x4, x13, x7
    adcs x20, x20, x4
    adc x21, x21, xzr
    # A[2] * B[2]
    mul x3, x12, x8
    adds x19, x19, x3
    umulh x4, x12, x8
    adcs x20, x20, x4
    # A[3] * B[3]
    mul x3, x13, x9
    adcs x21, x21, x3
    umulh x22, x13, x9
    adc x22, x22, xzr
    # A[0] * B[3]
    mul x3, x10, x9
    adds x17, x17, x3
    umulh x4, x10, x9
    adcs x19, x19, x4
    # A[2] * B[3]
    mul x3, x12, x9
    adcs x20, x20, x3
    umulh x4, x12, x9
    adcs x21, x21, x4
    adc x22, x22, xzr
    # A[3] * B[0]
    mul x3, x13, x6
    adds x17, x17, x3
    umulh x4, x13, x6
    adcs x19, x19, x4
    # A[3] * B[2]
    mul x3, x13, x8
    adcs x20, x20, x3
    umulh x4, x13, x8
    adcs x21, x21, x4
    adc x22, x22, xzr
    # Reduce
    mov x3, #38
    mul x4, x3, x22
    adds x17, x17, x4
    umulh x5, x3, x22
    adc x5, x5, xzr
    mov x3, #19
    extr x5, x5, x17, #63
    mul x5, x5, x3
    and x17, x17, #0x7fffffffffffffff
    mov x3, #38
    mul x4, x3, x19
    adds x14, x14, x4
    umulh x19, x3, x19
    mul x4, x3, x20
    adcs x15, x15, x4
    umulh x20, x3, x20
    mul x4, x3, x21
    adcs x16, x16, x4
    umulh x21, x3, x21
    adc x17, x17, xzr
    # Add high product results in
    adds x14, x14, x5
    adcs x15, x15, x19
    adcs x16, x16, x20
    adc x17, x17, x21
    # Store
    stp x14, x15, [x0]
    stp x16, x17, [x0, #16]
    ldr x17, [x29, #40]
    ldr x19, [x29, #48]
    ldp x20, x21, [x29, #56]
    ldr x22, [x29, #72]
    ldp x29, x30, [sp], #0x50
    ret
#ifndef __APPLE__
.size ge_p1p1_to_p2,.-ge_p1p1_to_p2
#endif /* __APPLE__ */
#------------------------------------------------------------------------
# void ge_p1p1_to_p3(ge_p3 *r, const ge_p1p1 *p)
# Convert an Ed25519 point from "completed" (P1P1) to "extended" (P3)
# coordinates over GF(2^255-19).  Four field multiplications:
#   r+0x00 = [p+0x60] * [p+0x00]
#   r+0x60 = [p+0x20] * [p+0x00]
#   r+0x20 = [p+0x20] * [p+0x40]
#   r+0x40 = [p+0x60] * [p+0x40]
# which is X3=T1*X1, Y3=Y1*Z1, Z3=T1*Z1, T3=Y1*X1 assuming the ref10
# layout X@0x00, Y@0x20, Z@0x40, T@0x60 -- TODO confirm against the C
# ge_p1p1/ge_p3 struct definitions.
# AAPCS64: x0 = r, x1 = p.  Callee-saved x19-x26 (and x17) are spilled
# into the 112-byte frame and restored before return.
#------------------------------------------------------------------------
#ifndef __APPLE__
.text
.globl ge_p1p1_to_p3
.type ge_p1p1_to_p3,@function
.align 2
ge_p1p1_to_p3:
#else
.section __TEXT,__text
.globl _ge_p1p1_to_p3
.p2align 2
_ge_p1p1_to_p3:
#endif /* __APPLE__ */
stp x29, x30, [sp, #-112]!
add x29, sp, #0
str x17, [x29, #40]
str x19, [x29, #48]
stp x20, x21, [x29, #56]
stp x22, x23, [x29, #72]
stp x24, x25, [x29, #88]
str x26, [x29, #104]
str x0, [x29, #16]
str x1, [x29, #24]
# x2 -> p+0x00 (B operand), x1 -> p+0x60 (A operand)
mov x2, x1
add x1, x1, #0x60
# r+0x00 = [p+0x60] * [p+0x00]
# 4x4-limb schoolbook product: A in x10..x13, B in x6..x9,
# 512-bit result accumulated in x14..x17,x19..x22 via mul/umulh + carries.
# Multiply
ldp x10, x11, [x1]
ldp x12, x13, [x1, #16]
ldp x6, x7, [x2]
ldp x8, x9, [x2, #16]
# A[0] * B[0]
umulh x15, x10, x6
mul x14, x10, x6
# A[2] * B[0]
umulh x17, x12, x6
mul x16, x12, x6
# A[1] * B[0]
mul x3, x11, x6
adds x15, x15, x3
umulh x4, x11, x6
adcs x16, x16, x4
# A[1] * B[3]
umulh x20, x11, x9
adc x17, x17, xzr
mul x19, x11, x9
# A[0] * B[1]
mul x3, x10, x7
adds x15, x15, x3
umulh x4, x10, x7
adcs x16, x16, x4
# A[2] * B[1]
mul x3, x12, x7
adcs x17, x17, x3
umulh x4, x12, x7
adcs x19, x19, x4
adc x20, x20, xzr
# A[1] * B[2]
mul x3, x11, x8
adds x17, x17, x3
umulh x4, x11, x8
adcs x19, x19, x4
adcs x20, x20, xzr
adc x21, xzr, xzr
# A[0] * B[2]
mul x3, x10, x8
adds x16, x16, x3
umulh x4, x10, x8
adcs x17, x17, x4
adcs x19, x19, xzr
adcs x20, x20, xzr
adc x21, x21, xzr
# A[1] * B[1]
mul x3, x11, x7
adds x16, x16, x3
umulh x4, x11, x7
adcs x17, x17, x4
# A[3] * B[1]
mul x3, x13, x7
adcs x19, x19, x3
umulh x4, x13, x7
adcs x20, x20, x4
adc x21, x21, xzr
# A[2] * B[2]
mul x3, x12, x8
adds x19, x19, x3
umulh x4, x12, x8
adcs x20, x20, x4
# A[3] * B[3]
mul x3, x13, x9
adcs x21, x21, x3
umulh x22, x13, x9
adc x22, x22, xzr
# A[0] * B[3]
mul x3, x10, x9
adds x17, x17, x3
umulh x4, x10, x9
adcs x19, x19, x4
# A[2] * B[3]
mul x3, x12, x9
adcs x20, x20, x3
umulh x4, x12, x9
adcs x21, x21, x4
adc x22, x22, xzr
# A[3] * B[0]
mul x3, x13, x6
adds x17, x17, x3
umulh x4, x13, x6
adcs x19, x19, x4
# A[3] * B[2]
mul x3, x13, x8
adcs x20, x20, x3
umulh x4, x13, x8
adcs x21, x21, x4
adc x22, x22, xzr
# Fold the 512-bit product into 256 bits mod 2^255-19:
# 2^256 == 38 (mod p), so high limbs are multiplied by 38 and added in;
# the stray bit 255 is folded via extr and * 19 (2^255 == 19 mod p).
# Reduce
mov x3, #38
mul x4, x3, x22
adds x17, x17, x4
umulh x5, x3, x22
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x17, #63
mul x5, x5, x3
and x17, x17, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x19
adds x14, x14, x4
umulh x19, x3, x19
mul x4, x3, x20
adcs x15, x15, x4
umulh x20, x3, x20
mul x4, x3, x21
adcs x16, x16, x4
umulh x21, x3, x21
adc x17, x17, xzr
# Add high product results in
adds x14, x14, x5
adcs x15, x15, x19
adcs x16, x16, x20
adc x17, x17, x21
# Store
stp x14, x15, [x0]
stp x16, x17, [x0, #16]
# r+0x60 = [p+0x20] * [p+0x00] (B limbs x6..x9 still live from above)
sub x1, x1, #0x40
add x0, x0, #0x60
# Multiply
ldp x23, x24, [x1]
ldp x25, x26, [x1, #16]
# A[0] * B[0]
umulh x15, x23, x6
mul x14, x23, x6
# A[2] * B[0]
umulh x17, x25, x6
mul x16, x25, x6
# A[1] * B[0]
mul x3, x24, x6
adds x15, x15, x3
umulh x4, x24, x6
adcs x16, x16, x4
# A[1] * B[3]
umulh x20, x24, x9
adc x17, x17, xzr
mul x19, x24, x9
# A[0] * B[1]
mul x3, x23, x7
adds x15, x15, x3
umulh x4, x23, x7
adcs x16, x16, x4
# A[2] * B[1]
mul x3, x25, x7
adcs x17, x17, x3
umulh x4, x25, x7
adcs x19, x19, x4
adc x20, x20, xzr
# A[1] * B[2]
mul x3, x24, x8
adds x17, x17, x3
umulh x4, x24, x8
adcs x19, x19, x4
adcs x20, x20, xzr
adc x21, xzr, xzr
# A[0] * B[2]
mul x3, x23, x8
adds x16, x16, x3
umulh x4, x23, x8
adcs x17, x17, x4
adcs x19, x19, xzr
adcs x20, x20, xzr
adc x21, x21, xzr
# A[1] * B[1]
mul x3, x24, x7
adds x16, x16, x3
umulh x4, x24, x7
adcs x17, x17, x4
# A[3] * B[1]
mul x3, x26, x7
adcs x19, x19, x3
umulh x4, x26, x7
adcs x20, x20, x4
adc x21, x21, xzr
# A[2] * B[2]
mul x3, x25, x8
adds x19, x19, x3
umulh x4, x25, x8
adcs x20, x20, x4
# A[3] * B[3]
mul x3, x26, x9
adcs x21, x21, x3
umulh x22, x26, x9
adc x22, x22, xzr
# A[0] * B[3]
mul x3, x23, x9
adds x17, x17, x3
umulh x4, x23, x9
adcs x19, x19, x4
# A[2] * B[3]
mul x3, x25, x9
adcs x20, x20, x3
umulh x4, x25, x9
adcs x21, x21, x4
adc x22, x22, xzr
# A[3] * B[0]
mul x3, x26, x6
adds x17, x17, x3
umulh x4, x26, x6
adcs x19, x19, x4
# A[3] * B[2]
mul x3, x26, x8
adcs x20, x20, x3
umulh x4, x26, x8
adcs x21, x21, x4
adc x22, x22, xzr
# Reduce
mov x3, #38
mul x4, x3, x22
adds x17, x17, x4
umulh x5, x3, x22
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x17, #63
mul x5, x5, x3
and x17, x17, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x19
adds x14, x14, x4
umulh x19, x3, x19
mul x4, x3, x20
adcs x15, x15, x4
umulh x20, x3, x20
mul x4, x3, x21
adcs x16, x16, x4
umulh x21, x3, x21
adc x17, x17, xzr
# Add high product results in
adds x14, x14, x5
adcs x15, x15, x19
adcs x16, x16, x20
adc x17, x17, x21
# Store
stp x14, x15, [x0]
stp x16, x17, [x0, #16]
# r+0x20 = [p+0x20] * [p+0x40] (A limbs x23..x26 still live)
add x2, x1, #32
sub x0, x0, #0x40
# Multiply
ldp x6, x7, [x2]
ldp x8, x9, [x2, #16]
# A[0] * B[0]
umulh x15, x23, x6
mul x14, x23, x6
# A[2] * B[0]
umulh x17, x25, x6
mul x16, x25, x6
# A[1] * B[0]
mul x3, x24, x6
adds x15, x15, x3
umulh x4, x24, x6
adcs x16, x16, x4
# A[1] * B[3]
umulh x20, x24, x9
adc x17, x17, xzr
mul x19, x24, x9
# A[0] * B[1]
mul x3, x23, x7
adds x15, x15, x3
umulh x4, x23, x7
adcs x16, x16, x4
# A[2] * B[1]
mul x3, x25, x7
adcs x17, x17, x3
umulh x4, x25, x7
adcs x19, x19, x4
adc x20, x20, xzr
# A[1] * B[2]
mul x3, x24, x8
adds x17, x17, x3
umulh x4, x24, x8
adcs x19, x19, x4
adcs x20, x20, xzr
adc x21, xzr, xzr
# A[0] * B[2]
mul x3, x23, x8
adds x16, x16, x3
umulh x4, x23, x8
adcs x17, x17, x4
adcs x19, x19, xzr
adcs x20, x20, xzr
adc x21, x21, xzr
# A[1] * B[1]
mul x3, x24, x7
adds x16, x16, x3
umulh x4, x24, x7
adcs x17, x17, x4
# A[3] * B[1]
mul x3, x26, x7
adcs x19, x19, x3
umulh x4, x26, x7
adcs x20, x20, x4
adc x21, x21, xzr
# A[2] * B[2]
mul x3, x25, x8
adds x19, x19, x3
umulh x4, x25, x8
adcs x20, x20, x4
# A[3] * B[3]
mul x3, x26, x9
adcs x21, x21, x3
umulh x22, x26, x9
adc x22, x22, xzr
# A[0] * B[3]
mul x3, x23, x9
adds x17, x17, x3
umulh x4, x23, x9
adcs x19, x19, x4
# A[2] * B[3]
mul x3, x25, x9
adcs x20, x20, x3
umulh x4, x25, x9
adcs x21, x21, x4
adc x22, x22, xzr
# A[3] * B[0]
mul x3, x26, x6
adds x17, x17, x3
umulh x4, x26, x6
adcs x19, x19, x4
# A[3] * B[2]
mul x3, x26, x8
adcs x20, x20, x3
umulh x4, x26, x8
adcs x21, x21, x4
adc x22, x22, xzr
# Reduce
mov x3, #38
mul x4, x3, x22
adds x17, x17, x4
umulh x5, x3, x22
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x17, #63
mul x5, x5, x3
and x17, x17, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x19
adds x14, x14, x4
umulh x19, x3, x19
mul x4, x3, x20
adcs x15, x15, x4
umulh x20, x3, x20
mul x4, x3, x21
adcs x16, x16, x4
umulh x21, x3, x21
adc x17, x17, xzr
# Add high product results in
adds x14, x14, x5
adcs x15, x15, x19
adcs x16, x16, x20
adc x17, x17, x21
# Store
stp x14, x15, [x0]
stp x16, x17, [x0, #16]
# r+0x40 = [p+0x60] * [p+0x40]
# No loads here: A limbs x10..x13 (from the first multiply) and
# B limbs x6..x9 (from the previous one) are still live in registers.
add x1, x1, #0x40
add x0, x0, #32
# Multiply
# A[0] * B[0]
umulh x15, x10, x6
mul x14, x10, x6
# A[2] * B[0]
umulh x17, x12, x6
mul x16, x12, x6
# A[1] * B[0]
mul x3, x11, x6
adds x15, x15, x3
umulh x4, x11, x6
adcs x16, x16, x4
# A[1] * B[3]
umulh x20, x11, x9
adc x17, x17, xzr
mul x19, x11, x9
# A[0] * B[1]
mul x3, x10, x7
adds x15, x15, x3
umulh x4, x10, x7
adcs x16, x16, x4
# A[2] * B[1]
mul x3, x12, x7
adcs x17, x17, x3
umulh x4, x12, x7
adcs x19, x19, x4
adc x20, x20, xzr
# A[1] * B[2]
mul x3, x11, x8
adds x17, x17, x3
umulh x4, x11, x8
adcs x19, x19, x4
adcs x20, x20, xzr
adc x21, xzr, xzr
# A[0] * B[2]
mul x3, x10, x8
adds x16, x16, x3
umulh x4, x10, x8
adcs x17, x17, x4
adcs x19, x19, xzr
adcs x20, x20, xzr
adc x21, x21, xzr
# A[1] * B[1]
mul x3, x11, x7
adds x16, x16, x3
umulh x4, x11, x7
adcs x17, x17, x4
# A[3] * B[1]
mul x3, x13, x7
adcs x19, x19, x3
umulh x4, x13, x7
adcs x20, x20, x4
adc x21, x21, xzr
# A[2] * B[2]
mul x3, x12, x8
adds x19, x19, x3
umulh x4, x12, x8
adcs x20, x20, x4
# A[3] * B[3]
mul x3, x13, x9
adcs x21, x21, x3
umulh x22, x13, x9
adc x22, x22, xzr
# A[0] * B[3]
mul x3, x10, x9
adds x17, x17, x3
umulh x4, x10, x9
adcs x19, x19, x4
# A[2] * B[3]
mul x3, x12, x9
adcs x20, x20, x3
umulh x4, x12, x9
adcs x21, x21, x4
adc x22, x22, xzr
# A[3] * B[0]
mul x3, x13, x6
adds x17, x17, x3
umulh x4, x13, x6
adcs x19, x19, x4
# A[3] * B[2]
mul x3, x13, x8
adcs x20, x20, x3
umulh x4, x13, x8
adcs x21, x21, x4
adc x22, x22, xzr
# Reduce
mov x3, #38
mul x4, x3, x22
adds x17, x17, x4
umulh x5, x3, x22
adc x5, x5, xzr
mov x3, #19
extr x5, x5, x17, #63
mul x5, x5, x3
and x17, x17, #0x7fffffffffffffff
mov x3, #38
mul x4, x3, x19
adds x14, x14, x4
umulh x19, x3, x19
mul x4, x3, x20
adcs x15, x15, x4
umulh x20, x3, x20
mul x4, x3, x21
adcs x16, x16, x4
umulh x21, x3, x21
adc x17, x17, xzr
# Add high product results in
adds x14, x14, x5
adcs x15, x15, x19
adcs x16, x16, x20
adc x17, x17, x21
# Store
stp x14, x15, [x0]
stp x16, x17, [x0, #16]
# Restore callee-saved registers and the 0x70-byte frame.
ldr x17, [x29, #40]
ldr x19, [x29, #48]
ldp x20, x21, [x29, #56]
ldr x22, [x29, #72]
ldp x29, x30, [sp], #0x70
ret
#ifndef __APPLE__
.size ge_p1p1_to_p3,.-ge_p1p1_to_p3
#endif /* __APPLE__ */
#------------------------------------------------------------------------
# void ge_p2_dbl(ge_p1p1 *r, const ge_p2 *p)
# Double an Ed25519 point in projective (P2) coordinates, producing a
# "completed" (P1P1) result, over GF(2^255-19).  From the pointer
# arithmetic below (assuming X@0x00, Y@0x20, Z@0x40 -- TODO confirm
# against the C ge_p2/ge_p1p1 layout) the stores are:
#   r+0x20 = X^2 + Y^2
#   r+0x40 = Y^2 - X^2
#   r+0x00 = (X+Y)^2 - (X^2 + Y^2)
#   r+0x60 = 2*Z^2 - (Y^2 - X^2)
# AAPCS64: x0 = r, x1 = p.  Callee-saved x19-x28 (and x17) are spilled
# into the 128-byte frame.
#------------------------------------------------------------------------
#ifndef __APPLE__
.text
.globl ge_p2_dbl
.type ge_p2_dbl,@function
.align 2
ge_p2_dbl:
#else
.section __TEXT,__text
.globl _ge_p2_dbl
.p2align 2
_ge_p2_dbl:
#endif /* __APPLE__ */
stp x29, x30, [sp, #-128]!
add x29, sp, #0
str x17, [x29, #40]
str x19, [x29, #48]
stp x20, x21, [x29, #56]
stp x22, x23, [x29, #72]
stp x24, x25, [x29, #88]
stp x26, x27, [x29, #104]
str x28, [x29, #120]
str x0, [x29, #16]
str x1, [x29, #24]
add x0, x0, #0x40
# [r+0x40] = X^2 (temporary; overwritten later with Y^2 - X^2)
# Squaring: off-diagonal products are formed once and doubled, then the
# diagonal A[i]*A[i] terms are added in, before the usual mod-p reduce.
# Square
ldp x4, x5, [x1]
ldp x6, x7, [x1, #16]
# A[0] * A[1]
umulh x10, x4, x5
mul x9, x4, x5
# A[0] * A[3]
umulh x12, x4, x7
mul x11, x4, x7
# A[0] * A[2]
mul x25, x4, x6
adds x10, x10, x25
umulh x26, x4, x6
adcs x11, x11, x26
# A[1] * A[3]
mul x25, x5, x7
adcs x12, x12, x25
umulh x13, x5, x7
adc x13, x13, xzr
# A[1] * A[2]
mul x25, x5, x6
adds x11, x11, x25
umulh x26, x5, x6
adcs x12, x12, x26
# A[2] * A[3]
mul x25, x6, x7
adcs x13, x13, x25
umulh x14, x6, x7
adc x14, x14, xzr
# Double
adds x9, x9, x9
adcs x10, x10, x10
adcs x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adc x15, xzr, xzr
# A[0] * A[0]
umulh x26, x4, x4
mul x8, x4, x4
# A[1] * A[1]
mul x25, x5, x5
adds x9, x9, x26
umulh x26, x5, x5
adcs x10, x10, x25
# A[2] * A[2]
mul x25, x6, x6
adcs x11, x11, x26
umulh x26, x6, x6
adcs x12, x12, x25
# A[3] * A[3]
mul x25, x7, x7
adcs x13, x13, x26
umulh x26, x7, x7
adcs x14, x14, x25
adc x15, x15, x26
# Fold 512-bit square into 256 bits mod 2^255-19 (2^256==38, 2^255==19).
# Reduce
mov x25, #38
mul x26, x25, x15
adds x11, x11, x26
umulh x27, x25, x15
adc x27, x27, xzr
mov x25, #19
extr x27, x27, x11, #63
mul x27, x27, x25
and x11, x11, #0x7fffffffffffffff
mov x25, #38
mul x26, x25, x12
adds x8, x8, x26
umulh x12, x25, x12
mul x26, x25, x13
adcs x9, x9, x26
umulh x13, x25, x13
mul x26, x25, x14
adcs x10, x10, x26
umulh x14, x25, x14
adc x11, x11, xzr
# Add high product results in
adds x8, x8, x27
adcs x9, x9, x12
adcs x10, x10, x13
adc x11, x11, x14
# Store
stp x8, x9, [x0]
stp x10, x11, [x0, #16]
# Y^2 into x21..x24 (kept in registers, Y limbs stay in x16..x20)
add x2, x1, #32
sub x0, x0, #32
# Square
ldp x16, x17, [x2]
ldp x19, x20, [x2, #16]
# A[0] * A[1]
umulh x23, x16, x17
mul x22, x16, x17
# A[0] * A[3]
umulh x4, x16, x20
mul x24, x16, x20
# A[0] * A[2]
mul x25, x16, x19
adds x23, x23, x25
umulh x26, x16, x19
adcs x24, x24, x26
# A[1] * A[3]
mul x25, x17, x20
adcs x4, x4, x25
umulh x5, x17, x20
adc x5, x5, xzr
# A[1] * A[2]
mul x25, x17, x19
adds x24, x24, x25
umulh x26, x17, x19
adcs x4, x4, x26
# A[2] * A[3]
mul x25, x19, x20
adcs x5, x5, x25
umulh x6, x19, x20
adc x6, x6, xzr
# Double
adds x22, x22, x22
adcs x23, x23, x23
adcs x24, x24, x24
adcs x4, x4, x4
adcs x5, x5, x5
adcs x6, x6, x6
adc x7, xzr, xzr
# A[0] * A[0]
umulh x26, x16, x16
mul x21, x16, x16
# A[1] * A[1]
mul x25, x17, x17
adds x22, x22, x26
umulh x26, x17, x17
adcs x23, x23, x25
# A[2] * A[2]
mul x25, x19, x19
adcs x24, x24, x26
umulh x26, x19, x19
adcs x4, x4, x25
# A[3] * A[3]
mul x25, x20, x20
adcs x5, x5, x26
umulh x26, x20, x20
adcs x6, x6, x25
adc x7, x7, x26
# Reduce
mov x25, #38
mul x26, x25, x7
adds x24, x24, x26
umulh x27, x25, x7
adc x27, x27, xzr
mov x25, #19
extr x27, x27, x24, #63
mul x27, x27, x25
and x24, x24, #0x7fffffffffffffff
mov x25, #38
mul x26, x25, x4
adds x21, x21, x26
umulh x4, x25, x4
mul x26, x25, x5
adcs x22, x22, x26
umulh x5, x25, x5
mul x26, x25, x6
adcs x23, x23, x26
umulh x6, x25, x6
adc x24, x24, xzr
# Add high product results in
adds x21, x21, x27
adcs x22, x22, x4
adcs x23, x23, x5
adc x24, x24, x6
# [r+0x20] = Y^2 + X^2, [r+0x40] = Y^2 - X^2 (both canonicalised mod p)
add x3, x0, #32
mov x2, x0
add x1, x0, #32
# Add
adds x4, x21, x8
adcs x5, x22, x9
adcs x6, x23, x10
adcs x7, x24, x11
cset x28, cs
mov x25, #19
extr x28, x28, x7, #63
mul x25, x28, x25
# Sub modulus (if overflow)
adds x4, x4, x25
adcs x5, x5, xzr
and x7, x7, #0x7fffffffffffffff
adcs x6, x6, xzr
adc x7, x7, xzr
# Sub
subs x12, x21, x8
sbcs x13, x22, x9
sbcs x14, x23, x10
sbcs x15, x24, x11
csetm x28, cc
mov x25, #-19
extr x28, x28, x15, #63
mul x25, x28, x25
# Add modulus (if underflow)
subs x12, x12, x25
sbcs x13, x13, xzr
and x15, x15, #0x7fffffffffffffff
sbcs x14, x14, xzr
sbc x15, x15, xzr
stp x4, x5, [x0]
stp x6, x7, [x0, #16]
stp x12, x13, [x1]
stp x14, x15, [x1, #16]
# x8..x11 = X + Y (reduced); Y limbs are still live in x16..x20
ldr x1, [x29, #24]
add x2, x1, #32
sub x0, x0, #32
# Add
ldp x8, x9, [x1]
ldp x10, x11, [x1, #16]
adds x8, x8, x16
adcs x9, x9, x17
adcs x10, x10, x19
adcs x11, x11, x20
cset x28, cs
mov x25, #19
# Mask the modulus
extr x28, x28, x11, #63
mul x25, x28, x25
# Sub modulus (if overflow)
adds x8, x8, x25
adcs x9, x9, xzr
and x11, x11, #0x7fffffffffffffff
adcs x10, x10, xzr
adc x11, x11, xzr
mov x1, x0
# (X+Y)^2 into x21..x24
# Square
# A[0] * A[1]
umulh x23, x8, x9
mul x22, x8, x9
# A[0] * A[3]
umulh x4, x8, x11
mul x24, x8, x11
# A[0] * A[2]
mul x25, x8, x10
adds x23, x23, x25
umulh x26, x8, x10
adcs x24, x24, x26
# A[1] * A[3]
mul x25, x9, x11
adcs x4, x4, x25
umulh x5, x9, x11
adc x5, x5, xzr
# A[1] * A[2]
mul x25, x9, x10
adds x24, x24, x25
umulh x26, x9, x10
adcs x4, x4, x26
# A[2] * A[3]
mul x25, x10, x11
adcs x5, x5, x25
umulh x6, x10, x11
adc x6, x6, xzr
# Double
adds x22, x22, x22
adcs x23, x23, x23
adcs x24, x24, x24
adcs x4, x4, x4
adcs x5, x5, x5
adcs x6, x6, x6
adc x7, xzr, xzr
# A[0] * A[0]
umulh x26, x8, x8
mul x21, x8, x8
# A[1] * A[1]
mul x25, x9, x9
adds x22, x22, x26
umulh x26, x9, x9
adcs x23, x23, x25
# A[2] * A[2]
mul x25, x10, x10
adcs x24, x24, x26
umulh x26, x10, x10
adcs x4, x4, x25
# A[3] * A[3]
mul x25, x11, x11
adcs x5, x5, x26
umulh x26, x11, x11
adcs x6, x6, x25
adc x7, x7, x26
# Reduce
mov x25, #38
mul x26, x25, x7
adds x24, x24, x26
umulh x27, x25, x7
adc x27, x27, xzr
mov x25, #19
extr x27, x27, x24, #63
mul x27, x27, x25
and x24, x24, #0x7fffffffffffffff
mov x25, #38
mul x26, x25, x4
adds x21, x21, x26
umulh x4, x25, x4
mul x26, x25, x5
adcs x22, x22, x26
umulh x5, x25, x5
mul x26, x25, x6
adcs x23, x23, x26
umulh x6, x25, x6
adc x24, x24, xzr
# Add high product results in
adds x21, x21, x27
adcs x22, x22, x4
adcs x23, x23, x5
adc x24, x24, x6
# [r+0x00] = (X+Y)^2 - (X^2 + Y^2)  (reads X^2+Y^2 back from r+0x20)
add x2, x0, #32
# Sub
ldp x8, x9, [x2]
ldp x10, x11, [x2, #16]
subs x21, x21, x8
sbcs x22, x22, x9
sbcs x23, x23, x10
sbcs x24, x24, x11
csetm x28, cc
mov x25, #-19
# Mask the modulus
extr x28, x28, x24, #63
mul x25, x28, x25
# Add modulus (if underflow)
subs x21, x21, x25
sbcs x22, x22, xzr
and x24, x24, #0x7fffffffffffffff
sbcs x23, x23, xzr
sbc x24, x24, xzr
stp x21, x22, [x0]
stp x23, x24, [x0, #16]
# 2*Z^2 into x4..x7 ("Square * 2": square, reduce, then shift left by 1
# and fold the shifted-out top bits back in with * 19)
ldr x2, [x29, #24]
add x2, x2, #0x40
add x0, x0, #0x60
# Square * 2
ldp x16, x17, [x2]
ldp x19, x20, [x2, #16]
# A[0] * A[1]
umulh x6, x16, x17
mul x5, x16, x17
# A[0] * A[3]
umulh x8, x16, x20
mul x7, x16, x20
# A[0] * A[2]
mul x25, x16, x19
adds x6, x6, x25
umulh x26, x16, x19
adcs x7, x7, x26
# A[1] * A[3]
mul x25, x17, x20
adcs x8, x8, x25
umulh x9, x17, x20
adc x9, x9, xzr
# A[1] * A[2]
mul x25, x17, x19
adds x7, x7, x25
umulh x26, x17, x19
adcs x8, x8, x26
# A[2] * A[3]
mul x25, x19, x20
adcs x9, x9, x25
umulh x10, x19, x20
adc x10, x10, xzr
# Double
adds x5, x5, x5
adcs x6, x6, x6
adcs x7, x7, x7
adcs x8, x8, x8
adcs x9, x9, x9
adcs x10, x10, x10
adc x11, xzr, xzr
# A[0] * A[0]
umulh x26, x16, x16
mul x4, x16, x16
# A[1] * A[1]
mul x25, x17, x17
adds x5, x5, x26
umulh x26, x17, x17
adcs x6, x6, x25
# A[2] * A[2]
mul x25, x19, x19
adcs x7, x7, x26
umulh x26, x19, x19
adcs x8, x8, x25
# A[3] * A[3]
mul x25, x20, x20
adcs x9, x9, x26
umulh x26, x20, x20
adcs x10, x10, x25
adc x11, x11, x26
# Reduce
mov x25, #38
mul x26, x25, x11
adds x7, x7, x26
umulh x27, x25, x11
adc x27, x27, xzr
mov x25, #19
extr x27, x27, x7, #63
mul x27, x27, x25
and x7, x7, #0x7fffffffffffffff
mov x25, #38
mul x26, x25, x8
adds x4, x4, x26
umulh x8, x25, x8
mul x26, x25, x9
adcs x5, x5, x26
umulh x9, x25, x9
mul x26, x25, x10
adcs x6, x6, x26
umulh x10, x25, x10
adc x7, x7, xzr
# Add high product results in
adds x4, x4, x27
adcs x5, x5, x8
adcs x6, x6, x9
adc x7, x7, x10
# Multiply by 2: shift the 256-bit value left one bit and reduce the
# carried-out top bits modulo p (each 2^255 contributes 19).
mov x25, #19
lsr x26, x7, #62
extr x7, x7, x6, #63
extr x6, x6, x5, #63
extr x5, x5, x4, #63
lsl x4, x4, #1
mul x26, x26, x25
adds x4, x4, x26
adcs x5, x5, xzr
and x7, x7, #0x7fffffffffffffff
adcs x6, x6, xzr
adc x7, x7, xzr
# Store
sub x1, x0, #32
# [r+0x60] = 2*Z^2 - (Y^2 - X^2)  (x12..x15 still hold Y^2 - X^2)
# Sub
subs x4, x4, x12
sbcs x5, x5, x13
sbcs x6, x6, x14
sbcs x7, x7, x15
csetm x28, cc
mov x25, #-19
# Mask the modulus
extr x28, x28, x7, #63
mul x25, x28, x25
# Add modulus (if underflow)
subs x4, x4, x25
sbcs x5, x5, xzr
and x7, x7, #0x7fffffffffffffff
sbcs x6, x6, xzr
sbc x7, x7, xzr
stp x4, x5, [x0]
stp x6, x7, [x0, #16]
# Restore callee-saved registers and the 0x80-byte frame.
ldr x17, [x29, #40]
ldr x19, [x29, #48]
ldp x20, x21, [x29, #56]
ldp x22, x23, [x29, #72]
ldp x24, x25, [x29, #88]
ldp x26, x27, [x29, #104]
ldr x28, [x29, #120]
ldp x29, x30, [sp], #0x80
ret
#ifndef __APPLE__
.size ge_p2_dbl,.-ge_p2_dbl
#endif /* __APPLE__ */
#------------------------------------------------------------------------
# void ge_madd(ge_p1p1 *r, const ge_p3 *p, const ge_precomp *q)
# Mixed addition: add a precomputed affine-style point q to an extended
# (P3) point p, producing a completed (P1P1) result over GF(2^255-19).
# Data flow visible below (field offsets hedged -- TODO confirm against
# the C ge_p3/ge_precomp layouts; presumably q = {y+x, y-x, xy2d}):
#   t0 = (p->Y + p->X) * [q+0x00]
#   t1 = (p->Y - p->X) * [q+0x20]
#   r+0x20 = t0 + t1,  r+0x00 = t0 - t1
#   t2 = p->T * [q+0x40],  t3 = 2 * p->Z
#   r+0x40 = t3 + t2,  r+0x60 = t3 - t2
# AAPCS64: x0 = r, x1 = p, x2 = q.  Callee-saved x19-x28 (and x17) are
# spilled into the 144-byte frame.
#------------------------------------------------------------------------
#ifndef __APPLE__
.text
.globl ge_madd
.type ge_madd,@function
.align 2
ge_madd:
#else
.section __TEXT,__text
.globl _ge_madd
.p2align 2
_ge_madd:
#endif /* __APPLE__ */
stp x29, x30, [sp, #-144]!
add x29, sp, #0
str x17, [x29, #56]
str x19, [x29, #64]
stp x20, x21, [x29, #72]
stp x22, x23, [x29, #88]
stp x24, x25, [x29, #104]
stp x26, x27, [x29, #120]
str x28, [x29, #136]
str x0, [x29, #16]
str x1, [x29, #24]
str x2, [x29, #32]
mov x3, x1
add x2, x1, #32
add x1, x0, #32
# x16..x20 = p->Y + p->X (reduced); x12..x15 = p->Y - p->X (reduced)
# Add
ldp x8, x9, [x2]
ldp x10, x11, [x2, #16]
ldp x4, x5, [x3]
ldp x6, x7, [x3, #16]
adds x16, x8, x4
adcs x17, x9, x5
adcs x19, x10, x6
adcs x20, x11, x7
cset x28, cs
mov x25, #19
extr x28, x28, x20, #63
mul x25, x28, x25
# Sub modulus (if overflow)
adds x16, x16, x25
adcs x17, x17, xzr
and x20, x20, #0x7fffffffffffffff
adcs x19, x19, xzr
adc x20, x20, xzr
# Sub
subs x12, x8, x4
sbcs x13, x9, x5
sbcs x14, x10, x6
sbcs x15, x11, x7
csetm x28, cc
mov x25, #-19
extr x28, x28, x15, #63
mul x25, x28, x25
# Add modulus (if underflow)
subs x12, x12, x25
sbcs x13, x13, xzr
and x15, x15, #0x7fffffffffffffff
sbcs x14, x14, xzr
sbc x15, x15, xzr
# x21..x24 = (Y + X) * [q+0x00], schoolbook 4x4 mul + mod-p reduce
ldr x2, [x29, #32]
mov x1, x0
# Multiply
ldp x8, x9, [x2]
ldp x10, x11, [x2, #16]
# A[0] * B[0]
umulh x22, x16, x8
mul x21, x16, x8
# A[2] * B[0]
umulh x24, x19, x8
mul x23, x19, x8
# A[1] * B[0]
mul x25, x17, x8
adds x22, x22, x25
umulh x26, x17, x8
adcs x23, x23, x26
# A[1] * B[3]
umulh x5, x17, x11
adc x24, x24, xzr
mul x4, x17, x11
# A[0] * B[1]
mul x25, x16, x9
adds x22, x22, x25
umulh x26, x16, x9
adcs x23, x23, x26
# A[2] * B[1]
mul x25, x19, x9
adcs x24, x24, x25
umulh x26, x19, x9
adcs x4, x4, x26
adc x5, x5, xzr
# A[1] * B[2]
mul x25, x17, x10
adds x24, x24, x25
umulh x26, x17, x10
adcs x4, x4, x26
adcs x5, x5, xzr
adc x6, xzr, xzr
# A[0] * B[2]
mul x25, x16, x10
adds x23, x23, x25
umulh x26, x16, x10
adcs x24, x24, x26
adcs x4, x4, xzr
adcs x5, x5, xzr
adc x6, x6, xzr
# A[1] * B[1]
mul x25, x17, x9
adds x23, x23, x25
umulh x26, x17, x9
adcs x24, x24, x26
# A[3] * B[1]
mul x25, x20, x9
adcs x4, x4, x25
umulh x26, x20, x9
adcs x5, x5, x26
adc x6, x6, xzr
# A[2] * B[2]
mul x25, x19, x10
adds x4, x4, x25
umulh x26, x19, x10
adcs x5, x5, x26
# A[3] * B[3]
mul x25, x20, x11
adcs x6, x6, x25
umulh x7, x20, x11
adc x7, x7, xzr
# A[0] * B[3]
mul x25, x16, x11
adds x24, x24, x25
umulh x26, x16, x11
adcs x4, x4, x26
# A[2] * B[3]
mul x25, x19, x11
adcs x5, x5, x25
umulh x26, x19, x11
adcs x6, x6, x26
adc x7, x7, xzr
# A[3] * B[0]
mul x25, x20, x8
adds x24, x24, x25
umulh x26, x20, x8
adcs x4, x4, x26
# A[3] * B[2]
mul x25, x20, x10
adcs x5, x5, x25
umulh x26, x20, x10
adcs x6, x6, x26
adc x7, x7, xzr
# Fold 512-bit product mod 2^255-19 (2^256==38, 2^255==19).
# Reduce
mov x25, #38
mul x26, x25, x7
adds x24, x24, x26
umulh x27, x25, x7
adc x27, x27, xzr
mov x25, #19
extr x27, x27, x24, #63
mul x27, x27, x25
and x24, x24, #0x7fffffffffffffff
mov x25, #38
mul x26, x25, x4
adds x21, x21, x26
umulh x4, x25, x4
mul x26, x25, x5
adcs x22, x22, x26
umulh x5, x25, x5
mul x26, x25, x6
adcs x23, x23, x26
umulh x6, x25, x6
adc x24, x24, xzr
# Add high product results in
adds x21, x21, x27
adcs x22, x22, x4
adcs x23, x23, x5
adc x24, x24, x6
# x4..x7 = (Y - X) * [q+0x20]
add x2, x2, #32
add x1, x0, #32
add x0, x0, #32
# Multiply
ldp x16, x17, [x2]
ldp x19, x20, [x2, #16]
# A[0] * B[0]
umulh x5, x12, x16
mul x4, x12, x16
# A[2] * B[0]
umulh x7, x14, x16
mul x6, x14, x16
# A[1] * B[0]
mul x25, x13, x16
adds x5, x5, x25
umulh x26, x13, x16
adcs x6, x6, x26
# A[1] * B[3]
umulh x9, x13, x20
adc x7, x7, xzr
mul x8, x13, x20
# A[0] * B[1]
mul x25, x12, x17
adds x5, x5, x25
umulh x26, x12, x17
adcs x6, x6, x26
# A[2] * B[1]
mul x25, x14, x17
adcs x7, x7, x25
umulh x26, x14, x17
adcs x8, x8, x26
adc x9, x9, xzr
# A[1] * B[2]
mul x25, x13, x19
adds x7, x7, x25
umulh x26, x13, x19
adcs x8, x8, x26
adcs x9, x9, xzr
adc x10, xzr, xzr
# A[0] * B[2]
mul x25, x12, x19
adds x6, x6, x25
umulh x26, x12, x19
adcs x7, x7, x26
adcs x8, x8, xzr
adcs x9, x9, xzr
adc x10, x10, xzr
# A[1] * B[1]
mul x25, x13, x17
adds x6, x6, x25
umulh x26, x13, x17
adcs x7, x7, x26
# A[3] * B[1]
mul x25, x15, x17
adcs x8, x8, x25
umulh x26, x15, x17
adcs x9, x9, x26
adc x10, x10, xzr
# A[2] * B[2]
mul x25, x14, x19
adds x8, x8, x25
umulh x26, x14, x19
adcs x9, x9, x26
# A[3] * B[3]
mul x25, x15, x20
adcs x10, x10, x25
umulh x11, x15, x20
adc x11, x11, xzr
# A[0] * B[3]
mul x25, x12, x20
adds x7, x7, x25
umulh x26, x12, x20
adcs x8, x8, x26
# A[2] * B[3]
mul x25, x14, x20
adcs x9, x9, x25
umulh x26, x14, x20
adcs x10, x10, x26
adc x11, x11, xzr
# A[3] * B[0]
mul x25, x15, x16
adds x7, x7, x25
umulh x26, x15, x16
adcs x8, x8, x26
# A[3] * B[2]
mul x25, x15, x19
adcs x9, x9, x25
umulh x26, x15, x19
adcs x10, x10, x26
adc x11, x11, xzr
# Reduce
mov x25, #38
mul x26, x25, x11
adds x7, x7, x26
umulh x27, x25, x11
adc x27, x27, xzr
mov x25, #19
extr x27, x27, x7, #63
mul x27, x27, x25
and x7, x7, #0x7fffffffffffffff
mov x25, #38
mul x26, x25, x8
adds x4, x4, x26
umulh x8, x25, x8
mul x26, x25, x9
adcs x5, x5, x26
umulh x9, x25, x9
mul x26, x25, x10
adcs x6, x6, x26
umulh x10, x25, x10
adc x7, x7, xzr
# Add high product results in
adds x4, x4, x27
adcs x5, x5, x8
adcs x6, x6, x9
adc x7, x7, x10
# [r+0x20] = t0 + t1, [r+0x00] = t0 - t1
mov x3, x0
sub x2, x0, #32
sub x1, x0, #32
# Add
adds x8, x21, x4
adcs x9, x22, x5
adcs x10, x23, x6
adcs x11, x24, x7
cset x28, cs
mov x25, #19
extr x28, x28, x11, #63
mul x25, x28, x25
# Sub modulus (if overflow)
adds x8, x8, x25
adcs x9, x9, xzr
and x11, x11, #0x7fffffffffffffff
adcs x10, x10, xzr
adc x11, x11, xzr
# Sub
subs x12, x21, x4
sbcs x13, x22, x5
sbcs x14, x23, x6
sbcs x15, x24, x7
csetm x28, cc
mov x25, #-19
extr x28, x28, x15, #63
mul x25, x28, x25
# Add modulus (if underflow)
subs x12, x12, x25
sbcs x13, x13, xzr
and x15, x15, #0x7fffffffffffffff
sbcs x14, x14, xzr
sbc x15, x15, xzr
stp x8, x9, [x0]
stp x10, x11, [x0, #16]
stp x12, x13, [x1]
stp x14, x15, [x1, #16]
# x16,x17,x19,x20 = p->T * [q+0x40]
ldr x1, [x29, #24]
ldr x2, [x29, #32]
add x2, x2, #0x40
add x1, x1, #0x60
add x0, x0, #0x40
# Multiply
ldp x21, x22, [x1]
ldp x23, x24, [x1, #16]
ldp x4, x5, [x2]
ldp x6, x7, [x2, #16]
# A[0] * B[0]
umulh x17, x21, x4
mul x16, x21, x4
# A[2] * B[0]
umulh x20, x23, x4
mul x19, x23, x4
# A[1] * B[0]
mul x25, x22, x4
adds x17, x17, x25
umulh x26, x22, x4
adcs x19, x19, x26
# A[1] * B[3]
umulh x9, x22, x7
adc x20, x20, xzr
mul x8, x22, x7
# A[0] * B[1]
mul x25, x21, x5
adds x17, x17, x25
umulh x26, x21, x5
adcs x19, x19, x26
# A[2] * B[1]
mul x25, x23, x5
adcs x20, x20, x25
umulh x26, x23, x5
adcs x8, x8, x26
adc x9, x9, xzr
# A[1] * B[2]
mul x25, x22, x6
adds x20, x20, x25
umulh x26, x22, x6
adcs x8, x8, x26
adcs x9, x9, xzr
adc x10, xzr, xzr
# A[0] * B[2]
mul x25, x21, x6
adds x19, x19, x25
umulh x26, x21, x6
adcs x20, x20, x26
adcs x8, x8, xzr
adcs x9, x9, xzr
adc x10, x10, xzr
# A[1] * B[1]
mul x25, x22, x5
adds x19, x19, x25
umulh x26, x22, x5
adcs x20, x20, x26
# A[3] * B[1]
mul x25, x24, x5
adcs x8, x8, x25
umulh x26, x24, x5
adcs x9, x9, x26
adc x10, x10, xzr
# A[2] * B[2]
mul x25, x23, x6
adds x8, x8, x25
umulh x26, x23, x6
adcs x9, x9, x26
# A[3] * B[3]
mul x25, x24, x7
adcs x10, x10, x25
umulh x11, x24, x7
adc x11, x11, xzr
# A[0] * B[3]
mul x25, x21, x7
adds x20, x20, x25
umulh x26, x21, x7
adcs x8, x8, x26
# A[2] * B[3]
mul x25, x23, x7
adcs x9, x9, x25
umulh x26, x23, x7
adcs x10, x10, x26
adc x11, x11, xzr
# A[3] * B[0]
mul x25, x24, x4
adds x20, x20, x25
umulh x26, x24, x4
adcs x8, x8, x26
# A[3] * B[2]
mul x25, x24, x6
adcs x9, x9, x25
umulh x26, x24, x6
adcs x10, x10, x26
adc x11, x11, xzr
# Reduce
mov x25, #38
mul x26, x25, x11
adds x20, x20, x26
umulh x27, x25, x11
adc x27, x27, xzr
mov x25, #19
extr x27, x27, x20, #63
mul x27, x27, x25
and x20, x20, #0x7fffffffffffffff
mov x25, #38
mul x26, x25, x8
adds x16, x16, x26
umulh x8, x25, x8
mul x26, x25, x9
adcs x17, x17, x26
umulh x9, x25, x9
mul x26, x25, x10
adcs x19, x19, x26
umulh x10, x25, x10
adc x20, x20, xzr
# Add high product results in
adds x16, x16, x27
adcs x17, x17, x8
adcs x19, x19, x9
adc x20, x20, x10
# x12..x15 = 2 * p->Z, conditionally reduced via the sign of the top bit
sub x1, x1, #32
# Double
ldp x12, x13, [x1]
ldp x14, x15, [x1, #16]
adds x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adc x15, x15, x15
mov x25, #-19
asr x28, x15, #63
# Mask the modulus
and x25, x28, x25
and x26, x28, #0x7fffffffffffffff
# Sub modulus (if overflow)
subs x12, x12, x25
sbcs x13, x13, x28
sbcs x14, x14, x28
sbc x15, x15, x26
# [r+0x40] = 2Z + T*q2, [r+0x60] = 2Z - T*q2
mov x3, x0
sub x2, x0, #32
mov x1, x0
sub x0, x0, #32
# Add
adds x8, x12, x16
adcs x9, x13, x17
adcs x10, x14, x19
adcs x11, x15, x20
cset x28, cs
mov x25, #19
extr x28, x28, x11, #63
mul x25, x28, x25
# Sub modulus (if overflow)
adds x8, x8, x25
adcs x9, x9, xzr
and x11, x11, #0x7fffffffffffffff
adcs x10, x10, xzr
adc x11, x11, xzr
# Sub
subs x4, x12, x16
sbcs x5, x13, x17
sbcs x6, x14, x19
sbcs x7, x15, x20
csetm x28, cc
mov x25, #-19
extr x28, x28, x7, #63
mul x25, x28, x25
# Add modulus (if underflow)
subs x4, x4, x25
sbcs x5, x5, xzr
and x7, x7, #0x7fffffffffffffff
sbcs x6, x6, xzr
sbc x7, x7, xzr
stp x8, x9, [x0]
stp x10, x11, [x0, #16]
stp x4, x5, [x1]
stp x6, x7, [x1, #16]
# Restore callee-saved registers and the 0x90-byte frame.
ldr x17, [x29, #56]
ldr x19, [x29, #64]
ldp x20, x21, [x29, #72]
ldp x22, x23, [x29, #88]
ldp x24, x25, [x29, #104]
ldp x26, x27, [x29, #120]
ldr x28, [x29, #136]
ldp x29, x30, [sp], #0x90
ret
#ifndef __APPLE__
.size ge_madd,.-ge_madd
#endif /* __APPLE__ */
#------------------------------------------------------------------------
# void ge_msub(ge_p1p1 *r, const ge_p3 *p, const ge_precomp *q)
# Mixed subtraction: subtract a precomputed point q from an extended
# (P3) point p, producing a completed (P1P1) result over GF(2^255-19).
# Identical structure to ge_madd above, with two differences visible in
# the code: the two q multiplicands are swapped ((Y+X) is multiplied by
# [q+0x20] and (Y-X) by [q+0x00]), and the final sum/difference of
# 2Z and T*[q+0x40] land at r+0x60 / r+0x40 respectively (swapped
# relative to ge_madd).  Field offsets hedged -- TODO confirm against
# the C ge_p3/ge_precomp layouts.
# AAPCS64: x0 = r, x1 = p, x2 = q.  Callee-saved x19-x28 (and x17) are
# spilled into the 144-byte frame.
#------------------------------------------------------------------------
#ifndef __APPLE__
.text
.globl ge_msub
.type ge_msub,@function
.align 2
ge_msub:
#else
.section __TEXT,__text
.globl _ge_msub
.p2align 2
_ge_msub:
#endif /* __APPLE__ */
stp x29, x30, [sp, #-144]!
add x29, sp, #0
str x17, [x29, #56]
str x19, [x29, #64]
stp x20, x21, [x29, #72]
stp x22, x23, [x29, #88]
stp x24, x25, [x29, #104]
stp x26, x27, [x29, #120]
str x28, [x29, #136]
str x0, [x29, #16]
str x1, [x29, #24]
str x2, [x29, #32]
mov x3, x1
add x2, x1, #32
add x1, x0, #32
# x16..x20 = p->Y + p->X (reduced); x12..x15 = p->Y - p->X (reduced)
# Add
ldp x8, x9, [x2]
ldp x10, x11, [x2, #16]
ldp x4, x5, [x3]
ldp x6, x7, [x3, #16]
adds x16, x8, x4
adcs x17, x9, x5
adcs x19, x10, x6
adcs x20, x11, x7
cset x28, cs
mov x25, #19
extr x28, x28, x20, #63
mul x25, x28, x25
# Sub modulus (if overflow)
adds x16, x16, x25
adcs x17, x17, xzr
and x20, x20, #0x7fffffffffffffff
adcs x19, x19, xzr
adc x20, x20, xzr
# Sub
subs x12, x8, x4
sbcs x13, x9, x5
sbcs x14, x10, x6
sbcs x15, x11, x7
csetm x28, cc
mov x25, #-19
extr x28, x28, x15, #63
mul x25, x28, x25
# Add modulus (if underflow)
subs x12, x12, x25
sbcs x13, x13, xzr
and x15, x15, #0x7fffffffffffffff
sbcs x14, x14, xzr
sbc x15, x15, xzr
# x21..x24 = (Y + X) * [q+0x20]  (note: q+0x20 here, unlike ge_madd)
ldr x2, [x29, #32]
add x2, x2, #32
mov x1, x0
# Multiply
ldp x8, x9, [x2]
ldp x10, x11, [x2, #16]
# A[0] * B[0]
umulh x22, x16, x8
mul x21, x16, x8
# A[2] * B[0]
umulh x24, x19, x8
mul x23, x19, x8
# A[1] * B[0]
mul x25, x17, x8
adds x22, x22, x25
umulh x26, x17, x8
adcs x23, x23, x26
# A[1] * B[3]
umulh x5, x17, x11
adc x24, x24, xzr
mul x4, x17, x11
# A[0] * B[1]
mul x25, x16, x9
adds x22, x22, x25
umulh x26, x16, x9
adcs x23, x23, x26
# A[2] * B[1]
mul x25, x19, x9
adcs x24, x24, x25
umulh x26, x19, x9
adcs x4, x4, x26
adc x5, x5, xzr
# A[1] * B[2]
mul x25, x17, x10
adds x24, x24, x25
umulh x26, x17, x10
adcs x4, x4, x26
adcs x5, x5, xzr
adc x6, xzr, xzr
# A[0] * B[2]
mul x25, x16, x10
adds x23, x23, x25
umulh x26, x16, x10
adcs x24, x24, x26
adcs x4, x4, xzr
adcs x5, x5, xzr
adc x6, x6, xzr
# A[1] * B[1]
mul x25, x17, x9
adds x23, x23, x25
umulh x26, x17, x9
adcs x24, x24, x26
# A[3] * B[1]
mul x25, x20, x9
adcs x4, x4, x25
umulh x26, x20, x9
adcs x5, x5, x26
adc x6, x6, xzr
# A[2] * B[2]
mul x25, x19, x10
adds x4, x4, x25
umulh x26, x19, x10
adcs x5, x5, x26
# A[3] * B[3]
mul x25, x20, x11
adcs x6, x6, x25
umulh x7, x20, x11
adc x7, x7, xzr
# A[0] * B[3]
mul x25, x16, x11
adds x24, x24, x25
umulh x26, x16, x11
adcs x4, x4, x26
# A[2] * B[3]
mul x25, x19, x11
adcs x5, x5, x25
umulh x26, x19, x11
adcs x6, x6, x26
adc x7, x7, xzr
# A[3] * B[0]
mul x25, x20, x8
adds x24, x24, x25
umulh x26, x20, x8
adcs x4, x4, x26
# A[3] * B[2]
mul x25, x20, x10
adcs x5, x5, x25
umulh x26, x20, x10
adcs x6, x6, x26
adc x7, x7, xzr
# Fold 512-bit product mod 2^255-19 (2^256==38, 2^255==19).
# Reduce
mov x25, #38
mul x26, x25, x7
adds x24, x24, x26
umulh x27, x25, x7
adc x27, x27, xzr
mov x25, #19
extr x27, x27, x24, #63
mul x27, x27, x25
and x24, x24, #0x7fffffffffffffff
mov x25, #38
mul x26, x25, x4
adds x21, x21, x26
umulh x4, x25, x4
mul x26, x25, x5
adcs x22, x22, x26
umulh x5, x25, x5
mul x26, x25, x6
adcs x23, x23, x26
umulh x6, x25, x6
adc x24, x24, xzr
# Add high product results in
adds x21, x21, x27
adcs x22, x22, x4
adcs x23, x23, x5
adc x24, x24, x6
# x4..x7 = (Y - X) * [q+0x00]
sub x2, x2, #32
add x1, x0, #32
add x0, x0, #32
# Multiply
ldp x16, x17, [x2]
ldp x19, x20, [x2, #16]
# A[0] * B[0]
umulh x5, x12, x16
mul x4, x12, x16
# A[2] * B[0]
umulh x7, x14, x16
mul x6, x14, x16
# A[1] * B[0]
mul x25, x13, x16
adds x5, x5, x25
umulh x26, x13, x16
adcs x6, x6, x26
# A[1] * B[3]
umulh x9, x13, x20
adc x7, x7, xzr
mul x8, x13, x20
# A[0] * B[1]
mul x25, x12, x17
adds x5, x5, x25
umulh x26, x12, x17
adcs x6, x6, x26
# A[2] * B[1]
mul x25, x14, x17
adcs x7, x7, x25
umulh x26, x14, x17
adcs x8, x8, x26
adc x9, x9, xzr
# A[1] * B[2]
mul x25, x13, x19
adds x7, x7, x25
umulh x26, x13, x19
adcs x8, x8, x26
adcs x9, x9, xzr
adc x10, xzr, xzr
# A[0] * B[2]
mul x25, x12, x19
adds x6, x6, x25
umulh x26, x12, x19
adcs x7, x7, x26
adcs x8, x8, xzr
adcs x9, x9, xzr
adc x10, x10, xzr
# A[1] * B[1]
mul x25, x13, x17
adds x6, x6, x25
umulh x26, x13, x17
adcs x7, x7, x26
# A[3] * B[1]
mul x25, x15, x17
adcs x8, x8, x25
umulh x26, x15, x17
adcs x9, x9, x26
adc x10, x10, xzr
# A[2] * B[2]
mul x25, x14, x19
adds x8, x8, x25
umulh x26, x14, x19
adcs x9, x9, x26
# A[3] * B[3]
mul x25, x15, x20
adcs x10, x10, x25
umulh x11, x15, x20
adc x11, x11, xzr
# A[0] * B[3]
mul x25, x12, x20
adds x7, x7, x25
umulh x26, x12, x20
adcs x8, x8, x26
# A[2] * B[3]
mul x25, x14, x20
adcs x9, x9, x25
umulh x26, x14, x20
adcs x10, x10, x26
adc x11, x11, xzr
# A[3] * B[0]
mul x25, x15, x16
adds x7, x7, x25
umulh x26, x15, x16
adcs x8, x8, x26
# A[3] * B[2]
mul x25, x15, x19
adcs x9, x9, x25
umulh x26, x15, x19
adcs x10, x10, x26
adc x11, x11, xzr
# Reduce
mov x25, #38
mul x26, x25, x11
adds x7, x7, x26
umulh x27, x25, x11
adc x27, x27, xzr
mov x25, #19
extr x27, x27, x7, #63
mul x27, x27, x25
and x7, x7, #0x7fffffffffffffff
mov x25, #38
mul x26, x25, x8
adds x4, x4, x26
umulh x8, x25, x8
mul x26, x25, x9
adcs x5, x5, x26
umulh x9, x25, x9
mul x26, x25, x10
adcs x6, x6, x26
umulh x10, x25, x10
adc x7, x7, xzr
# Add high product results in
adds x4, x4, x27
adcs x5, x5, x8
adcs x6, x6, x9
adc x7, x7, x10
# [r+0x20] = t0 + t1, [r+0x00] = t0 - t1
mov x3, x0
sub x2, x0, #32
sub x1, x0, #32
# Add
adds x8, x21, x4
adcs x9, x22, x5
adcs x10, x23, x6
adcs x11, x24, x7
cset x28, cs
mov x25, #19
extr x28, x28, x11, #63
mul x25, x28, x25
# Sub modulus (if overflow)
adds x8, x8, x25
adcs x9, x9, xzr
and x11, x11, #0x7fffffffffffffff
adcs x10, x10, xzr
adc x11, x11, xzr
# Sub
subs x12, x21, x4
sbcs x13, x22, x5
sbcs x14, x23, x6
sbcs x15, x24, x7
csetm x28, cc
mov x25, #-19
extr x28, x28, x15, #63
mul x25, x28, x25
# Add modulus (if underflow)
subs x12, x12, x25
sbcs x13, x13, xzr
and x15, x15, #0x7fffffffffffffff
sbcs x14, x14, xzr
sbc x15, x15, xzr
stp x8, x9, [x0]
stp x10, x11, [x0, #16]
stp x12, x13, [x1]
stp x14, x15, [x1, #16]
# x16,x17,x19,x20 = p->T * [q+0x40]
ldr x1, [x29, #24]
ldr x2, [x29, #32]
add x2, x2, #0x40
add x1, x1, #0x60
add x0, x0, #0x40
# Multiply
ldp x21, x22, [x1]
ldp x23, x24, [x1, #16]
ldp x4, x5, [x2]
ldp x6, x7, [x2, #16]
# A[0] * B[0]
umulh x17, x21, x4
mul x16, x21, x4
# A[2] * B[0]
umulh x20, x23, x4
mul x19, x23, x4
# A[1] * B[0]
mul x25, x22, x4
adds x17, x17, x25
umulh x26, x22, x4
adcs x19, x19, x26
# A[1] * B[3]
umulh x9, x22, x7
adc x20, x20, xzr
mul x8, x22, x7
# A[0] * B[1]
mul x25, x21, x5
adds x17, x17, x25
umulh x26, x21, x5
adcs x19, x19, x26
# A[2] * B[1]
mul x25, x23, x5
adcs x20, x20, x25
umulh x26, x23, x5
adcs x8, x8, x26
adc x9, x9, xzr
# A[1] * B[2]
mul x25, x22, x6
adds x20, x20, x25
umulh x26, x22, x6
adcs x8, x8, x26
adcs x9, x9, xzr
adc x10, xzr, xzr
# A[0] * B[2]
mul x25, x21, x6
adds x19, x19, x25
umulh x26, x21, x6
adcs x20, x20, x26
adcs x8, x8, xzr
adcs x9, x9, xzr
adc x10, x10, xzr
# A[1] * B[1]
mul x25, x22, x5
adds x19, x19, x25
umulh x26, x22, x5
adcs x20, x20, x26
# A[3] * B[1]
mul x25, x24, x5
adcs x8, x8, x25
umulh x26, x24, x5
adcs x9, x9, x26
adc x10, x10, xzr
# A[2] * B[2]
mul x25, x23, x6
adds x8, x8, x25
umulh x26, x23, x6
adcs x9, x9, x26
# A[3] * B[3]
mul x25, x24, x7
adcs x10, x10, x25
umulh x11, x24, x7
adc x11, x11, xzr
# A[0] * B[3]
mul x25, x21, x7
adds x20, x20, x25
umulh x26, x21, x7
adcs x8, x8, x26
# A[2] * B[3]
mul x25, x23, x7
adcs x9, x9, x25
umulh x26, x23, x7
adcs x10, x10, x26
adc x11, x11, xzr
# A[3] * B[0]
mul x25, x24, x4
adds x20, x20, x25
umulh x26, x24, x4
adcs x8, x8, x26
# A[3] * B[2]
mul x25, x24, x6
adcs x9, x9, x25
umulh x26, x24, x6
adcs x10, x10, x26
adc x11, x11, xzr
# Reduce
mov x25, #38
mul x26, x25, x11
adds x20, x20, x26
umulh x27, x25, x11
adc x27, x27, xzr
mov x25, #19
extr x27, x27, x20, #63
mul x27, x27, x25
and x20, x20, #0x7fffffffffffffff
mov x25, #38
mul x26, x25, x8
adds x16, x16, x26
umulh x8, x25, x8
mul x26, x25, x9
adcs x17, x17, x26
umulh x9, x25, x9
mul x26, x25, x10
adcs x19, x19, x26
umulh x10, x25, x10
adc x20, x20, xzr
# Add high product results in
adds x16, x16, x27
adcs x17, x17, x8
adcs x19, x19, x9
adc x20, x20, x10
# x12..x15 = 2 * p->Z, conditionally reduced via the sign of the top bit
sub x1, x1, #32
# Double
ldp x12, x13, [x1]
ldp x14, x15, [x1, #16]
adds x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adc x15, x15, x15
mov x25, #-19
asr x28, x15, #63
# Mask the modulus
and x25, x28, x25
and x26, x28, #0x7fffffffffffffff
# Sub modulus (if overflow)
subs x12, x12, x25
sbcs x13, x13, x28
sbcs x14, x14, x28
sbc x15, x15, x26
# [r+0x60] = 2Z + T*q2, [r+0x40] = 2Z - T*q2 (x0 not decremented here --
# sum/difference destinations are swapped relative to ge_madd)
mov x3, x0
sub x2, x0, #32
sub x1, x0, #32
# Add
adds x8, x12, x16
adcs x9, x13, x17
adcs x10, x14, x19
adcs x11, x15, x20
cset x28, cs
mov x25, #19
extr x28, x28, x11, #63
mul x25, x28, x25
# Sub modulus (if overflow)
adds x8, x8, x25
adcs x9, x9, xzr
and x11, x11, #0x7fffffffffffffff
adcs x10, x10, xzr
adc x11, x11, xzr
# Sub
subs x4, x12, x16
sbcs x5, x13, x17
sbcs x6, x14, x19
sbcs x7, x15, x20
csetm x28, cc
mov x25, #-19
extr x28, x28, x7, #63
mul x25, x28, x25
# Add modulus (if underflow)
subs x4, x4, x25
sbcs x5, x5, xzr
and x7, x7, #0x7fffffffffffffff
sbcs x6, x6, xzr
sbc x7, x7, xzr
stp x8, x9, [x0]
stp x10, x11, [x0, #16]
stp x4, x5, [x1]
stp x6, x7, [x1, #16]
# Restore callee-saved registers and the 0x90-byte frame.
ldr x17, [x29, #56]
ldr x19, [x29, #64]
ldp x20, x21, [x29, #72]
ldp x22, x23, [x29, #88]
ldp x24, x25, [x29, #104]
ldp x26, x27, [x29, #120]
ldr x28, [x29, #136]
ldp x29, x30, [sp], #0x90
ret
#ifndef __APPLE__
.size ge_msub,.-ge_msub
#endif /* __APPLE__ */
# Ed25519 point addition: r = p + q.
#   In: x0 = r (output), x1 = p, x2 = q; pointers saved at [x29,#16/#24/#32].
#   Field elements are 4 x 64-bit limbs modulo p = 2^255-19.  The constants
#   38 (= 2*19) and 19 fold overflow from 2^256 and bit 255 back into the
#   low limbs, keeping values below 2^255.
#   NOTE(review): operand struct layout (offsets +32, +0x60, +0x40) is assumed
#   to match the generating script's ge/fe types -- confirm against ge.c.
#ifndef __APPLE__
.text
.globl ge_add
.type ge_add,@function
.align 2
ge_add:
#else
.section __TEXT,__text
.globl _ge_add
.p2align 2
_ge_add:
#endif /* __APPLE__ */
    stp x29, x30, [sp, #-144]!
    add x29, sp, #0
    # Save callee-saved registers x19-x28 (and x17) in the frame.
    str x17, [x29, #56]
    str x19, [x29, #64]
    stp x20, x21, [x29, #72]
    stp x22, x23, [x29, #88]
    stp x24, x25, [x29, #104]
    stp x26, x27, [x29, #120]
    str x28, [x29, #136]
    # Spill the three argument pointers; x0-x2 are reused as work pointers.
    str x0, [x29, #16]
    str x1, [x29, #24]
    str x2, [x29, #32]
    mov x3, x1
    add x2, x1, #32
    add x1, x0, #32
    # Add
    # x16..x20 = [x2] + [x3] (mod p); x12..x15 = [x2] - [x3] (mod p).
    ldp x8, x9, [x2]
    ldp x10, x11, [x2, #16]
    ldp x4, x5, [x3]
    ldp x6, x7, [x3, #16]
    adds x16, x8, x4
    adcs x17, x9, x5
    adcs x19, x10, x6
    adcs x20, x11, x7
    cset x28, cs
    mov x25, #19
    # x28:bit63(x20) selects 0/1/2 multiples of 19 to fold back in.
    extr x28, x28, x20, #63
    mul x25, x28, x25
    # Sub modulus (if overflow)
    adds x16, x16, x25
    adcs x17, x17, xzr
    and x20, x20, #0x7fffffffffffffff
    adcs x19, x19, xzr
    adc x20, x20, xzr
    # Sub
    subs x12, x8, x4
    sbcs x13, x9, x5
    sbcs x14, x10, x6
    sbcs x15, x11, x7
    csetm x28, cc
    mov x25, #-19
    extr x28, x28, x15, #63
    mul x25, x28, x25
    # Add modulus (if underflow)
    subs x12, x12, x25
    sbcs x13, x13, xzr
    and x15, x15, #0x7fffffffffffffff
    sbcs x14, x14, xzr
    sbc x15, x15, xzr
    ldr x2, [x29, #32]
    mov x1, x0
    # Multiply
    # Schoolbook 4x4-limb multiply: (x16..x20) * [x2] -> 512-bit product in
    # x21,x22,x23,x24 (low) and x4,x5,x6,x7 (high).
    ldp x8, x9, [x2]
    ldp x10, x11, [x2, #16]
    # A[0] * B[0]
    umulh x22, x16, x8
    mul x21, x16, x8
    # A[2] * B[0]
    umulh x24, x19, x8
    mul x23, x19, x8
    # A[1] * B[0]
    mul x25, x17, x8
    adds x22, x22, x25
    umulh x26, x17, x8
    adcs x23, x23, x26
    # A[1] * B[3]
    umulh x5, x17, x11
    adc x24, x24, xzr
    mul x4, x17, x11
    # A[0] * B[1]
    mul x25, x16, x9
    adds x22, x22, x25
    umulh x26, x16, x9
    adcs x23, x23, x26
    # A[2] * B[1]
    mul x25, x19, x9
    adcs x24, x24, x25
    umulh x26, x19, x9
    adcs x4, x4, x26
    adc x5, x5, xzr
    # A[1] * B[2]
    mul x25, x17, x10
    adds x24, x24, x25
    umulh x26, x17, x10
    adcs x4, x4, x26
    adcs x5, x5, xzr
    adc x6, xzr, xzr
    # A[0] * B[2]
    mul x25, x16, x10
    adds x23, x23, x25
    umulh x26, x16, x10
    adcs x24, x24, x26
    adcs x4, x4, xzr
    adcs x5, x5, xzr
    adc x6, x6, xzr
    # A[1] * B[1]
    mul x25, x17, x9
    adds x23, x23, x25
    umulh x26, x17, x9
    adcs x24, x24, x26
    # A[3] * B[1]
    mul x25, x20, x9
    adcs x4, x4, x25
    umulh x26, x20, x9
    adcs x5, x5, x26
    adc x6, x6, xzr
    # A[2] * B[2]
    mul x25, x19, x10
    adds x4, x4, x25
    umulh x26, x19, x10
    adcs x5, x5, x26
    # A[3] * B[3]
    mul x25, x20, x11
    adcs x6, x6, x25
    umulh x7, x20, x11
    adc x7, x7, xzr
    # A[0] * B[3]
    mul x25, x16, x11
    adds x24, x24, x25
    umulh x26, x16, x11
    adcs x4, x4, x26
    # A[2] * B[3]
    mul x25, x19, x11
    adcs x5, x5, x25
    umulh x26, x19, x11
    adcs x6, x6, x26
    adc x7, x7, xzr
    # A[3] * B[0]
    mul x25, x20, x8
    adds x24, x24, x25
    umulh x26, x20, x8
    adcs x4, x4, x26
    # A[3] * B[2]
    mul x25, x20, x10
    adcs x5, x5, x25
    umulh x26, x20, x10
    adcs x6, x6, x26
    adc x7, x7, xzr
    # Reduce
    # Fold the 512-bit product back: 2^256 = 38 and 2^255 = 19 (mod p).
    mov x25, #38
    mul x26, x25, x7
    adds x24, x24, x26
    umulh x27, x25, x7
    adc x27, x27, xzr
    mov x25, #19
    extr x27, x27, x24, #63
    mul x27, x27, x25
    and x24, x24, #0x7fffffffffffffff
    mov x25, #38
    mul x26, x25, x4
    adds x21, x21, x26
    umulh x4, x25, x4
    mul x26, x25, x5
    adcs x22, x22, x26
    umulh x5, x25, x5
    mul x26, x25, x6
    adcs x23, x23, x26
    umulh x6, x25, x6
    adc x24, x24, xzr
    # Add high product results in
    adds x21, x21, x27
    adcs x22, x22, x4
    adcs x23, x23, x5
    adc x24, x24, x6
    # Store
    stp x21, x22, [x0]
    stp x23, x24, [x0, #16]
    add x2, x2, #32
    add x1, x0, #32
    add x0, x0, #32
    # Multiply
    # Second product: (x12..x15) * [x2] with the same schoolbook scheme.
    ldp x16, x17, [x2]
    ldp x19, x20, [x2, #16]
    # A[0] * B[0]
    umulh x5, x12, x16
    mul x4, x12, x16
    # A[2] * B[0]
    umulh x7, x14, x16
    mul x6, x14, x16
    # A[1] * B[0]
    mul x25, x13, x16
    adds x5, x5, x25
    umulh x26, x13, x16
    adcs x6, x6, x26
    # A[1] * B[3]
    umulh x9, x13, x20
    adc x7, x7, xzr
    mul x8, x13, x20
    # A[0] * B[1]
    mul x25, x12, x17
    adds x5, x5, x25
    umulh x26, x12, x17
    adcs x6, x6, x26
    # A[2] * B[1]
    mul x25, x14, x17
    adcs x7, x7, x25
    umulh x26, x14, x17
    adcs x8, x8, x26
    adc x9, x9, xzr
    # A[1] * B[2]
    mul x25, x13, x19
    adds x7, x7, x25
    umulh x26, x13, x19
    adcs x8, x8, x26
    adcs x9, x9, xzr
    adc x10, xzr, xzr
    # A[0] * B[2]
    mul x25, x12, x19
    adds x6, x6, x25
    umulh x26, x12, x19
    adcs x7, x7, x26
    adcs x8, x8, xzr
    adcs x9, x9, xzr
    adc x10, x10, xzr
    # A[1] * B[1]
    mul x25, x13, x17
    adds x6, x6, x25
    umulh x26, x13, x17
    adcs x7, x7, x26
    # A[3] * B[1]
    mul x25, x15, x17
    adcs x8, x8, x25
    umulh x26, x15, x17
    adcs x9, x9, x26
    adc x10, x10, xzr
    # A[2] * B[2]
    mul x25, x14, x19
    adds x8, x8, x25
    umulh x26, x14, x19
    adcs x9, x9, x26
    # A[3] * B[3]
    mul x25, x15, x20
    adcs x10, x10, x25
    umulh x11, x15, x20
    adc x11, x11, xzr
    # A[0] * B[3]
    mul x25, x12, x20
    adds x7, x7, x25
    umulh x26, x12, x20
    adcs x8, x8, x26
    # A[2] * B[3]
    mul x25, x14, x20
    adcs x9, x9, x25
    umulh x26, x14, x20
    adcs x10, x10, x26
    adc x11, x11, xzr
    # A[3] * B[0]
    mul x25, x15, x16
    adds x7, x7, x25
    umulh x26, x15, x16
    adcs x8, x8, x26
    # A[3] * B[2]
    mul x25, x15, x19
    adcs x9, x9, x25
    umulh x26, x15, x19
    adcs x10, x10, x26
    adc x11, x11, xzr
    # Reduce
    mov x25, #38
    mul x26, x25, x11
    adds x7, x7, x26
    umulh x27, x25, x11
    adc x27, x27, xzr
    mov x25, #19
    extr x27, x27, x7, #63
    mul x27, x27, x25
    and x7, x7, #0x7fffffffffffffff
    mov x25, #38
    mul x26, x25, x8
    adds x4, x4, x26
    umulh x8, x25, x8
    mul x26, x25, x9
    adcs x5, x5, x26
    umulh x9, x25, x9
    mul x26, x25, x10
    adcs x6, x6, x26
    umulh x10, x25, x10
    adc x7, x7, xzr
    # Add high product results in
    adds x4, x4, x27
    adcs x5, x5, x8
    adcs x6, x6, x9
    adc x7, x7, x10
    # Store
    stp x4, x5, [x0]
    stp x6, x7, [x0, #16]
    mov x3, x0
    sub x2, x0, #32
    sub x1, x0, #32
    # Add
    # Sum and difference of the two products above (still mod p).
    adds x8, x21, x4
    adcs x9, x22, x5
    adcs x10, x23, x6
    adcs x11, x24, x7
    cset x28, cs
    mov x25, #19
    extr x28, x28, x11, #63
    mul x25, x28, x25
    # Sub modulus (if overflow)
    adds x8, x8, x25
    adcs x9, x9, xzr
    and x11, x11, #0x7fffffffffffffff
    adcs x10, x10, xzr
    adc x11, x11, xzr
    # Sub
    subs x12, x21, x4
    sbcs x13, x22, x5
    sbcs x14, x23, x6
    sbcs x15, x24, x7
    csetm x28, cc
    mov x25, #-19
    extr x28, x28, x15, #63
    mul x25, x28, x25
    # Add modulus (if underflow)
    subs x12, x12, x25
    sbcs x13, x13, xzr
    and x15, x15, #0x7fffffffffffffff
    sbcs x14, x14, xzr
    sbc x15, x15, xzr
    stp x8, x9, [x0]
    stp x10, x11, [x0, #16]
    stp x12, x13, [x1]
    stp x14, x15, [x1, #16]
    # Reload original p/q pointers for the third field element pair.
    ldr x1, [x29, #24]
    ldr x2, [x29, #32]
    add x2, x2, #0x60
    add x1, x1, #0x60
    add x0, x0, #0x40
    # Multiply
    ldp x21, x22, [x1]
    ldp x23, x24, [x1, #16]
    ldp x4, x5, [x2]
    ldp x6, x7, [x2, #16]
    # A[0] * B[0]
    umulh x17, x21, x4
    mul x16, x21, x4
    # A[2] * B[0]
    umulh x20, x23, x4
    mul x19, x23, x4
    # A[1] * B[0]
    mul x25, x22, x4
    adds x17, x17, x25
    umulh x26, x22, x4
    adcs x19, x19, x26
    # A[1] * B[3]
    umulh x9, x22, x7
    adc x20, x20, xzr
    mul x8, x22, x7
    # A[0] * B[1]
    mul x25, x21, x5
    adds x17, x17, x25
    umulh x26, x21, x5
    adcs x19, x19, x26
    # A[2] * B[1]
    mul x25, x23, x5
    adcs x20, x20, x25
    umulh x26, x23, x5
    adcs x8, x8, x26
    adc x9, x9, xzr
    # A[1] * B[2]
    mul x25, x22, x6
    adds x20, x20, x25
    umulh x26, x22, x6
    adcs x8, x8, x26
    adcs x9, x9, xzr
    adc x10, xzr, xzr
    # A[0] * B[2]
    mul x25, x21, x6
    adds x19, x19, x25
    umulh x26, x21, x6
    adcs x20, x20, x26
    adcs x8, x8, xzr
    adcs x9, x9, xzr
    adc x10, x10, xzr
    # A[1] * B[1]
    mul x25, x22, x5
    adds x19, x19, x25
    umulh x26, x22, x5
    adcs x20, x20, x26
    # A[3] * B[1]
    mul x25, x24, x5
    adcs x8, x8, x25
    umulh x26, x24, x5
    adcs x9, x9, x26
    adc x10, x10, xzr
    # A[2] * B[2]
    mul x25, x23, x6
    adds x8, x8, x25
    umulh x26, x23, x6
    adcs x9, x9, x26
    # A[3] * B[3]
    mul x25, x24, x7
    adcs x10, x10, x25
    umulh x11, x24, x7
    adc x11, x11, xzr
    # A[0] * B[3]
    mul x25, x21, x7
    adds x20, x20, x25
    umulh x26, x21, x7
    adcs x8, x8, x26
    # A[2] * B[3]
    mul x25, x23, x7
    adcs x9, x9, x25
    umulh x26, x23, x7
    adcs x10, x10, x26
    adc x11, x11, xzr
    # A[3] * B[0]
    mul x25, x24, x4
    adds x20, x20, x25
    umulh x26, x24, x4
    adcs x8, x8, x26
    # A[3] * B[2]
    mul x25, x24, x6
    adcs x9, x9, x25
    umulh x26, x24, x6
    adcs x10, x10, x26
    adc x11, x11, xzr
    # Reduce
    mov x25, #38
    mul x26, x25, x11
    adds x20, x20, x26
    umulh x27, x25, x11
    adc x27, x27, xzr
    mov x25, #19
    extr x27, x27, x20, #63
    mul x27, x27, x25
    and x20, x20, #0x7fffffffffffffff
    mov x25, #38
    mul x26, x25, x8
    adds x16, x16, x26
    umulh x8, x25, x8
    mul x26, x25, x9
    adcs x17, x17, x26
    umulh x9, x25, x9
    mul x26, x25, x10
    adcs x19, x19, x26
    umulh x10, x25, x10
    adc x20, x20, xzr
    # Add high product results in
    adds x16, x16, x27
    adcs x17, x17, x8
    adcs x19, x19, x9
    adc x20, x20, x10
    # Store
    stp x16, x17, [x0]
    stp x19, x20, [x0, #16]
    sub x3, x2, #32
    sub x2, x1, #32
    sub x1, x0, #32
    # Multiply
    ldp x4, x5, [x2]
    ldp x6, x7, [x2, #16]
    ldp x12, x13, [x3]
    ldp x14, x15, [x3, #16]
    # A[0] * B[0]
    umulh x9, x4, x12
    mul x8, x4, x12
    # A[2] * B[0]
    umulh x11, x6, x12
    mul x10, x6, x12
    # A[1] * B[0]
    mul x25, x5, x12
    adds x9, x9, x25
    umulh x26, x5, x12
    adcs x10, x10, x26
    # A[1] * B[3]
    umulh x17, x5, x15
    adc x11, x11, xzr
    mul x16, x5, x15
    # A[0] * B[1]
    mul x25, x4, x13
    adds x9, x9, x25
    umulh x26, x4, x13
    adcs x10, x10, x26
    # A[2] * B[1]
    mul x25, x6, x13
    adcs x11, x11, x25
    umulh x26, x6, x13
    adcs x16, x16, x26
    adc x17, x17, xzr
    # A[1] * B[2]
    mul x25, x5, x14
    adds x11, x11, x25
    umulh x26, x5, x14
    adcs x16, x16, x26
    adcs x17, x17, xzr
    adc x19, xzr, xzr
    # A[0] * B[2]
    mul x25, x4, x14
    adds x10, x10, x25
    umulh x26, x4, x14
    adcs x11, x11, x26
    adcs x16, x16, xzr
    adcs x17, x17, xzr
    adc x19, x19, xzr
    # A[1] * B[1]
    mul x25, x5, x13
    adds x10, x10, x25
    umulh x26, x5, x13
    adcs x11, x11, x26
    # A[3] * B[1]
    mul x25, x7, x13
    adcs x16, x16, x25
    umulh x26, x7, x13
    adcs x17, x17, x26
    adc x19, x19, xzr
    # A[2] * B[2]
    mul x25, x6, x14
    adds x16, x16, x25
    umulh x26, x6, x14
    adcs x17, x17, x26
    # A[3] * B[3]
    mul x25, x7, x15
    adcs x19, x19, x25
    umulh x20, x7, x15
    adc x20, x20, xzr
    # A[0] * B[3]
    mul x25, x4, x15
    adds x11, x11, x25
    umulh x26, x4, x15
    adcs x16, x16, x26
    # A[2] * B[3]
    mul x25, x6, x15
    adcs x17, x17, x25
    umulh x26, x6, x15
    adcs x19, x19, x26
    adc x20, x20, xzr
    # A[3] * B[0]
    mul x25, x7, x12
    adds x11, x11, x25
    umulh x26, x7, x12
    adcs x16, x16, x26
    # A[3] * B[2]
    mul x25, x7, x14
    adcs x17, x17, x25
    umulh x26, x7, x14
    adcs x19, x19, x26
    adc x20, x20, xzr
    # Reduce
    mov x25, #38
    mul x26, x25, x20
    adds x11, x11, x26
    umulh x27, x25, x20
    adc x27, x27, xzr
    mov x25, #19
    extr x27, x27, x11, #63
    mul x27, x27, x25
    and x11, x11, #0x7fffffffffffffff
    mov x25, #38
    mul x26, x25, x16
    adds x8, x8, x26
    umulh x16, x25, x16
    mul x26, x25, x17
    adcs x9, x9, x26
    umulh x17, x25, x17
    mul x26, x25, x19
    adcs x10, x10, x26
    umulh x19, x25, x19
    adc x11, x11, xzr
    # Add high product results in
    adds x8, x8, x27
    adcs x9, x9, x16
    adcs x10, x10, x17
    adc x11, x11, x19
    # Double
    adds x8, x8, x8
    adcs x9, x9, x9
    adcs x10, x10, x10
    adc x11, x11, x11
    mov x25, #-19
    # x28 = all-ones when the doubled value's top bit is set.
    asr x28, x11, #63
    # Mask the modulus
    and x25, x28, x25
    and x26, x28, #0x7fffffffffffffff
    # Sub modulus (if overflow)
    subs x8, x8, x25
    sbcs x9, x9, x28
    sbcs x10, x10, x28
    sbc x11, x11, x26
    mov x3, x0
    sub x2, x0, #32
    mov x1, x0
    sub x0, x0, #32
    # Add
    ldp x4, x5, [x3]
    ldp x6, x7, [x3, #16]
    adds x21, x8, x4
    adcs x22, x9, x5
    adcs x23, x10, x6
    adcs x24, x11, x7
    cset x28, cs
    mov x25, #19
    extr x28, x28, x24, #63
    mul x25, x28, x25
    # Sub modulus (if overflow)
    adds x21, x21, x25
    adcs x22, x22, xzr
    and x24, x24, #0x7fffffffffffffff
    adcs x23, x23, xzr
    adc x24, x24, xzr
    # Sub
    subs x12, x8, x4
    sbcs x13, x9, x5
    sbcs x14, x10, x6
    sbcs x15, x11, x7
    csetm x28, cc
    mov x25, #-19
    extr x28, x28, x15, #63
    mul x25, x28, x25
    # Add modulus (if underflow)
    subs x12, x12, x25
    sbcs x13, x13, xzr
    and x15, x15, #0x7fffffffffffffff
    sbcs x14, x14, xzr
    sbc x15, x15, xzr
    stp x21, x22, [x0]
    stp x23, x24, [x0, #16]
    stp x12, x13, [x1]
    stp x14, x15, [x1, #16]
    # Restore callee-saved registers and return.
    ldr x17, [x29, #56]
    ldr x19, [x29, #64]
    ldp x20, x21, [x29, #72]
    ldp x22, x23, [x29, #88]
    ldp x24, x25, [x29, #104]
    ldp x26, x27, [x29, #120]
    ldr x28, [x29, #136]
    ldp x29, x30, [sp], #0x90
    ret
#ifndef __APPLE__
.size ge_add,.-ge_add
#endif /* __APPLE__ */
# Ed25519 point subtraction: r = p - q.
#   In: x0 = r (output), x1 = p, x2 = q; pointers saved at [x29,#16/#24/#32].
#   Same 4x64-bit-limb mod 2^255-19 arithmetic as ge_add; differs from ge_add
#   in which of q's cached fields multiply the sum/difference (note the
#   "add x2, x2, #32" before the first multiply and the extra "Reduce if top
#   bit set" normalisation steps).
#   NOTE(review): operand struct layout (offsets +32, +0x60, +0x40) is assumed
#   to match the generating script's ge/fe types -- confirm against ge.c.
#ifndef __APPLE__
.text
.globl ge_sub
.type ge_sub,@function
.align 2
ge_sub:
#else
.section __TEXT,__text
.globl _ge_sub
.p2align 2
_ge_sub:
#endif /* __APPLE__ */
    stp x29, x30, [sp, #-144]!
    add x29, sp, #0
    # Save callee-saved registers x19-x28 (and x17) in the frame.
    str x17, [x29, #56]
    str x19, [x29, #64]
    stp x20, x21, [x29, #72]
    stp x22, x23, [x29, #88]
    stp x24, x25, [x29, #104]
    stp x26, x27, [x29, #120]
    str x28, [x29, #136]
    str x0, [x29, #16]
    str x1, [x29, #24]
    str x2, [x29, #32]
    mov x3, x1
    add x2, x1, #32
    add x1, x0, #32
    # Add
    # x16..x20 = [x2] + [x3] (mod p); x12..x15 = [x2] - [x3] (mod p).
    ldp x8, x9, [x2]
    ldp x10, x11, [x2, #16]
    ldp x4, x5, [x3]
    ldp x6, x7, [x3, #16]
    adds x16, x8, x4
    adcs x17, x9, x5
    adcs x19, x10, x6
    adcs x20, x11, x7
    cset x28, cs
    mov x25, #19
    extr x28, x28, x20, #63
    mul x25, x28, x25
    # Sub modulus (if overflow)
    adds x16, x16, x25
    adcs x17, x17, xzr
    and x20, x20, #0x7fffffffffffffff
    adcs x19, x19, xzr
    adc x20, x20, xzr
    # Sub
    subs x12, x8, x4
    sbcs x13, x9, x5
    sbcs x14, x10, x6
    sbcs x15, x11, x7
    csetm x28, cc
    mov x25, #-19
    extr x28, x28, x15, #63
    mul x25, x28, x25
    # Add modulus (if underflow)
    subs x12, x12, x25
    sbcs x13, x13, xzr
    and x15, x15, #0x7fffffffffffffff
    sbcs x14, x14, xzr
    sbc x15, x15, xzr
    # For subtraction, start from q's second cached field (+32).
    ldr x2, [x29, #32]
    add x2, x2, #32
    mov x1, x0
    # Multiply
    ldp x8, x9, [x2]
    ldp x10, x11, [x2, #16]
    # A[0] * B[0]
    umulh x22, x16, x8
    mul x21, x16, x8
    # A[2] * B[0]
    umulh x24, x19, x8
    mul x23, x19, x8
    # A[1] * B[0]
    mul x25, x17, x8
    adds x22, x22, x25
    umulh x26, x17, x8
    adcs x23, x23, x26
    # A[1] * B[3]
    umulh x5, x17, x11
    adc x24, x24, xzr
    mul x4, x17, x11
    # A[0] * B[1]
    mul x25, x16, x9
    adds x22, x22, x25
    umulh x26, x16, x9
    adcs x23, x23, x26
    # A[2] * B[1]
    mul x25, x19, x9
    adcs x24, x24, x25
    umulh x26, x19, x9
    adcs x4, x4, x26
    adc x5, x5, xzr
    # A[1] * B[2]
    mul x25, x17, x10
    adds x24, x24, x25
    umulh x26, x17, x10
    adcs x4, x4, x26
    adcs x5, x5, xzr
    adc x6, xzr, xzr
    # A[0] * B[2]
    mul x25, x16, x10
    adds x23, x23, x25
    umulh x26, x16, x10
    adcs x24, x24, x26
    adcs x4, x4, xzr
    adcs x5, x5, xzr
    adc x6, x6, xzr
    # A[1] * B[1]
    mul x25, x17, x9
    adds x23, x23, x25
    umulh x26, x17, x9
    adcs x24, x24, x26
    # A[3] * B[1]
    mul x25, x20, x9
    adcs x4, x4, x25
    umulh x26, x20, x9
    adcs x5, x5, x26
    adc x6, x6, xzr
    # A[2] * B[2]
    mul x25, x19, x10
    adds x4, x4, x25
    umulh x26, x19, x10
    adcs x5, x5, x26
    # A[3] * B[3]
    mul x25, x20, x11
    adcs x6, x6, x25
    umulh x7, x20, x11
    adc x7, x7, xzr
    # A[0] * B[3]
    mul x25, x16, x11
    adds x24, x24, x25
    umulh x26, x16, x11
    adcs x4, x4, x26
    # A[2] * B[3]
    mul x25, x19, x11
    adcs x5, x5, x25
    umulh x26, x19, x11
    adcs x6, x6, x26
    adc x7, x7, xzr
    # A[3] * B[0]
    mul x25, x20, x8
    adds x24, x24, x25
    umulh x26, x20, x8
    adcs x4, x4, x26
    # A[3] * B[2]
    mul x25, x20, x10
    adcs x5, x5, x25
    umulh x26, x20, x10
    adcs x6, x6, x26
    adc x7, x7, xzr
    # Reduce
    # Fold the 512-bit product back: 2^256 = 38 and 2^255 = 19 (mod p).
    mov x25, #38
    mul x26, x25, x7
    adds x24, x24, x26
    umulh x27, x25, x7
    adc x27, x27, xzr
    mov x25, #19
    extr x27, x27, x24, #63
    mul x27, x27, x25
    and x24, x24, #0x7fffffffffffffff
    mov x25, #38
    mul x26, x25, x4
    adds x21, x21, x26
    umulh x4, x25, x4
    mul x26, x25, x5
    adcs x22, x22, x26
    umulh x5, x25, x5
    mul x26, x25, x6
    adcs x23, x23, x26
    umulh x6, x25, x6
    adc x24, x24, xzr
    # Add high product results in
    adds x21, x21, x27
    adcs x22, x22, x4
    adcs x23, x23, x5
    adc x24, x24, x6
    # Reduce if top bit set
    mov x25, #19
    and x26, x25, x24, asr 63
    adds x21, x21, x26
    adcs x22, x22, xzr
    and x24, x24, #0x7fffffffffffffff
    adcs x23, x23, xzr
    adc x24, x24, xzr
    # Store
    stp x21, x22, [x0]
    stp x23, x24, [x0, #16]
    # Step back to q's first cached field for the second multiply.
    sub x2, x2, #32
    add x1, x0, #32
    add x0, x0, #32
    # Multiply
    ldp x16, x17, [x2]
    ldp x19, x20, [x2, #16]
    # A[0] * B[0]
    umulh x5, x12, x16
    mul x4, x12, x16
    # A[2] * B[0]
    umulh x7, x14, x16
    mul x6, x14, x16
    # A[1] * B[0]
    mul x25, x13, x16
    adds x5, x5, x25
    umulh x26, x13, x16
    adcs x6, x6, x26
    # A[1] * B[3]
    umulh x9, x13, x20
    adc x7, x7, xzr
    mul x8, x13, x20
    # A[0] * B[1]
    mul x25, x12, x17
    adds x5, x5, x25
    umulh x26, x12, x17
    adcs x6, x6, x26
    # A[2] * B[1]
    mul x25, x14, x17
    adcs x7, x7, x25
    umulh x26, x14, x17
    adcs x8, x8, x26
    adc x9, x9, xzr
    # A[1] * B[2]
    mul x25, x13, x19
    adds x7, x7, x25
    umulh x26, x13, x19
    adcs x8, x8, x26
    adcs x9, x9, xzr
    adc x10, xzr, xzr
    # A[0] * B[2]
    mul x25, x12, x19
    adds x6, x6, x25
    umulh x26, x12, x19
    adcs x7, x7, x26
    adcs x8, x8, xzr
    adcs x9, x9, xzr
    adc x10, x10, xzr
    # A[1] * B[1]
    mul x25, x13, x17
    adds x6, x6, x25
    umulh x26, x13, x17
    adcs x7, x7, x26
    # A[3] * B[1]
    mul x25, x15, x17
    adcs x8, x8, x25
    umulh x26, x15, x17
    adcs x9, x9, x26
    adc x10, x10, xzr
    # A[2] * B[2]
    mul x25, x14, x19
    adds x8, x8, x25
    umulh x26, x14, x19
    adcs x9, x9, x26
    # A[3] * B[3]
    mul x25, x15, x20
    adcs x10, x10, x25
    umulh x11, x15, x20
    adc x11, x11, xzr
    # A[0] * B[3]
    mul x25, x12, x20
    adds x7, x7, x25
    umulh x26, x12, x20
    adcs x8, x8, x26
    # A[2] * B[3]
    mul x25, x14, x20
    adcs x9, x9, x25
    umulh x26, x14, x20
    adcs x10, x10, x26
    adc x11, x11, xzr
    # A[3] * B[0]
    mul x25, x15, x16
    adds x7, x7, x25
    umulh x26, x15, x16
    adcs x8, x8, x26
    # A[3] * B[2]
    mul x25, x15, x19
    adcs x9, x9, x25
    umulh x26, x15, x19
    adcs x10, x10, x26
    adc x11, x11, xzr
    # Reduce
    mov x25, #38
    mul x26, x25, x11
    adds x7, x7, x26
    umulh x27, x25, x11
    adc x27, x27, xzr
    mov x25, #19
    extr x27, x27, x7, #63
    mul x27, x27, x25
    and x7, x7, #0x7fffffffffffffff
    mov x25, #38
    mul x26, x25, x8
    adds x4, x4, x26
    umulh x8, x25, x8
    mul x26, x25, x9
    adcs x5, x5, x26
    umulh x9, x25, x9
    mul x26, x25, x10
    adcs x6, x6, x26
    umulh x10, x25, x10
    adc x7, x7, xzr
    # Add high product results in
    adds x4, x4, x27
    adcs x5, x5, x8
    adcs x6, x6, x9
    adc x7, x7, x10
    # Store
    stp x4, x5, [x0]
    stp x6, x7, [x0, #16]
    mov x3, x0
    sub x2, x0, #32
    sub x1, x0, #32
    # Add
    # Sum and difference of the two products above (still mod p).
    adds x8, x21, x4
    adcs x9, x22, x5
    adcs x10, x23, x6
    adcs x11, x24, x7
    cset x28, cs
    mov x25, #19
    extr x28, x28, x11, #63
    mul x25, x28, x25
    # Sub modulus (if overflow)
    adds x8, x8, x25
    adcs x9, x9, xzr
    and x11, x11, #0x7fffffffffffffff
    adcs x10, x10, xzr
    adc x11, x11, xzr
    # Sub
    subs x12, x21, x4
    sbcs x13, x22, x5
    sbcs x14, x23, x6
    sbcs x15, x24, x7
    csetm x28, cc
    mov x25, #-19
    extr x28, x28, x15, #63
    mul x25, x28, x25
    # Add modulus (if underflow)
    subs x12, x12, x25
    sbcs x13, x13, xzr
    and x15, x15, #0x7fffffffffffffff
    sbcs x14, x14, xzr
    sbc x15, x15, xzr
    stp x8, x9, [x0]
    stp x10, x11, [x0, #16]
    stp x12, x13, [x1]
    stp x14, x15, [x1, #16]
    # Reload original p/q pointers for the third field element pair.
    ldr x1, [x29, #24]
    ldr x2, [x29, #32]
    add x2, x2, #0x60
    add x1, x1, #0x60
    add x0, x0, #0x40
    # Multiply
    ldp x21, x22, [x1]
    ldp x23, x24, [x1, #16]
    ldp x4, x5, [x2]
    ldp x6, x7, [x2, #16]
    # A[0] * B[0]
    umulh x17, x21, x4
    mul x16, x21, x4
    # A[2] * B[0]
    umulh x20, x23, x4
    mul x19, x23, x4
    # A[1] * B[0]
    mul x25, x22, x4
    adds x17, x17, x25
    umulh x26, x22, x4
    adcs x19, x19, x26
    # A[1] * B[3]
    umulh x9, x22, x7
    adc x20, x20, xzr
    mul x8, x22, x7
    # A[0] * B[1]
    mul x25, x21, x5
    adds x17, x17, x25
    umulh x26, x21, x5
    adcs x19, x19, x26
    # A[2] * B[1]
    mul x25, x23, x5
    adcs x20, x20, x25
    umulh x26, x23, x5
    adcs x8, x8, x26
    adc x9, x9, xzr
    # A[1] * B[2]
    mul x25, x22, x6
    adds x20, x20, x25
    umulh x26, x22, x6
    adcs x8, x8, x26
    adcs x9, x9, xzr
    adc x10, xzr, xzr
    # A[0] * B[2]
    mul x25, x21, x6
    adds x19, x19, x25
    umulh x26, x21, x6
    adcs x20, x20, x26
    adcs x8, x8, xzr
    adcs x9, x9, xzr
    adc x10, x10, xzr
    # A[1] * B[1]
    mul x25, x22, x5
    adds x19, x19, x25
    umulh x26, x22, x5
    adcs x20, x20, x26
    # A[3] * B[1]
    mul x25, x24, x5
    adcs x8, x8, x25
    umulh x26, x24, x5
    adcs x9, x9, x26
    adc x10, x10, xzr
    # A[2] * B[2]
    mul x25, x23, x6
    adds x8, x8, x25
    umulh x26, x23, x6
    adcs x9, x9, x26
    # A[3] * B[3]
    mul x25, x24, x7
    adcs x10, x10, x25
    umulh x11, x24, x7
    adc x11, x11, xzr
    # A[0] * B[3]
    mul x25, x21, x7
    adds x20, x20, x25
    umulh x26, x21, x7
    adcs x8, x8, x26
    # A[2] * B[3]
    mul x25, x23, x7
    adcs x9, x9, x25
    umulh x26, x23, x7
    adcs x10, x10, x26
    adc x11, x11, xzr
    # A[3] * B[0]
    mul x25, x24, x4
    adds x20, x20, x25
    umulh x26, x24, x4
    adcs x8, x8, x26
    # A[3] * B[2]
    mul x25, x24, x6
    adcs x9, x9, x25
    umulh x26, x24, x6
    adcs x10, x10, x26
    adc x11, x11, xzr
    # Reduce
    mov x25, #38
    mul x26, x25, x11
    adds x20, x20, x26
    umulh x27, x25, x11
    adc x27, x27, xzr
    mov x25, #19
    extr x27, x27, x20, #63
    mul x27, x27, x25
    and x20, x20, #0x7fffffffffffffff
    mov x25, #38
    mul x26, x25, x8
    adds x16, x16, x26
    umulh x8, x25, x8
    mul x26, x25, x9
    adcs x17, x17, x26
    umulh x9, x25, x9
    mul x26, x25, x10
    adcs x19, x19, x26
    umulh x10, x25, x10
    adc x20, x20, xzr
    # Add high product results in
    adds x16, x16, x27
    adcs x17, x17, x8
    adcs x19, x19, x9
    adc x20, x20, x10
    # Reduce if top bit set
    mov x25, #19
    and x26, x25, x20, asr 63
    adds x16, x16, x26
    adcs x17, x17, xzr
    and x20, x20, #0x7fffffffffffffff
    adcs x19, x19, xzr
    adc x20, x20, xzr
    # Store
    stp x16, x17, [x0]
    stp x19, x20, [x0, #16]
    sub x3, x2, #32
    sub x2, x1, #32
    sub x1, x0, #32
    # Multiply
    ldp x4, x5, [x2]
    ldp x6, x7, [x2, #16]
    ldp x12, x13, [x3]
    ldp x14, x15, [x3, #16]
    # A[0] * B[0]
    umulh x9, x4, x12
    mul x8, x4, x12
    # A[2] * B[0]
    umulh x11, x6, x12
    mul x10, x6, x12
    # A[1] * B[0]
    mul x25, x5, x12
    adds x9, x9, x25
    umulh x26, x5, x12
    adcs x10, x10, x26
    # A[1] * B[3]
    umulh x17, x5, x15
    adc x11, x11, xzr
    mul x16, x5, x15
    # A[0] * B[1]
    mul x25, x4, x13
    adds x9, x9, x25
    umulh x26, x4, x13
    adcs x10, x10, x26
    # A[2] * B[1]
    mul x25, x6, x13
    adcs x11, x11, x25
    umulh x26, x6, x13
    adcs x16, x16, x26
    adc x17, x17, xzr
    # A[1] * B[2]
    mul x25, x5, x14
    adds x11, x11, x25
    umulh x26, x5, x14
    adcs x16, x16, x26
    adcs x17, x17, xzr
    adc x19, xzr, xzr
    # A[0] * B[2]
    mul x25, x4, x14
    adds x10, x10, x25
    umulh x26, x4, x14
    adcs x11, x11, x26
    adcs x16, x16, xzr
    adcs x17, x17, xzr
    adc x19, x19, xzr
    # A[1] * B[1]
    mul x25, x5, x13
    adds x10, x10, x25
    umulh x26, x5, x13
    adcs x11, x11, x26
    # A[3] * B[1]
    mul x25, x7, x13
    adcs x16, x16, x25
    umulh x26, x7, x13
    adcs x17, x17, x26
    adc x19, x19, xzr
    # A[2] * B[2]
    mul x25, x6, x14
    adds x16, x16, x25
    umulh x26, x6, x14
    adcs x17, x17, x26
    # A[3] * B[3]
    mul x25, x7, x15
    adcs x19, x19, x25
    umulh x20, x7, x15
    adc x20, x20, xzr
    # A[0] * B[3]
    mul x25, x4, x15
    adds x11, x11, x25
    umulh x26, x4, x15
    adcs x16, x16, x26
    # A[2] * B[3]
    mul x25, x6, x15
    adcs x17, x17, x25
    umulh x26, x6, x15
    adcs x19, x19, x26
    adc x20, x20, xzr
    # A[3] * B[0]
    mul x25, x7, x12
    adds x11, x11, x25
    umulh x26, x7, x12
    adcs x16, x16, x26
    # A[3] * B[2]
    mul x25, x7, x14
    adcs x17, x17, x25
    umulh x26, x7, x14
    adcs x19, x19, x26
    adc x20, x20, xzr
    # Reduce
    mov x25, #38
    mul x26, x25, x20
    adds x11, x11, x26
    umulh x27, x25, x20
    adc x27, x27, xzr
    mov x25, #19
    extr x27, x27, x11, #63
    mul x27, x27, x25
    and x11, x11, #0x7fffffffffffffff
    mov x25, #38
    mul x26, x25, x16
    adds x8, x8, x26
    umulh x16, x25, x16
    mul x26, x25, x17
    adcs x9, x9, x26
    umulh x17, x25, x17
    mul x26, x25, x19
    adcs x10, x10, x26
    umulh x19, x25, x19
    adc x11, x11, xzr
    # Add high product results in
    adds x8, x8, x27
    adcs x9, x9, x16
    adcs x10, x10, x17
    adc x11, x11, x19
    # Double
    adds x8, x8, x8
    adcs x9, x9, x9
    adcs x10, x10, x10
    adc x11, x11, x11
    mov x25, #-19
    # x28 = all-ones when the doubled value's top bit is set.
    asr x28, x11, #63
    # Mask the modulus
    and x25, x28, x25
    and x26, x28, #0x7fffffffffffffff
    # Sub modulus (if overflow)
    subs x8, x8, x25
    sbcs x9, x9, x28
    sbcs x10, x10, x28
    sbc x11, x11, x26
    mov x3, x0
    sub x2, x0, #32
    # Add
    ldp x4, x5, [x3]
    ldp x6, x7, [x3, #16]
    adds x12, x8, x4
    adcs x13, x9, x5
    adcs x14, x10, x6
    adcs x15, x11, x7
    cset x28, cs
    mov x25, #19
    extr x28, x28, x15, #63
    mul x25, x28, x25
    # Sub modulus (if overflow)
    adds x12, x12, x25
    adcs x13, x13, xzr
    and x15, x15, #0x7fffffffffffffff
    adcs x14, x14, xzr
    adc x15, x15, xzr
    # Sub
    subs x21, x8, x4
    sbcs x22, x9, x5
    sbcs x23, x10, x6
    sbcs x24, x11, x7
    csetm x28, cc
    mov x25, #-19
    extr x28, x28, x24, #63
    mul x25, x28, x25
    # Add modulus (if underflow)
    subs x21, x21, x25
    sbcs x22, x22, xzr
    and x24, x24, #0x7fffffffffffffff
    sbcs x23, x23, xzr
    sbc x24, x24, xzr
    stp x12, x13, [x0]
    stp x14, x15, [x0, #16]
    stp x21, x22, [x1]
    stp x23, x24, [x1, #16]
    # Restore callee-saved registers and return.
    ldr x17, [x29, #56]
    ldr x19, [x29, #64]
    ldp x20, x21, [x29, #72]
    ldp x22, x23, [x29, #88]
    ldp x24, x25, [x29, #104]
    ldp x26, x27, [x29, #120]
    ldr x28, [x29, #136]
    ldp x29, x30, [sp], #0x90
    ret
#ifndef __APPLE__
.size ge_sub,.-ge_sub
#endif /* __APPLE__ */
# sc_reduce(s): reduce a 512-bit scalar in-place modulo the Ed25519 group
# order L = 2^252 + 27742317777372353535851937790883648493.
#   In:  x0 -> 64-byte little-endian value (8 x 64-bit limbs).
#   Out: first 32 bytes of [x0] hold the value mod L.
#   The constants 0xa7ed9ce5a30a2c13 / 0xeb2106215d086329 are the low two
#   64-bit words of -L (mod 2^256), i.e. the negations of L's low words
#   0x5812631a5cf5d3ed / 0x14def9dea2f79cd7 used in the final step.
#ifndef __APPLE__
.text
.globl sc_reduce
.type sc_reduce,@function
.align 2
sc_reduce:
#else
.section __TEXT,__text
.globl _sc_reduce
.p2align 2
_sc_reduce:
#endif /* __APPLE__ */
    stp x29, x30, [sp, #-64]!
    add x29, sp, #0
    # Save callee-saved registers used below.
    str x17, [x29, #16]
    str x19, [x29, #24]
    stp x20, x21, [x29, #32]
    stp x22, x23, [x29, #48]
    # Load the full 512-bit input into x2..x9.
    ldp x2, x3, [x0]
    ldp x4, x5, [x0, #16]
    ldp x6, x7, [x0, #32]
    ldp x8, x9, [x0, #48]
    # Realign limbs 4..7 so they represent bits 252..507 (shift left by 4),
    # with x23 holding bits 504..511.
    lsr x23, x9, #56
    lsl x9, x9, #4
    orr x9, x9, x8, lsr 60
    lsl x8, x8, #4
    orr x8, x8, x7, lsr 60
    lsl x7, x7, #4
    orr x7, x7, x6, lsr 60
    lsl x6, x6, #4
    mov x1, #15
    orr x6, x6, x5, lsr 60
    bic x5, x5, x1, lsl 60
    bic x9, x9, x1, lsl 60
    # Add order times bits 504..511
    mov x11, #0x2c13
    movk x11, #0xa30a, lsl 16
    movk x11, #0x9ce5, lsl 32
    movk x11, #0xa7ed, lsl 48
    mov x13, #0x6329
    movk x13, #0x5d08, lsl 16
    movk x13, #0x621, lsl 32
    movk x13, #0xeb21, lsl 48
    mul x10, x23, x11
    umulh x11, x23, x11
    mul x12, x23, x13
    umulh x13, x23, x13
    adds x6, x6, x10
    adcs x7, x7, x11
    adcs x8, x8, xzr
    adc x9, x9, xzr
    adds x7, x7, x12
    adcs x8, x8, x13
    adc x9, x9, xzr
    subs x8, x8, x23
    sbc x9, x9, xzr
    # Sub product of top 4 words and order
    # Multiply the high half by -L's low words and accumulate into the low
    # half (equivalent to subtracting high*L).
    mov x1, #0x2c13
    movk x1, #0xa30a, lsl 16
    movk x1, #0x9ce5, lsl 32
    movk x1, #0xa7ed, lsl 48
    mul x10, x6, x1
    umulh x11, x6, x1
    mul x12, x7, x1
    umulh x13, x7, x1
    mul x14, x8, x1
    umulh x15, x8, x1
    mul x16, x9, x1
    umulh x17, x9, x1
    adds x2, x2, x10
    adcs x3, x3, x11
    adcs x4, x4, x14
    adcs x5, x5, x15
    adc x19, xzr, xzr
    adds x3, x3, x12
    adcs x4, x4, x13
    adcs x5, x5, x16
    adc x19, x19, x17
    mov x1, #0x6329
    movk x1, #0x5d08, lsl 16
    movk x1, #0x621, lsl 32
    movk x1, #0xeb21, lsl 48
    mul x10, x6, x1
    umulh x11, x6, x1
    mul x12, x7, x1
    umulh x13, x7, x1
    mul x14, x8, x1
    umulh x15, x8, x1
    mul x16, x9, x1
    umulh x17, x9, x1
    adds x3, x3, x10
    adcs x4, x4, x11
    adcs x5, x5, x14
    adcs x19, x19, x15
    adc x20, xzr, xzr
    adds x4, x4, x12
    adcs x5, x5, x13
    adcs x19, x19, x16
    adc x20, x20, x17
    subs x4, x4, x6
    sbcs x5, x5, x7
    sbcs x6, x19, x8
    sbc x7, x20, x9
    # x23 = all-ones if the intermediate went negative.
    asr x23, x7, #57
    # Conditionally subtract order starting at bit 125
    mov x10, xzr
    mov x13, xzr
    mov x11, #0xba7d
    movk x11, #0x4b9e, lsl 16
    movk x11, #0x4c63, lsl 32
    movk x11, #0xcb02, lsl 48
    mov x12, #0xf39a
    movk x12, #0xd45e, lsl 16
    movk x12, #0xdf3b, lsl 32
    movk x12, #0x29b, lsl 48
    movk x10, #0xa000, lsl 48
    movk x13, #0x200, lsl 48
    # Mask the shifted order by the sign so the subtraction is conditional
    # without branching (constant time).
    and x10, x10, x23
    and x11, x11, x23
    and x12, x12, x23
    and x13, x13, x23
    adds x3, x3, x10
    adcs x4, x4, x11
    adcs x5, x5, x12
    adcs x6, x6, xzr
    adc x7, x7, x13
    # Move bits 252-376 to own registers
    lsl x7, x7, #4
    orr x7, x7, x6, lsr 60
    lsl x6, x6, #4
    mov x23, #15
    orr x6, x6, x5, lsr 60
    bic x5, x5, x23, lsl 60
    # Sub product of top 2 words and order
    # * -5812631a5cf5d3ed
    mov x1, #0x2c13
    movk x1, #0xa30a, lsl 16
    movk x1, #0x9ce5, lsl 32
    movk x1, #0xa7ed, lsl 48
    mul x10, x6, x1
    umulh x11, x6, x1
    mul x12, x7, x1
    umulh x13, x7, x1
    adds x2, x2, x10
    adcs x3, x3, x11
    adc x19, xzr, xzr
    adds x3, x3, x12
    adc x19, x19, x13
    # * -14def9dea2f79cd7
    mov x1, #0x6329
    movk x1, #0x5d08, lsl 16
    movk x1, #0x621, lsl 32
    movk x1, #0xeb21, lsl 48
    mul x10, x6, x1
    umulh x11, x6, x1
    mul x12, x7, x1
    umulh x13, x7, x1
    adds x3, x3, x10
    adcs x4, x4, x11
    adc x20, xzr, xzr
    adds x4, x4, x12
    adc x20, x20, x13
    # Add overflows at 2 * 64
    mov x1, #15
    bic x5, x5, x1, lsl 60
    adds x4, x4, x19
    adc x5, x5, x20
    # Subtract top at 2 * 64
    subs x4, x4, x6
    sbcs x5, x5, x7
    # x1 = all-ones on borrow (constant-time condition mask).
    sbc x1, x1, x1
    # Conditional sub order
    mov x10, #0xd3ed
    movk x10, #0x5cf5, lsl 16
    movk x10, #0x631a, lsl 32
    movk x10, #0x5812, lsl 48
    mov x11, #0x9cd6
    movk x11, #0xa2f7, lsl 16
    movk x11, #0xf9de, lsl 32
    movk x11, #0x14de, lsl 48
    and x10, x10, x1
    and x11, x11, x1
    adds x2, x2, x10
    adcs x3, x3, x11
    and x1, x1, #0x1000000000000000
    adcs x4, x4, xzr
    mov x23, #15
    adc x5, x5, x1
    bic x5, x5, x23, lsl 60
    # Store result
    stp x2, x3, [x0]
    stp x4, x5, [x0, #16]
    # Restore callee-saved registers and return.
    ldr x17, [x29, #16]
    ldr x19, [x29, #24]
    ldp x20, x21, [x29, #32]
    ldp x22, x23, [x29, #48]
    ldp x29, x30, [sp], #0x40
    ret
#ifndef __APPLE__
.size sc_reduce,.-sc_reduce
#endif /* __APPLE__ */
# sc_muladd(s, a, b, c): s = (a * b + c) mod L, where L is the Ed25519 group
# order 2^252 + 27742317777372353535851937790883648493.
#   In:  x0 -> s (32-byte output), x1 -> a, x2 -> b, x3 -> c
#        (each 4 x 64-bit little-endian limbs).
#   The 256x256-bit schoolbook product plus c gives a 512-bit value that is
#   then reduced with the same -L-word folding as sc_reduce.
#ifndef __APPLE__
.text
.globl sc_muladd
.type sc_muladd,@function
.align 2
sc_muladd:
#else
.section __TEXT,__text
.globl _sc_muladd
.p2align 2
_sc_muladd:
#endif /* __APPLE__ */
    stp x29, x30, [sp, #-96]!
    add x29, sp, #0
    # Save callee-saved registers used below.
    str x17, [x29, #24]
    str x19, [x29, #32]
    stp x20, x21, [x29, #40]
    stp x22, x23, [x29, #56]
    stp x24, x25, [x29, #72]
    str x26, [x29, #88]
    # Multiply
    # 4x4-limb schoolbook multiply: a (x12..x15) * b (x16..x20) -> 512-bit
    # product in x4..x11.
    ldp x12, x13, [x1]
    ldp x14, x15, [x1, #16]
    ldp x16, x17, [x2]
    ldp x19, x20, [x2, #16]
    # A[0] * B[0]
    umulh x5, x12, x16
    mul x4, x12, x16
    # A[2] * B[0]
    umulh x7, x14, x16
    mul x6, x14, x16
    # A[1] * B[0]
    mul x21, x13, x16
    adds x5, x5, x21
    umulh x22, x13, x16
    adcs x6, x6, x22
    # A[1] * B[3]
    umulh x9, x13, x20
    adc x7, x7, xzr
    mul x8, x13, x20
    # A[0] * B[1]
    mul x21, x12, x17
    adds x5, x5, x21
    umulh x22, x12, x17
    adcs x6, x6, x22
    # A[2] * B[1]
    mul x21, x14, x17
    adcs x7, x7, x21
    umulh x22, x14, x17
    adcs x8, x8, x22
    adc x9, x9, xzr
    # A[1] * B[2]
    mul x21, x13, x19
    adds x7, x7, x21
    umulh x22, x13, x19
    adcs x8, x8, x22
    adcs x9, x9, xzr
    adc x10, xzr, xzr
    # A[0] * B[2]
    mul x21, x12, x19
    adds x6, x6, x21
    umulh x22, x12, x19
    adcs x7, x7, x22
    adcs x8, x8, xzr
    adcs x9, x9, xzr
    adc x10, x10, xzr
    # A[1] * B[1]
    mul x21, x13, x17
    adds x6, x6, x21
    umulh x22, x13, x17
    adcs x7, x7, x22
    # A[3] * B[1]
    mul x21, x15, x17
    adcs x8, x8, x21
    umulh x22, x15, x17
    adcs x9, x9, x22
    adc x10, x10, xzr
    # A[2] * B[2]
    mul x21, x14, x19
    adds x8, x8, x21
    umulh x22, x14, x19
    adcs x9, x9, x22
    # A[3] * B[3]
    mul x21, x15, x20
    adcs x10, x10, x21
    umulh x11, x15, x20
    adc x11, x11, xzr
    # A[0] * B[3]
    mul x21, x12, x20
    adds x7, x7, x21
    umulh x22, x12, x20
    adcs x8, x8, x22
    # A[2] * B[3]
    mul x21, x14, x20
    adcs x9, x9, x21
    umulh x22, x14, x20
    adcs x10, x10, x22
    adc x11, x11, xzr
    # A[3] * B[0]
    mul x21, x15, x16
    adds x7, x7, x21
    umulh x22, x15, x16
    adcs x8, x8, x22
    # A[3] * B[2]
    mul x21, x15, x19
    adcs x9, x9, x21
    umulh x22, x15, x19
    adcs x10, x10, x22
    adc x11, x11, xzr
    # Add c to a * b
    ldp x12, x13, [x3]
    ldp x14, x15, [x3, #16]
    adds x4, x4, x12
    adcs x5, x5, x13
    adcs x6, x6, x14
    adcs x7, x7, x15
    adcs x8, x8, xzr
    adcs x9, x9, xzr
    adcs x10, x10, xzr
    adc x11, x11, xzr
    # Realign high limbs to bits 252..507; x25 = bits 504..507.
    lsr x25, x11, #56
    lsl x11, x11, #4
    orr x11, x11, x10, lsr 60
    lsl x10, x10, #4
    orr x10, x10, x9, lsr 60
    lsl x9, x9, #4
    orr x9, x9, x8, lsr 60
    lsl x8, x8, #4
    mov x26, #15
    orr x8, x8, x7, lsr 60
    bic x7, x7, x26, lsl 60
    bic x11, x11, x26, lsl 60
    # Add order times bits 504..507
    mov x22, #0x2c13
    movk x22, #0xa30a, lsl 16
    movk x22, #0x9ce5, lsl 32
    movk x22, #0xa7ed, lsl 48
    mov x24, #0x6329
    movk x24, #0x5d08, lsl 16
    movk x24, #0x621, lsl 32
    movk x24, #0xeb21, lsl 48
    mul x21, x25, x22
    umulh x22, x25, x22
    mul x23, x25, x24
    umulh x24, x25, x24
    adds x8, x8, x21
    adcs x9, x9, x22
    adcs x10, x10, xzr
    adc x11, x11, xzr
    adds x9, x9, x23
    adcs x10, x10, x24
    adc x11, x11, xzr
    subs x10, x10, x25
    sbc x11, x11, xzr
    # Sub product of top 4 words and order
    # Multiply the high half by -L's low words and accumulate into the low
    # half (equivalent to subtracting high*L).
    mov x26, #0x2c13
    movk x26, #0xa30a, lsl 16
    movk x26, #0x9ce5, lsl 32
    movk x26, #0xa7ed, lsl 48
    mul x16, x8, x26
    umulh x17, x8, x26
    mul x19, x9, x26
    umulh x20, x9, x26
    mul x21, x10, x26
    umulh x22, x10, x26
    mul x23, x11, x26
    umulh x24, x11, x26
    adds x4, x4, x16
    adcs x5, x5, x17
    adcs x6, x6, x21
    adcs x7, x7, x22
    adc x12, xzr, xzr
    adds x5, x5, x19
    adcs x6, x6, x20
    adcs x7, x7, x23
    adc x12, x12, x24
    mov x26, #0x6329
    movk x26, #0x5d08, lsl 16
    movk x26, #0x621, lsl 32
    movk x26, #0xeb21, lsl 48
    mul x16, x8, x26
    umulh x17, x8, x26
    mul x19, x9, x26
    umulh x20, x9, x26
    mul x21, x10, x26
    umulh x22, x10, x26
    mul x23, x11, x26
    umulh x24, x11, x26
    adds x5, x5, x16
    adcs x6, x6, x17
    adcs x7, x7, x21
    adcs x12, x12, x22
    adc x13, xzr, xzr
    adds x6, x6, x19
    adcs x7, x7, x20
    adcs x12, x12, x23
    adc x13, x13, x24
    subs x6, x6, x8
    sbcs x7, x7, x9
    sbcs x8, x12, x10
    sbc x9, x13, x11
    # x25 = all-ones if the intermediate went negative.
    asr x25, x9, #57
    # Conditionally subtract order starting at bit 125
    mov x16, xzr
    mov x20, xzr
    mov x17, #0xba7d
    movk x17, #0x4b9e, lsl 16
    movk x17, #0x4c63, lsl 32
    movk x17, #0xcb02, lsl 48
    mov x19, #0xf39a
    movk x19, #0xd45e, lsl 16
    movk x19, #0xdf3b, lsl 32
    movk x19, #0x29b, lsl 48
    movk x16, #0xa000, lsl 48
    movk x20, #0x200, lsl 48
    # Mask the shifted order by the sign so the subtraction is conditional
    # without branching (constant time).
    and x16, x16, x25
    and x17, x17, x25
    and x19, x19, x25
    and x20, x20, x25
    adds x5, x5, x16
    adcs x6, x6, x17
    adcs x7, x7, x19
    adcs x8, x8, xzr
    adc x9, x9, x20
    # Move bits 252-376 to own registers
    lsl x9, x9, #4
    orr x9, x9, x8, lsr 60
    lsl x8, x8, #4
    mov x25, #15
    orr x8, x8, x7, lsr 60
    bic x7, x7, x25, lsl 60
    # Sub product of top 2 words and order
    # * -5812631a5cf5d3ed
    mov x26, #0x2c13
    movk x26, #0xa30a, lsl 16
    movk x26, #0x9ce5, lsl 32
    movk x26, #0xa7ed, lsl 48
    mul x16, x8, x26
    umulh x17, x8, x26
    mul x19, x9, x26
    umulh x20, x9, x26
    adds x4, x4, x16
    adcs x5, x5, x17
    adc x12, xzr, xzr
    adds x5, x5, x19
    adc x12, x12, x20
    # * -14def9dea2f79cd7
    mov x26, #0x6329
    movk x26, #0x5d08, lsl 16
    movk x26, #0x621, lsl 32
    movk x26, #0xeb21, lsl 48
    mul x16, x8, x26
    umulh x17, x8, x26
    mul x19, x9, x26
    umulh x20, x9, x26
    adds x5, x5, x16
    adcs x6, x6, x17
    adc x13, xzr, xzr
    adds x6, x6, x19
    adc x13, x13, x20
    # Add overflows at 2 * 64
    mov x26, #15
    bic x7, x7, x26, lsl 60
    adds x6, x6, x12
    adc x7, x7, x13
    # Subtract top at 2 * 64
    subs x6, x6, x8
    sbcs x7, x7, x9
    # x26 = all-ones on borrow (constant-time condition mask).
    sbc x26, x26, x26
    # Conditional sub order
    mov x16, #0xd3ed
    movk x16, #0x5cf5, lsl 16
    movk x16, #0x631a, lsl 32
    movk x16, #0x5812, lsl 48
    mov x17, #0x9cd6
    movk x17, #0xa2f7, lsl 16
    movk x17, #0xf9de, lsl 32
    movk x17, #0x14de, lsl 48
    and x16, x16, x26
    and x17, x17, x26
    adds x4, x4, x16
    adcs x5, x5, x17
    and x26, x26, #0x1000000000000000
    adcs x6, x6, xzr
    mov x25, #15
    adc x7, x7, x26
    bic x7, x7, x25, lsl 60
    # Store result
    stp x4, x5, [x0]
    stp x6, x7, [x0, #16]
    # Restore callee-saved registers and return.
    ldr x17, [x29, #24]
    ldr x19, [x29, #32]
    ldp x20, x21, [x29, #40]
    ldp x22, x23, [x29, #56]
    ldp x24, x25, [x29, #72]
    ldr x26, [x29, #88]
    ldp x29, x30, [sp], #0x60
    ret
#ifndef __APPLE__
.size sc_muladd,.-sc_muladd
#endif /* __APPLE__ */
#endif /* HAVE_ED25519 */
#endif /* !CURVE25519_SMALL || !ED25519_SMALL */
#endif /* HAVE_CURVE25519 || HAVE_ED25519 */
#endif /* __aarch64__ */
#endif /* WOLFSSL_ARMASM */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
#endif /* !WOLFSSL_ARMASM_INLINE */
|
aenu1/aps3e
| 28,407
|
app/src/main/cpp/rpcs3/3rdparty/wolfssl/wolfssl/wolfcrypt/src/port/arm/thumb2-sha256-asm.S
|
/* thumb2-sha256-asm
*
* Copyright (C) 2006-2023 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
/* Generated using (from wolfssl):
* cd ../scripts
* ruby ./sha2/sha256.rb thumb2 ../wolfssl/wolfcrypt/src/port/arm/thumb2-sha256-asm.S
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif /* HAVE_CONFIG_H */
#include <wolfssl/wolfcrypt/settings.h>
#ifdef WOLFSSL_ARMASM
#if !defined(__aarch64__) && defined(__thumb__)
#ifndef WOLFSSL_ARMASM_INLINE
.thumb
.syntax unified
#ifndef NO_SHA256
#ifdef WOLFSSL_ARMASM_NO_NEON
.text
/* SHA-256 round constants K[0..63] (FIPS 180-4, section 4.2.2), one 32-bit
 * word per round. Placed in .text (not .rodata) so the ADR instruction in
 * Transform_Sha256_Len below can reach the table with a short PC-relative
 * offset. */
.type L_SHA256_transform_len_k, %object
.size L_SHA256_transform_len_k, 256
.align 4
L_SHA256_transform_len_k:
.word 0x428a2f98
.word 0x71374491
.word 0xb5c0fbcf
.word 0xe9b5dba5
.word 0x3956c25b
.word 0x59f111f1
.word 0x923f82a4
.word 0xab1c5ed5
.word 0xd807aa98
.word 0x12835b01
.word 0x243185be
.word 0x550c7dc3
.word 0x72be5d74
.word 0x80deb1fe
.word 0x9bdc06a7
.word 0xc19bf174
.word 0xe49b69c1
.word 0xefbe4786
.word 0xfc19dc6
.word 0x240ca1cc
.word 0x2de92c6f
.word 0x4a7484aa
.word 0x5cb0a9dc
.word 0x76f988da
.word 0x983e5152
.word 0xa831c66d
.word 0xb00327c8
.word 0xbf597fc7
.word 0xc6e00bf3
.word 0xd5a79147
.word 0x6ca6351
.word 0x14292967
.word 0x27b70a85
.word 0x2e1b2138
.word 0x4d2c6dfc
.word 0x53380d13
.word 0x650a7354
.word 0x766a0abb
.word 0x81c2c92e
.word 0x92722c85
.word 0xa2bfe8a1
.word 0xa81a664b
.word 0xc24b8b70
.word 0xc76c51a3
.word 0xd192e819
.word 0xd6990624
.word 0xf40e3585
.word 0x106aa070
.word 0x19a4c116
.word 0x1e376c08
.word 0x2748774c
.word 0x34b0bcb5
.word 0x391c0cb3
.word 0x4ed8aa4a
.word 0x5b9cca4f
.word 0x682e6ff3
.word 0x748f82ee
.word 0x78a5636f
.word 0x84c87814
.word 0x8cc70208
.word 0x90befffa
.word 0xa4506ceb
.word 0xbef9a3f7
.word 0xc67178f2
.text
.align 4
/*-----------------------------------------------------------------------
 * Transform_Sha256_Len — SHA-256 compression over whole 64-byte blocks
 * (Thumb-2, no NEON; generated code, see header of this file).
 *
 * In:   r0 = pointer to digest state, 8 x 32-bit words (a..h)
 *       r1 = pointer to input message bytes (read 64 bytes per block,
 *            byte-reversed with REV, i.e. input is big-endian)
 *       r2 = length in bytes; decremented by 0x40 per iteration, so it is
 *            expected to be a non-zero multiple of 64 — TODO confirm with
 *            the C caller
 * Out:  digest at [r0] updated in place
 * Clobbers: r3-r12, lr, flags
 * Stack: 0xc0 bytes —
 *       sp+0x00..0x3f: 16-word message schedule W[0..15] (updated in place)
 *       sp+0x40..0x5f: copy of the digest at block start, added back at end
 *---------------------------------------------------------------------*/
.globl Transform_Sha256_Len
.type Transform_Sha256_Len, %function
Transform_Sha256_Len:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
SUB sp, sp, #0xc0
/* r3 = pointer into the K[64] round-constant table (PC-relative). */
ADR r3, L_SHA256_transform_len_k
/* Copy digest to add in at end */
LDRD r4, r5, [r0]
LDRD r6, r7, [r0, #8]
LDRD r8, r9, [r0, #16]
LDRD r10, r11, [r0, #24]
STRD r4, r5, [sp, #64]
STRD r6, r7, [sp, #72]
STRD r8, r9, [sp, #80]
STRD r10, r11, [sp, #88]
/* Start of loop processing a block */
L_SHA256_transform_len_begin:
/* Load, Reverse and Store W - 64 bytes */
LDR r4, [r1]
LDR r5, [r1, #4]
LDR r6, [r1, #8]
LDR r7, [r1, #12]
LDR r8, [r1, #16]
LDR r9, [r1, #20]
LDR r10, [r1, #24]
LDR r11, [r1, #28]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
REV r8, r8
REV r9, r9
REV r10, r10
REV r11, r11
STRD r4, r5, [sp]
STRD r6, r7, [sp, #8]
STRD r8, r9, [sp, #16]
STRD r10, r11, [sp, #24]
LDR r4, [r1, #32]
LDR r5, [r1, #36]
LDR r6, [r1, #40]
LDR r7, [r1, #44]
LDR r8, [r1, #48]
LDR r9, [r1, #52]
LDR r10, [r1, #56]
LDR r11, [r1, #60]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
REV r8, r8
REV r9, r9
REV r10, r10
REV r11, r11
STRD r4, r5, [sp, #32]
STRD r6, r7, [sp, #40]
STRD r8, r9, [sp, #48]
STRD r10, r11, [sp, #56]
/* Seed the incremental Maj() trick: r11 = b ^ c. Each round computes
 * a^b into the other scratch register (r10/r11 alternate roles), so
 * Maj(a,b,c) = ((b^c) & (a^b)) ^ b costs one AND + one EOR per round. */
LDR r11, [r0, #4]
LDR r4, [r0, #8]
EOR r11, r11, r4
/* r12 = 3 iterations of the 16-round body below = rounds 0..47;
 * rounds 48..63 are unrolled after the loop (no schedule update). */
MOV r12, #0x3
/* Start of 16 rounds */
L_SHA256_transform_len_start:
/* Round 0 */
LDR r5, [r0, #16]
LDR r6, [r0, #20]
LDR r7, [r0, #24]
LDR r9, [r0, #28]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp]
LDR r6, [r3]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0]
LDR r6, [r0, #4]
LDR r7, [r0, #8]
LDR r8, [r0, #12]
ROR r4, r5, #2
EOR r10, r5, r6
EOR r4, r4, r5, ROR #13
AND r11, r11, r10
EOR r4, r4, r5, ROR #22
EOR r11, r11, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r11
STR r8, [r0, #12]
STR r9, [r0, #28]
/* Calc new W[0] */
LDR r6, [sp, #56]
LDR r7, [sp, #36]
LDR r8, [sp, #4]
LDR r9, [sp]
ROR r4, r6, #17
ROR r5, r8, #7
EOR r4, r4, r6, ROR #19
EOR r5, r5, r8, ROR #18
EOR r4, r4, r6, LSR #10
EOR r5, r5, r8, LSR #3
ADD r9, r9, r7
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp]
/* Round 1 */
LDR r5, [r0, #12]
LDR r6, [r0, #16]
LDR r7, [r0, #20]
LDR r9, [r0, #24]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #4]
LDR r6, [r3, #4]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #28]
LDR r6, [r0]
LDR r7, [r0, #4]
LDR r8, [r0, #8]
ROR r4, r5, #2
EOR r11, r5, r6
EOR r4, r4, r5, ROR #13
AND r10, r10, r11
EOR r4, r4, r5, ROR #22
EOR r10, r10, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r10
STR r8, [r0, #8]
STR r9, [r0, #24]
/* Calc new W[1] */
LDR r6, [sp, #60]
LDR r7, [sp, #40]
LDR r8, [sp, #8]
LDR r9, [sp, #4]
ROR r4, r6, #17
ROR r5, r8, #7
EOR r4, r4, r6, ROR #19
EOR r5, r5, r8, ROR #18
EOR r4, r4, r6, LSR #10
EOR r5, r5, r8, LSR #3
ADD r9, r9, r7
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #4]
/* Round 2 */
LDR r5, [r0, #8]
LDR r6, [r0, #12]
LDR r7, [r0, #16]
LDR r9, [r0, #20]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #8]
LDR r6, [r3, #8]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #24]
LDR r6, [r0, #28]
LDR r7, [r0]
LDR r8, [r0, #4]
ROR r4, r5, #2
EOR r10, r5, r6
EOR r4, r4, r5, ROR #13
AND r11, r11, r10
EOR r4, r4, r5, ROR #22
EOR r11, r11, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r11
STR r8, [r0, #4]
STR r9, [r0, #20]
/* Calc new W[2] */
LDR r6, [sp]
LDR r7, [sp, #44]
LDR r8, [sp, #12]
LDR r9, [sp, #8]
ROR r4, r6, #17
ROR r5, r8, #7
EOR r4, r4, r6, ROR #19
EOR r5, r5, r8, ROR #18
EOR r4, r4, r6, LSR #10
EOR r5, r5, r8, LSR #3
ADD r9, r9, r7
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #8]
/* Round 3 */
LDR r5, [r0, #4]
LDR r6, [r0, #8]
LDR r7, [r0, #12]
LDR r9, [r0, #16]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #12]
LDR r6, [r3, #12]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #20]
LDR r6, [r0, #24]
LDR r7, [r0, #28]
LDR r8, [r0]
ROR r4, r5, #2
EOR r11, r5, r6
EOR r4, r4, r5, ROR #13
AND r10, r10, r11
EOR r4, r4, r5, ROR #22
EOR r10, r10, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r10
STR r8, [r0]
STR r9, [r0, #16]
/* Calc new W[3] */
LDR r6, [sp, #4]
LDR r7, [sp, #48]
LDR r8, [sp, #16]
LDR r9, [sp, #12]
ROR r4, r6, #17
ROR r5, r8, #7
EOR r4, r4, r6, ROR #19
EOR r5, r5, r8, ROR #18
EOR r4, r4, r6, LSR #10
EOR r5, r5, r8, LSR #3
ADD r9, r9, r7
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #12]
/* Round 4 */
LDR r5, [r0]
LDR r6, [r0, #4]
LDR r7, [r0, #8]
LDR r9, [r0, #12]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #16]
LDR r6, [r3, #16]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #16]
LDR r6, [r0, #20]
LDR r7, [r0, #24]
LDR r8, [r0, #28]
ROR r4, r5, #2
EOR r10, r5, r6
EOR r4, r4, r5, ROR #13
AND r11, r11, r10
EOR r4, r4, r5, ROR #22
EOR r11, r11, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r11
STR r8, [r0, #28]
STR r9, [r0, #12]
/* Calc new W[4] */
LDR r6, [sp, #8]
LDR r7, [sp, #52]
LDR r8, [sp, #20]
LDR r9, [sp, #16]
ROR r4, r6, #17
ROR r5, r8, #7
EOR r4, r4, r6, ROR #19
EOR r5, r5, r8, ROR #18
EOR r4, r4, r6, LSR #10
EOR r5, r5, r8, LSR #3
ADD r9, r9, r7
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #16]
/* Round 5 */
LDR r5, [r0, #28]
LDR r6, [r0]
LDR r7, [r0, #4]
LDR r9, [r0, #8]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #20]
LDR r6, [r3, #20]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #12]
LDR r6, [r0, #16]
LDR r7, [r0, #20]
LDR r8, [r0, #24]
ROR r4, r5, #2
EOR r11, r5, r6
EOR r4, r4, r5, ROR #13
AND r10, r10, r11
EOR r4, r4, r5, ROR #22
EOR r10, r10, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r10
STR r8, [r0, #24]
STR r9, [r0, #8]
/* Calc new W[5] */
LDR r6, [sp, #12]
LDR r7, [sp, #56]
LDR r8, [sp, #24]
LDR r9, [sp, #20]
ROR r4, r6, #17
ROR r5, r8, #7
EOR r4, r4, r6, ROR #19
EOR r5, r5, r8, ROR #18
EOR r4, r4, r6, LSR #10
EOR r5, r5, r8, LSR #3
ADD r9, r9, r7
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #20]
/* Round 6 */
LDR r5, [r0, #24]
LDR r6, [r0, #28]
LDR r7, [r0]
LDR r9, [r0, #4]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #24]
LDR r6, [r3, #24]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #8]
LDR r6, [r0, #12]
LDR r7, [r0, #16]
LDR r8, [r0, #20]
ROR r4, r5, #2
EOR r10, r5, r6
EOR r4, r4, r5, ROR #13
AND r11, r11, r10
EOR r4, r4, r5, ROR #22
EOR r11, r11, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r11
STR r8, [r0, #20]
STR r9, [r0, #4]
/* Calc new W[6] */
LDR r6, [sp, #16]
LDR r7, [sp, #60]
LDR r8, [sp, #28]
LDR r9, [sp, #24]
ROR r4, r6, #17
ROR r5, r8, #7
EOR r4, r4, r6, ROR #19
EOR r5, r5, r8, ROR #18
EOR r4, r4, r6, LSR #10
EOR r5, r5, r8, LSR #3
ADD r9, r9, r7
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #24]
/* Round 7 */
LDR r5, [r0, #20]
LDR r6, [r0, #24]
LDR r7, [r0, #28]
LDR r9, [r0]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #28]
LDR r6, [r3, #28]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #4]
LDR r6, [r0, #8]
LDR r7, [r0, #12]
LDR r8, [r0, #16]
ROR r4, r5, #2
EOR r11, r5, r6
EOR r4, r4, r5, ROR #13
AND r10, r10, r11
EOR r4, r4, r5, ROR #22
EOR r10, r10, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r10
STR r8, [r0, #16]
STR r9, [r0]
/* Calc new W[7] */
LDR r6, [sp, #20]
LDR r7, [sp]
LDR r8, [sp, #32]
LDR r9, [sp, #28]
ROR r4, r6, #17
ROR r5, r8, #7
EOR r4, r4, r6, ROR #19
EOR r5, r5, r8, ROR #18
EOR r4, r4, r6, LSR #10
EOR r5, r5, r8, LSR #3
ADD r9, r9, r7
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #28]
/* Round 8 */
LDR r5, [r0, #16]
LDR r6, [r0, #20]
LDR r7, [r0, #24]
LDR r9, [r0, #28]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #32]
LDR r6, [r3, #32]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0]
LDR r6, [r0, #4]
LDR r7, [r0, #8]
LDR r8, [r0, #12]
ROR r4, r5, #2
EOR r10, r5, r6
EOR r4, r4, r5, ROR #13
AND r11, r11, r10
EOR r4, r4, r5, ROR #22
EOR r11, r11, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r11
STR r8, [r0, #12]
STR r9, [r0, #28]
/* Calc new W[8] */
LDR r6, [sp, #24]
LDR r7, [sp, #4]
LDR r8, [sp, #36]
LDR r9, [sp, #32]
ROR r4, r6, #17
ROR r5, r8, #7
EOR r4, r4, r6, ROR #19
EOR r5, r5, r8, ROR #18
EOR r4, r4, r6, LSR #10
EOR r5, r5, r8, LSR #3
ADD r9, r9, r7
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #32]
/* Round 9 */
LDR r5, [r0, #12]
LDR r6, [r0, #16]
LDR r7, [r0, #20]
LDR r9, [r0, #24]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #36]
LDR r6, [r3, #36]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #28]
LDR r6, [r0]
LDR r7, [r0, #4]
LDR r8, [r0, #8]
ROR r4, r5, #2
EOR r11, r5, r6
EOR r4, r4, r5, ROR #13
AND r10, r10, r11
EOR r4, r4, r5, ROR #22
EOR r10, r10, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r10
STR r8, [r0, #8]
STR r9, [r0, #24]
/* Calc new W[9] */
LDR r6, [sp, #28]
LDR r7, [sp, #8]
LDR r8, [sp, #40]
LDR r9, [sp, #36]
ROR r4, r6, #17
ROR r5, r8, #7
EOR r4, r4, r6, ROR #19
EOR r5, r5, r8, ROR #18
EOR r4, r4, r6, LSR #10
EOR r5, r5, r8, LSR #3
ADD r9, r9, r7
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #36]
/* Round 10 */
LDR r5, [r0, #8]
LDR r6, [r0, #12]
LDR r7, [r0, #16]
LDR r9, [r0, #20]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #40]
LDR r6, [r3, #40]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #24]
LDR r6, [r0, #28]
LDR r7, [r0]
LDR r8, [r0, #4]
ROR r4, r5, #2
EOR r10, r5, r6
EOR r4, r4, r5, ROR #13
AND r11, r11, r10
EOR r4, r4, r5, ROR #22
EOR r11, r11, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r11
STR r8, [r0, #4]
STR r9, [r0, #20]
/* Calc new W[10] */
LDR r6, [sp, #32]
LDR r7, [sp, #12]
LDR r8, [sp, #44]
LDR r9, [sp, #40]
ROR r4, r6, #17
ROR r5, r8, #7
EOR r4, r4, r6, ROR #19
EOR r5, r5, r8, ROR #18
EOR r4, r4, r6, LSR #10
EOR r5, r5, r8, LSR #3
ADD r9, r9, r7
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #40]
/* Round 11 */
LDR r5, [r0, #4]
LDR r6, [r0, #8]
LDR r7, [r0, #12]
LDR r9, [r0, #16]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #44]
LDR r6, [r3, #44]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #20]
LDR r6, [r0, #24]
LDR r7, [r0, #28]
LDR r8, [r0]
ROR r4, r5, #2
EOR r11, r5, r6
EOR r4, r4, r5, ROR #13
AND r10, r10, r11
EOR r4, r4, r5, ROR #22
EOR r10, r10, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r10
STR r8, [r0]
STR r9, [r0, #16]
/* Calc new W[11] */
LDR r6, [sp, #36]
LDR r7, [sp, #16]
LDR r8, [sp, #48]
LDR r9, [sp, #44]
ROR r4, r6, #17
ROR r5, r8, #7
EOR r4, r4, r6, ROR #19
EOR r5, r5, r8, ROR #18
EOR r4, r4, r6, LSR #10
EOR r5, r5, r8, LSR #3
ADD r9, r9, r7
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #44]
/* Round 12 */
LDR r5, [r0]
LDR r6, [r0, #4]
LDR r7, [r0, #8]
LDR r9, [r0, #12]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #48]
LDR r6, [r3, #48]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #16]
LDR r6, [r0, #20]
LDR r7, [r0, #24]
LDR r8, [r0, #28]
ROR r4, r5, #2
EOR r10, r5, r6
EOR r4, r4, r5, ROR #13
AND r11, r11, r10
EOR r4, r4, r5, ROR #22
EOR r11, r11, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r11
STR r8, [r0, #28]
STR r9, [r0, #12]
/* Calc new W[12] */
LDR r6, [sp, #40]
LDR r7, [sp, #20]
LDR r8, [sp, #52]
LDR r9, [sp, #48]
ROR r4, r6, #17
ROR r5, r8, #7
EOR r4, r4, r6, ROR #19
EOR r5, r5, r8, ROR #18
EOR r4, r4, r6, LSR #10
EOR r5, r5, r8, LSR #3
ADD r9, r9, r7
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #48]
/* Round 13 */
LDR r5, [r0, #28]
LDR r6, [r0]
LDR r7, [r0, #4]
LDR r9, [r0, #8]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #52]
LDR r6, [r3, #52]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #12]
LDR r6, [r0, #16]
LDR r7, [r0, #20]
LDR r8, [r0, #24]
ROR r4, r5, #2
EOR r11, r5, r6
EOR r4, r4, r5, ROR #13
AND r10, r10, r11
EOR r4, r4, r5, ROR #22
EOR r10, r10, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r10
STR r8, [r0, #24]
STR r9, [r0, #8]
/* Calc new W[13] */
LDR r6, [sp, #44]
LDR r7, [sp, #24]
LDR r8, [sp, #56]
LDR r9, [sp, #52]
ROR r4, r6, #17
ROR r5, r8, #7
EOR r4, r4, r6, ROR #19
EOR r5, r5, r8, ROR #18
EOR r4, r4, r6, LSR #10
EOR r5, r5, r8, LSR #3
ADD r9, r9, r7
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #52]
/* Round 14 */
LDR r5, [r0, #24]
LDR r6, [r0, #28]
LDR r7, [r0]
LDR r9, [r0, #4]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #56]
LDR r6, [r3, #56]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #8]
LDR r6, [r0, #12]
LDR r7, [r0, #16]
LDR r8, [r0, #20]
ROR r4, r5, #2
EOR r10, r5, r6
EOR r4, r4, r5, ROR #13
AND r11, r11, r10
EOR r4, r4, r5, ROR #22
EOR r11, r11, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r11
STR r8, [r0, #20]
STR r9, [r0, #4]
/* Calc new W[14] */
LDR r6, [sp, #48]
LDR r7, [sp, #28]
LDR r8, [sp, #60]
LDR r9, [sp, #56]
ROR r4, r6, #17
ROR r5, r8, #7
EOR r4, r4, r6, ROR #19
EOR r5, r5, r8, ROR #18
EOR r4, r4, r6, LSR #10
EOR r5, r5, r8, LSR #3
ADD r9, r9, r7
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #56]
/* Round 15 */
LDR r5, [r0, #20]
LDR r6, [r0, #24]
LDR r7, [r0, #28]
LDR r9, [r0]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #60]
LDR r6, [r3, #60]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #4]
LDR r6, [r0, #8]
LDR r7, [r0, #12]
LDR r8, [r0, #16]
ROR r4, r5, #2
EOR r11, r5, r6
EOR r4, r4, r5, ROR #13
AND r10, r10, r11
EOR r4, r4, r5, ROR #22
EOR r10, r10, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r10
STR r8, [r0, #16]
STR r9, [r0]
/* Calc new W[15] */
LDR r6, [sp, #52]
LDR r7, [sp, #32]
LDR r8, [sp]
LDR r9, [sp, #60]
ROR r4, r6, #17
ROR r5, r8, #7
EOR r4, r4, r6, ROR #19
EOR r5, r5, r8, ROR #18
EOR r4, r4, r6, LSR #10
EOR r5, r5, r8, LSR #3
ADD r9, r9, r7
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #60]
ADD r3, r3, #0x40
SUBS r12, r12, #0x1
#ifdef __GNUC__
BNE L_SHA256_transform_len_start
#else
BNE.W L_SHA256_transform_len_start
#endif
/* Rounds 48..63, unrolled: the message schedule is complete, so these
 * are the same round bodies as above without the W[i] updates.
 * r3 now points at K[48..63]. */
/* Round 0 */
LDR r5, [r0, #16]
LDR r6, [r0, #20]
LDR r7, [r0, #24]
LDR r9, [r0, #28]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp]
LDR r6, [r3]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0]
LDR r6, [r0, #4]
LDR r7, [r0, #8]
LDR r8, [r0, #12]
ROR r4, r5, #2
EOR r10, r5, r6
EOR r4, r4, r5, ROR #13
AND r11, r11, r10
EOR r4, r4, r5, ROR #22
EOR r11, r11, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r11
STR r8, [r0, #12]
STR r9, [r0, #28]
/* Round 1 */
LDR r5, [r0, #12]
LDR r6, [r0, #16]
LDR r7, [r0, #20]
LDR r9, [r0, #24]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #4]
LDR r6, [r3, #4]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #28]
LDR r6, [r0]
LDR r7, [r0, #4]
LDR r8, [r0, #8]
ROR r4, r5, #2
EOR r11, r5, r6
EOR r4, r4, r5, ROR #13
AND r10, r10, r11
EOR r4, r4, r5, ROR #22
EOR r10, r10, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r10
STR r8, [r0, #8]
STR r9, [r0, #24]
/* Round 2 */
LDR r5, [r0, #8]
LDR r6, [r0, #12]
LDR r7, [r0, #16]
LDR r9, [r0, #20]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #8]
LDR r6, [r3, #8]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #24]
LDR r6, [r0, #28]
LDR r7, [r0]
LDR r8, [r0, #4]
ROR r4, r5, #2
EOR r10, r5, r6
EOR r4, r4, r5, ROR #13
AND r11, r11, r10
EOR r4, r4, r5, ROR #22
EOR r11, r11, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r11
STR r8, [r0, #4]
STR r9, [r0, #20]
/* Round 3 */
LDR r5, [r0, #4]
LDR r6, [r0, #8]
LDR r7, [r0, #12]
LDR r9, [r0, #16]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #12]
LDR r6, [r3, #12]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #20]
LDR r6, [r0, #24]
LDR r7, [r0, #28]
LDR r8, [r0]
ROR r4, r5, #2
EOR r11, r5, r6
EOR r4, r4, r5, ROR #13
AND r10, r10, r11
EOR r4, r4, r5, ROR #22
EOR r10, r10, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r10
STR r8, [r0]
STR r9, [r0, #16]
/* Round 4 */
LDR r5, [r0]
LDR r6, [r0, #4]
LDR r7, [r0, #8]
LDR r9, [r0, #12]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #16]
LDR r6, [r3, #16]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #16]
LDR r6, [r0, #20]
LDR r7, [r0, #24]
LDR r8, [r0, #28]
ROR r4, r5, #2
EOR r10, r5, r6
EOR r4, r4, r5, ROR #13
AND r11, r11, r10
EOR r4, r4, r5, ROR #22
EOR r11, r11, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r11
STR r8, [r0, #28]
STR r9, [r0, #12]
/* Round 5 */
LDR r5, [r0, #28]
LDR r6, [r0]
LDR r7, [r0, #4]
LDR r9, [r0, #8]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #20]
LDR r6, [r3, #20]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #12]
LDR r6, [r0, #16]
LDR r7, [r0, #20]
LDR r8, [r0, #24]
ROR r4, r5, #2
EOR r11, r5, r6
EOR r4, r4, r5, ROR #13
AND r10, r10, r11
EOR r4, r4, r5, ROR #22
EOR r10, r10, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r10
STR r8, [r0, #24]
STR r9, [r0, #8]
/* Round 6 */
LDR r5, [r0, #24]
LDR r6, [r0, #28]
LDR r7, [r0]
LDR r9, [r0, #4]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #24]
LDR r6, [r3, #24]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #8]
LDR r6, [r0, #12]
LDR r7, [r0, #16]
LDR r8, [r0, #20]
ROR r4, r5, #2
EOR r10, r5, r6
EOR r4, r4, r5, ROR #13
AND r11, r11, r10
EOR r4, r4, r5, ROR #22
EOR r11, r11, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r11
STR r8, [r0, #20]
STR r9, [r0, #4]
/* Round 7 */
LDR r5, [r0, #20]
LDR r6, [r0, #24]
LDR r7, [r0, #28]
LDR r9, [r0]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #28]
LDR r6, [r3, #28]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #4]
LDR r6, [r0, #8]
LDR r7, [r0, #12]
LDR r8, [r0, #16]
ROR r4, r5, #2
EOR r11, r5, r6
EOR r4, r4, r5, ROR #13
AND r10, r10, r11
EOR r4, r4, r5, ROR #22
EOR r10, r10, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r10
STR r8, [r0, #16]
STR r9, [r0]
/* Round 8 */
LDR r5, [r0, #16]
LDR r6, [r0, #20]
LDR r7, [r0, #24]
LDR r9, [r0, #28]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #32]
LDR r6, [r3, #32]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0]
LDR r6, [r0, #4]
LDR r7, [r0, #8]
LDR r8, [r0, #12]
ROR r4, r5, #2
EOR r10, r5, r6
EOR r4, r4, r5, ROR #13
AND r11, r11, r10
EOR r4, r4, r5, ROR #22
EOR r11, r11, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r11
STR r8, [r0, #12]
STR r9, [r0, #28]
/* Round 9 */
LDR r5, [r0, #12]
LDR r6, [r0, #16]
LDR r7, [r0, #20]
LDR r9, [r0, #24]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #36]
LDR r6, [r3, #36]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #28]
LDR r6, [r0]
LDR r7, [r0, #4]
LDR r8, [r0, #8]
ROR r4, r5, #2
EOR r11, r5, r6
EOR r4, r4, r5, ROR #13
AND r10, r10, r11
EOR r4, r4, r5, ROR #22
EOR r10, r10, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r10
STR r8, [r0, #8]
STR r9, [r0, #24]
/* Round 10 */
LDR r5, [r0, #8]
LDR r6, [r0, #12]
LDR r7, [r0, #16]
LDR r9, [r0, #20]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #40]
LDR r6, [r3, #40]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #24]
LDR r6, [r0, #28]
LDR r7, [r0]
LDR r8, [r0, #4]
ROR r4, r5, #2
EOR r10, r5, r6
EOR r4, r4, r5, ROR #13
AND r11, r11, r10
EOR r4, r4, r5, ROR #22
EOR r11, r11, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r11
STR r8, [r0, #4]
STR r9, [r0, #20]
/* Round 11 */
LDR r5, [r0, #4]
LDR r6, [r0, #8]
LDR r7, [r0, #12]
LDR r9, [r0, #16]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #44]
LDR r6, [r3, #44]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #20]
LDR r6, [r0, #24]
LDR r7, [r0, #28]
LDR r8, [r0]
ROR r4, r5, #2
EOR r11, r5, r6
EOR r4, r4, r5, ROR #13
AND r10, r10, r11
EOR r4, r4, r5, ROR #22
EOR r10, r10, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r10
STR r8, [r0]
STR r9, [r0, #16]
/* Round 12 */
LDR r5, [r0]
LDR r6, [r0, #4]
LDR r7, [r0, #8]
LDR r9, [r0, #12]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #48]
LDR r6, [r3, #48]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #16]
LDR r6, [r0, #20]
LDR r7, [r0, #24]
LDR r8, [r0, #28]
ROR r4, r5, #2
EOR r10, r5, r6
EOR r4, r4, r5, ROR #13
AND r11, r11, r10
EOR r4, r4, r5, ROR #22
EOR r11, r11, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r11
STR r8, [r0, #28]
STR r9, [r0, #12]
/* Round 13 */
LDR r5, [r0, #28]
LDR r6, [r0]
LDR r7, [r0, #4]
LDR r9, [r0, #8]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #52]
LDR r6, [r3, #52]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #12]
LDR r6, [r0, #16]
LDR r7, [r0, #20]
LDR r8, [r0, #24]
ROR r4, r5, #2
EOR r11, r5, r6
EOR r4, r4, r5, ROR #13
AND r10, r10, r11
EOR r4, r4, r5, ROR #22
EOR r10, r10, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r10
STR r8, [r0, #24]
STR r9, [r0, #8]
/* Round 14 */
LDR r5, [r0, #24]
LDR r6, [r0, #28]
LDR r7, [r0]
LDR r9, [r0, #4]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #56]
LDR r6, [r3, #56]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #8]
LDR r6, [r0, #12]
LDR r7, [r0, #16]
LDR r8, [r0, #20]
ROR r4, r5, #2
EOR r10, r5, r6
EOR r4, r4, r5, ROR #13
AND r11, r11, r10
EOR r4, r4, r5, ROR #22
EOR r11, r11, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r11
STR r8, [r0, #20]
STR r9, [r0, #4]
/* Round 15 */
LDR r5, [r0, #20]
LDR r6, [r0, #24]
LDR r7, [r0, #28]
LDR r9, [r0]
ROR r4, r5, #6
EOR r6, r6, r7
EOR r4, r4, r5, ROR #11
AND r6, r6, r5
EOR r4, r4, r5, ROR #25
EOR r6, r6, r7
ADD r9, r9, r4
ADD r9, r9, r6
LDR r5, [sp, #60]
LDR r6, [r3, #60]
ADD r9, r9, r5
ADD r9, r9, r6
LDR r5, [r0, #4]
LDR r6, [r0, #8]
LDR r7, [r0, #12]
LDR r8, [r0, #16]
ROR r4, r5, #2
EOR r11, r5, r6
EOR r4, r4, r5, ROR #13
AND r10, r10, r11
EOR r4, r4, r5, ROR #22
EOR r10, r10, r6
ADD r8, r8, r9
ADD r9, r9, r4
ADD r9, r9, r10
STR r8, [r0, #16]
STR r9, [r0]
/* Add in digest from start */
LDRD r4, r5, [r0]
LDRD r6, r7, [r0, #8]
LDRD r8, r9, [sp, #64]
LDRD r10, r11, [sp, #72]
ADD r4, r4, r8
ADD r5, r5, r9
ADD r6, r6, r10
ADD r7, r7, r11
STRD r4, r5, [r0]
STRD r6, r7, [r0, #8]
STRD r4, r5, [sp, #64]
STRD r6, r7, [sp, #72]
LDRD r4, r5, [r0, #16]
LDRD r6, r7, [r0, #24]
LDRD r8, r9, [sp, #80]
LDRD r10, r11, [sp, #88]
ADD r4, r4, r8
ADD r5, r5, r9
ADD r6, r6, r10
ADD r7, r7, r11
STRD r4, r5, [r0, #16]
STRD r6, r7, [r0, #24]
STRD r4, r5, [sp, #80]
STRD r6, r7, [sp, #88]
SUBS r2, r2, #0x40
/* r3 advanced 3 * 0x40 bytes through K in the round loop; rewind it
 * for the next block. r1 moves to the next 64-byte input block. */
SUB r3, r3, #0xc0
ADD r1, r1, #0x40
#ifdef __GNUC__
BNE L_SHA256_transform_len_begin
#else
BNE.W L_SHA256_transform_len_begin
#endif
ADD sp, sp, #0xc0
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 1874 */
.size Transform_Sha256_Len,.-Transform_Sha256_Len
#endif /* WOLFSSL_ARMASM_NO_NEON */
#endif /* !NO_SHA256 */
#endif /* !__aarch64__ && __thumb__ */
#endif /* WOLFSSL_ARMASM */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
#endif /* !WOLFSSL_ARMASM_INLINE */
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/SPI/SPI_FullDuplex_ComIT/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Main stack: 0x400 bytes (1 KiB), in an uninitialised (NOINIT) read/write
; section aligned to 2^3 = 8 bytes, as required by the AAPCS.
Stack_Size EQU 0x400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp ; top of stack (the stack grows downwards from here)
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Heap: 0x200 bytes (512 B). __heap_base/__heap_limit bound the region handed
; to the C library's memory model during start-up.
Heap_Size EQU 0x200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base ; first byte of the heap region
Heap_Mem SPACE Heap_Size
__heap_limit ; first byte past the end of the heap region
PRESERVE8 ; guarantee 8-byte stack alignment (AAPCS requirement)
THUMB ; Cortex-M0 executes Thumb code only
; Vector Table Mapped to Address 0 at Reset
; Entry 0 is the initial stack pointer; entry 1 is the reset vector. The order
; of the remaining entries is fixed by the Cortex-M0 core and the STM32F072
; interrupt map and must not be changed.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors ; table size in bytes
AREA |.text|, CODE, READONLY
; Reset handler routine
; Entry point after reset: call SystemInit() to configure clocks, then jump to
; the C library start-up __main, which initialises RAM and calls main().
; Exported WEAK so an application may supply its own reset handler.
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0 ; SystemInit() — clock/PLL configuration
LDR R0, =__main
BX R0 ; enter the C library start-up; does not return
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Each core exception handler below is exported WEAK, so application code may
; override it; the default implementation spins forever (B .), preserving the
; system state for inspection by a debugger.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Default handler for all peripheral interrupts. Every IRQ label below is an
; alias for the same infinite loop; each is exported WEAK so that defining a
; function of the same name anywhere in the application overrides it.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
; All labels below fall through to the same infinite loop.
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB, the C library reads __initial_sp/__heap_base/__heap_limit
; directly; otherwise the full library calls __user_initial_stackheap, which
; returns the two-region memory layout in R0-R3:
; R0 = heap base, R1 = stack top, R2 = heap limit, R3 = stack base.
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/SPI/SPI_FullDuplex_ComIT/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/* Entry point after reset: set SP, copy .data initialisers from flash to SRAM,
 * zero-fill .bss, then run SystemInit, static constructors and main().
 * Register use: r0 = destination base, r1 = byte offset, r2 = cursor/end test,
 * r3 = scratch/word being copied. */
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* r3 = flash word at _sidata + offset */
str r3, [r0, r1] /* store to SRAM at _sdata + offset */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit /* loop while _sdata + offset < _edata */
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss /* loop while cursor < _ebss */
/* Call the clock system intitialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever: /* main() should not return; trap here if it does */
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
/* Catch-all handler for unexpected interrupts: spin forever so the system
 * state is preserved for a debugger. All weak handler aliases below resolve
 * here unless the application overrides them. */
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/* Cortex-M0 / STM32F072 vector table. Entry 0 is the initial stack pointer,
 * entry 1 the reset vector; the remaining order is fixed by the core and the
 * device interrupt map. The linker script must place this section at 0x0.
 * Fix: the original emitted ".size g_pfnVectors, .-g_pfnVectors" BEFORE the
 * label and table, so the symbol size evaluated to 0; it now follows the
 * last entry so the recorded size covers the whole table. */
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Core exception handlers */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
/* Peripheral interrupt handlers */
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aenu1/aps3e
| 72,237
|
app/src/main/cpp/rpcs3/3rdparty/wolfssl/wolfssl/wolfcrypt/src/port/arm/thumb2-sha512-asm.S
|
/* thumb2-sha512-asm
*
* Copyright (C) 2006-2023 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
/* Generated using (from wolfssl):
* cd ../scripts
* ruby ./sha2/sha512.rb thumb2 ../wolfssl/wolfcrypt/src/port/arm/thumb2-sha512-asm.S
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif /* HAVE_CONFIG_H */
#include <wolfssl/wolfcrypt/settings.h>
#ifdef WOLFSSL_ARMASM
#if !defined(__aarch64__) && defined(__thumb__)
#ifndef WOLFSSL_ARMASM_INLINE
.thumb
.syntax unified
#ifdef WOLFSSL_SHA512
#ifdef WOLFSSL_ARMASM_NO_NEON
.text
.type L_SHA512_transform_len_k, %object
.size L_SHA512_transform_len_k, 640
.align 8
/* SHA-512 round constants K[0..79] (FIPS 180-4). Each 64-bit constant is
 * stored as two 32-bit words with the LOW word first, so the 32-bit core can
 * load both halves with a single LDRD (e.g. K[0] = 0x428a2f98d728ae22 is the
 * pair 0xd728ae22, 0x428a2f98). 80 constants * 8 bytes = 640 bytes. */
L_SHA512_transform_len_k:
.word 0xd728ae22
.word 0x428a2f98
.word 0x23ef65cd
.word 0x71374491
.word 0xec4d3b2f
.word 0xb5c0fbcf
.word 0x8189dbbc
.word 0xe9b5dba5
.word 0xf348b538
.word 0x3956c25b
.word 0xb605d019
.word 0x59f111f1
.word 0xaf194f9b
.word 0x923f82a4
.word 0xda6d8118
.word 0xab1c5ed5
.word 0xa3030242
.word 0xd807aa98
.word 0x45706fbe
.word 0x12835b01
.word 0x4ee4b28c
.word 0x243185be
.word 0xd5ffb4e2
.word 0x550c7dc3
.word 0xf27b896f
.word 0x72be5d74
.word 0x3b1696b1
.word 0x80deb1fe
.word 0x25c71235
.word 0x9bdc06a7
.word 0xcf692694
.word 0xc19bf174
.word 0x9ef14ad2
.word 0xe49b69c1
.word 0x384f25e3
.word 0xefbe4786
.word 0x8b8cd5b5
.word 0xfc19dc6
.word 0x77ac9c65
.word 0x240ca1cc
.word 0x592b0275
.word 0x2de92c6f
.word 0x6ea6e483
.word 0x4a7484aa
.word 0xbd41fbd4
.word 0x5cb0a9dc
.word 0x831153b5
.word 0x76f988da
.word 0xee66dfab
.word 0x983e5152
.word 0x2db43210
.word 0xa831c66d
.word 0x98fb213f
.word 0xb00327c8
.word 0xbeef0ee4
.word 0xbf597fc7
.word 0x3da88fc2
.word 0xc6e00bf3
.word 0x930aa725
.word 0xd5a79147
.word 0xe003826f
.word 0x6ca6351
.word 0xa0e6e70
.word 0x14292967
.word 0x46d22ffc
.word 0x27b70a85
.word 0x5c26c926
.word 0x2e1b2138
.word 0x5ac42aed
.word 0x4d2c6dfc
.word 0x9d95b3df
.word 0x53380d13
.word 0x8baf63de
.word 0x650a7354
.word 0x3c77b2a8
.word 0x766a0abb
.word 0x47edaee6
.word 0x81c2c92e
.word 0x1482353b
.word 0x92722c85
.word 0x4cf10364
.word 0xa2bfe8a1
.word 0xbc423001
.word 0xa81a664b
.word 0xd0f89791
.word 0xc24b8b70
.word 0x654be30
.word 0xc76c51a3
.word 0xd6ef5218
.word 0xd192e819
.word 0x5565a910
.word 0xd6990624
.word 0x5771202a
.word 0xf40e3585
.word 0x32bbd1b8
.word 0x106aa070
.word 0xb8d2d0c8
.word 0x19a4c116
.word 0x5141ab53
.word 0x1e376c08
.word 0xdf8eeb99
.word 0x2748774c
.word 0xe19b48a8
.word 0x34b0bcb5
.word 0xc5c95a63
.word 0x391c0cb3
.word 0xe3418acb
.word 0x4ed8aa4a
.word 0x7763e373
.word 0x5b9cca4f
.word 0xd6b2b8a3
.word 0x682e6ff3
.word 0x5defb2fc
.word 0x748f82ee
.word 0x43172f60
.word 0x78a5636f
.word 0xa1f0ab72
.word 0x84c87814
.word 0x1a6439ec
.word 0x8cc70208
.word 0x23631e28
.word 0x90befffa
.word 0xde82bde9
.word 0xa4506ceb
.word 0xb2c67915
.word 0xbef9a3f7
.word 0xe372532b
.word 0xc67178f2
.word 0xea26619c
.word 0xca273ece
.word 0x21c0c207
.word 0xd186b8c7
.word 0xcde0eb1e
.word 0xeada7dd6
.word 0xee6ed178
.word 0xf57d4f7f
.word 0x72176fba
.word 0x6f067aa
.word 0xa2c898a6
.word 0xa637dc5
.word 0xbef90dae
.word 0x113f9804
.word 0x131c471b
.word 0x1b710b35
.word 0x23047d84
.word 0x28db77f5
.word 0x40c72493
.word 0x32caab7b
.word 0x15c9bebc
.word 0x3c9ebe0a
.word 0x9c100d4c
.word 0x431d67c4
.word 0xcb3e42b6
.word 0x4cc5d4be
.word 0xfc657e2a
.word 0x597f299c
.word 0x3ad6faec
.word 0x5fcb6fab
.word 0x4a475817
.word 0x6c44198c
.text
.align 4
.globl Transform_Sha512_Len
.type Transform_Sha512_Len, %function
Transform_Sha512_Len:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
SUB sp, sp, #0xc0
ADR r3, L_SHA512_transform_len_k
/* Copy digest to add in at end */
LDRD r4, r5, [r0]
LDRD r6, r7, [r0, #8]
LDRD r8, r9, [r0, #16]
LDRD r10, r11, [r0, #24]
STRD r4, r5, [sp, #128]
STRD r6, r7, [sp, #136]
STRD r8, r9, [sp, #144]
STRD r10, r11, [sp, #152]
LDRD r4, r5, [r0, #32]
LDRD r6, r7, [r0, #40]
LDRD r8, r9, [r0, #48]
LDRD r10, r11, [r0, #56]
STRD r4, r5, [sp, #160]
STRD r6, r7, [sp, #168]
STRD r8, r9, [sp, #176]
STRD r10, r11, [sp, #184]
/* Start of loop processing a block */
L_SHA512_transform_len_begin:
/* Load, Reverse and Store W */
LDR r4, [r1]
LDR r5, [r1, #4]
LDR r6, [r1, #8]
LDR r7, [r1, #12]
LDR r8, [r1, #16]
LDR r9, [r1, #20]
LDR r10, [r1, #24]
LDR r11, [r1, #28]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
REV r8, r8
REV r9, r9
REV r10, r10
REV r11, r11
STR r5, [sp]
STR r4, [sp, #4]
STR r7, [sp, #8]
STR r6, [sp, #12]
STR r9, [sp, #16]
STR r8, [sp, #20]
STR r11, [sp, #24]
STR r10, [sp, #28]
LDR r4, [r1, #32]
LDR r5, [r1, #36]
LDR r6, [r1, #40]
LDR r7, [r1, #44]
LDR r8, [r1, #48]
LDR r9, [r1, #52]
LDR r10, [r1, #56]
LDR r11, [r1, #60]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
REV r8, r8
REV r9, r9
REV r10, r10
REV r11, r11
STR r5, [sp, #32]
STR r4, [sp, #36]
STR r7, [sp, #40]
STR r6, [sp, #44]
STR r9, [sp, #48]
STR r8, [sp, #52]
STR r11, [sp, #56]
STR r10, [sp, #60]
LDR r4, [r1, #64]
LDR r5, [r1, #68]
LDR r6, [r1, #72]
LDR r7, [r1, #76]
LDR r8, [r1, #80]
LDR r9, [r1, #84]
LDR r10, [r1, #88]
LDR r11, [r1, #92]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
REV r8, r8
REV r9, r9
REV r10, r10
REV r11, r11
STR r5, [sp, #64]
STR r4, [sp, #68]
STR r7, [sp, #72]
STR r6, [sp, #76]
STR r9, [sp, #80]
STR r8, [sp, #84]
STR r11, [sp, #88]
STR r10, [sp, #92]
LDR r4, [r1, #96]
LDR r5, [r1, #100]
LDR r6, [r1, #104]
LDR r7, [r1, #108]
LDR r8, [r1, #112]
LDR r9, [r1, #116]
LDR r10, [r1, #120]
LDR r11, [r1, #124]
REV r4, r4
REV r5, r5
REV r6, r6
REV r7, r7
REV r8, r8
REV r9, r9
REV r10, r10
REV r11, r11
STR r5, [sp, #96]
STR r4, [sp, #100]
STR r7, [sp, #104]
STR r6, [sp, #108]
STR r9, [sp, #112]
STR r8, [sp, #116]
STR r11, [sp, #120]
STR r10, [sp, #124]
/* Pre-calc: b ^ c */
LDRD r10, r11, [r0, #8]
LDRD r4, r5, [r0, #16]
EOR r10, r10, r4
EOR r11, r11, r5
MOV r12, #0x4
/* Start of 16 rounds */
L_SHA512_transform_len_start:
/* Round 0 */
LDRD r4, r5, [r0, #32]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #56]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #56]
LDRD r4, r5, [r0, #32]
LDRD r6, r7, [r0, #40]
LDRD r8, r9, [r0, #48]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #56]
LDRD r8, r9, [sp]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #24]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #56]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0]
STRD r8, r9, [r0, #24]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #56]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0]
LDRD r6, r7, [r0, #8]
STRD r4, r5, [r0, #56]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #56]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #56]
MOV r10, r8
MOV r11, r9
/* Calc new W[0] */
LDRD r4, r5, [sp, #112]
LSRS r6, r4, #19
LSRS r7, r5, #19
ORR r7, r7, r4, LSL #13
ORR r6, r6, r5, LSL #13
LSLS r8, r4, #3
LSLS r9, r5, #3
ORR r9, r9, r4, LSR #29
ORR r8, r8, r5, LSR #29
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #6
LSRS r9, r5, #6
ORR r8, r8, r5, LSL #26
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp]
LDRD r8, r9, [sp, #72]
ADDS r4, r4, r6
ADC r5, r5, r7
ADDS r4, r4, r8
ADC r5, r5, r9
STRD r4, r5, [sp]
LDRD r4, r5, [sp, #8]
LSRS r6, r4, #1
LSRS r7, r5, #1
ORR r7, r7, r4, LSL #31
ORR r6, r6, r5, LSL #31
LSRS r8, r4, #8
LSRS r9, r5, #8
ORR r9, r9, r4, LSL #24
ORR r8, r8, r5, LSL #24
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #7
LSRS r9, r5, #7
ORR r8, r8, r5, LSL #25
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp]
/* Round 1 */
LDRD r4, r5, [r0, #24]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #48]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #48]
LDRD r4, r5, [r0, #24]
LDRD r6, r7, [r0, #32]
LDRD r8, r9, [r0, #40]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #48]
LDRD r8, r9, [sp, #8]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #8]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #16]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #48]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #56]
STRD r8, r9, [r0, #16]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #48]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #56]
LDRD r6, r7, [r0]
STRD r4, r5, [r0, #48]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #48]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #48]
MOV r10, r8
MOV r11, r9
/* Calc new W[1] */
LDRD r4, r5, [sp, #120]
LSRS r6, r4, #19
LSRS r7, r5, #19
ORR r7, r7, r4, LSL #13
ORR r6, r6, r5, LSL #13
LSLS r8, r4, #3
LSLS r9, r5, #3
ORR r9, r9, r4, LSR #29
ORR r8, r8, r5, LSR #29
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #6
LSRS r9, r5, #6
ORR r8, r8, r5, LSL #26
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #8]
LDRD r8, r9, [sp, #80]
ADDS r4, r4, r6
ADC r5, r5, r7
ADDS r4, r4, r8
ADC r5, r5, r9
STRD r4, r5, [sp, #8]
LDRD r4, r5, [sp, #16]
LSRS r6, r4, #1
LSRS r7, r5, #1
ORR r7, r7, r4, LSL #31
ORR r6, r6, r5, LSL #31
LSRS r8, r4, #8
LSRS r9, r5, #8
ORR r9, r9, r4, LSL #24
ORR r8, r8, r5, LSL #24
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #7
LSRS r9, r5, #7
ORR r8, r8, r5, LSL #25
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #8]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #8]
/* Round 2 */
LDRD r4, r5, [r0, #16]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #40]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #40]
LDRD r4, r5, [r0, #16]
LDRD r6, r7, [r0, #24]
LDRD r8, r9, [r0, #32]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #40]
LDRD r8, r9, [sp, #16]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #16]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #8]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #40]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #48]
STRD r8, r9, [r0, #8]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #40]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #48]
LDRD r6, r7, [r0, #56]
STRD r4, r5, [r0, #40]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #40]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #40]
MOV r10, r8
MOV r11, r9
/* Calc new W[2] */
LDRD r4, r5, [sp]
LSRS r6, r4, #19
LSRS r7, r5, #19
ORR r7, r7, r4, LSL #13
ORR r6, r6, r5, LSL #13
LSLS r8, r4, #3
LSLS r9, r5, #3
ORR r9, r9, r4, LSR #29
ORR r8, r8, r5, LSR #29
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #6
LSRS r9, r5, #6
ORR r8, r8, r5, LSL #26
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #16]
LDRD r8, r9, [sp, #88]
ADDS r4, r4, r6
ADC r5, r5, r7
ADDS r4, r4, r8
ADC r5, r5, r9
STRD r4, r5, [sp, #16]
LDRD r4, r5, [sp, #24]
LSRS r6, r4, #1
LSRS r7, r5, #1
ORR r7, r7, r4, LSL #31
ORR r6, r6, r5, LSL #31
LSRS r8, r4, #8
LSRS r9, r5, #8
ORR r9, r9, r4, LSL #24
ORR r8, r8, r5, LSL #24
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #7
LSRS r9, r5, #7
ORR r8, r8, r5, LSL #25
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #16]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #16]
/* Round 3 */
LDRD r4, r5, [r0, #8]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #32]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #32]
LDRD r4, r5, [r0, #8]
LDRD r6, r7, [r0, #16]
LDRD r8, r9, [r0, #24]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #32]
LDRD r8, r9, [sp, #24]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #24]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #32]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #40]
STRD r8, r9, [r0]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #32]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #40]
LDRD r6, r7, [r0, #48]
STRD r4, r5, [r0, #32]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #32]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #32]
MOV r10, r8
MOV r11, r9
/* Calc new W[3] */
LDRD r4, r5, [sp, #8]
LSRS r6, r4, #19
LSRS r7, r5, #19
ORR r7, r7, r4, LSL #13
ORR r6, r6, r5, LSL #13
LSLS r8, r4, #3
LSLS r9, r5, #3
ORR r9, r9, r4, LSR #29
ORR r8, r8, r5, LSR #29
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #6
LSRS r9, r5, #6
ORR r8, r8, r5, LSL #26
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #24]
LDRD r8, r9, [sp, #96]
ADDS r4, r4, r6
ADC r5, r5, r7
ADDS r4, r4, r8
ADC r5, r5, r9
STRD r4, r5, [sp, #24]
LDRD r4, r5, [sp, #32]
LSRS r6, r4, #1
LSRS r7, r5, #1
ORR r7, r7, r4, LSL #31
ORR r6, r6, r5, LSL #31
LSRS r8, r4, #8
LSRS r9, r5, #8
ORR r9, r9, r4, LSL #24
ORR r8, r8, r5, LSL #24
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #7
LSRS r9, r5, #7
ORR r8, r8, r5, LSL #25
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #24]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #24]
/* Round 4 */
LDRD r4, r5, [r0]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #24]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #24]
LDRD r4, r5, [r0]
LDRD r6, r7, [r0, #8]
LDRD r8, r9, [r0, #16]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #24]
LDRD r8, r9, [sp, #32]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #32]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #56]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #24]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #32]
STRD r8, r9, [r0, #56]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #24]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #32]
LDRD r6, r7, [r0, #40]
STRD r4, r5, [r0, #24]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #24]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #24]
MOV r10, r8
MOV r11, r9
/* Calc new W[4] */
LDRD r4, r5, [sp, #16]
LSRS r6, r4, #19
LSRS r7, r5, #19
ORR r7, r7, r4, LSL #13
ORR r6, r6, r5, LSL #13
LSLS r8, r4, #3
LSLS r9, r5, #3
ORR r9, r9, r4, LSR #29
ORR r8, r8, r5, LSR #29
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #6
LSRS r9, r5, #6
ORR r8, r8, r5, LSL #26
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #32]
LDRD r8, r9, [sp, #104]
ADDS r4, r4, r6
ADC r5, r5, r7
ADDS r4, r4, r8
ADC r5, r5, r9
STRD r4, r5, [sp, #32]
LDRD r4, r5, [sp, #40]
LSRS r6, r4, #1
LSRS r7, r5, #1
ORR r7, r7, r4, LSL #31
ORR r6, r6, r5, LSL #31
LSRS r8, r4, #8
LSRS r9, r5, #8
ORR r9, r9, r4, LSL #24
ORR r8, r8, r5, LSL #24
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #7
LSRS r9, r5, #7
ORR r8, r8, r5, LSL #25
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #32]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #32]
/* Round 5 */
LDRD r4, r5, [r0, #56]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #16]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #16]
LDRD r4, r5, [r0, #56]
LDRD r6, r7, [r0]
LDRD r8, r9, [r0, #8]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #16]
LDRD r8, r9, [sp, #40]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #40]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #48]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #16]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #24]
STRD r8, r9, [r0, #48]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #16]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #24]
LDRD r6, r7, [r0, #32]
STRD r4, r5, [r0, #16]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #16]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #16]
MOV r10, r8
MOV r11, r9
/* Calc new W[5] */
LDRD r4, r5, [sp, #24]
LSRS r6, r4, #19
LSRS r7, r5, #19
ORR r7, r7, r4, LSL #13
ORR r6, r6, r5, LSL #13
LSLS r8, r4, #3
LSLS r9, r5, #3
ORR r9, r9, r4, LSR #29
ORR r8, r8, r5, LSR #29
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #6
LSRS r9, r5, #6
ORR r8, r8, r5, LSL #26
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #40]
LDRD r8, r9, [sp, #112]
ADDS r4, r4, r6
ADC r5, r5, r7
ADDS r4, r4, r8
ADC r5, r5, r9
STRD r4, r5, [sp, #40]
LDRD r4, r5, [sp, #48]
LSRS r6, r4, #1
LSRS r7, r5, #1
ORR r7, r7, r4, LSL #31
ORR r6, r6, r5, LSL #31
LSRS r8, r4, #8
LSRS r9, r5, #8
ORR r9, r9, r4, LSL #24
ORR r8, r8, r5, LSL #24
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #7
LSRS r9, r5, #7
ORR r8, r8, r5, LSL #25
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #40]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #40]
/* Round 6 */
LDRD r4, r5, [r0, #48]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #8]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #8]
LDRD r4, r5, [r0, #48]
LDRD r6, r7, [r0, #56]
LDRD r8, r9, [r0]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #8]
LDRD r8, r9, [sp, #48]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #48]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #40]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #8]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #16]
STRD r8, r9, [r0, #40]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #8]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #16]
LDRD r6, r7, [r0, #24]
STRD r4, r5, [r0, #8]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #8]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #8]
MOV r10, r8
MOV r11, r9
/* Calc new W[6] */
LDRD r4, r5, [sp, #32]
LSRS r6, r4, #19
LSRS r7, r5, #19
ORR r7, r7, r4, LSL #13
ORR r6, r6, r5, LSL #13
LSLS r8, r4, #3
LSLS r9, r5, #3
ORR r9, r9, r4, LSR #29
ORR r8, r8, r5, LSR #29
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #6
LSRS r9, r5, #6
ORR r8, r8, r5, LSL #26
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #48]
LDRD r8, r9, [sp, #120]
ADDS r4, r4, r6
ADC r5, r5, r7
ADDS r4, r4, r8
ADC r5, r5, r9
STRD r4, r5, [sp, #48]
LDRD r4, r5, [sp, #56]
LSRS r6, r4, #1
LSRS r7, r5, #1
ORR r7, r7, r4, LSL #31
ORR r6, r6, r5, LSL #31
LSRS r8, r4, #8
LSRS r9, r5, #8
ORR r9, r9, r4, LSL #24
ORR r8, r8, r5, LSL #24
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #7
LSRS r9, r5, #7
ORR r8, r8, r5, LSL #25
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #48]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #48]
/* Round 7 */
LDRD r4, r5, [r0, #40]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0]
LDRD r4, r5, [r0, #40]
LDRD r6, r7, [r0, #48]
LDRD r8, r9, [r0, #56]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0]
LDRD r8, r9, [sp, #56]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #56]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #32]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #8]
STRD r8, r9, [r0, #32]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #8]
LDRD r6, r7, [r0, #16]
STRD r4, r5, [r0]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0]
MOV r10, r8
MOV r11, r9
/* Calc new W[7] */
LDRD r4, r5, [sp, #40]
LSRS r6, r4, #19
LSRS r7, r5, #19
ORR r7, r7, r4, LSL #13
ORR r6, r6, r5, LSL #13
LSLS r8, r4, #3
LSLS r9, r5, #3
ORR r9, r9, r4, LSR #29
ORR r8, r8, r5, LSR #29
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #6
LSRS r9, r5, #6
ORR r8, r8, r5, LSL #26
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #56]
LDRD r8, r9, [sp]
ADDS r4, r4, r6
ADC r5, r5, r7
ADDS r4, r4, r8
ADC r5, r5, r9
STRD r4, r5, [sp, #56]
LDRD r4, r5, [sp, #64]
LSRS r6, r4, #1
LSRS r7, r5, #1
ORR r7, r7, r4, LSL #31
ORR r6, r6, r5, LSL #31
LSRS r8, r4, #8
LSRS r9, r5, #8
ORR r9, r9, r4, LSL #24
ORR r8, r8, r5, LSL #24
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #7
LSRS r9, r5, #7
ORR r8, r8, r5, LSL #25
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #56]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #56]
/* Round 8 */
LDRD r4, r5, [r0, #32]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #56]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #56]
LDRD r4, r5, [r0, #32]
LDRD r6, r7, [r0, #40]
LDRD r8, r9, [r0, #48]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #56]
LDRD r8, r9, [sp, #64]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #64]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #24]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #56]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0]
STRD r8, r9, [r0, #24]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #56]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0]
LDRD r6, r7, [r0, #8]
STRD r4, r5, [r0, #56]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #56]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #56]
MOV r10, r8
MOV r11, r9
/* Calc new W[8] */
LDRD r4, r5, [sp, #48]
LSRS r6, r4, #19
LSRS r7, r5, #19
ORR r7, r7, r4, LSL #13
ORR r6, r6, r5, LSL #13
LSLS r8, r4, #3
LSLS r9, r5, #3
ORR r9, r9, r4, LSR #29
ORR r8, r8, r5, LSR #29
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #6
LSRS r9, r5, #6
ORR r8, r8, r5, LSL #26
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #64]
LDRD r8, r9, [sp, #8]
ADDS r4, r4, r6
ADC r5, r5, r7
ADDS r4, r4, r8
ADC r5, r5, r9
STRD r4, r5, [sp, #64]
LDRD r4, r5, [sp, #72]
LSRS r6, r4, #1
LSRS r7, r5, #1
ORR r7, r7, r4, LSL #31
ORR r6, r6, r5, LSL #31
LSRS r8, r4, #8
LSRS r9, r5, #8
ORR r9, r9, r4, LSL #24
ORR r8, r8, r5, LSL #24
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #7
LSRS r9, r5, #7
ORR r8, r8, r5, LSL #25
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #64]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #64]
/* Round 9 */
LDRD r4, r5, [r0, #24]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #48]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #48]
LDRD r4, r5, [r0, #24]
LDRD r6, r7, [r0, #32]
LDRD r8, r9, [r0, #40]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #48]
LDRD r8, r9, [sp, #72]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #72]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #16]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #48]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #56]
STRD r8, r9, [r0, #16]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #48]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #56]
LDRD r6, r7, [r0]
STRD r4, r5, [r0, #48]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #48]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #48]
MOV r10, r8
MOV r11, r9
/* Calc new W[9] */
LDRD r4, r5, [sp, #56]
LSRS r6, r4, #19
LSRS r7, r5, #19
ORR r7, r7, r4, LSL #13
ORR r6, r6, r5, LSL #13
LSLS r8, r4, #3
LSLS r9, r5, #3
ORR r9, r9, r4, LSR #29
ORR r8, r8, r5, LSR #29
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #6
LSRS r9, r5, #6
ORR r8, r8, r5, LSL #26
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #72]
LDRD r8, r9, [sp, #16]
ADDS r4, r4, r6
ADC r5, r5, r7
ADDS r4, r4, r8
ADC r5, r5, r9
STRD r4, r5, [sp, #72]
LDRD r4, r5, [sp, #80]
LSRS r6, r4, #1
LSRS r7, r5, #1
ORR r7, r7, r4, LSL #31
ORR r6, r6, r5, LSL #31
LSRS r8, r4, #8
LSRS r9, r5, #8
ORR r9, r9, r4, LSL #24
ORR r8, r8, r5, LSL #24
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #7
LSRS r9, r5, #7
ORR r8, r8, r5, LSL #25
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #72]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #72]
/* Round 10 */
LDRD r4, r5, [r0, #16]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #40]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #40]
LDRD r4, r5, [r0, #16]
LDRD r6, r7, [r0, #24]
LDRD r8, r9, [r0, #32]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #40]
LDRD r8, r9, [sp, #80]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #80]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #8]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #40]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #48]
STRD r8, r9, [r0, #8]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #40]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #48]
LDRD r6, r7, [r0, #56]
STRD r4, r5, [r0, #40]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #40]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #40]
MOV r10, r8
MOV r11, r9
/* Calc new W[10] */
LDRD r4, r5, [sp, #64]
LSRS r6, r4, #19
LSRS r7, r5, #19
ORR r7, r7, r4, LSL #13
ORR r6, r6, r5, LSL #13
LSLS r8, r4, #3
LSLS r9, r5, #3
ORR r9, r9, r4, LSR #29
ORR r8, r8, r5, LSR #29
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #6
LSRS r9, r5, #6
ORR r8, r8, r5, LSL #26
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #80]
LDRD r8, r9, [sp, #24]
ADDS r4, r4, r6
ADC r5, r5, r7
ADDS r4, r4, r8
ADC r5, r5, r9
STRD r4, r5, [sp, #80]
LDRD r4, r5, [sp, #88]
LSRS r6, r4, #1
LSRS r7, r5, #1
ORR r7, r7, r4, LSL #31
ORR r6, r6, r5, LSL #31
LSRS r8, r4, #8
LSRS r9, r5, #8
ORR r9, r9, r4, LSL #24
ORR r8, r8, r5, LSL #24
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #7
LSRS r9, r5, #7
ORR r8, r8, r5, LSL #25
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #80]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #80]
/* Round 11 */
LDRD r4, r5, [r0, #8]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #32]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #32]
LDRD r4, r5, [r0, #8]
LDRD r6, r7, [r0, #16]
LDRD r8, r9, [r0, #24]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #32]
LDRD r8, r9, [sp, #88]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #88]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #32]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #40]
STRD r8, r9, [r0]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #32]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #40]
LDRD r6, r7, [r0, #48]
STRD r4, r5, [r0, #32]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #32]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #32]
MOV r10, r8
MOV r11, r9
/* Calc new W[11] */
LDRD r4, r5, [sp, #72]
LSRS r6, r4, #19
LSRS r7, r5, #19
ORR r7, r7, r4, LSL #13
ORR r6, r6, r5, LSL #13
LSLS r8, r4, #3
LSLS r9, r5, #3
ORR r9, r9, r4, LSR #29
ORR r8, r8, r5, LSR #29
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #6
LSRS r9, r5, #6
ORR r8, r8, r5, LSL #26
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #88]
LDRD r8, r9, [sp, #32]
ADDS r4, r4, r6
ADC r5, r5, r7
ADDS r4, r4, r8
ADC r5, r5, r9
STRD r4, r5, [sp, #88]
LDRD r4, r5, [sp, #96]
LSRS r6, r4, #1
LSRS r7, r5, #1
ORR r7, r7, r4, LSL #31
ORR r6, r6, r5, LSL #31
LSRS r8, r4, #8
LSRS r9, r5, #8
ORR r9, r9, r4, LSL #24
ORR r8, r8, r5, LSL #24
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #7
LSRS r9, r5, #7
ORR r8, r8, r5, LSL #25
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #88]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #88]
/* Round 12 */
LDRD r4, r5, [r0]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #24]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #24]
LDRD r4, r5, [r0]
LDRD r6, r7, [r0, #8]
LDRD r8, r9, [r0, #16]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #24]
LDRD r8, r9, [sp, #96]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #96]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #56]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #24]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #32]
STRD r8, r9, [r0, #56]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #24]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #32]
LDRD r6, r7, [r0, #40]
STRD r4, r5, [r0, #24]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #24]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #24]
MOV r10, r8
MOV r11, r9
/* Calc new W[12] */
LDRD r4, r5, [sp, #80]
LSRS r6, r4, #19
LSRS r7, r5, #19
ORR r7, r7, r4, LSL #13
ORR r6, r6, r5, LSL #13
LSLS r8, r4, #3
LSLS r9, r5, #3
ORR r9, r9, r4, LSR #29
ORR r8, r8, r5, LSR #29
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #6
LSRS r9, r5, #6
ORR r8, r8, r5, LSL #26
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #96]
LDRD r8, r9, [sp, #40]
ADDS r4, r4, r6
ADC r5, r5, r7
ADDS r4, r4, r8
ADC r5, r5, r9
STRD r4, r5, [sp, #96]
LDRD r4, r5, [sp, #104]
LSRS r6, r4, #1
LSRS r7, r5, #1
ORR r7, r7, r4, LSL #31
ORR r6, r6, r5, LSL #31
LSRS r8, r4, #8
LSRS r9, r5, #8
ORR r9, r9, r4, LSL #24
ORR r8, r8, r5, LSL #24
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #7
LSRS r9, r5, #7
ORR r8, r8, r5, LSL #25
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #96]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #96]
/* Round 13 */
LDRD r4, r5, [r0, #56]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #16]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #16]
LDRD r4, r5, [r0, #56]
LDRD r6, r7, [r0]
LDRD r8, r9, [r0, #8]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #16]
LDRD r8, r9, [sp, #104]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #104]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #48]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #16]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #24]
STRD r8, r9, [r0, #48]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #16]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #24]
LDRD r6, r7, [r0, #32]
STRD r4, r5, [r0, #16]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #16]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #16]
MOV r10, r8
MOV r11, r9
/* Calc new W[13] */
LDRD r4, r5, [sp, #88]
LSRS r6, r4, #19
LSRS r7, r5, #19
ORR r7, r7, r4, LSL #13
ORR r6, r6, r5, LSL #13
LSLS r8, r4, #3
LSLS r9, r5, #3
ORR r9, r9, r4, LSR #29
ORR r8, r8, r5, LSR #29
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #6
LSRS r9, r5, #6
ORR r8, r8, r5, LSL #26
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #104]
LDRD r8, r9, [sp, #48]
ADDS r4, r4, r6
ADC r5, r5, r7
ADDS r4, r4, r8
ADC r5, r5, r9
STRD r4, r5, [sp, #104]
LDRD r4, r5, [sp, #112]
LSRS r6, r4, #1
LSRS r7, r5, #1
ORR r7, r7, r4, LSL #31
ORR r6, r6, r5, LSL #31
LSRS r8, r4, #8
LSRS r9, r5, #8
ORR r9, r9, r4, LSL #24
ORR r8, r8, r5, LSL #24
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #7
LSRS r9, r5, #7
ORR r8, r8, r5, LSL #25
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #104]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #104]
/* Round 14 */
LDRD r4, r5, [r0, #48]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #8]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #8]
LDRD r4, r5, [r0, #48]
LDRD r6, r7, [r0, #56]
LDRD r8, r9, [r0]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #8]
LDRD r8, r9, [sp, #112]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #112]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #40]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #8]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #16]
STRD r8, r9, [r0, #40]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #8]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #16]
LDRD r6, r7, [r0, #24]
STRD r4, r5, [r0, #8]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #8]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #8]
MOV r10, r8
MOV r11, r9
/* Calc new W[14] */
LDRD r4, r5, [sp, #96]
LSRS r6, r4, #19
LSRS r7, r5, #19
ORR r7, r7, r4, LSL #13
ORR r6, r6, r5, LSL #13
LSLS r8, r4, #3
LSLS r9, r5, #3
ORR r9, r9, r4, LSR #29
ORR r8, r8, r5, LSR #29
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #6
LSRS r9, r5, #6
ORR r8, r8, r5, LSL #26
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #112]
LDRD r8, r9, [sp, #56]
ADDS r4, r4, r6
ADC r5, r5, r7
ADDS r4, r4, r8
ADC r5, r5, r9
STRD r4, r5, [sp, #112]
LDRD r4, r5, [sp, #120]
LSRS r6, r4, #1
LSRS r7, r5, #1
ORR r7, r7, r4, LSL #31
ORR r6, r6, r5, LSL #31
LSRS r8, r4, #8
LSRS r9, r5, #8
ORR r9, r9, r4, LSL #24
ORR r8, r8, r5, LSL #24
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #7
LSRS r9, r5, #7
ORR r8, r8, r5, LSL #25
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #112]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #112]
/* Round 15 */
LDRD r4, r5, [r0, #40]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0]
LDRD r4, r5, [r0, #40]
LDRD r6, r7, [r0, #48]
LDRD r8, r9, [r0, #56]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0]
LDRD r8, r9, [sp, #120]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #120]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #32]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #8]
STRD r8, r9, [r0, #32]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #8]
LDRD r6, r7, [r0, #16]
STRD r4, r5, [r0]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0]
MOV r10, r8
MOV r11, r9
/* Calc new W[15] */
LDRD r4, r5, [sp, #104]
LSRS r6, r4, #19
LSRS r7, r5, #19
ORR r7, r7, r4, LSL #13
ORR r6, r6, r5, LSL #13
LSLS r8, r4, #3
LSLS r9, r5, #3
ORR r9, r9, r4, LSR #29
ORR r8, r8, r5, LSR #29
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #6
LSRS r9, r5, #6
ORR r8, r8, r5, LSL #26
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #120]
LDRD r8, r9, [sp, #64]
ADDS r4, r4, r6
ADC r5, r5, r7
ADDS r4, r4, r8
ADC r5, r5, r9
STRD r4, r5, [sp, #120]
LDRD r4, r5, [sp]
LSRS r6, r4, #1
LSRS r7, r5, #1
ORR r7, r7, r4, LSL #31
ORR r6, r6, r5, LSL #31
LSRS r8, r4, #8
LSRS r9, r5, #8
ORR r9, r9, r4, LSL #24
ORR r8, r8, r5, LSL #24
EOR r7, r7, r9
EOR r6, r6, r8
LSRS r8, r4, #7
LSRS r9, r5, #7
ORR r8, r8, r5, LSL #25
EOR r7, r7, r9
EOR r6, r6, r8
LDRD r4, r5, [sp, #120]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #120]
ADD r3, r3, #0x80
SUBS r12, r12, #0x1
#ifdef __GNUC__
BNE L_SHA512_transform_len_start
#else
BNE.W L_SHA512_transform_len_start
#endif
/* Round 0 */
LDRD r4, r5, [r0, #32]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #56]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #56]
LDRD r4, r5, [r0, #32]
LDRD r6, r7, [r0, #40]
LDRD r8, r9, [r0, #48]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #56]
LDRD r8, r9, [sp]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #24]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #56]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0]
STRD r8, r9, [r0, #24]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #56]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0]
LDRD r6, r7, [r0, #8]
STRD r4, r5, [r0, #56]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #56]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #56]
MOV r10, r8
MOV r11, r9
/* Round 1 */
LDRD r4, r5, [r0, #24]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #48]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #48]
LDRD r4, r5, [r0, #24]
LDRD r6, r7, [r0, #32]
LDRD r8, r9, [r0, #40]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #48]
LDRD r8, r9, [sp, #8]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #8]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #16]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #48]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #56]
STRD r8, r9, [r0, #16]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #48]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #56]
LDRD r6, r7, [r0]
STRD r4, r5, [r0, #48]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #48]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #48]
MOV r10, r8
MOV r11, r9
/* Round 2 */
LDRD r4, r5, [r0, #16]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #40]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #40]
LDRD r4, r5, [r0, #16]
LDRD r6, r7, [r0, #24]
LDRD r8, r9, [r0, #32]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #40]
LDRD r8, r9, [sp, #16]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #16]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #8]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #40]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #48]
STRD r8, r9, [r0, #8]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #40]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #48]
LDRD r6, r7, [r0, #56]
STRD r4, r5, [r0, #40]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #40]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #40]
MOV r10, r8
MOV r11, r9
/* Round 3 */
LDRD r4, r5, [r0, #8]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #32]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #32]
LDRD r4, r5, [r0, #8]
LDRD r6, r7, [r0, #16]
LDRD r8, r9, [r0, #24]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #32]
LDRD r8, r9, [sp, #24]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #24]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #32]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #40]
STRD r8, r9, [r0]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #32]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #40]
LDRD r6, r7, [r0, #48]
STRD r4, r5, [r0, #32]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #32]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #32]
MOV r10, r8
MOV r11, r9
/* Round 4 */
LDRD r4, r5, [r0]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #24]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #24]
LDRD r4, r5, [r0]
LDRD r6, r7, [r0, #8]
LDRD r8, r9, [r0, #16]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #24]
LDRD r8, r9, [sp, #32]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #32]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #56]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #24]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #32]
STRD r8, r9, [r0, #56]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #24]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #32]
LDRD r6, r7, [r0, #40]
STRD r4, r5, [r0, #24]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #24]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #24]
MOV r10, r8
MOV r11, r9
/* Round 5 */
LDRD r4, r5, [r0, #56]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #16]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #16]
LDRD r4, r5, [r0, #56]
LDRD r6, r7, [r0]
LDRD r8, r9, [r0, #8]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #16]
LDRD r8, r9, [sp, #40]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #40]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #48]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #16]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #24]
STRD r8, r9, [r0, #48]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #16]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #24]
LDRD r6, r7, [r0, #32]
STRD r4, r5, [r0, #16]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #16]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #16]
MOV r10, r8
MOV r11, r9
/* Round 6 */
LDRD r4, r5, [r0, #48]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #8]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #8]
LDRD r4, r5, [r0, #48]
LDRD r6, r7, [r0, #56]
LDRD r8, r9, [r0]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #8]
LDRD r8, r9, [sp, #48]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #48]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #40]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #8]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #16]
STRD r8, r9, [r0, #40]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #8]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #16]
LDRD r6, r7, [r0, #24]
STRD r4, r5, [r0, #8]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #8]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #8]
MOV r10, r8
MOV r11, r9
/* Round 7 */
LDRD r4, r5, [r0, #40]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0]
LDRD r4, r5, [r0, #40]
LDRD r6, r7, [r0, #48]
LDRD r8, r9, [r0, #56]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0]
LDRD r8, r9, [sp, #56]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #56]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #32]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #8]
STRD r8, r9, [r0, #32]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #8]
LDRD r6, r7, [r0, #16]
STRD r4, r5, [r0]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0]
MOV r10, r8
MOV r11, r9
/* Round 8 */
LDRD r4, r5, [r0, #32]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #56]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #56]
LDRD r4, r5, [r0, #32]
LDRD r6, r7, [r0, #40]
LDRD r8, r9, [r0, #48]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #56]
LDRD r8, r9, [sp, #64]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #64]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #24]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #56]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0]
STRD r8, r9, [r0, #24]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #56]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0]
LDRD r6, r7, [r0, #8]
STRD r4, r5, [r0, #56]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #56]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #56]
MOV r10, r8
MOV r11, r9
/* Round 9 */
LDRD r4, r5, [r0, #24]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #48]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #48]
LDRD r4, r5, [r0, #24]
LDRD r6, r7, [r0, #32]
LDRD r8, r9, [r0, #40]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #48]
LDRD r8, r9, [sp, #72]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #72]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #16]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #48]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #56]
STRD r8, r9, [r0, #16]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #48]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #56]
LDRD r6, r7, [r0]
STRD r4, r5, [r0, #48]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #48]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #48]
MOV r10, r8
MOV r11, r9
/* Round 10 */
LDRD r4, r5, [r0, #16]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #40]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #40]
LDRD r4, r5, [r0, #16]
LDRD r6, r7, [r0, #24]
LDRD r8, r9, [r0, #32]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #40]
LDRD r8, r9, [sp, #80]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #80]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #8]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #40]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #48]
STRD r8, r9, [r0, #8]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #40]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #48]
LDRD r6, r7, [r0, #56]
STRD r4, r5, [r0, #40]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #40]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #40]
MOV r10, r8
MOV r11, r9
/* Round 11 */
LDRD r4, r5, [r0, #8]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #32]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #32]
LDRD r4, r5, [r0, #8]
LDRD r6, r7, [r0, #16]
LDRD r8, r9, [r0, #24]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #32]
LDRD r8, r9, [sp, #88]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #88]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #32]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #40]
STRD r8, r9, [r0]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #32]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #40]
LDRD r6, r7, [r0, #48]
STRD r4, r5, [r0, #32]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #32]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #32]
MOV r10, r8
MOV r11, r9
/* Round 12 */
LDRD r4, r5, [r0]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #24]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #24]
LDRD r4, r5, [r0]
LDRD r6, r7, [r0, #8]
LDRD r8, r9, [r0, #16]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #24]
LDRD r8, r9, [sp, #96]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #96]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #56]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #24]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #32]
STRD r8, r9, [r0, #56]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #24]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #32]
LDRD r6, r7, [r0, #40]
STRD r4, r5, [r0, #24]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #24]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #24]
MOV r10, r8
MOV r11, r9
/* Round 13 */
LDRD r4, r5, [r0, #56]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #16]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #16]
LDRD r4, r5, [r0, #56]
LDRD r6, r7, [r0]
LDRD r8, r9, [r0, #8]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #16]
LDRD r8, r9, [sp, #104]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #104]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #48]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #16]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #24]
STRD r8, r9, [r0, #48]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #16]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #24]
LDRD r6, r7, [r0, #32]
STRD r4, r5, [r0, #16]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #16]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #16]
MOV r10, r8
MOV r11, r9
/* Round 14 */
LDRD r4, r5, [r0, #48]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0, #8]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #8]
LDRD r4, r5, [r0, #48]
LDRD r6, r7, [r0, #56]
LDRD r8, r9, [r0]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0, #8]
LDRD r8, r9, [sp, #112]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #112]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #40]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0, #8]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #16]
STRD r8, r9, [r0, #40]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0, #8]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #16]
LDRD r6, r7, [r0, #24]
STRD r4, r5, [r0, #8]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0, #8]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0, #8]
MOV r10, r8
MOV r11, r9
/* Round 15 */
LDRD r4, r5, [r0, #40]
LSRS r6, r4, #14
LSRS r7, r5, #14
ORR r7, r7, r4, LSL #18
ORR r6, r6, r5, LSL #18
LSRS r8, r4, #18
LSRS r9, r5, #18
ORR r9, r9, r4, LSL #14
ORR r8, r8, r5, LSL #14
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #23
LSLS r9, r5, #23
ORR r9, r9, r4, LSR #9
ORR r8, r8, r5, LSR #9
LDRD r4, r5, [r0]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0]
LDRD r4, r5, [r0, #40]
LDRD r6, r7, [r0, #48]
LDRD r8, r9, [r0, #56]
EOR r6, r6, r8
EOR r7, r7, r9
AND r6, r6, r4
AND r7, r7, r5
EOR r6, r6, r8
EOR r7, r7, r9
LDRD r4, r5, [r0]
LDRD r8, r9, [sp, #120]
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r6, r7, [r3, #120]
ADDS r4, r4, r8
ADC r5, r5, r9
LDRD r8, r9, [r0, #32]
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [r0]
ADDS r8, r8, r4
ADC r9, r9, r5
LDRD r4, r5, [r0, #8]
STRD r8, r9, [r0, #32]
LSRS r6, r4, #28
LSRS r7, r5, #28
ORR r7, r7, r4, LSL #4
ORR r6, r6, r5, LSL #4
LSLS r8, r4, #30
LSLS r9, r5, #30
ORR r9, r9, r4, LSR #2
ORR r8, r8, r5, LSR #2
EOR r6, r6, r8
EOR r7, r7, r9
LSLS r8, r4, #25
LSLS r9, r5, #25
ORR r9, r9, r4, LSR #7
ORR r8, r8, r5, LSR #7
LDRD r4, r5, [r0]
EOR r6, r6, r8
EOR r7, r7, r9
ADDS r4, r4, r6
ADC r5, r5, r7
LDRD r8, r9, [r0, #8]
LDRD r6, r7, [r0, #16]
STRD r4, r5, [r0]
EOR r8, r8, r6
EOR r9, r9, r7
AND r10, r10, r8
AND r11, r11, r9
EOR r10, r10, r6
EOR r11, r11, r7
LDRD r6, r7, [r0]
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r6, r7, [r0]
MOV r10, r8
MOV r11, r9
/* Add in digest from start */
LDRD r4, r5, [r0]
LDRD r6, r7, [r0, #8]
LDRD r8, r9, [sp, #128]
LDRD r10, r11, [sp, #136]
ADDS r4, r4, r8
ADC r5, r5, r9
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r4, r5, [r0]
STRD r6, r7, [r0, #8]
STRD r4, r5, [sp, #128]
STRD r6, r7, [sp, #136]
LDRD r4, r5, [r0, #16]
LDRD r6, r7, [r0, #24]
LDRD r8, r9, [sp, #144]
LDRD r10, r11, [sp, #152]
ADDS r4, r4, r8
ADC r5, r5, r9
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r4, r5, [r0, #16]
STRD r6, r7, [r0, #24]
STRD r4, r5, [sp, #144]
STRD r6, r7, [sp, #152]
LDRD r4, r5, [r0, #32]
LDRD r6, r7, [r0, #40]
LDRD r8, r9, [sp, #160]
LDRD r10, r11, [sp, #168]
ADDS r4, r4, r8
ADC r5, r5, r9
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r4, r5, [r0, #32]
STRD r6, r7, [r0, #40]
STRD r4, r5, [sp, #160]
STRD r6, r7, [sp, #168]
LDRD r4, r5, [r0, #48]
LDRD r6, r7, [r0, #56]
LDRD r8, r9, [sp, #176]
LDRD r10, r11, [sp, #184]
ADDS r4, r4, r8
ADC r5, r5, r9
ADDS r6, r6, r10
ADC r7, r7, r11
STRD r4, r5, [r0, #48]
STRD r6, r7, [r0, #56]
STRD r4, r5, [sp, #176]
STRD r6, r7, [sp, #184]
SUBS r2, r2, #0x80
SUB r3, r3, #0x200
ADD r1, r1, #0x80
#ifdef __GNUC__
BNE L_SHA512_transform_len_begin
#else
BNE.W L_SHA512_transform_len_begin
#endif
EOR r0, r0, r0
ADD sp, sp, #0xc0
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
/* Cycle Count = 5021 */
.size Transform_Sha512_Len,.-Transform_Sha512_Len
#endif /* WOLFSSL_ARMASM_NO_NEON */
#endif /* WOLFSSL_SHA512 */
#endif /* !__aarch64__ && __thumb__ */
#endif /* WOLFSSL_ARMASM */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
#endif /* !WOLFSSL_ARMASM_INLINE */
/* --- file boundary (concatenation metadata, preserved as a comment) ---
 * repo: aenu1/aps3e
 * size: 54,929
 * path: app/src/main/cpp/rpcs3/3rdparty/wolfssl/wolfssl/wolfcrypt/src/port/arm/armv8-32-sha256-asm.S
 */
/* armv8-32-sha256-asm
*
* Copyright (C) 2006-2023 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
/* Generated using (from wolfssl):
* cd ../scripts
* ruby ./sha2/sha256.rb arm32 ../wolfssl/wolfcrypt/src/port/arm/armv8-32-sha256-asm.S
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif /* HAVE_CONFIG_H */
#include <wolfssl/wolfcrypt/settings.h>
#ifdef WOLFSSL_ARMASM
#if !defined(__aarch64__) && defined(__arm__) && !defined(__thumb__)
#ifndef WOLFSSL_ARMASM_INLINE
#ifndef NO_SHA256
#ifdef WOLFSSL_ARMASM_NO_NEON
.text
/* SHA-256 round constants K[0..63]: the first 32 bits of the fractional
 * parts of the cube roots of the first 64 prime numbers (FIPS 180-4,
 * section 4.2.2).  One 32-bit word is consumed per round; the transform
 * below walks this table via a pointer loaded with adr into r3. */
.type L_SHA256_transform_len_k, %object
.size L_SHA256_transform_len_k, 256
.align 4
L_SHA256_transform_len_k:
/* K[0..7] */
.word 0x428a2f98
.word 0x71374491
.word 0xb5c0fbcf
.word 0xe9b5dba5
.word 0x3956c25b
.word 0x59f111f1
.word 0x923f82a4
.word 0xab1c5ed5
/* K[8..15] */
.word 0xd807aa98
.word 0x12835b01
.word 0x243185be
.word 0x550c7dc3
.word 0x72be5d74
.word 0x80deb1fe
.word 0x9bdc06a7
.word 0xc19bf174
/* K[16..23] */
.word 0xe49b69c1
.word 0xefbe4786
.word 0xfc19dc6
.word 0x240ca1cc
.word 0x2de92c6f
.word 0x4a7484aa
.word 0x5cb0a9dc
.word 0x76f988da
/* K[24..31] */
.word 0x983e5152
.word 0xa831c66d
.word 0xb00327c8
.word 0xbf597fc7
.word 0xc6e00bf3
.word 0xd5a79147
.word 0x6ca6351
.word 0x14292967
/* K[32..39] */
.word 0x27b70a85
.word 0x2e1b2138
.word 0x4d2c6dfc
.word 0x53380d13
.word 0x650a7354
.word 0x766a0abb
.word 0x81c2c92e
.word 0x92722c85
/* K[40..47] */
.word 0xa2bfe8a1
.word 0xa81a664b
.word 0xc24b8b70
.word 0xc76c51a3
.word 0xd192e819
.word 0xd6990624
.word 0xf40e3585
.word 0x106aa070
/* K[48..55] */
.word 0x19a4c116
.word 0x1e376c08
.word 0x2748774c
.word 0x34b0bcb5
.word 0x391c0cb3
.word 0x4ed8aa4a
.word 0x5b9cca4f
.word 0x682e6ff3
/* K[56..63] */
.word 0x748f82ee
.word 0x78a5636f
.word 0x84c87814
.word 0x8cc70208
.word 0x90befffa
.word 0xa4506ceb
.word 0xbef9a3f7
.word 0xc67178f2
.text
.align 4
.globl Transform_Sha256_Len
.type Transform_Sha256_Len, %function
Transform_Sha256_Len:
push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
sub sp, sp, #0xc0
adr r3, L_SHA256_transform_len_k
# Copy digest to add in at end
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #8]
ldr r7, [r0, #12]
#else
ldrd r6, r7, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #16]
ldr r9, [r0, #20]
#else
ldrd r8, r9, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [r0, #24]
ldr r11, [r0, #28]
#else
ldrd r10, r11, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #64]
str r5, [sp, #68]
#else
strd r4, r5, [sp, #64]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [sp, #72]
str r7, [sp, #76]
#else
strd r6, r7, [sp, #72]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [sp, #80]
str r9, [sp, #84]
#else
strd r8, r9, [sp, #80]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r10, [sp, #88]
str r11, [sp, #92]
#else
strd r10, r11, [sp, #88]
#endif
# Start of loop processing a block
L_SHA256_transform_len_begin:
# Load, Reverse and Store W - 64 bytes
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
ldr r4, [r1]
ldr r5, [r1, #4]
ldr r6, [r1, #8]
ldr r7, [r1, #12]
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp]
str r5, [sp, #4]
#else
strd r4, r5, [sp]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [sp, #8]
str r7, [sp, #12]
#else
strd r6, r7, [sp, #8]
#endif
ldr r4, [r1, #16]
ldr r5, [r1, #20]
ldr r6, [r1, #24]
ldr r7, [r1, #28]
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #16]
str r5, [sp, #20]
#else
strd r4, r5, [sp, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [sp, #24]
str r7, [sp, #28]
#else
strd r6, r7, [sp, #24]
#endif
ldr r4, [r1, #32]
ldr r5, [r1, #36]
ldr r6, [r1, #40]
ldr r7, [r1, #44]
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #32]
str r5, [sp, #36]
#else
strd r4, r5, [sp, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [sp, #40]
str r7, [sp, #44]
#else
strd r6, r7, [sp, #40]
#endif
ldr r4, [r1, #48]
ldr r5, [r1, #52]
ldr r6, [r1, #56]
ldr r7, [r1, #60]
eor r8, r4, r4, ror #16
eor r9, r5, r5, ror #16
eor r10, r6, r6, ror #16
eor r11, r7, r7, ror #16
bic r8, r8, #0xff0000
bic r9, r9, #0xff0000
bic r10, r10, #0xff0000
bic r11, r11, #0xff0000
ror r4, r4, #8
ror r5, r5, #8
ror r6, r6, #8
ror r7, r7, #8
eor r4, r4, r8, lsr #8
eor r5, r5, r9, lsr #8
eor r6, r6, r10, lsr #8
eor r7, r7, r11, lsr #8
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #48]
str r5, [sp, #52]
#else
strd r4, r5, [sp, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [sp, #56]
str r7, [sp, #60]
#else
strd r6, r7, [sp, #56]
#endif
#else
ldr r4, [r1]
ldr r5, [r1, #4]
ldr r6, [r1, #8]
ldr r7, [r1, #12]
ldr r8, [r1, #16]
ldr r9, [r1, #20]
ldr r10, [r1, #24]
ldr r11, [r1, #28]
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
rev r8, r8
rev r9, r9
rev r10, r10
rev r11, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp]
str r5, [sp, #4]
#else
strd r4, r5, [sp]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [sp, #8]
str r7, [sp, #12]
#else
strd r6, r7, [sp, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [sp, #16]
str r9, [sp, #20]
#else
strd r8, r9, [sp, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r10, [sp, #24]
str r11, [sp, #28]
#else
strd r10, r11, [sp, #24]
#endif
ldr r4, [r1, #32]
ldr r5, [r1, #36]
ldr r6, [r1, #40]
ldr r7, [r1, #44]
ldr r8, [r1, #48]
ldr r9, [r1, #52]
ldr r10, [r1, #56]
ldr r11, [r1, #60]
rev r4, r4
rev r5, r5
rev r6, r6
rev r7, r7
rev r8, r8
rev r9, r9
rev r10, r10
rev r11, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #32]
str r5, [sp, #36]
#else
strd r4, r5, [sp, #32]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [sp, #40]
str r7, [sp, #44]
#else
strd r6, r7, [sp, #40]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [sp, #48]
str r9, [sp, #52]
#else
strd r8, r9, [sp, #48]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r10, [sp, #56]
str r11, [sp, #60]
#else
strd r10, r11, [sp, #56]
#endif
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
ldr r11, [r0, #4]
ldr r4, [r0, #8]
eor r11, r11, r4
mov r12, #3
# Start of 16 rounds
L_SHA256_transform_len_start:
# Round 0
ldr r5, [r0, #16]
ldr r6, [r0, #20]
ldr r7, [r0, #24]
ldr r9, [r0, #28]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp]
ldr r6, [r3]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0]
ldr r6, [r0, #4]
ldr r7, [r0, #8]
ldr r8, [r0, #12]
ror r4, r5, #2
eor r10, r5, r6
eor r4, r4, r5, ror #13
and r11, r11, r10
eor r4, r4, r5, ror #22
eor r11, r11, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r11
str r8, [r0, #12]
str r9, [r0, #28]
# Calc new W[0]
ldr r6, [sp, #56]
ldr r7, [sp, #36]
ldr r8, [sp, #4]
ldr r9, [sp]
ror r4, r6, #17
ror r5, r8, #7
eor r4, r4, r6, ror #19
eor r5, r5, r8, ror #18
eor r4, r4, r6, lsr #10
eor r5, r5, r8, lsr #3
add r9, r9, r7
add r4, r4, r5
add r9, r9, r4
str r9, [sp]
# Round 1
ldr r5, [r0, #12]
ldr r6, [r0, #16]
ldr r7, [r0, #20]
ldr r9, [r0, #24]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #4]
ldr r6, [r3, #4]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #28]
ldr r6, [r0]
ldr r7, [r0, #4]
ldr r8, [r0, #8]
ror r4, r5, #2
eor r11, r5, r6
eor r4, r4, r5, ror #13
and r10, r10, r11
eor r4, r4, r5, ror #22
eor r10, r10, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r10
str r8, [r0, #8]
str r9, [r0, #24]
# Calc new W[1]
ldr r6, [sp, #60]
ldr r7, [sp, #40]
ldr r8, [sp, #8]
ldr r9, [sp, #4]
ror r4, r6, #17
ror r5, r8, #7
eor r4, r4, r6, ror #19
eor r5, r5, r8, ror #18
eor r4, r4, r6, lsr #10
eor r5, r5, r8, lsr #3
add r9, r9, r7
add r4, r4, r5
add r9, r9, r4
str r9, [sp, #4]
# Round 2
ldr r5, [r0, #8]
ldr r6, [r0, #12]
ldr r7, [r0, #16]
ldr r9, [r0, #20]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #8]
ldr r6, [r3, #8]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #24]
ldr r6, [r0, #28]
ldr r7, [r0]
ldr r8, [r0, #4]
ror r4, r5, #2
eor r10, r5, r6
eor r4, r4, r5, ror #13
and r11, r11, r10
eor r4, r4, r5, ror #22
eor r11, r11, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r11
str r8, [r0, #4]
str r9, [r0, #20]
# Calc new W[2]
ldr r6, [sp]
ldr r7, [sp, #44]
ldr r8, [sp, #12]
ldr r9, [sp, #8]
ror r4, r6, #17
ror r5, r8, #7
eor r4, r4, r6, ror #19
eor r5, r5, r8, ror #18
eor r4, r4, r6, lsr #10
eor r5, r5, r8, lsr #3
add r9, r9, r7
add r4, r4, r5
add r9, r9, r4
str r9, [sp, #8]
# Round 3
ldr r5, [r0, #4]
ldr r6, [r0, #8]
ldr r7, [r0, #12]
ldr r9, [r0, #16]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #12]
ldr r6, [r3, #12]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #20]
ldr r6, [r0, #24]
ldr r7, [r0, #28]
ldr r8, [r0]
ror r4, r5, #2
eor r11, r5, r6
eor r4, r4, r5, ror #13
and r10, r10, r11
eor r4, r4, r5, ror #22
eor r10, r10, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r10
str r8, [r0]
str r9, [r0, #16]
# Calc new W[3]
ldr r6, [sp, #4]
ldr r7, [sp, #48]
ldr r8, [sp, #16]
ldr r9, [sp, #12]
ror r4, r6, #17
ror r5, r8, #7
eor r4, r4, r6, ror #19
eor r5, r5, r8, ror #18
eor r4, r4, r6, lsr #10
eor r5, r5, r8, lsr #3
add r9, r9, r7
add r4, r4, r5
add r9, r9, r4
str r9, [sp, #12]
# Round 4
ldr r5, [r0]
ldr r6, [r0, #4]
ldr r7, [r0, #8]
ldr r9, [r0, #12]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #16]
ldr r6, [r3, #16]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #16]
ldr r6, [r0, #20]
ldr r7, [r0, #24]
ldr r8, [r0, #28]
ror r4, r5, #2
eor r10, r5, r6
eor r4, r4, r5, ror #13
and r11, r11, r10
eor r4, r4, r5, ror #22
eor r11, r11, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r11
str r8, [r0, #28]
str r9, [r0, #12]
# Calc new W[4]
ldr r6, [sp, #8]
ldr r7, [sp, #52]
ldr r8, [sp, #20]
ldr r9, [sp, #16]
ror r4, r6, #17
ror r5, r8, #7
eor r4, r4, r6, ror #19
eor r5, r5, r8, ror #18
eor r4, r4, r6, lsr #10
eor r5, r5, r8, lsr #3
add r9, r9, r7
add r4, r4, r5
add r9, r9, r4
str r9, [sp, #16]
# Round 5
ldr r5, [r0, #28]
ldr r6, [r0]
ldr r7, [r0, #4]
ldr r9, [r0, #8]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #20]
ldr r6, [r3, #20]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #12]
ldr r6, [r0, #16]
ldr r7, [r0, #20]
ldr r8, [r0, #24]
ror r4, r5, #2
eor r11, r5, r6
eor r4, r4, r5, ror #13
and r10, r10, r11
eor r4, r4, r5, ror #22
eor r10, r10, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r10
str r8, [r0, #24]
str r9, [r0, #8]
# Calc new W[5]
ldr r6, [sp, #12]
ldr r7, [sp, #56]
ldr r8, [sp, #24]
ldr r9, [sp, #20]
ror r4, r6, #17
ror r5, r8, #7
eor r4, r4, r6, ror #19
eor r5, r5, r8, ror #18
eor r4, r4, r6, lsr #10
eor r5, r5, r8, lsr #3
add r9, r9, r7
add r4, r4, r5
add r9, r9, r4
str r9, [sp, #20]
# Round 6
ldr r5, [r0, #24]
ldr r6, [r0, #28]
ldr r7, [r0]
ldr r9, [r0, #4]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #24]
ldr r6, [r3, #24]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #8]
ldr r6, [r0, #12]
ldr r7, [r0, #16]
ldr r8, [r0, #20]
ror r4, r5, #2
eor r10, r5, r6
eor r4, r4, r5, ror #13
and r11, r11, r10
eor r4, r4, r5, ror #22
eor r11, r11, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r11
str r8, [r0, #20]
str r9, [r0, #4]
# Calc new W[6]
ldr r6, [sp, #16]
ldr r7, [sp, #60]
ldr r8, [sp, #28]
ldr r9, [sp, #24]
ror r4, r6, #17
ror r5, r8, #7
eor r4, r4, r6, ror #19
eor r5, r5, r8, ror #18
eor r4, r4, r6, lsr #10
eor r5, r5, r8, lsr #3
add r9, r9, r7
add r4, r4, r5
add r9, r9, r4
str r9, [sp, #24]
# Round 7
ldr r5, [r0, #20]
ldr r6, [r0, #24]
ldr r7, [r0, #28]
ldr r9, [r0]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #28]
ldr r6, [r3, #28]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #4]
ldr r6, [r0, #8]
ldr r7, [r0, #12]
ldr r8, [r0, #16]
ror r4, r5, #2
eor r11, r5, r6
eor r4, r4, r5, ror #13
and r10, r10, r11
eor r4, r4, r5, ror #22
eor r10, r10, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r10
str r8, [r0, #16]
str r9, [r0]
# Calc new W[7]
ldr r6, [sp, #20]
ldr r7, [sp]
ldr r8, [sp, #32]
ldr r9, [sp, #28]
ror r4, r6, #17
ror r5, r8, #7
eor r4, r4, r6, ror #19
eor r5, r5, r8, ror #18
eor r4, r4, r6, lsr #10
eor r5, r5, r8, lsr #3
add r9, r9, r7
add r4, r4, r5
add r9, r9, r4
str r9, [sp, #28]
# Round 8
ldr r5, [r0, #16]
ldr r6, [r0, #20]
ldr r7, [r0, #24]
ldr r9, [r0, #28]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #32]
ldr r6, [r3, #32]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0]
ldr r6, [r0, #4]
ldr r7, [r0, #8]
ldr r8, [r0, #12]
ror r4, r5, #2
eor r10, r5, r6
eor r4, r4, r5, ror #13
and r11, r11, r10
eor r4, r4, r5, ror #22
eor r11, r11, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r11
str r8, [r0, #12]
str r9, [r0, #28]
# Calc new W[8]
ldr r6, [sp, #24]
ldr r7, [sp, #4]
ldr r8, [sp, #36]
ldr r9, [sp, #32]
ror r4, r6, #17
ror r5, r8, #7
eor r4, r4, r6, ror #19
eor r5, r5, r8, ror #18
eor r4, r4, r6, lsr #10
eor r5, r5, r8, lsr #3
add r9, r9, r7
add r4, r4, r5
add r9, r9, r4
str r9, [sp, #32]
# Round 9
ldr r5, [r0, #12]
ldr r6, [r0, #16]
ldr r7, [r0, #20]
ldr r9, [r0, #24]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #36]
ldr r6, [r3, #36]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #28]
ldr r6, [r0]
ldr r7, [r0, #4]
ldr r8, [r0, #8]
ror r4, r5, #2
eor r11, r5, r6
eor r4, r4, r5, ror #13
and r10, r10, r11
eor r4, r4, r5, ror #22
eor r10, r10, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r10
str r8, [r0, #8]
str r9, [r0, #24]
# Calc new W[9]
ldr r6, [sp, #28]
ldr r7, [sp, #8]
ldr r8, [sp, #40]
ldr r9, [sp, #36]
ror r4, r6, #17
ror r5, r8, #7
eor r4, r4, r6, ror #19
eor r5, r5, r8, ror #18
eor r4, r4, r6, lsr #10
eor r5, r5, r8, lsr #3
add r9, r9, r7
add r4, r4, r5
add r9, r9, r4
str r9, [sp, #36]
# Round 10
ldr r5, [r0, #8]
ldr r6, [r0, #12]
ldr r7, [r0, #16]
ldr r9, [r0, #20]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #40]
ldr r6, [r3, #40]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #24]
ldr r6, [r0, #28]
ldr r7, [r0]
ldr r8, [r0, #4]
ror r4, r5, #2
eor r10, r5, r6
eor r4, r4, r5, ror #13
and r11, r11, r10
eor r4, r4, r5, ror #22
eor r11, r11, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r11
str r8, [r0, #4]
str r9, [r0, #20]
# Calc new W[10]
ldr r6, [sp, #32]
ldr r7, [sp, #12]
ldr r8, [sp, #44]
ldr r9, [sp, #40]
ror r4, r6, #17
ror r5, r8, #7
eor r4, r4, r6, ror #19
eor r5, r5, r8, ror #18
eor r4, r4, r6, lsr #10
eor r5, r5, r8, lsr #3
add r9, r9, r7
add r4, r4, r5
add r9, r9, r4
str r9, [sp, #40]
# Round 11
ldr r5, [r0, #4]
ldr r6, [r0, #8]
ldr r7, [r0, #12]
ldr r9, [r0, #16]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #44]
ldr r6, [r3, #44]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #20]
ldr r6, [r0, #24]
ldr r7, [r0, #28]
ldr r8, [r0]
ror r4, r5, #2
eor r11, r5, r6
eor r4, r4, r5, ror #13
and r10, r10, r11
eor r4, r4, r5, ror #22
eor r10, r10, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r10
str r8, [r0]
str r9, [r0, #16]
# Calc new W[11]
ldr r6, [sp, #36]
ldr r7, [sp, #16]
ldr r8, [sp, #48]
ldr r9, [sp, #44]
ror r4, r6, #17
ror r5, r8, #7
eor r4, r4, r6, ror #19
eor r5, r5, r8, ror #18
eor r4, r4, r6, lsr #10
eor r5, r5, r8, lsr #3
add r9, r9, r7
add r4, r4, r5
add r9, r9, r4
str r9, [sp, #44]
# Round 12
ldr r5, [r0]
ldr r6, [r0, #4]
ldr r7, [r0, #8]
ldr r9, [r0, #12]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #48]
ldr r6, [r3, #48]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #16]
ldr r6, [r0, #20]
ldr r7, [r0, #24]
ldr r8, [r0, #28]
ror r4, r5, #2
eor r10, r5, r6
eor r4, r4, r5, ror #13
and r11, r11, r10
eor r4, r4, r5, ror #22
eor r11, r11, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r11
str r8, [r0, #28]
str r9, [r0, #12]
# Calc new W[12]
ldr r6, [sp, #40]
ldr r7, [sp, #20]
ldr r8, [sp, #52]
ldr r9, [sp, #48]
ror r4, r6, #17
ror r5, r8, #7
eor r4, r4, r6, ror #19
eor r5, r5, r8, ror #18
eor r4, r4, r6, lsr #10
eor r5, r5, r8, lsr #3
add r9, r9, r7
add r4, r4, r5
add r9, r9, r4
str r9, [sp, #48]
# Round 13
ldr r5, [r0, #28]
ldr r6, [r0]
ldr r7, [r0, #4]
ldr r9, [r0, #8]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #52]
ldr r6, [r3, #52]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #12]
ldr r6, [r0, #16]
ldr r7, [r0, #20]
ldr r8, [r0, #24]
ror r4, r5, #2
eor r11, r5, r6
eor r4, r4, r5, ror #13
and r10, r10, r11
eor r4, r4, r5, ror #22
eor r10, r10, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r10
str r8, [r0, #24]
str r9, [r0, #8]
# Calc new W[13]
ldr r6, [sp, #44]
ldr r7, [sp, #24]
ldr r8, [sp, #56]
ldr r9, [sp, #52]
ror r4, r6, #17
ror r5, r8, #7
eor r4, r4, r6, ror #19
eor r5, r5, r8, ror #18
eor r4, r4, r6, lsr #10
eor r5, r5, r8, lsr #3
add r9, r9, r7
add r4, r4, r5
add r9, r9, r4
str r9, [sp, #52]
# Round 14
ldr r5, [r0, #24]
ldr r6, [r0, #28]
ldr r7, [r0]
ldr r9, [r0, #4]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #56]
ldr r6, [r3, #56]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #8]
ldr r6, [r0, #12]
ldr r7, [r0, #16]
ldr r8, [r0, #20]
ror r4, r5, #2
eor r10, r5, r6
eor r4, r4, r5, ror #13
and r11, r11, r10
eor r4, r4, r5, ror #22
eor r11, r11, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r11
str r8, [r0, #20]
str r9, [r0, #4]
# Calc new W[14]
ldr r6, [sp, #48]
ldr r7, [sp, #28]
ldr r8, [sp, #60]
ldr r9, [sp, #56]
ror r4, r6, #17
ror r5, r8, #7
eor r4, r4, r6, ror #19
eor r5, r5, r8, ror #18
eor r4, r4, r6, lsr #10
eor r5, r5, r8, lsr #3
add r9, r9, r7
add r4, r4, r5
add r9, r9, r4
str r9, [sp, #56]
# Round 15
ldr r5, [r0, #20]
ldr r6, [r0, #24]
ldr r7, [r0, #28]
ldr r9, [r0]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #60]
ldr r6, [r3, #60]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #4]
ldr r6, [r0, #8]
ldr r7, [r0, #12]
ldr r8, [r0, #16]
ror r4, r5, #2
eor r11, r5, r6
eor r4, r4, r5, ror #13
and r10, r10, r11
eor r4, r4, r5, ror #22
eor r10, r10, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r10
str r8, [r0, #16]
str r9, [r0]
# Calc new W[15]
ldr r6, [sp, #52]
ldr r7, [sp, #32]
ldr r8, [sp]
ldr r9, [sp, #60]
ror r4, r6, #17
ror r5, r8, #7
eor r4, r4, r6, ror #19
eor r5, r5, r8, ror #18
eor r4, r4, r6, lsr #10
eor r5, r5, r8, lsr #3
add r9, r9, r7
add r4, r4, r5
add r9, r9, r4
str r9, [sp, #60]
add r3, r3, #0x40
subs r12, r12, #1
bne L_SHA256_transform_len_start
# Round 0
ldr r5, [r0, #16]
ldr r6, [r0, #20]
ldr r7, [r0, #24]
ldr r9, [r0, #28]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp]
ldr r6, [r3]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0]
ldr r6, [r0, #4]
ldr r7, [r0, #8]
ldr r8, [r0, #12]
ror r4, r5, #2
eor r10, r5, r6
eor r4, r4, r5, ror #13
and r11, r11, r10
eor r4, r4, r5, ror #22
eor r11, r11, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r11
str r8, [r0, #12]
str r9, [r0, #28]
# Round 1
ldr r5, [r0, #12]
ldr r6, [r0, #16]
ldr r7, [r0, #20]
ldr r9, [r0, #24]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #4]
ldr r6, [r3, #4]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #28]
ldr r6, [r0]
ldr r7, [r0, #4]
ldr r8, [r0, #8]
ror r4, r5, #2
eor r11, r5, r6
eor r4, r4, r5, ror #13
and r10, r10, r11
eor r4, r4, r5, ror #22
eor r10, r10, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r10
str r8, [r0, #8]
str r9, [r0, #24]
# Round 2
ldr r5, [r0, #8]
ldr r6, [r0, #12]
ldr r7, [r0, #16]
ldr r9, [r0, #20]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #8]
ldr r6, [r3, #8]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #24]
ldr r6, [r0, #28]
ldr r7, [r0]
ldr r8, [r0, #4]
ror r4, r5, #2
eor r10, r5, r6
eor r4, r4, r5, ror #13
and r11, r11, r10
eor r4, r4, r5, ror #22
eor r11, r11, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r11
str r8, [r0, #4]
str r9, [r0, #20]
# Round 3
ldr r5, [r0, #4]
ldr r6, [r0, #8]
ldr r7, [r0, #12]
ldr r9, [r0, #16]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #12]
ldr r6, [r3, #12]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #20]
ldr r6, [r0, #24]
ldr r7, [r0, #28]
ldr r8, [r0]
ror r4, r5, #2
eor r11, r5, r6
eor r4, r4, r5, ror #13
and r10, r10, r11
eor r4, r4, r5, ror #22
eor r10, r10, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r10
str r8, [r0]
str r9, [r0, #16]
# Round 4
ldr r5, [r0]
ldr r6, [r0, #4]
ldr r7, [r0, #8]
ldr r9, [r0, #12]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #16]
ldr r6, [r3, #16]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #16]
ldr r6, [r0, #20]
ldr r7, [r0, #24]
ldr r8, [r0, #28]
ror r4, r5, #2
eor r10, r5, r6
eor r4, r4, r5, ror #13
and r11, r11, r10
eor r4, r4, r5, ror #22
eor r11, r11, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r11
str r8, [r0, #28]
str r9, [r0, #12]
# Round 5
ldr r5, [r0, #28]
ldr r6, [r0]
ldr r7, [r0, #4]
ldr r9, [r0, #8]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #20]
ldr r6, [r3, #20]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #12]
ldr r6, [r0, #16]
ldr r7, [r0, #20]
ldr r8, [r0, #24]
ror r4, r5, #2
eor r11, r5, r6
eor r4, r4, r5, ror #13
and r10, r10, r11
eor r4, r4, r5, ror #22
eor r10, r10, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r10
str r8, [r0, #24]
str r9, [r0, #8]
# Round 6
ldr r5, [r0, #24]
ldr r6, [r0, #28]
ldr r7, [r0]
ldr r9, [r0, #4]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #24]
ldr r6, [r3, #24]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #8]
ldr r6, [r0, #12]
ldr r7, [r0, #16]
ldr r8, [r0, #20]
ror r4, r5, #2
eor r10, r5, r6
eor r4, r4, r5, ror #13
and r11, r11, r10
eor r4, r4, r5, ror #22
eor r11, r11, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r11
str r8, [r0, #20]
str r9, [r0, #4]
# Round 7
ldr r5, [r0, #20]
ldr r6, [r0, #24]
ldr r7, [r0, #28]
ldr r9, [r0]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #28]
ldr r6, [r3, #28]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #4]
ldr r6, [r0, #8]
ldr r7, [r0, #12]
ldr r8, [r0, #16]
ror r4, r5, #2
eor r11, r5, r6
eor r4, r4, r5, ror #13
and r10, r10, r11
eor r4, r4, r5, ror #22
eor r10, r10, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r10
str r8, [r0, #16]
str r9, [r0]
# Round 8
ldr r5, [r0, #16]
ldr r6, [r0, #20]
ldr r7, [r0, #24]
ldr r9, [r0, #28]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #32]
ldr r6, [r3, #32]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0]
ldr r6, [r0, #4]
ldr r7, [r0, #8]
ldr r8, [r0, #12]
ror r4, r5, #2
eor r10, r5, r6
eor r4, r4, r5, ror #13
and r11, r11, r10
eor r4, r4, r5, ror #22
eor r11, r11, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r11
str r8, [r0, #12]
str r9, [r0, #28]
# Round 9
ldr r5, [r0, #12]
ldr r6, [r0, #16]
ldr r7, [r0, #20]
ldr r9, [r0, #24]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #36]
ldr r6, [r3, #36]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #28]
ldr r6, [r0]
ldr r7, [r0, #4]
ldr r8, [r0, #8]
ror r4, r5, #2
eor r11, r5, r6
eor r4, r4, r5, ror #13
and r10, r10, r11
eor r4, r4, r5, ror #22
eor r10, r10, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r10
str r8, [r0, #8]
str r9, [r0, #24]
# Round 10
ldr r5, [r0, #8]
ldr r6, [r0, #12]
ldr r7, [r0, #16]
ldr r9, [r0, #20]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #40]
ldr r6, [r3, #40]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #24]
ldr r6, [r0, #28]
ldr r7, [r0]
ldr r8, [r0, #4]
ror r4, r5, #2
eor r10, r5, r6
eor r4, r4, r5, ror #13
and r11, r11, r10
eor r4, r4, r5, ror #22
eor r11, r11, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r11
str r8, [r0, #4]
str r9, [r0, #20]
# Round 11
ldr r5, [r0, #4]
ldr r6, [r0, #8]
ldr r7, [r0, #12]
ldr r9, [r0, #16]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #44]
ldr r6, [r3, #44]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #20]
ldr r6, [r0, #24]
ldr r7, [r0, #28]
ldr r8, [r0]
ror r4, r5, #2
eor r11, r5, r6
eor r4, r4, r5, ror #13
and r10, r10, r11
eor r4, r4, r5, ror #22
eor r10, r10, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r10
str r8, [r0]
str r9, [r0, #16]
# Round 12
ldr r5, [r0]
ldr r6, [r0, #4]
ldr r7, [r0, #8]
ldr r9, [r0, #12]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #48]
ldr r6, [r3, #48]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #16]
ldr r6, [r0, #20]
ldr r7, [r0, #24]
ldr r8, [r0, #28]
ror r4, r5, #2
eor r10, r5, r6
eor r4, r4, r5, ror #13
and r11, r11, r10
eor r4, r4, r5, ror #22
eor r11, r11, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r11
str r8, [r0, #28]
str r9, [r0, #12]
# Round 13
ldr r5, [r0, #28]
ldr r6, [r0]
ldr r7, [r0, #4]
ldr r9, [r0, #8]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #52]
ldr r6, [r3, #52]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #12]
ldr r6, [r0, #16]
ldr r7, [r0, #20]
ldr r8, [r0, #24]
ror r4, r5, #2
eor r11, r5, r6
eor r4, r4, r5, ror #13
and r10, r10, r11
eor r4, r4, r5, ror #22
eor r10, r10, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r10
str r8, [r0, #24]
str r9, [r0, #8]
# Round 14
ldr r5, [r0, #24]
ldr r6, [r0, #28]
ldr r7, [r0]
ldr r9, [r0, #4]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #56]
ldr r6, [r3, #56]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #8]
ldr r6, [r0, #12]
ldr r7, [r0, #16]
ldr r8, [r0, #20]
ror r4, r5, #2
eor r10, r5, r6
eor r4, r4, r5, ror #13
and r11, r11, r10
eor r4, r4, r5, ror #22
eor r11, r11, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r11
str r8, [r0, #20]
str r9, [r0, #4]
# Round 15
ldr r5, [r0, #20]
ldr r6, [r0, #24]
ldr r7, [r0, #28]
ldr r9, [r0]
ror r4, r5, #6
eor r6, r6, r7
eor r4, r4, r5, ror #11
and r6, r6, r5
eor r4, r4, r5, ror #25
eor r6, r6, r7
add r9, r9, r4
add r9, r9, r6
ldr r5, [sp, #60]
ldr r6, [r3, #60]
add r9, r9, r5
add r9, r9, r6
ldr r5, [r0, #4]
ldr r6, [r0, #8]
ldr r7, [r0, #12]
ldr r8, [r0, #16]
ror r4, r5, #2
eor r11, r5, r6
eor r4, r4, r5, ror #13
and r10, r10, r11
eor r4, r4, r5, ror #22
eor r10, r10, r6
add r8, r8, r9
add r9, r9, r4
add r9, r9, r10
str r8, [r0, #16]
str r9, [r0]
# Add in digest from start
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0]
ldr r5, [r0, #4]
#else
ldrd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #8]
ldr r7, [r0, #12]
#else
ldrd r6, r7, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #64]
ldr r9, [sp, #68]
#else
ldrd r8, r9, [sp, #64]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [sp, #72]
ldr r11, [sp, #76]
#else
ldrd r10, r11, [sp, #72]
#endif
add r4, r4, r8
add r5, r5, r9
add r6, r6, r10
add r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0]
str r5, [r0, #4]
#else
strd r4, r5, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #8]
str r7, [r0, #12]
#else
strd r6, r7, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #64]
str r5, [sp, #68]
#else
strd r4, r5, [sp, #64]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [sp, #72]
str r7, [sp, #76]
#else
strd r6, r7, [sp, #72]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #16]
ldr r5, [r0, #20]
#else
ldrd r4, r5, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #24]
ldr r7, [r0, #28]
#else
ldrd r6, r7, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [sp, #80]
ldr r9, [sp, #84]
#else
ldrd r8, r9, [sp, #80]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r10, [sp, #88]
ldr r11, [sp, #92]
#else
ldrd r10, r11, [sp, #88]
#endif
add r4, r4, r8
add r5, r5, r9
add r6, r6, r10
add r7, r7, r11
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r0, #16]
str r5, [r0, #20]
#else
strd r4, r5, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r0, #24]
str r7, [r0, #28]
#else
strd r6, r7, [r0, #24]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [sp, #80]
str r5, [sp, #84]
#else
strd r4, r5, [sp, #80]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [sp, #88]
str r7, [sp, #92]
#else
strd r6, r7, [sp, #88]
#endif
subs r2, r2, #0x40
sub r3, r3, #0xc0
add r1, r1, #0x40
bne L_SHA256_transform_len_begin
add sp, sp, #0xc0
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size Transform_Sha256_Len,.-Transform_Sha256_Len
#endif /* WOLFSSL_ARMASM_NO_NEON */
#ifndef WOLFSSL_ARMASM_NO_NEON
.text
# SHA-256 round constants K[0..63] (FIPS 180-4, section 4.2.2): the first
# 32 bits of the fractional parts of the cube roots of the first 64 primes.
# 64 entries x 4 bytes = 256 bytes, matching the .size directive below.
# Kept in .text so the pc-relative adr at the top of Transform_Sha256_Len
# (adr r12, L_SHA256_transform_neon_len_k) can reach it.
# NOTE(review): a few entries print without their leading zero nibble
# (0xfc19dc6 == 0x0fc19dc6, 0x6ca6351 == 0x06ca6351) — same 32-bit value,
# an artifact of the code generator.
.type L_SHA256_transform_neon_len_k, %object
.size L_SHA256_transform_neon_len_k, 256
.align 4
L_SHA256_transform_neon_len_k:
# K[0..7]
.word 0x428a2f98
.word 0x71374491
.word 0xb5c0fbcf
.word 0xe9b5dba5
.word 0x3956c25b
.word 0x59f111f1
.word 0x923f82a4
.word 0xab1c5ed5
# K[8..15]
.word 0xd807aa98
.word 0x12835b01
.word 0x243185be
.word 0x550c7dc3
.word 0x72be5d74
.word 0x80deb1fe
.word 0x9bdc06a7
.word 0xc19bf174
# K[16..23]
.word 0xe49b69c1
.word 0xefbe4786
.word 0xfc19dc6
.word 0x240ca1cc
.word 0x2de92c6f
.word 0x4a7484aa
.word 0x5cb0a9dc
.word 0x76f988da
# K[24..31]
.word 0x983e5152
.word 0xa831c66d
.word 0xb00327c8
.word 0xbf597fc7
.word 0xc6e00bf3
.word 0xd5a79147
.word 0x6ca6351
.word 0x14292967
# K[32..39]
.word 0x27b70a85
.word 0x2e1b2138
.word 0x4d2c6dfc
.word 0x53380d13
.word 0x650a7354
.word 0x766a0abb
.word 0x81c2c92e
.word 0x92722c85
# K[40..47]
.word 0xa2bfe8a1
.word 0xa81a664b
.word 0xc24b8b70
.word 0xc76c51a3
.word 0xd192e819
.word 0xd6990624
.word 0xf40e3585
.word 0x106aa070
# K[48..55]
.word 0x19a4c116
.word 0x1e376c08
.word 0x2748774c
.word 0x34b0bcb5
.word 0x391c0cb3
.word 0x4ed8aa4a
.word 0x5b9cca4f
.word 0x682e6ff3
# K[56..63]
.word 0x748f82ee
.word 0x78a5636f
.word 0x84c87814
.word 0x8cc70208
.word 0x90befffa
.word 0xa4506ceb
.word 0xbef9a3f7
.word 0xc67178f2
.text
.align 4
.fpu neon
.globl Transform_Sha256_Len
.type Transform_Sha256_Len, %function
Transform_Sha256_Len:
push {r4, r5, r6, r7, r8, r9, r10, lr}
vpush {d8-d11}
sub sp, sp, #24
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r0, [sp]
str r1, [sp, #4]
#else
strd r0, r1, [sp]
#endif
str r2, [sp, #8]
adr r12, L_SHA256_transform_neon_len_k
# Load digest into registers
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r2, [r0]
ldr r3, [r0, #4]
#else
ldrd r2, r3, [r0]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r4, [r0, #8]
ldr r5, [r0, #12]
#else
ldrd r4, r5, [r0, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r6, [r0, #16]
ldr r7, [r0, #20]
#else
ldrd r6, r7, [r0, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r8, [r0, #24]
ldr r9, [r0, #28]
#else
ldrd r8, r9, [r0, #24]
#endif
# Start of loop processing a block
L_SHA256_transform_neon_len_begin:
# Load W
vld1.8 {d0-d3}, [r1]!
vld1.8 {d4-d7}, [r1]!
#ifndef WOLFSSL_ARM_ARCH_NEON_64BIT
vrev32.8 q0, q0
vrev32.8 q1, q1
vrev32.8 q2, q2
vrev32.8 q3, q3
#else
vrev32.8 d0, d0
vrev32.8 d1, d1
vrev32.8 d2, d2
vrev32.8 d3, d3
vrev32.8 d4, d4
vrev32.8 d5, d5
vrev32.8 d6, d6
vrev32.8 d7, d7
#endif /* WOLFSSL_ARM_ARCH_NEON_64BIT */
str r1, [sp, #4]
mov lr, #3
# Start of 16 rounds
L_SHA256_transform_neon_len_start:
# Round 0
vmov.32 r10, d0[0]
ror r0, r6, #6
eor r1, r7, r8
eor r0, r0, r6, ror #11
and r1, r1, r6
eor r0, r0, r6, ror #25
eor r1, r1, r8
add r9, r9, r0
add r9, r9, r1
ldr r0, [r12]
add r9, r9, r10
add r9, r9, r0
add r5, r5, r9
ror r0, r2, #2
eor r1, r2, r3
eor r0, r0, r2, ror #13
eor r10, r3, r4
and r1, r1, r10
eor r0, r0, r2, ror #22
eor r1, r1, r3
add r9, r9, r0
add r9, r9, r1
# Round 1
vmov.32 r10, d0[1]
# Calc new W[0]-W[1]
vext.8 d10, d0, d1, #4
ror r0, r5, #6
vshl.u32 d8, d7, #15
eor r1, r6, r7
vsri.u32 d8, d7, #17
eor r0, r0, r5, ror #11
vshl.u32 d9, d7, #13
and r1, r1, r5
vsri.u32 d9, d7, #19
eor r0, r0, r5, ror #25
veor d9, d8
eor r1, r1, r7
vshr.u32 d8, d7, #10
add r8, r8, r0
veor d9, d8
add r8, r8, r1
vadd.i32 d0, d9
ldr r0, [r12, #4]
vext.8 d11, d4, d5, #4
add r8, r8, r10
vadd.i32 d0, d11
add r8, r8, r0
vshl.u32 d8, d10, #25
add r4, r4, r8
vsri.u32 d8, d10, #7
ror r0, r9, #2
vshl.u32 d9, d10, #14
eor r1, r9, r2
vsri.u32 d9, d10, #18
eor r0, r0, r9, ror #13
veor d9, d8
eor r10, r2, r3
vshr.u32 d10, #3
and r1, r1, r10
veor d9, d10
eor r0, r0, r9, ror #22
vadd.i32 d0, d9
eor r1, r1, r2
add r8, r8, r0
add r8, r8, r1
# Round 2
vmov.32 r10, d1[0]
ror r0, r4, #6
eor r1, r5, r6
eor r0, r0, r4, ror #11
and r1, r1, r4
eor r0, r0, r4, ror #25
eor r1, r1, r6
add r7, r7, r0
add r7, r7, r1
ldr r0, [r12, #8]
add r7, r7, r10
add r7, r7, r0
add r3, r3, r7
ror r0, r8, #2
eor r1, r8, r9
eor r0, r0, r8, ror #13
eor r10, r9, r2
and r1, r1, r10
eor r0, r0, r8, ror #22
eor r1, r1, r9
add r7, r7, r0
add r7, r7, r1
# Round 3
vmov.32 r10, d1[1]
# Calc new W[2]-W[3]
vext.8 d10, d1, d2, #4
ror r0, r3, #6
vshl.u32 d8, d0, #15
eor r1, r4, r5
vsri.u32 d8, d0, #17
eor r0, r0, r3, ror #11
vshl.u32 d9, d0, #13
and r1, r1, r3
vsri.u32 d9, d0, #19
eor r0, r0, r3, ror #25
veor d9, d8
eor r1, r1, r5
vshr.u32 d8, d0, #10
add r6, r6, r0
veor d9, d8
add r6, r6, r1
vadd.i32 d1, d9
ldr r0, [r12, #12]
vext.8 d11, d5, d6, #4
add r6, r6, r10
vadd.i32 d1, d11
add r6, r6, r0
vshl.u32 d8, d10, #25
add r2, r2, r6
vsri.u32 d8, d10, #7
ror r0, r7, #2
vshl.u32 d9, d10, #14
eor r1, r7, r8
vsri.u32 d9, d10, #18
eor r0, r0, r7, ror #13
veor d9, d8
eor r10, r8, r9
vshr.u32 d10, #3
and r1, r1, r10
veor d9, d10
eor r0, r0, r7, ror #22
vadd.i32 d1, d9
eor r1, r1, r8
add r6, r6, r0
add r6, r6, r1
# Round 4
vmov.32 r10, d2[0]
ror r0, r2, #6
eor r1, r3, r4
eor r0, r0, r2, ror #11
and r1, r1, r2
eor r0, r0, r2, ror #25
eor r1, r1, r4
add r5, r5, r0
add r5, r5, r1
ldr r0, [r12, #16]
add r5, r5, r10
add r5, r5, r0
add r9, r9, r5
ror r0, r6, #2
eor r1, r6, r7
eor r0, r0, r6, ror #13
eor r10, r7, r8
and r1, r1, r10
eor r0, r0, r6, ror #22
eor r1, r1, r7
add r5, r5, r0
add r5, r5, r1
# Round 5
vmov.32 r10, d2[1]
# Calc new W[4]-W[5]
vext.8 d10, d2, d3, #4
ror r0, r9, #6
vshl.u32 d8, d1, #15
eor r1, r2, r3
vsri.u32 d8, d1, #17
eor r0, r0, r9, ror #11
vshl.u32 d9, d1, #13
and r1, r1, r9
vsri.u32 d9, d1, #19
eor r0, r0, r9, ror #25
veor d9, d8
eor r1, r1, r3
vshr.u32 d8, d1, #10
add r4, r4, r0
veor d9, d8
add r4, r4, r1
vadd.i32 d2, d9
ldr r0, [r12, #20]
vext.8 d11, d6, d7, #4
add r4, r4, r10
vadd.i32 d2, d11
add r4, r4, r0
vshl.u32 d8, d10, #25
add r8, r8, r4
vsri.u32 d8, d10, #7
ror r0, r5, #2
vshl.u32 d9, d10, #14
eor r1, r5, r6
vsri.u32 d9, d10, #18
eor r0, r0, r5, ror #13
veor d9, d8
eor r10, r6, r7
vshr.u32 d10, #3
and r1, r1, r10
veor d9, d10
eor r0, r0, r5, ror #22
vadd.i32 d2, d9
eor r1, r1, r6
add r4, r4, r0
add r4, r4, r1
# Round 6
vmov.32 r10, d3[0]
ror r0, r8, #6
eor r1, r9, r2
eor r0, r0, r8, ror #11
and r1, r1, r8
eor r0, r0, r8, ror #25
eor r1, r1, r2
add r3, r3, r0
add r3, r3, r1
ldr r0, [r12, #24]
add r3, r3, r10
add r3, r3, r0
add r7, r7, r3
ror r0, r4, #2
eor r1, r4, r5
eor r0, r0, r4, ror #13
eor r10, r5, r6
and r1, r1, r10
eor r0, r0, r4, ror #22
eor r1, r1, r5
add r3, r3, r0
add r3, r3, r1
# Round 7
vmov.32 r10, d3[1]
# Calc new W[6]-W[7]
vext.8 d10, d3, d4, #4
ror r0, r7, #6
vshl.u32 d8, d2, #15
eor r1, r8, r9
vsri.u32 d8, d2, #17
eor r0, r0, r7, ror #11
vshl.u32 d9, d2, #13
and r1, r1, r7
vsri.u32 d9, d2, #19
eor r0, r0, r7, ror #25
veor d9, d8
eor r1, r1, r9
vshr.u32 d8, d2, #10
add r2, r2, r0
veor d9, d8
add r2, r2, r1
vadd.i32 d3, d9
ldr r0, [r12, #28]
vext.8 d11, d7, d0, #4
add r2, r2, r10
vadd.i32 d3, d11
add r2, r2, r0
vshl.u32 d8, d10, #25
add r6, r6, r2
vsri.u32 d8, d10, #7
ror r0, r3, #2
vshl.u32 d9, d10, #14
eor r1, r3, r4
vsri.u32 d9, d10, #18
eor r0, r0, r3, ror #13
veor d9, d8
eor r10, r4, r5
vshr.u32 d10, #3
and r1, r1, r10
veor d9, d10
eor r0, r0, r3, ror #22
vadd.i32 d3, d9
eor r1, r1, r4
add r2, r2, r0
add r2, r2, r1
# Round 8
vmov.32 r10, d4[0]
ror r0, r6, #6
eor r1, r7, r8
eor r0, r0, r6, ror #11
and r1, r1, r6
eor r0, r0, r6, ror #25
eor r1, r1, r8
add r9, r9, r0
add r9, r9, r1
ldr r0, [r12, #32]
add r9, r9, r10
add r9, r9, r0
add r5, r5, r9
ror r0, r2, #2
eor r1, r2, r3
eor r0, r0, r2, ror #13
eor r10, r3, r4
and r1, r1, r10
eor r0, r0, r2, ror #22
eor r1, r1, r3
add r9, r9, r0
add r9, r9, r1
# Round 9
vmov.32 r10, d4[1]
# Calc new W[8]-W[9]
vext.8 d10, d4, d5, #4
ror r0, r5, #6
vshl.u32 d8, d3, #15
eor r1, r6, r7
vsri.u32 d8, d3, #17
eor r0, r0, r5, ror #11
vshl.u32 d9, d3, #13
and r1, r1, r5
vsri.u32 d9, d3, #19
eor r0, r0, r5, ror #25
veor d9, d8
eor r1, r1, r7
vshr.u32 d8, d3, #10
add r8, r8, r0
veor d9, d8
add r8, r8, r1
vadd.i32 d4, d9
ldr r0, [r12, #36]
vext.8 d11, d0, d1, #4
add r8, r8, r10
vadd.i32 d4, d11
add r8, r8, r0
vshl.u32 d8, d10, #25
add r4, r4, r8
vsri.u32 d8, d10, #7
ror r0, r9, #2
vshl.u32 d9, d10, #14
eor r1, r9, r2
vsri.u32 d9, d10, #18
eor r0, r0, r9, ror #13
veor d9, d8
eor r10, r2, r3
vshr.u32 d10, #3
and r1, r1, r10
veor d9, d10
eor r0, r0, r9, ror #22
vadd.i32 d4, d9
eor r1, r1, r2
add r8, r8, r0
add r8, r8, r1
# Round 10
vmov.32 r10, d5[0]
ror r0, r4, #6
eor r1, r5, r6
eor r0, r0, r4, ror #11
and r1, r1, r4
eor r0, r0, r4, ror #25
eor r1, r1, r6
add r7, r7, r0
add r7, r7, r1
ldr r0, [r12, #40]
add r7, r7, r10
add r7, r7, r0
add r3, r3, r7
ror r0, r8, #2
eor r1, r8, r9
eor r0, r0, r8, ror #13
eor r10, r9, r2
and r1, r1, r10
eor r0, r0, r8, ror #22
eor r1, r1, r9
add r7, r7, r0
add r7, r7, r1
# Round 11
vmov.32 r10, d5[1]
# Calc new W[10]-W[11]
vext.8 d10, d5, d6, #4
ror r0, r3, #6
vshl.u32 d8, d4, #15
eor r1, r4, r5
vsri.u32 d8, d4, #17
eor r0, r0, r3, ror #11
vshl.u32 d9, d4, #13
and r1, r1, r3
vsri.u32 d9, d4, #19
eor r0, r0, r3, ror #25
veor d9, d8
eor r1, r1, r5
vshr.u32 d8, d4, #10
add r6, r6, r0
veor d9, d8
add r6, r6, r1
vadd.i32 d5, d9
ldr r0, [r12, #44]
vext.8 d11, d1, d2, #4
add r6, r6, r10
vadd.i32 d5, d11
add r6, r6, r0
vshl.u32 d8, d10, #25
add r2, r2, r6
vsri.u32 d8, d10, #7
ror r0, r7, #2
vshl.u32 d9, d10, #14
eor r1, r7, r8
vsri.u32 d9, d10, #18
eor r0, r0, r7, ror #13
veor d9, d8
eor r10, r8, r9
vshr.u32 d10, #3
and r1, r1, r10
veor d9, d10
eor r0, r0, r7, ror #22
vadd.i32 d5, d9
eor r1, r1, r8
add r6, r6, r0
add r6, r6, r1
# Round 12
vmov.32 r10, d6[0]
ror r0, r2, #6
eor r1, r3, r4
eor r0, r0, r2, ror #11
and r1, r1, r2
eor r0, r0, r2, ror #25
eor r1, r1, r4
add r5, r5, r0
add r5, r5, r1
ldr r0, [r12, #48]
add r5, r5, r10
add r5, r5, r0
add r9, r9, r5
ror r0, r6, #2
eor r1, r6, r7
eor r0, r0, r6, ror #13
eor r10, r7, r8
and r1, r1, r10
eor r0, r0, r6, ror #22
eor r1, r1, r7
add r5, r5, r0
add r5, r5, r1
# Round 13
vmov.32 r10, d6[1]
# Calc new W[12]-W[13]
vext.8 d10, d6, d7, #4
ror r0, r9, #6
vshl.u32 d8, d5, #15
eor r1, r2, r3
vsri.u32 d8, d5, #17
eor r0, r0, r9, ror #11
vshl.u32 d9, d5, #13
and r1, r1, r9
vsri.u32 d9, d5, #19
eor r0, r0, r9, ror #25
veor d9, d8
eor r1, r1, r3
vshr.u32 d8, d5, #10
add r4, r4, r0
veor d9, d8
add r4, r4, r1
vadd.i32 d6, d9
ldr r0, [r12, #52]
vext.8 d11, d2, d3, #4
add r4, r4, r10
vadd.i32 d6, d11
add r4, r4, r0
vshl.u32 d8, d10, #25
add r8, r8, r4
vsri.u32 d8, d10, #7
ror r0, r5, #2
vshl.u32 d9, d10, #14
eor r1, r5, r6
vsri.u32 d9, d10, #18
eor r0, r0, r5, ror #13
veor d9, d8
eor r10, r6, r7
vshr.u32 d10, #3
and r1, r1, r10
veor d9, d10
eor r0, r0, r5, ror #22
vadd.i32 d6, d9
eor r1, r1, r6
add r4, r4, r0
add r4, r4, r1
# Round 14
vmov.32 r10, d7[0]
ror r0, r8, #6
eor r1, r9, r2
eor r0, r0, r8, ror #11
and r1, r1, r8
eor r0, r0, r8, ror #25
eor r1, r1, r2
add r3, r3, r0
add r3, r3, r1
ldr r0, [r12, #56]
add r3, r3, r10
add r3, r3, r0
add r7, r7, r3
ror r0, r4, #2
eor r1, r4, r5
eor r0, r0, r4, ror #13
eor r10, r5, r6
and r1, r1, r10
eor r0, r0, r4, ror #22
eor r1, r1, r5
add r3, r3, r0
add r3, r3, r1
# Round 15
vmov.32 r10, d7[1]
# Calc new W[14]-W[15]
vext.8 d10, d7, d0, #4
ror r0, r7, #6
vshl.u32 d8, d6, #15
eor r1, r8, r9
vsri.u32 d8, d6, #17
eor r0, r0, r7, ror #11
vshl.u32 d9, d6, #13
and r1, r1, r7
vsri.u32 d9, d6, #19
eor r0, r0, r7, ror #25
veor d9, d8
eor r1, r1, r9
vshr.u32 d8, d6, #10
add r2, r2, r0
veor d9, d8
add r2, r2, r1
vadd.i32 d7, d9
ldr r0, [r12, #60]
vext.8 d11, d3, d4, #4
add r2, r2, r10
vadd.i32 d7, d11
add r2, r2, r0
vshl.u32 d8, d10, #25
add r6, r6, r2
vsri.u32 d8, d10, #7
ror r0, r3, #2
vshl.u32 d9, d10, #14
eor r1, r3, r4
vsri.u32 d9, d10, #18
eor r0, r0, r3, ror #13
veor d9, d8
eor r10, r4, r5
vshr.u32 d10, #3
and r1, r1, r10
veor d9, d10
eor r0, r0, r3, ror #22
vadd.i32 d7, d9
eor r1, r1, r4
add r2, r2, r0
add r2, r2, r1
add r12, r12, #0x40
subs lr, lr, #1
bne L_SHA256_transform_neon_len_start
# Round 0
vmov.32 r10, d0[0]
ror r0, r6, #6
eor r1, r7, r8
eor r0, r0, r6, ror #11
and r1, r1, r6
eor r0, r0, r6, ror #25
eor r1, r1, r8
add r9, r9, r0
add r9, r9, r1
ldr r0, [r12]
add r9, r9, r10
add r9, r9, r0
add r5, r5, r9
ror r0, r2, #2
eor r1, r2, r3
eor r0, r0, r2, ror #13
eor r10, r3, r4
and r1, r1, r10
eor r0, r0, r2, ror #22
eor r1, r1, r3
add r9, r9, r0
add r9, r9, r1
# Round 1
vmov.32 r10, d0[1]
ror r0, r5, #6
eor r1, r6, r7
eor r0, r0, r5, ror #11
and r1, r1, r5
eor r0, r0, r5, ror #25
eor r1, r1, r7
add r8, r8, r0
add r8, r8, r1
ldr r0, [r12, #4]
add r8, r8, r10
add r8, r8, r0
add r4, r4, r8
ror r0, r9, #2
eor r1, r9, r2
eor r0, r0, r9, ror #13
eor r10, r2, r3
and r1, r1, r10
eor r0, r0, r9, ror #22
eor r1, r1, r2
add r8, r8, r0
add r8, r8, r1
# Round 2
vmov.32 r10, d1[0]
ror r0, r4, #6
eor r1, r5, r6
eor r0, r0, r4, ror #11
and r1, r1, r4
eor r0, r0, r4, ror #25
eor r1, r1, r6
add r7, r7, r0
add r7, r7, r1
ldr r0, [r12, #8]
add r7, r7, r10
add r7, r7, r0
add r3, r3, r7
ror r0, r8, #2
eor r1, r8, r9
eor r0, r0, r8, ror #13
eor r10, r9, r2
and r1, r1, r10
eor r0, r0, r8, ror #22
eor r1, r1, r9
add r7, r7, r0
add r7, r7, r1
# Round 3
vmov.32 r10, d1[1]
ror r0, r3, #6
eor r1, r4, r5
eor r0, r0, r3, ror #11
and r1, r1, r3
eor r0, r0, r3, ror #25
eor r1, r1, r5
add r6, r6, r0
add r6, r6, r1
ldr r0, [r12, #12]
add r6, r6, r10
add r6, r6, r0
add r2, r2, r6
ror r0, r7, #2
eor r1, r7, r8
eor r0, r0, r7, ror #13
eor r10, r8, r9
and r1, r1, r10
eor r0, r0, r7, ror #22
eor r1, r1, r8
add r6, r6, r0
add r6, r6, r1
# Round 4
vmov.32 r10, d2[0]
ror r0, r2, #6
eor r1, r3, r4
eor r0, r0, r2, ror #11
and r1, r1, r2
eor r0, r0, r2, ror #25
eor r1, r1, r4
add r5, r5, r0
add r5, r5, r1
ldr r0, [r12, #16]
add r5, r5, r10
add r5, r5, r0
add r9, r9, r5
ror r0, r6, #2
eor r1, r6, r7
eor r0, r0, r6, ror #13
eor r10, r7, r8
and r1, r1, r10
eor r0, r0, r6, ror #22
eor r1, r1, r7
add r5, r5, r0
add r5, r5, r1
# Round 5
vmov.32 r10, d2[1]
ror r0, r9, #6
eor r1, r2, r3
eor r0, r0, r9, ror #11
and r1, r1, r9
eor r0, r0, r9, ror #25
eor r1, r1, r3
add r4, r4, r0
add r4, r4, r1
ldr r0, [r12, #20]
add r4, r4, r10
add r4, r4, r0
add r8, r8, r4
ror r0, r5, #2
eor r1, r5, r6
eor r0, r0, r5, ror #13
eor r10, r6, r7
and r1, r1, r10
eor r0, r0, r5, ror #22
eor r1, r1, r6
add r4, r4, r0
add r4, r4, r1
# Round 6
vmov.32 r10, d3[0]
ror r0, r8, #6
eor r1, r9, r2
eor r0, r0, r8, ror #11
and r1, r1, r8
eor r0, r0, r8, ror #25
eor r1, r1, r2
add r3, r3, r0
add r3, r3, r1
ldr r0, [r12, #24]
add r3, r3, r10
add r3, r3, r0
add r7, r7, r3
ror r0, r4, #2
eor r1, r4, r5
eor r0, r0, r4, ror #13
eor r10, r5, r6
and r1, r1, r10
eor r0, r0, r4, ror #22
eor r1, r1, r5
add r3, r3, r0
add r3, r3, r1
# Round 7
vmov.32 r10, d3[1]
ror r0, r7, #6
eor r1, r8, r9
eor r0, r0, r7, ror #11
and r1, r1, r7
eor r0, r0, r7, ror #25
eor r1, r1, r9
add r2, r2, r0
add r2, r2, r1
ldr r0, [r12, #28]
add r2, r2, r10
add r2, r2, r0
add r6, r6, r2
ror r0, r3, #2
eor r1, r3, r4
eor r0, r0, r3, ror #13
eor r10, r4, r5
and r1, r1, r10
eor r0, r0, r3, ror #22
eor r1, r1, r4
add r2, r2, r0
add r2, r2, r1
# Round 8
vmov.32 r10, d4[0]
ror r0, r6, #6
eor r1, r7, r8
eor r0, r0, r6, ror #11
and r1, r1, r6
eor r0, r0, r6, ror #25
eor r1, r1, r8
add r9, r9, r0
add r9, r9, r1
ldr r0, [r12, #32]
add r9, r9, r10
add r9, r9, r0
add r5, r5, r9
ror r0, r2, #2
eor r1, r2, r3
eor r0, r0, r2, ror #13
eor r10, r3, r4
and r1, r1, r10
eor r0, r0, r2, ror #22
eor r1, r1, r3
add r9, r9, r0
add r9, r9, r1
# Round 9
vmov.32 r10, d4[1]
ror r0, r5, #6
eor r1, r6, r7
eor r0, r0, r5, ror #11
and r1, r1, r5
eor r0, r0, r5, ror #25
eor r1, r1, r7
add r8, r8, r0
add r8, r8, r1
ldr r0, [r12, #36]
add r8, r8, r10
add r8, r8, r0
add r4, r4, r8
ror r0, r9, #2
eor r1, r9, r2
eor r0, r0, r9, ror #13
eor r10, r2, r3
and r1, r1, r10
eor r0, r0, r9, ror #22
eor r1, r1, r2
add r8, r8, r0
add r8, r8, r1
# Round 10
vmov.32 r10, d5[0]
ror r0, r4, #6
eor r1, r5, r6
eor r0, r0, r4, ror #11
and r1, r1, r4
eor r0, r0, r4, ror #25
eor r1, r1, r6
add r7, r7, r0
add r7, r7, r1
ldr r0, [r12, #40]
add r7, r7, r10
add r7, r7, r0
add r3, r3, r7
ror r0, r8, #2
eor r1, r8, r9
eor r0, r0, r8, ror #13
eor r10, r9, r2
and r1, r1, r10
eor r0, r0, r8, ror #22
eor r1, r1, r9
add r7, r7, r0
add r7, r7, r1
# Round 11
vmov.32 r10, d5[1]
ror r0, r3, #6
eor r1, r4, r5
eor r0, r0, r3, ror #11
and r1, r1, r3
eor r0, r0, r3, ror #25
eor r1, r1, r5
add r6, r6, r0
add r6, r6, r1
ldr r0, [r12, #44]
add r6, r6, r10
add r6, r6, r0
add r2, r2, r6
ror r0, r7, #2
eor r1, r7, r8
eor r0, r0, r7, ror #13
eor r10, r8, r9
and r1, r1, r10
eor r0, r0, r7, ror #22
eor r1, r1, r8
add r6, r6, r0
add r6, r6, r1
# Round 12
vmov.32 r10, d6[0]
ror r0, r2, #6
eor r1, r3, r4
eor r0, r0, r2, ror #11
and r1, r1, r2
eor r0, r0, r2, ror #25
eor r1, r1, r4
add r5, r5, r0
add r5, r5, r1
ldr r0, [r12, #48]
add r5, r5, r10
add r5, r5, r0
add r9, r9, r5
ror r0, r6, #2
eor r1, r6, r7
eor r0, r0, r6, ror #13
eor r10, r7, r8
and r1, r1, r10
eor r0, r0, r6, ror #22
eor r1, r1, r7
add r5, r5, r0
add r5, r5, r1
# Round 13
vmov.32 r10, d6[1]
ror r0, r9, #6
eor r1, r2, r3
eor r0, r0, r9, ror #11
and r1, r1, r9
eor r0, r0, r9, ror #25
eor r1, r1, r3
add r4, r4, r0
add r4, r4, r1
ldr r0, [r12, #52]
add r4, r4, r10
add r4, r4, r0
add r8, r8, r4
ror r0, r5, #2
eor r1, r5, r6
eor r0, r0, r5, ror #13
eor r10, r6, r7
and r1, r1, r10
eor r0, r0, r5, ror #22
eor r1, r1, r6
add r4, r4, r0
add r4, r4, r1
# Round 14
vmov.32 r10, d7[0]
ror r0, r8, #6
eor r1, r9, r2
eor r0, r0, r8, ror #11
and r1, r1, r8
eor r0, r0, r8, ror #25
eor r1, r1, r2
add r3, r3, r0
add r3, r3, r1
ldr r0, [r12, #56]
add r3, r3, r10
add r3, r3, r0
add r7, r7, r3
ror r0, r4, #2
eor r1, r4, r5
eor r0, r0, r4, ror #13
eor r10, r5, r6
and r1, r1, r10
eor r0, r0, r4, ror #22
eor r1, r1, r5
add r3, r3, r0
add r3, r3, r1
# Round 15
vmov.32 r10, d7[1]
ror r0, r7, #6
eor r1, r8, r9
eor r0, r0, r7, ror #11
and r1, r1, r7
eor r0, r0, r7, ror #25
eor r1, r1, r9
add r2, r2, r0
add r2, r2, r1
ldr r0, [r12, #60]
add r2, r2, r10
add r2, r2, r0
add r6, r6, r2
ror r0, r3, #2
eor r1, r3, r4
eor r0, r0, r3, ror #13
eor r10, r4, r5
and r1, r1, r10
eor r0, r0, r3, ror #22
eor r1, r1, r4
add r2, r2, r0
add r2, r2, r1
ldr r10, [sp]
# Add in digest from start
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r0, [r10]
ldr r1, [r10, #4]
#else
ldrd r0, r1, [r10]
#endif
add r2, r2, r0
add r3, r3, r1
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r2, [r10]
str r3, [r10, #4]
#else
strd r2, r3, [r10]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r0, [r10, #8]
ldr r1, [r10, #12]
#else
ldrd r0, r1, [r10, #8]
#endif
add r4, r4, r0
add r5, r5, r1
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r4, [r10, #8]
str r5, [r10, #12]
#else
strd r4, r5, [r10, #8]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r0, [r10, #16]
ldr r1, [r10, #20]
#else
ldrd r0, r1, [r10, #16]
#endif
add r6, r6, r0
add r7, r7, r1
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r6, [r10, #16]
str r7, [r10, #20]
#else
strd r6, r7, [r10, #16]
#endif
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
ldr r0, [r10, #24]
ldr r1, [r10, #28]
#else
ldrd r0, r1, [r10, #24]
#endif
add r8, r8, r0
add r9, r9, r1
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
str r8, [r10, #24]
str r9, [r10, #28]
#else
strd r8, r9, [r10, #24]
#endif
ldr r10, [sp, #8]
ldr r1, [sp, #4]
subs r10, r10, #0x40
sub r12, r12, #0xc0
str r10, [sp, #8]
bne L_SHA256_transform_neon_len_begin
add sp, sp, #24
vpop {d8-d11}
pop {r4, r5, r6, r7, r8, r9, r10, pc}
.size Transform_Sha256_Len,.-Transform_Sha256_Len
#endif /* WOLFSSL_ARMASM_NO_NEON */
#endif /* !NO_SHA256 */
#endif /* !__aarch64__ && __arm__ && !__thumb__ */
#endif /* WOLFSSL_ARMASM */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
#endif /* !WOLFSSL_ARMASM_INLINE */
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/SPI/SPI_FullDuplex_ComIT/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/SPI/SPI_FullDuplex_ComDMA/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Default_Handler: shared weak target for every device IRQ. All the
; IRQ symbols below label the same infinite loop; defining any of them
; with a strong symbol elsewhere replaces that entry.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
; All labels below alias the single B . instruction.
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
; MicroLib reads the stack/heap bounds directly from these symbols.
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
; Standard library two-region model: return heap base in R0, stack top
; in R1, heap limit in R2 and stack limit in R3 (armasm convention).
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/SPI/SPI_FullDuplex_ComDMA/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/* Reset_Handler: entry point after reset.
   1. Set SP from _estack (linker-script symbol).
   2. Copy .data initializers from flash (_sidata) to RAM (_sdata.._edata).
   3. Zero-fill .bss (_sbss.._ebss).
   4. Call SystemInit, static constructors (__libc_init_array), then main().
   Register use in the copy loop: r1 = byte offset already copied,
   r0 = _sdata (reloaded each pass). In the fill loop: r2 = fill pointer. */
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* r3 = flash word at _sidata + offset */
str r3, [r0, r1] /* store to RAM at _sdata + offset */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit /* continue while _sdata + offset < _edata */
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever /* main() is not expected to return */
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
/* Weak target for every unhandled interrupt: spin forever so a
   debugger can inspect the state that raised the interrupt. */
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
/* Cortex-M0 vector table: initial SP, reset vector, core exceptions,
   then the 32 STM32F072 device IRQ entries. The linker script places
   the .isr_vector section at address 0x00000000. */
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/* Fix: emit .size AFTER the table. The original placed it before the
   g_pfnVectors label, so ".-g_pfnVectors" evaluated to 0 and the ELF
   symbol size was recorded as empty. Moving it here records the true
   table size without changing any emitted data. */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Weakly bind every handler referenced by the vector table to
   Default_Handler; any strong definition elsewhere overrides it.
   .thumb_set (rather than .set) also marks the alias as a Thumb
   function, which is required for correct vector entries (bit 0). */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/SPI/SPI_FullDuplex_ComDMA/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
; IAR cstartup module: vector table placed in the .intvec section.
; sfe(CSTACK) is the end address of the CSTACK block (sized by the
; linker configuration) and becomes the initial stack pointer.
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
; Reset_Handler: run SystemInit, then jump to the IAR runtime entry
; point (__iar_program_start), which initializes data/bss and calls main.
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
; Each default handler below is PUBWEAK — application code overrides it
; by defining a strong symbol of the same name — and simply branches to
; itself so a debugger can inspect the faulting state.
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/SPI/SPI_FullDuplex_ComPolling/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400 ; 1 KiB stack
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp ; top of stack: first vector table entry
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200 ; 512 B heap
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
; Layout: initial SP, reset vector, Cortex-M0 core exceptions, then
; the 32 STM32F072 device IRQ entries.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
; Entry point after reset: run SystemInit (clocks, vector table), then
; jump to the C library entry __main, which initializes the runtime and
; calls main(). Control is not expected to return here.
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit ; system/clock configuration before C runtime init
BLX R0
LDR R0, =__main ; __main sets up the C runtime, then calls main()
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Each is exported WEAK, so an application-defined handler of the same
; name overrides it. The default body spins forever (B .) so a debugger
; can examine the state that caused the exception.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Default_Handler: shared weak target for every device IRQ. All the
; IRQ symbols below label the same infinite loop; defining any of them
; with a strong symbol elsewhere replaces that entry.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
; All labels below alias the single B . instruction.
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
; MicroLib reads the stack/heap bounds directly from these symbols.
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
; Standard library two-region model: return heap base in R0, stack top
; in R1, heap limit in R2 and stack limit in R3 (armasm convention).
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/SPI/SPI_FullDuplex_ComPolling/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/* Reset_Handler: entry point after reset.
   1. Set SP from _estack (linker-script symbol).
   2. Copy .data initializers from flash (_sidata) to RAM (_sdata.._edata).
   3. Zero-fill .bss (_sbss.._ebss).
   4. Call SystemInit, static constructors (__libc_init_array), then main().
   Register use in the copy loop: r1 = byte offset already copied,
   r0 = _sdata (reloaded each pass). In the fill loop: r2 = fill pointer. */
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* r3 = flash word at _sidata + offset */
str r3, [r0, r1] /* store to RAM at _sdata + offset */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit /* continue while _sdata + offset < _edata */
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever /* main() is not expected to return */
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
/* Weak target for every unhandled interrupt: spin forever so a
   debugger can inspect the state that raised the interrupt. */
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
/* Cortex-M0 vector table: initial SP, reset vector, core exceptions,
   then the 32 STM32F072 device IRQ entries. The linker script places
   the .isr_vector section at address 0x00000000. */
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/* Fix: emit .size AFTER the table. The original placed it before the
   g_pfnVectors label, so ".-g_pfnVectors" evaluated to 0 and the ELF
   symbol size was recorded as empty. Moving it here records the true
   table size without changing any emitted data. */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Any handler the application does not define resolves to Default_Handler. */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/SPI/SPI_FullDuplex_ComPolling/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
; Cortex-M0 vector table; the linker places .intvec at address 0.
__vector_table
DCD sfe(CSTACK) ; Initial Main Stack Pointer (end of CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
; Reset: run SystemInit (clock setup), then hand control to the IAR
; C runtime entry, which performs data init and calls main().
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
; Weak default handlers: each loops forever on itself. Defining a strong
; symbol with the same name anywhere in the application overrides it.
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/Cortex/CORTEXM_SysTick/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400; 1 KiB main stack
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200; 512 B heap
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
; Runs SystemInit (clock setup), then jumps to the ARM C library entry
; __main, which initializes RAM and calls the application's main().
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Each is weak, so an application-defined handler of the same name wins.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; All peripheral IRQ handlers share this single weak infinite-loop body;
; every label below resolves to the same address.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB, the library reads these symbols directly; otherwise the
; two-region __user_initial_stackheap callback reports the region bounds.
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/Cortex/CORTEXM_SysTick/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* GNU assembler setup: unified syntax, Cortex-M0 target, Thumb state. */
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/* Post-reset entry: set SP, initialize SRAM (.data copy, .bss clear),
   then run SystemInit, C runtime constructors and main(). */
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* main stack pointer = top of RAM */
/* Copy the .data load image from flash into SRAM. */
movs r1, #0 /* r1 = byte offset already copied */
b .LdataCheck
.LdataCopy:
ldr r3, =_sidata
ldr r3, [r3, r1] /* word from flash image */
str r3, [r0, r1] /* word into SRAM at _sdata + r1 */
adds r1, r1, #4
.LdataCheck:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3 /* reached _edata? */
bcc .LdataCopy
ldr r2, =_sbss
b .LbssCheck
/* Clear the .bss region one word at a time. */
.LbssZero:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
.LbssCheck:
ldr r3, =_ebss
cmp r2, r3
bcc .LbssZero
/* Clock/system setup, static constructors, application entry. */
bl SystemInit
bl __libc_init_array
bl main
.Lhang:
b .Lhang /* trap here if main() ever returns */
.size Reset_Handler, .-Reset_Handler
/**
 * @brief This is the code that gets called when the processor receives an
 * unexpected interrupt. This simply enters an infinite loop, preserving
 * the system state for examination by a debugger.
 *
 * @param None
 * @retval : None
 */
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop /* spin forever; inspect with a debugger */
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word SVC_Handler
.word 0 /* Reserved */
.word 0 /* Reserved */
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/* .size moved AFTER the table: the original placed it before the label, so
   ".-g_pfnVectors" evaluated to 0 and the ELF symbol size was recorded as
   zero. Here it records the true table size. */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Any handler the application does not define resolves to Default_Handler. */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/Cortex/CORTEXM_SysTick/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
; IAR cstartup module. Emits the STM32F072 Cortex-M0 vector table into the
; .intvec section; entry order is fixed by the NVIC hardware and MUST NOT change.
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
; Entry 0: initial Main Stack Pointer = end of CSTACK (sfe = section end).
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts (IRQ0..IRQ31, positions per STM32F072 reference manual)
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
; Default handlers. Each is PUBWEAK so application code can override it by
; defining a strong symbol of the same name; the weak fallbacks below simply
; spin (branch-to-self), preserving state for a debugger.
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
; Clock/system init first, then hand off to the IAR C runtime entry point.
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/Cortex/CORTEXM_ProcessStack/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Stack: 0x400 bytes, zero-init, 8-byte aligned (ALIGN=3 is a power of two).
; __initial_sp labels the top of the stack and becomes vector-table entry 0.
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Heap: 0x200 bytes; base/limit labels are consumed by the C library
; stack/heap initialization at the end of this file.
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
; Entry order is fixed by the Cortex-M0 NVIC; do not reorder.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
; Runs SystemInit (clock setup) then jumps to the ARM C library entry __main,
; which performs scatter-loading (data copy / bss zero) before calling main().
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; All are exported WEAK so application code can override them.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Catch-all IRQ handler: every peripheral IRQ symbol is a WEAK alias for the
; same infinite loop, so an unhandled interrupt halts here for the debugger.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
; All labels below resolve to the same address; the single B . traps them all.
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB the library reads __initial_sp/__heap_base/__heap_limit
; directly; otherwise __user_initial_stackheap returns the two-region layout
; in R0..R3 (heap base, stack top, heap limit, stack base) per the AAPCS hook.
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/Cortex/CORTEXM_ProcessStack/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* GNU as setup: unified ARM/Thumb syntax, Cortex-M0 (Thumb-only, soft FP). */
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* Reset entry: set SP, copy .data from flash, zero .bss, then run
 * SystemInit, C++/ctor init, and main(). Loop order is load-bearing:
 * r1 = byte offset into .data, r2 = current fill pointer for .bss. */
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* r3 = flash word at _sidata + r1 */
str r3, [r0, r1] /* store to _sdata + r1 (r0 reloaded below) */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3 /* continue while _sdata + r1 < _edata */
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3 /* continue while r2 < _ebss */
bcc FillZerobss
/* Call the clock system intitialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever /* trap if main() ever returns */
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
/* Fallback for any unexpected interrupt: spin forever so the debugger can
 * inspect the faulting state. All weak IRQ aliases resolve here. */
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
/* STM32F072 vector table; the linker script places .isr_vector at address 0.
 * Entry order is the NVIC hardware contract and must not change. */
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack /* initial Main Stack Pointer */
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Weak aliases: each handler defaults to Default_Handler until a strong
 * definition elsewhere overrides it. .thumb_set also marks the alias as a
 * Thumb function so the vector entries get the required LSB set. */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/Cortex/CORTEXM_ProcessStack/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
; IAR cstartup module (duplicate copy for the CORTEXM_ProcessStack example).
; Vector table in .intvec; entry order is fixed by the NVIC hardware.
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
; Entry 0: initial Main Stack Pointer = end of CSTACK (sfe = section end).
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
; Default handlers (duplicate copy). Each is PUBWEAK so the application can
; override it; the weak fallbacks spin in place for debugger inspection.
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
; Clock/system init first, then hand off to the IAR C runtime entry point.
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/GPIO/GPIO_IOToggle/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; 1 KB stack in zero-init RAM; ALIGN=3 => 2^3 = 8-byte alignment.
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
; __initial_sp marks the TOP of the stack (full-descending stack grows
; down); it is loaded into SP from vector table entry 0 at reset.
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; 512 B heap; base/limit labels are consumed by __user_initial_stackheap.
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
; PRESERVE8: promise 8-byte stack alignment (AAPCS); THUMB: Cortex-M0
; executes Thumb code only.
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
; Entry 0 = initial SP, entry 1 = reset vector, entries 2..15 = system
; exceptions, entries 16+ = STM32F072 peripheral IRQs. The linker sets
; bit 0 (Thumb bit) on each handler address automatically.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
; Runs SystemInit (clock/flash setup) then jumps to the C library entry
; __main, which initializes RAM sections and calls main(); never returns.
; [WEAK] lets an application supply its own Reset_Handler.
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Each spins in place ("B .") so a halted debugger shows which
; exception fired; strong definitions elsewhere override the weak ones.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Catch-all for every peripheral IRQ: all labels below alias the same
; address and fall through to one infinite loop.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
; Called by the ARM C library (two-region model): returns heap base in
; R0, stack limit in R1, heap limit in R2, stack base in R3.
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/GPIO/GPIO_IOToggle/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/* Reset entry point: set SP, copy .data from flash, zero .bss, run
   SystemInit and static constructors, then main(); never returns.
   Register roles: r0 = _sdata base (reloaded each pass), r1 = byte
   offset, r2 = cursor, r3 = scratch. */
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* r3 = word at _sidata + offset */
str r3, [r0, r1] /* store to _sdata + offset */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit /* unsigned compare: loop while dest < _edata */
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
/* main() should not return on bare metal; trap here if it does. */
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
/* Fallback for any unexpected interrupt: spin forever, preserving the
   system state for examination by a debugger. All IRQ handlers are
   weakly aliased to this symbol (see .thumb_set directives below). */
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
/* Cortex-M0 vector table: the linker script places .isr_vector at
   physical address 0x00000000. Word 0 = initial SP, word 1 = reset
   vector, words 2..15 = system exceptions, 16+ = peripheral IRQs. */
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/* Fix: .size must come AFTER the table contents. The original file
   emitted ".size g_pfnVectors, .-g_pfnVectors" before the label, where
   "." equals the symbol's own address, recording a zero-sized ELF
   symbol. The emitted image is unchanged; only symbol metadata is
   corrected (48 vectors * 4 = 192 bytes). */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Weak default bindings: .thumb_set aliases each handler symbol to
   Default_Handler and marks it as a Thumb function, so bit 0 is set in
   the corresponding vector table entry (required on Cortex-M). Any
   strong definition with the same name overrides the alias. */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/GPIO/GPIO_IOToggle/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
; Cortex-M0 vector table, placed in .intvec (address 0 by the linker
; config). Word 0 is the initial stack pointer: sfe(CSTACK) = end
; address of the CSTACK block (stack grows downward).
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
; Reset: run SystemInit (clocks), then the IAR runtime entry
; __iar_program_start (segment init, then main). PUBWEAK allows the
; application to override; NOROOT lets the linker discard unreferenced
; handler sections; REORDER permits independent placement.
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
; Default handlers: each weak symbol lives in its own section and
; branches to itself (infinite loop) until overridden by a strong
; definition with the same name.
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/GPIO/GPIO_EXTI/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; 1 KB stack in zero-init RAM; ALIGN=3 => 2^3 = 8-byte alignment.
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
; __initial_sp marks the TOP of the stack (full-descending stack grows
; down); it is loaded into SP from vector table entry 0 at reset.
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; 512 B heap; base/limit labels are consumed by __user_initial_stackheap.
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
; PRESERVE8: promise 8-byte stack alignment (AAPCS); THUMB: Cortex-M0
; executes Thumb code only.
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
; Entry 0 = initial SP, entry 1 = reset vector, entries 2..15 = system
; exceptions, entries 16+ = STM32F072 peripheral IRQs. The linker sets
; bit 0 (Thumb bit) on each handler address automatically.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Default handler shared by all peripheral interrupts. Every IRQ symbol is
; exported WEAK and defined as a label on the same endless loop, so any
; interrupt the application has not implemented traps here.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
; All of the following labels alias the single B . instruction below.
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB the stack/heap symbols are exported directly; otherwise the
; two-region memory model hook __user_initial_stackheap returns the region
; bounds in R0-R3 (R0 = heap base, R1 = stack top, R2 = heap limit,
; R3 = stack limit) as required by the ARM C library.
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/GPIO/GPIO_EXTI/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/*
 * Reset_Handler: entry point after reset.
 * Sets the stack pointer, copies .data from its flash load image to SRAM,
 * zero-fills .bss, calls SystemInit and the C runtime constructors, then
 * enters main(). Traps in LoopForever if main() ever returns.
 */
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0 /* r1 = running byte offset into .data */
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* load word from flash image */
str r3, [r0, r1] /* store into SRAM .data */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3 /* done when _sdata + offset reaches _edata */
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
/* Shared trap for all unexpected interrupts: spin forever so the system
   state is preserved for examination by a debugger. */
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/* NOTE(review): .size is evaluated before g_pfnVectors is defined, so it
   computes 0 — harmless (upstream ST quirk), but placing it after the
   table would record the real size. Left unchanged here. */
.size g_pfnVectors, .-g_pfnVectors
/* Cortex-M0 vector table: initial SP, 15 core exception vectors
   (reserved slots are 0), then the 32 STM32F072 peripheral IRQ vectors. */
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Each handler below defaults to Default_Handler until the application
   defines a strong symbol with the same name. */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/GPIO/GPIO_EXTI/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
; Cortex-M0 vector table: initial SP, 15 core exception vectors (reserved
; slots are 0), then the 32 STM32F072 peripheral interrupt vectors. Placed
; in .intvec so the linker locates it at the boot address.
__vector_table
DCD sfe(CSTACK) ; initial stack pointer = end of CSTACK
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
; Default handlers: each symbol is PUBWEAK so the application can override
; it with a strong definition; the default body branches to itself forever.
; Reset_Handler calls SystemInit and then the IAR C runtime entry point
; __iar_program_start, which initialises data/bss and calls main().
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0 ; no return
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/FLASH/FLASH_EraseProgram/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400 ; 1 KiB stack
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp ; top of stack (stack grows downwards)
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200 ; 512-byte heap
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
; Cortex-M0 layout: initial SP, 15 core exception vectors (reserved slots
; are 0), then the 32 STM32F072 peripheral interrupt vectors.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
; Entry point after reset: calls SystemInit (clock/system configuration),
; then jumps to the C library entry __main, which initialises RAM
; (scatter-loading) and eventually calls main(). Never returns.
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0 ; call SystemInit; returns here via LR
LDR R0, =__main
BX R0 ; tail-jump into the C library; no return
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Each core exception handler below is exported WEAK so the application
; can provide its own; the default body spins forever (B .) to preserve
; processor state for inspection with a debugger.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Default handler shared by all peripheral interrupts. Every IRQ symbol is
; exported WEAK and defined as a label on the same endless loop, so any
; interrupt the application has not implemented traps here.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
; All of the following labels alias the single B . instruction below.
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB the stack/heap symbols are exported directly; otherwise the
; two-region memory model hook __user_initial_stackheap returns the region
; bounds in R0-R3 (R0 = heap base, R1 = stack top, R2 = heap limit,
; R3 = stack limit) as required by the ARM C library.
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/FLASH/FLASH_EraseProgram/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/*
 * Reset_Handler: entry point after reset.
 * Sets the stack pointer, copies .data from its flash load image to SRAM,
 * zero-fills .bss, calls SystemInit and the C runtime constructors, then
 * enters main(). Traps in LoopForever if main() ever returns.
 */
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0 /* r1 = running byte offset into .data */
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* load word from flash image */
str r3, [r0, r1] /* store into SRAM .data */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3 /* done when _sdata + offset reaches _edata */
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
/* Shared trap for all unexpected interrupts: spin forever so the system
   state is preserved for examination by a debugger. */
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
/*
 * g_pfnVectors - Cortex-M0 vector table, placed by the linker script in
 * .isr_vector so it ends up at physical address 0x00000000.
 * Word 0 is the initial stack pointer, word 1 the reset vector; the
 * remaining words are the exception and peripheral interrupt handlers.
 */
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/* The .size directive goes AFTER the table so ".-g_pfnVectors" evaluates to
 the real table size; the original placed it before the label, recording an
 ELF symbol size of 0. The emitted table bytes are unchanged. */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Each handler below defaults to Default_Handler; a non-weak definition of
 the same name anywhere in the application overrides it. */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/FLASH/FLASH_EraseProgram/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
; cstartup module for the IAR EWARM toolchain: vector table + startup code.
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
; Cortex-M0 vector table. Word 0 is the initial SP (end of CSTACK),
; word 1 the reset vector; C-SPY also uses this symbol to seed SP/VTOR.
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
; Reset handler: run SystemInit (clocks), then hand off to the IAR runtime
; startup, which performs data/bss init and eventually calls main().
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
; Weak default handlers: each one spins on its own label so a halted
; debugger shows exactly which exception fired. Any strong definition
; of the same name elsewhere overrides these.
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aesophor/valkyrie
| 2,001
|
kernel/esr.S
|
// Copyright (c) 2021 Marco Wang <m.aesophor@gmail.com>. All rights reserved.
//
// esr.S - Exception Service Routine
// Trap frame size: 17 pairs * 16 bytes = 272 (x0-x29, SPSR/ELR, SP_EL0/x30).
#define REGISTER_SAVE_LOAD_BYTES 272
// Push a full trap frame onto the EL1 stack: x0-x29, then SPSR_EL1/ELR_EL1
// (via x28/x29 as scratch, re-saved at slot 15), then SP_EL0 and x30 (LR)
// at slot 16. Layout must match restore_cpu_context and the TrapFrame struct.
.macro save_cpu_context
sub sp, sp, REGISTER_SAVE_LOAD_BYTES
stp x0, x1, [sp, 16 * 0]
stp x2, x3, [sp, 16 * 1]
stp x4, x5, [sp, 16 * 2]
stp x6, x7, [sp, 16 * 3]
stp x8, x9, [sp, 16 * 4]
stp x10, x11, [sp, 16 * 5]
stp x12, x13, [sp, 16 * 6]
stp x14, x15, [sp, 16 * 7]
stp x16, x17, [sp, 16 * 8]
stp x18, x19, [sp, 16 * 9]
stp x20, x21, [sp, 16 * 10]
stp x22, x23, [sp, 16 * 11]
stp x24, x25, [sp, 16 * 12]
stp x26, x27, [sp, 16 * 13]
stp x28, x29, [sp, 16 * 14]
// Begin saving SPSR_EL1 and ELR_EL1
mrs x28, SPSR_EL1
mrs x29, ELR_EL1
stp x28, x29, [sp, 16 * 15]
// End saving SPSR_EL1 and ELR_EL1
mrs x29, SP_EL0
stp x29, x30, [sp, 16 * 16]
.endm
// Pop the trap frame pushed by save_cpu_context. x28/x29 are used as
// scratch for SPSR_EL1/ELR_EL1 (slot 15) and SP_EL0 (slot 16) first, and
// the real x28/x29 are restored LAST from slot 14 — this ordering matters.
.macro restore_cpu_context
ldp x0, x1, [sp, 16 * 0]
ldp x2, x3, [sp, 16 * 1]
ldp x4, x5, [sp, 16 * 2]
ldp x6, x7, [sp, 16 * 3]
ldp x8, x9, [sp, 16 * 4]
ldp x10, x11, [sp, 16 * 5]
ldp x12, x13, [sp, 16 * 6]
ldp x14, x15, [sp, 16 * 7]
ldp x16, x17, [sp, 16 * 8]
ldp x18, x19, [sp, 16 * 9]
ldp x20, x21, [sp, 16 * 10]
ldp x22, x23, [sp, 16 * 11]
ldp x24, x25, [sp, 16 * 12]
ldp x26, x27, [sp, 16 * 13]
// Begin restoring SPSR_EL1 and ELR_EL1
ldp x28, x29, [sp, 16 * 15]
msr SPSR_EL1, x28
msr ELR_EL1, x29
// End restoring SPSR_EL1 and ELR_EL1
ldp x29, x30, [sp, 16 * 16]
msr SP_EL0, x29
ldp x28, x29, [sp, 16 * 14]
add sp, sp, REGISTER_SAVE_LOAD_BYTES
.endm
// Pass the trap frame (which starts at the current sp) as the first
// argument to the C++ handler.
.macro set_x0_to_trapframe_addr
mov x0, sp
.endm
.section ".text"
// Synchronous-exception entry: save context, call
// valkyrie::kernel::exception::handle_exception(TrapFrame*), restore, eret.
.global handle_exception
handle_exception:
save_cpu_context
set_x0_to_trapframe_addr
bl _ZN8valkyrie6kernel9exception16handle_exceptionEPNS0_9TrapFrameE
restore_cpu_context
eret
// IRQ entry: same dance, dispatching to
// valkyrie::kernel::exception::handle_irq(TrapFrame*).
.global handle_irq
handle_irq:
save_cpu_context
set_x0_to_trapframe_addr
bl _ZN8valkyrie6kernel9exception10handle_irqEPNS0_9TrapFrameE
restore_cpu_context
eret
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/FLASH/FLASH_WriteProtection/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Stack area: 1 KB, zero-init, 8-byte aligned; __initial_sp marks its top
; (the Cortex-M stack grows downward).
Stack_Size EQU 0x400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Heap area: 512 bytes between __heap_base and __heap_limit.
Heap_Size EQU 0x200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
; Word 0 is the initial SP, word 1 the reset vector; remaining words are
; exception/IRQ handlers. __Vectors_Size is derived from the end marker.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine: run SystemInit (clocks), then jump to the ARM C
; library entry __main, which initializes data/bss and calls main().
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified).
; Each is exported WEAK so application code can override it; all IRQ labels
; fall through to the same "B ." spin in Default_Handler.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB the library reads the stack/heap symbols directly;
; otherwise provide the two-region __user_initial_stackheap callback
; (R0=heap base, R1=stack top, R2=heap limit, R3=stack limit).
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aesophor/valkyrie
| 2,463
|
boot/mmu.S
|
// Copyright (c) 2021-2022 Marco Wang <m.aesophor@gmail.com>. All rights reserved.
#include <mm/mmu.h>
// MMU Configuration
// TCR: 48-bit VA in both TTBR0/TTBR1 regions, 4 KB granule.
#define TCR_CONFIG_REGION_48bit (((64 - 48) << 0) | ((64 - 48) << 16))
#define TCR_CONFIG_4KB ((0b00 << 14) | (0b10 << 30))
#define TCR_CONFIG_DEFAULT (TCR_CONFIG_REGION_48bit | TCR_CONFIG_4KB)
// The addresses of Kernel PGD, PUD, PMD (physical, fixed at boot).
#define KERNEL_PGD_PTR 0x0000
#define KERNEL_PUD_PTR 0x1000
#define KERNEL_PMD_PTR 0x2000
#define DEVICE_MEMORY_ATTR ((MAIR_IDX_DEVICE_nGnRnE << 2) | PD_ACCESS | PD_BLOCK)
#define NORMAL_MEMORY_ATTR ((MAIR_IDX_NORMAL_NOCACHE << 2) | PD_ACCESS | PD_BLOCK)
// Store (physical_addr | attributes) at write_to_addr, forming one page
// table descriptor. Clobbers x0-x2.
.macro WRITE_PAGE_DESCRIPTOR write_to_addr physical_addr attributes
mov x0, \write_to_addr
mov x1, \physical_addr
ldr x2, = \attributes
orr x2, x1, x2
str x2, [x0]
.endm
.section ".text"
// __mmu_init: configure TCR/MAIR, build the kernel page tables, point both
// TTBR0/TTBR1 at them, enable the MMU (cache off), and branch to kmain()
// at its virtual address. Clobbers x0, x2; does not return.
.global __mmu_init
__mmu_init:
ldr x0, = TCR_CONFIG_DEFAULT
msr tcr_el1, x0
ldr x0, =( \
(MAIR_DEVICE_nGnRnE << (MAIR_IDX_DEVICE_nGnRnE * 8)) | \
(MAIR_NORMAL_NOCACHE << (MAIR_IDX_NORMAL_NOCACHE * 8)) \
)
msr mair_el1, x0
// Create kernel page tables and load its address to
// the translation base register of the upper va_space (i.e. kernel space).
bl __create_kernel_page_tables
msr ttbr0_el1, x0
msr ttbr1_el1, x0
// Enable MMU with cache disabled (SCTLR_EL1.M = 1).
// NOTE(review): no isb after writing sctlr_el1 before the indirect
// branch below — presumably works on this target, but confirm.
mrs x2, sctlr_el1
orr x2 , x2, 1
msr sctlr_el1, x2
// Indirect branch to the virtual address of `kmain()`.
// See kernel/kmain.cc
ldr x2, = kmain
br x2
// Build a minimal 3-level identity mapping (PGD -> PUD -> PMD of 2 MB
// blocks) covering RAM, GPU peripherals and the kernel heap.
// Returns the PGD physical address in x0; clobbers x0-x2 (via the macro).
__create_kernel_page_tables:
// Write a PGD at 0x0000
WRITE_PAGE_DESCRIPTOR KERNEL_PGD_PTR, KERNEL_PUD_PTR, PD_TABLE
// Write PUD at 0x1000
WRITE_PAGE_DESCRIPTOR KERNEL_PUD_PTR, KERNEL_PMD_PTR, PD_TABLE
// Write PMD at 0x2000
WRITE_PAGE_DESCRIPTOR KERNEL_PMD_PTR, 0, NORMAL_MEMORY_ATTR
// ARM local peripherals (0x40000000 - 0x7fffffff)
WRITE_PAGE_DESCRIPTOR (KERNEL_PUD_PTR + 8), 0x40000000, DEVICE_MEMORY_ATTR
// GPU peripherals (0x3f000000 - 0x3fffffff)
WRITE_PAGE_DESCRIPTOR (KERNEL_PMD_PTR + (0x3f000000 / 0x200000) * 8), 0x3f000000, DEVICE_MEMORY_ATTR
WRITE_PAGE_DESCRIPTOR (KERNEL_PMD_PTR + (0x3f200000 / 0x200000) * 8), 0x3f200000, DEVICE_MEMORY_ATTR
// Kernel heap
WRITE_PAGE_DESCRIPTOR (KERNEL_PMD_PTR + (0x10000000 / 0x200000) * 8), 0x10000000, NORMAL_MEMORY_ATTR
WRITE_PAGE_DESCRIPTOR (KERNEL_PMD_PTR + (0x10200000 / 0x200000) * 8), 0x10200000, NORMAL_MEMORY_ATTR
// Return the address of PGD
mov x0, KERNEL_PGD_PTR
ret
|
aesophor/valkyrie
| 1,331
|
boot/boot.S
|
// Copyright (c) 2021-2022 Marco Wang <m.aesophor@gmail.com>. All rights reserved.
//
// boot.S - Valkyrie Kernel Entry Point
//
// See scripts/linker.ld for details.
.section ".text"
// _start: primary-core boot path. Parks secondary cores, enables FP/SIMD
// access at EL1, forces EL1 to AArch64, installs the EL1 vector table, and
// erets from EL2 into __mmu_init at EL1h with DAIF masked.
.global _start
_start:
// Let core with cpuid != 0 enter busy loop
mrs x0, mpidr_el1
and x0, x0, 3
cbnz x0, _ZN8valkyrie6kernel6Kernel4haltEv
// Allow access to variadic functions in EL1.
// On Arm64, when we want to print out some message, the va_list will
// use the SIMD&FP registers (like q0, q1) to store parameters. So, we
// have to disable the trap of accessing floating-point and Advanced SIMD
// registers to make va_list use SIMD&FP registers properly.
// See: https://lists.xen.org/archives/html/minios-devel/2018-07/msg00038.html
mrs x0, cpacr_el1
orr x0, x0, #(0b11 << 20) // CPACR_EL1.FPEN = 0b11: no FP/SIMD trapping
msr cpacr_el1, x0
// Configure HCR_EL2 (Hypervisor Configuration Register - EL2)
// by setting HCR_EL2.RW to 1 (which means EL1 is AArch64)
mrs x0, hcr_el2
orr x0, x0, #(1 << 31)
msr hcr_el2, x0
// Install exception vector table.
ldr x0, = evt
msr vbar_el1, x0
// Drop exception level to EL1 and enable MMU. See boot/mmu.S
// SPSR_EL2 = 0b0101 (EL1h) with DAIF masked (bits 6-9).
mov x0, 0
orr x0, x0, #(1 << 0)
orr x0, x0, #(1 << 2)
orr x0, x0, #(0b1111 << 6)
msr spsr_el2, x0
adr x0, __mmu_init
msr elr_el2, x0
// EL1 stack grows down from _start's load address.
adr x0, _start
msr sp_el1, x0
eret
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/FLASH/FLASH_WriteProtection/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Assembler configuration and linker-script symbol references for the
   STM32F072xB GCC startup code (Cortex-M0, Thumb, software FP). */
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/* Reset entry point: set SP, copy .data from flash, zero .bss, run SystemInit
   and C++ static constructors, then call main(). Never returns. */
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM.
   r1 = byte offset into .data; loop condition is tested first. */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]                /* r3 = flash word at _sidata + r1 */
str r3, [r0, r1]                /* store to _sdata + r1 (r0 = _sdata here) */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit                /* while (_sdata + r1 < _edata) */
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. r2 walks from _sbss to _ebss. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
/* main() should not return; trap here if it does. */
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
/* Fallback handler for any unexpected interrupt: spin forever so the
   system state is preserved for a debugger. */
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
/* Cortex-M0 vector table for STM32F072xB, placed at physical address 0x0
   by the linker script (section .isr_vector): initial SP, reset vector,
   core exceptions, then the 32 external interrupt vectors. */
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/* Fix: the original placed ".size g_pfnVectors, .-g_pfnVectors" BEFORE the
   label, so the expression evaluated to 0 and the ELF symbol carried a size
   of 0 bytes. The directive must follow the table so ".-g_pfnVectors" spans
   the emitted words (48 * 4 = 192 bytes). */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Weak aliases: every exception/IRQ handler defaults to Default_Handler;
   any strong definition with the same name elsewhere overrides it. */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/FLASH/FLASH_WriteProtection/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
; IAR (EWARM) cstartup module for STM32F072xB: declares the vector table
; in .intvec (initial SP from the CSTACK section, then the exception and
; external-interrupt vectors).
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
; Reset_Handler runs SystemInit then jumps to the IAR C runtime entry;
; every other handler is a PUBWEAK infinite-loop stub that a strong
; definition elsewhere may override.
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/ADC/ADC_Sequencer/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; MDK-ARM stack (1 KiB) and heap (512 B) regions, 8-byte aligned.
; __initial_sp is the first vector-table entry (top of stack).
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
; (initial SP, reset vector, core exceptions, then 32 external IRQ vectors)
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine: run SystemInit, then branch to the ARM C library
; entry point __main (which initialises the runtime and calls main()).
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; All external IRQs share one weak infinite-loop stub: every label below
; resolves to the same address just before "B .".
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
; Returns heap/stack bounds to the C library:
; R0 = heap base, R1 = stack top, R2 = heap limit, R3 = stack base.
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/ADC/ADC_Sequencer/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Assembler configuration and linker-script symbol references for the
   STM32F072xB GCC startup code (Cortex-M0, Thumb, software FP). */
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/* Reset entry point: set SP, initialise .data and .bss, run SystemInit and
   the C++ static constructors, then call main(). Never returns. */
Reset_Handler:
/* Initial stack pointer comes from the linker script. */
ldr r0, =_estack
mov sp, r0
/* Copy the .data initialisers word-by-word from flash (_sidata) to SRAM
   (_sdata.._edata). Condition is tested before the first copy. */
ldr r0, =_sdata
ldr r1, =_edata
ldr r2, =_sidata
b .Lcopy_check
.Lcopy_word:
ldm r2!, {r3}
stm r0!, {r3}
.Lcopy_check:
cmp r0, r1
bcc .Lcopy_word
/* Zero-fill the .bss section (_sbss.._ebss). */
ldr r0, =_sbss
ldr r1, =_ebss
movs r2, #0
b .Lzero_check
.Lzero_word:
stm r0!, {r2}
.Lzero_check:
cmp r0, r1
bcc .Lzero_word
/* Clock/system initialisation, static constructors, then the application. */
bl SystemInit
bl __libc_init_array
bl main
/* Trap here if main() ever returns. */
.Lhang:
b .Lhang
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
/* Fallback handler for any unexpected interrupt: spin forever so the
   system state is preserved for a debugger. */
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
/* Cortex-M0 vector table for STM32F072xB, placed at physical address 0x0
   by the linker script (section .isr_vector): initial SP, reset vector,
   core exceptions, then the 32 external interrupt vectors. */
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/* Fix: the original placed ".size g_pfnVectors, .-g_pfnVectors" BEFORE the
   label, so the expression evaluated to 0 and the ELF symbol carried a size
   of 0 bytes. The directive must follow the table so ".-g_pfnVectors" spans
   the emitted words (48 * 4 = 192 bytes). */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/ADC/ADC_Sequencer/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/ADC/ADC_AnalogWatchdog/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/ADC/ADC_AnalogWatchdog/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system intitialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/ADC/ADC_AnalogWatchdog/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/RCC/RCC_ClockConfig/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/RCC/RCC_ClockConfig/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system intitialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/RCC/RCC_ClockConfig/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,395
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Demonstrations/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; NOTE(review): the trailing ';' after the two EQU values below merely starts an
; empty armasm comment — harmless, but unconventional; most ST templates omit it.
Stack_Size EQU 0x1000;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x400;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
; Require 8-byte stack alignment; Cortex-M0 executes Thumb code only.
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts (order is fixed by the STM32F072 NVIC mapping)
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine: run SystemInit (clock setup), then jump to the
; C library entry __main, which initializes RAM and calls main().
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; All peripheral IRQ labels below are weak aliases for one shared handler:
; every label falls through to the single "B ." infinite loop at the end.
; An application overrides a handler simply by defining a strong symbol
; with the same name.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
; Returns the heap/stack region bounds in R0-R3, as required by the ARM
; C library two-region memory model (called from __main).
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Demonstrations/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
/* NOTE(review): these .word directives precede any .section directive, so the
symbol values are emitted into the default section; kept from the ST template. */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM.
Register roles: r1 = byte offset into .data; r0 = _sdata and r3 = _edata
(both reloaded each iteration in LoopCopyDataInit); r2 = current end. */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. r2 = current address, advanced by 4 per store. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
/* main() should never return on bare metal; trap here if it does. */
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/* NOTE(review): .size is evaluated here, before the table is emitted, so it
records 0 rather than the table length; harmless quirk of the ST template. */
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Demonstrations/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK) ; Initial SP = end of CSTACK block
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts (order is fixed by the STM32F072 NVIC mapping)
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;; Each handler below is declared PUBWEAK and branches to itself (an
;; infinite loop), so it takes effect only until the application links a
;; strong definition with the same name.
;;
THUMB
;; Reset: run SystemInit (clock setup), then hand off to the IAR runtime
;; entry point, which initializes data/bss and calls main().
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_MIX/TIM/TIM_6Steps/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB, only the stack/heap boundary symbols need exporting;
; otherwise provide the legacy two-region stack/heap descriptor expected
; by the full ARM C library.
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
; __user_initial_stackheap: returns heap base in R0, stack base (top) in
; R1, heap limit in R2 and stack limit in R3, per the ARM library contract.
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_MIX/TIM/TIM_6Steps/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/* Reset_Handler: entry point after reset.
 * Loads SP from _estack, copies .data initializers from flash to SRAM,
 * zero-fills .bss, calls SystemInit, runs C static constructors via
 * __libc_init_array, then calls main().  Loops forever if main() returns.
 * Uses r0-r3; r1 is the running byte offset through .data. */
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
/* Default_Handler: catch-all for unexpected interrupts; spins forever so
 * the system state is preserved for examination by a debugger. */
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/* g_pfnVectors: Cortex-M0 vector table — initial SP, 15 core exception
 * vectors (reserved slots as 0), then 32 peripheral IRQ vectors for
 * STM32F072x8/xB.  Placed in .isr_vector so the linker script can pin it
 * to address 0x00000000. */
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/* Fix: the original emitted ".size g_pfnVectors, .-g_pfnVectors" BEFORE the
 * label, where "." equals the label address, recording a size of 0.  Moved
 * here so the symbol carries the true table size. */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_MIX/TIM/TIM_6Steps/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
; __vector_table: Cortex-M0 vector table.  Entry 0 is sfe(CSTACK), the
; end address of the CSTACK section, i.e. the initial stack pointer;
; then 15 core exception vectors and 32 peripheral IRQ vectors.
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
; Reset_Handler: call SystemInit, then jump to the IAR C runtime entry
; __iar_program_start (which eventually calls main); never returns here.
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
; Dummy exception and IRQ handlers.  Each is PUBWEAK (overridable by any
; application definition of the same name) and spins by branching to itself.
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_MIX/PWR/PWR_STANDBY_RTC/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Stack: 0x400 bytes, uninitialized (NOINIT), 8-byte aligned (ALIGN=3).
Stack_Size EQU 0x400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
; __initial_sp: first address past the stack region; placed in vector-table
; entry 0 so it is loaded into SP at reset (full-descending stack).
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Heap: 0x200 bytes for the C library allocator, bounded by
; __heap_base/__heap_limit.
Heap_Size EQU 0x200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
; __Vectors: initial SP plus 15 core exception vectors (reserved slots as
; 0), then 32 peripheral IRQ vectors for STM32F072x8/xB.
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
; Reset_Handler: call SystemInit, then jump to the ARM C library entry
; __main, which sets up the runtime and eventually calls main().
; Exported WEAK so the application may supply its own reset handler.
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Dummy core exception handlers: exported WEAK (overridable by the
; application); each spins in place ("B .") so the fault state is
; preserved for a debugger.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Default_Handler: shared catch-all for every peripheral IRQ.  All IRQ
; symbols below are exported WEAK and defined as labels on the same
; infinite loop ("B ."), so the application overrides any of them simply
; by defining a function of the same name.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
; All labels below alias the single "B ." instruction that follows them.
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB, only the stack/heap boundary symbols need exporting;
; otherwise provide the legacy two-region stack/heap descriptor expected
; by the full ARM C library.
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
; __user_initial_stackheap: returns heap base in R0, stack base (top) in
; R1, heap limit in R2 and stack limit in R3, per the ARM library contract.
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_MIX/PWR/PWR_STANDBY_RTC/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/* Reset_Handler: entry point after reset.
 * Loads SP from _estack, copies .data initializers from flash to SRAM,
 * zero-fills .bss, calls SystemInit, runs C static constructors via
 * __libc_init_array, then calls main().  Loops forever if main() returns.
 * Uses r0-r3; r1 is the running byte offset through .data. */
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
/* Default_Handler: catch-all for unexpected interrupts; spins forever so
 * the system state is preserved for examination by a debugger. */
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/* g_pfnVectors: Cortex-M0 vector table — initial SP, 15 core exception
 * vectors (reserved slots as 0), then 32 peripheral IRQ vectors for
 * STM32F072x8/xB.  Placed in .isr_vector so the linker script can pin it
 * to address 0x00000000. */
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/* Fix: the original emitted ".size g_pfnVectors, .-g_pfnVectors" BEFORE the
 * label, where "." equals the label address, recording a size of 0.  Moved
 * here so the symbol carries the true table size. */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Each handler symbol is declared weak and aliased to Default_Handler;
 * any strong definition of the same name in application code overrides it.
 * .thumb_set (rather than .set) also marks the alias as a Thumb function,
 * so the vector table entry gets the required bit-0-set (odd) address. */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_MIX/PWR/PWR_STANDBY_RTC/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
; IAR EWARM startup module for STM32F072x8/xB: defines the vector table in
; .intvec, a weak Reset_Handler that calls SystemInit then the IAR runtime
; entry (__iar_program_start), and weak self-looping stubs for every other
; exception/IRQ. PUBWEAK lets application code override any stub by name.
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
; Vector table: word 0 is the initial SP (sfe(CSTACK) = end of CSTACK),
; followed by the Cortex-M0 system exceptions and 32 peripheral IRQ vectors.
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
; Reset: run clock/system init first, then hand off to the IAR C runtime
; (which performs data/bss init and eventually calls main()).
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
; Each stub below branches to itself (infinite loop) so an unexpected
; interrupt halts in place for a debugger; PUBWEAK allows override.
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_MIX/PWR/PWR_STOP/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; MDK-ARM (Keil) startup for STM32F072x8/xB: reserves stack/heap, defines the
; vector table (__Vectors), a weak Reset_Handler that calls SystemInit then
; the C library entry __main, weak dummy exception handlers, and MicroLIB /
; standard-library stack-heap initialization.
Stack_Size EQU 0x400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
; Calls SystemInit (clocks) first, then jumps to __main, which performs
; scatter-loading (data/bss init) and eventually calls main().
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; All peripheral IRQs share one weak catch-all: every label below resolves
; to the same "B ." infinite loop unless application code overrides it.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
; Standard C library: provide __user_initial_stackheap returning heap base
; (R0), stack top (R1), heap limit (R2) and stack limit (R3).
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_MIX/PWR/PWR_STOP/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Target/assembler configuration: unified syntax, Cortex-M0, software FP,
 * Thumb state. NOTE(review): the bare .word directives below emit the
 * linker-script addresses into the current section as data — presumably a
 * vendor device to keep these symbols referenced; confirm against the
 * linker script before relying on them. */
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* Reset entry point: set SP, copy .data initializers from flash to SRAM,
 * zero .bss, then run SystemInit, static constructors, and main().
 * Register roles in the copy loop: r0 = _sdata (dest base), r1 = byte
 * offset, r3 = scratch word; in the fill loop r2 = current .bss address. */
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit /* enter loop at its test */
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* r3 = flash word at _sidata + offset */
str r3, [r0, r1] /* store to _sdata + offset */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1 /* r2 = next destination address */
cmp r2, r3
bcc CopyDataInit /* unsigned compare: continue while below _edata */
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever: /* main() should not return; trap if it does */
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
/* Catch-all trap: any interrupt without an application-provided handler
 * (see weak aliases below) spins here so a debugger can inspect state. */
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop /* intentional infinite loop */
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
/* Cortex-M0 exception/interrupt vector table, placed by the linker script
 * at the start of flash (physical address 0): word 0 = initial SP
 * (_estack), word 1 = reset vector, then system exceptions and the 32
 * STM32F072 peripheral IRQ vectors.
 *
 * Fix: the original emitted ".size g_pfnVectors, .-g_pfnVectors" BEFORE
 * the label, where "." equals the label address, recording an ELF object
 * size of 0. Moved after the last entry so the size is correct (48 words).
 * The emitted table bytes are unchanged. */
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Weak aliases: every handler defaults to Default_Handler and is overridden
 * by any strong definition of the same name in application code.
 * .thumb_set marks each alias as a Thumb function so its vector table
 * entry carries the required bit-0-set (odd) address. */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_MIX/PWR/PWR_STOP/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_MIX/I2C/I2C_OneBoard_ComSlave7_10bits_IT/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_MIX/I2C/I2C_OneBoard_ComSlave7_10bits_IT/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system intitialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_MIX/I2C/I2C_OneBoard_ComSlave7_10bits_IT/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_MIX/CRC/CRC_PolynomialUpdate/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400
; Uninitialized RAM region for the main stack, 8-byte aligned (ALIGN=3 => 2^3).
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
; __initial_sp marks the top of the stack (Cortex-M stack grows downward);
; it is placed in the first vector-table entry below.
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200
; Uninitialized RAM region for the C heap, bounded by __heap_base/__heap_limit.
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
; Require 8-byte stack alignment (AAPCS) and Thumb instruction encoding.
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
; Entry order is fixed by the Cortex-M0 architecture (SP, Reset, exceptions)
; followed by the STM32F072 IRQ lines in NVIC position order; do not reorder.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
; Entry point after reset: call SystemInit (clock/system setup), then jump to
; the ARM C library entry __main, which initializes .data/.bss and calls main().
; __main does not return, so no epilogue is needed here.
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Each is exported WEAK so application code can provide its own handler;
; "B ." spins in place so a debugger can inspect the fault context.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Catch-all handler: every peripheral IRQ symbol is a WEAK alias pointing at
; the same "B ." infinite loop. Defining any of these names in C overrides
; the alias, so only unimplemented interrupts land here.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
; All labels below share the single "B ." instruction that follows them.
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB the library reads __initial_sp/__heap_base/__heap_limit
; directly; otherwise __user_initial_stackheap returns the two-region layout
; in R0-R3 (heap base, stack top, heap limit, stack limit) per the ARM C
; library convention.
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_MIX/CRC/CRC_PolynomialUpdate/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Assembler setup: unified ARM/Thumb syntax, Cortex-M0 target, software FP,
   Thumb encoding (Cortex-M0 executes Thumb only). */
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/* Reset entry point: set SP, copy .data from flash to RAM, zero .bss, then
   SystemInit -> C++ static constructors -> main(). Never returns. */
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
/* r1 = byte offset into .data; loop entered at the test (bottom-tested). */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
 * @brief This is the code that gets called when the processor receives an
 * unexpected interrupt. This simply enters an infinite loop, preserving
 * the system state for examination by a debugger.
 *
 * @param None
 * @retval : None
 */
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/* NOTE(review): this .size directive precedes the label, so ". - g_pfnVectors"
   evaluates to 0 here. Harmless metadata quirk present in ST's stock startup
   files; left as-is to stay byte-identical to the vendor code. */
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
* (.thumb_set = alias with the Thumb bit preserved, required for Cortex-M
* vector entries.)
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_MIX/CRC/CRC_PolynomialUpdate/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
; IAR cstartup module: declares the CSTACK block and the .intvec section that
; the IAR linker places at the reset vector address.
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
; Cortex-M0 vector table: entry 0 is the initial SP (sfe(CSTACK) = section
; end of CSTACK, i.e. stack top), followed by exception and IRQ vectors in
; architecture/NVIC order. Do not reorder.
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
; Reset entry: call SystemInit, then jump to the IAR runtime entry
; __iar_program_start (copies .data, zeroes .bss, runs ctors, calls main).
; __iar_program_start does not return.
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
; Weak core-exception stubs: each spins on itself so a debugger can inspect
; the fault; override by defining a strong symbol of the same name.
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
; Weak peripheral-IRQ stubs (one self-branch loop each); NOROOT sections let
; the linker drop any stub replaced by an application-defined handler.
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_MIX/DMA/DMA_FLASHToRAM/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400
; Uninitialized RAM region for the main stack, 8-byte aligned (ALIGN=3 => 2^3).
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
; __initial_sp marks the top of the stack (Cortex-M stack grows downward);
; it is placed in the first vector-table entry below.
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200
; Uninitialized RAM region for the C heap, bounded by __heap_base/__heap_limit.
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
; Require 8-byte stack alignment (AAPCS) and Thumb instruction encoding.
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
; Entry order is fixed by the Cortex-M0 architecture (SP, Reset, exceptions)
; followed by the STM32F072 IRQ lines in NVIC position order; do not reorder.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
; Entry point after reset: call SystemInit (clock/system setup), then jump to
; the ARM C library entry __main, which initializes .data/.bss and calls main().
; __main does not return, so no epilogue is needed here.
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Each is exported WEAK so application code can provide its own handler;
; "B ." spins in place so a debugger can inspect the fault context.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Catch-all handler: every peripheral IRQ symbol is a WEAK alias pointing at
; the same "B ." infinite loop. Defining any of these names in C overrides
; the alias, so only unimplemented interrupts land here.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
; All labels below share the single "B ." instruction that follows them.
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB the library reads __initial_sp/__heap_base/__heap_limit
; directly; otherwise __user_initial_stackheap returns the two-region layout
; in R0-R3 (heap base, stack top, heap limit, stack limit) per the ARM C
; library convention.
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_MIX/DMA/DMA_FLASHToRAM/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system intitialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_MIX/DMA/DMA_FLASHToRAM/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_MIX/UART/UART_HyperTerminal_TxPolling_RxIT/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_MIX/UART/UART_HyperTerminal_TxPolling_RxIT/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system intitialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
* Layout: initial stack pointer, then the ARMv6-M system exception vectors,
* then the STM32F0 peripheral interrupt vectors. Reserved slots are 0.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/* Fix: the stock ST file placed this .size directive BEFORE the label,
   which made ". - g_pfnVectors" evaluate to 0 and recorded a zero-size
   object symbol in the ELF. Emitting it after the table records the
   true table size. Runtime behavior is unaffected. */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
* For each handler named in the vector table above: ".weak" marks the symbol
* weak (so an application-supplied definition wins at link time) and
* ".thumb_set" makes it an alias of Default_Handler with the Thumb bit set,
* as required for Cortex-M vector entries.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
/* (Removed trailing non-assembly text — web-page scrape artifact
   ("Subsets and Splits", dataset-viewer boilerplate) that is not part of
   the startup file and would not assemble.) */