repo_id
stringlengths
5
115
size
int64
590
5.01M
file_path
stringlengths
4
212
content
stringlengths
590
5.01M
al3xtjames/Clover
2,499
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/mips3/mpih-sub1.S
/* mips3 sub_n -- Subtract two limb vectors of the same length > 0 and * store difference in a third limb vector. * * Copyright (C) 1995, 1998, 1999, 2000, * 2001, 2002 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA */ /******************* * mpi_limb_t * _gcry_mpih_sub_n( mpi_ptr_t res_ptr, (r4) * mpi_ptr_t s1_ptr, (r5) * mpi_ptr_t s2_ptr, (r6) * mpi_size_t size) (r7) */ .text .align 2 .globl _gcry_mpih_sub_n .ent _gcry_mpih_sub_n _gcry_mpih_sub_n: .set noreorder .set nomacro ld $10,0($5) ld $11,0($6) daddiu $7,$7,-1 and $9,$7,4-1 # number of limbs in first loop beq $9,$0,.L0 # if multiple of 4 limbs, skip first loop move $2,$0 dsubu $7,$7,$9 .Loop0: daddiu $9,$9,-1 ld $12,8($5) daddu $11,$11,$2 ld $13,8($6) sltu $8,$11,$2 dsubu $11,$10,$11 sltu $2,$10,$11 sd $11,0($4) or $2,$2,$8 daddiu $5,$5,8 daddiu $6,$6,8 move $10,$12 move $11,$13 bne $9,$0,.Loop0 daddiu $4,$4,8 .L0: beq $7,$0,.Lend nop .Loop: daddiu $7,$7,-4 ld $12,8($5) daddu $11,$11,$2 ld $13,8($6) sltu $8,$11,$2 dsubu $11,$10,$11 sltu $2,$10,$11 sd $11,0($4) or $2,$2,$8 ld $10,16($5) daddu $13,$13,$2 ld $11,16($6) sltu $8,$13,$2 dsubu $13,$12,$13 sltu $2,$12,$13 sd $13,8($4) or $2,$2,$8 ld $12,24($5) daddu $11,$11,$2 ld $13,24($6) sltu $8,$11,$2 dsubu $11,$10,$11 sltu $2,$10,$11 sd 
$11,16($4) or $2,$2,$8 ld $10,32($5) daddu $13,$13,$2 ld $11,32($6) sltu $8,$13,$2 dsubu $13,$12,$13 sltu $2,$12,$13 sd $13,24($4) or $2,$2,$8 daddiu $5,$5,32 daddiu $6,$6,32 bne $7,$0,.Loop daddiu $4,$4,32 .Lend: daddu $11,$11,$2 sltu $8,$11,$2 dsubu $11,$10,$11 sltu $2,$10,$11 sd $11,0($4) j $31 or $2,$2,$8 .end _gcry_mpih_sub_n
al3xtjames/Clover
1,914
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/amd64/mpih-mul1.S
/* AMD64 mul_1 -- Multiply a limb vector with a limb and store * the result in a second limb vector. * Copyright (C) 1992, 1994, 1998, * 2001, 2002, 2006 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. */ #include "sysdep.h" #include "asm-syntax.h" /******************* * mpi_limb_t * _gcry_mpih_mul_1( mpi_ptr_t res_ptr, (rdi) * mpi_ptr_t s1_ptr, (rsi) * mpi_size_t s1_size, (rdx) * mpi_limb_t s2_limb) (rcx) */ TEXT ALIGN(5) .byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 GLOBL C_SYMBOL_NAME(_gcry_mpih_mul_1) C_SYMBOL_NAME(_gcry_mpih_mul_1:) movq %rdx, %r11 leaq (%rsi,%rdx,8), %rsi leaq (%rdi,%rdx,8), %rdi negq %r11 xorl %r8d, %r8d .Loop: movq (%rsi,%r11,8), %rax mulq %rcx addq %r8, %rax movl $0, %r8d adcq %rdx, %r8 movq %rax, (%rdi,%r11,8) incq %r11 jne .Loop movq %r8, %rax ret
al3xtjames/Clover
3,596
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/amd64/mpih-mul2.S
/* AMD64 addmul2 -- Multiply a limb vector with a limb and add * the result to a second limb vector. * * Copyright (C) 1992, 1994, 1998, * 2001, 2002, 2006 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. */ #include "sysdep.h" #include "asm-syntax.h" /******************* * mpi_limb_t * _gcry_mpih_addmul_2( mpi_ptr_t res_ptr, (sp + 4) * mpi_ptr_t s1_ptr, (sp + 8) * mpi_size_t s1_size, (sp + 12) * mpi_limb_t s2_limb) (sp + 16) */ /* i80386 addmul_1 -- Multiply a limb vector with a limb and add * the result to a second limb vector. * * Copyright (C) 1992, 1994, 1998, * 2001, 2002 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. 
* * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. */ #include "sysdep.h" #include "asm-syntax.h" /******************* * mpi_limb_t * _gcry_mpih_addmul_1( mpi_ptr_t res_ptr, (rdi) * mpi_ptr_t s1_ptr, (rsi) * mpi_size_t s1_size, (rdx) * mpi_limb_t s2_limb) (rcx) */ TEXT GLOBL C_SYMBOL_NAME(_gcry_mpih_addmul_1) C_SYMBOL_NAME(_gcry_mpih_addmul_1:) movq %rdx, %r11 leaq (%rsi,%rdx,8), %rsi leaq (%rdi,%rdx,8), %rdi negq %r11 xorl %r8d, %r8d xorl %r10d, %r10d ALIGN(3) /* minimal alignment for claimed speed */ .Loop: movq (%rsi,%r11,8), %rax mulq %rcx addq (%rdi,%r11,8), %rax adcq %r10, %rdx addq %r8, %rax movq %r10, %r8 movq %rax, (%rdi,%r11,8) adcq %rdx, %r8 incq %r11 jne .Loop movq %r8, %rax ret
al3xtjames/Clover
2,189
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/amd64/mpih-lshift.S
/* AMD64 (x86_64) lshift -- Left shift a limb vector and store * result in a second limb vector. * * Copyright (C) 1992, 1994, 1995, 1998, * 2001, 2002, 2006 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. 
*/ #include "sysdep.h" #include "asm-syntax.h" /******************* * mpi_limb_t * _gcry_mpih_lshift( mpi_ptr_t wp, rdi * mpi_ptr_t up, rsi * mpi_size_t usize, rdx * unsigned cnt) rcx */ .text .globl C_SYMBOL_NAME(_gcry_mpih_lshift) C_SYMBOL_NAME(_gcry_mpih_lshift:) movq -8(%rsi,%rdx,8), %mm7 movd %ecx, %mm1 movl $64, %eax subl %ecx, %eax movd %eax, %mm0 movq %mm7, %mm3 psrlq %mm0, %mm7 movd %mm7, %rax subq $2, %rdx jl .Lendo ALIGN(4) /* minimal alignment for claimed speed */ .Loop: movq (%rsi,%rdx,8), %mm6 movq %mm6, %mm2 psrlq %mm0, %mm6 psllq %mm1, %mm3 por %mm6, %mm3 movq %mm3, 8(%rdi,%rdx,8) je .Lende movq -8(%rsi,%rdx,8), %mm7 movq %mm7, %mm3 psrlq %mm0, %mm7 psllq %mm1, %mm2 por %mm7, %mm2 movq %mm2, (%rdi,%rdx,8) subq $2, %rdx jge .Loop .Lendo: movq %mm3, %mm2 .Lende: psllq %mm1, %mm2 movq %mm2, (%rdi) emms ret
al3xtjames/Clover
2,250
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/amd64/mpih-rshift.S
/* AMD64 (x86_64) rshift -- Right shift a limb vector and store * result in a second limb vector. * * Copyright (C) 1992, 1994, 1995, 1998, * 2001, 2002, 2006 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. 
*/ #include "sysdep.h" #include "asm-syntax.h" /******************* * mpi_limb_t * _gcry_mpih_rshift( mpi_ptr_t wp, rdi * mpi_ptr_t up, rsi * mpi_size_t usize, rdx * unsigned cnt) rcx */ .text .globl C_SYMBOL_NAME(_gcry_mpih_rshift) C_SYMBOL_NAME(_gcry_mpih_rshift:) movq (%rsi), %mm7 movd %ecx, %mm1 movl $64, %eax subl %ecx, %eax movd %eax, %mm0 movq %mm7, %mm3 psllq %mm0, %mm7 movd %mm7, %rax leaq (%rsi,%rdx,8), %rsi leaq (%rdi,%rdx,8), %rdi negq %rdx addq $2, %rdx jg .Lendo ALIGN(8) /* minimal alignment for claimed speed */ .Loop: movq -8(%rsi,%rdx,8), %mm6 movq %mm6, %mm2 psllq %mm0, %mm6 psrlq %mm1, %mm3 por %mm6, %mm3 movq %mm3, -16(%rdi,%rdx,8) je .Lende movq (%rsi,%rdx,8), %mm7 movq %mm7, %mm3 psllq %mm0, %mm7 psrlq %mm1, %mm2 por %mm7, %mm2 movq %mm2, -8(%rdi,%rdx,8) addq $2, %rdx jle .Loop .Lendo: movq %mm3, %mm2 .Lende: psrlq %mm1, %mm2 movq %mm2, -8(%rdi) emms ret
al3xtjames/Clover
2,020
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/amd64/mpih-mul3.S
/* AMD64 submul_1 -- Multiply a limb vector with a limb and add * the result to a second limb vector. * * Copyright (C) 1992, 1994, 1998, * 2001, 2002, 2006 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. */ #include "sysdep.h" #include "asm-syntax.h" /******************* * mpi_limb_t * _gcry_mpih_submul_1( mpi_ptr_t res_ptr, (rdi) * mpi_ptr_t s1_ptr, (rsi) * mpi_size_t s1_size, (rdx) * mpi_limb_t s2_limb) (rcx) */ TEXT GLOBL C_SYMBOL_NAME(_gcry_mpih_submul_1) C_SYMBOL_NAME(_gcry_mpih_submul_1:) movq %rdx, %r11 leaq (%rsi,%r11,8), %rsi leaq (%rdi,%r11,8), %rdi negq %r11 xorl %r8d, %r8d ALIGN(3) /* minimal alignment for claimed speed */ .Loop: movq (%rsi,%r11,8), %rax movq (%rdi,%r11,8), %r10 mulq %rcx subq %r8, %r10 movl $0, %r8d adcl %r8d, %r8d subq %rax, %r10 adcq %rdx, %r8 movq %r10, (%rdi,%r11,8) incq %r11 jne .Loop movq %r8, %rax ret
al3xtjames/Clover
1,981
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/amd64/mpih-add1.S
/* AMD64 (x86_64) add_n -- Add two limb vectors of the same length > 0 and store * sum in a third limb vector. * * Copyright (C) 1992, 1994, 1995, 1998, * 2001, 2002, 2006 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. */ #include "sysdep.h" #include "asm-syntax.h" /******************* * mpi_limb_t * _gcry_mpih_add_n( mpi_ptr_t res_ptr, rdi * mpi_ptr_t s1_ptr, rsi * mpi_ptr_t s2_ptr, rdx * mpi_size_t size) rcx */ .text .globl C_SYMBOL_NAME(_gcry_mpih_add_n) C_SYMBOL_NAME(_gcry_mpih_add_n:) leaq (%rsi,%rcx,8), %rsi leaq (%rdi,%rcx,8), %rdi leaq (%rdx,%rcx,8), %rdx negq %rcx xorl %eax, %eax /* clear cy */ ALIGN(4) /* minimal alignment for claimed speed */ .Loop: movq (%rsi,%rcx,8), %rax movq (%rdx,%rcx,8), %r10 adcq %r10, %rax movq %rax, (%rdi,%rcx,8) incq %rcx jne .Loop movq %rcx, %rax /* zero %rax */ adcq %rax, %rax ret
al3xtjames/Clover
1,984
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/amd64/mpih-sub1.S
/* AMD64 (x86_64) sub_n -- Subtract two limb vectors of the same length > 0 and store * sum in a third limb vector. * * Copyright (C) 1992, 1994, 1995, 1998, * 2001, 2002, 2006 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. */ #include "sysdep.h" #include "asm-syntax.h" /******************* * mpi_limb_t * _gcry_mpih_sub_n( mpi_ptr_t res_ptr, rdi * mpi_ptr_t s1_ptr, rsi * mpi_ptr_t s2_ptr, rdx * mpi_size_t size) rcx */ .text .globl C_SYMBOL_NAME(_gcry_mpih_sub_n) C_SYMBOL_NAME(_gcry_mpih_sub_n:) leaq (%rsi,%rcx,8), %rsi leaq (%rdi,%rcx,8), %rdi leaq (%rdx,%rcx,8), %rdx negq %rcx xorl %eax, %eax /* clear cy */ ALIGN(4) /* minimal alignment for claimed speed */ .Loop: movq (%rsi,%rcx,8), %rax movq (%rdx,%rcx,8), %r10 sbbq %r10, %rax movq %rax, (%rdi,%rcx,8) incq %rcx jne .Loop movq %rcx, %rax /* zero %rax */ adcq %rax, %rax ret
al3xtjames/Clover
2,919
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/alpha/mpih-mul1.S
/* Alpha 21064 mpih-mul1.S -- Multiply a limb vector with a limb and store * the result in a second limb vector. * * Copyright (C) 1992, 1994, 1995, 1998, * 2001, 2002 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA */ /******************* * mpi_limb_t * _gcry_mpih_mul_1( mpi_ptr_t res_ptr, (r16) * mpi_ptr_t s1_ptr, (r17) * mpi_size_t s1_size, (r18) * mpi_limb_t s2_limb) (r19) * * This code runs at 42 cycles/limb on the EV4 and 18 cycles/limb on the EV5. * * To improve performance for long multiplications, we would use * 'fetch' for S1 and 'fetch_m' for RES. It's not obvious how to use * these instructions without slowing down the general code: 1. We can * only have two prefetches in operation at any time in the Alpha * architecture. 2. There will seldom be any special alignment * between RES_PTR and S1_PTR. Maybe we can simply divide the current * loop into an inner and outer loop, having the inner loop handle * exactly one prefetch block? 
*/ .set noreorder .set noat .text .align 3 .globl _gcry_mpih_mul_1 .ent _gcry_mpih_mul_1 2 _gcry_mpih_mul_1: .frame $30,0,$26 ldq $2,0($17) # $2 = s1_limb subq $18,1,$18 # size-- mulq $2,$19,$3 # $3 = prod_low bic $31,$31,$4 # clear cy_limb umulh $2,$19,$0 # $0 = prod_high beq $18,Lend1 # jump if size was == 1 ldq $2,8($17) # $2 = s1_limb subq $18,1,$18 # size-- stq $3,0($16) beq $18,Lend2 # jump if size was == 2 .align 3 Loop: mulq $2,$19,$3 # $3 = prod_low addq $4,$0,$0 # cy_limb = cy_limb + 'cy' subq $18,1,$18 # size-- umulh $2,$19,$4 # $4 = cy_limb ldq $2,16($17) # $2 = s1_limb addq $17,8,$17 # s1_ptr++ addq $3,$0,$3 # $3 = cy_limb + prod_low stq $3,8($16) cmpult $3,$0,$0 # $0 = carry from (cy_limb + prod_low) addq $16,8,$16 # res_ptr++ bne $18,Loop Lend2: mulq $2,$19,$3 # $3 = prod_low addq $4,$0,$0 # cy_limb = cy_limb + 'cy' umulh $2,$19,$4 # $4 = cy_limb addq $3,$0,$3 # $3 = cy_limb + prod_low cmpult $3,$0,$0 # $0 = carry from (cy_limb + prod_low) stq $3,8($16) addq $4,$0,$0 # cy_limb = prod_high + cy ret $31,($26),1 Lend1: stq $3,0($16) ret $31,($26),1 .end _gcry_mpih_mul_1
al3xtjames/Clover
2,794
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/alpha/mpih-mul2.S
/* Alpha 21064 addmul_1 -- Multiply a limb vector with a limb and add * the result to a second limb vector. * * Copyright (C) 1992, 1994, 1995, 1998, * 2001, 2002 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA */ /******************* * mpi_limb_t * _gcry_mpih_addmul_1( mpi_ptr_t res_ptr, (r16) * mpi_ptr_t s1_ptr, (r17) * mpi_size_t s1_size, (r18) * mpi_limb_t s2_limb) (r19) * * This code runs at 42 cycles/limb on EV4 and 18 cycles/limb on EV5. 
*/ .set noreorder .set noat .text .align 3 .globl _gcry_mpih_addmul_1 .ent _gcry_mpih_addmul_1 2 _gcry_mpih_addmul_1: .frame $30,0,$26 ldq $2,0($17) # $2 = s1_limb addq $17,8,$17 # s1_ptr++ subq $18,1,$18 # size-- mulq $2,$19,$3 # $3 = prod_low ldq $5,0($16) # $5 = *res_ptr umulh $2,$19,$0 # $0 = prod_high beq $18,.Lend1 # jump if size was == 1 ldq $2,0($17) # $2 = s1_limb addq $17,8,$17 # s1_ptr++ subq $18,1,$18 # size-- addq $5,$3,$3 cmpult $3,$5,$4 stq $3,0($16) addq $16,8,$16 # res_ptr++ beq $18,.Lend2 # jump if size was == 2 .align 3 .Loop: mulq $2,$19,$3 # $3 = prod_low ldq $5,0($16) # $5 = *res_ptr addq $4,$0,$0 # cy_limb = cy_limb + 'cy' subq $18,1,$18 # size-- umulh $2,$19,$4 # $4 = cy_limb ldq $2,0($17) # $2 = s1_limb addq $17,8,$17 # s1_ptr++ addq $3,$0,$3 # $3 = cy_limb + prod_low cmpult $3,$0,$0 # $0 = carry from (cy_limb + prod_low) addq $5,$3,$3 cmpult $3,$5,$5 stq $3,0($16) addq $16,8,$16 # res_ptr++ addq $5,$0,$0 # combine carries bne $18,.Loop .Lend2: mulq $2,$19,$3 # $3 = prod_low ldq $5,0($16) # $5 = *res_ptr addq $4,$0,$0 # cy_limb = cy_limb + 'cy' umulh $2,$19,$4 # $4 = cy_limb addq $3,$0,$3 # $3 = cy_limb + prod_low cmpult $3,$0,$0 # $0 = carry from (cy_limb + prod_low) addq $5,$3,$3 cmpult $3,$5,$5 stq $3,0($16) addq $5,$0,$0 # combine carries addq $4,$0,$0 # cy_limb = prod_high + cy ret $31,($26),1 .Lend1: addq $5,$3,$3 cmpult $3,$5,$5 stq $3,0($16) addq $0,$5,$0 ret $31,($26),1 .end _gcry_mpih_addmul_1
al3xtjames/Clover
3,051
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/alpha/mpih-lshift.S
/* alpha - left shift * * Copyright (C) 1994, 1995, 1998, 2001, * 2002 Free Software Foundation, Inc. * * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. */ /******************* * mpi_limb_t * _gcry_mpih_lshift( mpi_ptr_t wp, (r16) * mpi_ptr_t up, (r17) * mpi_size_t usize, (r18) * unsigned cnt) (r19) * * This code runs at 4.8 cycles/limb on the 21064. With infinite unrolling, * it would take 4 cycles/limb. It should be possible to get down to 3 * cycles/limb since both ldq and stq can be paired with the other used * instructions. But there are many restrictions in the 21064 pipeline that * makes it hard, if not impossible, to get down to 3 cycles/limb: * * 1. ldq has a 3 cycle delay, srl and sll have a 2 cycle delay. * 2. Only aligned instruction pairs can be paired. * 3. The store buffer or silo might not be able to deal with the bandwidth. 
*/ .set noreorder .set noat .text .align 3 .globl _gcry_mpih_lshift .ent _gcry_mpih_lshift _gcry_mpih_lshift: .frame $30,0,$26,0 s8addq $18,$17,$17 # make r17 point at end of s1 ldq $4,-8($17) # load first limb subq $17,8,$17 subq $31,$19,$7 s8addq $18,$16,$16 # make r16 point at end of RES subq $18,1,$18 and $18,4-1,$20 # number of limbs in first loop srl $4,$7,$0 # compute function result beq $20,.L0 subq $18,$20,$18 .align 3 .Loop0: ldq $3,-8($17) subq $16,8,$16 subq $17,8,$17 subq $20,1,$20 sll $4,$19,$5 srl $3,$7,$6 bis $3,$3,$4 bis $5,$6,$8 stq $8,0($16) bne $20,.Loop0 .L0: beq $18,.Lend .align 3 .Loop: ldq $3,-8($17) subq $16,32,$16 subq $18,4,$18 sll $4,$19,$5 srl $3,$7,$6 ldq $4,-16($17) sll $3,$19,$1 bis $5,$6,$8 stq $8,24($16) srl $4,$7,$2 ldq $3,-24($17) sll $4,$19,$5 bis $1,$2,$8 stq $8,16($16) srl $3,$7,$6 ldq $4,-32($17) sll $3,$19,$1 bis $5,$6,$8 stq $8,8($16) srl $4,$7,$2 subq $17,32,$17 bis $1,$2,$8 stq $8,0($16) bgt $18,.Loop .Lend: sll $4,$19,$8 stq $8,-8($16) ret $31,($26),1 .end _gcry_mpih_lshift
al3xtjames/Clover
2,950
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/alpha/mpih-rshift.S
/* alpha rshift * Copyright (C) 1994, 1995, 1998, 1999, * 2000, 2001, 2002 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. */ /******************* * mpi_limb_t * _gcry_mpih_rshift( mpi_ptr_t wp, (r16) * mpi_ptr_t up, (r17) * mpi_size_t usize, (r18) * unsigned cnt) (r19) * * This code runs at 4.8 cycles/limb on the 21064. With infinite unrolling, * it would take 4 cycles/limb. It should be possible to get down to 3 * cycles/limb since both ldq and stq can be paired with the other used * instructions. But there are many restrictions in the 21064 pipeline that * makes it hard, if not impossible, to get down to 3 cycles/limb: * * 1. ldq has a 3 cycle delay, srl and sll have a 2 cycle delay. * 2. Only aligned instruction pairs can be paired. * 3. The store buffer or silo might not be able to deal with the bandwidth. 
*/ .set noreorder .set noat .text .align 3 .globl _gcry_mpih_rshift .ent _gcry_mpih_rshift _gcry_mpih_rshift: .frame $30,0,$26,0 ldq $4,0($17) # load first limb addq $17,8,$17 subq $31,$19,$7 subq $18,1,$18 and $18,4-1,$20 # number of limbs in first loop sll $4,$7,$0 # compute function result beq $20,.R0 subq $18,$20,$18 .align 3 .Roop0: ldq $3,0($17) addq $16,8,$16 addq $17,8,$17 subq $20,1,$20 srl $4,$19,$5 sll $3,$7,$6 bis $3,$3,$4 bis $5,$6,$8 stq $8,-8($16) bne $20,.Roop0 .R0: beq $18,.Rend .align 3 .Roop: ldq $3,0($17) addq $16,32,$16 subq $18,4,$18 srl $4,$19,$5 sll $3,$7,$6 ldq $4,8($17) srl $3,$19,$1 bis $5,$6,$8 stq $8,-32($16) sll $4,$7,$2 ldq $3,16($17) srl $4,$19,$5 bis $1,$2,$8 stq $8,-24($16) sll $3,$7,$6 ldq $4,24($17) srl $3,$19,$1 bis $5,$6,$8 stq $8,-16($16) sll $4,$7,$2 addq $17,32,$17 bis $1,$2,$8 stq $8,-8($16) bgt $18,.Roop .Rend: srl $4,$19,$8 stq $8,0($16) ret $31,($26),1 .end _gcry_mpih_rshift
al3xtjames/Clover
2,811
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/alpha/mpih-mul3.S
/* Alpha 21064 submul_1 -- Multiply a limb vector with a limb and * subtract the result from a second limb vector. * Copyright (C) 1992, 1994, 1995, 1998, * 2001, 2002 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA */ /******************* * mpi_limb_t * _gcry_mpih_submul_1( mpi_ptr_t res_ptr, (r16 ) * mpi_ptr_t s1_ptr, (r17 ) * mpi_size_t s1_size, (r18 ) * mpi_limb_t s2_limb) (r19 ) * * This code runs at 42 cycles/limb on EV4 and 18 cycles/limb on EV5. 
*/ .set noreorder .set noat .text .align 3 .globl _gcry_mpih_submul_1 .ent _gcry_mpih_submul_1 2 _gcry_mpih_submul_1: .frame $30,0,$26 ldq $2,0($17) # $2 = s1_limb addq $17,8,$17 # s1_ptr++ subq $18,1,$18 # size-- mulq $2,$19,$3 # $3 = prod_low ldq $5,0($16) # $5 = *res_ptr umulh $2,$19,$0 # $0 = prod_high beq $18,.Lend1 # jump if size was == 1 ldq $2,0($17) # $2 = s1_limb addq $17,8,$17 # s1_ptr++ subq $18,1,$18 # size-- subq $5,$3,$3 cmpult $5,$3,$4 stq $3,0($16) addq $16,8,$16 # res_ptr++ beq $18,.Lend2 # jump if size was == 2 .align 3 .Loop: mulq $2,$19,$3 # $3 = prod_low ldq $5,0($16) # $5 = *res_ptr addq $4,$0,$0 # cy_limb = cy_limb + 'cy' subq $18,1,$18 # size-- umulh $2,$19,$4 # $4 = cy_limb ldq $2,0($17) # $2 = s1_limb addq $17,8,$17 # s1_ptr++ addq $3,$0,$3 # $3 = cy_limb + prod_low cmpult $3,$0,$0 # $0 = carry from (cy_limb + prod_low) subq $5,$3,$3 cmpult $5,$3,$5 stq $3,0($16) addq $16,8,$16 # res_ptr++ addq $5,$0,$0 # combine carries bne $18,.Loop .Lend2: mulq $2,$19,$3 # $3 = prod_low ldq $5,0($16) # $5 = *res_ptr addq $4,$0,$0 # cy_limb = cy_limb + 'cy' umulh $2,$19,$4 # $4 = cy_limb addq $3,$0,$3 # $3 = cy_limb + prod_low cmpult $3,$0,$0 # $0 = carry from (cy_limb + prod_low) subq $5,$3,$3 cmpult $5,$3,$5 stq $3,0($16) addq $5,$0,$0 # combine carries addq $4,$0,$0 # cy_limb = prod_high + cy ret $31,($26),1 .Lend1: subq $5,$3,$3 cmpult $5,$3,$5 stq $3,0($16) addq $0,$5,$0 ret $31,($26),1 .end _gcry_mpih_submul_1
al3xtjames/Clover
2,449
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/alpha/mpih-add1.S
/* alpha add_n -- Add two limb vectors of the same length > 0 and store * sum in a third limb vector. * Copyright (C) 1995, 1998, 2001, 2002 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA */ /******************* * mpi_limb_t * _gcry_mpih_add_n( mpi_ptr_t res_ptr, ($16) * mpi_ptr_t s1_ptr, ($17) * mpi_ptr_t s2_ptr, ($18) * mpi_size_t size) ($19) */ .set noreorder .set noat .text .align 3 .globl _gcry_mpih_add_n .ent _gcry_mpih_add_n _gcry_mpih_add_n: .frame $30,0,$26,0 ldq $3,0($17) ldq $4,0($18) subq $19,1,$19 and $19,4-1,$2 # number of limbs in first loop bis $31,$31,$0 beq $2,.L0 # if multiple of 4 limbs, skip first loop subq $19,$2,$19 .Loop0: subq $2,1,$2 ldq $5,8($17) addq $4,$0,$4 ldq $6,8($18) cmpult $4,$0,$1 addq $3,$4,$4 cmpult $4,$3,$0 stq $4,0($16) or $0,$1,$0 addq $17,8,$17 addq $18,8,$18 bis $5,$5,$3 bis $6,$6,$4 addq $16,8,$16 bne $2,.Loop0 .L0: beq $19,.Lend .align 3 .Loop: subq $19,4,$19 ldq $5,8($17) addq $4,$0,$4 ldq $6,8($18) cmpult $4,$0,$1 addq $3,$4,$4 cmpult $4,$3,$0 stq $4,0($16) or $0,$1,$0 ldq $3,16($17) addq $6,$0,$6 ldq $4,16($18) cmpult $6,$0,$1 addq $5,$6,$6 cmpult $6,$5,$0 stq $6,8($16) or $0,$1,$0 ldq $5,24($17) addq $4,$0,$4 ldq $6,24($18) cmpult $4,$0,$1 addq $3,$4,$4 cmpult $4,$3,$0 stq $4,16($16) or 
$0,$1,$0 ldq $3,32($17) addq $6,$0,$6 ldq $4,32($18) cmpult $6,$0,$1 addq $5,$6,$6 cmpult $6,$5,$0 stq $6,24($16) or $0,$1,$0 addq $17,32,$17 addq $18,32,$18 addq $16,32,$16 bne $19,.Loop .Lend: addq $4,$0,$4 cmpult $4,$0,$1 addq $3,$4,$4 cmpult $4,$3,$0 stq $4,0($16) or $0,$1,$0 ret $31,($26),1 .end _gcry_mpih_add_n
al3xtjames/Clover
3,134
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/alpha/udiv-qrnnd.S
/* Alpha 21064 __udiv_qrnnd * * Copyright (C) 1992, 1994, 1995, 1998, * 2001, 2002 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. 
*/ .set noreorder .set noat .text .align 3 .globl __udiv_qrnnd .ent __udiv_qrnnd __udiv_qrnnd: .frame $30,0,$26,0 .prologue 0 #define cnt $2 #define tmp $3 #define rem_ptr $16 #define n1 $17 #define n0 $18 #define d $19 #define qb $20 ldiq cnt,16 blt d,.Largedivisor .Loop1: cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule d,n1,qb subq n1,d,tmp cmovne qb,tmp,n1 bis n0,qb,n0 cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule d,n1,qb subq n1,d,tmp cmovne qb,tmp,n1 bis n0,qb,n0 cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule d,n1,qb subq n1,d,tmp cmovne qb,tmp,n1 bis n0,qb,n0 cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule d,n1,qb subq n1,d,tmp cmovne qb,tmp,n1 bis n0,qb,n0 subq cnt,1,cnt bgt cnt,.Loop1 stq n1,0(rem_ptr) bis $31,n0,$0 ret $31,($26),1 .Largedivisor: and n0,1,$4 srl n0,1,n0 sll n1,63,tmp or tmp,n0,n0 srl n1,1,n1 and d,1,$6 srl d,1,$5 addq $5,$6,$5 .Loop2: cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule $5,n1,qb subq n1,$5,tmp cmovne qb,tmp,n1 bis n0,qb,n0 cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule $5,n1,qb subq n1,$5,tmp cmovne qb,tmp,n1 bis n0,qb,n0 cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule $5,n1,qb subq n1,$5,tmp cmovne qb,tmp,n1 bis n0,qb,n0 cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule $5,n1,qb subq n1,$5,tmp cmovne qb,tmp,n1 bis n0,qb,n0 subq cnt,1,cnt bgt cnt,.Loop2 addq n1,n1,n1 addq $4,n1,n1 bne $6,.LOdd stq n1,0(rem_ptr) bis $31,n0,$0 ret $31,($26),1 .LOdd: /* q' in n0. r' in n1 */ addq n1,n0,n1 cmpult n1,n0,tmp # tmp := carry from addq beq tmp,.LLp6 addq n0,1,n0 subq n1,d,n1 .LLp6: cmpult n1,d,tmp bne tmp,.LLp7 addq n0,1,n0 subq n1,d,n1 .LLp7: stq n1,0(rem_ptr) bis $31,n0,$0 ret $31,($26),1 .end __udiv_qrnnd
al3xtjames/Clover
2,487
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/alpha/mpih-sub1.S
/* Alpha sub_n -- Subtract two limb vectors of the same length > 0 and * store difference in a third limb vector. * Copyright (C) 1995, 1998, * 2001, 2002 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA */ /******************* * mpi_limb_t * _gcry_mpih_sub_n( mpi_ptr_t res_ptr, (r16) * mpi_ptr_t s1_ptr, (r17) * mpi_ptr_t s2_ptr, (r18) * mpi_size_t size) (r19) */ .set noreorder .set noat .text .align 3 .globl _gcry_mpih_sub_n .ent _gcry_mpih_sub_n _gcry_mpih_sub_n: .frame $30,0,$26,0 ldq $3,0($17) ldq $4,0($18) subq $19,1,$19 and $19,4-1,$2 # number of limbs in first loop bis $31,$31,$0 beq $2,.L0 # if multiple of 4 limbs, skip first loop subq $19,$2,$19 .Loop0: subq $2,1,$2 ldq $5,8($17) addq $4,$0,$4 ldq $6,8($18) cmpult $4,$0,$1 subq $3,$4,$4 cmpult $3,$4,$0 stq $4,0($16) or $0,$1,$0 addq $17,8,$17 addq $18,8,$18 bis $5,$5,$3 bis $6,$6,$4 addq $16,8,$16 bne $2,.Loop0 .L0: beq $19,.Lend .align 3 .Loop: subq $19,4,$19 ldq $5,8($17) addq $4,$0,$4 ldq $6,8($18) cmpult $4,$0,$1 subq $3,$4,$4 cmpult $3,$4,$0 stq $4,0($16) or $0,$1,$0 ldq $3,16($17) addq $6,$0,$6 ldq $4,16($18) cmpult $6,$0,$1 subq $5,$6,$6 cmpult $5,$6,$0 stq $6,8($16) or $0,$1,$0 ldq $5,24($17) addq $4,$0,$4 ldq $6,24($18) cmpult $4,$0,$1 subq $3,$4,$4 cmpult $3,$4,$0 stq 
$4,16($16) or $0,$1,$0 ldq $3,32($17) addq $6,$0,$6 ldq $4,32($18) cmpult $6,$0,$1 subq $5,$6,$6 cmpult $5,$6,$0 stq $6,24($16) or $0,$1,$0 addq $17,32,$17 addq $18,32,$18 addq $16,32,$16 bne $19,.Loop .Lend: addq $4,$0,$4 cmpult $4,$0,$1 subq $3,$4,$4 cmpult $3,$4,$0 stq $4,0($16) or $0,$1,$0 ret $31,($26),1 .end _gcry_mpih_sub_n
al3xtjames/Clover
4,503
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/pentium4/mmx/mpih-lshift.S
/* Intel Pentium-4 mpn_lshift -- left shift. * * Copyright 2001, 2002 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. 
*/ #include "sysdep.h" #include "asm-syntax.h" /******************* * mpi_limb_t * _gcry_mpih_lshift( mpi_ptr_t wp, (sp + 4) * mpi_ptr_t up, (sp + 8) * mpi_size_t usize, (sp + 12) * unsigned cnt) (sp + 16) * * P4 Willamette, Northwood: 1.75 cycles/limb * P4 Prescott: 2.0 cycles/limb */ .text ALIGN (3) .globl C_SYMBOL_NAME(_gcry_mpih_lshift) C_SYMBOL_NAME(_gcry_mpih_lshift:) pushl %ebx pushl %edi movl 20(%esp), %eax movl 12(%esp), %edx movl 16(%esp), %ebx movl 24(%esp), %ecx cmp $5, %eax jae .Lunroll movl -4(%ebx,%eax,4), %edi decl %eax jnz .Lsimple shldl %cl, %edi, %eax shll %cl, %edi movl %edi, (%edx) popl %edi popl %ebx ret .Lsimple: movd (%ebx,%eax,4), %mm5 movd %ecx, %mm6 negl %ecx psllq %mm6, %mm5 addl $32, %ecx movd %ecx, %mm7 psrlq $32, %mm5 .Lsimple_top: movq -4(%ebx,%eax,4), %mm0 decl %eax psrlq %mm7, %mm0 movd %mm0, 4(%edx,%eax,4) jnz .Lsimple_top movd (%ebx), %mm0 movd %mm5, %eax psllq %mm6, %mm0 popl %edi popl %ebx movd %mm0, (%edx) emms ret .align 8, 0x90 .Lunroll: movd -4(%ebx,%eax,4), %mm5 leal (%ebx,%eax,4), %edi movd %ecx, %mm6 andl $4, %edi psllq %mm6, %mm5 jz .Lstart_src_aligned movq -8(%ebx,%eax,4), %mm0 psllq %mm6, %mm0 decl %eax psrlq $32, %mm0 movd %mm0, (%edx,%eax,4) .Lstart_src_aligned: movq -8(%ebx,%eax,4), %mm1 leal (%edx,%eax,4), %edi andl $4, %edi psrlq $32, %mm5 movq -16(%ebx,%eax,4), %mm3 jz .Lstart_dst_aligned movq %mm1, %mm0 addl $32, %ecx psllq %mm6, %mm0 movd %ecx, %mm6 psrlq $32, %mm0 movd %mm0, -4(%edx,%eax,4) subl $4, %edx .Lstart_dst_aligned: psllq %mm6, %mm1 negl %ecx addl $64, %ecx movq %mm3, %mm2 movd %ecx, %mm7 subl $8, %eax psrlq %mm7, %mm3 por %mm1, %mm3 jc .Lfinish .align 8, 0x90 .Lunroll_loop: movq 8(%ebx,%eax,4), %mm0 psllq %mm6, %mm2 movq %mm0, %mm1 psrlq %mm7, %mm0 movq %mm3, 24(%edx,%eax,4) por %mm2, %mm0 movq (%ebx,%eax,4), %mm3 psllq %mm6, %mm1 movq %mm0, 16(%edx,%eax,4) movq %mm3, %mm2 psrlq %mm7, %mm3 subl $4, %eax por %mm1, %mm3 jnc .Lunroll_loop .Lfinish: testb $2, %al jz .Lfinish_no_two movq 8(%ebx,%eax,4), 
%mm0 psllq %mm6, %mm2 movq %mm0, %mm1 psrlq %mm7, %mm0 movq %mm3, 24(%edx,%eax,4) por %mm2, %mm0 movq %mm1, %mm2 movq %mm0, %mm3 subl $2, %eax .Lfinish_no_two: testb $1, %al movd %mm5, %eax popl %edi jz .Lfinish_zero movd (%ebx), %mm0 psllq %mm6, %mm2 movq %mm3, 12(%edx) psllq $32, %mm0 movq %mm0, %mm1 psrlq %mm7, %mm0 por %mm2, %mm0 psllq %mm6, %mm1 movq %mm0, 4(%edx) psrlq $32, %mm1 andl $32, %ecx popl %ebx jz .Lfinish_one_unaligned movd %mm1, (%edx) .Lfinish_one_unaligned: emms ret .Lfinish_zero: movq %mm3, 8(%edx) andl $32, %ecx psllq %mm6, %mm2 jz .Lfinish_zero_unaligned movq %mm2, (%edx) .Lfinish_zero_unaligned: psrlq $32, %mm2 popl %ebx movd %mm5, %eax movd %mm2, 4(%edx) emms ret
al3xtjames/Clover
4,439
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/pentium4/mmx/mpih-rshift.S
/* Intel Pentium-4 mpn_rshift -- right shift. * * Copyright 2001, 2002 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. 
*/ #include "sysdep.h" #include "asm-syntax.h" /******************* * mpi_limb_t * _gcry_mpih_rshift( mpi_ptr_t wp, (sp + 4) * mpi_ptr_t up, (sp + 8) * mpi_size_t usize, (sp + 12) * unsigned cnt) (sp + 16) * * P4 Willamette, Northwood: 1.75 cycles/limb * P4 Prescott: 2.0 cycles/limb */ .text ALIGN (3) .globl C_SYMBOL_NAME(_gcry_mpih_rshift) C_SYMBOL_NAME(_gcry_mpih_rshift:) pushl %ebx pushl %edi movl 20(%esp), %eax movl 12(%esp), %edx movl 16(%esp), %ebx movl 24(%esp), %ecx cmp $5, %eax jae .Lunroll decl %eax movl (%ebx), %edi jnz .Lsimple shrdl %cl, %edi, %eax shrl %cl, %edi movl %edi, (%edx) popl %edi popl %ebx ret .align 8, 0x90 .Lsimple: movd (%ebx), %mm5 leal (%ebx,%eax,4), %ebx movd %ecx, %mm6 leal -4(%edx,%eax,4), %edx psllq $32, %mm5 negl %eax .Lsimple_top: movq (%ebx,%eax,4), %mm0 incl %eax psrlq %mm6, %mm0 movd %mm0, (%edx,%eax,4) jnz .Lsimple_top movd (%ebx), %mm0 psrlq %mm6, %mm5 psrlq %mm6, %mm0 popl %edi movd %mm5, %eax popl %ebx movd %mm0, 4(%edx) emms ret .align 8, 0x90 .Lunroll: movd (%ebx), %mm5 movl $4, %edi movd %ecx, %mm6 testl %edi, %ebx psllq $32, %mm5 jz .Lstart_src_aligned movq (%ebx), %mm0 psrlq %mm6, %mm0 addl $4, %ebx decl %eax movd %mm0, (%edx) addl $4, %edx .Lstart_src_aligned: movq (%ebx), %mm1 testl %edi, %edx psrlq %mm6, %mm5 jz .Lstart_dst_aligned movq %mm1, %mm0 addl $32, %ecx psrlq %mm6, %mm0 movd %ecx, %mm6 movd %mm0, (%edx) addl $4, %edx .Lstart_dst_aligned: movq 8(%ebx), %mm3 negl %ecx movq %mm3, %mm2 addl $64, %ecx movd %ecx, %mm7 psrlq %mm6, %mm1 leal -12(%ebx,%eax,4), %ebx leal -20(%edx,%eax,4), %edx psllq %mm7, %mm3 subl $7, %eax por %mm1, %mm3 negl %eax jns .Lfinish .align 8, 0x90 .Lunroll_loop: movq (%ebx,%eax,4), %mm0 psrlq %mm6, %mm2 movq %mm0, %mm1 psllq %mm7, %mm0 movq %mm3, -8(%edx,%eax,4) por %mm2, %mm0 movq 8(%ebx,%eax,4), %mm3 psrlq %mm6, %mm1 movq %mm0, (%edx,%eax,4) movq %mm3, %mm2 psllq %mm7, %mm3 addl $4, %eax por %mm1, %mm3 js .Lunroll_loop .Lfinish: testb $2, %al jnz .Lfinish_no_two movq (%ebx,%eax,4), %mm0 
psrlq %mm6, %mm2 movq %mm0, %mm1 psllq %mm7, %mm0 movq %mm3, -8(%edx,%eax,4) por %mm2, %mm0 movq %mm1, %mm2 movq %mm0, %mm3 addl $2, %eax .Lfinish_no_two: testb $1, %al popl %edi movd %mm5, %eax jnz .Lfinish_zero movd 8(%ebx), %mm0 psrlq %mm6, %mm2 movq %mm0, %mm1 psllq %mm7, %mm0 movq %mm3, (%edx) por %mm2, %mm0 psrlq %mm6, %mm1 andl $32, %ecx popl %ebx jz .Lfinish_one_unaligned movd %mm1, 16(%edx) .Lfinish_one_unaligned: movq %mm0, 8(%edx) emms ret .Lfinish_zero: movq %mm3, 4(%edx) psrlq %mm6, %mm2 movd %mm2, 12(%edx) andl $32, %ecx popl %ebx jz .Lfinish_zero_unaligned movq %mm2, 12(%edx) .Lfinish_zero_unaligned: emms ret
al3xtjames/Clover
2,568
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/pentium4/sse2/mpih-mul1.S
/* Intel Pentium-4 mpn_mul_1 -- Multiply a limb vector with a limb and store * the result in a second limb vector. * * Copyright 2001, 2002, 2003, 2005 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. */ #include "sysdep.h" #include "asm-syntax.h" /******************* * mpi_limb_t * _gcry_mpih_mul_1( mpi_ptr_t res_ptr, (sp + 4) * mpi_ptr_t s1_ptr, (sp + 8) * mpi_size_t s1_size, (sp + 12) * mpi_limb_t s2_limb) (sp + 16) * * src != dst src == dst * P6 model 9 (Banias) ?.? * P6 model 13 (Dothan) 4.75 4.75 * P4 model 0 (Willamette) 4.0 6.0 * P4 model 1 (?) 4.0 6.0 * P4 model 2 (Northwood) 4.0 6.0 * P4 model 3 (Prescott) ?.? ?.? * P4 model 4 (Nocona) ?.? ?.? * Unfortunately when src==dst the write-combining described in * pentium4/README takes us up to 6 c/l. 
* */ TEXT ALIGN (3) GLOBL C_SYMBOL_NAME(_gcry_mpih_mul_1) C_SYMBOL_NAME(_gcry_mpih_mul_1:); pxor %mm0, %mm0 .Lstart_1c: movl 8(%esp), %eax movd 16(%esp), %mm7 movl 4(%esp), %edx movl 12(%esp), %ecx .Ltop: /* C eax src, incrementing C ebx C ecx counter, size iterations C edx dst, incrementing C C mm0 carry limb C mm7 multiplier */ movd (%eax), %mm1 addl $4, %eax pmuludq %mm7, %mm1 paddq %mm1, %mm0 movd %mm0, (%edx) addl $4, %edx psrlq $32, %mm0 subl $1, %ecx jnz .Ltop movd %mm0, %eax emms ret
al3xtjames/Clover
3,284
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/pentium4/sse2/mpih-mul2.S
/* Intel Pentium-4 mpn_addmul_1 -- Multiply a limb vector with a limb and add * the result to a second limb vector. * * Copyright 2001, 2002, 2004, 2005 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. */ #include "sysdep.h" #include "asm-syntax.h" /******************* * mpi_limb_t * _gcry_mpih_addmul_1( mpi_ptr_t res_ptr, (sp + 4) * mpi_ptr_t s1_ptr, (sp + 8) * mpi_size_t s1_size, (sp + 12) * mpi_limb_t s2_limb) (sp + 16) * * P3 model 9 (Banias) ?.? * P3 model 13 (Dothan) 5.8 * P4 model 0 (Willamette) 5.5 * P4 model 1 (?) 5.5 * P4 model 2 (Northwood) 5.5 * P4 model 3 (Prescott) 6.0 * P4 model 4 (Nocona) * * Only the carry limb propagation is on the dependent chain, but some other * Pentium4 pipeline magic brings down performance to 6 cycles/l from the * ideal 4 cycles/l. 
*/ TEXT ALIGN (4) GLOBL C_SYMBOL_NAME(_gcry_mpih_addmul_1) C_SYMBOL_NAME(_gcry_mpih_addmul_1:) pxor %mm4, %mm4 .Lstart_1c: movl 8(%esp), %eax movl 12(%esp), %ecx movl 4(%esp), %edx movd 16(%esp), %mm7 /* C eax src, incrementing ; 5B C ecx loop counter, decrementing C edx dst, incrementing C C mm4 carry, low 32-bits C mm7 multiplier */ movd (%eax), %mm2 pmuludq %mm7, %mm2 shrl $1, %ecx jnc .Leven leal 4(%eax), %eax movd (%edx), %mm1 paddq %mm2, %mm1 paddq %mm1, %mm4 movd %mm4, (%edx) psrlq $32, %mm4 testl %ecx, %ecx jz .Lrtn leal 4(%edx), %edx movd (%eax), %mm2 pmuludq %mm7, %mm2 .Leven: movd 4(%eax), %mm0 movd (%edx), %mm1 pmuludq %mm7, %mm0 subl $1, %ecx jz .Lend .Lloop: paddq %mm2, %mm1 movd 8(%eax), %mm2 paddq %mm1, %mm4 movd 4(%edx), %mm3 pmuludq %mm7, %mm2 movd %mm4, (%edx) psrlq $32, %mm4 paddq %mm0, %mm3 movd 12(%eax), %mm0 paddq %mm3, %mm4 movd 8(%edx), %mm1 pmuludq %mm7, %mm0 movd %mm4, 4(%edx) psrlq $32, %mm4 leal 8(%eax), %eax leal 8(%edx), %edx subl $1, %ecx jnz .Lloop .Lend: paddq %mm2, %mm1 paddq %mm1, %mm4 movd 4(%edx), %mm3 movd %mm4, (%edx) psrlq $32, %mm4 paddq %mm0, %mm3 paddq %mm3, %mm4 movd %mm4, 4(%edx) psrlq $32, %mm4 .Lrtn: movd %mm4, %eax emms ret
al3xtjames/Clover
4,093
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/pentium4/sse2/mpih-mul3.S
/* Intel Pentium-4 mpn_submul_1 -- Multiply a limb vector with a limb and * subtract the result from a second limb vector. * * Copyright 2001, 2002 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. */ #include "sysdep.h" #include "asm-syntax.h" /******************* * mpi_limb_t * _gcry_mpih_submul_1( mpi_ptr_t res_ptr, (sp + 4) * mpi_ptr_t s1_ptr, (sp + 8) * mpi_size_t s1_size, (sp + 12) * mpi_limb_t s2_limb) (sp + 16) * * P4: 7 cycles/limb, unstable timing, at least on early Pentium4 silicon * (stepping 10). * * This code is not particularly good at 7 c/l. The dependent chain is only * 4 c/l and there's only 4 MMX unit instructions, so it's not clear why that * speed isn't achieved. * * The arrangements made here to get a two instruction dependent chain are * slightly subtle. 
In the loop the carry (or borrow rather) is a negative * so that a paddq can be used to give a low limb ready to store, and a high * limb ready to become the new carry after a psrlq. * * If the carry was a simple twos complement negative then the psrlq shift * would need to bring in 0 bits or 1 bits according to whether the high was * zero or non-zero, since a non-zero value would represent a negative * needing sign extension. That wouldn't be particularly easy to arrange and * certainly would add an instruction to the dependent chain, so instead an * offset is applied so that the high limb will be 0xFFFFFFFF+c. With c in * the range -0xFFFFFFFF to 0, the value 0xFFFFFFFF+c is in the range 0 to * 0xFFFFFFFF and is therefore always positive and can always have 0 bits * shifted in, which is what psrlq does. * * The extra 0xFFFFFFFF must be subtracted before c is used, but that can be * done off the dependent chain. The total adjustment then is to add * 0xFFFFFFFF00000000 to offset the new carry, and subtract * 0x00000000FFFFFFFF to remove the offset from the current carry, for a net * add of 0xFFFFFFFE00000001. In the code this is applied to the destination * limb when fetched. * * It's also possible to view the 0xFFFFFFFF adjustment as a ones-complement * negative, which is how it's undone for the return value, but that doesn't * seem as clear. 
*/ TEXT ALIGN (4) GLOBL C_SYMBOL_NAME(_gcry_mpih_submul_1) C_SYMBOL_NAME(_gcry_mpih_submul_1:) pxor %mm1, %mm1 .Lstart_1c: movl 8(%esp), %eax pcmpeqd %mm0, %mm0 movd 16(%esp), %mm7 pcmpeqd %mm6, %mm6 movl 4(%esp), %edx psrlq $32, %mm0 movl 12(%esp), %ecx psllq $32, %mm6 psubq %mm0, %mm6 psubq %mm1, %mm0 /* C eax src, incrementing C ebx C ecx loop counter, decrementing C edx dst, incrementing C C mm0 0xFFFFFFFF - borrow C mm6 0xFFFFFFFE00000001 C mm7 multiplier */ .Lloop: movd (%eax), %mm1 leal 4(%eax), %eax movd (%edx), %mm2 paddq %mm6, %mm2 pmuludq %mm7, %mm1 psubq %mm1, %mm2 paddq %mm2, %mm0 subl $1, %ecx movd %mm0, (%edx) psrlq $32, %mm0 leal 4(%edx), %edx jnz .Lloop movd %mm0, %eax notl %eax emms ret
al3xtjames/Clover
2,488
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/pentium4/sse2/mpih-add1.S
/* Intel Pentium-4 mpn_add_n -- mpn addition. * * Copyright 2001, 2002 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. */ #include "sysdep.h" #include "asm-syntax.h" /******************* * mpi_limb_t * _gcry_mpih_add_n( mpi_ptr_t res_ptr, (sp + 4) * mpi_ptr_t s1_ptr, (sp + 8) * mpi_ptr_t s2_ptr, (sp + 12) * mpi_size_t size) (sp + 16) * * P4 Willamette, Northwood: 4.0 cycles/limb if dst!=src1 and dst!=src2 * 6.0 cycles/limb if dst==src1 or dst==src2 * P4 Prescott: >= 5 cycles/limb * * The 4 c/l achieved here isn't particularly good, but is better than 9 c/l * for a basic adc loop. 
*/ TEXT ALIGN (3) GLOBL C_SYMBOL_NAME(_gcry_mpih_add_n) C_SYMBOL_NAME(_gcry_mpih_add_n:) pxor %mm0, %mm0 movl 8(%esp), %eax /* s1_ptr */ movl %ebx, 8(%esp) /* re-use parameter space */ movl 12(%esp), %ebx /* res_ptr */ movl 4(%esp), %edx /* s2_ptr */ movl 16(%esp), %ecx /* size */ leal (%eax,%ecx,4), %eax /* src1 end */ leal (%ebx,%ecx,4), %ebx /* src2 end */ leal (%edx,%ecx,4), %edx /* dst end */ negl %ecx /* -size */ Ltop: /* C eax src1 end C ebx src2 end C ecx counter, limbs, negative C edx dst end C mm0 carry bit */ movd (%eax,%ecx,4), %mm1 movd (%ebx,%ecx,4), %mm2 paddq %mm2, %mm1 paddq %mm1, %mm0 movd %mm0, (%edx,%ecx,4) psrlq $32, %mm0 addl $1, %ecx jnz Ltop movd %mm0, %eax movl 8(%esp), %ebx /* restore saved EBX */ emms ret
al3xtjames/Clover
2,568
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/pentium4/sse2/mpih-sub1.S
/* Intel Pentium-4 mpn_sub_n -- mpn subtraction. * * Copyright 2001, 2002 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. */ #include "sysdep.h" #include "asm-syntax.h" /******************* * mpi_limb_t * _gcry_mpih_sub_n( mpi_ptr_t res_ptr, (sp + 4) * mpi_ptr_t s1_ptr, (sp + 8) * mpi_ptr_t s2_ptr, (sp + 12) * mpi_size_t size) (sp + 16) * * P4 Willamette, Northwood: 4.0 cycles/limb if dst!=src1 and dst!=src2 * 6.0 cycles/limb if dst==src1 or dst==src2 * P4 Prescott: >= 5 cycles/limb * * The main loop code is 2x unrolled so that the carry bit can alternate * between mm0 and mm1. 
*/ .text ALIGN (3) .globl C_SYMBOL_NAME(_gcry_mpih_sub_n) C_SYMBOL_NAME(_gcry_mpih_sub_n:) pxor %mm0, %mm0 .Lstart_nc: movl 8(%esp), %eax movl %ebx, 8(%esp) movl 12(%esp), %ebx movl 4(%esp), %edx movl 16(%esp), %ecx leal (%eax,%ecx,4), %eax leal (%ebx,%ecx,4), %ebx leal (%edx,%ecx,4), %edx negl %ecx .Ltop: /* C eax src1 end C ebx src2 end C ecx counter, limbs, negative C edx dst end C mm0 carry bit */ movd (%eax,%ecx,4), %mm1 movd (%ebx,%ecx,4), %mm2 psubq %mm2, %mm1 psubq %mm0, %mm1 movd %mm1, (%edx,%ecx,4) psrlq $63, %mm1 addl $1, %ecx jz .Ldone_mm1 movd (%eax,%ecx,4), %mm0 movd (%ebx,%ecx,4), %mm2 psubq %mm2, %mm0 psubq %mm1, %mm0 movd %mm0, (%edx,%ecx,4) psrlq $63, %mm0 addl $1, %ecx jnz .Ltop movd %mm0, %eax movl 8(%esp), %ebx emms ret .Ldone_mm1: movd %mm1, %eax movl 8(%esp), %ebx emms ret
al3xtjames/Clover
2,823
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/m68k/mc68020/mpih-mul1.S
/* mc68020 __mpn_mul_1 -- Multiply a limb vector with a limb and store * the result in a second limb vector. * * Copyright (C) 1992, 1994, 1996, 1998, * 2001, 2002 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. */ #include "sysdep.h" #include "asm-syntax.h" /******************* * mpi_limb_t * _gcry_mpih_mul_1( mpi_ptr_t res_ptr, (sp + 4) * mpi_ptr_t s1_ptr, (sp + 8) * mpi_size_t s1_size, (sp + 12) * mpi_limb_t s2_limb) (sp + 16) */ TEXT ALIGN GLOBL C_SYMBOL_NAME(_gcry_mpih_mul_1) C_SYMBOL_NAME(_gcry_mpih_mul_1:) PROLOG(_gcry_mpih_mul_1) #define res_ptr a0 #define s1_ptr a1 #define s1_size d2 #define s2_limb d4 /* Save used registers on the stack. */ moveml R(d2)-R(d4),MEM_PREDEC(sp) #if 0 movel R(d2),MEM_PREDEC(sp) movel R(d3),MEM_PREDEC(sp) movel R(d4),MEM_PREDEC(sp) #endif /* Copy the arguments to registers. Better use movem? 
*/ movel MEM_DISP(sp,16),R(res_ptr) movel MEM_DISP(sp,20),R(s1_ptr) movel MEM_DISP(sp,24),R(s1_size) movel MEM_DISP(sp,28),R(s2_limb) eorw #1,R(s1_size) clrl R(d1) lsrl #1,R(s1_size) bcc L(L1) subql #1,R(s1_size) subl R(d0),R(d0) /* (d0,cy) <= (0,0) */ L(Loop:) movel MEM_POSTINC(s1_ptr),R(d3) mulul R(s2_limb),R(d1):R(d3) addxl R(d0),R(d3) movel R(d3),MEM_POSTINC(res_ptr) L(L1:) movel MEM_POSTINC(s1_ptr),R(d3) mulul R(s2_limb),R(d0):R(d3) addxl R(d1),R(d3) movel R(d3),MEM_POSTINC(res_ptr) dbf R(s1_size),L(Loop) clrl R(d3) addxl R(d3),R(d0) subl #0x10000,R(s1_size) bcc L(Loop) /* Restore used registers from stack frame. */ moveml MEM_POSTINC(sp),R(d2)-R(d4) #if 0 movel MEM_POSTINC(sp),R(d4) movel MEM_POSTINC(sp),R(d3) movel MEM_POSTINC(sp),R(d2) #endif rts EPILOG(_gcry_mpih_mul_1)
al3xtjames/Clover
2,708
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/m68k/mc68020/mpih-mul2.S
/* mc68020 __mpn_addmul_1 -- Multiply a limb vector with a limb and add * the result to a second limb vector. * * Copyright (C) 1992, 1994, 1996, 1998, * 2001, 2002 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. */ #include "sysdep.h" #include "asm-syntax.h" /******************* * mpi_limb_t * _gcry_mpih_addmul_1( mpi_ptr_t res_ptr, (sp + 4) * mpi_ptr_t s1_ptr, (sp + 8) * mpi_size_t s1_size, (sp + 12) * mpi_limb_t s2_limb) (sp + 16) */ TEXT ALIGN GLOBL C_SYMBOL_NAME(_gcry_mpih_addmul_1) C_SYMBOL_NAME(_gcry_mpih_addmul_1:) PROLOG(_gcry_mpih_addmul_1) #define res_ptr a0 #define s1_ptr a1 #define s1_size d2 #define s2_limb d4 /* Save used registers on the stack. */ moveml R(d2)-R(d5),MEM_PREDEC(sp) /* Copy the arguments to registers. Better use movem? 
*/ movel MEM_DISP(sp,20),R(res_ptr) movel MEM_DISP(sp,24),R(s1_ptr) movel MEM_DISP(sp,28),R(s1_size) movel MEM_DISP(sp,32),R(s2_limb) eorw #1,R(s1_size) clrl R(d1) clrl R(d5) lsrl #1,R(s1_size) bcc L(L1) subql #1,R(s1_size) subl R(d0),R(d0) /* (d0,cy) <= (0,0) */ L(Loop:) movel MEM_POSTINC(s1_ptr),R(d3) mulul R(s2_limb),R(d1):R(d3) addxl R(d0),R(d3) addxl R(d5),R(d1) addl R(d3),MEM_POSTINC(res_ptr) L(L1:) movel MEM_POSTINC(s1_ptr),R(d3) mulul R(s2_limb),R(d0):R(d3) addxl R(d1),R(d3) addxl R(d5),R(d0) addl R(d3),MEM_POSTINC(res_ptr) dbf R(s1_size),L(Loop) addxl R(d5),R(d0) subl #0x10000,R(s1_size) bcc L(Loop) /* Restore used registers from stack frame. */ moveml MEM_POSTINC(sp),R(d2)-R(d5) rts EPILOG(_gcry_mpih_addmul_1)
al3xtjames/Clover
2,717
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/m68k/mc68020/mpih-mul3.S
/* mc68020 __mpn_submul_1 -- Multiply a limb vector with a limb and subtract * the result from a second limb vector. * * Copyright (C) 1992, 1994, 1996, 1998, * 2001, 2002 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. */ #include "sysdep.h" #include "asm-syntax.h" /******************* * mpi_limb_t * _gcry_mpih_submul_1( mpi_ptr_t res_ptr, (sp + 4) * mpi_ptr_t s1_ptr, (sp + 8) * mpi_size_t s1_size, (sp + 12) * mpi_limb_t s2_limb) (sp + 16) */ TEXT ALIGN GLOBL C_SYMBOL_NAME(_gcry_mpih_submul_1) C_SYMBOL_NAME(_gcry_mpih_submul_1:) PROLOG(_gcry_mpih_submul_1) #define res_ptr a0 #define s1_ptr a1 #define s1_size d2 #define s2_limb d4 /* Save used registers on the stack. */ moveml R(d2)-R(d5),MEM_PREDEC(sp) /* Copy the arguments to registers. Better use movem? 
*/ movel MEM_DISP(sp,20),R(res_ptr) movel MEM_DISP(sp,24),R(s1_ptr) movel MEM_DISP(sp,28),R(s1_size) movel MEM_DISP(sp,32),R(s2_limb) eorw #1,R(s1_size) clrl R(d1) clrl R(d5) lsrl #1,R(s1_size) bcc L(L1) subql #1,R(s1_size) subl R(d0),R(d0) /* (d0,cy) <= (0,0) */ L(Loop:) movel MEM_POSTINC(s1_ptr),R(d3) mulul R(s2_limb),R(d1):R(d3) addxl R(d0),R(d3) addxl R(d5),R(d1) subl R(d3),MEM_POSTINC(res_ptr) L(L1:) movel MEM_POSTINC(s1_ptr),R(d3) mulul R(s2_limb),R(d0):R(d3) addxl R(d1),R(d3) addxl R(d5),R(d0) subl R(d3),MEM_POSTINC(res_ptr) dbf R(s1_size),L(Loop) addxl R(d5),R(d0) subl #0x10000,R(s1_size) bcc L(Loop) /* Restore used registers from stack frame. */ moveml MEM_POSTINC(sp),R(d2)-R(d5) rts EPILOG(_gcry_mpih_submul_1)
al3xtjames/Clover
2,914
FileSystems/GrubFS/grub/grub-core/lib/i386/xen/relocator.S
/* * GRUB -- GRand Unified Bootloader * Copyright (C) 2013 Free Software Foundation, Inc. * * GRUB is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GRUB is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GRUB. If not, see <http://www.gnu.org/licenses/>. */ #include <grub/symbol.h> #include <grub/xen.h> .p2align 4 /* force 16-byte alignment */ VARIABLE(grub_relocator_xen_remap_start) LOCAL(base): /* mov imm32, %ebx */ .byte 0xbb VARIABLE(grub_relocator_xen_remapper_virt) .long 0 /* mov imm32, %ecx */ .byte 0xb9 VARIABLE(grub_relocator_xen_remapper_map) .long 0 /* mov imm32, %edx */ .byte 0xba VARIABLE(grub_relocator_xen_remapper_map_high) .long 0 movl %ebx, %ebp movl $2, %esi movl $__HYPERVISOR_update_va_mapping, %eax int $0x82 movl %ebp, %ebx addl $(LOCAL(cont) - LOCAL(base)), %ebx jmp *%ebx LOCAL(cont): xorl %eax, %eax movl %eax, %ebp 1: /* mov imm32, %eax */ .byte 0xb8 VARIABLE(grub_relocator_xen_mfn_list) .long 0 movl %eax, %edi movl %ebp, %eax movl 0(%edi, %eax, 4), %ecx /* mov imm32, %ebx */ .byte 0xbb VARIABLE(grub_relocator_xen_paging_start) .long 0 shll $12, %eax addl %eax, %ebx movl %ecx, %edx shll $12, %ecx shrl $20, %edx orl $5, %ecx movl $2, %esi movl $__HYPERVISOR_update_va_mapping, %eax int $0x82 incl %ebp /* mov imm32, %ecx */ .byte 0xb9 VARIABLE(grub_relocator_xen_paging_size) .long 0 cmpl %ebp, %ecx ja 1b /* mov imm32, %ebx */ .byte 0xbb VARIABLE(grub_relocator_xen_mmu_op_addr) .long 0 movl $3, %ecx movl $0, %edx movl $0x7FF0, %esi movl $__HYPERVISOR_mmuext_op, %eax int $0x82 /* mov imm32, %eax */ .byte 0xb8 
VARIABLE(grub_relocator_xen_remap_continue) .long 0 jmp *%eax VARIABLE(grub_relocator_xen_mmu_op) .space 256 VARIABLE(grub_relocator_xen_remap_end) VARIABLE(grub_relocator_xen_start) /* mov imm32, %eax */ .byte 0xb8 VARIABLE(grub_relocator_xen_remapper_virt2) .long 0 movl %eax, %edi xorl %ecx, %ecx xorl %edx, %edx movl $2, %esi movl $__HYPERVISOR_update_va_mapping, %eax int $0x82 /* mov imm32, %eax */ .byte 0xb8 VARIABLE(grub_relocator_xen_stack) .long 0 movl %eax, %esp /* mov imm32, %eax */ .byte 0xb8 VARIABLE(grub_relocator_xen_start_info) .long 0 movl %eax, %esi cld /* mov imm32, %eax */ .byte 0xb8 VARIABLE(grub_relocator_xen_entry_point) .long 0 jmp *%eax VARIABLE(grub_relocator_xen_end)
al3xtjames/Clover
2,919
FileSystems/GrubFS/grub/grub-core/lib/x86_64/xen/relocator.S
/* * GRUB -- GRand Unified Bootloader * Copyright (C) 2013 Free Software Foundation, Inc. * * GRUB is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GRUB is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GRUB. If not, see <http://www.gnu.org/licenses/>. */ #include <grub/symbol.h> #include <grub/xen.h> .p2align 4 /* force 16-byte alignment */ VARIABLE(grub_relocator_xen_remap_start) LOCAL(base): /* mov imm64, %rax */ .byte 0x48 .byte 0xb8 VARIABLE(grub_relocator_xen_remapper_virt) .quad 0 movq %rax, %rdi movq %rax, %rbx /* mov imm64, %rax */ .byte 0x48 .byte 0xb8 VARIABLE(grub_relocator_xen_remapper_map) .quad 0 movq %rax, %rsi movq $2, %rdx movq $__HYPERVISOR_update_va_mapping, %rax syscall addq $(LOCAL(cont) - LOCAL(base)), %rbx jmp *%rbx LOCAL(cont): /* mov imm64, %rcx */ .byte 0x48 .byte 0xb9 VARIABLE(grub_relocator_xen_paging_size) .quad 0 /* mov imm64, %rax */ .byte 0x48 .byte 0xb8 VARIABLE(grub_relocator_xen_paging_start) .quad 0 movq %rax, %r12 /* mov imm64, %rax */ .byte 0x48 .byte 0xb8 VARIABLE(grub_relocator_xen_mfn_list) .quad 0 movq %rax, %rsi 1: movq %r12, %rdi movq %rsi, %rbx movq 0(%rsi), %rsi shlq $12, %rsi orq $5, %rsi movq $2, %rdx movq %rcx, %r9 movq $__HYPERVISOR_update_va_mapping, %rax syscall movq %r9, %rcx addq $8, %rbx addq $4096, %r12 movq %rbx, %rsi loop 1b leaq EXT_C(grub_relocator_xen_mmu_op) (%rip), %rdi movq $3, %rsi movq $0, %rdx movq $0x7FF0, %r10 movq $__HYPERVISOR_mmuext_op, %rax syscall /* mov imm64, %rax */ .byte 0x48 .byte 0xb8 VARIABLE(grub_relocator_xen_remap_continue) .quad 0 jmp *%rax 
VARIABLE(grub_relocator_xen_mmu_op) .space 256 VARIABLE(grub_relocator_xen_remap_end) VARIABLE(grub_relocator_xen_start) /* mov imm64, %rax */ .byte 0x48 .byte 0xb8 VARIABLE(grub_relocator_xen_remapper_virt2) .quad 0 movq %rax, %rdi xorq %rax, %rax movq %rax, %rsi movq $2, %rdx movq $__HYPERVISOR_update_va_mapping, %rax syscall /* mov imm64, %rax */ .byte 0x48 .byte 0xb8 VARIABLE(grub_relocator_xen_stack) .quad 0 movq %rax, %rsp /* mov imm64, %rax */ .byte 0x48 .byte 0xb8 VARIABLE(grub_relocator_xen_start_info) .quad 0 movq %rax, %rsi cld /* mov imm64, %rax */ .byte 0x48 .byte 0xb8 VARIABLE(grub_relocator_xen_entry_point) .quad 0 jmp *%rax VARIABLE(grub_relocator_xen_end)
al3xtjames/Clover
2,432
FileSystems/GrubFS/grub/grub-core/kern/i386/int.S
/* * GRUB -- GRand Unified Bootloader * Copyright (C) 2010,2011 Free Software Foundation, Inc. * * GRUB is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GRUB is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GRUB. If not, see <http://www.gnu.org/licenses/>. */ FUNCTION(grub_bios_interrupt) pushf cli popf pushl %ebp pushl %ecx pushl %eax pushl %ebx pushl %esi pushl %edi pushl %edx movb %al, intno movl (%edx), %eax movl %eax, LOCAL(bios_register_eax) movw 4(%edx), %ax movw %ax, LOCAL(bios_register_es) movw 6(%edx), %ax movw %ax, LOCAL(bios_register_ds) movw 8(%edx), %ax movw %ax, LOCAL(bios_register_flags) movl 12(%edx), %ebx movl 16(%edx), %ecx movl 20(%edx), %edi movl 24(%edx), %esi movl 28(%edx), %edx PROT_TO_REAL .code16 pushf cli mov %ds, %ax push %ax /* movw imm16, %ax*/ .byte 0xb8 LOCAL(bios_register_es): .short 0 movw %ax, %es /* movw imm16, %ax*/ .byte 0xb8 LOCAL(bios_register_ds): .short 0 movw %ax, %ds /* movw imm16, %ax*/ .byte 0xb8 LOCAL(bios_register_flags): .short 0 push %ax popf /* movl imm32, %eax*/ .byte 0x66, 0xb8 LOCAL(bios_register_eax): .long 0 /* int imm8. 
*/ .byte 0xcd intno: .byte 0 movl %eax, %cs:LOCAL(bios_register_eax) movw %ds, %ax movw %ax, %cs:LOCAL(bios_register_ds) pop %ax mov %ax, %ds pushf pop %ax movw %ax, LOCAL(bios_register_flags) mov %es, %ax movw %ax, LOCAL(bios_register_es) popf REAL_TO_PROT .code32 popl %eax movl %ebx, 12(%eax) movl %ecx, 16(%eax) movl %edi, 20(%eax) movl %esi, 24(%eax) movl %edx, 28(%eax) movl %eax, %edx movl LOCAL(bios_register_eax), %eax movl %eax, (%edx) movw LOCAL(bios_register_es), %ax movw %ax, 4(%edx) movw LOCAL(bios_register_ds), %ax movw %ax, 6(%edx) movw LOCAL(bios_register_flags), %ax movw %ax, 8(%edx) popl %edi popl %esi popl %ebx popl %eax popl %ecx popl %ebp ret
al3xtjames/Clover
7,039
FileSystems/GrubFS/grub/grub-core/kern/i386/realmode.S
/* * GRUB -- GRand Unified Bootloader * Copyright (C) 1999,2000,2001,2002,2003,2005,2006,2007,2009,2010 Free Software Foundation, Inc. * * GRUB is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GRUB is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GRUB. If not, see <http://www.gnu.org/licenses/>. */ #include <grub/machine/memory.h> /* * Note: These functions defined in this file may be called from C. * Be careful of that you must not modify some registers. Quote * from gcc-2.95.2/gcc/config/i386/i386.h: 1 for registers not available across function calls. These must include the FIXED_REGISTERS and also any registers that can be used without being saved. The latter must include the registers where values are returned and the register where structure-value addresses are passed. Aside from that, you can include as many other registers as you like. ax,dx,cx,bx,si,di,bp,sp,st,st1,st2,st3,st4,st5,st6,st7,arg { 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 } */ /* * Note: GRUB is compiled with the options -mrtd and -mregparm=3. * So the first three arguments are passed in %eax, %edx, and %ecx, * respectively, and if a function has a fixed number of arguments * and the number if greater than three, the function must return * with "ret $N" where N is ((the number of arguments) - 3) * 4. */ /* * This is the area for all of the special variables. 
*/ protstack: .long GRUB_MEMORY_MACHINE_PROT_STACK .macro PROT_TO_REAL call prot_to_real .endm .macro REAL_TO_PROT DATA32 call real_to_prot .endm /* * This is the Global Descriptor Table * * An entry, a "Segment Descriptor", looks like this: * * 31 24 19 16 7 0 * ------------------------------------------------------------ * | | |B| |A| | | |1|0|E|W|A| | * | BASE 31..24 |G|/|L|V| LIMIT |P|DPL| TYPE | BASE 23:16 | 4 * | | |D| |L| 19..16| | |1|1|C|R|A| | * ------------------------------------------------------------ * | | | * | BASE 15..0 | LIMIT 15..0 | 0 * | | | * ------------------------------------------------------------ * * Note the ordering of the data items is reversed from the above * description. */ .p2align 5 /* force 4-byte alignment */ gdt: .word 0, 0 .byte 0, 0, 0, 0 /* -- code segment -- * base = 0x00000000, limit = 0xFFFFF (4 KiB Granularity), present * type = 32bit code execute/read, DPL = 0 */ .word 0xFFFF, 0 .byte 0, 0x9A, 0xCF, 0 /* -- data segment -- * base = 0x00000000, limit 0xFFFFF (4 KiB Granularity), present * type = 32 bit data read/write, DPL = 0 */ .word 0xFFFF, 0 .byte 0, 0x92, 0xCF, 0 /* -- 16 bit real mode CS -- * base = 0x00000000, limit 0x0FFFF (1 B Granularity), present * type = 16 bit code execute/read only/conforming, DPL = 0 */ .word 0xFFFF, 0 .byte 0, 0x9E, 0, 0 /* -- 16 bit real mode DS -- * base = 0x00000000, limit 0x0FFFF (1 B Granularity), present * type = 16 bit data read/write, DPL = 0 */ .word 0xFFFF, 0 .byte 0, 0x92, 0, 0 .p2align 5 /* this is the GDT descriptor */ gdtdesc: .word 0x27 /* limit */ .long gdt /* addr */ LOCAL(realidt): .word 0x400 .long 0 protidt: .word 0 .long 0 /* * These next two routines, "real_to_prot" and "prot_to_real" are structured * in a very specific way. Be very careful when changing them. * * NOTE: Use of either one messes up %eax and %ebp. 
*/ real_to_prot: .code16 cli /* load the GDT register */ xorw %ax, %ax movw %ax, %ds DATA32 ADDR32 lgdt gdtdesc /* turn on protected mode */ movl %cr0, %eax orl $GRUB_MEMORY_CPU_CR0_PE_ON, %eax movl %eax, %cr0 /* jump to relocation, flush prefetch queue, and reload %cs */ DATA32 ljmp $GRUB_MEMORY_MACHINE_PROT_MODE_CSEG, $protcseg .code32 protcseg: /* reload other segment registers */ movw $GRUB_MEMORY_MACHINE_PROT_MODE_DSEG, %ax movw %ax, %ds movw %ax, %es movw %ax, %fs movw %ax, %gs movw %ax, %ss /* put the return address in a known safe location */ movl (%esp), %eax movl %eax, GRUB_MEMORY_MACHINE_REAL_STACK /* get protected mode stack */ movl protstack, %eax movl %eax, %esp movl %eax, %ebp /* get return address onto the right stack */ movl GRUB_MEMORY_MACHINE_REAL_STACK, %eax movl %eax, (%esp) /* zero %eax */ xorl %eax, %eax sidt LOCAL(realidt) lidt protidt /* return on the old (or initialized) stack! */ ret /* * GRUB -- GRand Unified Bootloader * Copyright (C) 1999,2000,2001,2002,2003,2005,2006,2007,2009,2010 Free Software Foundation, Inc. * * GRUB is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GRUB is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GRUB. If not, see <http://www.gnu.org/licenses/>. 
*/ prot_to_real: /* just in case, set GDT */ lgdt gdtdesc sidt protidt lidt LOCAL(realidt) /* save the protected mode stack */ movl %esp, %eax movl %eax, protstack /* get the return address */ movl (%esp), %eax movl %eax, GRUB_MEMORY_MACHINE_REAL_STACK /* set up new stack */ movl $GRUB_MEMORY_MACHINE_REAL_STACK, %eax movl %eax, %esp movl %eax, %ebp /* set up segment limits */ movw $GRUB_MEMORY_MACHINE_PSEUDO_REAL_DSEG, %ax movw %ax, %ds movw %ax, %es movw %ax, %fs movw %ax, %gs movw %ax, %ss /* this might be an extra step */ /* jump to a 16 bit segment */ ljmp $GRUB_MEMORY_MACHINE_PSEUDO_REAL_CSEG, $tmpcseg tmpcseg: .code16 /* clear the PE bit of CR0 */ movl %cr0, %eax andl $(~GRUB_MEMORY_CPU_CR0_PE_ON), %eax movl %eax, %cr0 /* flush prefetch queue, reload %cs */ DATA32 ljmp $0, $realcseg realcseg: /* we are in real mode now * set up the real mode segment registers : DS, SS, ES */ /* zero %eax */ xorl %eax, %eax movw %ax, %ds movw %ax, %es movw %ax, %fs movw %ax, %gs movw %ax, %ss #ifdef GRUB_MACHINE_PCBIOS /* restore interrupts */ sti #endif /* return on new stack! */ DATA32 ret .code32
al3xtjames/Clover
2,045
FileSystems/GrubFS/grub/grub-core/kern/i386/coreboot/startup.S
/* * GRUB -- GRand Unified Bootloader * Copyright (C) 1999,2000,2001,2002,2003,2005,2006,2007,2008 Free Software Foundation, Inc. * * GRUB is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GRUB is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GRUB. If not, see <http://www.gnu.org/licenses/>. */ #include <grub/symbol.h> #include <grub/machine/memory.h> #include <grub/cpu/linux.h> #include <grub/offsets.h> #include <multiboot.h> #include <multiboot2.h> /* * Note: GRUB is compiled with the options -mrtd and -mregparm=3. * So the first three arguments are passed in %eax, %edx, and %ecx, * respectively, and if a function has a fixed number of arguments * and the number if greater than three, the function must return * with "ret $N" where N is ((the number of arguments) - 3) * 4. */ .file "startup.S" .text .globl start, _start start: _start: #ifdef GRUB_MACHINE_MULTIBOOT cmpl $MULTIBOOT_BOOTLOADER_MAGIC, %eax jne 0f movl %ebx, EXT_C(startup_multiboot_info) 0: #endif /* initialize the stack */ movl $GRUB_MEMORY_MACHINE_PROT_STACK, %esp /* jump to the main body of C code */ jmp EXT_C(grub_main) /* * Support for booting GRUB from a Multiboot boot loader (e.g. GRUB itself). */ .p2align 2 /* force 4-byte alignment */ multiboot_header: /* magic */ .long 0x1BADB002 /* flags */ .long MULTIBOOT_MEMORY_INFO /* checksum */ .long -0x1BADB002 - MULTIBOOT_MEMORY_INFO /* * prot_to_real and associated structures (but NOT real_to_prot, that is * only needed for BIOS gates). */ #include "../realmode.S"
al3xtjames/Clover
1,092
FileSystems/GrubFS/grub/grub-core/kern/i386/xen/startup.S
/* startup.S - bootstrap GRUB itself */ /* * GRUB -- GRand Unified Bootloader * Copyright (C) 2013 Free Software Foundation, Inc. * * GRUB is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GRUB is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GRUB. If not, see <http://www.gnu.org/licenses/>. */ #include <config.h> #include <grub/symbol.h> .file "startup.S" .text .globl start, _start .code32 start: _start: leal LOCAL(stack_end), %esp movl %esi, EXT_C(grub_xen_start_page_addr) call EXT_C(grub_main) /* Doesn't return. */ .bss .space (1 << 22) LOCAL(stack_end):
al3xtjames/Clover
1,200
FileSystems/GrubFS/grub/grub-core/kern/i386/xen/hypercall.S
/* hypercall.S - wrappers for Xen hypercalls */ /* * GRUB -- GRand Unified Bootloader * Copyright (C) 2013 Free Software Foundation, Inc. * * GRUB is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GRUB is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GRUB. If not, see <http://www.gnu.org/licenses/>. */ #include <grub/symbol.h> #include <grub/xen.h> FUNCTION(grub_xen_hypercall) pushl %ebp movl %esp, %ebp pushl %esi pushl %edi pushl %ebx /* call number already in %eax. */ /* %edx -> %ebx*/ /* %ecx -> %ecx*/ movl %edx, %ebx movl 8(%ebp), %edx movl 12(%ebp), %esi movl 16(%ebp), %edi movl 20(%ebp), %ebp int $0x82 popl %ebx popl %edi popl %esi popl %ebp ret
al3xtjames/Clover
1,143
FileSystems/GrubFS/grub/grub-core/kern/i386/efi/startup.S
/* startup.S - bootstrap GRUB itself */ /* * GRUB -- GRand Unified Bootloader * Copyright (C) 2006,2007,2010 Free Software Foundation, Inc. * * GRUB is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GRUB is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GRUB. If not, see <http://www.gnu.org/licenses/>. */ #include <config.h> #include <grub/symbol.h> .file "startup.S" .text .globl start, _start start: _start: /* * EFI_SYSTEM_TABLE * and EFI_HANDLE are passed on the stack. */ movl 4(%esp), %eax movl %eax, EXT_C(grub_efi_image_handle) movl 8(%esp), %eax movl %eax, EXT_C(grub_efi_system_table) call EXT_C(grub_main) ret
al3xtjames/Clover
1,396
FileSystems/GrubFS/grub/grub-core/kern/i386/ieee1275/startup.S
/* * GRUB -- GRand Unified Bootloader * Copyright (C) 1999,2000,2001,2002,2003,2005,2006,2007,2008 Free Software Foundation, Inc. * * GRUB is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GRUB is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GRUB. If not, see <http://www.gnu.org/licenses/>. */ #include <grub/symbol.h> #include <grub/offsets.h> #include <grub/cpu/linux.h> #include <multiboot.h> #include <multiboot2.h> /* * Note: GRUB is compiled with the options -mrtd and -mregparm=3. * So the first three arguments are passed in %eax, %edx, and %ecx, * respectively, and if a function has a fixed number of arguments * and the number if greater than three, the function must return * with "ret $N" where N is ((the number of arguments) - 3) * 4. */ .file "startup.S" .text .globl start, _start start: _start: movl %eax, EXT_C(grub_ieee1275_entry_fn) jmp EXT_C(grub_main)
al3xtjames/Clover
1,781
FileSystems/GrubFS/grub/grub-core/kern/i386/qemu/startup.S
/* * GRUB -- GRand Unified Bootloader * Copyright (C) 1999,2000,2001,2002,2003,2005,2006,2007,2008,2009 Free Software Foundation, Inc. * * GRUB is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GRUB is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GRUB. If not, see <http://www.gnu.org/licenses/>. */ #include <config.h> #include <grub/symbol.h> #include <grub/machine/memory.h> #include <grub/machine/kernel.h> .text .code32 .globl _start _start: jmp codestart . = _start + GRUB_KERNEL_I386_QEMU_CORE_ENTRY_ADDR VARIABLE(grub_core_entry_addr) .long 0 codestart: /* Relocate to low memory. First we figure out our location. We will derive the rom start address from it. */ call 1f 1: popl %esi /* Rom size is a multiple of 64 kiB. With this we get the value of `grub_core_entry_addr' in %esi. */ xorw %si, %si movl $(_edata - _start), %ecx movl $_start, %edi cld rep movsb ljmp $GRUB_MEMORY_MACHINE_PROT_MODE_CSEG, $1f 1: /* clean out the bss */ movl $BSS_START_SYMBOL, %edi /* compute the bss length */ movl $END_SYMBOL, %ecx subl %edi, %ecx /* clean out */ xorl %eax, %eax cld rep stosb /* * Call the start of main body of C code. */ call EXT_C(grub_main) /* This should never happen. */ cli 1: hlt jmp 1b #include "../realmode.S"
al3xtjames/Clover
4,743
FileSystems/GrubFS/grub/grub-core/kern/i386/pc/startup.S
/* * GRUB -- GRand Unified Bootloader * Copyright (C) 1999,2000,2001,2002,2003,2005,2006,2007,2008,2009,2011 Free Software Foundation, Inc. * * GRUB is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GRUB is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GRUB. If not, see <http://www.gnu.org/licenses/>. */ /* * Note: These functions defined in this file may be called from C. * Be careful of that you must not modify some registers. Quote * from gcc-2.95.2/gcc/config/i386/i386.h: 1 for registers not available across function calls. These must include the FIXED_REGISTERS and also any registers that can be used without being saved. The latter must include the registers where values are returned and the register where structure-value addresses are passed. Aside from that, you can include as many other registers as you like. ax,dx,cx,bx,si,di,bp,sp,st,st1,st2,st3,st4,st5,st6,st7,arg { 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 } */ /* * Note: GRUB is compiled with the options -mrtd and -mregparm=3. * So the first three arguments are passed in %eax, %edx, and %ecx, * respectively, and if a function has a fixed number of arguments * and the number is greater than three, the function must return * with "ret $N" where N is ((the number of arguments) - 3) * 4. 
*/ #include <config.h> #include <grub/symbol.h> #include <multiboot.h> #ifdef __APPLE__ #include <grub/i386/pc/memory.h> #endif .file "startup.S" .text .globl start, _start, __start start: _start: __start: #ifdef __APPLE__ LOCAL(start): #endif .code32 movl %ecx, (LOCAL(real_to_prot_addr) - _start) (%esi) movl %edi, (LOCAL(prot_to_real_addr) - _start) (%esi) movl %eax, (EXT_C(grub_realidt) - _start) (%esi) /* copy back the decompressed part (except the modules) */ #ifdef __APPLE__ movl $EXT_C(_edata), %ecx subl $LOCAL(start), %ecx #else movl $(_edata - _start), %ecx #endif movl $(_start), %edi rep movsb movl $LOCAL (cont), %esi jmp *%esi LOCAL(cont): #if 0 /* copy modules before cleaning out the bss */ movl EXT_C(grub_total_module_size), %ecx movl EXT_C(grub_kernel_image_size), %esi addl %ecx, %esi addl $_start, %esi decl %esi movl $END_SYMBOL, %edi addl %ecx, %edi decl %edi std rep movsb #endif #ifdef __APPLE__ /* clean out the bss */ movl $EXT_C(_edata), %edi /* compute the bss length */ movl $GRUB_MEMORY_MACHINE_SCRATCH_ADDR, %ecx #else /* clean out the bss */ movl $BSS_START_SYMBOL, %edi /* compute the bss length */ movl $END_SYMBOL, %ecx #endif subl %edi, %ecx /* clean out */ xorl %eax, %eax cld rep stosb movl %edx, EXT_C(grub_boot_device) /* * Call the start of main body of C code. */ call EXT_C(grub_main) LOCAL(real_to_prot_addr): .long 0 LOCAL(prot_to_real_addr): .long 0 .macro PROT_TO_REAL movl LOCAL(prot_to_real_addr), %eax call *%eax .endm .macro REAL_TO_PROT movl LOCAL(real_to_prot_addr), %eax DATA32 call *%ax .endm /* * grub_exit() * * Exit the system. */ FUNCTION(grub_exit) PROT_TO_REAL .code16 /* Tell the BIOS a boot failure. If this does not work, reboot. 
*/ int $0x18 /* set 0x472 to 0x0000 for cold boot (0x1234 for warm boot) */ xorw %ax, %ax movw $0x0472, %di movw %ax, (%di) ljmp $0xf000, $0xfff0 .code32 /* * int grub_pxe_call (int func, void* data, grub_uint32_t pxe_rm_entry); */ FUNCTION(grub_pxe_call) pushl %ebp movl %esp, %ebp pushl %esi pushl %edi pushl %ebx movl %ecx, %ebx movl %eax, %ecx movl %edx, %eax andl $0xF, %eax shrl $4, %edx shll $16, %edx addl %eax, %edx PROT_TO_REAL .code16 pushl %ebx pushl %edx pushw %cx movw %sp, %bx lcall *%ss:6(%bx) cld addw $10, %sp movw %ax, %cx REAL_TO_PROT .code32 movzwl %cx, %eax popl %ebx popl %edi popl %esi popl %ebp ret #include "../int.S" VARIABLE(grub_realidt) .long 0 #ifdef __APPLE__ /* Older versions of objconv assume that there is the same number of text and data sections. Hence this dummy. */ .section __TEXT, __zz_dummy .byte 0 .globl EXT_C(_edata) .globl EXT_C(grub_boot_device) .zerofill __DATA, __aa_before_bss, EXT_C(_edata), 1, 0 .zerofill __DATA, __bss, EXT_C(grub_boot_device), 4, 2 #else .bss VARIABLE(grub_boot_device) .long 0 #endif
al3xtjames/Clover
1,116
FileSystems/GrubFS/grub/grub-core/kern/x86_64/xen/startup.S
/* startup.S - bootstrap GRUB itself */ /* * GRUB -- GRand Unified Bootloader * Copyright (C) 2013 Free Software Foundation, Inc. * * GRUB is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GRUB is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GRUB. If not, see <http://www.gnu.org/licenses/>. */ #include <config.h> #include <grub/symbol.h> .file "startup.S" .text .globl start, _start .code64 start: _start: leaq LOCAL(stack_end), %rsp movq %rsi, EXT_C(grub_xen_start_page_addr)(%rip) andq $~0xf, %rsp call EXT_C(grub_main) /* Doesn't return. */ .bss .space (1 << 22) LOCAL(stack_end):
al3xtjames/Clover
1,386
FileSystems/GrubFS/grub/grub-core/kern/x86_64/xen/hypercall.S
/* hypercall.S - wrappers for Xen hypercalls */ /* * GRUB -- GRand Unified Bootloader * Copyright (C) 2011 Free Software Foundation, Inc. * * GRUB is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GRUB is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GRUB. If not, see <http://www.gnu.org/licenses/>. */ #include <grub/symbol.h> #include <grub/xen.h> FUNCTION(grub_xen_sched_op) movq $__HYPERVISOR_sched_op, %rax syscall ret FUNCTION(grub_xen_event_channel_op) movq $__HYPERVISOR_event_channel_op, %rax syscall ret FUNCTION(grub_xen_update_va_mapping) movq $__HYPERVISOR_update_va_mapping, %rax syscall ret FUNCTION(grub_xen_mmuext_op) movq %rcx, %r10 movq $__HYPERVISOR_mmuext_op, %rax syscall ret FUNCTION(grub_xen_grant_table_op) movq $__HYPERVISOR_grant_table_op, %rax syscall ret FUNCTION(grub_xen_mmu_update) movq %rcx, %r10 movq $__HYPERVISOR_mmu_update, %rax syscall ret
al3xtjames/Clover
1,097
FileSystems/GrubFS/grub/grub-core/kern/x86_64/efi/startup.S
/* startup.S - bootstrap GRUB itself */ /* * GRUB -- GRand Unified Bootloader * Copyright (C) 2006,2007,2009 Free Software Foundation, Inc. * * GRUB is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GRUB is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GRUB. If not, see <http://www.gnu.org/licenses/>. */ #include <config.h> #include <grub/symbol.h> .file "startup.S" .text .globl start, _start .code64 start: _start: movq %rcx, EXT_C(grub_efi_image_handle)(%rip) movq %rdx, EXT_C(grub_efi_system_table)(%rip) andq $~0xf, %rsp call EXT_C(grub_main) /* Doesn't return. */
al3xtjames/Clover
2,535
FileSystems/GrubFS/grub/grub-core/kern/x86_64/efi/callwrap.S
/* callwrap.S - wrapper for x86_64 efi calls */ /* * GRUB -- GRand Unified Bootloader * Copyright (C) 2006,2007,2009 Free Software Foundation, Inc. * * GRUB is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GRUB is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GRUB. If not, see <http://www.gnu.org/licenses/>. */ #include <config.h> #include <grub/symbol.h> /* * x86_64 uses registry to pass parameters. Unfortunately, gcc and efi use * different call conversion, so we need to do some conversion. * * gcc: * %rdi, %rsi, %rdx, %rcx, %r8, %r9, 8(%rsp), 16(%rsp), ... * * efi: * %rcx, %rdx, %r8, %r9, 32(%rsp), 40(%rsp), 48(%rsp), ... 
* */ .file "callwrap.S" .text FUNCTION(efi_wrap_0) subq $40, %rsp call *%rdi addq $40, %rsp ret FUNCTION(efi_wrap_1) subq $40, %rsp mov %rsi, %rcx call *%rdi addq $40, %rsp ret FUNCTION(efi_wrap_2) subq $40, %rsp mov %rsi, %rcx call *%rdi addq $40, %rsp ret FUNCTION(efi_wrap_3) subq $40, %rsp mov %rcx, %r8 mov %rsi, %rcx call *%rdi addq $40, %rsp ret FUNCTION(efi_wrap_4) subq $40, %rsp mov %r8, %r9 mov %rcx, %r8 mov %rsi, %rcx call *%rdi addq $40, %rsp ret FUNCTION(efi_wrap_5) subq $40, %rsp mov %r9, 32(%rsp) mov %r8, %r9 mov %rcx, %r8 mov %rsi, %rcx call *%rdi addq $40, %rsp ret FUNCTION(efi_wrap_6) subq $56, %rsp mov 56+8(%rsp), %rax mov %rax, 40(%rsp) mov %r9, 32(%rsp) mov %r8, %r9 mov %rcx, %r8 mov %rsi, %rcx call *%rdi addq $56, %rsp ret FUNCTION(efi_wrap_7) subq $88, %rsp mov 88+16(%rsp), %rax mov %rax, 48(%rsp) mov 88+8(%rsp), %rax mov %rax, 40(%rsp) mov %r9, 32(%rsp) mov %r8, %r9 mov %rcx, %r8 mov %rsi, %rcx call *%rdi addq $88, %rsp ret FUNCTION(efi_wrap_10) subq $88, %rsp mov 88+40(%rsp), %rax mov %rax, 72(%rsp) mov 88+32(%rsp), %rax mov %rax, 64(%rsp) mov 88+24(%rsp), %rax mov %rax, 56(%rsp) mov 88+16(%rsp), %rax mov %rax, 48(%rsp) mov 88+8(%rsp), %rax mov %rax, 40(%rsp) mov %r9, 32(%rsp) mov %r8, %r9 mov %rcx, %r8 mov %rsi, %rcx call *%rdi addq $88, %rsp ret
al3xtjames/Clover
1,822
Patches_for_EDK2/OvmfPkg/CsmOld/LegacyBiosDxe/IA32/InterruptTable.S
## @file # Interrupt Redirection Template # # Copyright (c) 2006, Intel Corporation. All rights reserved.<BR> # # This program and the accompanying materials # are licensed and made available under the terms and conditions # of the BSD License which accompanies this distribution. The # full text of the license may be found at # http://opensource.org/licenses/bsd-license.php # # THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, # WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. # ## ASM_GLOBAL ASM_PFX(InterruptRedirectionTemplate) #---------------------------------------------------------------------------- # Procedure: InterruptRedirectionTemplate: Redirects interrupts 0x68-0x6F # # Input: None # # Output: None # # Prototype: VOID # InterruptRedirectionTemplate ( # VOID # ); # # Saves: None # # Modified: None # # Description: Contains the code that is copied into low memory (below 640K). # This code reflects interrupts 0x68-0x6f to interrupts 0x08-0x0f. # This template must be copied into low memory, and the IDT entries # 0x68-0x6F must be point to the low memory copy of this code. Each # entry is 4 bytes long, so IDT entries 0x68-0x6F can be easily # computed. # #---------------------------------------------------------------------------- ASM_PFX(InterruptRedirectionTemplate): int $0x8 .byte 0xcf nop int $0x9 .byte 0xcf nop int $0xa .byte 0xcf nop int $0xb .byte 0xcf nop int $0xc .byte 0xcf nop int $0xd .byte 0xcf nop int $0xe .byte 0xcf nop int $0xf .byte 0xcf nop
al3xtjames/Clover
2,008
Patches_for_EDK2/OvmfPkg/CsmOld/LegacyBiosDxe/X64/InterruptTable.S
## @file # Interrupt Redirection Template # # Copyright (c) 2006, Intel Corporation. All rights reserved.<BR> # # This program and the accompanying materials # are licensed and made available under the terms and conditions # of the BSD License which accompanies this distribution. The # full text of the license may be found at # http://opensource.org/licenses/bsd-license.php # # THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, # WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. # ## #text SEGMENT #---------------------------------------------------------------------------- # Procedure: InterruptRedirectionTemplate: Redirects interrupts 0x68-0x6F # # Input: None # # Output: None # # Prototype: VOID # InterruptRedirectionTemplate ( # VOID # ); # # Saves: None # # Modified: None # # Description: Contains the code that is copied into low memory (below 640K). # This code reflects interrupts 0x68-0x6f to interrupts 0x08-0x0f. # This template must be copied into low memory, and the IDT entries # 0x68-0x6F must be point to the low memory copy of this code. Each # entry is 4 bytes long, so IDT entries 0x68-0x6F can be easily # computed. # #---------------------------------------------------------------------------- ASM_GLOBAL ASM_PFX(InterruptRedirectionTemplate) ASM_PFX(InterruptRedirectionTemplate): int $0x08 .byte 0x0cf # IRET nop int $0x09 .byte 0x0cf # IRET nop int $0x0a .byte 0x0cf # IRET nop int $0x0b .byte 0x0cf # IRET nop int $0x0c .byte 0x0cf # IRET nop int $0x0d .byte 0x0cf # IRET nop int $0x0e .byte 0x0cf # IRET nop int $0x0f .byte 0x0cf # IRET nop #END
al3xtjames/Clover
21,906
Patches_for_EDK2/OvmfPkg/CsmOld/LegacyBiosDxe/Ipf/IpfThunk.s
//// @file // // Copyright (c) 1999 - 2008, Intel Corporation. All rights reserved.<BR> // // This program and the accompanying materials // are licensed and made available under the terms and conditions // of the BSD License which accompanies this distribution. The // full text of the license may be found at // http://opensource.org/licenses/bsd-license.php // // THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, // WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. // //// .file "IpfThunk.s" #include "IpfMacro.i" #include "Ipf/IpfThunk.i" .align 0x10 //----------------------------------------------------------------------------- //++ // EfiIaEntryPoint // // Register physical address of Esal Data Area // // On Entry : // in1 = ptr to legacy bios reg // in2 = ptr to Call Stack // in3 = Call Stack Size // // Return Value: // r8 = SAL_SUCCESS // // As per static calling conventions. // //-- //--------------------------------------------------------------------------- PROCEDURE_ENTRY(EfiIaEntryPoint) alloc loc0 = 8,10,8,0;; mov out0 = r0;; mov out1 = r0;; mov out2 = r0;; mov out3 = r0;; mov out4 = r0;; mov out5 = r0;; mov out6 = r0;; mov out7 = r0;; mov loc1 = b0;; // save efi (b0) mov loc2 = psr;; // save efi (PSR) mov loc3 = gp;; // save efi (GP) mov loc4 = pr;; // save efi (PR) mov loc5 = sp;; // save efi (SP) mov loc6 = r13;; // save efi (TP) mov loc7 = ar.lc;; // save efi (LC) mov loc8 = ar.fpsr;; // save efi (FPSR) mov r8 = r0;; // return status mov r9 = r0;; // return value mov r10 = r0;; // return value mov r11 = r0;; // return value bios_int_func:: rsm 0x4000;; // i(14)=0, disable interrupt srlz.d;; srlz.i;; //---------------------// // save fp registers // //---------------------// dep sp = 0,sp,0,4;; // align 16 add sp = -16,sp;; // post decrement int_ip_1x:: mov r2 = ip;; add r2 = (int_ip_1y - int_ip_1x),r2;; mov b7 = r2;; br save_fp_registers;; int_ip_1y:: add sp = 16,sp;; // adjust (SP) mov loc9 = sp;; // 
save (SP) adds sp = 0x10,in1;; // in1 + 0x10 = SP ld4 sp = [sp];; // SP adds r17 = 0x32,in1;; // in1 + 0x32 = SS ld2 r17 = [r17];; // SS movl r2 = 0xffffffff;; // if no SS:SP, then define new SS:SP cmp.ne p6,p0 = sp,r2;; movl r2 = 0xffff;; cmp.ne.or p6,p0 = r17,r2;; (p6) br.sptk bif_1;; mov sp = in3;; // 16-bit stack pointer mov r2 = psr;; tbit.z p6,p7 = r2,17;; // psr.dt (Physical OR Virtual) bif_ip1x:: mov r2 = in2;; // ia32 callback stack top mov r3 = in3;; // 16-bit stack pointer sub r2 = r2,r3;; shr.u r17 = r2,4;; // 16-bit stack segment bif_1:: extr.u sp = sp,0,16;; // SP (16-bit sp for legacy code) dep sp = 0,sp,0,3;; // align 8 cmp.eq p6,p0 = 0,sp;; // if SP=0000 then wrap to 0x10000 (p6) dep sp = -1,sp,16,1;; shladd r2 = r17,4,sp;; // ESP = SS<<4+SP add r2 = -8,r2;; // post decrement 64 bit pointer add sp = -8,sp;; // post decrement SP sale_ip1x:: mov r18 = ip;; adds r18 = (sale_ip1y - sale_ip1x),r18;; sub r18 = r18,r2;; // return address - CS base add r18 = r18,sp;; // adjustment for stack shl r18 = r18,32;; movl r19 = 0xb80f66fa;; // CLI, JMPE xxxxxxxx or r18 = r18,r19;; st8 [r2] = r18;; // (FA,66,0F,B8,xx,xx,xx,xx) cmp.eq p6,p0 = 0,sp;; // if SP=0000 then wrap to 0x10000 (p6) dep sp = -1,sp,16,1;; shladd r2 = r17,4,sp;; // ESP=SS<<4+SP add r2 = -2,r2;; // post decrement 64 bit pointer add sp = -2,sp;; // post decrement SP movl r18 = 0x8000000000000100;; // CALL FAR function cmp.eq p6,p7 = in0,r18;; (p6) add r19 = 0x28,in1;; // in1 + 0x28 = CS (p6) ld2 r18 = [r19],-4;; // CS (p6) st2 [r2] = r18,-2;; // in1 + 0x24 = EIP (p6) ld2 r18 = [r19];; // EIP (p6) st2 [r2] = r18,-2;; // (p6) movl r18 = 0x9a90;; // nop, CALLFAR xxxx:yyyy (p7) movl r18 = 0xcd;; // INT xx (p7) dep r18 = in0,r18,8,8;; st2 [r2] = r18;; // (CD,xx) mov r18 = r2;; // EIP for legacy execution //------------------------------// // flush 32 bytes legacy code // //------------------------------// dep r2 = 0,r2,0,5;; // align to 32 fc r2;; sync.i;; srlz.i;; srlz.d;; 
//------------------------------// // load legacy registers // //------------------------------// mov r2 = in1;; // IA32 BIOS register state ld4 r8 = [r2],4;; // in1 + 0 = EAX ld4 r9 = [r2],4;; // in1 + 4 = ECX ld4 r10 = [r2],4;; // in1 + 8 = EDX ld4 r11 = [r2],4;; // in1 + 12 = EBX add r2 = 4,r2;; // in1 + 16 = ESP (skip) ld4 r13 = [r2],4;; // in1 + 20 = EBP ld4 r14 = [r2],4;; // in1 + 24 = ESI ld4 r15 = [r2],4;; // in1 + 28 = EDI ld4 r3 = [r2],4;; // in1 + 32 = EFLAGS mov ar.eflag = r3;; add r2 = 4,r2;; // in1 + 36 = EIP (skip) add r2 = 2,r2;; // in1 + 40 = CS (skip) ld2 r16 = [r2],2;; // in1 + 42 = DS, (r16 = GS,FS,ES,DS) movl r27 = 0xc93fffff00000000;; dep r27 = r16,r27,4,16;; // r27 = DSD ld2 r19 = [r2],2;; // in1 + 44 = ES dep r16 = r19,r16,16,16;; movl r24 = 0xc93fffff00000000;; dep r24 = r19,r24,4,16;; // r24 = ESD ld2 r19 = [r2],2;; // in1 + 46 = FS dep r16 = r19,r16,32,16;; movl r28 = 0xc93fffff00000000;; dep r28 = r19,r28,4,16;; // r28 = FSD ld2 r19 = [r2],2;; // in1 + 48 = GS dep r16 = r19,r16,48,16;; movl r29 = 0xc93fffff00000000;; dep r29 = r19,r29,4,16;; // r29 = GSD mov r30 = r0;; // r30 = LDTD, clear NaT mov r31 = r0;; // r31 = GDTD, clear NaT dep r17 = r17,r17,16,16;; // CS = SS, (r17 = TSS,LDT,SS,CS) movl r3 = 0x0930ffff00000000;; dep r3 = r17,r3,4,16;; mov ar.csd = r3;; // ar25 = CSD mov ar.ssd = r3;; // ar26 = SSD //------------------------------// // give control to INT function // //------------------------------// br.call.sptk b0 = execute_int_function;; //------------------------------// // store legacy registers // //------------------------------// mov r2 = in1;; st4 [r2] = r8,4;; // EAX st4 [r2] = r9,4;; // ECX st4 [r2] = r10,4;; // EDX st4 [r2] = r11,4;; // EBX add r2 = 4,r2;; // ESP (skip) st4 [r2] = r13,4;; // EBP st4 [r2] = r14,4;; // ESI st4 [r2] = r15,4;; // EDI mov r3 = ar.eflag;; st4 [r2] = r3,4;; // EFLAGS add r2 = 4,r2;; // EIP (skip) add r2 = 2,r2;; // CS (skip) st2 [r2] = r16,2;; // DS, (r16 = GS,FS,ES,DS) extr.u r3 = 
r16,16,16;; st2 [r2] = r3,2;; // ES extr.u r3 = r16,32,16;; st2 [r2] = r3,2;; // FS extr.u r3 = r16,48,16;; st2 [r2] = r3,2;; // GS //------------------------------// // restore fp registers // //------------------------------// mov sp = loc9;; // restore (SP) int_ip_2x:: mov r2 = ip;; add r2 = (int_ip_2y - int_ip_2x),r2;; mov b7 = r2;; br restore_fp_registers;; int_ip_2y:: mov r8 = r0;; // return status mov r9 = r0;; // return value mov r10 = r0;; // return value mov r11 = r0;; // return value mov ar.fpsr = loc8;; // restore efi (FPSR) mov ar.lc = loc7;; // restore efi (LC) mov r13 = loc6;; // restore efi (TP) mov sp = loc5;; // restore efi (SP) mov pr = loc4;; // restore efi (PR) mov gp = loc3;; // restore efi (GP) mov psr.l = loc2;; // restore efi (PSR) srlz.d;; srlz.i;; mov b0 = loc1;; // restore efi (b0) mov ar.pfs = loc0;; br.ret.sptk b0;; // return to efi PROCEDURE_EXIT (EfiIaEntryPoint) //==============================// // EXECUTE_INT_FUNCTION // //==============================// // switch to virtual address // //------------------------------// execute_int_function:: alloc r2 = 0,0,0,0;; // cfm.sof=0 flushrs;; rsm 0x2000;; // ic(13)=0 for control register programming srlz.d;; srlz.i;; mov r2 = psr;; dep r2 = -1,r2,34,1;; // set is(34) dep r2 = -1,r2,44,1;; // set bn(44) dep r2 = -1,r2,36,1;; // set it(36) dep r2 = -1,r2,27,1;; // set rt(27) dep r2 = -1,r2,17,1;; // set dt(17) dep r2 = 0,r2,3,1;; // reset ac(3) dep r2 = -1,r2,13,1;; // set ic(13) mov cr.ipsr = r2;; mov cr.ifs = r0;; // clear interruption function state register mov cr.iip = r18;; rfi;; // go to legacy code execution //------------------------------// // back from legacy code // //------------------------------// // switch to physical address // //------------------------------// sale_ip1y:: rsm 0x6000;; // i(14)=0,ic(13)=0 for control reg programming srlz.d;; srlz.i;; mov r2 = psr;; dep r2 = -1,r2,44,1;; // set bn(44) dep r2 = 0,r2,36,1;; // reset it(36) dep r2 = 0,r2,27,1;; // reset 
rt(27) dep r2 = 0,r2,17,1;; // reset dt(17) dep r2 = -1,r2,13,1;; // set ic(13) mov cr.ipsr = r2;; sale_ip2x:: mov r2 = ip;; add r2 = (sale_ip2y - sale_ip2x),r2;; mov cr.ifs = r0;; // clear interruption function state register mov cr.iip = r2;; rfi;; sale_ip2y:: br.ret.sptk b0;; // return to SAL //------------------------------// // store fp registers // //------------------------------// save_fp_registers:: stf.spill [sp]=f2,-16;; stf.spill [sp]=f3,-16;; stf.spill [sp]=f4,-16;; stf.spill [sp]=f5,-16;; stf.spill [sp]=f6,-16;; stf.spill [sp]=f7,-16;; stf.spill [sp]=f8,-16;; stf.spill [sp]=f9,-16;; stf.spill [sp]=f10,-16;; stf.spill [sp]=f11,-16;; stf.spill [sp]=f12,-16;; stf.spill [sp]=f13,-16;; stf.spill [sp]=f14,-16;; stf.spill [sp]=f15,-16;; stf.spill [sp]=f16,-16;; stf.spill [sp]=f17,-16;; stf.spill [sp]=f18,-16;; stf.spill [sp]=f19,-16;; stf.spill [sp]=f20,-16;; stf.spill [sp]=f21,-16;; stf.spill [sp]=f22,-16;; stf.spill [sp]=f23,-16;; stf.spill [sp]=f24,-16;; stf.spill [sp]=f25,-16;; stf.spill [sp]=f26,-16;; stf.spill [sp]=f27,-16;; stf.spill [sp]=f28,-16;; stf.spill [sp]=f29,-16;; stf.spill [sp]=f30,-16;; stf.spill [sp]=f31,-16;; stf.spill [sp]=f32,-16;; stf.spill [sp]=f33,-16;; stf.spill [sp]=f34,-16;; stf.spill [sp]=f35,-16;; stf.spill [sp]=f36,-16;; stf.spill [sp]=f37,-16;; stf.spill [sp]=f38,-16;; stf.spill [sp]=f39,-16;; stf.spill [sp]=f40,-16;; stf.spill [sp]=f41,-16;; stf.spill [sp]=f42,-16;; stf.spill [sp]=f43,-16;; stf.spill [sp]=f44,-16;; stf.spill [sp]=f45,-16;; stf.spill [sp]=f46,-16;; stf.spill [sp]=f47,-16;; stf.spill [sp]=f48,-16;; stf.spill [sp]=f49,-16;; stf.spill [sp]=f50,-16;; stf.spill [sp]=f51,-16;; stf.spill [sp]=f52,-16;; stf.spill [sp]=f53,-16;; stf.spill [sp]=f54,-16;; stf.spill [sp]=f55,-16;; stf.spill [sp]=f56,-16;; stf.spill [sp]=f57,-16;; stf.spill [sp]=f58,-16;; stf.spill [sp]=f59,-16;; stf.spill [sp]=f60,-16;; stf.spill [sp]=f61,-16;; stf.spill [sp]=f62,-16;; stf.spill [sp]=f63,-16;; stf.spill [sp]=f64,-16;; stf.spill 
[sp]=f65,-16;; stf.spill [sp]=f66,-16;; stf.spill [sp]=f67,-16;; stf.spill [sp]=f68,-16;; stf.spill [sp]=f69,-16;; stf.spill [sp]=f70,-16;; stf.spill [sp]=f71,-16;; stf.spill [sp]=f72,-16;; stf.spill [sp]=f73,-16;; stf.spill [sp]=f74,-16;; stf.spill [sp]=f75,-16;; stf.spill [sp]=f76,-16;; stf.spill [sp]=f77,-16;; stf.spill [sp]=f78,-16;; stf.spill [sp]=f79,-16;; stf.spill [sp]=f80,-16;; stf.spill [sp]=f81,-16;; stf.spill [sp]=f82,-16;; stf.spill [sp]=f83,-16;; stf.spill [sp]=f84,-16;; stf.spill [sp]=f85,-16;; stf.spill [sp]=f86,-16;; stf.spill [sp]=f87,-16;; stf.spill [sp]=f88,-16;; stf.spill [sp]=f89,-16;; stf.spill [sp]=f90,-16;; stf.spill [sp]=f91,-16;; stf.spill [sp]=f92,-16;; stf.spill [sp]=f93,-16;; stf.spill [sp]=f94,-16;; stf.spill [sp]=f95,-16;; stf.spill [sp]=f96,-16;; stf.spill [sp]=f97,-16;; stf.spill [sp]=f98,-16;; stf.spill [sp]=f99,-16;; stf.spill [sp]=f100,-16;;stf.spill [sp]=f101,-16;;stf.spill [sp]=f102,-16;;stf.spill [sp]=f103,-16;; stf.spill [sp]=f104,-16;;stf.spill [sp]=f105,-16;;stf.spill [sp]=f106,-16;;stf.spill [sp]=f107,-16;; stf.spill [sp]=f108,-16;;stf.spill [sp]=f109,-16;;stf.spill [sp]=f110,-16;;stf.spill [sp]=f111,-16;; stf.spill [sp]=f112,-16;;stf.spill [sp]=f113,-16;;stf.spill [sp]=f114,-16;;stf.spill [sp]=f115,-16;; stf.spill [sp]=f116,-16;;stf.spill [sp]=f117,-16;;stf.spill [sp]=f118,-16;;stf.spill [sp]=f119,-16;; stf.spill [sp]=f120,-16;;stf.spill [sp]=f121,-16;;stf.spill [sp]=f122,-16;;stf.spill [sp]=f123,-16;; stf.spill [sp]=f124,-16;;stf.spill [sp]=f125,-16;;stf.spill [sp]=f126,-16;;stf.spill [sp]=f127,-16;; invala;; br b7;; //------------------------------// // restore fp registers // //------------------------------// restore_fp_registers:: ldf.fill f127=[sp],16;;ldf.fill f126=[sp],16;;ldf.fill f125=[sp],16;;ldf.fill f124=[sp],16;; ldf.fill f123=[sp],16;;ldf.fill f122=[sp],16;;ldf.fill f121=[sp],16;;ldf.fill f120=[sp],16;; ldf.fill f119=[sp],16;;ldf.fill f118=[sp],16;;ldf.fill f117=[sp],16;;ldf.fill f116=[sp],16;; ldf.fill 
f115=[sp],16;;ldf.fill f114=[sp],16;;ldf.fill f113=[sp],16;;ldf.fill f112=[sp],16;; ldf.fill f111=[sp],16;;ldf.fill f110=[sp],16;;ldf.fill f109=[sp],16;;ldf.fill f108=[sp],16;; ldf.fill f107=[sp],16;;ldf.fill f106=[sp],16;;ldf.fill f105=[sp],16;;ldf.fill f104=[sp],16;; ldf.fill f103=[sp],16;;ldf.fill f102=[sp],16;;ldf.fill f101=[sp],16;;ldf.fill f100=[sp],16;; ldf.fill f99=[sp],16;; ldf.fill f98=[sp],16;; ldf.fill f97=[sp],16;; ldf.fill f96=[sp],16;; ldf.fill f95=[sp],16;; ldf.fill f94=[sp],16;; ldf.fill f93=[sp],16;; ldf.fill f92=[sp],16;; ldf.fill f91=[sp],16;; ldf.fill f90=[sp],16;; ldf.fill f89=[sp],16;; ldf.fill f88=[sp],16;; ldf.fill f87=[sp],16;; ldf.fill f86=[sp],16;; ldf.fill f85=[sp],16;; ldf.fill f84=[sp],16;; ldf.fill f83=[sp],16;; ldf.fill f82=[sp],16;; ldf.fill f81=[sp],16;; ldf.fill f80=[sp],16;; ldf.fill f79=[sp],16;; ldf.fill f78=[sp],16;; ldf.fill f77=[sp],16;; ldf.fill f76=[sp],16;; ldf.fill f75=[sp],16;; ldf.fill f74=[sp],16;; ldf.fill f73=[sp],16;; ldf.fill f72=[sp],16;; ldf.fill f71=[sp],16;; ldf.fill f70=[sp],16;; ldf.fill f69=[sp],16;; ldf.fill f68=[sp],16;; ldf.fill f67=[sp],16;; ldf.fill f66=[sp],16;; ldf.fill f65=[sp],16;; ldf.fill f64=[sp],16;; ldf.fill f63=[sp],16;; ldf.fill f62=[sp],16;; ldf.fill f61=[sp],16;; ldf.fill f60=[sp],16;; ldf.fill f59=[sp],16;; ldf.fill f58=[sp],16;; ldf.fill f57=[sp],16;; ldf.fill f56=[sp],16;; ldf.fill f55=[sp],16;; ldf.fill f54=[sp],16;; ldf.fill f53=[sp],16;; ldf.fill f52=[sp],16;; ldf.fill f51=[sp],16;; ldf.fill f50=[sp],16;; ldf.fill f49=[sp],16;; ldf.fill f48=[sp],16;; ldf.fill f47=[sp],16;; ldf.fill f46=[sp],16;; ldf.fill f45=[sp],16;; ldf.fill f44=[sp],16;; ldf.fill f43=[sp],16;; ldf.fill f42=[sp],16;; ldf.fill f41=[sp],16;; ldf.fill f40=[sp],16;; ldf.fill f39=[sp],16;; ldf.fill f38=[sp],16;; ldf.fill f37=[sp],16;; ldf.fill f36=[sp],16;; ldf.fill f35=[sp],16;; ldf.fill f34=[sp],16;; ldf.fill f33=[sp],16;; ldf.fill f32=[sp],16;; ldf.fill f31=[sp],16;; ldf.fill f30=[sp],16;; ldf.fill f29=[sp],16;; 
ldf.fill f28=[sp],16;; ldf.fill f27=[sp],16;; ldf.fill f26=[sp],16;; ldf.fill f25=[sp],16;; ldf.fill f24=[sp],16;; ldf.fill f23=[sp],16;; ldf.fill f22=[sp],16;; ldf.fill f21=[sp],16;; ldf.fill f20=[sp],16;; ldf.fill f19=[sp],16;; ldf.fill f18=[sp],16;; ldf.fill f17=[sp],16;; ldf.fill f16=[sp],16;; ldf.fill f15=[sp],16;; ldf.fill f14=[sp],16;; ldf.fill f13=[sp],16;; ldf.fill f12=[sp],16;; ldf.fill f11=[sp],16;; ldf.fill f10=[sp],16;; ldf.fill f9=[sp],16;; ldf.fill f8=[sp],16;; ldf.fill f7=[sp],16;; ldf.fill f6=[sp],16;; ldf.fill f5=[sp],16;; ldf.fill f4=[sp],16;; ldf.fill f3=[sp],16;; ldf.fill f2=[sp],16;; invala;; br b7;; //----------------------------------------------------------------------------- //++ // EsalSetSalDataArea // // Register physical address of Esal Data Area // // On Entry : // in0 = Reverse Thunk Address // in1 = IntThunk Address // // Return Value: // r8 = SAL_SUCCESS // // As per static calling conventions. // //-- //--------------------------------------------------------------------------- PROCEDURE_ENTRY (EsalSetSalDataArea) NESTED_SETUP (4,8,0,0) EsalCalcStart1_3:: mov r8 = ip;; add r8 = (ReverseThunkAddress - EsalCalcStart1_3), r8;; st8 [r8] = in0;; EsalCalcStart1_4:: mov r8 = ip;; add r8 = (IntThunkAddress - EsalCalcStart1_4), r8;; st8 [r8] = in1;; mov r8 = r0;; NESTED_RETURN PROCEDURE_EXIT (EsalSetSalDataArea) //----------------------------------------------------------------------------- //++ // EsagGetReverseThunkAddress // // Register physical address of Esal Data Area // // On Entry : // out0 = CodeStart // out1 = CodeEnd // out1 = ReverseThunkCode // // Return Value: // r8 = SAL_SUCCESS // // As per static calling conventions. 
// //-- //--------------------------------------------------------------------------- PROCEDURE_ENTRY (EsalGetReverseThunkAddress) NESTED_SETUP (4,8,0,0) EsalCalcStart1_31:: mov r8 = ip;; add r8 = (Ia32CodeStart - EsalCalcStart1_31), r8;; mov r9 = r8;; EsalCalcStart1_41:: mov r8 = ip;; add r8 = (Ia32CodeEnd - EsalCalcStart1_41), r8;; mov r10 = r8;; EsalCalcStart1_51:: mov r8 = ip;; add r8 = (ReverseThunkAddress - EsalCalcStart1_51), r8;; mov r11 = r8;; mov r8 = r0;; NESTED_RETURN PROCEDURE_EXIT (EsalGetReverseThunkAddress) .align 16 PROCEDURE_ENTRY (InterruptRedirectionTemplate) data8 0x90CFCD08 data8 0x90CFCD09 data8 0x90CFCD0A data8 0x90CFCD0B data8 0x90CFCD0C data8 0x90CFCD0D data8 0x90CFCD0E data8 0x90CFCD0F PROCEDURE_EXIT (InterruptRedirectionTemplate) //------------------------------// // Reverse Thunk Code // //------------------------------// Ia32CodeStart:: br.sptk.few Ia32CodeStart;; // IPF CSM integration -Bug (Write This Code) ReverseThunkCode:: data8 0xb80f66fa // CLI, JMPE xxxx ReverseThunkAddress:: data8 0 // Return Address IntThunkAddress:: data8 0 // IntThunk Address Ia32CodeEnd::
alainesp/HashSuiteDroid
3,818
Hash_Suite/arch_neon64.S
// This file is part of Hash Suite password cracker, // Copyright (c) 2019 by Alain Espinosa. See LICENSE. .text .align 2 .global crypt_ntlm_neon_kernel_asm .type crypt_ntlm_neon_kernel_asm, %function crypt_ntlm_neon_kernel_asm: ret ///////////////////////////////////////////////////////////////////////////////////////////////// // MD5 ///////////////////////////////////////////////////////////////////////////////////////////////// .text .align 2 .global crypt_md5_neon_kernel_asm .type crypt_md5_neon_kernel_asm, %function crypt_md5_neon_kernel_asm: ret .text .align 2 .global md5_one_block_neon .type md5_one_block_neon, %function md5_one_block_neon: ret ///////////////////////////////////////////////////////////////////////////////////////////////// // SHA1 ///////////////////////////////////////////////////////////////////////////////////////////////// .text .align 2 .global crypt_sha1_neon_kernel_asm .type crypt_sha1_neon_kernel_asm, %function crypt_sha1_neon_kernel_asm: ret ///////////////////////////////////////////////////////////////////////////////////////////////// // DCC ///////////////////////////////////////////////////////////////////////////////////////////////// .text .align 2 .global dcc_ntlm_part_neon .type dcc_ntlm_part_neon, %function dcc_ntlm_part_neon: ret #define dcc_salt_part_neon_body(idx) \ .text;\ .align 2;\ .global dcc_salt_part_neon ## idx;\ .type dcc_salt_part_neon ## idx, %function;\ dcc_salt_part_neon ## idx:\ ret // Funtions by salt_lenght dcc_salt_part_neon_body(13) dcc_salt_part_neon_body(12) dcc_salt_part_neon_body(11) dcc_salt_part_neon_body(10) dcc_salt_part_neon_body(9) dcc_salt_part_neon_body(8) dcc_salt_part_neon_body(7) dcc_salt_part_neon_body(6) dcc_salt_part_neon_body(5) dcc_salt_part_neon_body(4) ///////////////////////////////////////////////////////////////////////////////////////////////// // DCC2 format ///////////////////////////////////////////////////////////////////////////////////////////////// .text .align 2 
.global sha1_process_sha1_neon .type sha1_process_sha1_neon, %function sha1_process_sha1_neon: ret //////////////////////////////////////////////////////////////////////////////////////////////////////////////// // SHA256 format //////////////////////////////////////////////////////////////////////////////////////////////////////////////// .text .align 2 .global crypt_sha256_neon_kernel_asm .type crypt_sha256_neon_kernel_asm, %function crypt_sha256_neon_kernel_asm: ret ///////////////////////////////////////////////////////////////////////////////////////////////// // SHA512 format ///////////////////////////////////////////////////////////////////////////////////////////////// .text .align 2 .global crypt_sha512_neon_kernel_asm .type crypt_sha512_neon_kernel_asm, %function crypt_sha512_neon_kernel_asm: ret ///////////////////////////////////////////////////////////////////////////////////////////////// // LM format ///////////////////////////////////////////////////////////////////////////////////////////////// // Sboxs .text .align 2 .global s1 .type s1, %function s1: ret .text .align 2 .global s2 .type s2, %function s2: ret .text .align 2 .global s3 .type s3, %function s3: ret .text .align 2 .global s4 .type s4, %function s4: ret .text .align 2 .global s5 .type s5, %function s5: ret .text .align 2 .global s6 .type s6, %function s6: ret .text .align 2 .global s7 .type s7, %function s7: ret .text .align 2 .global s8 .type s8, %function s8: ret .text .align 2 .global lm_eval_neon_kernel .type lm_eval_neon_kernel, %function lm_eval_neon_kernel: ret // Charset .text .align 2 .global memset_uint_neon .type memset_uint_neon, %function memset_uint_neon: ret
Aladdin-Wang/MicroLink
3,366
MicroLink/microlink_app/hpm_sdk_localized_for_hpm5301evklite/soc/HPM5300/HPM5301/toolchains/iar/startup.s
/* * Copyright (c) 2023-2024 HPMicro * SPDX-License-Identifier: BSD-3-Clause */ #include "hpm_csr_regs.h" MODULE ?startup /* Forward declaration of sections */ SECTION CSTACK:DATA:NOROOT(3) SECTION SAFESTACK:DATA:NOROOT(3) SECTION `.vector_table`:CODE:NOROOT(3) EXTERN _clean_up EXTERN reset_handler EXTERN __low_level_init EXTERN irq_handler_trap EXTERN __iar_static_base$$GPREL EXTERN __iar_data_init2 EXTERN l1c_ic_disable EXTERN l1c_ic_enable EXTERN l1c_dc_invalidate_all EXTERN l1c_dc_enable EXTERN l1c_dc_disable // -------------------------------------------------- SECTION `.startup`:CODE:ROOT(2) EXTERN __iar_program_start PUBLIC _start EXTERN reset_handler _start: __iar_cstart_init_gp: .option push .option norelax /* Initialize global pointer */ la gp, __iar_static_base$$GPREL .option pop /* reset mstatus to 0 */ csrrw x0, mstatus, x0 #ifdef __riscv_flen __iar_cstart_init_fpu: /* Enable FPU */ li t0, CSR_MSTATUS_FS_MASK csrrs t0, mstatus, t0 /* Initialize FCSR */ fscsr zero #endif __iar_cstart_init_stack: /* Initialize Stack pointer */ la t0, SFE(CSTACK) mv sp, t0 #ifdef __nds_execit __iar_cstart_init_uitb: EXTERN `.exec.itable$$Base` la a0, `.exec.itable$$Base` csrw 0x800, a0 #endif #ifdef CONFIG_NOT_ENABLE_ICACHE call l1c_ic_disable #else call l1c_ic_enable #endif #ifdef CONFIG_NOT_ENABLE_DCACHE call l1c_dc_invalidate_all call l1c_dc_disable #else call l1c_dc_enable call l1c_dc_invalidate_all #endif /* Call IAR low-levle API to initialize BSS, RW Data, RAM Function, etc */ call __low_level_init call __iar_data_init2 fence.i #ifndef NO_CLEANUP_AT_START /* clean up */ call _clean_up #endif __iar_cstart_init_mvec: #if defined(CONFIG_FREERTOS) && CONFIG_FREERTOS EXTERN freertos_risc_v_trap_handler #define HANDLER_TRAP freertos_risc_v_trap_handler /* Use mscratch to store isr level */ csrw mscratch, x0 #elif defined(CONFIG_UCOS_III) && CONFIG_UCOS_III EXTERN ucos_risc_v_trap_handler #define HANDLER_TRAP ucos_risc_v_trap_handler /* Use mscratch to store isr level */ 
csrw mscratch, x0 #elif defined(CONFIG_THREADX) && CONFIG_THREADX EXTERN tx_risc_v_trap_handler #define HANDLER_TRAP tx_risc_v_trap_handler #define HANDLER_S_TRAP tx_risc_v_trap_handler /* Use mscratch to store isr level */ csrw mscratch, x0 #elif defined(CONFIG_RTTHREAD) && CONFIG_RTTHREAD EXTERN rtt_risc_v_trap_handler #define HANDLER_TRAP rtt_risc_v_trap_handler /* Use mscratch to store isr level */ csrw mscratch, x0 #else #define HANDLER_TRAP irq_handler_trap #endif #if !defined(USE_NONVECTOR_MODE) || (USE_NONVECTOR_MODE == 0) /* Initial machine trap-vector Base */ la t0, SFB(`.vector_table`) csrw mtvec, t0 /* Enable vectored external PLIC interrupt */ csrsi CSR_MMISC_CTL, 2 #else /* Initial machine trap-vector Base */ la t0, HANDLER_TRAP csrw mtvec, t0 /* Disable vectored external PLIC interrupt */ csrci CSR_MMISC_CTL, 2 #endif /* Jump to reset handler once all settings have done */ call reset_handler __iar_cstart_exit j __iar_cstart_exit #include "../vectors.h"
Aladdin-Wang/MicroLink
3,312
MicroLink/microlink_app/hpm_sdk_localized_for_hpm5301evklite/soc/HPM5300/HPM5301/toolchains/gcc/start.S
/* * Copyright (c) 2021-2022 HPMicro * * SPDX-License-Identifier: BSD-3-Clause * */ #include "hpm_csr_regs.h" .section .start, "ax" .global _start .type _start,@function _start: /* Initialize global pointer */ .option push .option norelax la gp, __global_pointer$ la tp, __thread_pointer$ .option pop /* reset mstatus to 0*/ csrrw x0, mstatus, x0 #ifdef __riscv_flen /* Enable FPU */ li t0, CSR_MSTATUS_FS_MASK csrrs t0, mstatus, t0 /* Initialize FCSR */ fscsr zero #endif #ifdef INIT_EXT_RAM_FOR_DATA la t0, _stack_safe mv sp, t0 call _init_ext_ram #endif /* Initialize stack pointer */ la t0, _stack mv sp, t0 #ifdef CONFIG_NOT_ENABLE_ICACHE call l1c_ic_disable #else call l1c_ic_enable #endif #ifdef CONFIG_NOT_ENABLE_DCACHE call l1c_dc_invalidate_all call l1c_dc_disable #else call l1c_dc_enable call l1c_dc_invalidate_all #endif /* * Initialize LMA/VMA sections. * Relocation for any sections that need to be copied from LMA to VMA. */ call c_startup #if defined(__SES_RISCV) /* Initialize the heap */ la a0, __heap_start__ la a1, __heap_end__ sub a1, a1, a0 la t1, __SEGGER_RTL_init_heap jalr t1 #endif /* Do global constructors */ call __libc_init_array #ifndef NO_CLEANUP_AT_START /* clean up */ call _clean_up #endif #ifdef __nds_execit /* Initialize EXEC.IT table */ la t0, _ITB_BASE_ csrw uitb, t0 #endif #if defined(CONFIG_FREERTOS) && CONFIG_FREERTOS #define HANDLER_TRAP freertos_risc_v_trap_handler #define HANDLER_S_TRAP freertos_risc_v_trap_handler /* Use mscratch to store isr level */ csrw mscratch, 0 #elif defined(CONFIG_UCOS_III) && CONFIG_UCOS_III #define HANDLER_TRAP ucos_risc_v_trap_handler #define HANDLER_S_TRAP ucos_risc_v_trap_handler /* Use mscratch to store isr level */ csrw mscratch, 0 #elif defined(CONFIG_THREADX) && CONFIG_THREADX #define HANDLER_TRAP tx_risc_v_trap_handler #define HANDLER_S_TRAP tx_risc_v_trap_handler /* Use mscratch to store isr level */ csrw mscratch, 0 #elif defined(CONFIG_RTTHREAD) && CONFIG_RTTHREAD #define HANDLER_TRAP 
rtt_risc_v_trap_handler #define HANDLER_S_TRAP rtt_risc_v_trap_handler /* Use mscratch to store isr level */ csrw mscratch, 0 #else #define HANDLER_TRAP irq_handler_trap #define HANDLER_S_TRAP irq_handler_s_trap #endif #if !defined(USE_NONVECTOR_MODE) || (USE_NONVECTOR_MODE == 0) /* Initial machine trap-vector Base */ la t0, __vector_table csrw mtvec, t0 #if defined (USE_S_MODE_IRQ) la t0, __vector_s_table csrw stvec, t0 #endif /* Enable vectored external PLIC interrupt */ csrsi CSR_MMISC_CTL, 2 #else /* Initial machine trap-vector Base */ la t0, HANDLER_TRAP csrw mtvec, t0 #if defined (USE_S_MODE_IRQ) la t0, HANDLER_S_TRAP csrw stvec, t0 #endif /* Disable vectored external PLIC interrupt */ csrci CSR_MMISC_CTL, 2 #endif /* System reset handler */ call reset_handler /* Infinite loop, if returned accidentally */ 1: j 1b .weak exit exit: 1: j 1b .section .isr_vector, "ax" .weak nmi_handler nmi_handler: 1: j 1b #include "../vectors.h"
Aladdin-Wang/MicroLink
12,474
MicroLink/microlink_app/hpm_sdk_localized_for_hpm5301evklite/soc/HPM5300/HPM5301/toolchains/segger/startup.s
/********************************************************************* * SEGGER Microcontroller GmbH * * The Embedded Experts * ********************************************************************** * * * (c) 2014 - 2021 SEGGER Microcontroller GmbH * * * * www.segger.com Support: support@segger.com * * * ********************************************************************** * * * All rights reserved. * * * * Redistribution and use in source and binary forms, with or * * without modification, are permitted provided that the following * * condition is met: * * * * - Redistributions of source code must retain the above copyright * * notice, this condition and the following disclaimer. * * * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND * * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * * DISCLAIMED. IN NO EVENT SHALL SEGGER Microcontroller BE LIABLE FOR * * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * * DAMAGE. * * * ********************************************************************** -------------------------- END-OF-HEADER ----------------------------- File : SEGGER_RISCV_crt0.s Purpose : Generic runtime init startup code for RISC-V CPUs. Designed to work with the SEGGER linker to produce smallest possible executables. This file does not normally require any customization. 
Additional information: Preprocessor Definitions FULL_LIBRARY If defined then - argc, argv are set up by calling SEGGER_SEMIHOST_GetArgs(). - the exit symbol is defined and executes on return from main. - the exit symbol calls destructors, atexit functions and then calls SEGGER_SEMIHOST_Exit(). If not defined then - argc and argv are not valid (main is assumed to not take parameters) - the exit symbol is defined, executes on return from main and halts in a loop. */ #include "hpm_csr_regs.h" /********************************************************************* * * Defines, configurable * ********************************************************************** */ #ifndef APP_ENTRY_POINT #define APP_ENTRY_POINT reset_handler #endif #ifndef ARGSSPACE #define ARGSSPACE 128 #endif /********************************************************************* * * Macros * ********************************************************************** */ // // Declare a label as function symbol (without switching sections) // .macro MARK_FUNC Name .global \Name .type \Name, function \Name: .endm // // Declare a regular function. // Functions from the startup are placed in the init section. 
// .macro START_FUNC Name .section .init.\Name, "ax" .global \Name #if __riscv_compressed .balign 2 #else .balign 4 #endif .type \Name, function \Name: .endm // // Declare a weak function // .macro WEAK_FUNC Name .section .init.\Name, "ax", %progbits .global \Name .weak \Name #if __riscv_compressed .balign 2 #else .balign 4 #endif .type \Name, function \Name: .endm // // Mark the end of a function and calculate its size // .macro END_FUNC name .size \name,.-\name .endm /********************************************************************* * * Externals * ********************************************************************** */ .extern APP_ENTRY_POINT // typically main /********************************************************************* * * Global functions * ********************************************************************** */ /********************************************************************* * * _start * * Function description * Entry point for the startup code. * Usually called by the reset handler. * Performs all initialisation, based on the entries in the * linker-generated init table, then calls main(). * It is device independent, so there should not be any need for an * end-user to modify it. * * Additional information * At this point, the stack pointer should already have been * initialized * - by hardware (such as on Cortex-M), * - by the device-specific reset handler, * - or by the debugger (such as for RAM Code). 
*/ #undef L #define L(label) .L_start_##label START_FUNC _start .option push .option norelax lui gp, %hi(__global_pointer$) addi gp, gp, %lo(__global_pointer$) lui tp, %hi(__thread_pointer$) addi tp, tp, %lo(__thread_pointer$) .option pop csrw mstatus, zero csrw mcause, zero #ifdef __riscv_flen /* Enable FPU */ li t0, CSR_MSTATUS_FS_MASK csrrs t0, mstatus, t0 /* Initialize FCSR */ fscsr zero #endif #ifdef INIT_EXT_RAM_FOR_DATA la t0, _stack_safe mv sp, t0 call _init_ext_ram #endif lui t0, %hi(__stack_end__) addi sp, t0, %lo(__stack_end__) #ifdef CONFIG_NOT_ENABLE_ICACHE call l1c_ic_disable #else call l1c_ic_enable #endif #ifdef CONFIG_NOT_ENABLE_DCACHE call l1c_dc_invalidate_all call l1c_dc_disable #else call l1c_dc_enable call l1c_dc_invalidate_all #endif #ifndef __NO_SYSTEM_INIT // // Call _init // call _init #endif // // Call linker init functions which in turn performs the following: // * Perform segment init // * Perform heap init (if used) // * Call constructors of global Objects (if any exist) // la s0, __SEGGER_init_table__ // Set table pointer to start of initialization table L(RunInit): lw a0, (s0) // Get next initialization function from table add s0, s0, 4 // Increment table pointer to point to function arguments jalr a0 // Call initialization function j L(RunInit) // MARK_FUNC __SEGGER_init_done // // Time to call main(), the application entry point. 
// #ifndef NO_CLEANUP_AT_START /* clean up */ call _clean_up #endif #if defined(CONFIG_FREERTOS) && CONFIG_FREERTOS #define HANDLER_TRAP freertos_risc_v_trap_handler #define HANDLER_S_TRAP freertos_risc_v_trap_handler /* Use mscratch to store isr level */ csrw mscratch, 0 #elif defined(CONFIG_UCOS_III) && CONFIG_UCOS_III #define HANDLER_TRAP ucos_risc_v_trap_handler #define HANDLER_S_TRAP ucos_risc_v_trap_handler /* Use mscratch to store isr level */ csrw mscratch, 0 #elif defined(CONFIG_THREADX) && CONFIG_THREADX #define HANDLER_TRAP tx_risc_v_trap_handler #define HANDLER_S_TRAP tx_risc_v_trap_handler /* Use mscratch to store isr level */ csrw mscratch, 0 #elif defined(CONFIG_RTTHREAD) && CONFIG_RTTHREAD #define HANDLER_TRAP rtt_risc_v_trap_handler #define HANDLER_S_TRAP rtt_risc_v_trap_handler /* Use mscratch to store isr level */ csrw mscratch, 0 #else #define HANDLER_TRAP irq_handler_trap #define HANDLER_S_TRAP irq_handler_s_trap #endif #if !defined(USE_NONVECTOR_MODE) || (USE_NONVECTOR_MODE == 0) /* Initial machine trap-vector Base */ la t0, __vector_table csrw mtvec, t0 #if defined (USE_S_MODE_IRQ) la t0, __vector_s_table csrw stvec, t0 #endif /* Enable vectored external PLIC interrupt */ csrsi CSR_MMISC_CTL, 2 #else /* Initial machine trap-vector Base */ la t0, HANDLER_TRAP csrw mtvec, t0 #if defined (USE_S_MODE_IRQ) la t0, HANDLER_S_TRAP csrw stvec, t0 #endif /* Disable vectored external PLIC interrupt */ csrci CSR_MMISC_CTL, 2 #endif __startup_complete: MARK_FUNC start #ifndef FULL_LIBRARY // // In a real embedded application ("Free-standing environment"), // main() does not get any arguments, // which means it is not necessary to init a0 and a1. // call APP_ENTRY_POINT tail exit END_FUNC _start // // end of _start // Fall-through to exit if main ever returns. // MARK_FUNC exit // // In a free-standing environment, if returned from application: // Loop forever. // j . 
.size exit,.-exit #else // // In a hosted environment, // we need to load a0 and a1 with argc and argv, in order to handle // the command line arguments. // This is required for some programs running under control of a // debugger, such as automated tests. // li a0, ARGSSPACE la a1, args call debug_getargs li a0, ARGSSPACE la a1, args call APP_ENTRY_POINT // Call to application entry point (usually main()) call exit // Call exit function j . // If we unexpectedly return from exit, hang. END_FUNC _start #endif #ifdef FULL_LIBRARY li a0, ARGSSPACE la a1, args call debug_getargs li a0, ARGSSPACE la a1, args #else li a0, 0 li a1, 0 #endif call APP_ENTRY_POINT tail exit END_FUNC _start // #ifdef FULL_LIBRARY /********************************************************************* * * exit * * Function description * Exit of the system. * Called on return from application entry point or explicit call * to exit. * * Additional information * In a hosted environment exit gracefully, by * saving the return value, * calling destructurs of global objects, * calling registered atexit functions, * and notifying the host/debugger. */ #undef L #define L(label) .L_exit_##label WEAK_FUNC exit mv s1, a0 // Save the exit parameter/return result // // Call destructors // la s0, __dtors_start__ L(Loop): la t0, __dtors_end__ beq s0, t0, L(End) lw t1, 0(s0) addi s0, s0, 4 jalr t1 j L(Loop) L(End): // // Call atexit functions // call _execute_at_exit_fns // // Call debug_exit with return result/exit parameter // mv a0, s1 call debug_exit // // If execution is not terminated, loop forever // L(ExitLoop): j L(ExitLoop) // Loop forever. END_FUNC exit #endif #ifdef FULL_LIBRARY .bss args: .space ARGSSPACE .size args, .-args .type args, %object #endif .section .isr_vector, "ax" .weak nmi_handler nmi_handler: 1: j 1b #include "../vectors.h" /*************************** End of file ****************************/
alainesp/HashSuiteDroid
162,483
Hash_Suite/arch_neon.S
// This file is part of Hash Suite password cracker, // Copyright (c) 2014-2015,2019 by Alain Espinosa. See LICENSE. #define ROTATE(reg1,reg2,rot,t1,t2) \ vshr.u32 t1 , reg1, #(32-rot);\ vshr.u32 t2 , reg2, #(32-rot);\ vshl.u32 reg1, reg1, #(rot);\ vshl.u32 reg2, reg2, #(rot);\ vorr.u32 reg1, reg1, t1;\ vorr.u32 reg2, reg2, t2; #define LOAD_NT_BUFFER(a1,a2,index) \ add nt_buffer, nt_buffer_base, #(4*index*NT_NUM_KEYS);\ vld1.u32 {t3,t4}, [nt_buffer:128];\ vadd.u32 a1, a1, t3;\ vadd.u32 a2, a2, t4; #define LOAD_NT_BUFFER_0(a1,a2) \ vld1.u32 {t3,t4}, [nt_buffer_base:128];\ vadd.u32 a1, a1, t3;\ vadd.u32 a2, a2, t4; #define LOAD_NT_BUFFER_14(a1,a2) \ vadd.u32 a1, a1, t5;\ vadd.u32 a2, a2, t6; #define LOAD_NT_BUFFER_1(a1,a2) LOAD_NT_BUFFER(a1,a2,1) #define LOAD_NT_BUFFER_2(a1,a2) LOAD_NT_BUFFER(a1,a2,2) #define LOAD_NT_BUFFER_3(a1,a2) LOAD_NT_BUFFER(a1,a2,3) #define LOAD_NT_BUFFER_4(a1,a2) LOAD_NT_BUFFER(a1,a2,4) #define LOAD_NT_BUFFER_5(a1,a2) LOAD_NT_BUFFER(a1,a2,5) #define LOAD_NT_BUFFER_6(a1,a2) LOAD_NT_BUFFER(a1,a2,6) #define LOAD_NT_BUFFER_7(a1,a2) LOAD_NT_BUFFER(a1,a2,7) #define LOAD_NT_BUFFER_8(a1,a2) LOAD_NT_BUFFER(a1,a2,8) #define LOAD_NT_BUFFER_9(a1,a2) LOAD_NT_BUFFER(a1,a2,9) #define LOAD_NT_BUFFER_10(a1,a2) LOAD_NT_BUFFER(a1,a2,10) #define LOAD_NT_BUFFER_11(a1,a2) LOAD_NT_BUFFER(a1,a2,11) #define LOAD_NT_BUFFER_12(a1,a2) LOAD_NT_BUFFER(a1,a2,12) #define LOAD_NT_BUFFER_13(a1,a2) LOAD_NT_BUFFER(a1,a2,13) #define LOAD_NT_BUFFER_15(a1,a2) #define STEP1(a1,b1,c1,d1,a2,b2,c2,d2,index,rot,NT_NUM_KEYS) \ LOAD_NT_BUFFER_ ## index (a1,a2)\ vmov.u32 t1, b1;\ vmov.u32 t2, b2;\ vbsl t1, c1, d1;\ vbsl t2, c2, d2;\ vadd.u32 a1, a1, t1;\ vadd.u32 a2, a2, t2;\ ROTATE(a1, a2, rot,t1,t2) #define STEP2(a1,b1,c1,d1,a2,b2,c2,d2,index,rot,NT_NUM_KEYS,sqrt_2) \ LOAD_NT_BUFFER_ ## index (a1,a2)\ veor.u32 t1, c1, d1;\ veor.u32 t2, c2, d2;\ vbsl t1, b1, c1;\ vbsl t2, b2, c2;\ vadd.u32 a1, a1, sqrt_2;\ vadd.u32 a2, a2, sqrt_2;\ vadd.u32 a1, a1, t1;\ vadd.u32 a2, a2, t2;\ ROTATE(a1, 
a2, rot,t1,t2) #define STEP3(a1,b1,c1,d1,a2,b2,c2,d2,index,rot,NT_NUM_KEYS,sqrt_3) \ LOAD_NT_BUFFER_ ## index (a1,a2)\ veor.u32 t1, d1, c1;\ veor.u32 t2, d2, c2;\ vadd.u32 a1, a1, sqrt_3;\ vadd.u32 a2, a2, sqrt_3;\ veor.u32 t1, t1, b1;\ veor.u32 t2, t2, b2;\ vadd.u32 a1, a1, t1;\ vadd.u32 a2, a2, t2;\ ROTATE(a1, a2, rot,t1,t2) #define nt_buffer_base r0 #define table_ptr r1 #define size_bit_table_reg r2 #define nt_buffer r3 #define i r4 #define reg_a1 q0 #define reg_a2 q1 #define reg_b1 q2 #define reg_b2 q3 #define reg_c1 q4 #define reg_c2 q5 #define reg_d1 q6 #define reg_d2 q7 #define t1 q8 #define t1_0 d16[0] #define t1_1 d16[1] #define t1_2 d17[0] #define t1_3 d17[1] #define t2 q9 #define t2_0 d18[0] #define t2_1 d18[1] #define t2_2 d19[0] #define t2_3 d19[1] #define t3 q10 #define t4 q11 #define sqrt_2 q12 #define sqrt_3 q13 #define t7 q12 #define t8 q13 #define t5 q14 #define t5_0 d28[0] #define t5_1 d28[1] #define t5_2 d29[0] #define t5_3 d29[1] #define t6 q15 #define t6_0 d30[0] #define t6_1 d30[1] #define t6_2 d31[0] #define t6_3 d31[1] #define NT_NUM_KEYS 128 #define REG_BYTE_SIZE 16 // Store a 32-bit constant into a register. 
// eg: SET_REG r1, 0x11223344
// Recommended for ARMv6+ because the number is stored inside the instruction
//.macro SET_REG reg,val
//	movw \reg, #:lower16:\val
//	movt \reg, #:upper16:\val
//.endm
#define SET_REG(reg,val) \
	movw reg, #(val & 0xffff);\
	movt reg, #(val >> 16)

//-----------------------------------------------------------------------------
// void crypt_ntlm_neon_kernel_asm(uint32_t* nt_buffer)  -- signature assumed
// from register use: r0 = interleaved key buffer; results are written back
// into the same buffer at offset 16*4*NT_NUM_KEYS.  TODO(review): confirm
// prototype against the C caller.
// Computes MD4 (NTLM) over NT_NUM_KEYS keys, 8 keys per loop iteration.
// The final step is only partially computed (no rotate / no state addition):
// a common "reversed steps" cracker optimization -- the comparison code must
// expect this.
//-----------------------------------------------------------------------------
.text
.align 2
.global crypt_ntlm_neon_kernel_asm
.type   crypt_ntlm_neon_kernel_asm, %function
crypt_ntlm_neon_kernel_asm:
	// AAPCS: preserve callee-saved q4-q7 and r4-r11.
	vpush {q4,q5,q6,q7}
	push {r4,r5,r6,r7}
	push {r8,r9,r10,r11}

	// Round constants: sqrt(2), sqrt(3) fractional parts (MD4).
	SET_REG(r7,0x5a827999)
	SET_REG(r8,0x6ed9eba1)
	vdup.u32 sqrt_2, r7
	vdup.u32 sqrt_3, r8

	// MD4 initial state words (b, c, d init values kept in core regs).
	SET_REG(r5,0x98badcfe)
	SET_REG(r6,0x10325476)
	SET_REG(r7,0xefcdab89)

	mov i, #0 // i=0
while1:
	// Round 1
	vmov.u32 t1, #0xffffffff //Put all 1 in a
	// Preload message word 14 (kept in t5/t6 for LOAD_NT_BUFFER_14).
	add nt_buffer, nt_buffer_base, #(4*14*NT_NUM_KEYS);
	vld1.u32 {t5,t6}, [nt_buffer:128];
	vdup.u32 reg_b1, r7
	vdup.u32 reg_b2, r7
	vdup.u32 reg_c1, r5
	vdup.u32 reg_c2, r5
	vld1.u32 {t2,t3}, [nt_buffer_base:128]
	vdup.u32 reg_d1, r6
	vdup.u32 reg_d2, r6
	// First step folded by hand: with the initial state, a = init_a +
	// F(b,c,d) + w0 simplifies to (all-ones + w0) rotated left by 3.
	vadd.u32 reg_a1, t1, t2 // First step
	vadd.u32 reg_a2, t1, t3
	vshl.u32 reg_a1, reg_a1, #3
	vshl.u32 reg_a2, reg_a2, #3

	STEP1(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 1 , 7 , NT_NUM_KEYS)
	STEP1(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 2 , 11, NT_NUM_KEYS)
	STEP1(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 3 , 19, NT_NUM_KEYS)
	STEP1(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 4 , 3 , NT_NUM_KEYS)
	STEP1(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 5 , 7 , NT_NUM_KEYS)
	STEP1(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 6 , 11, NT_NUM_KEYS)
	STEP1(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 7 , 19, NT_NUM_KEYS)
	STEP1(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 8 , 3 , NT_NUM_KEYS)
	STEP1(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 9 , 7 , NT_NUM_KEYS)
	STEP1(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 10, 11, NT_NUM_KEYS)
	STEP1(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 11, 19, NT_NUM_KEYS)
	STEP1(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 12, 3 , NT_NUM_KEYS)
	STEP1(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 13, 7 , NT_NUM_KEYS)
	STEP1(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 14, 11, NT_NUM_KEYS)
	STEP1(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 15, 19, NT_NUM_KEYS)

	// Round 2 (message order 0,4,8,12 / 1,5,9,13 / ... per MD4 spec)
	STEP2(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 0 , 3 , NT_NUM_KEYS,sqrt_2)
	STEP2(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 4 , 5 , NT_NUM_KEYS,sqrt_2)
	STEP2(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 8 , 9 , NT_NUM_KEYS,sqrt_2)
	STEP2(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 12, 13, NT_NUM_KEYS,sqrt_2)
	STEP2(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 1 , 3 , NT_NUM_KEYS,sqrt_2)
	STEP2(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 5 , 5 , NT_NUM_KEYS,sqrt_2)
	STEP2(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 9 , 9 , NT_NUM_KEYS,sqrt_2)
	STEP2(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 13, 13, NT_NUM_KEYS,sqrt_2)
	STEP2(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 2 , 3 , NT_NUM_KEYS,sqrt_2)
	STEP2(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 6 , 5 , NT_NUM_KEYS,sqrt_2)
	STEP2(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 10, 9 , NT_NUM_KEYS,sqrt_2)
	STEP2(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 14, 13, NT_NUM_KEYS,sqrt_2)
	STEP2(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 3 , 3 , NT_NUM_KEYS,sqrt_2)
	STEP2(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 7 , 5 , NT_NUM_KEYS,sqrt_2)
	STEP2(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 11, 9 , NT_NUM_KEYS,sqrt_2)
	STEP2(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 15, 13, NT_NUM_KEYS,sqrt_2)

	// Round 3 (message order 0,8,4,12 / 2,10,6,14 / 1,9,5,13 / [3,...])
	STEP3(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 0 , 3 , NT_NUM_KEYS,sqrt_3)
	STEP3(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 8 , 9 , NT_NUM_KEYS,sqrt_3)
	STEP3(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 4 , 11, NT_NUM_KEYS,sqrt_3)
	STEP3(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 12, 15, NT_NUM_KEYS,sqrt_3)
	STEP3(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 2 , 3 , NT_NUM_KEYS,sqrt_3)
	STEP3(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 10, 9 , NT_NUM_KEYS,sqrt_3)
	STEP3(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 6 , 11, NT_NUM_KEYS,sqrt_3)
	STEP3(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 14, 15, NT_NUM_KEYS,sqrt_3)
	STEP3(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 1 , 3 , NT_NUM_KEYS,sqrt_3)
	STEP3(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 9 , 9 , NT_NUM_KEYS,sqrt_3)
	STEP3(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 5 , 11, NT_NUM_KEYS,sqrt_3)
	STEP3(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 13, 15, NT_NUM_KEYS,sqrt_3)

	// Last round-3 step, partial: a += w3 + (b^c^d); the sqrt_3 addition,
	// the rotate and the final state addition are intentionally omitted
	// (reversed-step optimization).
	add nt_buffer, nt_buffer_base, #(4*3*NT_NUM_KEYS)
	vld1.u32 {t3,t4}, [nt_buffer:128]
	vadd.u32 reg_a1, reg_a1, t3
	vadd.u32 reg_a2, reg_a2, t4
	veor.u32 t1, reg_b1, reg_d1
	veor.u32 t2, reg_b2, reg_d2
	veor.u32 t1, t1, reg_c1
	veor.u32 t2, t2, reg_c2
	vadd.u32 reg_a1, reg_a1, t1
	vadd.u32 reg_a2, reg_a2, t2

	// Save a, b, c, d (results stored after the 16 key words).
	add nt_buffer, nt_buffer_base, #(16*4*NT_NUM_KEYS+0*4*NT_NUM_KEYS)
	vst1.u32 {reg_a1,reg_a2}, [nt_buffer:128]
	add nt_buffer, nt_buffer_base, #(16*4*NT_NUM_KEYS+1*4*NT_NUM_KEYS)
	vst1.u32 {reg_b1,reg_b2}, [nt_buffer:128]
	add nt_buffer, nt_buffer_base, #(16*4*NT_NUM_KEYS+2*4*NT_NUM_KEYS)
	vst1.u32 {reg_c1,reg_c2}, [nt_buffer:128]
	add nt_buffer, nt_buffer_base, #(16*4*NT_NUM_KEYS+3*4*NT_NUM_KEYS)
	vst1.u32 {reg_d1,reg_d2}, [nt_buffer:128]

	// Advance to the next group of 8 keys (2 q registers = 32 bytes).
	add i,i,#1
	add nt_buffer_base, nt_buffer_base, #(2*16)
	cmp i, #(NT_NUM_KEYS/(16/2))
	blo while1

	pop {r8,r9,r10,r11}
	pop {r4,r5,r6,r7}
	vpop {q4,q5,q6,q7}
	bx lr

/////////////////////////////////////////////////////////////////////////////////////////////////
// MD5
/////////////////////////////////////////////////////////////////////////////////////////////////
// Broadcast a 32-bit constant into q register t_const (clobbers r5).
#define MD5_LOAD_CONST(t_const,const_val) \
	SET_REG(r5,const_val);\
	vdup.u32 t_const, r5;

// Add message word 'index' into a1/a2 and load the step constant.
#define MD5_LOAD_BUFFER(a1,a2,index,t_const,const_val,t1,t2) \
	add nt_buffer, nt_buffer_base, #(4*index*NT_NUM_KEYS);\
	vld1.u32 {t1,t2}, [nt_buffer:128];\
	vadd.u32 a1, a1, t1;\
	vadd.u32 a2, a2, t2;\
	MD5_LOAD_CONST(t_const,const_val)

#define MD5_LOAD_BUFFER_0(a1,a2,t_const,const_val,t1,t2) \
	vld1.u32 {t1,t2}, [nt_buffer_base:128];\
	vadd.u32 a1, a1, t1;\
	vadd.u32 a2, a2, t2;\
	MD5_LOAD_CONST(t_const,const_val)

// Words 6 and 7 are preloaded into t7/t8 and t5/t6 by the caller.
#define MD5_LOAD_BUFFER_6(a1,a2,t_const,const_val,t1,t2) \
	vadd.u32 a1, a1, t7;\
	vadd.u32 a2, a2, t8;\
	MD5_LOAD_CONST(t_const,const_val)

#define MD5_LOAD_BUFFER_7(a1,a2,t_const,const_val,t1,t2) \
	vadd.u32 a1, a1, t5;\
	vadd.u32 a2, a2, t6;\
	MD5_LOAD_CONST(t_const,const_val)

#define MD5_LOAD_BUFFER_1(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_BUFFER(a1,a2,1,t_const,const_val,t1,t2)
#define MD5_LOAD_BUFFER_2(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_BUFFER(a1,a2,2,t_const,const_val,t1,t2)
#define MD5_LOAD_BUFFER_3(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_BUFFER(a1,a2,3,t_const,const_val,t1,t2)
#define MD5_LOAD_BUFFER_4(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_BUFFER(a1,a2,4,t_const,const_val,t1,t2)
#define MD5_LOAD_BUFFER_5(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_BUFFER(a1,a2,5,t_const,const_val,t1,t2)
// Indices 8..15 add no message word (those words are constant/zero for this
// key format -- TODO confirm); only the step constant is loaded.
#define MD5_LOAD_BUFFER_8(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_CONST(t_const,const_val)
#define MD5_LOAD_BUFFER_9(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_CONST(t_const,const_val)
#define MD5_LOAD_BUFFER_10(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_CONST(t_const,const_val)
#define MD5_LOAD_BUFFER_11(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_CONST(t_const,const_val)
#define MD5_LOAD_BUFFER_12(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_CONST(t_const,const_val)
#define MD5_LOAD_BUFFER_13(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_CONST(t_const,const_val)
#define MD5_LOAD_BUFFER_14(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_CONST(t_const,const_val)
#define MD5_LOAD_BUFFER_15(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_CONST(t_const,const_val)

// MD5 round-1/2 step (round 2 is obtained by permuting the b,c,d args):
// a += F(b,c,d) + w[index] + K;  a = rotl32(a, rot);  a += b_sum.
#define MD5_STEP1(a1,b1,c1,d1,a2,b2,c2,d2,b1_sum,b2_sum,index,rot,t1,t2,t_const,const_val) \
	MD5_LOAD_BUFFER_ ## index (a1,a2,t_const,const_val,t1,t2)\
	vmov.u32 t1, b1;\
	vmov.u32 t2, b2;\
	vbsl t1, c1, d1;\
	vbsl t2, c2, d2;\
	vadd.u32 a1, a1, t1;\
	vadd.u32 a2, a2, t2;\
	vadd.u32 a1, a1, t_const;\
	vadd.u32 a2, a2, t_const;\
	ROTATE(a1, a2, rot,t1,t2);\
	vadd.u32 a1, a1, b1_sum;\
	vadd.u32 a2, a2, b2_sum

// MD5 round-3 step: a += (b^c^d) + w[index] + K; rotate; a += b.
#define MD5_STEP3(a1,b1,c1,d1,a2,b2,c2,d2,index,rot,t1,t2,t_const,const_val) \
	MD5_LOAD_BUFFER_ ## index (a1,a2,t_const,const_val,t1,t2)\
	veor.u32 t1, d1, c1;\
	veor.u32 t2, d2, c2;\
	vadd.u32 a1, a1, t_const;\
	vadd.u32 a2, a2, t_const;\
	veor.u32 t1, t1, b1;\
	veor.u32 t2, t2, b2;\
	vadd.u32 a1, a1, t1;\
	vadd.u32 a2, a2, t2;\
	ROTATE(a1, a2, rot,t1,t2);\
	vadd.u32 a1, a1, b1;\
	vadd.u32 a2, a2, b2

// MD5 round-4 step: a += I(b,c,d) + w[index] + K; rotate; a += b.
// I(b,c,d) = c ^ (b | ~d), built from vorn + veor.
#define MD5_STEP4(a1,b1,c1,d1,a2,b2,c2,d2,index,rot,t1,t2,t_const,const_val) \
	MD5_LOAD_BUFFER_ ## index (a1,a2,t_const,const_val,t1,t2)\
	vorn t1, b1, d1;\
	vorn t2, b2, d2;\
	vadd.u32 a1, a1, t_const;\
	vadd.u32 a2, a2, t_const;\
	veor.u32 t1, t1, c1;\
	veor.u32 t2, t2, c2;\
	vadd.u32 a1, a1, t1;\
	vadd.u32 a2, a2, t2;\
	ROTATE(a1, a2, rot,t1,t2);\
	vadd.u32 a1, a1, b1;\
	vadd.u32 a2, a2, b2

//-----------------------------------------------------------------------------
// void crypt_md5_neon_kernel_asm(uint32_t* nt_buffer)  -- signature assumed
// from register use (r0 = interleaved key buffer).  Specialized raw-MD5
// kernel: message words >= 8 are folded away, and several step constants
// (e.g. 0xd76aa477, 0xf8fa0bcc) are NOT the RFC 1321 values -- they are
// precomputed sums that absorb fixed buffer words / initial-state terms.
// Round 4 stops after 12 steps (reversed-step optimization); results are
// stored after the 8 key words, in a, b, d, c order (c last, with word 2
// added back).
//-----------------------------------------------------------------------------
.text
.align 2
.global crypt_md5_neon_kernel_asm
.type   crypt_md5_neon_kernel_asm, %function
crypt_md5_neon_kernel_asm:
	vpush {q4,q5,q6,q7}
	push {r4,r5,r6,r7}
	push {r8,r9,r10,r11}

	// MD5 init values for b and c kept in core registers across the loop.
	SET_REG(r6,0xefcdab89)
	SET_REG(r7,0x98badcfe)

	mov i, #0 // i=0
md5_while1:
	// Round 1
	// t5/t6 temporarily hold the initial b/c values for the first steps.
	vdup.u32 t5, r6
	vdup.u32 t6, r7
	SET_REG(r5,0xd76aa477) // precomputed first-step constant
	vdup.u32 t7, r5
	add nt_buffer, nt_buffer_base, #(4*1*NT_NUM_KEYS);
	vld1.u32 {reg_d1,reg_d2}, [nt_buffer:128];
	add nt_buffer, nt_buffer_base, #(4*2*NT_NUM_KEYS);
	vld1.u32 {reg_c1,reg_c2}, [nt_buffer:128];
	add nt_buffer, nt_buffer_base, #(4*3*NT_NUM_KEYS);
	vld1.u32 {reg_b1,reg_b2}, [nt_buffer:128];
	vld1.u32 {reg_a1,reg_a2}, [nt_buffer_base:128] // First step
	vadd.u32 reg_a1, reg_a1, t7
	vadd.u32 reg_a2, reg_a2, t7
	ROTATE(reg_a1,reg_a2,7,t1,t2)
	vadd.u32 reg_a1, reg_a1, t5
	vadd.u32 reg_a2, reg_a2, t5

	MD5_STEP1( reg_d1, reg_a1, t5, t6, reg_d2, reg_a2, t5, t6, reg_a1, reg_a2, 8 , 12,t1,t2,t3,0xf8fa0bcc)
	MD5_STEP1( reg_c1, reg_d1, reg_a1, t5, reg_c2, reg_d2, reg_a2, t5, reg_d1, reg_d2, 8 , 17,t1,t2,t3,0xbcdb4dd9)
	MD5_STEP1( reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, reg_c1, reg_c2, 8 , 22,t1,t2,t3,0xb18b7a77)

	// Preload words 7 and 6 for MD5_LOAD_BUFFER_7 / _6.
	add nt_buffer, nt_buffer_base, #(4*7*NT_NUM_KEYS);
	vld1.u32 {t5,t6}, [nt_buffer:128];
	add nt_buffer, nt_buffer_base, #(4*6*NT_NUM_KEYS);
	vld1.u32 {t7,t8}, [nt_buffer:128];

	MD5_STEP1( reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, reg_b1, reg_b2, 4 , 7 ,t1,t2,t3,0xf57c0faf)
	MD5_STEP1( reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, reg_a1, reg_a2, 5 , 12,t1,t2,t3,0x4787c62a)
	MD5_STEP1( reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, reg_d1, reg_d2, 6 , 17,t1,t2,t3,0xa8304613)
	MD5_STEP1( reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, reg_c1, reg_c2, 8 , 22,t1,t2,t3,0xfd469501)
	MD5_STEP1( reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, reg_b1, reg_b2, 8 , 7 ,t1,t2,t3,0x698098d8)
	MD5_STEP1( reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, reg_a1, reg_a2, 9 , 12,t1,t2,t3,0x8b44f7af)
	MD5_STEP1( reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, reg_d1, reg_d2, 10, 17,t1,t2,t3,0xffff5bb1)
	MD5_STEP1( reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, reg_c1, reg_c2, 11, 22,t1,t2,t3,0x895cd7be)
	MD5_STEP1( reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, reg_b1, reg_b2, 12, 7 ,t1,t2,t3,0x6b901122)
	MD5_STEP1( reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, reg_a1, reg_a2, 13, 12,t1,t2,t3,0xfd987193)
	MD5_STEP1( reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, reg_d1, reg_d2, 7 , 17,t1,t2,t3,0xa679438e)
	MD5_STEP1( reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, reg_c1, reg_c2, 15, 22,t1,t2,t3,0x49b40821)

	// Round 2 -- G(b,c,d) realized as F(d,b,c) by permuting the args.
	MD5_STEP1( reg_a1, reg_d1, reg_b1, reg_c1, reg_a2, reg_d2, reg_b2, reg_c2, reg_b1, reg_b2, 1, 5 ,t1,t2,t3,0xf61e2562)
	MD5_STEP1( reg_d1, reg_c1, reg_a1, reg_b1, reg_d2, reg_c2, reg_a2, reg_b2, reg_a1, reg_a2, 6, 9 ,t1,t2,t3,0xc040b340)
	MD5_STEP1( reg_c1, reg_b1, reg_d1, reg_a1, reg_c2, reg_b2, reg_d2, reg_a2, reg_d1, reg_d2, 8, 14,t1,t2,t3,0x265e5a51)
	MD5_STEP1( reg_b1, reg_a1, reg_c1, reg_d1, reg_b2, reg_a2, reg_c2, reg_d2, reg_c1, reg_c2, 0, 20,t1,t2,t3,0xe9b6c7aa)
	MD5_STEP1( reg_a1, reg_d1, reg_b1, reg_c1, reg_a2, reg_d2, reg_b2, reg_c2, reg_b1, reg_b2, 5, 5 ,t1,t2,t3,0xd62f105d)
	MD5_STEP1( reg_d1, reg_c1, reg_a1, reg_b1, reg_d2, reg_c2, reg_a2, reg_b2, reg_a1, reg_a2, 8, 9 ,t1,t2,t3,0x02441453)
	MD5_STEP1( reg_c1, reg_b1, reg_d1, reg_a1, reg_c2, reg_b2, reg_d2, reg_a2, reg_d1, reg_d2, 8, 14,t1,t2,t3,0xd8a1e681)
	MD5_STEP1( reg_b1, reg_a1, reg_c1, reg_d1, reg_b2, reg_a2, reg_c2, reg_d2, reg_c1, reg_c2, 4, 20,t1,t2,t3,0xe7d3fbc8)
	MD5_STEP1( reg_a1, reg_d1, reg_b1, reg_c1, reg_a2, reg_d2, reg_b2, reg_c2, reg_b1, reg_b2, 8, 5 ,t1,t2,t3,0x21e1cde6)
	MD5_STEP1( reg_d1, reg_c1, reg_a1, reg_b1, reg_d2, reg_c2, reg_a2, reg_b2, reg_a1, reg_a2, 7, 9 ,t1,t2,t3,0xc33707d6)
	MD5_STEP1( reg_c1, reg_b1, reg_d1, reg_a1, reg_c2, reg_b2, reg_d2, reg_a2, reg_d1, reg_d2, 3, 14,t1,t2,t3,0xf4d50d87)
	MD5_STEP1( reg_b1, reg_a1, reg_c1, reg_d1, reg_b2, reg_a2, reg_c2, reg_d2, reg_c1, reg_c2, 8, 20,t1,t2,t3,0x455a14ed)
	MD5_STEP1( reg_a1, reg_d1, reg_b1, reg_c1, reg_a2, reg_d2, reg_b2, reg_c2, reg_b1, reg_b2, 8, 5 ,t1,t2,t3,0xa9e3e905)
	MD5_STEP1( reg_d1, reg_c1, reg_a1, reg_b1, reg_d2, reg_c2, reg_a2, reg_b2, reg_a1, reg_a2, 2, 9 ,t1,t2,t3,0xfcefa3f8)
	MD5_STEP1( reg_c1, reg_b1, reg_d1, reg_a1, reg_c2, reg_b2, reg_d2, reg_a2, reg_d1, reg_d2, 8, 14,t1,t2,t3,0x676f02d9)
	MD5_STEP1( reg_b1, reg_a1, reg_c1, reg_d1, reg_b2, reg_a2, reg_c2, reg_d2, reg_c1, reg_c2, 8, 20,t1,t2,t3,0x8d2a4c8a)

	// Round 3
	MD5_STEP3( reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 5, 4 ,t1,t2,t3,0xfffa3942)
	MD5_STEP3( reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 8, 11,t1,t2,t3,0x8771f681)
	MD5_STEP3( reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 8, 16,t1,t2,t3,0x6d9d6122)
	MD5_STEP3( reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 7, 23,t1,t2,t3,0xfde5380c)
	MD5_STEP3( reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 1, 4 ,t1,t2,t3,0xa4beea44)
	MD5_STEP3( reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 4, 11,t1,t2,t3,0x4bdecfa9)
	MD5_STEP3( reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 8, 16,t1,t2,t3,0xf6bb4b60)
	MD5_STEP3( reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 8, 23,t1,t2,t3,0xbebfbc70)
	MD5_STEP3( reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 8, 4 ,t1,t2,t3,0x289b7ec6)
	MD5_STEP3( reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 0, 11,t1,t2,t3,0xeaa127fa)
	MD5_STEP3( reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 3, 16,t1,t2,t3,0xd4ef3085)
	MD5_STEP3( reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 6, 23,t1,t2,t3,0x04881d05)
	MD5_STEP3( reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 8, 4 ,t1,t2,t3,0xd9d4d039)
	MD5_STEP3( reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 8, 11,t1,t2,t3,0xe6db99e5)
	MD5_STEP3( reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 8, 16,t1,t2,t3,0x1fa27cf8)
	MD5_STEP3( reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 2, 23,t1,t2,t3,0xc4ac5665)

	// Round 4 -- only 12 of 16 steps (reversed-step optimization).
	MD5_STEP4( reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 0, 6 ,t1,t2,t3,0xf4292244)
	MD5_STEP4( reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 8, 10,t1,t2,t3,0x432aff97)
	MD5_STEP4( reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 7, 15,t1,t2,t3,0xab9423a7)
	MD5_STEP4( reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 5, 21,t1,t2,t3,0xfc93a039)
	MD5_STEP4( reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 8, 6 ,t1,t2,t3,0x655b59c3)
	MD5_STEP4( reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 3, 10,t1,t2,t3,0x8f0ccc92)
	MD5_STEP4( reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 8, 15,t1,t2,t3,0xffeff47d)
	MD5_STEP4( reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 1, 21,t1,t2,t3,0x85845dd1)
	MD5_STEP4( reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 8, 6 ,t1,t2,t3,0x6fa87e4f)
	MD5_STEP4( reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 8, 10,t1,t2,t3,0xfe2ce6e0)
	MD5_STEP4( reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 6, 15,t1,t2,t3,0xa3014314)
	MD5_STEP4( reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 8, 21,t1,t2,t3,0x4e0811a1)

	// Partial 13th step of round 4: only w2 is added into c.
	add nt_buffer, nt_buffer_base, #(4*2*NT_NUM_KEYS)
	vld1.u32 {t1,t2}, [nt_buffer:128]
	vadd.u32 reg_c1, reg_c1, t1
	vadd.u32 reg_c2, reg_c2, t2

	// Save a, c, d, b (results stored after the 8 key words).
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+0*4*NT_NUM_KEYS)
	vst1.u32 {reg_a1,reg_a2}, [nt_buffer:128]
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+1*4*NT_NUM_KEYS)
	vst1.u32 {reg_b1,reg_b2}, [nt_buffer:128]
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+3*4*NT_NUM_KEYS)
	vst1.u32 {reg_d1,reg_d2}, [nt_buffer:128]
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+2*4*NT_NUM_KEYS)
	vst1.u32 {reg_c1,reg_c2}, [nt_buffer:128]

	// Advance to the next group of 8 keys.
	add i,i,#1
	add nt_buffer_base, nt_buffer_base, #(2*REG_BYTE_SIZE)
	cmp i, #(NT_NUM_KEYS/(REG_BYTE_SIZE/2))
	blo md5_while1

	pop {r8,r9,r10,r11}
	pop {r4,r5,r6,r7}
	vpop {q4,q5,q6,q7}
	bx lr

// NOTE(review): MD5_LOAD_BUFFER* are redefined below WITHOUT #undef -- the C
// preprocessor will emit 'macro redefined' warnings.  The new versions use a
// contiguous one-block layout (stride 2*16 bytes per word) for
// md5_one_block_neon instead of the interleaved NT_NUM_KEYS layout above.
#define MD5_LOAD_BUFFER(a1,a2,index,t_const,const_val,t1,t2) \
	add nt_buffer, nt_buffer_base, #(index*2*16);\
	vld1.u32 {t1,t2}, [nt_buffer:128];\
	vadd.u32 a1, a1, t1;\
	vadd.u32 a2, a2, t2;\
	MD5_LOAD_CONST(t_const,const_val)

#define MD5_LOAD_BUFFER_0(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_BUFFER(a1,a2, 0,t_const,const_val,t1,t2)
#define MD5_LOAD_BUFFER_1(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_BUFFER(a1,a2, 1,t_const,const_val,t1,t2)
#define MD5_LOAD_BUFFER_2(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_BUFFER(a1,a2, 2,t_const,const_val,t1,t2)
#define MD5_LOAD_BUFFER_3(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_BUFFER(a1,a2, 3,t_const,const_val,t1,t2)
#define MD5_LOAD_BUFFER_4(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_BUFFER(a1,a2, 4,t_const,const_val,t1,t2)
#define MD5_LOAD_BUFFER_5(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_BUFFER(a1,a2, 5,t_const,const_val,t1,t2)
#define MD5_LOAD_BUFFER_6(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_BUFFER(a1,a2, 6,t_const,const_val,t1,t2)
#define MD5_LOAD_BUFFER_7(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_BUFFER(a1,a2, 7,t_const,const_val,t1,t2)
#define MD5_LOAD_BUFFER_8(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_BUFFER(a1,a2, 8,t_const,const_val,t1,t2)
#define MD5_LOAD_BUFFER_9(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_BUFFER(a1,a2, 9,t_const,const_val,t1,t2)
#define MD5_LOAD_BUFFER_10(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_BUFFER(a1,a2,10,t_const,const_val,t1,t2)
#define MD5_LOAD_BUFFER_11(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_BUFFER(a1,a2,11,t_const,const_val,t1,t2)
#define MD5_LOAD_BUFFER_12(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_BUFFER(a1,a2,12,t_const,const_val,t1,t2)
#define MD5_LOAD_BUFFER_13(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_BUFFER(a1,a2,13,t_const,const_val,t1,t2)
#define MD5_LOAD_BUFFER_14(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_BUFFER(a1,a2,14,t_const,const_val,t1,t2)
#define MD5_LOAD_BUFFER_15(a1,a2,t_const,const_val,t1,t2)	MD5_LOAD_BUFFER(a1,a2,15,t_const,const_val,t1,t2)

#define md5_state r2

//-----------------------------------------------------------------------------
// void md5_one_block_neon(uint32_t* state, const uint32_t* block)
// -- signature assumed from register use: r0 = output state, r1 = one
// 64-byte message block replicated for 8 lanes.  Full, standard RFC 1321
// MD5 compression (all 64 steps, standard constants), state written to
// r0 as a, b, c, d (2 q registers each).
//-----------------------------------------------------------------------------
.text
.align 2
.global md5_one_block_neon
.type   md5_one_block_neon, %function
md5_one_block_neon:
	vpush {q4,q5,q6,q7}
	push {r4,r5,r6,r7}

	mov md5_state, r0
	mov r0, r1 // nt_buffer_base now points at the message block

	// Init: standard MD5 IV broadcast to all lanes.
	SET_REG(r4,0x67452301);
	vdup.u32 reg_a1, r4;
	vdup.u32 reg_a2, r4;
	SET_REG(r5,0xefcdab89);
	vdup.u32 reg_b1, r5;
	vdup.u32 reg_b2, r5;
	SET_REG(r6,0x98badcfe);
	vdup.u32 reg_c1, r6;
	vdup.u32 reg_c2, r6;
	SET_REG(r7,0x10325476);
	vdup.u32 reg_d1, r7;
	vdup.u32 reg_d2, r7;

	// Round 1
	MD5_STEP1( reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, reg_b1, reg_b2, 0 , 7 ,t1,t2,t3,0xd76aa478)
	MD5_STEP1( reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, reg_a1, reg_a2, 1 , 12,t1,t2,t3,0xe8c7b756)
	MD5_STEP1( reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, reg_d1, reg_d2, 2 , 17,t1,t2,t3,0x242070db)
	MD5_STEP1( reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, reg_c1, reg_c2, 3 , 22,t1,t2,t3,0xc1bdceee)
	MD5_STEP1( reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, reg_b1, reg_b2, 4 , 7 ,t1,t2,t3,0xf57c0faf)
	MD5_STEP1( reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, reg_a1, reg_a2, 5 , 12,t1,t2,t3,0x4787c62a)
	MD5_STEP1( reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, reg_d1, reg_d2, 6 , 17,t1,t2,t3,0xa8304613)
	MD5_STEP1( reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, reg_c1, reg_c2, 7 , 22,t1,t2,t3,0xfd469501)
	MD5_STEP1( reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, reg_b1, reg_b2, 8 , 7 ,t1,t2,t3,0x698098d8)
	MD5_STEP1( reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, reg_a1, reg_a2, 9 , 12,t1,t2,t3,0x8b44f7af)
	MD5_STEP1( reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, reg_d1, reg_d2, 10, 17,t1,t2,t3,0xffff5bb1)
	MD5_STEP1( reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, reg_c1, reg_c2, 11, 22,t1,t2,t3,0x895cd7be)
	MD5_STEP1( reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, reg_b1, reg_b2, 12, 7 ,t1,t2,t3,0x6b901122)
	MD5_STEP1( reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, reg_a1, reg_a2, 13, 12,t1,t2,t3,0xfd987193)
	MD5_STEP1( reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, reg_d1, reg_d2, 14, 17,t1,t2,t3,0xa679438e)
	MD5_STEP1( reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, reg_c1, reg_c2, 15, 22,t1,t2,t3,0x49b40821)

	// Round 2 -- G(b,c,d) realized as F(d,b,c) by permuting the args.
	MD5_STEP1( reg_a1, reg_d1, reg_b1, reg_c1, reg_a2, reg_d2, reg_b2, reg_c2, reg_b1, reg_b2, 1 , 5 ,t1,t2,t3,0xf61e2562)
	MD5_STEP1( reg_d1, reg_c1, reg_a1, reg_b1, reg_d2, reg_c2, reg_a2, reg_b2, reg_a1, reg_a2, 6 , 9 ,t1,t2,t3,0xc040b340)
	MD5_STEP1( reg_c1, reg_b1, reg_d1, reg_a1, reg_c2, reg_b2, reg_d2, reg_a2, reg_d1, reg_d2, 11, 14,t1,t2,t3,0x265e5a51)
	MD5_STEP1( reg_b1, reg_a1, reg_c1, reg_d1, reg_b2, reg_a2, reg_c2, reg_d2, reg_c1, reg_c2, 0 , 20,t1,t2,t3,0xe9b6c7aa)
	MD5_STEP1( reg_a1, reg_d1, reg_b1, reg_c1, reg_a2, reg_d2, reg_b2, reg_c2, reg_b1, reg_b2, 5 , 5 ,t1,t2,t3,0xd62f105d)
	MD5_STEP1( reg_d1, reg_c1, reg_a1, reg_b1, reg_d2, reg_c2, reg_a2, reg_b2, reg_a1, reg_a2, 10, 9 ,t1,t2,t3,0x02441453)
	MD5_STEP1( reg_c1, reg_b1, reg_d1, reg_a1, reg_c2, reg_b2, reg_d2, reg_a2, reg_d1, reg_d2, 15, 14,t1,t2,t3,0xd8a1e681)
	MD5_STEP1( reg_b1, reg_a1, reg_c1, reg_d1, reg_b2, reg_a2, reg_c2, reg_d2, reg_c1, reg_c2, 4 , 20,t1,t2,t3,0xe7d3fbc8)
	MD5_STEP1( reg_a1, reg_d1, reg_b1, reg_c1, reg_a2, reg_d2, reg_b2, reg_c2, reg_b1, reg_b2, 9 , 5 ,t1,t2,t3,0x21e1cde6)
	MD5_STEP1( reg_d1, reg_c1, reg_a1, reg_b1, reg_d2, reg_c2, reg_a2, reg_b2, reg_a1, reg_a2, 14, 9 ,t1,t2,t3,0xc33707d6)
	MD5_STEP1( reg_c1, reg_b1, reg_d1, reg_a1, reg_c2, reg_b2, reg_d2, reg_a2, reg_d1, reg_d2, 3 , 14,t1,t2,t3,0xf4d50d87)
	MD5_STEP1( reg_b1, reg_a1, reg_c1, reg_d1, reg_b2, reg_a2, reg_c2, reg_d2, reg_c1, reg_c2, 8 , 20,t1,t2,t3,0x455a14ed)
	MD5_STEP1( reg_a1, reg_d1, reg_b1, reg_c1, reg_a2, reg_d2, reg_b2, reg_c2, reg_b1, reg_b2, 13, 5 ,t1,t2,t3,0xa9e3e905)
	MD5_STEP1( reg_d1, reg_c1, reg_a1, reg_b1, reg_d2, reg_c2, reg_a2, reg_b2, reg_a1, reg_a2, 2 , 9 ,t1,t2,t3,0xfcefa3f8)
	MD5_STEP1( reg_c1, reg_b1, reg_d1, reg_a1, reg_c2, reg_b2, reg_d2, reg_a2, reg_d1, reg_d2, 7 , 14,t1,t2,t3,0x676f02d9)
	MD5_STEP1( reg_b1, reg_a1, reg_c1, reg_d1, reg_b2, reg_a2, reg_c2, reg_d2, reg_c1, reg_c2, 12, 20,t1,t2,t3,0x8d2a4c8a)

	// Round 3
	MD5_STEP3( reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 5 , 4 ,t1,t2,t3,0xfffa3942)
	MD5_STEP3( reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 8 , 11,t1,t2,t3,0x8771f681)
	MD5_STEP3( reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 11, 16,t1,t2,t3,0x6d9d6122)
	MD5_STEP3( reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 14, 23,t1,t2,t3,0xfde5380c)
	MD5_STEP3( reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 1 , 4 ,t1,t2,t3,0xa4beea44)
	MD5_STEP3( reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 4 , 11,t1,t2,t3,0x4bdecfa9)
	MD5_STEP3( reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 7 , 16,t1,t2,t3,0xf6bb4b60)
	MD5_STEP3( reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 10, 23,t1,t2,t3,0xbebfbc70)
	MD5_STEP3( reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 13, 4 ,t1,t2,t3,0x289b7ec6)
	MD5_STEP3( reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 0 , 11,t1,t2,t3,0xeaa127fa)
	MD5_STEP3( reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 3 , 16,t1,t2,t3,0xd4ef3085)
	MD5_STEP3( reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 6 , 23,t1,t2,t3,0x04881d05)
	MD5_STEP3( reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 9 , 4 ,t1,t2,t3,0xd9d4d039)
	MD5_STEP3( reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 12, 11,t1,t2,t3,0xe6db99e5)
	MD5_STEP3( reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 15, 16,t1,t2,t3,0x1fa27cf8)
	MD5_STEP3( reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 2 , 23,t1,t2,t3,0xc4ac5665)

	// Round 4
	MD5_STEP4( reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 0 , 6 ,t1,t2,t3,0xf4292244)
	MD5_STEP4( reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 7 , 10,t1,t2,t3,0x432aff97)
	MD5_STEP4( reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 14, 15,t1,t2,t3,0xab9423a7)
	MD5_STEP4( reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 5 , 21,t1,t2,t3,0xfc93a039)
	MD5_STEP4( reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 12, 6 ,t1,t2,t3,0x655b59c3)
	MD5_STEP4( reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 3 , 10,t1,t2,t3,0x8f0ccc92)
	MD5_STEP4( reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 10, 15,t1,t2,t3,0xffeff47d)
	MD5_STEP4( reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 1 , 21,t1,t2,t3,0x85845dd1)
	MD5_STEP4( reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 8 , 6 ,t1,t2,t3,0x6fa87e4f)
	MD5_STEP4( reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 15, 10,t1,t2,t3,0xfe2ce6e0)
	MD5_STEP4( reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 6 , 15,t1,t2,t3,0xa3014314)
	MD5_STEP4( reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 13, 21,t1,t2,t3,0x4e0811a1)
	MD5_STEP4( reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 4 , 6 ,t1,t2,t3,0xf7537e82)
	MD5_STEP4( reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 11, 10,t1,t2,t3,0xbd3af235)
	MD5_STEP4( reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 2 , 15,t1,t2,t3,0x2ad7d2bb)
	MD5_STEP4( reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 9 , 21,t1,t2,t3,0xeb86d391)

	// Sum init -- add the MD5 IV back into the working state.
	SET_REG(r4,0x67452301);
	SET_REG(r5,0xefcdab89);
	SET_REG(r6,0x98badcfe);
	SET_REG(r7,0x10325476);
	vdup.u32 t1, r4;
	vdup.u32 t2, r5;
	vdup.u32 t3, r6;
	vdup.u32 t4, r7;
	vadd.u32 reg_a1, reg_a1, t1
	vadd.u32 reg_a2, reg_a2, t1
	vadd.u32 reg_b1, reg_b1, t2
	vadd.u32 reg_b2, reg_b2, t2
	vadd.u32 reg_c1, reg_c1, t3
	vadd.u32 reg_c2, reg_c2, t3
	vadd.u32 reg_d1, reg_d1, t4
	vadd.u32 reg_d2, reg_d2, t4

	// Save a, b, c, d (32 bytes each, sequential in md5_state).
	vst1.u32 {reg_a1,reg_a2}, [md5_state:128];
	add md5_state, md5_state, #(2*16);
	vst1.u32 {reg_b1,reg_b2}, [md5_state:128];
	add md5_state, md5_state, #(2*16);
	vst1.u32 {reg_c1,reg_c2}, [md5_state:128];
	add md5_state, md5_state, #(2*16);
	vst1.u32 {reg_d1,reg_d2}, [md5_state:128];

	pop {r4,r5,r6,r7}
	vpop {q4,q5,q6,q7}
	bx lr

/////////////////////////////////////////////////////////////////////////////////////////////////
// SHA1
/////////////////////////////////////////////////////////////////////////////////////////////////
// q12/q13 are repurposed as the SHA-1 e state; q15 holds the step constant.
#undef t7
#undef t8
#define reg_e1 q12
#define reg_e2 q13
#define step_const q15

// Rotate-left by exactly 1 bit (shift-left done as an add).
#define ROTATE_1(reg1,reg2,t1,t2) \
	vshr.u32 t1 , reg1, #(31);\
	vshr.u32 t2 , reg2, #(31);\
	vadd.u32 reg1, reg1, reg1;\
	vadd.u32 reg2, reg2, reg2;\
	vorr.u32 reg1, reg1, t1;\
	vorr.u32 reg2, reg2, t2;

// Rotate-left by 5 into t1/t3 WITHOUT modifying reg1/reg2.
#define DCC2_ROTATE_5(reg1,reg2,t1,t2,t3,t4) \
	vshr.u32 t1, reg1, #(32-5);\
	vshr.u32 t3, reg2, #(32-5);\
	vshl.u32 t2, reg1, #(5);\
	vshl.u32 t4, reg2, #(5);\
	vorr.u32 t1, t2, t1;\
	vorr.u32 t3, t4, t3;

// Byte-swap message words 0..6 to big-endian in place, then expand the
// SHA-1 message schedule W[16..31] into the area after the first 8 words.
// Word 7 is read but never byte-swapped or rewritten (presumably a fixed
// length/padding word already in the right form -- TODO confirm).
#define CVT_BIG_ENDIAN_AND_CALCULATE_W(t00,t01,t10,t11,t20,t21,t30,t31,t40,t41,t50,t51,t60,t61,tmp0,tmp1) \
	vld1.u32 {t00,t01}, [nt_buffer_base:128];\
	vrev32.u8 t00,t00;\
	vrev32.u8 t01,t01;\
	vst1.u32 {t00,t01}, [nt_buffer_base:128];\
	\
	add nt_buffer, nt_buffer_base, #(1*4*NT_NUM_KEYS);\
	vld1.u32 {t10,t11}, [nt_buffer:128];\
	vrev32.u8 t10,t10;\
	vrev32.u8 t11,t11;\
	vst1.u32 {t10,t11}, [nt_buffer:128];\
	\
	add nt_buffer, nt_buffer_base, #(2*4*NT_NUM_KEYS);\
	vld1.u32 {t20,t21}, [nt_buffer:128];\
	vrev32.u8 t20,t20;\
	vrev32.u8 t21,t21;\
	vst1.u32 {t20,t21}, [nt_buffer:128];\
	\
	add nt_buffer, nt_buffer_base, #(3*4*NT_NUM_KEYS);\
	vld1.u32 {t30,t31}, [nt_buffer:128];\
	vrev32.u8 t30,t30;\
	vrev32.u8 t31,t31;\
	vst1.u32 {t30,t31}, [nt_buffer:128];\
	\
	add nt_buffer, nt_buffer_base, #(4*4*NT_NUM_KEYS);\
	vld1.u32 {t40,t41}, [nt_buffer:128];\
	vrev32.u8 t40,t40;\
	vrev32.u8 t41,t41;\
	vst1.u32 {t40,t41}, [nt_buffer:128];\
	\
	add nt_buffer, nt_buffer_base, #(5*4*NT_NUM_KEYS);\
	vld1.u32 {t50,t51}, [nt_buffer:128];\
	vrev32.u8 t50,t50;\
	vrev32.u8 t51,t51;\
	vst1.u32 {t50,t51}, [nt_buffer:128];\
	\
	add nt_buffer, nt_buffer_base, #(6*4*NT_NUM_KEYS);\
	vld1.u32 {t60,t61}, [nt_buffer:128];\
	vrev32.u8 t60,t60;\
	vrev32.u8 t61,t61;\
	vst1.u32 {t60,t61}, [nt_buffer:128];\
	\
	veor.u32 t00,t00,t20;\
	veor.u32 t01,t01,t21;\
	ROTATE_1(t00,t01,tmp0,tmp1);\
	veor.u32 t10,t10,t30;\
	veor.u32 t11,t11,t31;\
	ROTATE_1(t10,t11,tmp0,tmp1)\
	veor.u32 t20,t20,t40;\
	veor.u32 t21,t21,t41;\
	add nt_buffer, nt_buffer_base, #(7*4*NT_NUM_KEYS);\
	vld1.u32 {tmp0,tmp1}, [nt_buffer:128];\
	veor.u32 t20,t20,tmp0;\
	veor.u32 t21,t21,tmp1;\
	ROTATE_1(t20,t21,tmp0,tmp1);\
	veor.u32 t30,t30,t00;\
	veor.u32 t31,t31,t01;\
	veor.u32 t30,t30,t50;\
	veor.u32 t31,t31,t51;\
	ROTATE_1(t30,t31,tmp0,tmp1);\
	veor.u32 t40,t40,t10;\
	veor.u32 t41,t41,t11;\
	veor.u32 t40,t40,t60;\
	veor.u32 t41,t41,t61;\
	ROTATE_1(t40,t41,tmp0,tmp1);\
	veor.u32 t50,t50,t20;\
	veor.u32 t51,t51,t21;\
	ROTATE_1(t50,t51,tmp0,tmp1);\
	veor.u32 t60,t60,t30;\
	veor.u32 t61,t61,t31;\
	ROTATE_1(t60,t61,tmp0,tmp1);\
	\
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+0*4*NT_NUM_KEYS);\
	vst1.u32 {t00,t01}, [nt_buffer:128];\
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+1*4*NT_NUM_KEYS);\
	vst1.u32 {t10,t11}, [nt_buffer:128];\
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+2*4*NT_NUM_KEYS);\
	vst1.u32 {t20,t21}, [nt_buffer:128];\
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+3*4*NT_NUM_KEYS);\
	vst1.u32 {t30,t31}, [nt_buffer:128];\
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+4*4*NT_NUM_KEYS);\
	vst1.u32 {t40,t41}, [nt_buffer:128];\
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+5*4*NT_NUM_KEYS);\
	vst1.u32 {t50,t51}, [nt_buffer:128];\
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+6*4*NT_NUM_KEYS);\
	vst1.u32 {t60,t61}, [nt_buffer:128];\
	\
	add nt_buffer, nt_buffer_base, #(7*4*NT_NUM_KEYS);\
	vld1.u32 {tmp0,tmp1}, [nt_buffer:128];\
	veor.u32 t40,t40,tmp0;\
	veor.u32 t41,t41,tmp1;\
	ROTATE_1(t40,t41,tmp0,tmp1);\
	veor.u32 t00,t00,t50;\
	veor.u32 t01,t01,t51;\
	ROTATE_1(t00,t01,tmp0,tmp1);\
	veor.u32 t10,t10,t60;\
	veor.u32 t11,t11,t61;\
	ROTATE_1(t10,t11,tmp0,tmp1);\
	veor.u32 t20,t20,t40;\
	veor.u32 t21,t21,t41;\
	ROTATE_1(t20,t21,tmp0,tmp1);\
	veor.u32 t30,t30,t00;\
	veor.u32 t31,t31,t01;\
	ROTATE_1(t30,t31,tmp0,tmp1);\
	\
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+7*4*NT_NUM_KEYS);\
	vst1.u32 {t40,t41}, [nt_buffer:128];\
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+8*4*NT_NUM_KEYS);\
	vst1.u32 {t00,t01}, [nt_buffer:128];\
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+9*4*NT_NUM_KEYS);\
	vst1.u32 {t10,t11}, [nt_buffer:128];\
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+10*4*NT_NUM_KEYS);\
	vst1.u32 {t20,t21}, [nt_buffer:128];\
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+11*4*NT_NUM_KEYS);\
	vst1.u32 {t30,t31}, [nt_buffer:128];\
	\
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+4*4*NT_NUM_KEYS);\
	vld1.u32 {tmp0,tmp1}, [nt_buffer:128];\
	veor.u32 t10,t10,tmp0;\
	veor.u32 t11,t11,tmp1;\
	ROTATE_1(t10,t11,tmp0,tmp1);\
	\
	add nt_buffer, nt_buffer_base, #(7*4*NT_NUM_KEYS);\
	vld1.u32 {tmp0,tmp1}, [nt_buffer:128];\
	veor.u32 t50,t50,tmp0;\
	veor.u32 t51,t51,tmp1;\
	veor.u32 t50,t50,t20;\
	veor.u32 t51,t51,t21;\
	ROTATE_1(t50,t51,tmp0,tmp1);\
	\
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+0*4*NT_NUM_KEYS);\
	vld1.u32 {tmp0,tmp1}, [nt_buffer:128];\
	veor.u32 t60,t60,tmp0;\
	veor.u32 t61,t61,tmp1;\
	veor.u32 t60,t60,t30;\
	veor.u32 t61,t61,t31;\
	ROTATE_1(t60,t61,tmp0,tmp1);\
	\
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+1*4*NT_NUM_KEYS);\
	vld1.u32 {tmp0,tmp1}, [nt_buffer:128];\
	veor.u32 t40,t40,tmp0;\
	veor.u32 t41,t41,tmp1;\
	veor.u32 t40,t40,t10;\
	veor.u32 t41,t41,t11;\
	add nt_buffer, nt_buffer_base, #(7*4*NT_NUM_KEYS);\
	vld1.u32 {tmp0,tmp1}, [nt_buffer:128];\
	veor.u32 t40,t40,tmp0;\
	veor.u32 t41,t41,tmp1;\
	ROTATE_1(t40,t41,tmp0,tmp1);\
	\
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+12*4*NT_NUM_KEYS);\
	vst1.u32 {t10,t11}, [nt_buffer:128];\
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+13*4*NT_NUM_KEYS);\
	vst1.u32 {t50,t51}, [nt_buffer:128];\
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+14*4*NT_NUM_KEYS);\
	vst1.u32 {t60,t61}, [nt_buffer:128];\
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+15*4*NT_NUM_KEYS);\
	vst1.u32 {t40,t41}, [nt_buffer:128]

// SHA-1 round-1 step (Ch function), no message word added here:
// e += rotl5(a) + Ch(b,c,d) + K;  b = rotl30(b).
#define DCC2_STEP1(reg_e1,reg_a1,reg_b1,reg_c1,reg_d1,reg_e2,reg_a2,reg_b2,reg_c2,reg_d2,t1,t2,t3,t4,step_const) \
	DCC2_ROTATE_5(reg_a1,reg_a2,t1,t2,t3,t4)\
	vadd.u32 reg_e1, reg_e1, t1;\
	vadd.u32 reg_e2, reg_e2, t3;\
	\
	vmov.u32 t1, reg_b1;\
	vmov.u32 t2, reg_b2;\
	vbsl t1, reg_c1, reg_d1;\
	vbsl t2, reg_c2, reg_d2;\
	vadd.u32 reg_e1, reg_e1, t1;\
	vadd.u32 reg_e2, reg_e2, t2;\
	vadd.u32 reg_e1, reg_e1, step_const;\
	vadd.u32 reg_e2, reg_e2, step_const;\
	\
	ROTATE(reg_b1, reg_b2, 30, t1, t2)

// SHA-1 parity step with precomputed W[index] loaded from memory.
#define SHA1_STEP2(step_const,reg_e1,reg_a1,reg_b1,reg_c1,reg_d1,reg_e2,reg_a2,reg_b2,reg_c2,reg_d2,t1,t2,t3,t4,index) \
	DCC2_ROTATE_5(reg_a1,reg_a2,t1,t2,t3,t4)\
	vadd.u32 reg_e1, reg_e1, t1;\
	vadd.u32 reg_e2, reg_e2, t3;\
	\
	veor.u32 t1, reg_c1, reg_d1;\
	veor.u32 t2, reg_c2, reg_d2;\
	veor.u32 t1, t1, reg_b1;\
	veor.u32 t2, t2, reg_b2;\
	vadd.u32 reg_e1, reg_e1, t1;\
	vadd.u32 reg_e2, reg_e2, t2;\
	vadd.u32 reg_e1, reg_e1, step_const;\
	vadd.u32 reg_e2, reg_e2, step_const;\
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+index*4*NT_NUM_KEYS);\
	vld1.u32 {t3,t4}, [nt_buffer:128];\
	vadd.u32 reg_e1, reg_e1, t3;\
	vadd.u32 reg_e2, reg_e2, t4;\
	\
	ROTATE(reg_b1, reg_b2, 30, t1, t2)

// On-the-fly message expansion: W[w0] = rotl1(W[w0]^W[w1]^W[w2]^W[w3]),
// result left in t1/t2 and stored back to W[w0]'s slot.
#define SHA1_R(t1,t2,t3,t4,w0,w1,w2,w3) \
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+w1*4*NT_NUM_KEYS);\
	vld1.u32 {t1,t2}, [nt_buffer:128];\
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+w2*4*NT_NUM_KEYS);\
	vld1.u32 {t3,t4}, [nt_buffer:128];\
	veor.u32 t1, t1, t3;\
	veor.u32 t2, t2, t4;\
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+w3*4*NT_NUM_KEYS);\
	vld1.u32 {t3,t4}, [nt_buffer:128];\
	veor.u32 t1, t1, t3;\
	veor.u32 t2, t2, t4;\
	add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+w0*4*NT_NUM_KEYS);\
	vld1.u32 {t3,t4}, [nt_buffer:128];\
	veor.u32 t1, t1, t3;\
	veor.u32 t2, t2, t4;\
	\
	ROTATE_1(t1, t2, t3, t4)\
	vst1.u32 {t1,t2}, [nt_buffer:128];

// SHA-1 Maj step (rounds 40..59) with inline message expansion.
// Maj(b,c,d) built as vbsl keyed on d^c.
#define SHA1_STEP3(step_const,reg_e1,reg_a1,reg_b1,reg_c1,reg_d1,reg_e2,reg_a2,reg_b2,reg_c2,reg_d2,t1,t2,t3,t4,w0,w1,w2,w3) \
	DCC2_ROTATE_5(reg_a1,reg_a2,t1,t2,t3,t4)\
	vadd.u32 reg_e1, reg_e1, t1;\
	vadd.u32 reg_e2, reg_e2, t3;\
	\
	veor.u32 t1, reg_d1, reg_c1;\
	veor.u32 t2, reg_d2, reg_c2;\
	vbsl t1, reg_b1, reg_c1;\
	vbsl t2, reg_b2, reg_c2;\
	\
	vadd.u32 reg_e1, reg_e1, t1;\
	vadd.u32 reg_e2, reg_e2, t2;\
	vadd.u32 reg_e1, reg_e1, step_const;\
	vadd.u32 reg_e2, reg_e2, step_const;\
	\
	ROTATE(reg_b1, reg_b2, 30, t1, t2)\
	SHA1_R(t1,t2,t3,t4,w0,w1,w2,w3)\
	vadd.u32 reg_e1, reg_e1, t1;\
	vadd.u32 reg_e2, reg_e2, t2;

// SHA-1 parity step with inline message expansion (rounds 20..39/60..79).
#define SHA1_STEP4(step_const,reg_e1,reg_a1,reg_b1,reg_c1,reg_d1,reg_e2,reg_a2,reg_b2,reg_c2,reg_d2,t1,t2,t3,t4,w0,w1,w2,w3) \
	DCC2_ROTATE_5(reg_a1,reg_a2,t1,t2,t3,t4)\
	vadd.u32 reg_e1, reg_e1, t1;\
	vadd.u32 reg_e2, reg_e2, t3;\
	\
	veor.u32 t1, reg_c1, reg_d1;\
	veor.u32 t2, reg_c2, reg_d2;\
	veor.u32 t1, t1, reg_b1;\
	veor.u32 t2, t2, reg_b2;\
	vadd.u32 reg_e1, reg_e1, t1;\
	vadd.u32 reg_e2, reg_e2, t2;\
	vadd.u32 reg_e1, reg_e1, step_const;\
	vadd.u32 reg_e2, reg_e2, step_const;\
	\
	ROTATE(reg_b1, reg_b2, 30, t1, t2)\
	\
	SHA1_R(t1,t2,t3,t4,w0,w1,w2,w3)\
	vadd.u32 reg_e1, reg_e1, t1;\
	vadd.u32 reg_e2, reg_e2, t2;

// As SHA1_STEP4 but skips the rotl30 of b (used for the final steps).
#define SHA1_STEP4_NO_ROT(step_const,reg_e1,reg_a1,reg_b1,reg_c1,reg_d1,reg_e2,reg_a2,reg_b2,reg_c2,reg_d2,t1,t2,t3,t4,w0,w1,w2,w3) \
	DCC2_ROTATE_5(reg_a1,reg_a2,t1,t2,t3,t4)\
	vadd.u32 reg_e1, reg_e1, t1;\
	vadd.u32 reg_e2, reg_e2, t3;\
	\
	veor.u32 t1, reg_c1, reg_d1;\
	veor.u32 t2, reg_c2, reg_d2;\
	veor.u32 t1, t1, reg_b1;\
	veor.u32 t2, t2, reg_b2;\
	vadd.u32 reg_e1, reg_e1, t1;\
	vadd.u32 reg_e2, reg_e2, t2;\
	vadd.u32 reg_e1, reg_e1, step_const;\
	vadd.u32 reg_e2, reg_e2, step_const;\
	\
	SHA1_R(t1,t2,t3,t4,w0,w1,w2,w3)\
	vadd.u32 reg_e1, reg_e1, t1;\
	vadd.u32 reg_e2, reg_e2, t2;

// NOTE(review): function body continues past this excerpt.
.text
.align 2
.global crypt_sha1_neon_kernel_asm
.type   crypt_sha1_neon_kernel_asm, %function
crypt_sha1_neon_kernel_asm:
	vpush {q4,q5,q6,q7}
	push {r4,r5,r6,r7}
	push
{r8,r9,r10,r11} mov i, #0 // i=0 sha1_while1: CVT_BIG_ENDIAN_AND_CALCULATE_W(reg_a1,reg_a2,reg_b1,reg_b2,reg_c1,reg_c2,reg_d1,reg_d2,reg_e1,reg_e2,t1,t2,t3,t4,t5,t6) // Load state vld1.u32 {reg_e1,reg_e2}, [nt_buffer_base:128] add nt_buffer, nt_buffer_base, #(1*4*NT_NUM_KEYS) vld1.u32 {reg_d1,reg_d2}, [nt_buffer:128] add nt_buffer, nt_buffer_base, #(2*4*NT_NUM_KEYS) vld1.u32 {reg_c1,reg_c2}, [nt_buffer:128] add nt_buffer, nt_buffer_base, #(3*4*NT_NUM_KEYS) vld1.u32 {reg_b1,reg_b2}, [nt_buffer:128] add nt_buffer, nt_buffer_base, #(4*4*NT_NUM_KEYS) vld1.u32 {reg_a1,reg_a2}, [nt_buffer:128] //Step 1 SET_REG(r5,0x9fb498b3) vdup.u32 t1, r5 SET_REG(r5,0x66b0cd0d) vdup.u32 t2, r5 vadd.u32 reg_e1, reg_e1, t1 vadd.u32 reg_e2, reg_e2, t1 //Step 2 vadd.u32 reg_d1, reg_d1, t2 vadd.u32 reg_d2, reg_d2, t2 DCC2_ROTATE_5(reg_e1,reg_e2,t3,t4,t5,step_const) vadd.u32 reg_d1, reg_d1, t3 vadd.u32 reg_d2, reg_d2, t5 //Step 3 SET_REG(r5,0xf33d5697) vdup.u32 step_const, r5 vadd.u32 reg_c1, reg_c1, step_const vadd.u32 reg_c2, reg_c2, step_const DCC2_ROTATE_5(reg_d1,reg_d2,t1,t2,t3,t4) vadd.u32 reg_c1, reg_c1, t1 vadd.u32 reg_c2, reg_c2, t3 SET_REG(r5,0x22222222) vdup.u32 t2, r5 SET_REG(r5,0x7bf36ae2) vdup.u32 t5, r5 vand.u32 t4, reg_e1, t2 vand.u32 t2, reg_e2, t2 veor.u32 t4, t4, t5 veor.u32 t2, t2, t5 vadd.u32 reg_c1, reg_c1, t4 vadd.u32 reg_c2, reg_c2, t2 ROTATE(reg_e1,reg_e2,30,t1,t2) //Step 4 SET_REG(r5,0xd675e47b) vdup.u32 step_const, r5 SET_REG(r5,0x59d148c0) vdup.u32 t5, r5 DCC2_STEP1(reg_b1, reg_c1, reg_d1, reg_e1, t5, reg_b2, reg_c2, reg_d2, reg_e2, t5, t1, t2, t3, t4,step_const) SET_REG(r5,0xb453c259) vdup.u32 step_const, r5 DCC2_STEP1(reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4,step_const) //Step 5 SET_REG(r5,0x5a827999) vdup.u32 step_const, r5 DCC2_STEP1(reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4,step_const) add nt_buffer, nt_buffer_base, #(5*4*NT_NUM_KEYS) vld1.u32 {t1,t2}, 
[nt_buffer:128] vadd.u32 reg_e1, reg_e1, t1 vadd.u32 reg_e2, reg_e2, t2 //Step 6 DCC2_STEP1(reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4,step_const) add nt_buffer, nt_buffer_base, #(6*4*NT_NUM_KEYS) vld1.u32 {t1,t2}, [nt_buffer:128] vadd.u32 reg_d1, reg_d1, t1 vadd.u32 reg_d2, reg_d2, t2 DCC2_STEP1(reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4,step_const) DCC2_STEP1(reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4,step_const) DCC2_STEP1(reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4,step_const) DCC2_STEP1(reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4,step_const) DCC2_STEP1(reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4,step_const) DCC2_STEP1(reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4,step_const) DCC2_STEP1(reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4,step_const) DCC2_STEP1(reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4,step_const) DCC2_STEP1(reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4,step_const) add nt_buffer, nt_buffer_base, #(7*4*NT_NUM_KEYS) vld1.u32 {t1,t2}, [nt_buffer:128] vadd.u32 reg_e1, reg_e1, t1 vadd.u32 reg_e2, reg_e2, t2 // Recalculate W DCC2_STEP1(reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4,step_const) add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+0*4*NT_NUM_KEYS) vld1.u32 {t1,t2}, [nt_buffer:128] vadd.u32 reg_d1, reg_d1, t1 vadd.u32 reg_d2, reg_d2, t2 DCC2_STEP1(reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4,step_const) add nt_buffer, nt_buffer_base, 
#(8*4*NT_NUM_KEYS+1*4*NT_NUM_KEYS) vld1.u32 {t1,t2}, [nt_buffer:128] vadd.u32 reg_c1, reg_c1, t1 vadd.u32 reg_c2, reg_c2, t2 DCC2_STEP1(reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4,step_const) add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+2*4*NT_NUM_KEYS) vld1.u32 {t1,t2}, [nt_buffer:128] vadd.u32 reg_b1, reg_b1, t1 vadd.u32 reg_b2, reg_b2, t2 DCC2_STEP1(reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4,step_const) add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+3*4*NT_NUM_KEYS) vld1.u32 {t1,t2}, [nt_buffer:128] vadd.u32 reg_a1, reg_a1, t1 vadd.u32 reg_a2, reg_a2, t2 //Round 2 SET_REG(r5,0x6ed9eba1) vdup.u32 step_const, r5 SHA1_STEP2(step_const,reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 4 ) SHA1_STEP2(step_const,reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4, 5 ) SHA1_STEP2(step_const,reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4, 6 ) SHA1_STEP2(step_const,reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4, 7 ) SHA1_STEP2(step_const,reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4, 8 ) SHA1_STEP2(step_const,reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 9 ) SHA1_STEP2(step_const,reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4, 10) SHA1_STEP2(step_const,reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4, 11) SHA1_STEP2(step_const,reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4, 12) SHA1_STEP2(step_const,reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4, 13) SHA1_STEP2(step_const,reg_e1, reg_a1, reg_b1, 
reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 14) SHA1_STEP2(step_const,reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4, 15) SHA1_STEP4(step_const,reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4, 0, 13, 8 , 2) SHA1_STEP4(step_const,reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4, 1, 14, 9 , 3) SHA1_STEP4(step_const,reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4, 2, 15, 10, 4) SHA1_STEP4(step_const,reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 3, 0 , 11, 5) SHA1_STEP4(step_const,reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4, 4, 1 , 12, 6) SHA1_STEP4(step_const,reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4, 5, 2 , 13, 7) SHA1_STEP4(step_const,reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4, 6, 3 , 14, 8) SHA1_STEP4(step_const,reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4, 7, 4 , 15, 9) //Round 3 SET_REG(r5,0x8F1BBCDC) vdup.u32 step_const, r5 SHA1_STEP3(step_const,reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 8 , 5 , 0 , 10) SHA1_STEP3(step_const,reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4, 9 , 6 , 1 , 11) SHA1_STEP3(step_const,reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4, 10, 7 , 2 , 12) SHA1_STEP3(step_const,reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4, 11, 8 , 3 , 13) SHA1_STEP3(step_const,reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4, 12, 9 , 4 , 14) 
SHA1_STEP3(step_const,reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 13, 10, 5 , 15) SHA1_STEP3(step_const,reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4, 14, 11, 6 , 0 ) SHA1_STEP3(step_const,reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4, 15, 12, 7 , 1 ) SHA1_STEP3(step_const,reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4, 0 , 13, 8 , 2 ) SHA1_STEP3(step_const,reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4, 1 , 14, 9 , 3 ) SHA1_STEP3(step_const,reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 2 , 15, 10, 4 ) SHA1_STEP3(step_const,reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4, 3 , 0 , 11, 5 ) SHA1_STEP3(step_const,reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4, 4 , 1 , 12, 6 ) SHA1_STEP3(step_const,reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4, 5 , 2 , 13, 7 ) SHA1_STEP3(step_const,reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4, 6 , 3 , 14, 8 ) SHA1_STEP3(step_const,reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 7 , 4 , 15, 9 ) SHA1_STEP3(step_const,reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4, 8 , 5 , 0 , 10) SHA1_STEP3(step_const,reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4, 9 , 6 , 1 , 11) SHA1_STEP3(step_const,reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4, 10, 7 , 2 , 12) SHA1_STEP3(step_const,reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4, 11, 8 
, 3 , 13) //Round 4 SET_REG(r5,0xCA62C1D6) vdup.u32 step_const, r5 SHA1_STEP4(step_const,reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 12, 9 , 4 , 14 ) SHA1_STEP4(step_const,reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4, 13, 10, 5 , 15 ) SHA1_STEP4(step_const,reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4, 14, 11, 6 , 0 ) SHA1_STEP4(step_const,reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4, 15, 12, 7 , 1 ) SHA1_STEP4(step_const,reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4, 0 , 13, 8 , 2 ) SHA1_STEP4(step_const,reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 1 , 14, 9 , 3 ) SHA1_STEP4(step_const,reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4, 2 , 15, 10, 4 ) SHA1_STEP4(step_const,reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4, 3 , 0 , 11, 5 ) SHA1_STEP4(step_const,reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4, 4 , 1 , 12, 6 ) SHA1_STEP4(step_const,reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4, 5 , 2 , 13, 7 ) SHA1_STEP4(step_const,reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 6 , 3 , 14, 8 ) SHA1_STEP4(step_const,reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4, 7 , 4 , 15, 9 ) SHA1_STEP4(step_const,reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4, 8 , 5 , 0 , 10 ) SHA1_STEP4(step_const,reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4, 9 , 6 , 1 , 11 ) SHA1_STEP4(step_const,reg_a1, reg_b1, reg_c1, 
reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4, 10, 7 , 2 , 12 ) SHA1_STEP4_NO_ROT(step_const,reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 11, 8, 3, 13) SHA1_R(t1,t2,t3,t4,12, 9, 4, 14) SHA1_R(t1,t2,t3,t4,15, 12, 7, 1) ROTATE(reg_a1,reg_a2,30,t3,t4) vadd.u32 reg_a1, reg_a1, t1 vadd.u32 reg_a2, reg_a2, t2 // Save a, c, d, b, e add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+2*4*NT_NUM_KEYS) vst1.u32 {reg_a1,reg_a2}, [nt_buffer:128] add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+4*4*NT_NUM_KEYS) vst1.u32 {reg_b1,reg_b2}, [nt_buffer:128] add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+9*4*NT_NUM_KEYS) vst1.u32 {reg_c1,reg_c2}, [nt_buffer:128] add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+8*4*NT_NUM_KEYS) vst1.u32 {reg_d1,reg_d2}, [nt_buffer:128] add nt_buffer, nt_buffer_base, #(8*4*NT_NUM_KEYS+3*4*NT_NUM_KEYS) vst1.u32 {reg_e1,reg_e2}, [nt_buffer:128] add i,i,#1 add nt_buffer_base, nt_buffer_base, #(2*REG_BYTE_SIZE) cmp i, #(NT_NUM_KEYS/(REG_BYTE_SIZE/2)) blo sha1_while1 pop {r8,r9,r10,r11} pop {r4,r5,r6,r7} vpop {q4,q5,q6,q7} bx lr ///////////////////////////////////////////////////////////////////////////////////////////////// // DCC ///////////////////////////////////////////////////////////////////////////////////////////////// #define STEP1_DCC(a1,b1,c1,d1,a2,b2,c2,d2,rot,NT_NUM_KEYS,t1,t2,init_value) \ vadd.u32 a1, a1, init_value;\ vadd.u32 a2, a2, init_value;\ vmov.u32 t1, b1;\ vmov.u32 t2, b2;\ vbsl t1, c1, d1;\ vbsl t2, c2, d2;\ vadd.u32 a1, a1, t1;\ vadd.u32 a2, a2, t2;\ ROTATE(a1,a2,rot,t1,t2) #undef nt_buffer #undef NT_NUM_KEYS #define nt_buffer_base r0 #define crypt_result r1 #define nt_buffer r2 #define NT_NUM_KEYS 64 .text .align 2 .global dcc_ntlm_part_neon .type dcc_ntlm_part_neon, %function dcc_ntlm_part_neon: vpush {q4,q5,q6,q7} push {r4,r5} SET_REG(r2,0x5a827999) SET_REG(r3,0x6ed9eba1) vdup.u32 sqrt_2, r2 vdup.u32 sqrt_3, r3 SET_REG(r5,0x98badcfe) 
SET_REG(r4,0x10325476) SET_REG(r3,0xefcdab89) // Round 1 vmov.u32 t1, #0xffffffff //Put all 1 in a add nt_buffer, nt_buffer_base, #(4*14*NT_NUM_KEYS); vld1.u32 {t5,t6}, [nt_buffer:128]; vdup.u32 reg_b1, r3 vdup.u32 reg_b2, r3 vdup.u32 reg_c1, r5 vdup.u32 reg_c2, r5 vld1.u32 {t2,t3}, [nt_buffer_base:128] vdup.u32 reg_d1, r4 vdup.u32 reg_d2, r4 vadd.u32 reg_a1, t1, t2// First step vadd.u32 reg_a2, t1, t3 vshl.u32 reg_a1, reg_a1, #3 vshl.u32 reg_a2, reg_a2, #3 STEP1(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 1 , 7 , NT_NUM_KEYS) STEP1(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 2 , 11, NT_NUM_KEYS) STEP1(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 3 , 19, NT_NUM_KEYS) STEP1(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 4 , 3 , NT_NUM_KEYS) STEP1(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 5 , 7 , NT_NUM_KEYS) STEP1(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 6 , 11, NT_NUM_KEYS) STEP1(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 7 , 19, NT_NUM_KEYS) STEP1(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 8 , 3 , NT_NUM_KEYS) STEP1(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 9 , 7 , NT_NUM_KEYS) STEP1(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 10, 11, NT_NUM_KEYS) STEP1(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 11, 19, NT_NUM_KEYS) STEP1(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 12, 3 , NT_NUM_KEYS) STEP1(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 13, 7 , NT_NUM_KEYS) STEP1(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 14, 11, NT_NUM_KEYS) STEP1(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 15, 19, NT_NUM_KEYS) // Round 2 STEP2(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 0 , 3 , NT_NUM_KEYS,sqrt_2) STEP2(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, 
reg_c2, 4 , 5 , NT_NUM_KEYS,sqrt_2) STEP2(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 8 , 9 , NT_NUM_KEYS,sqrt_2) STEP2(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 12, 13, NT_NUM_KEYS,sqrt_2) STEP2(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 1 , 3 , NT_NUM_KEYS,sqrt_2) STEP2(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 5 , 5 , NT_NUM_KEYS,sqrt_2) STEP2(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 9 , 9 , NT_NUM_KEYS,sqrt_2) STEP2(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 13, 13, NT_NUM_KEYS,sqrt_2) STEP2(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 2 , 3 , NT_NUM_KEYS,sqrt_2) STEP2(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 6 , 5 , NT_NUM_KEYS,sqrt_2) STEP2(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 10, 9 , NT_NUM_KEYS,sqrt_2) STEP2(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 14, 13, NT_NUM_KEYS,sqrt_2) STEP2(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 3 , 3 , NT_NUM_KEYS,sqrt_2) STEP2(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 7 , 5 , NT_NUM_KEYS,sqrt_2) STEP2(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 11, 9 , NT_NUM_KEYS,sqrt_2) STEP2(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 15, 13, NT_NUM_KEYS,sqrt_2) // Round 3 STEP3(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 0 , 3 , NT_NUM_KEYS,sqrt_3) STEP3(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 8 , 9 , NT_NUM_KEYS,sqrt_3) STEP3(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 4 , 11, NT_NUM_KEYS,sqrt_3) STEP3(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 12, 15, NT_NUM_KEYS,sqrt_3) STEP3(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 2 , 3 , NT_NUM_KEYS,sqrt_3) STEP3(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 10, 9 , 
NT_NUM_KEYS,sqrt_3) STEP3(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 6 , 11, NT_NUM_KEYS,sqrt_3) STEP3(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 14, 15, NT_NUM_KEYS,sqrt_3) STEP3(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 1 , 3 , NT_NUM_KEYS,sqrt_3) STEP3(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 9 , 9 , NT_NUM_KEYS,sqrt_3) STEP3(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 5 , 11, NT_NUM_KEYS,sqrt_3) STEP3(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 13, 15, NT_NUM_KEYS,sqrt_3) STEP3(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 3 , 3 , NT_NUM_KEYS,sqrt_3) STEP3(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 11, 9 , NT_NUM_KEYS,sqrt_3) STEP3(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 7 , 11, NT_NUM_KEYS,sqrt_3) STEP3(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 15, 15, NT_NUM_KEYS,sqrt_3) //LOAD const_init_* vdup.u32 t2, r3 vdup.u32 t3, r5 vdup.u32 t4, r4 SET_REG(r3,0x67452301) vdup.u32 t1, r3 vadd.u32 reg_a1, reg_a1, t1 vadd.u32 reg_a2, reg_a2, t1 vadd.u32 reg_b1, reg_b1, t2 vadd.u32 reg_b2, reg_b2, t2 vadd.u32 reg_c1, reg_c1, t3 vadd.u32 reg_c2, reg_c2, t3 vadd.u32 reg_d1, reg_d1, t4 vadd.u32 reg_d2, reg_d2, t4 vst1.u32 {reg_a1,reg_a2}, [crypt_result:128]! vst1.u32 {reg_b1,reg_b2}, [crypt_result:128]! vst1.u32 {reg_c1,reg_c2}, [crypt_result:128]! vst1.u32 {reg_d1,reg_d2}, [crypt_result:128]! 
// Round 1 vmov.u32 t1, #0xffffffff //Put all 1 in a vadd.u32 reg_a1, t1, reg_a1// First step vadd.u32 reg_a2, t1, reg_a2 ROTATE(reg_a1,reg_a2,3,t1,sqrt_2) // interchange b and d STEP1_DCC(reg_b1, reg_a1, t2, t3, reg_b2, reg_a2, t2, t3, 7 , NT_NUM_KEYS,t1,sqrt_2,t4) STEP1_DCC(reg_c1, reg_b1, reg_a1, t2, reg_c2, reg_b2, reg_a2, t2, 11, NT_NUM_KEYS,t1,sqrt_2,t3) STEP1_DCC(reg_d1, reg_c1, reg_b1, reg_a1, reg_d2, reg_c2, reg_b2, reg_a2, 19, NT_NUM_KEYS,t1,sqrt_2,t2) vst1.u32 {reg_a1,reg_a2}, [crypt_result:128]! vst1.u32 {reg_d1,reg_d2}, [crypt_result:128]! vst1.u32 {reg_c1,reg_c2}, [crypt_result:128]! vst1.u32 {reg_b1,reg_b2}, [crypt_result:128]! pop {r4,r5} vpop {q4,q5,q6,q7} bx lr #define DCC_LOAD_CRYPT(a1,a2,index) \ add nt_buffer, crypt_result, #(index*2*16);\ vld1.u32 {t3,t4}, [nt_buffer:128];\ vadd.u32 a1, a1, t3;\ vadd.u32 a2, a2, t4; #define DCC_LOAD_NT_BUFFER(a1,a2,index) \ ldr r3, [nt_buffer_base, #(4*(index-4))];\ vdup.u32 t3, r3;\ vadd.u32 a1, a1, t3;\ vadd.u32 a2, a2, t3; #define LOAD_DCC_0(a1,a2) vld1.u32 {t3,t4}, [crypt_result:128];\ vadd.u32 a1, a1, t3;\ vadd.u32 a2, a2, t4; #define LOAD_DCC_1(a1,a2) DCC_LOAD_CRYPT(a1,a2,1) #define LOAD_DCC_2(a1,a2) DCC_LOAD_CRYPT(a1,a2,2) #define LOAD_DCC_3(a1,a2) DCC_LOAD_CRYPT(a1,a2,3) #define LOAD_DCC_4(a1,a2) vadd.u32 a1, a1, t5;\ vadd.u32 a2, a2, t5; #define LOAD_DCC_5(a1,a2) DCC_LOAD_NT_BUFFER(a1,a2,5) #define LOAD_DCC_6(a1,a2) DCC_LOAD_NT_BUFFER(a1,a2,6) #define LOAD_DCC_7(a1,a2) DCC_LOAD_NT_BUFFER(a1,a2,7) #define LOAD_DCC_8(a1,a2) DCC_LOAD_NT_BUFFER(a1,a2,8) #define LOAD_DCC_9(a1,a2) DCC_LOAD_NT_BUFFER(a1,a2,9) #define LOAD_DCC_10(a1,a2) DCC_LOAD_NT_BUFFER(a1,a2,10) #define LOAD_DCC_11(a1,a2) DCC_LOAD_NT_BUFFER(a1,a2,11) #define LOAD_DCC_12(a1,a2) DCC_LOAD_NT_BUFFER(a1,a2,12) #define LOAD_DCC_13(a1,a2) DCC_LOAD_NT_BUFFER(a1,a2,13) #define LOAD_DCC_14(a1,a2) vadd.u32 a1, a1, t6;\ vadd.u32 a2, a2, t6; #define LOAD_DCC_15(a1,a2) #define STEP1_DCC_SALT(a1,b1,c1,d1,a2,b2,c2,d2,index,rot,t1,t2) \ LOAD_DCC_ ## index 
(a1,a2)\ vmov.u32 t1, b1;\ vmov.u32 t2, b2;\ vbsl t1, c1, d1;\ vbsl t2, c2, d2;\ vadd.u32 a1, a1, t1;\ vadd.u32 a2, a2, t2;\ ROTATE(a1,a2,rot,t1,t2) #define STEP2_DCC_SALT(a1,b1,c1,d1,a2,b2,c2,d2,index,rot,sqrt_2,t1,t2,t3,t4) \ LOAD_DCC_ ## index (a1,a2)\ veor.u32 t1, c1, d1;\ veor.u32 t2, c2, d2;\ vbsl t1, b1, c1;\ vbsl t2, b2, c2;\ vadd.u32 a1, a1, sqrt_2;\ vadd.u32 a2, a2, sqrt_2;\ vadd.u32 a1, a1, t1;\ vadd.u32 a2, a2, t2;\ ROTATE(a1,a2,rot,t1,t2) #define STEP3_DCC_SALT(a1,b1,c1,d1,a2,b2,c2,d2,index,rot,sqrt_3,t1,t2) \ LOAD_DCC_ ## index (a1,a2)\ veor.u32 t1, d1, c1;\ veor.u32 t2, d2, c2;\ vadd.u32 a1, a1, sqrt_3;\ vadd.u32 a2, a2, sqrt_3;\ veor.u32 t1, t1, b1;\ veor.u32 t2, t2, b2;\ vadd.u32 a1, a1, t1;\ vadd.u32 a2, a2, t2;\ ROTATE(a1,a2,rot,t1,t2) #define dcc_salt_part_neon_body(idx) \ .text;\ .align 2;\ .global dcc_salt_part_neon ## idx;\ .type dcc_salt_part_neon ## idx, %function;\ dcc_salt_part_neon ## idx:\ vpush {q4,q5,q6,q7};\ \ SET_REG(r2,0x5a827999);\ SET_REG(r3,0x6ed9eba1);\ vdup.u32 sqrt_2, r2;\ vdup.u32 sqrt_3, r3;\ \ /* Round 1*/\ add nt_buffer, crypt_result, #(8*16);\ vld1.u32 {reg_a1,reg_a2}, [nt_buffer:128]!;\ vld1.u32 {reg_b1,reg_b2}, [nt_buffer:128]!;\ vld1.u32 {reg_c1,reg_c2}, [nt_buffer:128]!;\ vld1.u32 {reg_d1,reg_d2}, [nt_buffer:128]!;\ \ ldr r2, [nt_buffer_base];\ ldr r3, [nt_buffer_base, #(4*(14-4))];\ vdup.u32 t5, r2;\ vdup.u32 t6, r3;\ \ STEP1_DCC_SALT(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 4 , 3 ,t1,t2);\ STEP1_DCC_SALT(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 5 , 7 ,t1,t2);\ STEP1_DCC_SALT(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 6 , 11,t1,t2);\ STEP1_DCC_SALT(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 7 , 19,t1,t2);\ \ STEP1_DCC_SALT(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 8 , 3 ,t1,t2);\ STEP1_DCC_SALT(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 9 , 7 ,t1,t2);\ STEP1_DCC_SALT(reg_c1, reg_d1, 
reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 10, 11,t1,t2);\ STEP1_DCC_SALT(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 11, 19,t1,t2);\ \ STEP1_DCC_SALT(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 12, 3 ,t1,t2);\ STEP1_DCC_SALT(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 13, 7 ,t1,t2);\ STEP1_DCC_SALT(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 14, 11,t1,t2);\ STEP1_DCC_SALT(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 15, 19,t1,t2);\ /* Round 2*/\ STEP2_DCC_SALT(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 0 , 3 ,sqrt_2,t1,t2,t3,t4);\ STEP2_DCC_SALT(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 4 , 5 ,sqrt_2,t1,t2,t3,t4);\ STEP2_DCC_SALT(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 8 , 9 ,sqrt_2,t1,t2,t3,t4);\ STEP2_DCC_SALT(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 12, 13,sqrt_2,t1,t2,t3,t4);\ \ STEP2_DCC_SALT(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 1 , 3 ,sqrt_2,t1,t2,t3,t4);\ STEP2_DCC_SALT(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 5 , 5 ,sqrt_2,t1,t2,t3,t4);\ STEP2_DCC_SALT(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 9 , 9 ,sqrt_2,t1,t2,t3,t4);\ STEP2_DCC_SALT(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 13, 13,sqrt_2,t1,t2,t3,t4);\ \ STEP2_DCC_SALT(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 2 , 3 ,sqrt_2,t1,t2,t3,t4);\ STEP2_DCC_SALT(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 6 , 5 ,sqrt_2,t1,t2,t3,t4);\ STEP2_DCC_SALT(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 10, 9 ,sqrt_2,t1,t2,t3,t4);\ STEP2_DCC_SALT(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 14, 13,sqrt_2,t1,t2,t3,t4);\ \ STEP2_DCC_SALT(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 3 , 3 ,sqrt_2,t1,t2,t3,t4);\ STEP2_DCC_SALT(reg_d1, reg_a1, 
reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 7 , 5 ,sqrt_2,t1,t2,t3,t4);\
    STEP2_DCC_SALT(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 11, 9 ,sqrt_2,t1,t2,t3,t4);\
    STEP2_DCC_SALT(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 15, 13,sqrt_2,t1,t2,t3,t4);\
    /* Round 3*/\
    STEP3_DCC_SALT(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 0 , 3 ,sqrt_3,t1,t2);\
    STEP3_DCC_SALT(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 8 , 9 ,sqrt_3,t1,t2);\
    STEP3_DCC_SALT(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 4 , 11,sqrt_3,t1,t2);\
    STEP3_DCC_SALT(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 12, 15,sqrt_3,t1,t2);\
    \
    STEP3_DCC_SALT(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 2 , 3 ,sqrt_3,t1,t2);\
    STEP3_DCC_SALT(reg_d1, reg_a1, reg_b1, reg_c1, reg_d2, reg_a2, reg_b2, reg_c2, 10, 9 ,sqrt_3,t1,t2);\
    STEP3_DCC_SALT(reg_c1, reg_d1, reg_a1, reg_b1, reg_c2, reg_d2, reg_a2, reg_b2, 6 , 11,sqrt_3,t1,t2);\
    STEP3_DCC_SALT(reg_b1, reg_c1, reg_d1, reg_a1, reg_b2, reg_c2, reg_d2, reg_a2, 14, 15,sqrt_3,t1,t2);\
    \
    STEP3_DCC_SALT(reg_a1, reg_b1, reg_c1, reg_d1, reg_a2, reg_b2, reg_c2, reg_d2, 1 , 3 ,sqrt_3,t1,t2);\
    LOAD_DCC_9(reg_d1,reg_d2);\
    veor.u32 t1, reg_c1, reg_b1;\
    veor.u32 t2, reg_c2, reg_b2;\
    veor.u32 t1, t1, reg_a1;\
    veor.u32 t2, t2, reg_a2;\
    vadd.u32 reg_d1, reg_d1, t1;\
    vadd.u32 reg_d2, reg_d2, t2;\
    \
    add crypt_result, crypt_result, #(16*16);\
    vst1.u32 {reg_a1,reg_a2}, [crypt_result:128]!;\
    vst1.u32 {reg_b1,reg_b2}, [crypt_result:128]!;\
    vst1.u32 {reg_c1,reg_c2}, [crypt_result:128]!;\
    vst1.u32 {reg_d1,reg_d2}, [crypt_result:128]!;\
    \
    vpop {q4,q5,q6,q7};\
    bx lr

// Functions by salt length.
// Each dcc_salt_part_neon_body(N) instantiation emits a kernel for a salt of
// N uint32 words; after emitting it, the matching LOAD_DCC_N macro is
// redefined as empty so the next (shorter) instantiation simply omits that
// salt word.  NOTE(review): assumes the LOAD_DCC_* macros were defined
// earlier in this file — confirm against the part above this chunk.
dcc_salt_part_neon_body(13)
#undef LOAD_DCC_13
#define LOAD_DCC_13(a1,a2)
dcc_salt_part_neon_body(12)
#undef LOAD_DCC_12
#define LOAD_DCC_12(a1,a2)
dcc_salt_part_neon_body(11)
#undef LOAD_DCC_11
#define LOAD_DCC_11(a1,a2)
dcc_salt_part_neon_body(10)
#undef LOAD_DCC_10
#define LOAD_DCC_10(a1,a2)
dcc_salt_part_neon_body(9)
#undef LOAD_DCC_9
#define LOAD_DCC_9(a1,a2)
dcc_salt_part_neon_body(8)
#undef LOAD_DCC_8
#define LOAD_DCC_8(a1,a2)
dcc_salt_part_neon_body(7)
#undef LOAD_DCC_7
#define LOAD_DCC_7(a1,a2)
dcc_salt_part_neon_body(6)
#undef LOAD_DCC_6
#define LOAD_DCC_6(a1,a2)
dcc_salt_part_neon_body(5)
#undef LOAD_DCC_5
#define LOAD_DCC_5(a1,a2)
dcc_salt_part_neon_body(4)

/////////////////////////////////////////////////////////////////////////////////////////////////
// DCC2 format
/////////////////////////////////////////////////////////////////////////////////////////////////
// Register aliases for the DCC2/SHA-1 routine (AAPCS: r0-r3 carry the first
// arguments; r4 is callee-saved and is pushed/popped by the function below).
// tmp_ptr aliases tmp_ptr0 (both r3) — they are never live at the same time.
#define dcc2_state r0
#define sha1_hash r1
#define W r2
#define tmp_ptr r3
#define tmp_ptr0 r3
#define tmp_ptr1 r4

// reg1 += w_ptr[index1]; reg2 += w_ptr[index2] (4 lanes each; REG_BYTE_SIZE
// is the q-register stride of the interleaved arrays). Clobbers t1, tmp_ptr.
#define DCC2_ADD(reg1,reg2,w_ptr,index1,index2) \
    add tmp_ptr, w_ptr, #(index1*REG_BYTE_SIZE);\
    vld1.u32 {t1}, [tmp_ptr:128];\
    vadd.u32 reg1, reg1, t1;\
    \
    add tmp_ptr, w_ptr, #(index2*REG_BYTE_SIZE);\
    vld1.u32 {t1}, [tmp_ptr:128];\
    vadd.u32 reg2, reg2, t1;\

// SHA-1 round with the parity function F = b^c^d, message word taken directly
// from W[index] (block 1) and W[index+16] (block 2):
//   e += rol(a,5) + (b^c^d) + K + W[i];  b = rol(b,30)
// for two interleaved 4-lane states (…1 / …2 register sets).
// NOTE(review): DCC2_ROTATE_5 / ROTATE are defined earlier in the file;
// presumably rol-by-5 into t1/t3 and rol-by-n in place — confirm.
#define DCC2_STEP2(step_const,reg_e1,reg_a1,reg_b1,reg_c1,reg_d1,reg_e2,reg_a2,reg_b2,reg_c2,reg_d2,t1,t2,t3,t4,index) \
    DCC2_ROTATE_5(reg_a1,reg_a2,t1,t2,t3,t4)\
    vadd.u32 reg_e1, reg_e1, t1;\
    vadd.u32 reg_e2, reg_e2, t3;\
    \
    veor.u32 t1, reg_c1, reg_d1;\
    veor.u32 t2, reg_c2, reg_d2;\
    veor.u32 t1, t1, reg_b1;\
    veor.u32 t2, t2, reg_b2;\
    vadd.u32 reg_e1, reg_e1, t1;\
    vadd.u32 reg_e2, reg_e2, t2;\
    vadd.u32 reg_e1, reg_e1, step_const;\
    vadd.u32 reg_e2, reg_e2, step_const;\
    \
    add tmp_ptr, W, #(index*REG_BYTE_SIZE);\
    vld1.u32 {t3}, [tmp_ptr:128];\
    add tmp_ptr, W, #((index+16)*REG_BYTE_SIZE);\
    vld1.u32 {t4}, [tmp_ptr:128];\
    vadd.u32 reg_e1, reg_e1, t3;\
    vadd.u32 reg_e2, reg_e2, t4;\
    \
    ROTATE(reg_b1, reg_b2, 30, t1, t2)

// SHA-1 round with F = MAJ(b,c,d), implemented branch-free with vbsl:
//   t = d^c; bsl(t, b, c) == (b&c)|(b&d)|(c&d).
// Also expands the schedule in place: W[w0] = rol1(W[w0]^W[w1]^W[w2]^W[w3])
// for both blocks (offsets w* and w*+16) and adds the new word to e.
#define DCC2_STEP3(step_const,reg_e1,reg_a1,reg_b1,reg_c1,reg_d1,reg_e2,reg_a2,reg_b2,reg_c2,reg_d2,t1,t2,t3,t4,w0,w1,w2,w3) \
    DCC2_ROTATE_5(reg_a1,reg_a2,t1,t2,t3,t4)\
    vadd.u32 reg_e1, reg_e1, t1;\
    vadd.u32 reg_e2, reg_e2, t3;\
    \
    veor.u32 t1, reg_d1, reg_c1;\
    veor.u32 t2, reg_d2, reg_c2;\
    vbsl t1, reg_b1, reg_c1;\
    vbsl t2, reg_b2, reg_c2;\
    \
    vadd.u32 reg_e1, reg_e1, t1;\
    vadd.u32 reg_e2, reg_e2, t2;\
    vadd.u32 reg_e1, reg_e1, step_const;\
    vadd.u32 reg_e2, reg_e2, step_const;\
    \
    ROTATE(reg_b1, reg_b2, 30, t1, t2)\
    \
    add tmp_ptr0, W, #(w1*REG_BYTE_SIZE);\
    add tmp_ptr1, W, #((w1+16)*REG_BYTE_SIZE);\
    vld1.u32 {t1}, [tmp_ptr0:128];\
    vld1.u32 {t2}, [tmp_ptr1:128];\
    \
    add tmp_ptr0, W, #(w2*REG_BYTE_SIZE);\
    add tmp_ptr1, W, #((w2+16)*REG_BYTE_SIZE);\
    vld1.u32 {t3}, [tmp_ptr0:128];\
    vld1.u32 {t4}, [tmp_ptr1:128];\
    veor.u32 t1, t1, t3;\
    veor.u32 t2, t2, t4;\
    \
    add tmp_ptr0, W, #(w3*REG_BYTE_SIZE);\
    add tmp_ptr1, W, #((w3+16)*REG_BYTE_SIZE);\
    vld1.u32 {t3}, [tmp_ptr0:128];\
    vld1.u32 {t4}, [tmp_ptr1:128];\
    veor.u32 t1, t1, t3;\
    veor.u32 t2, t2, t4;\
    \
    add tmp_ptr0, W, #(w0*REG_BYTE_SIZE);\
    add tmp_ptr1, W, #((w0+16)*REG_BYTE_SIZE);\
    vld1.u32 {t3}, [tmp_ptr0:128];\
    vld1.u32 {t4}, [tmp_ptr1:128];\
    veor.u32 t1, t1, t3;\
    veor.u32 t2, t2, t4;\
    \
    ROTATE_1(t1, t2, t3, t4)\
    vst1.u32 {t1}, [tmp_ptr0:128];\
    vst1.u32 {t2}, [tmp_ptr1:128];\
    \
    vadd.u32 reg_e1, reg_e1, t1;\
    vadd.u32 reg_e2, reg_e2, t2;\

// Same as DCC2_STEP3 but with the parity function F = b^c^d (used for SHA-1
// rounds 20-39 and 60-79); schedule expansion identical to DCC2_STEP3.
#define DCC2_STEP4(step_const,reg_e1,reg_a1,reg_b1,reg_c1,reg_d1,reg_e2,reg_a2,reg_b2,reg_c2,reg_d2,t1,t2,t3,t4,w0,w1,w2,w3) \
    DCC2_ROTATE_5(reg_a1,reg_a2,t1,t2,t3,t4)\
    vadd.u32 reg_e1, reg_e1, t1;\
    vadd.u32 reg_e2, reg_e2, t3;\
    \
    veor.u32 t1, reg_c1, reg_d1;\
    veor.u32 t2, reg_c2, reg_d2;\
    veor.u32 t1, t1, reg_b1;\
    veor.u32 t2, t2, reg_b2;\
    vadd.u32 reg_e1, reg_e1, t1;\
    vadd.u32 reg_e2, reg_e2, t2;\
    vadd.u32 reg_e1, reg_e1, step_const;\
    vadd.u32 reg_e2, reg_e2, step_const;\
    \
    ROTATE(reg_b1, reg_b2, 30, t1, t2)\
    \
    add tmp_ptr0, W, #(w1*REG_BYTE_SIZE);\
    add tmp_ptr1, W, #((w1+16)*REG_BYTE_SIZE);\
    vld1.u32 {t1}, [tmp_ptr0:128];\
    vld1.u32 {t2}, [tmp_ptr1:128];\
    \
    add tmp_ptr0, W, #(w2*REG_BYTE_SIZE);\
    add tmp_ptr1, W, #((w2+16)*REG_BYTE_SIZE);\
    vld1.u32 {t3}, [tmp_ptr0:128];\
    vld1.u32 {t4}, [tmp_ptr1:128];\
    veor.u32 t1, t1, t3;\
    veor.u32 t2, t2, t4;\
    \
    add tmp_ptr0, W, 
#(w3*REG_BYTE_SIZE);\
    add tmp_ptr1, W, #((w3+16)*REG_BYTE_SIZE);\
    vld1.u32 {t3}, [tmp_ptr0:128];\
    vld1.u32 {t4}, [tmp_ptr1:128];\
    veor.u32 t1, t1, t3;\
    veor.u32 t2, t2, t4;\
    \
    add tmp_ptr0, W, #(w0*REG_BYTE_SIZE);\
    add tmp_ptr1, W, #((w0+16)*REG_BYTE_SIZE);\
    vld1.u32 {t3}, [tmp_ptr0:128];\
    vld1.u32 {t4}, [tmp_ptr1:128];\
    veor.u32 t1, t1, t3;\
    veor.u32 t2, t2, t4;\
    \
    ROTATE_1(t1, t2, t3, t4)\
    vst1.u32 {t1}, [tmp_ptr0:128];\
    vst1.u32 {t2}, [tmp_ptr1:128];\
    \
    vadd.u32 reg_e1, reg_e1, t1;\
    vadd.u32 reg_e2, reg_e2, t2;

//-----------------------------------------------------------------------------
// sha1_process_sha1_neon(dcc2_state /*r0*/, sha1_hash /*r1*/, W /*r2*/)
// One SHA-1 compression step of the DCC2 iteration loop for two groups of
// four interleaved candidates (q-register lanes).
//  * First precomputes the changing message-schedule words ("Qs") for both
//    16-word blocks (W[i] and W[i+16]) directly from the previous hash
//    values in sha1_hash, storing them into W.
//  * Then loads the SHA-1 state from dcc2_state, runs the 80 rounds
//    (Step1..Step4 below, with in-place schedule expansion), adds the state
//    back and writes the new hash to sha1_hash.
// Saves/restores q4-q7 and r4-r5 per AAPCS; r3 and the remaining q registers
// are clobbered.
// NOTE(review): reg_a1..reg_e2, t1..t5, step_const, SET_REG, ROTATE_1 and
// DCC2_STEP1 are #defined earlier in this file — confirm their mapping.
//-----------------------------------------------------------------------------
.text
.align 2
.global sha1_process_sha1_neon
.type sha1_process_sha1_neon, %function
sha1_process_sha1_neon:
    vpush {q4,q5,q6,q7}
    push {r4,r5}

    // Calculate all Qs: load the previous hash (5 words x 2 blocks)
    mov tmp_ptr, sha1_hash
    vld1.u32 {reg_a1}, [tmp_ptr:128]!
    vld1.u32 {reg_b1}, [tmp_ptr:128]!
    vld1.u32 {reg_c1}, [tmp_ptr:128]!
    vld1.u32 {reg_d1}, [tmp_ptr:128]!
    vld1.u32 {reg_e1}, [tmp_ptr:128]!
    vld1.u32 {reg_a2}, [tmp_ptr:128]!
    vld1.u32 {reg_b2}, [tmp_ptr:128]!
    vld1.u32 {reg_c2}, [tmp_ptr:128]!
    vld1.u32 {reg_d2}, [tmp_ptr:128]!
    vld1.u32 {reg_e2}, [tmp_ptr:128]!
    // Q0: W[0] = rol1(h0 ^ h2), stored for both blocks (offsets 0 and 16)
    veor.u32 reg_a1, reg_a1, reg_c1
    veor.u32 reg_a2, reg_a2, reg_c2
    ROTATE_1(reg_a1, reg_a2, t1, t2)
    add tmp_ptr0, W, #((0+0*16)*REG_BYTE_SIZE)
    add tmp_ptr1, W, #((0+1*16)*REG_BYTE_SIZE)
    vst1.u32 {reg_a1}, [tmp_ptr0:128]
    vst1.u32 {reg_a2}, [tmp_ptr1:128]
    // Q1: W[1] = rol1(h1 ^ h3)
    veor.u32 reg_b1, reg_b1, reg_d1
    veor.u32 reg_b2, reg_b2, reg_d2
    ROTATE_1(reg_b1, reg_b2, t1, t2)
    add tmp_ptr0, W, #((1+0*16)*REG_BYTE_SIZE)
    add tmp_ptr1, W, #((1+1*16)*REG_BYTE_SIZE)
    vst1.u32 {reg_b1}, [tmp_ptr0:128]
    vst1.u32 {reg_b2}, [tmp_ptr1:128]
    // Q2: W[2] = rol1(h2 ^ h4 ^ 0x2A0); 0x2A0 is presumably the padded
    // message bit-length and 0x80000000 the SHA-1 padding bit — TODO confirm
    SET_REG(r5,0x000002A0)
    vdup.u32 t3, r5
    vmov.u32 t5, #0x80000000
    veor.u32 reg_c1, reg_c1, reg_e1
    veor.u32 reg_c2, reg_c2, reg_e2
    veor.u32 reg_c1, reg_c1, t3
    veor.u32 reg_c2, reg_c2, t3
    ROTATE_1(reg_c1, reg_c2, t1, t2)
    add tmp_ptr0, W, #((2+0*16)*REG_BYTE_SIZE)
    add tmp_ptr1, W, #((2+1*16)*REG_BYTE_SIZE)
    vst1.u32 {reg_c1}, [tmp_ptr0:128]
    vst1.u32 {reg_c2}, [tmp_ptr1:128]
    // Q3: W[3] = rol1(h3 ^ 0x80000000 ^ Q0)
    veor.u32 reg_d1, reg_d1, t5
    veor.u32 reg_d2, reg_d2, t5
    veor.u32 reg_d1, reg_d1, reg_a1
    veor.u32 reg_d2, reg_d2, reg_a2
    ROTATE_1(reg_d1, reg_d2, t1, t2)
    add tmp_ptr0, W, #((3+0*16)*REG_BYTE_SIZE)
    add tmp_ptr1, W, #((3+1*16)*REG_BYTE_SIZE)
    vst1.u32 {reg_d1}, [tmp_ptr0:128]
    vst1.u32 {reg_d2}, [tmp_ptr1:128]
    // Q4: W[4] = rol1(h4 ^ Q1)
    veor.u32 reg_e1, reg_e1, reg_b1
    veor.u32 reg_e2, reg_e2, reg_b2
    ROTATE_1(reg_e1, reg_e2, t1, t2)
    add tmp_ptr0, W, #((4+0*16)*REG_BYTE_SIZE)
    add tmp_ptr1, W, #((4+1*16)*REG_BYTE_SIZE)
    vst1.u32 {reg_e1}, [tmp_ptr0:128]
    vst1.u32 {reg_e2}, [tmp_ptr1:128]
    // Q5: W[5] = rol1(Q2 ^ 0x80000000); step_const reused here as scratch
    veor.u32 step_const, reg_c2, t5
    veor.u32 t5, reg_c1, t5
    ROTATE_1(t5, step_const, t1, t2)
    add tmp_ptr0, W, #((5+0*16)*REG_BYTE_SIZE)
    add tmp_ptr1, W, #((5+1*16)*REG_BYTE_SIZE)
    vst1.u32 {t5}, [tmp_ptr0:128]
    vst1.u32 {step_const}, [tmp_ptr1:128]
    // Q6: W[6] = rol1(Q3), rol1 done inline (shr31 | (x+x)) so reg_d* keep Q3
    vshr.u32 t3, reg_d1, #31
    vshr.u32 t4, reg_d2, #31
    vadd.u32 t1, reg_d1, reg_d1
    vadd.u32 t2, reg_d2, reg_d2
    vorr.u32 t3, t3, t1
    vorr.u32 t4, t4, t2
    add tmp_ptr0, W, #((6+0*16)*REG_BYTE_SIZE)
    add tmp_ptr1, W, #((6+1*16)*REG_BYTE_SIZE)
    vst1.u32 {t3}, [tmp_ptr0:128]
    vst1.u32 {t4}, [tmp_ptr1:128]
    // Q8: W[8] = rol1(Q5 ^ Q0)
    veor.u32 t5, t5, reg_a1
    veor.u32 step_const, step_const, reg_a2
    ROTATE_1(t5, step_const, t1, t2)
    add tmp_ptr0, W, #((8+0*16)*REG_BYTE_SIZE)
    add tmp_ptr1, W, #((8+1*16)*REG_BYTE_SIZE)
    vst1.u32 {t5}, [tmp_ptr0:128]
    vst1.u32 {step_const}, [tmp_ptr1:128]
    // Q11: W[11] = rol1(Q8 ^ Q3)
    veor.u32 reg_d1, t5, reg_d1
    veor.u32 reg_d2, step_const, reg_d2
    ROTATE_1(reg_d1, reg_d2, t1, t2)
    add tmp_ptr0, W, #((11+0*16)*REG_BYTE_SIZE)
    add tmp_ptr1, W, #((11+1*16)*REG_BYTE_SIZE)
    vst1.u32 {reg_d1}, [tmp_ptr0:128]
    vst1.u32 {reg_d2}, [tmp_ptr1:128]
    // Q14: W[14] = rol1(Q11 ^ Q6 ^ Q0); t5 reloaded with 0x2A0 for Q7
    SET_REG(r5,0x000002A0)
    vdup.u32 t5, r5
    veor.u32 reg_d1, reg_d1, t3
    veor.u32 reg_d2, reg_d2, t4
    veor.u32 reg_d1, reg_d1, reg_a1
    veor.u32 reg_d2, reg_d2, reg_a2
    ROTATE_1(reg_d1, reg_d2, t1, t2)
    add tmp_ptr0, W, #((14+0*16)*REG_BYTE_SIZE)
    add tmp_ptr1, W, #((14+1*16)*REG_BYTE_SIZE)
    vst1.u32 {reg_d1}, [tmp_ptr0:128]
    vst1.u32 {reg_d2}, [tmp_ptr1:128]
    // Q7: W[7] = rol1(Q4 ^ 0x2A0)
    veor.u32 reg_a1, reg_e1, t5
    veor.u32 reg_a2, reg_e2, t5
    ROTATE_1(reg_a1, reg_a2, t1, t2)
    add tmp_ptr0, W, #((7+0*16)*REG_BYTE_SIZE)
    add tmp_ptr1, W, #((7+1*16)*REG_BYTE_SIZE)
    vst1.u32 {reg_a1}, [tmp_ptr0:128]
    vst1.u32 {reg_a2}, [tmp_ptr1:128]
    // Q9: W[9] = rol1(Q6 ^ Q1)
    veor.u32 reg_d1, t3, reg_b1
    veor.u32 reg_d2, t4, reg_b2
    ROTATE_1(reg_d1, reg_d2, t1, t2)
    add tmp_ptr0, W, #((9+0*16)*REG_BYTE_SIZE)
    add tmp_ptr1, W, #((9+1*16)*REG_BYTE_SIZE)
    vst1.u32 {reg_d1}, [tmp_ptr0:128]
    vst1.u32 {reg_d2}, [tmp_ptr1:128]
    // Q10: W[10] = rol1(Q7 ^ Q2)
    veor.u32 reg_c1, reg_a1, reg_c1
    veor.u32 reg_c2, reg_a2, reg_c2
    ROTATE_1(reg_c1, reg_c2, t1, t2)
    add tmp_ptr0, W, #((10+0*16)*REG_BYTE_SIZE)
    add tmp_ptr1, W, #((10+1*16)*REG_BYTE_SIZE)
    vst1.u32 {reg_c1}, [tmp_ptr0:128]
    vst1.u32 {reg_c2}, [tmp_ptr1:128]
    // Q12: W[12] = rol1(Q9 ^ Q4)
    veor.u32 reg_e1, reg_d1, reg_e1
    veor.u32 reg_e2, reg_d2, reg_e2
    ROTATE_1(reg_e1, reg_e2, t1, t2)
    add tmp_ptr0, W, #((12+0*16)*REG_BYTE_SIZE)
    add tmp_ptr1, W, #((12+1*16)*REG_BYTE_SIZE)
    vst1.u32 {reg_e1}, [tmp_ptr0:128]
    vst1.u32 {reg_e2}, [tmp_ptr1:128]
    // Q13: W[13] = rol1(Q10 ^ W[5] ^ 0x2A0)
    add tmp_ptr0, W, #((5+0*16)*REG_BYTE_SIZE)
    add tmp_ptr1, W, #((5+1*16)*REG_BYTE_SIZE)
    vld1.u32 {t1}, [tmp_ptr0:128]
    vld1.u32 {t2}, [tmp_ptr1:128]
    veor.u32 reg_c1, reg_c1, t1
    veor.u32 reg_c2, reg_c2, t2
    veor.u32 reg_c1, reg_c1, t5
    veor.u32 reg_c2, reg_c2, t5
    ROTATE_1(reg_c1, reg_c2, t1, t2)
    add tmp_ptr0, W, #((13+0*16)*REG_BYTE_SIZE)
    add tmp_ptr1, W, #((13+1*16)*REG_BYTE_SIZE)
    vst1.u32 {reg_c1}, [tmp_ptr0:128]
    vst1.u32 {reg_c2}, [tmp_ptr1:128]
    // Q15: W[15] = rol1(0x2A0 ^ Q1 ^ Q12 ^ Q7)
    veor.u32 step_const, t5, reg_b2
    veor.u32 t5, t5, reg_b1
    veor.u32 reg_e1, reg_e1, reg_a1
    veor.u32 reg_e2, reg_e2, reg_a2
    veor.u32 t5, t5, reg_e1
    veor.u32 step_const, step_const, reg_e2
    ROTATE_1(t5, step_const, t1, t2)
    add tmp_ptr0, W, #((15+0*16)*REG_BYTE_SIZE)
    add tmp_ptr1, W, #((15+1*16)*REG_BYTE_SIZE)
    vst1.u32 {t5}, [tmp_ptr0:128]
    vst1.u32 {step_const}, [tmp_ptr1:128]

    // Load state: SHA-1 a..e for both interleaved blocks
    mov tmp_ptr, dcc2_state
    vld1.u32 {reg_a1}, [tmp_ptr:128]!
    vld1.u32 {reg_b1}, [tmp_ptr:128]!
    vld1.u32 {reg_c1}, [tmp_ptr:128]!
    vld1.u32 {reg_d1}, [tmp_ptr:128]!
    vld1.u32 {reg_e1}, [tmp_ptr:128]!
    vld1.u32 {reg_a2}, [tmp_ptr:128]!
    vld1.u32 {reg_b2}, [tmp_ptr:128]!
    vld1.u32 {reg_c2}, [tmp_ptr:128]!
    vld1.u32 {reg_d2}, [tmp_ptr:128]!
    vld1.u32 {reg_e2}, [tmp_ptr:128]!
    // SHA-1 rounds 0-19 (K1 = 0x5a827999, F = Ch)
    SET_REG(r5,0x5a827999)
    vdup.u32 step_const, r5
    // Step1: first 5 rounds add hash words 0..4 (plus fixed words 5..9)
    DCC2_ADD(reg_e1, reg_e2, sha1_hash, 0, 5)
    DCC2_STEP1(reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4,step_const)
    DCC2_ADD(reg_d1, reg_d2, sha1_hash, 1, 6)
    DCC2_STEP1(reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4,step_const)
    DCC2_ADD(reg_c1, reg_c2, sha1_hash, 2, 7)
    DCC2_STEP1(reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4,step_const)
    DCC2_ADD(reg_b1, reg_b2, sha1_hash, 3, 8)
    DCC2_STEP1(reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4,step_const)
    DCC2_ADD(reg_a1, reg_a2, sha1_hash, 4, 9)
    DCC2_STEP1(reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4,step_const)
    // Message word 5 is the constant padding bit 0x80000000
    vmov.u32 t1, #0x80000000
    vadd.u32 reg_e1, reg_e1, t1
    vadd.u32 reg_e2, reg_e2, t1
    DCC2_STEP1(reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4,step_const)
    DCC2_STEP1(reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4,step_const)
    DCC2_STEP1(reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4,step_const)
    DCC2_STEP1(reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4,step_const)
    DCC2_STEP1(reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4,step_const)
    DCC2_STEP1(reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4,step_const)
    DCC2_STEP1(reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4,step_const)
    DCC2_STEP1(reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4,step_const)
    DCC2_STEP1(reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4,step_const)
    DCC2_STEP1(reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4,step_const)
    // Message word 15 is the constant 0x2A0 (padded length in bits — TODO confirm)
    SET_REG(r5,0x000002A0)
    vdup.u32 t1, r5
    vadd.u32 reg_e1, reg_e1, t1
    vadd.u32 reg_e2, reg_e2, t1
    DCC2_STEP1(reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4,step_const)
    // Rounds 16-19 use the precomputed expanded words W[0..3] / W[16..19]
    DCC2_ADD(reg_d1, reg_d2, W, 0, 16)
    DCC2_STEP1(reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4,step_const)
    DCC2_ADD(reg_c1, reg_c2, W, 1, 17)
    DCC2_STEP1(reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4,step_const)
    DCC2_ADD(reg_b1, reg_b2, W, 2, 18)
    DCC2_STEP1(reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4,step_const)
    DCC2_ADD(reg_a1, reg_a2, W, 3, 19)
    DCC2_STEP1(reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4,step_const)

    // Step2: rounds 20-39 (K2 = 0x6ed9eba1, F = parity); rounds 20-31 use
    // precomputed W, rounds 32-39 expand the schedule in place (DCC2_STEP4)
    SET_REG(r5,0x6ed9eba1)
    vdup.u32 step_const, r5
    DCC2_STEP2(step_const, reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 4 )
    DCC2_STEP2(step_const, reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4, 5 )
    DCC2_STEP2(step_const, reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4, 6 )
    DCC2_STEP2(step_const, reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4, 7 )
    DCC2_STEP2(step_const, reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4, 8 )
    DCC2_STEP2(step_const, reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 9 )
    DCC2_STEP2(step_const, reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4, 10)
    DCC2_STEP2(step_const, reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4, 11)
    DCC2_STEP2(step_const, reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4, 12)
    DCC2_STEP2(step_const, reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4, 13)
    DCC2_STEP2(step_const, reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 14)
    DCC2_STEP2(step_const, reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4, 15)
    DCC2_STEP4(step_const, reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4, 0, 13, 8 , 2)
    DCC2_STEP4(step_const, reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4, 1, 14, 9 , 3)
    DCC2_STEP4(step_const, reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4, 2, 15, 10, 4)
    DCC2_STEP4(step_const, reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 3, 0 , 11, 5)
    DCC2_STEP4(step_const, reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4, 4, 1 , 12, 6)
    DCC2_STEP4(step_const, reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4, 5, 2 , 13, 7)
    DCC2_STEP4(step_const, reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4, 6, 3 , 14, 8)
    DCC2_STEP4(step_const, reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4, 7, 4 , 15, 9)

    // Step3: rounds 40-59 (K3 = 0x8F1BBCDC, F = Maj)
    SET_REG(r5,0x8F1BBCDC)
    vdup.u32 step_const, r5
    DCC2_STEP3(step_const, reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 8 , 5 , 0 , 10)
    DCC2_STEP3(step_const, reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4, 9 , 6 , 1 , 11)
    DCC2_STEP3(step_const, reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4, 10, 7 , 2 , 12)
    DCC2_STEP3(step_const, reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4, 11, 8 , 3 , 13)
    DCC2_STEP3(step_const, reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4, 12, 9 , 4 , 14)
    DCC2_STEP3(step_const, reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 13, 10, 5 , 15)
    DCC2_STEP3(step_const, reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4, 14, 11, 6 , 0 )
    DCC2_STEP3(step_const, reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4, 15, 12, 7 , 1 )
    DCC2_STEP3(step_const, reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4, 0 , 13, 8 , 2 )
    DCC2_STEP3(step_const, reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4, 1 , 14, 9 , 3 )
    DCC2_STEP3(step_const, reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 2 , 15, 10, 4 )
    DCC2_STEP3(step_const, reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4, 3 , 0 , 11, 5 )
    DCC2_STEP3(step_const, reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4, 4 , 1 , 12, 6 )
    DCC2_STEP3(step_const, reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4, 5 , 2 , 13, 7 )
    DCC2_STEP3(step_const, reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4, 6 , 3 , 14, 8 )
    DCC2_STEP3(step_const, reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 7 , 4 , 15, 9 )
    DCC2_STEP3(step_const, reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4, 8 , 5 , 0 , 10)
    DCC2_STEP3(step_const, reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4, 9 , 6 , 1 , 11)
    DCC2_STEP3(step_const, reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4, 10, 7 , 2 , 12)
    DCC2_STEP3(step_const, reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4, 11, 8 , 3 , 13)

    // Step4: rounds 60-79 (K4 = 0xCA62C1D6, F = parity)
    SET_REG(r5,0xCA62C1D6)
    vdup.u32 step_const, r5
    DCC2_STEP4(step_const, reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 12, 9 , 4 , 14)
    DCC2_STEP4(step_const, reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4, 13, 10, 5 , 15)
    DCC2_STEP4(step_const, reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4, 14, 11, 6 , 0 )
    DCC2_STEP4(step_const, reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4, 15, 12, 7 , 1 )
    DCC2_STEP4(step_const, reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4, 0 , 13, 8 , 2 )
    DCC2_STEP4(step_const, reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 1 , 14, 9 , 3 )
    DCC2_STEP4(step_const, reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4, 2 , 15, 10, 4 )
    DCC2_STEP4(step_const, reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4, 3 , 0 , 11, 5 )
    DCC2_STEP4(step_const, reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4, 4 , 1 , 12, 6 )
    DCC2_STEP4(step_const, reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4, 5 , 2 , 13, 7 )
    DCC2_STEP4(step_const, reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 6 , 3 , 14, 8 )
    DCC2_STEP4(step_const, reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4, 7 , 4 , 15, 9 )
    DCC2_STEP4(step_const, reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4, 8 , 5 , 0 , 10)
    DCC2_STEP4(step_const, reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4, 9 , 6 , 1 , 11)
    DCC2_STEP4(step_const, reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4, 10, 7 , 2 , 12)
    DCC2_STEP4(step_const, reg_e1, reg_a1, reg_b1, reg_c1, reg_d1, reg_e2, reg_a2, reg_b2, reg_c2, reg_d2, t1, t2, t3, t4, 11, 8 , 3 , 13)
    DCC2_STEP4(step_const, reg_d1, reg_e1, reg_a1, reg_b1, reg_c1, reg_d2, reg_e2, reg_a2, reg_b2, reg_c2, t1, t2, t3, t4, 12, 9 , 4 , 14)
    DCC2_STEP4(step_const, reg_c1, reg_d1, reg_e1, reg_a1, reg_b1, reg_c2, reg_d2, reg_e2, reg_a2, reg_b2, t1, t2, t3, t4, 13, 10, 5 , 15)
    DCC2_STEP4(step_const, reg_b1, reg_c1, reg_d1, reg_e1, reg_a1, reg_b2, reg_c2, reg_d2, reg_e2, reg_a2, t1, t2, t3, t4, 14, 11, 6 , 0 )
    DCC2_STEP4(step_const, reg_a1, reg_b1, reg_c1, reg_d1, reg_e1, reg_a2, reg_b2, reg_c2, reg_d2, reg_e2, t1, t2, t3, t4, 15, 12, 7 , 1 )

    // Feed-forward: add the initial state (from dcc2_state) back into a..e
    mov tmp_ptr, dcc2_state
    vld1.u32 {t1,t2}, [tmp_ptr:128]!
    vld1.u32 {t3,t4}, [tmp_ptr:128]!
    vadd.u32 reg_a1, reg_a1, t1
    vadd.u32 reg_b1, reg_b1, t2
    vadd.u32 reg_c1, reg_c1, t3
    vadd.u32 reg_d1, reg_d1, t4
    vld1.u32 {t1,t2}, [tmp_ptr:128]!
    vld1.u32 {t3,t4}, [tmp_ptr:128]!
    vadd.u32 reg_e1, reg_e1, t1
    vadd.u32 reg_a2, reg_a2, t2
    vadd.u32 reg_b2, reg_b2, t3
    vadd.u32 reg_c2, reg_c2, t4
    vld1.u32 {t1,t2}, [tmp_ptr:128]!
    vadd.u32 reg_d2, reg_d2, t1
    vadd.u32 reg_e2, reg_e2, t2

    // Store the new hash (becomes the input of the next iteration)
    mov tmp_ptr, sha1_hash
    vst1.u32 {reg_a1}, [tmp_ptr:128]!
    vst1.u32 {reg_b1}, [tmp_ptr:128]!
    vst1.u32 {reg_c1}, [tmp_ptr:128]!
    vst1.u32 {reg_d1}, [tmp_ptr:128]!
    vst1.u32 {reg_e1}, [tmp_ptr:128]!
    vst1.u32 {reg_a2}, [tmp_ptr:128]!
    vst1.u32 {reg_b2}, [tmp_ptr:128]!
    vst1.u32 {reg_c2}, [tmp_ptr:128]!
    vst1.u32 {reg_d2}, [tmp_ptr:128]!
    vst1.u32 {reg_e2}, [tmp_ptr:128]!
pop {r4,r5} vpop {q4,q5,q6,q7} bx lr //////////////////////////////////////////////////////////////////////////////////////////////////////////////// // SHA256 format //////////////////////////////////////////////////////////////////////////////////////////////////////////////// #define SHA256_ROUND1(reg_h,reg_e,reg_g,reg_f,const,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7) \ MD5_LOAD_CONST(t7,const);\ \ vshr.u32 t0, reg_e, #(32-26);\ vshr.u32 t1, reg_e, #(32-21);\ vshr.u32 t2, reg_e, #(32-7);\ \ vmov t6, reg_e;\ vbsl t6, reg_f, reg_g;\ \ vshl.u32 t3, reg_e, #26;\ vshl.u32 t4, reg_e, #21;\ vshl.u32 t5, reg_e, #7;\ \ vorr t0, t3, t0;\ vorr t1, t4, t1;\ vorr t2, t5, t2;\ vadd.u32 reg_h,reg_h,t7;\ \ veor.u32 t0,t0,t1;\ vadd.u32 reg_h,reg_h,t6;\ veor.u32 t0,t0,t2;\ \ vadd.u32 reg_h,reg_h,t0;\ \ vadd.u32 reg_d,reg_h,reg_d;\ \ vshr.u32 t0, reg_a, #(32-30);\ vshr.u32 t1, reg_a, #(32-19);\ vshr.u32 t2, reg_a, #(32-10);\ \ vshl.u32 t3, reg_a, #30;\ vshl.u32 t4, reg_a, #19;\ vshl.u32 t5, reg_a, #10;\ \ veor t6, reg_b, reg_c;\ vbsl t6, reg_a, reg_b;\ \ vorr t0, t3, t0;\ vorr t1, t4, t1;\ vorr t2, t5, t2;\ veor.u32 t0,t0,t1;\ veor.u32 t0,t0,t2;\ vadd.u32 reg_h,reg_h,t6;\ vadd.u32 reg_h,reg_h,t0; #define SHA256_ROUNDW(reg_h,reg_e,reg_g,reg_f,const,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7,w_index0,w_index1,w_index_r1,w_index_sum,w_index_r0,NT_NUM_KEYS) \ SHA256_RW_1SUM0_SAME(w_index0,w_index_r1,w_index_sum,w_index_r0,t0,t1,t2,t3,t4,t5,t6,t7,NT_NUM_KEYS);\ vadd.u32 reg_h,reg_h,t6;\ SHA256_ROUND1(reg_h,reg_e,reg_g,reg_f,const,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7); #define SHA256_ROUNDW_316(reg_h,reg_e,reg_g,reg_f,const,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7,w_index0,w_index1,w_index_r1,w_index_sum,w_index_r0,NT_NUM_KEYS) \ SHA256_RW_1SUM0_316(w_index0,w_index_r1,w_index_sum,w_index_r0,t0,t1,t2,t3,t4,t5,t6,t7,NT_NUM_KEYS);\ vadd.u32 reg_h,reg_h,t6;\ SHA256_ROUND1(reg_h,reg_e,reg_g,reg_f,const,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7); #define 
SHA256_ROUNDW_416(reg_h,reg_e,reg_g,reg_f,const,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7,w_index0,w_index1,w_index_r1,w_index_sum,w_index_r0,NT_NUM_KEYS) \
    SHA256_RW_1SUM0_416(w_index0,w_index_r1,w_index_sum,w_index_r0,t0,t1,t2,t3,t4,t5,t6,t7,NT_NUM_KEYS);\
    vadd.u32 reg_h,reg_h,t6;\
    SHA256_ROUND1(reg_h,reg_e,reg_g,reg_f,const,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7);\

// Round + expansion, SHA256_RW_1SUM01416 variant (sigma0 term and the W[i]
// self-term omitted — positions where those inputs are zero/constant for the
// fixed-length NT input; confirm).
#define SHA256_ROUNDW1416(reg_h,reg_e,reg_g,reg_f,const,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7,w_index0,w_index1,w_index_r1,w_index_sum,w_index_r0,NT_NUM_KEYS) \
    SHA256_RW_1SUM01416(w_index0,w_index1,w_index_r1,w_index_sum,w_index_r0,t0,t1,t2,t3,t4,t5,t6,t7,NT_NUM_KEYS);\
    vadd.u32 reg_h,reg_h,t6;\
    SHA256_ROUND1(reg_h,reg_e,reg_g,reg_f,const,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7);\

// Round + expansion, SHA256_RW_1SUM0_116 variant (W[i] self-term omitted).
#define SHA256_ROUNDW_116(reg_h,reg_e,reg_g,reg_f,const,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7,w_index0,w_index1,w_index_r1,w_index_sum,w_index_r0,NT_NUM_KEYS) \
    SHA256_RW_1SUM0_116(w_index0,w_index1,w_index_r1,w_index_sum,w_index_r0,t0,t1,t2,t3,t4,t5,t6,t7,NT_NUM_KEYS);\
    vadd.u32 reg_h,reg_h,t6;\
    SHA256_ROUND1(reg_h,reg_e,reg_g,reg_f,const,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7);\

// SHA-256 message-schedule expansion helpers.  All read/write the W area
// that lives 8 q-rows (#(8+i)*4*NT_NUM_KEYS bytes) into nt_buffer_base.
//   sigma1(x) = ror17 ^ ror19 ^ shr10  (built as rol15 | rol13 | shr10)
//   sigma0(x) = ror7  ^ ror18 ^ shr3   (built as rol25 | rol14 | shr3)
// Result is left in t6 AND stored back into W[r_index].
// _316: t6 = sigma1(W[r1]) + sigma0(W[r0]) + W[r_index]  (rsum term unused).
// NOTE(review): nt_buffer / nt_buffer_base are register aliases defined
// earlier in the file — confirm.
#define SHA256_RW_1SUM0_316(r_index,r1_index,rsum1_index,r0_index,t0,t1,t2,t3,t4,t5,t6,t7,NT_NUM_KEYS) \
    add nt_buffer,nt_buffer_base,#((8+r1_index)*4*NT_NUM_KEYS);\
    vld1.u32 {t6},[nt_buffer:128];\
    add nt_buffer,nt_buffer_base,#((8+r0_index)*4*NT_NUM_KEYS);\
    vld1.u32 {t7},[nt_buffer:128];\
    \
    vshr.u32 t0, t6, #(32-15);\
    vshr.u32 t1, t6, #(32-13);\
    vshr.u32 t2, t6, #10;\
    \
    vshl.u32 t3, t6, #15;\
    vshl.u32 t6, t6, #13;\
    vshr.u32 t4, t7, #(32-25);\
    vshr.u32 t5, t7, #(32-14);\
    \
    vorr t0, t3, t0;\
    vorr t6, t6, t1;\
    \
    vshr.u32 t1, t7, #3;\
    vshl.u32 t3, t7, #25;\
    \
    veor t6,t0,t6;\
    vshl.u32 t7, t7, #14;\
    vorr t4, t3, t4;\
    vorr t7, t7, t5;\
    \
    veor t6,t6,t2;\
    veor t7,t4,t7;\
    \
    veor t7,t7,t1;\
    vadd.u32 t6,t7,t6;\
    \
    add nt_buffer, nt_buffer_base, #((8+r_index)*4*NT_NUM_KEYS);\
    vld1.u32 {t0},[nt_buffer:128];\
    vadd.u32 t6,t6,t0;\
    vst1.u32 {t6},[nt_buffer:128];\

// _416: t6 = sigma1(W[r1]) + W[rsum1] + W[r_index]  (no sigma0 term).
#define SHA256_RW_1SUM0_416(r_index,r1_index,rsum1_index,r0_index,t0,t1,t2,t3,t4,t5,t6,t7,NT_NUM_KEYS) \
    add nt_buffer,nt_buffer_base,#((8+r1_index)*4*NT_NUM_KEYS);\
    vld1.u32 {t6},[nt_buffer:128];\
    \
    vshr.u32 t0, t6, #(32-15);\
    vshr.u32 t1, t6, #(32-13);\
    vshr.u32 t2, t6, #10;\
    \
    vshl.u32 t3, t6, #15;\
    vshl.u32 t6, t6, #13;\
    \
    vorr t0, t3, t0;\
    vorr t6, t6, t1;\
    \
    veor t6,t0,t6;\
    veor t6,t6,t2;\
    \
    add nt_buffer,nt_buffer_base,#((8+rsum1_index)*4*NT_NUM_KEYS);\
    vld1.u32 {t0},[nt_buffer:128];\
    vadd.u32 t6,t6,t0;\
    add nt_buffer,nt_buffer_base,#((8+r_index)*4*NT_NUM_KEYS);\
    vld1.u32 {t2},[nt_buffer:128];\
    vadd.u32 t6,t6,t2;\
    \
    vst1.u32 {t6},[nt_buffer:128];\

// 1416: t6 = sigma1(W[r1]) + W[rsum1]  (no sigma0 term and no W[i] self-term).
#define SHA256_RW_1SUM01416(r_index,rsum0_index,r1_index,rsum1_index,r0_index,t0,t1,t2,t3,t4,t5,t6,t7,NT_NUM_KEYS) \
    add nt_buffer,nt_buffer_base,#((8+r1_index)*4*NT_NUM_KEYS);\
    vld1.u32 {t6},[nt_buffer:128];\
    \
    vshr.u32 t0, t6, #(32-15);\
    vshr.u32 t1, t6, #(32-13);\
    vshr.u32 t2, t6, #10;\
    \
    vshl.u32 t3, t6, #15;\
    vshl.u32 t6, t6, #13;\
    \
    vorr t0, t3, t0;\
    vorr t6, t6, t1;\
    \
    veor t6,t0,t6;\
    veor t6,t6,t2;\
    \
    add nt_buffer,nt_buffer_base,#((8+rsum1_index)*4*NT_NUM_KEYS);\
    vld1.u32 {t0},[nt_buffer:128];\
    vadd.u32 t6,t6,t0;\
    add nt_buffer,nt_buffer_base,#((8+r_index)*4*NT_NUM_KEYS);\
    vst1.u32 {t6},[nt_buffer:128];\

// _116: t6 = sigma1(W[r1]) + W[rsum1] + sigma0(W[r0])  (no W[i] self-term).
#define SHA256_RW_1SUM0_116(r_index,rsum0_index,r1_index,rsum1_index,r0_index,t0,t1,t2,t3,t4,t5,t6,t7,NT_NUM_KEYS) \
    add nt_buffer,nt_buffer_base,#((8+r1_index)*4*NT_NUM_KEYS);\
    vld1.u32 {t6},[nt_buffer:128];\
    add nt_buffer,nt_buffer_base,#((8+r0_index)*4*NT_NUM_KEYS);\
    vld1.u32 {t7},[nt_buffer:128];\
    \
    vshr.u32 t0, t6, #(32-15);\
    vshr.u32 t1, t6, #(32-13);\
    vshr.u32 t2, t6, #10;\
    \
    vshl.u32 t3, t6, #15;\
    vshl.u32 t6, t6, #13;\
    vshr.u32 t4, t7, #(32-25);\
    vshr.u32 t5, t7, #(32-14);\
    \
    vorr t0, t3, t0;\
    vorr t6, t6, t1;\
    vshr.u32 t1, t7, #3;\
    vshl.u32 t3, t7, #25;\
    \
    veor t6,t0,t6;\
    vshl.u32 t7, t7, #14;\
    vorr t4, t3, t4;\
    vorr t7, t7, t5;\
    veor t6,t6,t2;\
    \
    veor t7,t4,t7;\
    \
    add nt_buffer,nt_buffer_base,#((8+rsum1_index)*4*NT_NUM_KEYS);\
    vld1.u32 {t0},[nt_buffer:128];\
    vadd.u32 t6,t6,t0;\
    veor t7,t7,t1;\
    vadd.u32 t6,t7,t6;\
    \
    add nt_buffer,nt_buffer_base,#((8+r_index)*4*NT_NUM_KEYS);\
    vst1.u32 {t6},[nt_buffer:128];\

// _SAME: full expansion t6 = sigma1(W[r1]) + W[rsum1] + sigma0(W[r0]) + W[i],
// result written back to the same W[r_index] slot.
#define SHA256_RW_1SUM0_SAME(r_index,r1_index,rsum1_index,r0_index,t0,t1,t2,t3,t4,t5,t6,t7,NT_NUM_KEYS) \
    add nt_buffer,nt_buffer_base,#((8+r1_index)*4*NT_NUM_KEYS);\
    vld1.u32 {t6},[nt_buffer:128];\
    add nt_buffer,nt_buffer_base,#((8+r0_index)*4*NT_NUM_KEYS);\
    vld1.u32 {t7},[nt_buffer:128];\
    \
    vshr.u32 t0, t6, #(32-15);\
    vshr.u32 t1, t6, #(32-13);\
    vshr.u32 t2, t6, #10;\
    \
    vshl.u32 t3, t6, #15;\
    vshl.u32 t6, t6, #13;\
    vshr.u32 t4, t7, #(32-25);\
    vshr.u32 t5, t7, #(32-14);\
    \
    vorr t0, t3, t0;\
    vorr t6, t6, t1;\
    vshr.u32 t1, t7, #3;\
    vshl.u32 t3, t7, #25;\
    \
    veor t6,t0,t6;\
    vshl.u32 t7, t7, #14;\
    vorr t4, t3, t4;\
    vorr t7, t7, t5;\
    veor t6,t6,t2;\
    \
    veor t7,t4,t7;\
    add nt_buffer,nt_buffer_base,#((8+rsum1_index)*4*NT_NUM_KEYS);\
    vld1.u32 {t0},[nt_buffer:128];\
    vadd.u32 t6,t6,t0;\
    \
    veor t7,t7,t1;\
    vadd.u32 t6,t7,t6;\
    add nt_buffer,nt_buffer_base,#((8+r_index)*4*NT_NUM_KEYS);\
    vld1.u32 {t2},[nt_buffer:128];\
    vadd.u32 t6,t6,t2;\
    vst1.u32 {t6},[nt_buffer:128];\

// Full expansion storing to a *different* slot than it reads the self-term
// from (rsum0 vs r_index).  (Continues on the next line of the file.)
#define SHA256_RW_1SUM0(r_index,rsum0_index,r1_index,rsum1_index,r0_index,t0,t1,t2,t3,t4,t5,t6,t7,NT_NUM_KEYS) \
    add nt_buffer,nt_buffer_base,#((8+r1_index)*4*NT_NUM_KEYS);\
    vld1.u32 {t6},[nt_buffer:128];\
    add nt_buffer,nt_buffer_base,#((8+r0_index)*4*NT_NUM_KEYS);\
    vld1.u32 {t7},[nt_buffer:128];\
    \
    vshr.u32 t0, t6, #(32-15);\
    vshr.u32 t1, t6, #(32-13);\
    vshr.u32 t2, t6, #10;\
    \
    vshl.u32 t3, t6, #15;\
    vshl.u32 t6, t6, #13;\
    vshr.u32 t4, t7, #(32-25);\
    vshr.u32 t5, t7, #(32-14);\
    \
    vorr t0, t3, t0;\
    vorr t6, t6, t1;\
    vshr.u32 t1, t7, #3;\
    vshl.u32 t3, t7, #25;\
    \
    veor t6,t0,t6;\
    vshl.u32 t7, t7, #14;\
    vorr t4, t3, t4;\
    vorr t7, t7, t5;\
    veor t6,t6,t2;\
    \
    veor t7,t4,t7;\
    add nt_buffer,nt_buffer_base,#((8+rsum1_index)*4*NT_NUM_KEYS);\
    vld1.u32 
{t0},[nt_buffer:128];\ vadd.u32 t6,t6,t0;\ \ veor t7,t7,t1;\ vadd.u32 t6,t7,t6;\ add nt_buffer,nt_buffer_base,#((8+rsum0_index)*4*NT_NUM_KEYS);\ vld1.u32 {t2},[nt_buffer:128];\ vadd.u32 t6,t6,t2;\ \ add nt_buffer,nt_buffer_base,#((8+r_index)*4*NT_NUM_KEYS);\ vst1.u32 {t6},[nt_buffer:128]; #define CRYPT_SHA256_AVX_KERNEL_ASM_BODY(NT_NUM_KEYS,reg_a,reg_b,reg_c,reg_d,reg_e,reg_f,reg_g,reg_h,t0,t1,t2,t3,t4,t5,t6,step_const) \ mov i, #0;\ sha256_while1:\ /* Convert to Big-Endian*/\ /* Load */\ mov nt_buffer, nt_buffer_base; vld1.u32 {reg_h}, [nt_buffer:128];\ add nt_buffer, nt_buffer, #1*4*NT_NUM_KEYS;vld1.u32 {reg_g}, [nt_buffer:128];\ add nt_buffer, nt_buffer, #1*4*NT_NUM_KEYS;vld1.u32 {t2 }, [nt_buffer:128];\ add nt_buffer, nt_buffer, #1*4*NT_NUM_KEYS;vld1.u32 {t3 }, [nt_buffer:128];\ add nt_buffer, nt_buffer, #1*4*NT_NUM_KEYS;vld1.u32 {t4 }, [nt_buffer:128];\ add nt_buffer, nt_buffer, #1*4*NT_NUM_KEYS;vld1.u32 {t5 }, [nt_buffer:128];\ add nt_buffer, nt_buffer, #1*4*NT_NUM_KEYS;vld1.u32 {t6 }, [nt_buffer:128];\ add nt_buffer, nt_buffer, #1*4*NT_NUM_KEYS;vld1.u32 {t1 }, [nt_buffer:128];\ /* Shuffle bytes*/\ vrev32.u8 reg_h, reg_h;\ vrev32.u8 reg_g, reg_g;\ vrev32.u8 t2 , t2 ;\ vrev32.u8 t3 , t3 ;\ vrev32.u8 t4 , t4 ;\ vrev32.u8 t5 , t5 ;\ vrev32.u8 t6 , t6 ;\ /* Write to W*/\ add nt_buffer, nt_buffer_base, #((8+0 )*4*NT_NUM_KEYS);vst1.u32 {reg_h}, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #((8+1 )*4*NT_NUM_KEYS);vst1.u32 {reg_g}, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #((8+2 )*4*NT_NUM_KEYS);vst1.u32 {t2 }, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #((8+3 )*4*NT_NUM_KEYS);vst1.u32 {t3 }, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #((8+4 )*4*NT_NUM_KEYS);vst1.u32 {t4 }, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #((8+5 )*4*NT_NUM_KEYS);vst1.u32 {t5 }, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #((8+6 )*4*NT_NUM_KEYS);vst1.u32 {t6 }, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #((8+15)*4*NT_NUM_KEYS);vst1.u32 {t1 }, 
[nt_buffer:128];\ \ /*Step 1: H = 0xfc08884d + W[0 * NT_NUM_KEYS]; D=0x9cbf5a55+H;*/\ MD5_LOAD_CONST(step_const,0xfc08884d);\ MD5_LOAD_CONST(t6 ,0x9cbf5a55);\ MD5_LOAD_CONST(reg_a,0x6a09e667);\ MD5_LOAD_CONST(reg_b,0xbb67ae85);\ vadd.u32 reg_h, reg_h, step_const;\ MD5_LOAD_CONST(reg_c,0x3c6ef372);\ MD5_LOAD_CONST(reg_e,0x510e527f);\ MD5_LOAD_CONST(reg_f,0x9b05688c);\ vadd.u32 reg_d, reg_h, t6;\ \ SHA256_ROUND1(reg_g,reg_d,reg_f,reg_e,0x90bb1e3c ,reg_c,reg_h,reg_a,reg_b,t0,t1,t2,t3,t4,t5,t6,step_const);\ add nt_buffer, nt_buffer_base, #((8+2)*4*NT_NUM_KEYS);\ vld1.u32 {reg_f}, [nt_buffer:128];\ SHA256_ROUND1(reg_f,reg_c,reg_e,reg_d,0x50c6645b ,reg_b,reg_g,reg_h,reg_a,t0,t1,t2,t3,t4,t5,t6,step_const);\ add nt_buffer, nt_buffer_base, #((8+3)*4*NT_NUM_KEYS);\ vld1.u32 {reg_e}, [nt_buffer:128];\ SHA256_ROUND1(reg_e,reg_b,reg_d,reg_c,0x3ac42e24 ,reg_a,reg_f,reg_g,reg_h,t0,t1,t2,t3,t4,t5,t6,step_const);\ add nt_buffer, nt_buffer_base, #((8+4)*4*NT_NUM_KEYS);\ vld1.u32 {t0}, [nt_buffer:128];\ vadd.u32 reg_d,reg_d,t0;\ SHA256_ROUND1(reg_d,reg_a,reg_c,reg_b,0x3956C25B ,reg_h,reg_e,reg_f,reg_g,t0,t1,t2,t3,t4,t5,t6,step_const);\ add nt_buffer, nt_buffer_base, #((8+5)*4*NT_NUM_KEYS);\ vld1.u32 {t0}, [nt_buffer:128];\ vadd.u32 reg_c,reg_c,t0;\ SHA256_ROUND1(reg_c,reg_h,reg_b,reg_a,0x59F111F1 ,reg_g,reg_d,reg_e,reg_f,t0,t1,t2,t3,t4,t5,t6,step_const);\ add nt_buffer, nt_buffer_base, #((8+6)*4*NT_NUM_KEYS);\ vld1.u32 {t0}, [nt_buffer:128];\ vadd.u32 reg_b,reg_b,t0;\ SHA256_ROUND1(reg_b,reg_g,reg_a,reg_h,0x923F82A4,reg_f,reg_c,reg_d,reg_e,t0,t1,t2,t3,t4,t5,t6,step_const);\ SHA256_ROUND1(reg_a,reg_f,reg_h,reg_g,0xAB1C5ED5,reg_e,reg_b,reg_c,reg_d,t0,t1,t2,t3,t4,t5,t6,step_const);\ SHA256_ROUND1(reg_h,reg_e,reg_g,reg_f,0xD807AA98,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,step_const);\ SHA256_ROUND1(reg_g,reg_d,reg_f,reg_e,0x12835B01,reg_c,reg_h,reg_a,reg_b,t0,t1,t2,t3,t4,t5,t6,step_const);\ 
SHA256_ROUND1(reg_f,reg_c,reg_e,reg_d,0x243185BE,reg_b,reg_g,reg_h,reg_a,t0,t1,t2,t3,t4,t5,t6,step_const);\ SHA256_ROUND1(reg_e,reg_b,reg_d,reg_c,0x550C7DC3,reg_a,reg_f,reg_g,reg_h,t0,t1,t2,t3,t4,t5,t6,step_const);\ SHA256_ROUND1(reg_d,reg_a,reg_c,reg_b,0x72BE5D74,reg_h,reg_e,reg_f,reg_g,t0,t1,t2,t3,t4,t5,t6,step_const);\ SHA256_ROUND1(reg_c,reg_h,reg_b,reg_a,0x80DEB1FE,reg_g,reg_d,reg_e,reg_f,t0,t1,t2,t3,t4,t5,t6,step_const);\ SHA256_ROUND1(reg_b,reg_g,reg_a,reg_h,0x9BDC06A7,reg_f,reg_c,reg_d,reg_e,t0,t1,t2,t3,t4,t5,t6,step_const);\ add nt_buffer, nt_buffer_base, #(7*4*NT_NUM_KEYS);\ vld1.u32 {t0}, [nt_buffer:128];\ vadd.u32 reg_a,reg_a,t0;\ SHA256_ROUND1(reg_a,reg_f,reg_h,reg_g,0xC19BF174,reg_e,reg_b,reg_c,reg_d,t0,t1,t2,t3,t4,t5,t6,step_const);\ \ add nt_buffer, nt_buffer_base, #((8+1)*4*NT_NUM_KEYS);\ vld1.u32 {t5}, [nt_buffer:128];\ vshr.u32 t0, t5, #(32-25);\ vshr.u32 t1, t5, #(32-14);\ vshr.u32 t2, t5, #3;\ vshl.u32 t3, t5, #25;\ vshl.u32 t4, t5, #14;\ vorr t0, t3, t0;\ vorr t1, t4, t1;\ veor t0, t0, t1;\ veor t0, t0, t2;\ add nt_buffer, nt_buffer_base, #((8+0)*4*NT_NUM_KEYS);\ vld1.u32 {t6}, [nt_buffer:128];\ vadd.u32 t0, t0, t6;\ vst1.u32 {t0}, [nt_buffer:128];\ vadd.u32 reg_h, reg_h, t0;\ SHA256_ROUND1(reg_h,reg_e,reg_g,reg_f,0xE49B69C1,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,step_const);\ SHA256_ROUNDW_316(reg_g,reg_d,reg_f,reg_e,0xEFBE4786,reg_c,reg_h,reg_a,reg_b,t0,t1,t2,t3,t4,t5,t6,step_const,1 , 1, 15, 16, 2,NT_NUM_KEYS);\ SHA256_ROUNDW_316(reg_f,reg_c,reg_e,reg_d,0x0FC19DC6,reg_b,reg_g,reg_h,reg_a,t0,t1,t2,t3,t4,t5,t6,step_const,2 , 2, 0 , 16, 3,NT_NUM_KEYS);\ SHA256_ROUNDW_316(reg_e,reg_b,reg_d,reg_c,0x240CA1CC,reg_a,reg_f,reg_g,reg_h,t0,t1,t2,t3,t4,t5,t6,step_const,3 , 3, 1 , 16, 4,NT_NUM_KEYS);\ SHA256_ROUNDW_316(reg_d,reg_a,reg_c,reg_b,0x2DE92C6F,reg_h,reg_e,reg_f,reg_g,t0,t1,t2,t3,t4,t5,t6,step_const,4 , 4, 2 , 16, 5,NT_NUM_KEYS);\ 
SHA256_ROUNDW_316(reg_c,reg_h,reg_b,reg_a,0x4A7484AA,reg_g,reg_d,reg_e,reg_f,t0,t1,t2,t3,t4,t5,t6,step_const,5 , 5, 3 , 16, 6,NT_NUM_KEYS);\ SHA256_ROUNDW_416(reg_b,reg_g,reg_a,reg_h,0x5CB0A9DC,reg_f,reg_c,reg_d,reg_e,t0,t1,t2,t3,t4,t5,t6,step_const,6 , 6, 4 , 15, 16,NT_NUM_KEYS);\ SHA256_ROUNDW1416(reg_a,reg_f,reg_h,reg_g,0x76F988DA,reg_e,reg_b,reg_c,reg_d,t0,t1,t2,t3,t4,t5,t6,step_const,7 , 16, 5 , 0, 16,NT_NUM_KEYS);\ SHA256_ROUNDW1416(reg_h,reg_e,reg_g,reg_f,0x983E5152,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,step_const,8 , 16, 6 , 1, 16,NT_NUM_KEYS);\ SHA256_ROUNDW1416(reg_g,reg_d,reg_f,reg_e,0xA831C66D,reg_c,reg_h,reg_a,reg_b,t0,t1,t2,t3,t4,t5,t6,step_const,9 , 16, 7 , 2, 16,NT_NUM_KEYS);\ SHA256_ROUNDW1416(reg_f,reg_c,reg_e,reg_d,0xB00327C8,reg_b,reg_g,reg_h,reg_a,t0,t1,t2,t3,t4,t5,t6,step_const,10, 16, 8 , 3, 16,NT_NUM_KEYS);\ SHA256_ROUNDW1416(reg_e,reg_b,reg_d,reg_c,0xBF597FC7,reg_a,reg_f,reg_g,reg_h,t0,t1,t2,t3,t4,t5,t6,step_const,11, 16, 9 , 4, 16,NT_NUM_KEYS);\ SHA256_ROUNDW1416(reg_d,reg_a,reg_c,reg_b,0xC6E00BF3,reg_h,reg_e,reg_f,reg_g,t0,t1,t2,t3,t4,t5,t6,step_const,12, 16, 10, 5, 16,NT_NUM_KEYS);\ SHA256_ROUNDW1416(reg_c,reg_h,reg_b,reg_a,0xD5A79147,reg_g,reg_d,reg_e,reg_f,t0,t1,t2,t3,t4,t5,t6,step_const,13, 16, 11, 6, 16,NT_NUM_KEYS);\ SHA256_ROUNDW_116(reg_b,reg_g,reg_a,reg_h,0x06CA6351,reg_f,reg_c,reg_d,reg_e,t0,t1,t2,t3,t4,t5,t6,step_const,14, 16, 12, 7, 15,NT_NUM_KEYS);\ SHA256_ROUNDW(reg_a,reg_f,reg_h,reg_g,0x14292967,reg_e,reg_b,reg_c,reg_d,t0,t1,t2,t3,t4,t5,t6,step_const,15, 15, 13, 8, 0 ,NT_NUM_KEYS);\ \ SHA256_ROUNDW(reg_h,reg_e,reg_g,reg_f,0x27B70A85,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,step_const,0 ,0 ,14,9 ,1 ,NT_NUM_KEYS);\ SHA256_ROUNDW(reg_g,reg_d,reg_f,reg_e,0x2E1B2138,reg_c,reg_h,reg_a,reg_b,t0,t1,t2,t3,t4,t5,t6,step_const,1 ,1 ,15,10,2 ,NT_NUM_KEYS);\ SHA256_ROUNDW(reg_f,reg_c,reg_e,reg_d,0x4D2C6DFC,reg_b,reg_g,reg_h,reg_a,t0,t1,t2,t3,t4,t5,t6,step_const,2 ,2 ,0 ,11,3 ,NT_NUM_KEYS);\ 
SHA256_ROUNDW(reg_e,reg_b,reg_d,reg_c,0x53380D13,reg_a,reg_f,reg_g,reg_h,t0,t1,t2,t3,t4,t5,t6,step_const,3 ,3 ,1 ,12,4 ,NT_NUM_KEYS);\ SHA256_ROUNDW(reg_d,reg_a,reg_c,reg_b,0x650A7354,reg_h,reg_e,reg_f,reg_g,t0,t1,t2,t3,t4,t5,t6,step_const,4 ,4 ,2 ,13,5 ,NT_NUM_KEYS);\ SHA256_ROUNDW(reg_c,reg_h,reg_b,reg_a,0x766A0ABB,reg_g,reg_d,reg_e,reg_f,t0,t1,t2,t3,t4,t5,t6,step_const,5 ,5 ,3 ,14,6 ,NT_NUM_KEYS);\ SHA256_ROUNDW(reg_b,reg_g,reg_a,reg_h,0x81C2C92E,reg_f,reg_c,reg_d,reg_e,t0,t1,t2,t3,t4,t5,t6,step_const,6 ,6 ,4 ,15,7 ,NT_NUM_KEYS);\ SHA256_ROUNDW(reg_a,reg_f,reg_h,reg_g,0x92722C85,reg_e,reg_b,reg_c,reg_d,t0,t1,t2,t3,t4,t5,t6,step_const,7 ,7 ,5 ,0 ,8 ,NT_NUM_KEYS);\ SHA256_ROUNDW(reg_h,reg_e,reg_g,reg_f,0xA2BFE8A1,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,step_const,8 ,8 ,6 ,1 ,9 ,NT_NUM_KEYS);\ SHA256_ROUNDW(reg_g,reg_d,reg_f,reg_e,0xA81A664B,reg_c,reg_h,reg_a,reg_b,t0,t1,t2,t3,t4,t5,t6,step_const,9 ,9 ,7 ,2 ,10,NT_NUM_KEYS);\ SHA256_ROUNDW(reg_f,reg_c,reg_e,reg_d,0xC24B8B70,reg_b,reg_g,reg_h,reg_a,t0,t1,t2,t3,t4,t5,t6,step_const,10,10,8 ,3 ,11,NT_NUM_KEYS);\ SHA256_ROUNDW(reg_e,reg_b,reg_d,reg_c,0xC76C51A3,reg_a,reg_f,reg_g,reg_h,t0,t1,t2,t3,t4,t5,t6,step_const,11,11,9 ,4 ,12,NT_NUM_KEYS);\ SHA256_ROUNDW(reg_d,reg_a,reg_c,reg_b,0xD192E819,reg_h,reg_e,reg_f,reg_g,t0,t1,t2,t3,t4,t5,t6,step_const,12,12,10,5 ,13,NT_NUM_KEYS);\ SHA256_ROUNDW(reg_c,reg_h,reg_b,reg_a,0xD6990624,reg_g,reg_d,reg_e,reg_f,t0,t1,t2,t3,t4,t5,t6,step_const,13,13,11,6 ,14,NT_NUM_KEYS);\ SHA256_ROUNDW(reg_b,reg_g,reg_a,reg_h,0xF40E3585,reg_f,reg_c,reg_d,reg_e,t0,t1,t2,t3,t4,t5,t6,step_const,14,14,12,7 ,15,NT_NUM_KEYS);\ SHA256_ROUNDW(reg_a,reg_f,reg_h,reg_g,0x106AA070,reg_e,reg_b,reg_c,reg_d,t0,t1,t2,t3,t4,t5,t6,step_const,15,15,13,8 ,0 ,NT_NUM_KEYS);\ \ SHA256_ROUNDW(reg_h,reg_e,reg_g,reg_f,0x19A4C116,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,step_const,0 ,0 ,14,9 ,1,NT_NUM_KEYS);\ SHA256_ROUNDW(reg_g,reg_d,reg_f,reg_e,0x1E376C08,reg_c,reg_h,reg_a,reg_b,t0,t1,t2,t3,t4,t5,t6,step_const,1 
,1 ,15,10,2,NT_NUM_KEYS);\ SHA256_ROUNDW(reg_f,reg_c,reg_e,reg_d,0x2748774C,reg_b,reg_g,reg_h,reg_a,t0,t1,t2,t3,t4,t5,t6,step_const,2 ,2 ,0 ,11,3,NT_NUM_KEYS);\ SHA256_ROUNDW(reg_e,reg_b,reg_d,reg_c,0x34B0BCB5,reg_a,reg_f,reg_g,reg_h,t0,t1,t2,t3,t4,t5,t6,step_const,3 ,3 ,1 ,12,4,NT_NUM_KEYS);\ SHA256_ROUNDW(reg_d,reg_a,reg_c,reg_b,0x391C0CB3,reg_h,reg_e,reg_f,reg_g,t0,t1,t2,t3,t4,t5,t6,step_const,4 ,4 ,2 ,13,5,NT_NUM_KEYS);\ SHA256_ROUNDW(reg_c,reg_h,reg_b,reg_a,0x4ED8AA4A,reg_g,reg_d,reg_e,reg_f,t0,t1,t2,t3,t4,t5,t6,step_const,5 ,5 ,3 ,14,6,NT_NUM_KEYS);\ SHA256_ROUNDW(reg_b,reg_g,reg_a,reg_h,0x5B9CCA4F,reg_f,reg_c,reg_d,reg_e,t0,t1,t2,t3,t4,t5,t6,step_const,6 ,6 ,4 ,15,7,NT_NUM_KEYS);\ SHA256_ROUNDW(reg_a,reg_f,reg_h,reg_g,0x682E6FF3,reg_e,reg_b,reg_c,reg_d,t0,t1,t2,t3,t4,t5,t6,step_const,7 ,7 ,5 , 0,8,NT_NUM_KEYS);\ SHA256_ROUNDW(reg_h,reg_e,reg_g,reg_f,0x748F82EE,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,step_const,8 ,8 ,6 , 1,9,NT_NUM_KEYS);\ SHA256_RW_1SUM0(9,9 ,7,2,10,t0,t1,t2,t3,t4,t5,t6,step_const,NT_NUM_KEYS);\ SHA256_RW_1SUM0(2,11,9,4,12,t0,t1,t2,t3,t4,t5,t6,step_const,NT_NUM_KEYS);\ SHA256_RW_1SUM0(1,13,2,6,14,t0,t1,t2,t3,t4,t5,t6,step_const,NT_NUM_KEYS);\ SHA256_RW_1SUM0(0,15,1,8,0 ,t0,t1,t2,t3,t4,t5,t6,step_const,NT_NUM_KEYS);\ add nt_buffer, nt_buffer_base, #((8+0)*4*NT_NUM_KEYS);\ vld1.u32 {t5}, [nt_buffer:128];\ vadd.u32 reg_a,reg_a,t5;\ \ add nt_buffer, nt_buffer_base, #((8+4 )*4*NT_NUM_KEYS); vst1.u32 {reg_a}, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #((8+16)*4*NT_NUM_KEYS); vst1.u32 {reg_b}, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #((8+17)*4*NT_NUM_KEYS); vst1.u32 {reg_c}, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #((8+18)*4*NT_NUM_KEYS); vst1.u32 {reg_d}, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #((8+19)*4*NT_NUM_KEYS); vst1.u32 {reg_e}, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #((8+20)*4*NT_NUM_KEYS); vst1.u32 {reg_f}, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #((8+21)*4*NT_NUM_KEYS); vst1.u32 
{reg_g}, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #((8+6 )*4*NT_NUM_KEYS); vst1.u32 {reg_h}, [nt_buffer:128];\ \ add i,i,#1;\ add nt_buffer_base, nt_buffer_base, #16;\ cmp i, #(NT_NUM_KEYS/4);\ blo sha256_while1; #undef nt_buffer #undef NT_NUM_KEYS #define nt_buffer_base r0 #define table_ptr r1 #define size_bit_table_reg r2 #define nt_buffer r3 #define i r4 #define NT_NUM_KEYS 128 .text .align 2 .global crypt_sha256_neon_kernel_asm .type crypt_sha256_neon_kernel_asm, %function crypt_sha256_neon_kernel_asm: vpush {q4,q5,q6,q7} push {r4,r5} CRYPT_SHA256_AVX_KERNEL_ASM_BODY(128,q0,q1,q2,q3,q4,q5,q6,q7,q8,q9,q10,q11,q12,q13,q14,q15) pop {r4,r5} vpop {q4,q5,q6,q7} bx lr ///////////////////////////////////////////////////////////////////////////////////////////////// // SHA512 format ///////////////////////////////////////////////////////////////////////////////////////////////// #undef nt_buffer #define sha512_const_array r1 #define nt_buffer r5 #define SHA512_ROUND1(reg_h,reg_e,reg_g,reg_f,const_index,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7) \ add nt_buffer, sha512_const_array, #(const_index*16);\ vld1.u32 {t7}, [nt_buffer:128];\ \ vshr.u64 t0, reg_e, #(64-50);\ vshr.u64 t1, reg_e, #(64-46);\ vshr.u64 t2, reg_e, #(64-23);\ \ vmov t6, reg_e;\ vbsl t6, reg_f, reg_g;\ \ vshl.u64 t3, reg_e, #50;\ vshl.u64 t4, reg_e, #46;\ vshl.u64 t5, reg_e, #23;\ \ vorr t0, t3, t0;\ vorr t1, t4, t1;\ vorr t2, t5, t2;\ vadd.u64 reg_h,reg_h,t7;\ \ veor t0,t0,t1;\ vadd.u64 reg_h,reg_h,t6;\ veor t0,t0,t2;\ \ vadd.u64 reg_h,reg_h,t0;\ /* D += H*/\ vadd.u64 reg_d,reg_h,reg_d;\ /* H += R_A(A) + ((A & B) | (C & (A | B)));*/\ vshr.u64 t0, reg_a, #(64-36);\ vshr.u64 t1, reg_a, #(64-30);\ vshr.u64 t2, reg_a, #(64-25);\ \ vshl.u64 t3, reg_a, #36;\ vshl.u64 t4, reg_a, #30;\ vshl.u64 t5, reg_a, #25;\ \ vorr t0, t3, t0;\ veor t6, reg_b, reg_c;\ vorr t1, t4, t1;\ vbsl t6, reg_a, reg_b;\ \ vorr t2, t5, t2;\ veor t0, t0, t1;\ veor t0, t0, t2;\ vadd.u64 reg_h,reg_h,t6;\ vadd.u64 
reg_h,reg_h,t0; #define SHA512_ROUNDW(reg_h,reg_e,reg_g,reg_f,const,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7,w_index0,w_index1,w_index_r1,w_index_sum,w_index_r0,NT_NUM_KEYS) \ SHA512_RW_1SUM0(w_index0,w_index1,w_index_r1,w_index_sum,w_index_r0,t0,t1,t2,t3,t4,t5,t6,t7,NT_NUM_KEYS);\ vadd.u64 reg_h,reg_h,t6;\ SHA512_ROUND1(reg_h,reg_e,reg_g,reg_f,const,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7); #define SHA512_ROUNDW_316(reg_h,reg_e,reg_g,reg_f,const,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7,w_index0,w_index1,w_index_r1,w_index_sum,w_index_r0,NT_NUM_KEYS) \ SHA512_RW_1SUM0_316(w_index0,w_index1,w_index_r1,w_index_sum,w_index_r0,t0,t1,t2,t3,t4,t5,t6,t7,NT_NUM_KEYS);\ vadd.u64 reg_h,reg_h,t6;\ SHA512_ROUND1(reg_h,reg_e,reg_g,reg_f,const,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7); #define SHA512_ROUNDW3416(reg_h,reg_e,reg_g,reg_f,const,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7,w_index0,w_index1,w_index_r1,w_index_sum,w_index_r0,NT_NUM_KEYS) \ SHA512_RW_1SUM03416(w_index0,w_index1,w_index_r1,w_index_sum,w_index_r0,t0,t1,t2,t3,t4,t5,t6,t7,NT_NUM_KEYS);\ vadd.u64 reg_h,reg_h,t6;\ SHA512_ROUND1(reg_h,reg_e,reg_g,reg_f,const,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7); #define SHA512_ROUND13416(reg_h,reg_e,reg_g,reg_f,const,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7,w_index0,w_index1,w_index_r1,w_index_sum,w_index_r0,NT_NUM_KEYS) \ SHA512_RW_1SUM013416(w_index0,w_index1,w_index_r1,w_index_sum,w_index_r0,t0,t1,t2,t3,t4,t5,t6,t7,NT_NUM_KEYS);\ vadd.u64 reg_h,reg_h,t6;\ SHA512_ROUND1(reg_h,reg_e,reg_g,reg_f,const,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7); #define SHA512_ROUNDW1416(reg_h,reg_e,reg_g,reg_f,const,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7,w_index0,w_index1,w_index_r1,w_index_sum,w_index_r0,NT_NUM_KEYS) \ SHA512_RW_1SUM01416(w_index0,w_index1,w_index_r1,w_index_sum,w_index_r0,t0,t1,t2,t3,t4,t5,t6,t7,NT_NUM_KEYS);\ vadd.u64 reg_h,reg_h,t6;\ 
SHA512_ROUND1(reg_h,reg_e,reg_g,reg_f,const,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7); #define SHA512_ROUNDW116(reg_h,reg_e,reg_g,reg_f,const,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7,w_index0,w_index1,w_index_r1,w_index_sum,w_index_r0,NT_NUM_KEYS) \ SHA512_RW_1SUM0116(w_index0,w_index1,w_index_r1,w_index_sum,w_index_r0,t0,t1,t2,t3,t4,t5,t6,t7,NT_NUM_KEYS);\ vadd.u64 reg_h,reg_h,t6;\ SHA512_ROUND1(reg_h,reg_e,reg_g,reg_f,const,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,t7); #define SHA512_RW_1SUM0_316(r_index,rsum0_index,r1_index,rsum1_index,r0_index,t0,t1,t2,t3,t4,t5,t6,t7,NT_NUM_KEYS) \ add nt_buffer, nt_buffer_base, #((4+r1_index)*8*NT_NUM_KEYS); vld1.u64 {t6}, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #((4+r0_index)*8*NT_NUM_KEYS); vld1.u64 {t7}, [nt_buffer:128];\ \ vshr.u64 t0, t6, #(64-45);\ vshr.u64 t1, t6, #(64-3);\ vshr.u64 t2, t6, #6;\ \ vshl.u64 t3, t6, #45;\ vshl.u64 t6, t6, #3;\ vshr.u64 t4, t7, #(64-63);\ vshr.u64 t5, t7, #(64-56);\ \ vorr t0, t3, t0;\ vorr t6, t6, t1;\ vshr.u64 t1, t7, #7;\ vshl.u64 t3, t7, #63;\ \ veor t6,t0,t6;\ vshl.u64 t7, t7, #56;\ vorr t4, t3, t4;\ vorr t7, t7, t5;\ veor t6,t6,t2;\ \ veor t7,t4,t7;\ veor t7,t7,t1;\ vadd.u64 t6,t7,t6;\ \ add nt_buffer, nt_buffer_base, #((4+rsum0_index)*8*NT_NUM_KEYS); vld1.u64 {t0}, [nt_buffer:128];\ vadd.u64 t6,t6,t0;\ add nt_buffer, nt_buffer_base, #((4+r_index)*8*NT_NUM_KEYS); vst1.u64 {t6}, [nt_buffer:128]; #define SHA512_RW_1SUM03416(r_index,rsum0_index,r1_index,rsum1_index,r0_index,t0,t1,t2,t3,t4,t5,t6,t7,NT_NUM_KEYS) \ add nt_buffer, nt_buffer_base, #((4+r1_index)*8*NT_NUM_KEYS); vld1.u64 {t6}, [nt_buffer:128];\ \ vshr.u64 t0, t6, #(64-45);\ vshr.u64 t1, t6, #(64-3);\ vshr.u64 t2, t6, #6;\ \ vshl.u64 t3, t6, #45;\ vshl.u64 t6, t6, #3;\ vorr t0, t3, t0;\ vorr t6, t6, t1;\ \ veor t6,t0,t6;\ veor t6,t6,t2;\ \ add nt_buffer, nt_buffer_base, #((4+rsum0_index)*8*NT_NUM_KEYS); vld1.u64 {t0}, [nt_buffer:128];\ vadd.u64 t6,t6,t0;\ add nt_buffer, nt_buffer_base, 
#((4+r_index)*8*NT_NUM_KEYS); vst1.u64 {t6}, [nt_buffer:128]; #define SHA512_RW_1SUM013416(r_index,rsum0_index,r1_index,rsum1_index,r0_index,t0,t1,t2,t3,t4,t5,t6,t7,NT_NUM_KEYS) \ add nt_buffer, nt_buffer_base, #((4+r1_index)*8*NT_NUM_KEYS); vld1.u64 {t6}, [nt_buffer:128];\ \ vshr.u64 t0, t6, #(64-45);\ vshr.u64 t1, t6, #(64-3);\ vshr.u64 t2, t6, #6;\ \ vshl.u64 t3, t6, #45;\ vshl.u64 t6, t6, #3;\ vorr t0, t3, t0;\ vorr t6, t6, t1;\ \ veor t6,t0,t6;\ veor t6,t6,t2;\ \ add nt_buffer, nt_buffer_base, #((4+r_index)*8*NT_NUM_KEYS); vst1.u64 {t6}, [nt_buffer:128]; #define SHA512_RW_1SUM01416(r_index,rsum0_index,r1_index,rsum1_index,r0_index,t0,t1,t2,t3,t4,t5,t6,t7,NT_NUM_KEYS) \ add nt_buffer, nt_buffer_base, #((4+r1_index)*8*NT_NUM_KEYS); vld1.u64 {t6}, [nt_buffer:128];\ \ vshr.u64 t0, t6, #(64-45);\ vshr.u64 t1, t6, #(64-3);\ vshr.u64 t2, t6, #6;\ \ vshl.u64 t3, t6, #45;\ vshl.u64 t6, t6, #3;\ vorr t0, t3, t0;\ vorr t6, t6, t1;\ \ veor t6,t0,t6;\ veor t6,t6,t2;\ \ add nt_buffer, nt_buffer_base, #((4+rsum1_index)*8*NT_NUM_KEYS); vld1.u64 {t0}, [nt_buffer:128];\ vadd.u64 t6,t6,t0;\ add nt_buffer, nt_buffer_base, #((4+r_index)*8*NT_NUM_KEYS); vst1.u64 {t6}, [nt_buffer:128]; #define SHA512_RW_1SUM0116(r_index,rsum0_index,r1_index,rsum1_index,r0_index,t0,t1,t2,t3,t4,t5,t6,t7,NT_NUM_KEYS) \ add nt_buffer, nt_buffer_base, #((4+r1_index)*8*NT_NUM_KEYS); vld1.u64 {t6}, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #((4+r0_index)*8*NT_NUM_KEYS); vld1.u64 {t7}, [nt_buffer:128];\ \ vshr.u64 t0, t6, #(64-45);\ vshr.u64 t1, t6, #(64-3);\ vshr.u64 t2, t6, #6;\ \ vshl.u64 t3, t6, #45;\ vshl.u64 t6, t6, #3;\ vshr.u64 t4, t7, #(64-63);\ vshr.u64 t5, t7, #(64-56);\ \ vorr t0, t3, t0;\ vorr t6, t6, t1;\ vshr.u64 t1, t7, #7;\ vshl.u64 t3, t7, #63;\ \ veor t6,t0,t6;\ vshl.u64 t7, t7, #56;\ vorr t4, t3, t4;\ vorr t7, t7, t5;\ veor t6,t6,t2;\ veor t7,t4,t7;\ \ add nt_buffer, nt_buffer_base, #((4+rsum1_index)*8*NT_NUM_KEYS); vld1.u64 {t0}, [nt_buffer:128];\ vadd.u64 t6,t6,t0;\ \ veor 
t7,t7,t1;\ vadd.u64 t6,t7,t6;\ add nt_buffer, nt_buffer_base, #((4+r_index)*8*NT_NUM_KEYS); vst1.u64 {t6}, [nt_buffer:128]; #define SHA512_RW_1SUM0(r_index,rsum0_index,r1_index,rsum1_index,r0_index,t0,t1,t2,t3,t4,t5,t6,t7,NT_NUM_KEYS) \ add nt_buffer, nt_buffer_base, #((4+r1_index)*8*NT_NUM_KEYS); vld1.u64 {t6}, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #((4+r0_index)*8*NT_NUM_KEYS); vld1.u64 {t7}, [nt_buffer:128];\ \ vshr.u64 t0, t6, #(64-45);\ vshr.u64 t1, t6, #(64-3);\ vshr.u64 t2, t6, #6;\ \ vshl.u64 t3, t6, #45;\ vshl.u64 t6, t6, #3;\ vshr.u64 t4, t7, #(64-63);\ vshr.u64 t5, t7, #(64-56);\ \ vorr t0, t3, t0;\ vorr t6, t6, t1;\ vshr.u64 t1, t7, #7;\ vshl.u64 t3, t7, #63;\ \ veor t6,t0,t6;\ vshl.u64 t7, t7, #56;\ vorr t4, t3, t4;\ vorr t7, t7, t5;\ \ veor t6,t6,t2;\ veor t7,t4,t7;\ add nt_buffer, nt_buffer_base, #((4+rsum1_index)*8*NT_NUM_KEYS); vld1.u64 {t0}, [nt_buffer:128];\ vadd.u64 t6,t6,t0;\ \ veor t7,t7,t1;\ vadd.u64 t6,t7,t6;\ add nt_buffer, nt_buffer_base, #((4+rsum0_index)*8*NT_NUM_KEYS); vld1.u64 {t0}, [nt_buffer:128];\ vadd.u64 t6,t6,t0;\ add nt_buffer, nt_buffer_base, #((4+r_index)*8*NT_NUM_KEYS); vst1.u64 {t6}, [nt_buffer:128]; #define SHA512_CONVERT_ENDIAN(NT_NUM_KEYS,t0,t1,t2,t3,t4,t5,t6,t7,t8,t9) \ mov i, #0;\ add r6, nt_buffer_base, #(4*8*NT_NUM_KEYS);\ sha512_while_endian:\ /* Load*/\ add nt_buffer, nt_buffer_base, #(0*4*NT_NUM_KEYS); vld1.u32 {t1}, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #(1*4*NT_NUM_KEYS); vld1.u32 {t0}, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #(2*4*NT_NUM_KEYS); vld1.u32 {t3}, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #(3*4*NT_NUM_KEYS); vld1.u32 {t2}, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #(4*4*NT_NUM_KEYS); vld1.u32 {t5}, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #(5*4*NT_NUM_KEYS); vld1.u32 {t4}, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #(6*4*NT_NUM_KEYS); vld1.u32 {t7}, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #(7*4*NT_NUM_KEYS); vld1.u32 {t8}, 
[nt_buffer:128];\ \ /* Shuffle bytes*/\ vrev32.u8 t0, t0;\ vrev32.u8 t1, t1;\ vrev32.u8 t2, t2;\ vrev32.u8 t3, t3;\ vrev32.u8 t4, t4;\ vrev32.u8 t5, t5;\ vrev32.u8 t7, t7;\ \ /* Interleave dword*/\ vmov.u64 t6, #0;\ vmov.u64 t9, #0;\ vzip.u32 t0, t1;\ vzip.u32 t2, t3;\ vzip.u32 t4, t5;\ vzip.u32 t6, t7;\ vzip.u32 t8, t9;\ \ /* Write to W*/\ add nt_buffer, r6, #(0 *8*NT_NUM_KEYS); vst1.u64 {t0,t1}, [nt_buffer:128];\ add nt_buffer, r6, #(1 *8*NT_NUM_KEYS); vst1.u64 {t2,t3}, [nt_buffer:128];\ add nt_buffer, r6, #(2 *8*NT_NUM_KEYS); vst1.u64 {t4,t5}, [nt_buffer:128];\ add nt_buffer, r6, #(3 *8*NT_NUM_KEYS); vst1.u64 {t6,t7}, [nt_buffer:128];\ add nt_buffer, r6, #(15*8*NT_NUM_KEYS); vst1.u64 {t8,t9}, [nt_buffer:128];\ \ add i, #1;\ add nt_buffer_base, #16;\ add r6, #(2*16);\ cmp i, #(NT_NUM_KEYS/4);\ blo sha512_while_endian; #define CRYPT_SHA512_AVX_KERNEL_ASM_BODY(NT_NUM_KEYS,reg_a,reg_b,reg_c,reg_d,reg_e,reg_f,reg_g,reg_h,t0,t1,t2,t3,t4,t5,t6,step_const) \ mov i, #0;\ sha512_while1:\ add nt_buffer, nt_buffer_base, #((4+0 )*8*NT_NUM_KEYS); vld1.u32 {reg_h}, [nt_buffer:128];\ add nt_buffer, nt_buffer_base, #((4+1 )*8*NT_NUM_KEYS); vld1.u32 {reg_g}, [nt_buffer:128];\ \ /*Step 1: H = 0xfc08884d + W[0 * NT_NUM_KEYS]; D=0x9cbf5a55+H;*/\ add nt_buffer, sha512_const_array, #(0 *16); vld1.u32 {step_const}, [nt_buffer:128];\ add nt_buffer, sha512_const_array, #(75*16); vld1.u32 {t6}, [nt_buffer:128];\ add nt_buffer, sha512_const_array, #(72*16); vld1.u32 {reg_a}, [nt_buffer:128];\ add nt_buffer, sha512_const_array, #(77*16); vld1.u32 {reg_b}, [nt_buffer:128];\ vadd.u64 reg_h, reg_h, step_const;\ add nt_buffer, sha512_const_array, #(76*16); vld1.u32 {reg_c}, [nt_buffer:128];\ add nt_buffer, sha512_const_array, #(73*16); vld1.u32 {reg_e}, [nt_buffer:128];\ add nt_buffer, sha512_const_array, #(74*16); vld1.u32 {reg_f}, [nt_buffer:128];\ vadd.u64 reg_d, reg_h, t6;\ \ /*Step 2*/\ SHA512_ROUND1(reg_g,reg_d,reg_f,reg_e,1 ,reg_c,reg_h,reg_a,reg_b,t0,t1,t2,t3,t4,t5,t6,step_const);\ add 
nt_buffer, nt_buffer_base, #((4+2)*8*NT_NUM_KEYS);\ vld1.u32 {reg_f},[nt_buffer:128];\ SHA512_ROUND1(reg_f,reg_c,reg_e,reg_d,2 ,reg_b,reg_g,reg_h,reg_a,t0,t1,t2,t3,t4,t5,t6,step_const);\ add nt_buffer, nt_buffer_base, #((4+3)*8*NT_NUM_KEYS);\ vld1.u32 {reg_e},[nt_buffer:128];\ SHA512_ROUND1(reg_e,reg_b,reg_d,reg_c,3 ,reg_a,reg_f,reg_g,reg_h,t0,t1,t2,t3,t4,t5,t6,step_const);\ SHA512_ROUND1(reg_d,reg_a,reg_c,reg_b,4 ,reg_h,reg_e,reg_f,reg_g,t0,t1,t2,t3,t4,t5,t6,step_const);\ SHA512_ROUND1(reg_c,reg_h,reg_b,reg_a,5 ,reg_g,reg_d,reg_e,reg_f,t0,t1,t2,t3,t4,t5,t6,step_const);\ SHA512_ROUND1(reg_b,reg_g,reg_a,reg_h,6 ,reg_f,reg_c,reg_d,reg_e,t0,t1,t2,t3,t4,t5,t6,step_const);\ SHA512_ROUND1(reg_a,reg_f,reg_h,reg_g,7 ,reg_e,reg_b,reg_c,reg_d,t0,t1,t2,t3,t4,t5,t6,step_const);\ SHA512_ROUND1(reg_h,reg_e,reg_g,reg_f,8 ,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,step_const);\ SHA512_ROUND1(reg_g,reg_d,reg_f,reg_e,9 ,reg_c,reg_h,reg_a,reg_b,t0,t1,t2,t3,t4,t5,t6,step_const);\ SHA512_ROUND1(reg_f,reg_c,reg_e,reg_d,10,reg_b,reg_g,reg_h,reg_a,t0,t1,t2,t3,t4,t5,t6,step_const);\ SHA512_ROUND1(reg_e,reg_b,reg_d,reg_c,11,reg_a,reg_f,reg_g,reg_h,t0,t1,t2,t3,t4,t5,t6,step_const);\ SHA512_ROUND1(reg_d,reg_a,reg_c,reg_b,12,reg_h,reg_e,reg_f,reg_g,t0,t1,t2,t3,t4,t5,t6,step_const);\ SHA512_ROUND1(reg_c,reg_h,reg_b,reg_a,13,reg_g,reg_d,reg_e,reg_f,t0,t1,t2,t3,t4,t5,t6,step_const);\ SHA512_ROUND1(reg_b,reg_g,reg_a,reg_h,14,reg_f,reg_c,reg_d,reg_e,t0,t1,t2,t3,t4,t5,t6,step_const);\ add nt_buffer, nt_buffer_base, #((4+15)*8*NT_NUM_KEYS);\ vld1.u32 {t0},[nt_buffer:128];\ vadd.u64 reg_a,reg_a,t0;\ SHA512_ROUND1(reg_a,reg_f,reg_h,reg_g,15,reg_e,reg_b,reg_c,reg_d,t0,t1,t2,t3,t4,t5,t6,step_const);\ \ /* Recalculate W*/\ /*W[ 0 * NT_NUM_KEYS] += R0(W[1 * NT_NUM_KEYS]);*/\ add nt_buffer, nt_buffer_base, #((4+1)*8*NT_NUM_KEYS);vld1.u64 {t5},[nt_buffer:128];\ vshr.u64 t0, t5, #(64-63);\ vshr.u64 t1, t5, #(64-56);\ vshr.u64 t2, t5, #7;\ vshl.u64 t3, t5, #63;\ vshl.u64 t4, t5, #56;\ vorr t0, t3, t0;\ vorr 
t1, t4, t1;\ veor t0, t0, t1;\ veor t0, t0, t2;\ add nt_buffer, nt_buffer_base, #((4+0)*8*NT_NUM_KEYS);vld1.u64 {t5},[nt_buffer:128];\ vadd.u64 t0, t0, t5;\ vst1.u64 {t0}, [nt_buffer:128];\ /*Round 2*/\ vadd.u64 reg_h, reg_h, t0;\ SHA512_ROUND1(reg_h,reg_e,reg_g,reg_f,16,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,step_const);\ SHA512_ROUNDW_316(reg_g,reg_d,reg_f,reg_e,17,reg_c,reg_h,reg_a,reg_b,t0,t1,t2,t3,t4,t5,t6,step_const,1 , 1, 15, 16, 2,NT_NUM_KEYS);\ SHA512_ROUNDW_316(reg_f,reg_c,reg_e,reg_d,18,reg_b,reg_g,reg_h,reg_a,t0,t1,t2,t3,t4,t5,t6,step_const,2 , 2, 0 , 16, 3,NT_NUM_KEYS);\ SHA512_ROUNDW3416(reg_e,reg_b,reg_d,reg_c,19,reg_a,reg_f,reg_g,reg_h,t0,t1,t2,t3,t4,t5,t6,step_const,3 , 3, 1 , 16, 16,NT_NUM_KEYS);\ SHA512_ROUND13416(reg_d,reg_a,reg_c,reg_b,20,reg_h,reg_e,reg_f,reg_g,t0,t1,t2,t3,t4,t5,t6,step_const,4 , 16, 2 , 16, 16,NT_NUM_KEYS);\ SHA512_ROUND13416(reg_c,reg_h,reg_b,reg_a,21,reg_g,reg_d,reg_e,reg_f,t0,t1,t2,t3,t4,t5,t6,step_const,5 , 16, 3 , 16, 16,NT_NUM_KEYS);\ SHA512_ROUNDW1416(reg_b,reg_g,reg_a,reg_h,22,reg_f,reg_c,reg_d,reg_e,t0,t1,t2,t3,t4,t5,t6,step_const,6 , 16, 4 , 15, 16,NT_NUM_KEYS);\ SHA512_ROUNDW1416(reg_a,reg_f,reg_h,reg_g,23,reg_e,reg_b,reg_c,reg_d,t0,t1,t2,t3,t4,t5,t6,step_const,7 , 16, 5 , 0, 16,NT_NUM_KEYS);\ SHA512_ROUNDW1416(reg_h,reg_e,reg_g,reg_f,24,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,step_const,8 , 16, 6 , 1, 16,NT_NUM_KEYS);\ SHA512_ROUNDW1416(reg_g,reg_d,reg_f,reg_e,25,reg_c,reg_h,reg_a,reg_b,t0,t1,t2,t3,t4,t5,t6,step_const,9 , 16, 7 , 2, 16,NT_NUM_KEYS);\ SHA512_ROUNDW1416(reg_f,reg_c,reg_e,reg_d,26,reg_b,reg_g,reg_h,reg_a,t0,t1,t2,t3,t4,t5,t6,step_const,10, 16, 8 , 3, 16,NT_NUM_KEYS);\ SHA512_ROUNDW1416(reg_e,reg_b,reg_d,reg_c,27,reg_a,reg_f,reg_g,reg_h,t0,t1,t2,t3,t4,t5,t6,step_const,11, 16, 9 , 4, 16,NT_NUM_KEYS);\ SHA512_ROUNDW1416(reg_d,reg_a,reg_c,reg_b,28,reg_h,reg_e,reg_f,reg_g,t0,t1,t2,t3,t4,t5,t6,step_const,12, 16, 10, 5, 16,NT_NUM_KEYS);\ 
SHA512_ROUNDW1416(reg_c,reg_h,reg_b,reg_a,29,reg_g,reg_d,reg_e,reg_f,t0,t1,t2,t3,t4,t5,t6,step_const,13, 16, 11, 6, 16,NT_NUM_KEYS);\ SHA512_ROUNDW116(reg_b,reg_g,reg_a,reg_h,30,reg_f,reg_c,reg_d,reg_e,t0,t1,t2,t3,t4,t5,t6,step_const,14, 16, 12, 7, 15,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_a,reg_f,reg_h,reg_g,31,reg_e,reg_b,reg_c,reg_d,t0,t1,t2,t3,t4,t5,t6,step_const,15, 15, 13, 8, 0 ,NT_NUM_KEYS);\ \ /*Round 3*/\ SHA512_ROUNDW(reg_h,reg_e,reg_g,reg_f,32,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,step_const,0 ,0 ,14,9 ,1 ,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_g,reg_d,reg_f,reg_e,33,reg_c,reg_h,reg_a,reg_b,t0,t1,t2,t3,t4,t5,t6,step_const,1 ,1 ,15,10,2 ,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_f,reg_c,reg_e,reg_d,34,reg_b,reg_g,reg_h,reg_a,t0,t1,t2,t3,t4,t5,t6,step_const,2 ,2 ,0 ,11,3 ,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_e,reg_b,reg_d,reg_c,35,reg_a,reg_f,reg_g,reg_h,t0,t1,t2,t3,t4,t5,t6,step_const,3 ,3 ,1 ,12,4 ,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_d,reg_a,reg_c,reg_b,36,reg_h,reg_e,reg_f,reg_g,t0,t1,t2,t3,t4,t5,t6,step_const,4 ,4 ,2 ,13,5 ,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_c,reg_h,reg_b,reg_a,37,reg_g,reg_d,reg_e,reg_f,t0,t1,t2,t3,t4,t5,t6,step_const,5 ,5 ,3 ,14,6 ,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_b,reg_g,reg_a,reg_h,38,reg_f,reg_c,reg_d,reg_e,t0,t1,t2,t3,t4,t5,t6,step_const,6 ,6 ,4 ,15,7 ,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_a,reg_f,reg_h,reg_g,39,reg_e,reg_b,reg_c,reg_d,t0,t1,t2,t3,t4,t5,t6,step_const,7 ,7 ,5 ,0 ,8 ,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_h,reg_e,reg_g,reg_f,40,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,step_const,8 ,8 ,6 ,1 ,9 ,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_g,reg_d,reg_f,reg_e,41,reg_c,reg_h,reg_a,reg_b,t0,t1,t2,t3,t4,t5,t6,step_const,9 ,9 ,7 ,2 ,10,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_f,reg_c,reg_e,reg_d,42,reg_b,reg_g,reg_h,reg_a,t0,t1,t2,t3,t4,t5,t6,step_const,10,10,8 ,3 ,11,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_e,reg_b,reg_d,reg_c,43,reg_a,reg_f,reg_g,reg_h,t0,t1,t2,t3,t4,t5,t6,step_const,11,11,9 ,4 ,12,NT_NUM_KEYS);\ 
SHA512_ROUNDW(reg_d,reg_a,reg_c,reg_b,44,reg_h,reg_e,reg_f,reg_g,t0,t1,t2,t3,t4,t5,t6,step_const,12,12,10,5 ,13,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_c,reg_h,reg_b,reg_a,45,reg_g,reg_d,reg_e,reg_f,t0,t1,t2,t3,t4,t5,t6,step_const,13,13,11,6 ,14,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_b,reg_g,reg_a,reg_h,46,reg_f,reg_c,reg_d,reg_e,t0,t1,t2,t3,t4,t5,t6,step_const,14,14,12,7 ,15,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_a,reg_f,reg_h,reg_g,47,reg_e,reg_b,reg_c,reg_d,t0,t1,t2,t3,t4,t5,t6,step_const,15,15,13,8 ,0 ,NT_NUM_KEYS);\ \ /*Round 4*/\ SHA512_ROUNDW(reg_h,reg_e,reg_g,reg_f,48,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,step_const,0 ,0 ,14,9 ,1 ,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_g,reg_d,reg_f,reg_e,49,reg_c,reg_h,reg_a,reg_b,t0,t1,t2,t3,t4,t5,t6,step_const,1 ,1 ,15,10,2 ,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_f,reg_c,reg_e,reg_d,50,reg_b,reg_g,reg_h,reg_a,t0,t1,t2,t3,t4,t5,t6,step_const,2 ,2 ,0 ,11,3 ,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_e,reg_b,reg_d,reg_c,51,reg_a,reg_f,reg_g,reg_h,t0,t1,t2,t3,t4,t5,t6,step_const,3 ,3 ,1 ,12,4 ,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_d,reg_a,reg_c,reg_b,52,reg_h,reg_e,reg_f,reg_g,t0,t1,t2,t3,t4,t5,t6,step_const,4 ,4 ,2 ,13,5 ,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_c,reg_h,reg_b,reg_a,53,reg_g,reg_d,reg_e,reg_f,t0,t1,t2,t3,t4,t5,t6,step_const,5 ,5 ,3 ,14,6 ,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_b,reg_g,reg_a,reg_h,54,reg_f,reg_c,reg_d,reg_e,t0,t1,t2,t3,t4,t5,t6,step_const,6 ,6 ,4 ,15,7 ,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_a,reg_f,reg_h,reg_g,55,reg_e,reg_b,reg_c,reg_d,t0,t1,t2,t3,t4,t5,t6,step_const,7 ,7 ,5 ,0 ,8 ,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_h,reg_e,reg_g,reg_f,56,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,step_const,8 ,8 ,6 ,1 ,9 ,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_g,reg_d,reg_f,reg_e,57,reg_c,reg_h,reg_a,reg_b,t0,t1,t2,t3,t4,t5,t6,step_const,9 ,9 ,7 ,2 ,10,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_f,reg_c,reg_e,reg_d,58,reg_b,reg_g,reg_h,reg_a,t0,t1,t2,t3,t4,t5,t6,step_const,10,10,8 ,3 ,11,NT_NUM_KEYS);\ 
SHA512_ROUNDW(reg_e,reg_b,reg_d,reg_c,59,reg_a,reg_f,reg_g,reg_h,t0,t1,t2,t3,t4,t5,t6,step_const,11,11,9 ,4 ,12,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_d,reg_a,reg_c,reg_b,60,reg_h,reg_e,reg_f,reg_g,t0,t1,t2,t3,t4,t5,t6,step_const,12,12,10,5 ,13,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_c,reg_h,reg_b,reg_a,61,reg_g,reg_d,reg_e,reg_f,t0,t1,t2,t3,t4,t5,t6,step_const,13,13,11,6 ,14,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_b,reg_g,reg_a,reg_h,62,reg_f,reg_c,reg_d,reg_e,t0,t1,t2,t3,t4,t5,t6,step_const,14,14,12,7 ,15,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_a,reg_f,reg_h,reg_g,63,reg_e,reg_b,reg_c,reg_d,t0,t1,t2,t3,t4,t5,t6,step_const,15,15,13,8 ,0 ,NT_NUM_KEYS);\ \ /*Round 5*/\ SHA512_ROUNDW(reg_h,reg_e,reg_g,reg_f,64,reg_d,reg_a,reg_b,reg_c,t0,t1,t2,t3,t4,t5,t6,step_const,0 ,0 ,14,9 ,1,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_g,reg_d,reg_f,reg_e,65,reg_c,reg_h,reg_a,reg_b,t0,t1,t2,t3,t4,t5,t6,step_const,1 ,1 ,15,10,2,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_f,reg_c,reg_e,reg_d,66,reg_b,reg_g,reg_h,reg_a,t0,t1,t2,t3,t4,t5,t6,step_const,2 ,2 ,0 ,11,3,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_e,reg_b,reg_d,reg_c,67,reg_a,reg_f,reg_g,reg_h,t0,t1,t2,t3,t4,t5,t6,step_const,3 ,3 ,1 ,12,4,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_d,reg_a,reg_c,reg_b,68,reg_h,reg_e,reg_f,reg_g,t0,t1,t2,t3,t4,t5,t6,step_const,4 ,4 ,2 ,13,5,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_c,reg_h,reg_b,reg_a,69,reg_g,reg_d,reg_e,reg_f,t0,t1,t2,t3,t4,t5,t6,step_const,5 ,5 ,3 ,14,6,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_b,reg_g,reg_a,reg_h,70,reg_f,reg_c,reg_d,reg_e,t0,t1,t2,t3,t4,t5,t6,step_const,6 ,6 ,4 ,15,7,NT_NUM_KEYS);\ SHA512_ROUNDW(reg_a,reg_f,reg_h,reg_g,71,reg_e,reg_b,reg_c,reg_d,t0,t1,t2,t3,t4,t5,t6,step_const,7 ,7 ,5 ,0 ,8,NT_NUM_KEYS);\ SHA512_RW_1SUM0(8,8 ,6,1,9 ,t0,t1,t2,t3,t4,t5,t6,step_const,NT_NUM_KEYS);\ SHA512_RW_1SUM0(9,9 ,7,2,10,t0,t1,t2,t3,t4,t5,t6,step_const,NT_NUM_KEYS);\ SHA512_RW_1SUM0(2,11,9,4,12,t0,t1,t2,t3,t4,t5,t6,step_const,NT_NUM_KEYS);\ SHA512_RW_1SUM0(1,13,2,6,14,t0,t1,t2,t3,t4,t5,t6,step_const,NT_NUM_KEYS);\ SHA512_RW_1SUM0(0,15,1,8,0 
,t0,t1,t2,t3,t4,t5,t6,step_const,NT_NUM_KEYS);\
	add nt_buffer, nt_buffer_base, #((4+0)*8*NT_NUM_KEYS);\
	vld1.u64 {t0}, [nt_buffer:128];\
	/* reg_a += W[0*NT_NUM_KEYS] (last schedule word folded into the state) */\
	vadd.u64 reg_a,reg_a,t0;\
\
	/* Write the eight working variables back into W slots for the caller.   */\
	/* NOTE(review): slot choice (4+4,4+6,4+16..4+21) differs from the       */\
	/* SHA-256 kernel's mapping — presumably matched by the C-side reader;   */\
	/* verify intentional.                                                   */\
	add nt_buffer, nt_buffer_base, #((4+4 )*8*NT_NUM_KEYS); vst1.u32 {reg_a}, [nt_buffer:128];\
	add nt_buffer, nt_buffer_base, #((4+6 )*8*NT_NUM_KEYS); vst1.u32 {reg_b}, [nt_buffer:128];\
	add nt_buffer, nt_buffer_base, #((4+16)*8*NT_NUM_KEYS); vst1.u32 {reg_c}, [nt_buffer:128];\
	add nt_buffer, nt_buffer_base, #((4+17)*8*NT_NUM_KEYS); vst1.u32 {reg_d}, [nt_buffer:128];\
	add nt_buffer, nt_buffer_base, #((4+18)*8*NT_NUM_KEYS); vst1.u32 {reg_e}, [nt_buffer:128];\
	add nt_buffer, nt_buffer_base, #((4+19)*8*NT_NUM_KEYS); vst1.u32 {reg_f}, [nt_buffer:128];\
	add nt_buffer, nt_buffer_base, #((4+20)*8*NT_NUM_KEYS); vst1.u32 {reg_g}, [nt_buffer:128];\
	add nt_buffer, nt_buffer_base, #((4+21)*8*NT_NUM_KEYS); vst1.u32 {reg_h}, [nt_buffer:128];\
\
	/* Next group: one q register holds two 64-bit lanes, i.e. two keys,  */\
	/* hence NT_NUM_KEYS/2 iterations of 16 bytes each.                   */\
	add i, #1;\
	add nt_buffer_base, #16;\
	cmp i, #(NT_NUM_KEYS/2);\
	blo sha512_while1;

//-----------------------------------------------------------------------
// crypt_sha512_neon_kernel_asm
// In:  r0 (nt_buffer_base)    = key/message-schedule work buffer
//      r1 (sha512_const_array) = 16-byte-aligned table of duplicated 64-bit
//          round constants; indices 72..77 supply the initial state words
//          (see the loads in CRYPT_SHA512_AVX_KERNEL_ASM_BODY above).
// Saves/restores callee-saved q4-q7 and r4-r6; returns with bx lr.
// Processes NT_NUM_KEYS (=128) keys, two per 128-bit q register.
//-----------------------------------------------------------------------
.text
.align 2
.global crypt_sha512_neon_kernel_asm
.type crypt_sha512_neon_kernel_asm, %function
crypt_sha512_neon_kernel_asm:

	vpush {q4,q5,q6,q7}
	push {r4,r5,r6}

	// Byte-swap the 32-bit input words and vzip them into 64-bit W lanes.
	SHA512_CONVERT_ENDIAN(128,q0,q1,q2,q3,q4,q5,q6,q7,q8,q9)
	// Undo the +16/iteration advance done by SHA512_CONVERT_ENDIAN
	// ((NT_NUM_KEYS/4) * 16 = 4*NT_NUM_KEYS bytes).
	sub nt_buffer_base, nt_buffer_base, #(4*NT_NUM_KEYS)
	CRYPT_SHA512_AVX_KERNEL_ASM_BODY(128,q0,q1,q2,q3,q4,q5,q6,q7,q8,q9,q10,q11,q12,q13,q14,q15)

	pop {r4,r5,r6}
	vpop {q4,q5,q6,q7}

	bx lr

#undef NT_NUM_KEYS

/////////////////////////////////////////////////////////////////////////////////////////////////
// LM format
/////////////////////////////////////////////////////////////////////////////////////////////////
// Register roles for the bitsliced LM (DES) section below.
#define first_k r4
#define first_c r5
#define p_out r6
#undef i
#define i r7

#define first_c_ptr r0
#define first_k_ptr r1

// Repeats
#define MAX_REPEAT 8 // If change this change also SET_0 and SET_FFFFFFFF

// Zero-fill 4*32 = 128 bytes at first_c_ptr (post-incremented); q0/q1 are
// expected to hold zero (see macro name) — set by the caller.
#define SET_0 \
	vst1.u32 {q0,q1}, [first_c_ptr:128]!;\
	vst1.u32 {q0,q1}, [first_c_ptr:128]!;\
	vst1.u32 {q0,q1}, [first_c_ptr:128]!;\
	vst1.u32 {q0,q1}, [first_c_ptr:128]!;
#define SET_FFFFFFFF \ vst1.u32 {q2,q3}, [first_c_ptr:128]!;\ vst1.u32 {q2,q3}, [first_c_ptr:128]!;\ vst1.u32 {q2,q3}, [first_c_ptr:128]!;\ vst1.u32 {q2,q3}, [first_c_ptr:128]!; // Sboxs .text .align 2 .global s1 .type s1, %function s1: veor q0, q14, q9 vorr q2, q12, q15 vand q0, q10, q0 veor q3, q10, q12 veor q1, q13, q0 vand q4, q2, q3 veor q6, q1, q9 veor q8, q13, q4 veor q7, q14, q15 vand q6, q8, q6 veor q12, q12, q9 veor q5, q12, q7 vorr q4, q15, q4 vand q5, q1, q5 vorr q0, q12, q0 veor q4, q5, q4 vorr q15, q10, q15 veor q5, q6, q9 vorr q12, q4, q15 vand q5, q4, q5 veor q8, q8, q9 veor q15, q15, q9 vand q8, q14, q8 vand q15, q13, q15 veor q3, q3, q9 veor q13, q12, q8 veor q15, q8, q15 vand q3, q7, q3 vand q4, q2, q4 vorr q15, q15, q3 veor q3, q1, q12 veor q12, q2, q12 vand q0, q3, q0 vorr q12, q15, q12 veor q3, q0, q9 veor q0, q7, q0 veor q4, q3, q4 vorr q0, q8, q0 vorr q14, q14, q1 veor q3, q11, q9 veor q0, q2, q0 vand q3, q13, q3 veor q10, q10, q0 veor q3, q3, q4 vorr q6, q6, q11 veor q4, q4, q10 add first_c_ptr, p_out, #(2*16*MAX_REPEAT) vld1.u32 {q0}, [first_c_ptr:128] veor q0, q0, q3 vst1.u32 {q0}, [first_c_ptr:128] veor q6, q6, q4 veor q2, q10, q12 vorr q4, q7, q4 vand q10, q5, q10 veor q4, q2, q4 vld1.u32 {q0}, [p_out:128] veor q0, q0, q6 vst1.u32 {q0}, [p_out:128] veor q2, q2, q9 vorr q6, q5, q11 vand q2, q14, q2 veor q4, q6, q4 veor q10, q2, q10 add first_c_ptr, p_out, #(1*16*MAX_REPEAT) vld1.u32 {q0}, [first_c_ptr:128] veor q0, q0, q4 vst1.u32 {q0}, [first_c_ptr:128] vorr q11, q10, q11 veor q11, q11, q15 add first_c_ptr, p_out, #(3*16*MAX_REPEAT) vld1.u32 {q0}, [first_c_ptr:128] veor q0, q0, q11 vst1.u32 {q0}, [first_c_ptr:128] add p_out, p_out, #(4*16*MAX_REPEAT) bx lr .text .align 2 .global s2 .type s2, %function s2: veor q0, q11, q14 veor q7, q10, q9 vorr q1, q7, q15 vand q1, q14, q1 vorr q2, q11, q1 veor q3, q15, q9 vand q3, q0, q3 vand q4, q10, q0 veor q14, q14, q4 veor q5, q3, q9 vand q5, q14, q5 vorr q5, q5, q13 vand q4, q12, q15 veor q1, q1, 
q3 vand q1, q2, q1 veor q3, q1, q9 vorr q3, q3, q4 vand q6, q12, q1 veor q10, q6, q7 veor q15, q15, q0 veor q7, q15, q9 vorr q7, q7, q4 vand q11, q11, q7//q7 veor q7, q7, q9 veor q7, q10, q7 vand q3, q13, q3 veor q3, q3, q7 veor q14, q14, q11 veor q6, q6, q11//q11 add first_c_ptr, p_out, #(1*16*MAX_REPEAT) vld1.u32 {q11}, [first_c_ptr:128] veor q11, q11, q3 vst1.u32 {q11}, [first_c_ptr:128] veor q14, q14, q9 vand q3, q10, q14 veor q12, q12, q15 veor q3, q3, q12 veor q10, q13, q9 vand q10, q2, q10 veor q10, q10, q3 vld1.u32 {q11}, [p_out:128] veor q11, q11, q10 vst1.u32 {q11}, [p_out:128] vorr q12, q12, q6 veor q2, q2, q7 vorr q4, q4, q2 veor q6, q12, q4 veor q1, q1, q7//q8 veor q1, q3, q1 vand q1, q4, q1//q4 vand q12, q0, q12//q0 veor q12, q1, q12//q1 vorr q3, q12, q13 veor q6, q3, q6 add first_c_ptr, p_out, #(2*16*MAX_REPEAT) vld1.u32 {q11}, [first_c_ptr:128] veor q11, q11, q6 vst1.u32 {q11}, [first_c_ptr:128] vand q14, q12, q14//q12 vorr q15, q15, q2//q2 veor q14, q14, q15//q15 veor q5, q5, q14//q14 add first_c_ptr, p_out, #(3*16*MAX_REPEAT) vld1.u32 {q11}, [first_c_ptr:128] veor q11, q11, q5 vst1.u32 {q11}, [first_c_ptr:128] add p_out, p_out, #(4*16*MAX_REPEAT) bx lr .text .align 2 .global s3 .type s3, %function s3: veor q0, q11, q9// repeted below vand q0, q10, q0 veor q1, q12, q15 vorr q2, q0, q1 veor q3, q13, q15 veor q4, q10, q9 vand q4, q3, q4 veor q5, q2, q4 veor q6, q11, q1 veor q7, q15, q9 vand q7, q6, q7 veor q2, q2, q7 veor q7, q5, q9 vorr q7, q7, q2 vand q1, q1, q3 vand q3, q15, q5 vorr q3, q13, q3 vand q3, q10, q3 veor q3, q6, q3 veor q8, q10, q13 vorr q4, q4, q8 veor q8, q2, q8 vorr q8, q12, q8 veor q1, q1, q9 vand q1, q8, q1 veor q8, q4, q9 vand q8, q3, q8 vand q15, q13, q15 vorr q12, q11, q12 veor q11, q11, q9 vand q11, q15, q11 veor q11, q8, q11//q8 vand q2, q2, q11 vorr q15, q6, q15 veor q2, q2, q9 vand q15, q15, q2//q2 vand q7, q14, q7 veor q2, q14, q9 vand q2, q5, q2 veor q2, q2, q3 veor q10, q10, q15 add first_c_ptr, p_out, #(3*16*MAX_REPEAT) 
vld1.u32 {q15}, [first_c_ptr:128] veor q15, q15, q2 vst1.u32 {q15}, [first_c_ptr:128] vand q1, q1, q14 veor q1, q1, q10 add first_c_ptr, p_out, #(1*16*MAX_REPEAT) vld1.u32 {q15}, [first_c_ptr:128] veor q15, q15, q1 vst1.u32 {q15}, [first_c_ptr:128] veor q5, q5, q9 vorr q12, q12, q5 veor q12, q6, q12//q6 veor q4, q4, q12 veor q4, q7, q4//q7--------- vld1.u32 {q15}, [p_out:128] veor q15, q15, q4 vst1.u32 {q15}, [p_out:128] vand q13, q13, q5//q5 veor q13, q3, q13//q3 vorr q13, q12, q13//q12 veor q10, q0, q10//q0 veor q13, q13, q10//q10 vorr q14, q11, q14//q3 veor q14, q14, q13//q13 add first_c_ptr, p_out, #(2*16*MAX_REPEAT) vld1.u32 {q15}, [first_c_ptr:128] veor q15, q15, q14 vst1.u32 {q15}, [first_c_ptr:128] add p_out, p_out, #(4*16*MAX_REPEAT) bx lr .text .align 2 .global s4 .type s4, %function s4: veor q10, q10, q12//q10 veor q12, q12, q14//q12 veor q1, q11, q9 veor q0, q1, q13 vand q1, q12, q1 veor q2, q13, q1 vorr q13, q11, q13//q13 veor q13, q14, q13 vorr q1, q14, q1//q14 veor q13, q13, q9 vand q13, q12, q13 vorr q3, q10, q2 veor q14, q13, q9 vand q14, q3, q14 veor q11, q11, q14//q11 vand q2, q2, q11 veor q3, q12, q9//q12 vorr q3, q3, q2 veor q10, q10, q11 vand q3, q10, q3 veor q3, q13, q3//q13 veor q1, q10, q1//q10 vand q10, q1, q0 veor q10, q14, q10 veor q13, q3, q9 vand q13, q15, q13 veor q13, q13, q10 vld1.u32 {q12}, [p_out:128] veor q12, q12, q13 vst1.u32 {q12}, [p_out:128] veor q10, q10, q9 vorr q14, q11, q15 vand q11, q11, q15 veor q15, q15, q9 vand q15, q3, q15//q15 veor q15, q15, q10 add first_c_ptr, p_out, #(1*16*MAX_REPEAT) vld1.u32 {q12}, [first_c_ptr:128] veor q12, q12, q15 vst1.u32 {q12}, [first_c_ptr:128] veor q10, q3, q10//q3 vand q0, q10, q0//q6 vorr q0, q2, q0//q2 veor q0, q1, q0//q1 veor q14, q14, q0 add first_c_ptr, p_out, #(2*16*MAX_REPEAT) vld1.u32 {q12}, [first_c_ptr:128] veor q12, q12, q14 vst1.u32 {q12}, [first_c_ptr:128] veor q0, q11, q0 add first_c_ptr, p_out, #(3*16*MAX_REPEAT) vld1.u32 {q12}, [first_c_ptr:128] veor q12, q12, q0 
vst1.u32 {q12}, [first_c_ptr:128] add p_out, p_out, #(4*16*MAX_REPEAT) bx lr .text .align 2 .global s5 .type s5, %function s5: vorr q0, q10, q12 veor q1, q15, q9 vand q1, q0, q1 veor q2, q13, q9 vand q2, q1, q2 veor q1, q10, q1 veor q2, q12, q2 veor q12, q12, q1//q12 vorr q4, q13, q12 vand q5, q14, q2 vorr q12, q10, q12 veor q5, q5, q12 veor q5, q13, q5 veor q15, q15, q5//q15 vorr q8, q1, q15 veor q0, q10, q0 veor q10, q10, q9 vand q10, q8, q10 vand q3, q13, q12 veor q3, q1, q3 veor q7, q2, q10 vand q8, q14, q8 veor q14, q14, q4 veor q10, q10, q14 veor q3, q3, q8 vorr q10, q3, q10 vand q12, q2, q12 veor q6, q2, q9 vorr q6, q6, q8 vand q10, q10, q6 vand q15, q15, q10 veor q0, q10, q0 veor q10, q10, q9 vand q10, q4, q10 veor q15, q14, q15 vorr q12, q15, q12 veor q12, q8, q12//q8 vand q12, q12, q11 veor q12, q12, q3//q3 add first_c_ptr, p_out, #(3*16*MAX_REPEAT) vld1.u32 {q6}, [first_c_ptr:128] veor q6, q6, q12 vst1.u32 {q6}, [first_c_ptr:128] vand q13, q13, q15//q13 veor q0, q0, q13 vorr q10, q10, q11 veor q10, q10, q0 vld1.u32 {q6}, [p_out:128] veor q6, q6, q10 vst1.u32 {q6}, [p_out:128] veor q2, q4, q2 veor q0, q0, q9 vand q0, q2, q0 veor q1, q1, q15 veor q0, q0, q1 vand q4, q4, q11//q11 veor q11, q11, q9 veor q1, q14, q9 vorr q7, q1, q7 vand q7, q11, q7 veor q5, q7, q5//q7 add first_c_ptr, p_out, #(2*16*MAX_REPEAT) vld1.u32 {q6}, [first_c_ptr:128] veor q6, q6, q5 vst1.u32 {q6}, [first_c_ptr:128] veor q0, q4, q0 add first_c_ptr, p_out, #(1*16*MAX_REPEAT) vld1.u32 {q6}, [first_c_ptr:128] veor q6, q6, q0 vst1.u32 {q6}, [first_c_ptr:128] add p_out, p_out, #(4*16*MAX_REPEAT) bx lr .text .align 2 .global s6 .type s6, %function s6: veor q0, q11, q14 vorr q1, q11, q15 vand q1, q10, q1 veor q0, q0, q1 veor q2, q15, q0 vand q3, q10, q2 veor q2, q2, q9 vand q2, q14, q2 veor q4, q11, q3 veor q3, q15, q3 veor q5, q10, q12 vorr q6, q4, q5 vorr q4, q2, q4 vorr q5, q11, q5 veor q11, q11, q6 veor q6, q0, q6 veor q11, q11, q9 vand q8, q15, q11 veor q8, q12, q8 vand q12, q12, q6 
veor q15, q15, q9 vand q15, q12, q15 veor q7, q10, q8 vorr q10, q10, q6 vand q10, q4, q10 veor q4, q15, q4 veor q11, q5, q11 veor q5, q4, q5 veor q10, q8, q10 veor q15, q15, q9 vand q15, q10, q15 veor q0, q0, q10 veor q0, q0, q9 vand q0, q14, q0 veor q0, q0, q11 veor q11, q12, q11 veor q12, q12, q9 vand q12, q14, q12//q14 vorr q12, q8, q12//q8 vand q4, q4, q13 veor q4, q4, q6//q6 add first_c_ptr, p_out, #(3*16*MAX_REPEAT) vld1.u32 {q14}, [first_c_ptr:128] veor q14, q14, q4 vst1.u32 {q14}, [first_c_ptr:128] vorr q2, q2, q13 veor q2, q2, q15//q15 add first_c_ptr, p_out, #(2*16*MAX_REPEAT) vld1.u32 {q14}, [first_c_ptr:128] veor q14, q14, q2 vst1.u32 {q14}, [first_c_ptr:128] vorr q1, q1, q12 veor q1, q5, q1//q5 veor q13, q13, q9 vand q0, q0, q13//q0 veor q1, q0, q1 add first_c_ptr, p_out, #(1*16*MAX_REPEAT) vld1.u32 {q14}, [first_c_ptr:128] veor q14, q14, q1 vst1.u32 {q14}, [first_c_ptr:128] vand q3, q3, q7 veor q3, q3, q11//q11 vand q13, q12, q13//q13 veor q3, q13, q3 vld1.u32 {q14}, [p_out:128] veor q14, q14, q3 vst1.u32 {q14}, [p_out:128] add p_out, p_out, #(4*16*MAX_REPEAT) bx lr .text .align 2 .global s7 .type s7, %function s7: veor q0, q13, q14 veor q1, q12, q0 vand q2, q15, q1 vand q3, q13, q0 veor q4, q11, q3 vand q5, q2, q4 vand q6, q15, q3 veor q6, q12, q6 vorr q7, q4, q6 veor q0, q15, q0 veor q7, q7, q0 veor q5, q5, q9 vand q5, q10, q5 veor q5, q5, q7 vorr q3, q3, q7 add first_c_ptr, p_out, #(3*16*MAX_REPEAT) vld1.u32 {q7}, [first_c_ptr:128] veor q7, q7, q5 vst1.u32 {q7}, [first_c_ptr:128] veor q7, q1, q9 vand q7, q14, q7 vorr q5, q4, q7 veor q6, q2, q6 veor q5, q5, q6 veor q2, q2, q0 veor q13, q13, q9 vorr q13, q13, q2 vand q13, q4, q13 veor q6, q14, q6 veor q13, q13, q6 vand q12, q12, q13 vorr q3, q3, q12 veor q0, q0, q9 vand q0, q1, q0//q1 veor q0, q3, q0//q3 veor q2, q10, q9 vand q2, q0, q2 veor q2, q2, q5 vld1.u32 {q1}, [p_out:128] veor q1, q1, q2 vst1.u32 {q1}, [p_out:128] vorr q4, q13, q0 vand q15, q15, q4//q4 vand q11, q11, q15 veor q5, q5, q0 veor 
q11, q11, q5 vorr q12, q12, q11 veor q12, q15, q12 veor q14, q14, q5//q5 vorr q12, q12, q14//q14 vand q6, q12, q10 veor q13, q6, q13//q6 add first_c_ptr, p_out, #(2*16*MAX_REPEAT) vld1.u32 {q1}, [first_c_ptr:128] veor q1, q1, q13 vst1.u32 {q1}, [first_c_ptr:128] veor q12, q15, q12//q15 vorr q12, q7, q12//q7 veor q0, q0, q9 veor q12, q12, q0//q0 veor q10, q10, q9 vand q10, q12, q10//q12 veor q10, q10, q11//q11 add first_c_ptr, p_out, #(1*16*MAX_REPEAT) vld1.u32 {q1}, [first_c_ptr:128] veor q1, q1, q10 vst1.u32 {q1}, [first_c_ptr:128] add p_out, p_out, #(4*16*MAX_REPEAT) bx lr .text .align 2 .global s8 .type s8, %function s8: veor q6, q12, q9 vorr q0, q6, q11 vand q1, q14, q6 veor q1, q13, q1 vand q2, q10, q1 veor q1, q1, q9 vand q3, q11, q1 vorr q4, q10, q3 veor q5, q4, q9 vand q5, q12, q5 vand q12, q11, q6//q12 veor q12, q14, q12 vand q4, q4, q12 veor q1, q4, q1 veor q1, q1, q5 veor q6, q0, q9 veor q6, q6, q1 vand q0, q2, q0 vorr q2, q2, q4 vorr q5, q0, q15 veor q5, q5, q6 veor q6, q10, q6 vand q4, q14, q6 veor q6, q14, q6//q14 add first_c_ptr, p_out, #(1*16*MAX_REPEAT) vld1.u32 {q14}, [first_c_ptr:128] veor q14, q14, q5 vst1.u32 {q14}, [first_c_ptr:128] veor q1, q11, q1 veor q4, q4, q1 veor q3, q3, q4 veor q4, q2, q4 vorr q4, q11, q4//q11 veor q4, q4, q6//q6 vand q2, q2, q15 veor q2, q2, q4 add first_c_ptr, p_out, #(2*16*MAX_REPEAT) vld1.u32 {q14}, [first_c_ptr:128] veor q14, q14, q2 vst1.u32 {q14}, [first_c_ptr:128] veor q12, q12, q3//q12 vorr q1, q13, q1 veor q1, q12, q1 veor q10, q10, q1//q10 veor q1, q0, q1 vand q10, q10, q15 veor q10, q10, q3 add first_c_ptr, p_out, #(3*16*MAX_REPEAT) vld1.u32 {q14}, [first_c_ptr:128] veor q14, q14, q10 vst1.u32 {q14}, [first_c_ptr:128] veor q13, q13, q9 vand q13, q12, q13//q13 vand q4, q4, q13//q13 veor q4, q4, q1 vorr q4, q4, q15//q15 veor q4, q4, q3 vld1.u32 {q14}, [p_out:128] veor q14, q14, q4 vst1.u32 {q14}, [p_out:128] add p_out, p_out, #(4*16*MAX_REPEAT) bx lr #define 
LM_CALL_STEP(fun,c1,k1,c2,k2,c3,k3,c4,k4,c5,k5,c6,k6) \ add r0, first_c, #(c1*16*MAX_REPEAT);\ add r1, first_c, #(c2*16*MAX_REPEAT);\ add r2, first_c, #(c3*16*MAX_REPEAT);\ add r3, first_c, #(c4*16*MAX_REPEAT);\ vld1.u32 {q10}, [r0:128];\ vld1.u32 {q11}, [r1:128];\ vld1.u32 {q12}, [r2:128];\ vld1.u32 {q13}, [r3:128];\ \ add r0, first_c, #(c5*16*MAX_REPEAT);\ add r1, first_c, #(c6*16*MAX_REPEAT);\ add r2, first_k, #(k1*16*MAX_REPEAT);\ add r3, first_k, #(k2*16*MAX_REPEAT);\ vld1.u32 {q14}, [r0:128];\ vld1.u32 {q15}, [r1:128];\ vld1.u32 {q0}, [r2:128];\ vld1.u32 {q1}, [r3:128];\ \ add r0, first_k, #(k3*16*MAX_REPEAT);\ add r1, first_k, #(k4*16*MAX_REPEAT);\ add r2, first_k, #(k5*16*MAX_REPEAT);\ add r3, first_k, #(k6*16*MAX_REPEAT);\ vld1.u32 {q2}, [r0:128];\ vld1.u32 {q3}, [r1:128];\ vld1.u32 {q4}, [r2:128];\ vld1.u32 {q5}, [r3:128];\ \ veor.u32 q10, q10, q0;\ veor.u32 q11, q11, q1;\ veor.u32 q12, q12, q2;\ veor.u32 q13, q13, q3;\ veor.u32 q14, q14, q4;\ veor.u32 q15, q15, q5;\ \ bl fun .text .align 2 .global lm_eval_neon_kernel .type lm_eval_neon_kernel, %function lm_eval_neon_kernel: vpush {q4,q5,q6,q7} push {r4,r5,r6,r7} push {lr} mov first_k, r0 mov first_c, r1 // Initialize cs vmov.u32 q9, #0xffffffff vmov.u32 q0, #0 vmov.u32 q1, #0 vmov.u32 q2, #0xffffffff //Put all 1 vmov.u32 q3, #0xffffffff mov first_c_ptr, first_c SET_0 SET_FFFFFFFF SET_FFFFFFFF SET_FFFFFFFF SET_0 SET_0 SET_0 SET_FFFFFFFF SET_0 SET_0 SET_FFFFFFFF SET_FFFFFFFF SET_0 SET_0 SET_0 SET_0 SET_FFFFFFFF SET_FFFFFFFF SET_FFFFFFFF SET_0 SET_FFFFFFFF SET_FFFFFFFF SET_0 SET_0 SET_FFFFFFFF SET_0 SET_0 SET_FFFFFFFF SET_0 SET_FFFFFFFF SET_0 SET_0 SET_FFFFFFFF SET_0 SET_0 SET_FFFFFFFF SET_FFFFFFFF SET_0 SET_0 SET_0 SET_FFFFFFFF SET_0 SET_FFFFFFFF SET_0 SET_0 SET_0 SET_FFFFFFFF SET_0 SET_0 SET_0 SET_0 SET_0 SET_0 SET_0 SET_FFFFFFFF SET_0 SET_FFFFFFFF SET_0 SET_0 SET_0 SET_0 SET_FFFFFFFF SET_0 SET_0 mov i, #0 // i=0 init_while: mov p_out, first_c//1 LM_CALL_STEP(s1, 56, 47, 47, 11, 38, 26, 51, 3 , 52, 13, 
60, 41) LM_CALL_STEP(s2, 52, 27, 60, 6 , 43, 54, 59, 48, 48, 39, 32, 19) LM_CALL_STEP(s3, 48, 53, 32, 25, 46, 33, 54, 34, 57, 17, 36, 5 ) LM_CALL_STEP(s4, 57, 4 , 36, 55, 49, 24, 62, 32, 41, 40, 33, 20) LM_CALL_STEP(s5, 41, 36, 33, 31, 39, 21, 55, 8 , 45, 23, 63, 52) LM_CALL_STEP(s6, 45, 14, 63, 29, 58, 51, 34, 9 , 40, 35, 50, 30) LM_CALL_STEP(s7, 40, 2 , 50, 37, 44, 22, 61, 0 , 37, 42, 53, 38) LM_CALL_STEP(s8, 37, 16, 53, 43, 42, 44, 35, 1 , 56, 7 , 47, 28) //2 LM_CALL_STEP(s1, 24, 54, 15, 18, 6 , 33, 19, 10, 20, 20, 28, 48) LM_CALL_STEP(s2, 20, 34, 28, 13, 11, 4 , 27, 55, 16, 46, 0 , 26) LM_CALL_STEP(s3, 16, 3 , 0 , 32, 14, 40, 22, 41, 25, 24, 4 , 12) LM_CALL_STEP(s4, 25, 11, 4 , 5 , 17, 6 , 30, 39, 9 , 47, 1 , 27) LM_CALL_STEP(s5, 9 , 43, 1 , 38, 7 , 28, 23, 15, 13, 30, 31, 0 ) LM_CALL_STEP(s6, 13, 21, 31, 36, 26, 31, 2 , 16, 8 , 42, 18, 37) LM_CALL_STEP(s7, 8 , 9 , 18, 44, 12, 29, 29, 7 , 5 , 49, 21, 45) LM_CALL_STEP(s8, 5 , 23, 21, 50, 10, 51, 3 , 8 , 24, 14, 15, 35) mov p_out, first_c//3 LM_CALL_STEP(s1, 56, 11, 47, 32, 38, 47, 51, 24, 52, 34, 60, 5 ) LM_CALL_STEP(s2, 52, 48, 60, 27, 43, 18, 59, 12, 48, 3 , 32, 40) LM_CALL_STEP(s3, 48, 17, 32, 46, 46, 54, 54, 55, 57, 13, 36, 26) LM_CALL_STEP(s4, 57, 25, 36, 19, 49, 20, 62, 53, 41, 4 , 33, 41) LM_CALL_STEP(s5, 41, 2 , 33, 52, 39, 42, 55, 29, 45, 44, 63, 14) LM_CALL_STEP(s6, 45, 35, 63, 50, 58, 45, 34, 30, 40, 1 , 50, 51) LM_CALL_STEP(s7, 40, 23, 50, 31, 44, 43, 61, 21, 37, 8 , 53, 0 ) LM_CALL_STEP(s8, 37, 37, 53, 9 , 42, 38, 35, 22, 56, 28, 47, 49) //4 LM_CALL_STEP(s1, 24, 25, 15, 46, 6 , 4 , 19, 13, 20, 48, 28, 19) LM_CALL_STEP(s2, 20, 5 , 28, 41, 11, 32, 27, 26, 16, 17, 0 , 54) LM_CALL_STEP(s3, 16, 6 , 0 , 3 , 14, 11, 22, 12, 25, 27, 4 , 40) LM_CALL_STEP(s4, 25, 39, 4 , 33, 17, 34, 30, 10, 9 , 18, 1 , 55) LM_CALL_STEP(s5, 9 , 16, 1 , 7 , 7 , 1 , 23, 43, 13, 31, 31, 28) LM_CALL_STEP(s6, 13, 49, 31, 9 , 26, 0 , 2 , 44, 8 , 15, 18, 38) LM_CALL_STEP(s7, 8 , 37, 18, 45, 12, 2 , 29, 35, 5 , 22, 21, 14) 
LM_CALL_STEP(s8, 5 , 51, 21, 23, 10, 52, 3 , 36, 24, 42, 15, 8 ) mov p_out, first_c//5 LM_CALL_STEP(s1, 56, 39, 47, 3 , 38, 18, 51, 27, 52, 5 , 60, 33) LM_CALL_STEP(s2, 52, 19, 60, 55, 43, 46, 59, 40, 48, 6 , 32, 11) LM_CALL_STEP(s3, 48, 20, 32, 17, 46, 25, 54, 26, 57, 41, 36, 54) LM_CALL_STEP(s4, 57, 53, 36, 47, 49, 48, 62, 24, 41, 32, 33, 12) LM_CALL_STEP(s5, 41, 30, 33, 21, 39, 15, 55, 2 , 45, 45, 63, 42) LM_CALL_STEP(s6, 45, 8 , 63, 23, 58, 14, 34, 31, 40, 29, 50, 52) LM_CALL_STEP(s7, 40, 51, 50, 0 , 44, 16, 61, 49, 37, 36, 53, 28) LM_CALL_STEP(s8, 37, 38, 53, 37, 42, 7 , 35, 50, 56, 1 , 47, 22) //6 LM_CALL_STEP(s1, 24, 53, 15, 17, 6 , 32, 19, 41, 20, 19, 28, 47) LM_CALL_STEP(s2, 20, 33, 28, 12, 11, 3 , 27, 54, 16, 20, 0 , 25) LM_CALL_STEP(s3, 16, 34, 0 , 6 , 14, 39, 22, 40, 25, 55, 4 , 11) LM_CALL_STEP(s4, 25, 10, 4 , 4 , 17, 5 , 30, 13, 9 , 46, 1 , 26) LM_CALL_STEP(s5, 9 , 44, 1 , 35, 7 , 29, 23, 16, 13, 0 , 31, 1 ) LM_CALL_STEP(s6, 13, 22, 31, 37, 26, 28, 2 , 45, 8 , 43, 18, 7 ) LM_CALL_STEP(s7, 8 , 38, 18, 14, 12, 30, 29, 8 , 5 , 50, 21, 42) LM_CALL_STEP(s8, 5 , 52, 21, 51, 10, 21, 3 , 9 , 24, 15, 15, 36) mov p_out, first_c//7 LM_CALL_STEP(s1, 56, 10, 47, 6 , 38, 46, 51, 55, 52, 33, 60, 4 ) LM_CALL_STEP(s2, 52, 47, 60, 26, 43, 17, 59, 11, 48, 34, 32, 39) LM_CALL_STEP(s3, 48, 48, 32, 20, 46, 53, 54, 54, 57, 12, 36, 25) LM_CALL_STEP(s4, 57, 24, 36, 18, 49, 19, 62, 27, 41, 3 , 33, 40) LM_CALL_STEP(s5, 41, 31, 33, 49, 39, 43, 55, 30, 45, 14, 63, 15) LM_CALL_STEP(s6, 45, 36, 63, 51, 58, 42, 34, 0 , 40, 2 , 50, 21) LM_CALL_STEP(s7, 40, 52, 50, 28, 44, 44, 61, 22, 37, 9 , 53, 1 ) LM_CALL_STEP(s8, 37, 7 , 53, 38, 42, 35, 35, 23, 56, 29, 47, 50) //8 LM_CALL_STEP(s1, 24, 24, 15, 20, 6 , 3 , 19, 12, 20, 47, 28, 18) LM_CALL_STEP(s2, 20, 4 , 28, 40, 11, 6 , 27, 25, 16, 48, 0 , 53) LM_CALL_STEP(s3, 16, 5 , 0 , 34, 14, 10, 22, 11, 25, 26, 4 , 39) LM_CALL_STEP(s4, 25, 13, 4 , 32, 17, 33, 30, 41, 9 , 17, 1 , 54) LM_CALL_STEP(s5, 9 , 45, 1 , 8 , 7 , 2 , 23, 44, 13, 28, 31, 
29) LM_CALL_STEP(s6, 13, 50, 31, 38, 26, 1 , 2 , 14, 8 , 16, 18, 35) LM_CALL_STEP(s7, 8 , 7 , 18, 42, 12, 31, 29, 36, 5 , 23, 21, 15) LM_CALL_STEP(s8, 5 , 21, 21, 52, 10, 49, 3 , 37, 24, 43, 15, 9 ) mov p_out, first_c//9 LM_CALL_STEP(s1, 56, 6 , 47, 27, 38, 10, 51, 19, 52, 54, 60, 25) LM_CALL_STEP(s2, 52, 11, 60, 47, 43, 13, 59, 32, 48, 55, 32, 3 ) LM_CALL_STEP(s3, 48, 12, 32, 41, 46, 17, 54, 18, 57, 33, 36, 46) LM_CALL_STEP(s4, 57, 20, 36, 39, 49, 40, 62, 48, 41, 24, 33, 4 ) LM_CALL_STEP(s5, 41, 52, 33, 15, 39, 9 , 55, 51, 45, 35, 63, 36) LM_CALL_STEP(s6, 45, 2 , 63, 45, 58, 8 , 34, 21, 40, 23, 50, 42) LM_CALL_STEP(s7, 40, 14, 50, 49, 44, 38, 61, 43, 37, 30, 53, 22) LM_CALL_STEP(s8, 37, 28, 53, 0 , 42, 1 , 35, 44, 56, 50, 47, 16) //10 LM_CALL_STEP(s1, 24, 20, 15, 41, 6 , 24, 19, 33, 20, 11, 28, 39) LM_CALL_STEP(s2, 20, 25, 28, 4 , 11, 27, 27, 46, 16, 12, 0 , 17) LM_CALL_STEP(s3, 16, 26, 0 , 55, 14, 6 , 22, 32, 25, 47, 4 , 3 ) LM_CALL_STEP(s4, 25, 34, 4 , 53, 17, 54, 30, 5 , 9 , 13, 1 , 18) LM_CALL_STEP(s5, 9 , 7 , 1 , 29, 7 , 23, 23, 38, 13, 49, 31, 50) LM_CALL_STEP(s6, 13, 16, 31, 0 , 26, 22, 2 , 35, 8 , 37, 18, 1 ) LM_CALL_STEP(s7, 8 , 28, 18, 8 , 12, 52, 29, 2 , 5 , 44, 21, 36) LM_CALL_STEP(s8, 5 , 42, 21, 14, 10, 15, 3 , 31, 24, 9 , 15, 30) mov p_out, first_c//11 LM_CALL_STEP(s1, 56, 34, 47, 55, 38, 13, 51, 47, 52, 25, 60, 53) LM_CALL_STEP(s2, 52, 39, 60, 18, 43, 41, 59, 3 , 48, 26, 32, 6 ) LM_CALL_STEP(s3, 48, 40, 32, 12, 46, 20, 54, 46, 57, 4 , 36, 17) LM_CALL_STEP(s4, 57, 48, 36, 10, 49, 11, 62, 19, 41, 27, 33, 32) LM_CALL_STEP(s5, 41, 21, 33, 43, 39, 37, 55, 52, 45, 8 , 63, 9 ) LM_CALL_STEP(s6, 45, 30, 63, 14, 58, 36, 34, 49, 40, 51, 50, 15) LM_CALL_STEP(s7, 40, 42, 50, 22, 44, 7 , 61, 16, 37, 31, 53, 50) LM_CALL_STEP(s8, 37, 1 , 53, 28, 42, 29, 35, 45, 56, 23, 47, 44) //12 LM_CALL_STEP(s1, 24, 48, 15, 12, 6 , 27, 19, 4 , 20, 39, 28, 10) LM_CALL_STEP(s2, 20, 53, 28, 32, 11, 55, 27, 17, 16, 40, 0 , 20) LM_CALL_STEP(s3, 16, 54, 0 , 26, 14, 34, 22, 3 , 25, 
18, 4 , 6 ) LM_CALL_STEP(s4, 25, 5 , 4 , 24, 17, 25, 30, 33, 9 , 41, 1 , 46) LM_CALL_STEP(s5, 9 , 35, 1 , 2 , 7 , 51, 23, 7 , 13, 22, 31, 23) LM_CALL_STEP(s6, 13, 44, 31, 28, 26, 50, 2 , 8 , 8 , 38, 18, 29) LM_CALL_STEP(s7, 8 , 1 , 18, 36, 12, 21, 29, 30, 5 , 45, 21, 9 ) LM_CALL_STEP(s8, 5 , 15, 21, 42, 10, 43, 3 , 0 , 24, 37, 15, 31) mov p_out, first_c//13 LM_CALL_STEP(s1, 56, 5 , 47, 26, 38, 41, 51, 18, 52, 53, 60, 24) LM_CALL_STEP(s2, 52, 10, 60, 46, 43, 12, 59, 6 , 48, 54, 32, 34) LM_CALL_STEP(s3, 48, 11, 32, 40, 46, 48, 54, 17, 57, 32, 36, 20) LM_CALL_STEP(s4, 57, 19, 36, 13, 49, 39, 62, 47, 41, 55, 33, 3 ) LM_CALL_STEP(s5, 41, 49, 33, 16, 39, 38, 55, 21, 45, 36, 63, 37) LM_CALL_STEP(s6, 45, 31, 63, 42, 58, 9 , 34, 22, 40, 52, 50, 43) LM_CALL_STEP(s7, 40, 15, 50, 50, 44, 35, 61, 44, 37, 0 , 53, 23) LM_CALL_STEP(s8, 37, 29, 53, 1 , 42, 2 , 35, 14, 56, 51, 47, 45) //14 LM_CALL_STEP(s1, 24, 19, 15, 40, 6 , 55, 19, 32, 20, 10, 28, 13) LM_CALL_STEP(s2, 20, 24, 28, 3 , 11, 26, 27, 20, 16, 11, 0 , 48) LM_CALL_STEP(s3, 16, 25, 0 , 54, 14, 5 , 22, 6 , 25, 46, 4 , 34) LM_CALL_STEP(s4, 25, 33, 4 , 27, 17, 53, 30, 4 , 9 , 12, 1 , 17) LM_CALL_STEP(s5, 9 , 8 , 1 , 30, 7 , 52, 23, 35, 13, 50, 31, 51) LM_CALL_STEP(s6, 13, 45, 31, 1 , 26, 23, 2 , 36, 8 , 7 , 18, 2 ) LM_CALL_STEP(s7, 8 , 29, 18, 9 , 12, 49, 29, 31, 5 , 14, 21, 37) LM_CALL_STEP(s8, 5 , 43, 21, 15, 10, 16, 3 , 28, 24, 38, 15, 0 ) mov p_out, first_c//15 LM_CALL_STEP(s1, 56, 33, 47, 54, 38, 12, 51, 46, 52, 24, 60, 27) LM_CALL_STEP(s2, 52, 13, 60, 17, 43, 40, 59, 34, 48, 25, 32, 5 ) add i, i, #1 add first_k, first_k, #16 add first_c, first_c, #16 cmp i, #(MAX_REPEAT) blo init_while pop {lr} pop {r4,r5,r6,r7} vpop {q4,q5,q6,q7} bx lr // Charset #define buffer r0 #define value r1 #define size r2 .text .align 2 .global memset_uint_neon .type memset_uint_neon, %function memset_uint_neon: vdup.u32 q0, value //vmov q1, q0 while: //vst1.u32 {q0,q1}, [buffer:128]! //vst1.u32 {q0,q1}, [buffer:128]! 
//sub size, size, #16 vst1.u32 {q0}, [buffer:128]! sub size, size, #4 cmp size, #0 bgt while bx lr
Aladdin-Wang/MicroLink
3,366
MicroLink/microlink_app/linkers/iar/startup.s
/* * Copyright (c) 2023-2024 HPMicro * SPDX-License-Identifier: BSD-3-Clause */ #include "hpm_csr_regs.h" MODULE ?startup /* Forward declaration of sections */ SECTION CSTACK:DATA:NOROOT(3) SECTION SAFESTACK:DATA:NOROOT(3) SECTION `.vector_table`:CODE:NOROOT(3) EXTERN _clean_up EXTERN reset_handler EXTERN __low_level_init EXTERN irq_handler_trap EXTERN __iar_static_base$$GPREL EXTERN __iar_data_init2 EXTERN l1c_ic_disable EXTERN l1c_ic_enable EXTERN l1c_dc_invalidate_all EXTERN l1c_dc_enable EXTERN l1c_dc_disable // -------------------------------------------------- SECTION `.startup`:CODE:ROOT(2) EXTERN __iar_program_start PUBLIC _start EXTERN reset_handler _start: __iar_cstart_init_gp: .option push .option norelax /* Initialize global pointer */ la gp, __iar_static_base$$GPREL .option pop /* reset mstatus to 0 */ csrrw x0, mstatus, x0 #ifdef __riscv_flen __iar_cstart_init_fpu: /* Enable FPU */ li t0, CSR_MSTATUS_FS_MASK csrrs t0, mstatus, t0 /* Initialize FCSR */ fscsr zero #endif __iar_cstart_init_stack: /* Initialize Stack pointer */ la t0, SFE(CSTACK) mv sp, t0 #ifdef __nds_execit __iar_cstart_init_uitb: EXTERN `.exec.itable$$Base` la a0, `.exec.itable$$Base` csrw 0x800, a0 #endif #ifdef CONFIG_NOT_ENABLE_ICACHE call l1c_ic_disable #else call l1c_ic_enable #endif #ifdef CONFIG_NOT_ENABLE_DCACHE call l1c_dc_invalidate_all call l1c_dc_disable #else call l1c_dc_enable call l1c_dc_invalidate_all #endif /* Call IAR low-levle API to initialize BSS, RW Data, RAM Function, etc */ call __low_level_init call __iar_data_init2 fence.i #ifndef NO_CLEANUP_AT_START /* clean up */ call _clean_up #endif __iar_cstart_init_mvec: #if defined(CONFIG_FREERTOS) && CONFIG_FREERTOS EXTERN freertos_risc_v_trap_handler #define HANDLER_TRAP freertos_risc_v_trap_handler /* Use mscratch to store isr level */ csrw mscratch, x0 #elif defined(CONFIG_UCOS_III) && CONFIG_UCOS_III EXTERN ucos_risc_v_trap_handler #define HANDLER_TRAP ucos_risc_v_trap_handler /* Use mscratch to store isr level */ 
csrw mscratch, x0 #elif defined(CONFIG_THREADX) && CONFIG_THREADX EXTERN tx_risc_v_trap_handler #define HANDLER_TRAP tx_risc_v_trap_handler #define HANDLER_S_TRAP tx_risc_v_trap_handler /* Use mscratch to store isr level */ csrw mscratch, x0 #elif defined(CONFIG_RTTHREAD) && CONFIG_RTTHREAD EXTERN rtt_risc_v_trap_handler #define HANDLER_TRAP rtt_risc_v_trap_handler /* Use mscratch to store isr level */ csrw mscratch, x0 #else #define HANDLER_TRAP irq_handler_trap #endif #if !defined(USE_NONVECTOR_MODE) || (USE_NONVECTOR_MODE == 0) /* Initial machine trap-vector Base */ la t0, SFB(`.vector_table`) csrw mtvec, t0 /* Enable vectored external PLIC interrupt */ csrsi CSR_MMISC_CTL, 2 #else /* Initial machine trap-vector Base */ la t0, HANDLER_TRAP csrw mtvec, t0 /* Disable vectored external PLIC interrupt */ csrci CSR_MMISC_CTL, 2 #endif /* Jump to reset handler once all settings have done */ call reset_handler __iar_cstart_exit j __iar_cstart_exit #include "../vectors.h"
Aladdin-Wang/MicroLink
3,312
MicroLink/microlink_app/linkers/gcc/start.S
/* * Copyright (c) 2021-2022 HPMicro * * SPDX-License-Identifier: BSD-3-Clause * */ #include "hpm_csr_regs.h" .section .start, "ax" .global _start .type _start,@function _start: /* Initialize global pointer */ .option push .option norelax la gp, __global_pointer$ la tp, __thread_pointer$ .option pop /* reset mstatus to 0*/ csrrw x0, mstatus, x0 #ifdef __riscv_flen /* Enable FPU */ li t0, CSR_MSTATUS_FS_MASK csrrs t0, mstatus, t0 /* Initialize FCSR */ fscsr zero #endif #ifdef INIT_EXT_RAM_FOR_DATA la t0, _stack_safe mv sp, t0 call _init_ext_ram #endif /* Initialize stack pointer */ la t0, _stack mv sp, t0 #ifdef CONFIG_NOT_ENABLE_ICACHE call l1c_ic_disable #else call l1c_ic_enable #endif #ifdef CONFIG_NOT_ENABLE_DCACHE call l1c_dc_invalidate_all call l1c_dc_disable #else call l1c_dc_enable call l1c_dc_invalidate_all #endif /* * Initialize LMA/VMA sections. * Relocation for any sections that need to be copied from LMA to VMA. */ call c_startup #if defined(__SES_RISCV) /* Initialize the heap */ la a0, __heap_start__ la a1, __heap_end__ sub a1, a1, a0 la t1, __SEGGER_RTL_init_heap jalr t1 #endif /* Do global constructors */ call __libc_init_array #ifndef NO_CLEANUP_AT_START /* clean up */ call _clean_up #endif #ifdef __nds_execit /* Initialize EXEC.IT table */ la t0, _ITB_BASE_ csrw uitb, t0 #endif #if defined(CONFIG_FREERTOS) && CONFIG_FREERTOS #define HANDLER_TRAP freertos_risc_v_trap_handler #define HANDLER_S_TRAP freertos_risc_v_trap_handler /* Use mscratch to store isr level */ csrw mscratch, 0 #elif defined(CONFIG_UCOS_III) && CONFIG_UCOS_III #define HANDLER_TRAP ucos_risc_v_trap_handler #define HANDLER_S_TRAP ucos_risc_v_trap_handler /* Use mscratch to store isr level */ csrw mscratch, 0 #elif defined(CONFIG_THREADX) && CONFIG_THREADX #define HANDLER_TRAP tx_risc_v_trap_handler #define HANDLER_S_TRAP tx_risc_v_trap_handler /* Use mscratch to store isr level */ csrw mscratch, 0 #elif defined(CONFIG_RTTHREAD) && CONFIG_RTTHREAD #define HANDLER_TRAP 
rtt_risc_v_trap_handler #define HANDLER_S_TRAP rtt_risc_v_trap_handler /* Use mscratch to store isr level */ csrw mscratch, 0 #else #define HANDLER_TRAP irq_handler_trap #define HANDLER_S_TRAP irq_handler_s_trap #endif #if !defined(USE_NONVECTOR_MODE) || (USE_NONVECTOR_MODE == 0) /* Initial machine trap-vector Base */ la t0, __vector_table csrw mtvec, t0 #if defined (USE_S_MODE_IRQ) la t0, __vector_s_table csrw stvec, t0 #endif /* Enable vectored external PLIC interrupt */ csrsi CSR_MMISC_CTL, 2 #else /* Initial machine trap-vector Base */ la t0, HANDLER_TRAP csrw mtvec, t0 #if defined (USE_S_MODE_IRQ) la t0, HANDLER_S_TRAP csrw stvec, t0 #endif /* Disable vectored external PLIC interrupt */ csrci CSR_MMISC_CTL, 2 #endif /* System reset handler */ call reset_handler /* Infinite loop, if returned accidentally */ 1: j 1b .weak exit exit: 1: j 1b .section .isr_vector, "ax" .weak nmi_handler nmi_handler: 1: j 1b #include "../vectors.h"
Aladdin-Wang/MicroLink
12,474
MicroLink/microlink_app/linkers/segger/startup.s
/********************************************************************* * SEGGER Microcontroller GmbH * * The Embedded Experts * ********************************************************************** * * * (c) 2014 - 2021 SEGGER Microcontroller GmbH * * * * www.segger.com Support: support@segger.com * * * ********************************************************************** * * * All rights reserved. * * * * Redistribution and use in source and binary forms, with or * * without modification, are permitted provided that the following * * condition is met: * * * * - Redistributions of source code must retain the above copyright * * notice, this condition and the following disclaimer. * * * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND * * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * * DISCLAIMED. IN NO EVENT SHALL SEGGER Microcontroller BE LIABLE FOR * * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * * DAMAGE. * * * ********************************************************************** -------------------------- END-OF-HEADER ----------------------------- File : SEGGER_RISCV_crt0.s Purpose : Generic runtime init startup code for RISC-V CPUs. Designed to work with the SEGGER linker to produce smallest possible executables. This file does not normally require any customization. 
Additional information: Preprocessor Definitions FULL_LIBRARY If defined then - argc, argv are set up by calling SEGGER_SEMIHOST_GetArgs(). - the exit symbol is defined and executes on return from main. - the exit symbol calls destructors, atexit functions and then calls SEGGER_SEMIHOST_Exit(). If not defined then - argc and argv are not valid (main is assumed to not take parameters) - the exit symbol is defined, executes on return from main and halts in a loop. */ #include "hpm_csr_regs.h" /********************************************************************* * * Defines, configurable * ********************************************************************** */ #ifndef APP_ENTRY_POINT #define APP_ENTRY_POINT reset_handler #endif #ifndef ARGSSPACE #define ARGSSPACE 128 #endif /********************************************************************* * * Macros * ********************************************************************** */ // // Declare a label as function symbol (without switching sections) // .macro MARK_FUNC Name .global \Name .type \Name, function \Name: .endm // // Declare a regular function. // Functions from the startup are placed in the init section. 
// .macro START_FUNC Name .section .init.\Name, "ax" .global \Name #if __riscv_compressed .balign 2 #else .balign 4 #endif .type \Name, function \Name: .endm // // Declare a weak function // .macro WEAK_FUNC Name .section .init.\Name, "ax", %progbits .global \Name .weak \Name #if __riscv_compressed .balign 2 #else .balign 4 #endif .type \Name, function \Name: .endm // // Mark the end of a function and calculate its size // .macro END_FUNC name .size \name,.-\name .endm /********************************************************************* * * Externals * ********************************************************************** */ .extern APP_ENTRY_POINT // typically main /********************************************************************* * * Global functions * ********************************************************************** */ /********************************************************************* * * _start * * Function description * Entry point for the startup code. * Usually called by the reset handler. * Performs all initialisation, based on the entries in the * linker-generated init table, then calls main(). * It is device independent, so there should not be any need for an * end-user to modify it. * * Additional information * At this point, the stack pointer should already have been * initialized * - by hardware (such as on Cortex-M), * - by the device-specific reset handler, * - or by the debugger (such as for RAM Code). 
*/ #undef L #define L(label) .L_start_##label START_FUNC _start .option push .option norelax lui gp, %hi(__global_pointer$) addi gp, gp, %lo(__global_pointer$) lui tp, %hi(__thread_pointer$) addi tp, tp, %lo(__thread_pointer$) .option pop csrw mstatus, zero csrw mcause, zero #ifdef __riscv_flen /* Enable FPU */ li t0, CSR_MSTATUS_FS_MASK csrrs t0, mstatus, t0 /* Initialize FCSR */ fscsr zero #endif #ifdef INIT_EXT_RAM_FOR_DATA la t0, _stack_safe mv sp, t0 call _init_ext_ram #endif lui t0, %hi(__stack_end__) addi sp, t0, %lo(__stack_end__) #ifdef CONFIG_NOT_ENABLE_ICACHE call l1c_ic_disable #else call l1c_ic_enable #endif #ifdef CONFIG_NOT_ENABLE_DCACHE call l1c_dc_invalidate_all call l1c_dc_disable #else call l1c_dc_enable call l1c_dc_invalidate_all #endif #ifndef __NO_SYSTEM_INIT // // Call _init // call _init #endif // // Call linker init functions which in turn performs the following: // * Perform segment init // * Perform heap init (if used) // * Call constructors of global Objects (if any exist) // la s0, __SEGGER_init_table__ // Set table pointer to start of initialization table L(RunInit): lw a0, (s0) // Get next initialization function from table add s0, s0, 4 // Increment table pointer to point to function arguments jalr a0 // Call initialization function j L(RunInit) // MARK_FUNC __SEGGER_init_done // // Time to call main(), the application entry point. 
// #ifndef NO_CLEANUP_AT_START /* clean up */ call _clean_up #endif #if defined(CONFIG_FREERTOS) && CONFIG_FREERTOS #define HANDLER_TRAP freertos_risc_v_trap_handler #define HANDLER_S_TRAP freertos_risc_v_trap_handler /* Use mscratch to store isr level */ csrw mscratch, 0 #elif defined(CONFIG_UCOS_III) && CONFIG_UCOS_III #define HANDLER_TRAP ucos_risc_v_trap_handler #define HANDLER_S_TRAP ucos_risc_v_trap_handler /* Use mscratch to store isr level */ csrw mscratch, 0 #elif defined(CONFIG_THREADX) && CONFIG_THREADX #define HANDLER_TRAP tx_risc_v_trap_handler #define HANDLER_S_TRAP tx_risc_v_trap_handler /* Use mscratch to store isr level */ csrw mscratch, 0 #elif defined(CONFIG_RTTHREAD) && CONFIG_RTTHREAD #define HANDLER_TRAP rtt_risc_v_trap_handler #define HANDLER_S_TRAP rtt_risc_v_trap_handler /* Use mscratch to store isr level */ csrw mscratch, 0 #else #define HANDLER_TRAP irq_handler_trap #define HANDLER_S_TRAP irq_handler_s_trap #endif #if !defined(USE_NONVECTOR_MODE) || (USE_NONVECTOR_MODE == 0) /* Initial machine trap-vector Base */ la t0, __vector_table csrw mtvec, t0 #if defined (USE_S_MODE_IRQ) la t0, __vector_s_table csrw stvec, t0 #endif /* Enable vectored external PLIC interrupt */ csrsi CSR_MMISC_CTL, 2 #else /* Initial machine trap-vector Base */ la t0, HANDLER_TRAP csrw mtvec, t0 #if defined (USE_S_MODE_IRQ) la t0, HANDLER_S_TRAP csrw stvec, t0 #endif /* Disable vectored external PLIC interrupt */ csrci CSR_MMISC_CTL, 2 #endif __startup_complete: MARK_FUNC start #ifndef FULL_LIBRARY // // In a real embedded application ("Free-standing environment"), // main() does not get any arguments, // which means it is not necessary to init a0 and a1. // call APP_ENTRY_POINT tail exit END_FUNC _start // // end of _start // Fall-through to exit if main ever returns. // MARK_FUNC exit // // In a free-standing environment, if returned from application: // Loop forever. // j . 
.size exit,.-exit #else // // In a hosted environment, // we need to load a0 and a1 with argc and argv, in order to handle // the command line arguments. // This is required for some programs running under control of a // debugger, such as automated tests. // li a0, ARGSSPACE la a1, args call debug_getargs li a0, ARGSSPACE la a1, args call APP_ENTRY_POINT // Call to application entry point (usually main()) call exit // Call exit function j . // If we unexpectedly return from exit, hang. END_FUNC _start #endif #ifdef FULL_LIBRARY li a0, ARGSSPACE la a1, args call debug_getargs li a0, ARGSSPACE la a1, args #else li a0, 0 li a1, 0 #endif call APP_ENTRY_POINT tail exit END_FUNC _start // #ifdef FULL_LIBRARY /********************************************************************* * * exit * * Function description * Exit of the system. * Called on return from application entry point or explicit call * to exit. * * Additional information * In a hosted environment exit gracefully, by * saving the return value, * calling destructurs of global objects, * calling registered atexit functions, * and notifying the host/debugger. */ #undef L #define L(label) .L_exit_##label WEAK_FUNC exit mv s1, a0 // Save the exit parameter/return result // // Call destructors // la s0, __dtors_start__ L(Loop): la t0, __dtors_end__ beq s0, t0, L(End) lw t1, 0(s0) addi s0, s0, 4 jalr t1 j L(Loop) L(End): // // Call atexit functions // call _execute_at_exit_fns // // Call debug_exit with return result/exit parameter // mv a0, s1 call debug_exit // // If execution is not terminated, loop forever // L(ExitLoop): j L(ExitLoop) // Loop forever. END_FUNC exit #endif #ifdef FULL_LIBRARY .bss args: .space ARGSSPACE .size args, .-args .type args, %object #endif .section .isr_vector, "ax" .weak nmi_handler nmi_handler: 1: j 1b #include "../vectors.h" /*************************** End of file ****************************/
Aladdin-Wang/MicroLink
1,878
MicroLink/external/perf_counter/systick_wrapper_gnu.s
;/**************************************************************************** ;* Copyright 2024 Gorgon Meducer (Email:embedded_zhuoran@hotmail.com) * ;* * ;* Licensed under the Apache License, Version 2.0 (the "License"); * ;* you may not use this file except in compliance with the License. * ;* You may obtain a copy of the License at * ;* * ;* http://www.apache.org/licenses/LICENSE-2.0 * ;* * ;* Unless required by applicable law or agreed to in writing, software * ;* distributed under the License is distributed on an "AS IS" BASIS, * ;* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * ;* See the License for the specific language governing permissions and * ;* limitations under the License. * ;* * ;****************************************************************************/ .syntax unified .arch armv6-m .eabi_attribute Tag_ABI_align_preserved, 1 .text .thumb .thumb_func .align 2 .globl $Sub$$SysTick_Handler .type $Sub$$SysTick_Handler, %function $Sub$$SysTick_Handler: push {r4, r5} push {r4, lr} ldr R0, =perfc_port_insert_to_system_timer_insert_ovf_handler blx R0 pop {r4, r5} mov lr, r5 pop {r4, r5} ldr R0, =$Super$$SysTick_Handler bx R0 .globl __ensure_systick_wrapper .type __ensure_systick_wrapper, %function __ensure_systick_wrapper: bx lr
Aladdin-Wang/MicroLink
1,848
MicroLink/external/perf_counter/systick_wrapper_gcc.S
;/**************************************************************************** ;* Copyright 2024 Gorgon Meducer (Email:embedded_zhuoran@hotmail.com) * ;* * ;* Licensed under the Apache License, Version 2.0 (the "License"); * ;* you may not use this file except in compliance with the License. * ;* You may obtain a copy of the License at * ;* * ;* http://www.apache.org/licenses/LICENSE-2.0 * ;* * ;* Unless required by applicable law or agreed to in writing, software * ;* distributed under the License is distributed on an "AS IS" BASIS, * ;* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * ;* See the License for the specific language governing permissions and * ;* limitations under the License. * ;* * ;****************************************************************************/ .syntax unified .arch armv6-m .text .thumb .thumb_func .align 2 .globl __wrap_SysTick_Handler .type __wrap_SysTick_Handler, %function __wrap_SysTick_Handler: push {r4, r5} push {r4, lr} ldr R0, =perfc_port_insert_to_system_timer_insert_ovf_handler blx R0 pop {r4, r5} mov lr, r5 pop {r4, r5} ldr R0, =__real_SysTick_Handler bx R0 .globl __ensure_systick_wrapper .type __ensure_systick_wrapper, %function __ensure_systick_wrapper: bx lr
Aladdin-Wang/MicroLink
2,225
MicroLink/external/perf_counter/systick_wrapper_ual.s
;/**************************************************************************** ;* Copyright 2022 Gorgon Meducer (Email:embedded_zhuoran@hotmail.com) * ;* * ;* Licensed under the Apache License, Version 2.0 (the "License"); * ;* you may not use this file except in compliance with the License. * ;* You may obtain a copy of the License at * ;* * ;* http://www.apache.org/licenses/LICENSE-2.0 * ;* * ;* Unless required by applicable law or agreed to in writing, software * ;* distributed under the License is distributed on an "AS IS" BASIS, * ;* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * ;* See the License for the specific language governing permissions and * ;* limitations under the License. * ;* * ;****************************************************************************/ PRESERVE8 THUMB AREA |.text|, CODE, READONLY |$Sub$$SysTick_Handler| PROC EXPORT |$Sub$$SysTick_Handler| IMPORT perfc_port_insert_to_system_timer_insert_ovf_handler IMPORT |$Super$$SysTick_Handler| push {r4, r5} push {r4, lr} LDR R0, =perfc_port_insert_to_system_timer_insert_ovf_handler BLX R0 pop {r4, r5} mov lr, r5 pop {r4, r5} LDR R0, =|$Super$$SysTick_Handler| BX R0 ENDP ALIGN AREA |.text|, CODE, READONLY __ensure_systick_wrapper PROC EXPORT __ensure_systick_wrapper NOP BX LR ENDP END
Aladdin-Wang/MicroLink
12,193
MicroLink/external/perf_counter/CI/perf_counter_template_gcc/startup_stm32f103xe.s
/** *************** (C) COPYRIGHT 2017 STMicroelectronics ************************ * @file startup_stm32f103xe.s * @author MCD Application Team * @brief STM32F103xE Devices vector table for Atollic toolchain. * This module performs: * - Set the initial SP * - Set the initial PC == Reset_Handler, * - Set the vector table entries with the exceptions ISR address * - Configure the clock system * - Configure external SRAM mounted on STM3210E-EVAL board * to be used as data memory (optional, to be enabled by user) * - Branches to main in the C library (which eventually * calls main()). * After Reset the Cortex-M3 processor is in Thread mode, * priority is Privileged, and the Stack is set to Main. ****************************************************************************** * @attention * * <h2><center>&copy; Copyright (c) 2017 STMicroelectronics. * All rights reserved.</center></h2> * * This software component is licensed by ST under BSD 3-Clause license, * the "License"; You may not use this file except in compliance with the * License. You may obtain a copy of the License at: * opensource.org/licenses/BSD-3-Clause * ****************************************************************************** */ .syntax unified .cpu cortex-m3 .fpu softvfp .thumb .global g_pfnVectors .global Default_Handler /* start address for the initialization values of the .data section. defined in linker script */ .word _sidata /* start address for the .data section. defined in linker script */ .word _sdata /* end address for the .data section. defined in linker script */ .word _edata /* start address for the .bss section. defined in linker script */ .word _sbss /* end address for the .bss section. defined in linker script */ .word _ebss .equ BootRAM, 0xF1E0F85F /** * @brief This is the code that gets called when the processor first * starts execution following a reset event. Only the absolutely * necessary set is performed, after which the application * supplied main() routine is called. 
* @param None * @retval : None */ .section .text.Reset_Handler .weak Reset_Handler .type Reset_Handler, %function Reset_Handler: /* Copy the data segment initializers from flash to SRAM */ ldr r0, =_sdata ldr r1, =_edata ldr r2, =_sidata movs r3, #0 b LoopCopyDataInit CopyDataInit: ldr r4, [r2, r3] str r4, [r0, r3] adds r3, r3, #4 LoopCopyDataInit: adds r4, r0, r3 cmp r4, r1 bcc CopyDataInit /* Zero fill the bss segment. */ ldr r2, =_sbss ldr r4, =_ebss movs r3, #0 b LoopFillZerobss FillZerobss: str r3, [r2] adds r2, r2, #4 LoopFillZerobss: cmp r2, r4 bcc FillZerobss /* Call the clock system intitialization function.*/ bl SystemInit /* Call static constructors */ bl __libc_init_array /* Call the application's entry point.*/ bl main bx lr .size Reset_Handler, .-Reset_Handler /** * @brief This is the code that gets called when the processor receives an * unexpected interrupt. This simply enters an infinite loop, preserving * the system state for examination by a debugger. * * @param None * @retval : None */ .section .text.Default_Handler,"ax",%progbits Default_Handler: Infinite_Loop: b Infinite_Loop .size Default_Handler, .-Default_Handler /****************************************************************************** * * The minimal vector table for a Cortex M3. Note that the proper constructs * must be placed on this to ensure that it ends up at physical address * 0x0000.0000. 
* ******************************************************************************/ .section .isr_vector,"a",%progbits .type g_pfnVectors, %object .size g_pfnVectors, .-g_pfnVectors g_pfnVectors: .word _estack .word Reset_Handler .word NMI_Handler .word HardFault_Handler .word MemManage_Handler .word BusFault_Handler .word UsageFault_Handler .word 0 .word 0 .word 0 .word 0 .word SVC_Handler .word DebugMon_Handler .word 0 .word PendSV_Handler .word SysTick_Handler .word WWDG_IRQHandler .word PVD_IRQHandler .word TAMPER_IRQHandler .word RTC_IRQHandler .word FLASH_IRQHandler .word RCC_IRQHandler .word EXTI0_IRQHandler .word EXTI1_IRQHandler .word EXTI2_IRQHandler .word EXTI3_IRQHandler .word EXTI4_IRQHandler .word DMA1_Channel1_IRQHandler .word DMA1_Channel2_IRQHandler .word DMA1_Channel3_IRQHandler .word DMA1_Channel4_IRQHandler .word DMA1_Channel5_IRQHandler .word DMA1_Channel6_IRQHandler .word DMA1_Channel7_IRQHandler .word ADC1_2_IRQHandler .word USB_HP_CAN1_TX_IRQHandler .word USB_LP_CAN1_RX0_IRQHandler .word CAN1_RX1_IRQHandler .word CAN1_SCE_IRQHandler .word EXTI9_5_IRQHandler .word TIM1_BRK_IRQHandler .word TIM1_UP_IRQHandler .word TIM1_TRG_COM_IRQHandler .word TIM1_CC_IRQHandler .word TIM2_IRQHandler .word TIM3_IRQHandler .word TIM4_IRQHandler .word I2C1_EV_IRQHandler .word I2C1_ER_IRQHandler .word I2C2_EV_IRQHandler .word I2C2_ER_IRQHandler .word SPI1_IRQHandler .word SPI2_IRQHandler .word USART1_IRQHandler .word USART2_IRQHandler .word USART3_IRQHandler .word EXTI15_10_IRQHandler .word RTC_Alarm_IRQHandler .word USBWakeUp_IRQHandler .word TIM8_BRK_IRQHandler .word TIM8_UP_IRQHandler .word TIM8_TRG_COM_IRQHandler .word TIM8_CC_IRQHandler .word ADC3_IRQHandler .word FSMC_IRQHandler .word SDIO_IRQHandler .word TIM5_IRQHandler .word SPI3_IRQHandler .word UART4_IRQHandler .word UART5_IRQHandler .word TIM6_IRQHandler .word TIM7_IRQHandler .word DMA2_Channel1_IRQHandler .word DMA2_Channel2_IRQHandler .word DMA2_Channel3_IRQHandler .word DMA2_Channel4_5_IRQHandler 
.word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word BootRAM /* @0x1E0. This is for boot in RAM mode for STM32F10x High Density devices. */ /******************************************************************************* * * Provide weak aliases for each Exception handler to the Default_Handler. * As they are weak aliases, any function with the same name will override * this definition. * *******************************************************************************/ .weak NMI_Handler .thumb_set NMI_Handler,Default_Handler .weak HardFault_Handler .thumb_set HardFault_Handler,Default_Handler .weak MemManage_Handler .thumb_set MemManage_Handler,Default_Handler .weak BusFault_Handler .thumb_set BusFault_Handler,Default_Handler .weak UsageFault_Handler .thumb_set UsageFault_Handler,Default_Handler .weak SVC_Handler .thumb_set SVC_Handler,Default_Handler .weak DebugMon_Handler .thumb_set DebugMon_Handler,Default_Handler .weak PendSV_Handler .thumb_set PendSV_Handler,Default_Handler .weak SysTick_Handler .thumb_set SysTick_Handler,Default_Handler .weak WWDG_IRQHandler .thumb_set WWDG_IRQHandler,Default_Handler .weak PVD_IRQHandler .thumb_set PVD_IRQHandler,Default_Handler .weak TAMPER_IRQHandler .thumb_set TAMPER_IRQHandler,Default_Handler .weak RTC_IRQHandler .thumb_set RTC_IRQHandler,Default_Handler .weak FLASH_IRQHandler .thumb_set FLASH_IRQHandler,Default_Handler .weak RCC_IRQHandler .thumb_set RCC_IRQHandler,Default_Handler .weak EXTI0_IRQHandler .thumb_set EXTI0_IRQHandler,Default_Handler .weak EXTI1_IRQHandler .thumb_set EXTI1_IRQHandler,Default_Handler .weak EXTI2_IRQHandler .thumb_set EXTI2_IRQHandler,Default_Handler .weak EXTI3_IRQHandler .thumb_set 
EXTI3_IRQHandler,Default_Handler .weak EXTI4_IRQHandler .thumb_set EXTI4_IRQHandler,Default_Handler .weak DMA1_Channel1_IRQHandler .thumb_set DMA1_Channel1_IRQHandler,Default_Handler .weak DMA1_Channel2_IRQHandler .thumb_set DMA1_Channel2_IRQHandler,Default_Handler .weak DMA1_Channel3_IRQHandler .thumb_set DMA1_Channel3_IRQHandler,Default_Handler .weak DMA1_Channel4_IRQHandler .thumb_set DMA1_Channel4_IRQHandler,Default_Handler .weak DMA1_Channel5_IRQHandler .thumb_set DMA1_Channel5_IRQHandler,Default_Handler .weak DMA1_Channel6_IRQHandler .thumb_set DMA1_Channel6_IRQHandler,Default_Handler .weak DMA1_Channel7_IRQHandler .thumb_set DMA1_Channel7_IRQHandler,Default_Handler .weak ADC1_2_IRQHandler .thumb_set ADC1_2_IRQHandler,Default_Handler .weak USB_HP_CAN1_TX_IRQHandler .thumb_set USB_HP_CAN1_TX_IRQHandler,Default_Handler .weak USB_LP_CAN1_RX0_IRQHandler .thumb_set USB_LP_CAN1_RX0_IRQHandler,Default_Handler .weak CAN1_RX1_IRQHandler .thumb_set CAN1_RX1_IRQHandler,Default_Handler .weak CAN1_SCE_IRQHandler .thumb_set CAN1_SCE_IRQHandler,Default_Handler .weak EXTI9_5_IRQHandler .thumb_set EXTI9_5_IRQHandler,Default_Handler .weak TIM1_BRK_IRQHandler .thumb_set TIM1_BRK_IRQHandler,Default_Handler .weak TIM1_UP_IRQHandler .thumb_set TIM1_UP_IRQHandler,Default_Handler .weak TIM1_TRG_COM_IRQHandler .thumb_set TIM1_TRG_COM_IRQHandler,Default_Handler .weak TIM1_CC_IRQHandler .thumb_set TIM1_CC_IRQHandler,Default_Handler .weak TIM2_IRQHandler .thumb_set TIM2_IRQHandler,Default_Handler .weak TIM3_IRQHandler .thumb_set TIM3_IRQHandler,Default_Handler .weak TIM4_IRQHandler .thumb_set TIM4_IRQHandler,Default_Handler .weak I2C1_EV_IRQHandler .thumb_set I2C1_EV_IRQHandler,Default_Handler .weak I2C1_ER_IRQHandler .thumb_set I2C1_ER_IRQHandler,Default_Handler .weak I2C2_EV_IRQHandler .thumb_set I2C2_EV_IRQHandler,Default_Handler .weak I2C2_ER_IRQHandler .thumb_set I2C2_ER_IRQHandler,Default_Handler .weak SPI1_IRQHandler .thumb_set SPI1_IRQHandler,Default_Handler .weak 
SPI2_IRQHandler .thumb_set SPI2_IRQHandler,Default_Handler .weak USART1_IRQHandler .thumb_set USART1_IRQHandler,Default_Handler .weak USART2_IRQHandler .thumb_set USART2_IRQHandler,Default_Handler .weak USART3_IRQHandler .thumb_set USART3_IRQHandler,Default_Handler .weak EXTI15_10_IRQHandler .thumb_set EXTI15_10_IRQHandler,Default_Handler .weak RTC_Alarm_IRQHandler .thumb_set RTC_Alarm_IRQHandler,Default_Handler .weak USBWakeUp_IRQHandler .thumb_set USBWakeUp_IRQHandler,Default_Handler .weak TIM8_BRK_IRQHandler .thumb_set TIM8_BRK_IRQHandler,Default_Handler .weak TIM8_UP_IRQHandler .thumb_set TIM8_UP_IRQHandler,Default_Handler .weak TIM8_TRG_COM_IRQHandler .thumb_set TIM8_TRG_COM_IRQHandler,Default_Handler .weak TIM8_CC_IRQHandler .thumb_set TIM8_CC_IRQHandler,Default_Handler .weak ADC3_IRQHandler .thumb_set ADC3_IRQHandler,Default_Handler .weak FSMC_IRQHandler .thumb_set FSMC_IRQHandler,Default_Handler .weak SDIO_IRQHandler .thumb_set SDIO_IRQHandler,Default_Handler .weak TIM5_IRQHandler .thumb_set TIM5_IRQHandler,Default_Handler .weak SPI3_IRQHandler .thumb_set SPI3_IRQHandler,Default_Handler .weak UART4_IRQHandler .thumb_set UART4_IRQHandler,Default_Handler .weak UART5_IRQHandler .thumb_set UART5_IRQHandler,Default_Handler .weak TIM6_IRQHandler .thumb_set TIM6_IRQHandler,Default_Handler .weak TIM7_IRQHandler .thumb_set TIM7_IRQHandler,Default_Handler .weak DMA2_Channel1_IRQHandler .thumb_set DMA2_Channel1_IRQHandler,Default_Handler .weak DMA2_Channel2_IRQHandler .thumb_set DMA2_Channel2_IRQHandler,Default_Handler .weak DMA2_Channel3_IRQHandler .thumb_set DMA2_Channel3_IRQHandler,Default_Handler .weak DMA2_Channel4_5_IRQHandler .thumb_set DMA2_Channel4_5_IRQHandler,Default_Handler /************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
Aladdin-Wang/MicroLink
14,685
MicroLink/external/perf_counter/CI/perf_counter_template_gcc/MDK-ARM/startup_stm32f103xe.s
;******************** (C) COPYRIGHT 2017 STMicroelectronics ******************** ;* File Name : startup_stm32f103xe.s ;* Author : MCD Application Team ;* Description : STM32F103xE Devices vector table for MDK-ARM toolchain. ;* This module performs: ;* - Set the initial SP ;* - Set the initial PC == Reset_Handler ;* - Set the vector table entries with the exceptions ISR address ;* - Configure the clock system ;* - Branches to __main in the C library (which eventually ;* calls main()). ;* After Reset the Cortex-M3 processor is in Thread mode, ;* priority is Privileged, and the Stack is set to Main. ;****************************************************************************** ;* @attention ;* ;* Copyright (c) 2017 STMicroelectronics. ;* All rights reserved. ;* ;* This software component is licensed by ST under BSD 3-Clause license, ;* the "License"; You may not use this file except in compliance with the ;* License. You may obtain a copy of the License at: ;* opensource.org/licenses/BSD-3-Clause ;* ;****************************************************************************** ; Amount of memory (in bytes) allocated for Stack ; Tailor this value to your application needs ; <h> Stack Configuration ; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Stack_Size EQU 0x400 AREA STACK, NOINIT, READWRITE, ALIGN=3 Stack_Mem SPACE Stack_Size __initial_sp ; <h> Heap Configuration ; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Heap_Size EQU 0x200 AREA HEAP, NOINIT, READWRITE, ALIGN=3 __heap_base Heap_Mem SPACE Heap_Size __heap_limit PRESERVE8 THUMB ; Vector Table Mapped to Address 0 at Reset AREA RESET, DATA, READONLY EXPORT __Vectors EXPORT __Vectors_End EXPORT __Vectors_Size __Vectors DCD __initial_sp ; Top of Stack DCD Reset_Handler ; Reset Handler DCD NMI_Handler ; NMI Handler DCD HardFault_Handler ; Hard Fault Handler DCD MemManage_Handler ; MPU Fault Handler DCD BusFault_Handler ; Bus Fault Handler DCD UsageFault_Handler ; Usage Fault Handler DCD 0 ; Reserved DCD 
0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD SVC_Handler ; SVCall Handler DCD DebugMon_Handler ; Debug Monitor Handler DCD 0 ; Reserved DCD PendSV_Handler ; PendSV Handler DCD SysTick_Handler ; SysTick Handler ; External Interrupts DCD WWDG_IRQHandler ; Window Watchdog DCD PVD_IRQHandler ; PVD through EXTI Line detect DCD TAMPER_IRQHandler ; Tamper DCD RTC_IRQHandler ; RTC DCD FLASH_IRQHandler ; Flash DCD RCC_IRQHandler ; RCC DCD EXTI0_IRQHandler ; EXTI Line 0 DCD EXTI1_IRQHandler ; EXTI Line 1 DCD EXTI2_IRQHandler ; EXTI Line 2 DCD EXTI3_IRQHandler ; EXTI Line 3 DCD EXTI4_IRQHandler ; EXTI Line 4 DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1 DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2 DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3 DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4 DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5 DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6 DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7 DCD ADC1_2_IRQHandler ; ADC1 & ADC2 DCD USB_HP_CAN1_TX_IRQHandler ; USB High Priority or CAN1 TX DCD USB_LP_CAN1_RX0_IRQHandler ; USB Low Priority or CAN1 RX0 DCD CAN1_RX1_IRQHandler ; CAN1 RX1 DCD CAN1_SCE_IRQHandler ; CAN1 SCE DCD EXTI9_5_IRQHandler ; EXTI Line 9..5 DCD TIM1_BRK_IRQHandler ; TIM1 Break DCD TIM1_UP_IRQHandler ; TIM1 Update DCD TIM1_TRG_COM_IRQHandler ; TIM1 Trigger and Commutation DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare DCD TIM2_IRQHandler ; TIM2 DCD TIM3_IRQHandler ; TIM3 DCD TIM4_IRQHandler ; TIM4 DCD I2C1_EV_IRQHandler ; I2C1 Event DCD I2C1_ER_IRQHandler ; I2C1 Error DCD I2C2_EV_IRQHandler ; I2C2 Event DCD I2C2_ER_IRQHandler ; I2C2 Error DCD SPI1_IRQHandler ; SPI1 DCD SPI2_IRQHandler ; SPI2 DCD USART1_IRQHandler ; USART1 DCD USART2_IRQHandler ; USART2 DCD USART3_IRQHandler ; USART3 DCD EXTI15_10_IRQHandler ; EXTI Line 15..10 DCD RTC_Alarm_IRQHandler ; RTC Alarm through EXTI Line DCD USBWakeUp_IRQHandler ; USB Wakeup from suspend DCD TIM8_BRK_IRQHandler ; TIM8 Break DCD TIM8_UP_IRQHandler ; TIM8 Update DCD 
TIM8_TRG_COM_IRQHandler ; TIM8 Trigger and Commutation DCD TIM8_CC_IRQHandler ; TIM8 Capture Compare DCD ADC3_IRQHandler ; ADC3 DCD FSMC_IRQHandler ; FSMC DCD SDIO_IRQHandler ; SDIO DCD TIM5_IRQHandler ; TIM5 DCD SPI3_IRQHandler ; SPI3 DCD UART4_IRQHandler ; UART4 DCD UART5_IRQHandler ; UART5 DCD TIM6_IRQHandler ; TIM6 DCD TIM7_IRQHandler ; TIM7 DCD DMA2_Channel1_IRQHandler ; DMA2 Channel1 DCD DMA2_Channel2_IRQHandler ; DMA2 Channel2 DCD DMA2_Channel3_IRQHandler ; DMA2 Channel3 DCD DMA2_Channel4_5_IRQHandler ; DMA2 Channel4 & Channel5 __Vectors_End __Vectors_Size EQU __Vectors_End - __Vectors AREA |.text|, CODE, READONLY ; Reset handler Reset_Handler PROC EXPORT Reset_Handler [WEAK] IMPORT __main IMPORT SystemInit LDR R0, =SystemInit BLX R0 LDR R0, =__main BX R0 ENDP ; Dummy Exception Handlers (infinite loops which can be modified) NMI_Handler PROC EXPORT NMI_Handler [WEAK] B . ENDP HardFault_Handler\ PROC EXPORT HardFault_Handler [WEAK] B . ENDP MemManage_Handler\ PROC EXPORT MemManage_Handler [WEAK] B . ENDP BusFault_Handler\ PROC EXPORT BusFault_Handler [WEAK] B . ENDP UsageFault_Handler\ PROC EXPORT UsageFault_Handler [WEAK] B . ENDP SVC_Handler PROC EXPORT SVC_Handler [WEAK] B . ENDP DebugMon_Handler\ PROC EXPORT DebugMon_Handler [WEAK] B . ENDP PendSV_Handler PROC EXPORT PendSV_Handler [WEAK] B . ENDP SysTick_Handler PROC EXPORT SysTick_Handler [WEAK] B . 
ENDP Default_Handler PROC EXPORT WWDG_IRQHandler [WEAK] EXPORT PVD_IRQHandler [WEAK] EXPORT TAMPER_IRQHandler [WEAK] EXPORT RTC_IRQHandler [WEAK] EXPORT FLASH_IRQHandler [WEAK] EXPORT RCC_IRQHandler [WEAK] EXPORT EXTI0_IRQHandler [WEAK] EXPORT EXTI1_IRQHandler [WEAK] EXPORT EXTI2_IRQHandler [WEAK] EXPORT EXTI3_IRQHandler [WEAK] EXPORT EXTI4_IRQHandler [WEAK] EXPORT DMA1_Channel1_IRQHandler [WEAK] EXPORT DMA1_Channel2_IRQHandler [WEAK] EXPORT DMA1_Channel3_IRQHandler [WEAK] EXPORT DMA1_Channel4_IRQHandler [WEAK] EXPORT DMA1_Channel5_IRQHandler [WEAK] EXPORT DMA1_Channel6_IRQHandler [WEAK] EXPORT DMA1_Channel7_IRQHandler [WEAK] EXPORT ADC1_2_IRQHandler [WEAK] EXPORT USB_HP_CAN1_TX_IRQHandler [WEAK] EXPORT USB_LP_CAN1_RX0_IRQHandler [WEAK] EXPORT CAN1_RX1_IRQHandler [WEAK] EXPORT CAN1_SCE_IRQHandler [WEAK] EXPORT EXTI9_5_IRQHandler [WEAK] EXPORT TIM1_BRK_IRQHandler [WEAK] EXPORT TIM1_UP_IRQHandler [WEAK] EXPORT TIM1_TRG_COM_IRQHandler [WEAK] EXPORT TIM1_CC_IRQHandler [WEAK] EXPORT TIM2_IRQHandler [WEAK] EXPORT TIM3_IRQHandler [WEAK] EXPORT TIM4_IRQHandler [WEAK] EXPORT I2C1_EV_IRQHandler [WEAK] EXPORT I2C1_ER_IRQHandler [WEAK] EXPORT I2C2_EV_IRQHandler [WEAK] EXPORT I2C2_ER_IRQHandler [WEAK] EXPORT SPI1_IRQHandler [WEAK] EXPORT SPI2_IRQHandler [WEAK] EXPORT USART1_IRQHandler [WEAK] EXPORT USART2_IRQHandler [WEAK] EXPORT USART3_IRQHandler [WEAK] EXPORT EXTI15_10_IRQHandler [WEAK] EXPORT RTC_Alarm_IRQHandler [WEAK] EXPORT USBWakeUp_IRQHandler [WEAK] EXPORT TIM8_BRK_IRQHandler [WEAK] EXPORT TIM8_UP_IRQHandler [WEAK] EXPORT TIM8_TRG_COM_IRQHandler [WEAK] EXPORT TIM8_CC_IRQHandler [WEAK] EXPORT ADC3_IRQHandler [WEAK] EXPORT FSMC_IRQHandler [WEAK] EXPORT SDIO_IRQHandler [WEAK] EXPORT TIM5_IRQHandler [WEAK] EXPORT SPI3_IRQHandler [WEAK] EXPORT UART4_IRQHandler [WEAK] EXPORT UART5_IRQHandler [WEAK] EXPORT TIM6_IRQHandler [WEAK] EXPORT TIM7_IRQHandler [WEAK] EXPORT DMA2_Channel1_IRQHandler [WEAK] EXPORT DMA2_Channel2_IRQHandler [WEAK] EXPORT DMA2_Channel3_IRQHandler [WEAK] 
EXPORT DMA2_Channel4_5_IRQHandler [WEAK] WWDG_IRQHandler PVD_IRQHandler TAMPER_IRQHandler RTC_IRQHandler FLASH_IRQHandler RCC_IRQHandler EXTI0_IRQHandler EXTI1_IRQHandler EXTI2_IRQHandler EXTI3_IRQHandler EXTI4_IRQHandler DMA1_Channel1_IRQHandler DMA1_Channel2_IRQHandler DMA1_Channel3_IRQHandler DMA1_Channel4_IRQHandler DMA1_Channel5_IRQHandler DMA1_Channel6_IRQHandler DMA1_Channel7_IRQHandler ADC1_2_IRQHandler USB_HP_CAN1_TX_IRQHandler USB_LP_CAN1_RX0_IRQHandler CAN1_RX1_IRQHandler CAN1_SCE_IRQHandler EXTI9_5_IRQHandler TIM1_BRK_IRQHandler TIM1_UP_IRQHandler TIM1_TRG_COM_IRQHandler TIM1_CC_IRQHandler TIM2_IRQHandler TIM3_IRQHandler TIM4_IRQHandler I2C1_EV_IRQHandler I2C1_ER_IRQHandler I2C2_EV_IRQHandler I2C2_ER_IRQHandler SPI1_IRQHandler SPI2_IRQHandler USART1_IRQHandler USART2_IRQHandler USART3_IRQHandler EXTI15_10_IRQHandler RTC_Alarm_IRQHandler USBWakeUp_IRQHandler TIM8_BRK_IRQHandler TIM8_UP_IRQHandler TIM8_TRG_COM_IRQHandler TIM8_CC_IRQHandler ADC3_IRQHandler FSMC_IRQHandler SDIO_IRQHandler TIM5_IRQHandler SPI3_IRQHandler UART4_IRQHandler UART5_IRQHandler TIM6_IRQHandler TIM7_IRQHandler DMA2_Channel1_IRQHandler DMA2_Channel2_IRQHandler DMA2_Channel3_IRQHandler DMA2_Channel4_5_IRQHandler B . ENDP ALIGN ;******************************************************************************* ; User Stack and Heap initialization ;******************************************************************************* IF :DEF:__MICROLIB EXPORT __initial_sp EXPORT __heap_base EXPORT __heap_limit ELSE IMPORT __use_two_region_memory EXPORT __user_initial_stackheap __user_initial_stackheap LDR R0, = Heap_Mem LDR R1, =(Stack_Mem + Stack_Size) LDR R2, = (Heap_Mem + Heap_Size) LDR R3, = Stack_Mem BX LR ALIGN ENDIF END ;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
Aladdin-Wang/MicroLink
25,677
MicroLink/external/perf_counter/example/gcc/startup_ARMCM7.S
/* File: startup_ARMCM7.S * Purpose: startup file for Cortex-M7 devices. Should use with * GCC for ARM Embedded Processors * Version: V2.0 * Date: 01 August 2014 * * Copyright (c) 2011 - 2014 ARM LIMITED All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of ARM nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------*/ .syntax unified .arch armv7e-m .section .stack .align 3 .equ Stack_Size, 0x0800 .globl __StackTop .globl __StackLimit __StackLimit: .space Stack_Size .size __StackLimit, . - __StackLimit __StackTop: .size __StackTop, . 
- __StackTop .section .heap .align 3 .equ Heap_Size, 0x0400 .globl __HeapBase .globl __HeapLimit __HeapBase: .if Heap_Size .space Heap_Size .endif .size __HeapBase, . - __HeapBase __HeapLimit: .size __HeapLimit, . - __HeapLimit .section .isr_vector .align 2 .globl __isr_vector __isr_vector: .long __StackTop /* Top of Stack */ .long Reset_Handler /* Reset Handler */ .long NMI_Handler /* NMI Handler */ .long HardFault_Handler /* Hard Fault Handler */ .long MemManage_Handler /* MPU Fault Handler */ .long BusFault_Handler /* Bus Fault Handler */ .long UsageFault_Handler /* Usage Fault Handler */ .long 0 /* Reserved */ .long 0 /* Reserved */ .long 0 /* Reserved */ .long 0 /* Reserved */ .long SVC_Handler /* SVCall Handler */ .long DebugMon_Handler /* Debug Monitor Handler */ .long 0 /* Reserved */ .long PendSV_Handler /* PendSV Handler */ .long SysTick_Handler /* SysTick Handler */ /* External interrupts */ /* External Interrupts */ .word WWDG_IRQHandler /* Window WatchDog */ .word PVD_IRQHandler /* PVD through EXTI Line detection */ .word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */ .word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */ .word FLASH_IRQHandler /* FLASH */ .word RCC_IRQHandler /* RCC */ .word EXTI0_IRQHandler /* EXTI Line0 */ .word EXTI1_IRQHandler /* EXTI Line1 */ .word EXTI2_IRQHandler /* EXTI Line2 */ .word EXTI3_IRQHandler /* EXTI Line3 */ .word EXTI4_IRQHandler /* EXTI Line4 */ .word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */ .word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */ .word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */ .word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */ .word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */ .word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */ .word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */ .word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */ .word CAN1_TX_IRQHandler /* CAN1 TX */ .word CAN1_RX0_IRQHandler /* CAN1 RX0 */ .word CAN1_RX1_IRQHandler /* CAN1 RX1 */ .word CAN1_SCE_IRQHandler 
/* CAN1 SCE */ .word EXTI9_5_IRQHandler /* External Line[9:5]s */ .word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */ .word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */ .word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */ .word TIM1_CC_IRQHandler /* TIM1 Capture Compare */ .word TIM2_IRQHandler /* TIM2 */ .word TIM3_IRQHandler /* TIM3 */ .word TIM4_IRQHandler /* TIM4 */ .word I2C1_EV_IRQHandler /* I2C1 Event */ .word I2C1_ER_IRQHandler /* I2C1 Error */ .word I2C2_EV_IRQHandler /* I2C2 Event */ .word I2C2_ER_IRQHandler /* I2C2 Error */ .word SPI1_IRQHandler /* SPI1 */ .word SPI2_IRQHandler /* SPI2 */ .word USART1_IRQHandler /* USART1 */ .word USART2_IRQHandler /* USART2 */ .word USART3_IRQHandler /* USART3 */ .word EXTI15_10_IRQHandler /* External Line[15:10]s */ .word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */ .word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */ .word TIM8_BRK_TIM12_IRQHandler /* TIM8 Break and TIM12 */ .word TIM8_UP_TIM13_IRQHandler /* TIM8 Update and TIM13 */ .word TIM8_TRG_COM_TIM14_IRQHandler /* TIM8 Trigger and Commutation and TIM14 */ .word TIM8_CC_IRQHandler /* TIM8 Capture Compare */ .word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */ .word FMC_IRQHandler /* FMC */ .word SDMMC1_IRQHandler /* SDMMC1 */ .word TIM5_IRQHandler /* TIM5 */ .word SPI3_IRQHandler /* SPI3 */ .word UART4_IRQHandler /* UART4 */ .word UART5_IRQHandler /* UART5 */ .word TIM6_DAC_IRQHandler /* TIM6 and DAC1&2 underrun errors */ .word TIM7_IRQHandler /* TIM7 */ .word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */ .word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */ .word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */ .word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */ .word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */ .word ETH_IRQHandler /* Ethernet */ .word ETH_WKUP_IRQHandler /* Ethernet Wakeup through EXTI line */ .word CAN2_TX_IRQHandler /* CAN2 TX */ .word CAN2_RX0_IRQHandler /* CAN2 RX0 */ .word 
CAN2_RX1_IRQHandler /* CAN2 RX1 */ .word CAN2_SCE_IRQHandler /* CAN2 SCE */ .word OTG_FS_IRQHandler /* USB OTG FS */ .word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */ .word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */ .word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */ .word USART6_IRQHandler /* USART6 */ .word I2C3_EV_IRQHandler /* I2C3 event */ .word I2C3_ER_IRQHandler /* I2C3 error */ .word OTG_HS_EP1_OUT_IRQHandler /* USB OTG HS End Point 1 Out */ .word OTG_HS_EP1_IN_IRQHandler /* USB OTG HS End Point 1 In */ .word OTG_HS_WKUP_IRQHandler /* USB OTG HS Wakeup through EXTI */ .word OTG_HS_IRQHandler /* USB OTG HS */ .word DCMI_IRQHandler /* DCMI */ .word 0 /* Reserved */ .word RNG_IRQHandler /* Rng */ .word FPU_IRQHandler /* FPU */ .word UART7_IRQHandler /* UART7 */ .word UART8_IRQHandler /* UART8 */ .word SPI4_IRQHandler /* SPI4 */ .word SPI5_IRQHandler /* SPI5 */ .word SPI6_IRQHandler /* SPI6 */ .word SAI1_IRQHandler /* SAI1 */ .word LTDC_IRQHandler /* LTDC */ .word LTDC_ER_IRQHandler /* LTDC error */ .word DMA2D_IRQHandler /* DMA2D */ .word SAI2_IRQHandler /* SAI2 */ .word QUADSPI_IRQHandler /* QUADSPI */ .word LPTIM1_IRQHandler /* LPTIM1 */ .word CEC_IRQHandler /* HDMI_CEC */ .word I2C4_EV_IRQHandler /* I2C4 Event */ .word I2C4_ER_IRQHandler /* I2C4 Error */ .word SPDIF_RX_IRQHandler /* SPDIF_RX */ .long Default_Handler .size __isr_vector, . - __isr_vector .text .thumb .thumb_func .align 2 .globl Reset_Handler .type Reset_Handler, %function Reset_Handler: /* Firstly it copies data from read only memory to RAM. There are two schemes * to copy. One can copy more than one sections. Another can only copy * one section. The former scheme needs more instructions and read-only * data to implement than the latter. * Macro __STARTUP_COPY_MULTIPLE is used to choose between two schemes. */ /* Single section scheme. * * The ranges of copy from/to are specified by following symbols * __etext: LMA of start of the section to copy from. 
Usually end of text * __data_start__: VMA of start of the section to copy to * __data_end__: VMA of end of the section to copy to * * All addresses must be aligned to 4 bytes boundary. */ ldr r1, =__etext ldr r2, =__data_start__ ldr r3, =__data_end__ .L_loop1: cmp r2, r3 ittt lt ldrlt r0, [r1], #4 strlt r0, [r2], #4 blt .L_loop1 /* Single BSS section scheme. * * The BSS section is specified by following symbols * __bss_start__: start of the BSS section. * __bss_end__: end of the BSS section. * * Both addresses must be aligned to 4 bytes boundary. */ ldr r1, =__bss_start__ ldr r2, =__bss_end__ movs r0, 0 .L_loop3: cmp r1, r2 itt lt strlt r0, [r1], #4 blt .L_loop3 bl SystemInit bl main .pool .size Reset_Handler, . - Reset_Handler .align 1 .thumb_func .weak Default_Handler .type Default_Handler, %function Default_Handler: b . .size Default_Handler, . - Default_Handler /* Macro to define default handlers. Default handler * will be weak symbol and just dead loops. They can be * overwritten by other handlers */ .macro def_irq_handler handler_name .weak \handler_name .set \handler_name, Default_Handler .endm def_irq_handler NMI_Handler def_irq_handler HardFault_Handler def_irq_handler MemManage_Handler def_irq_handler BusFault_Handler def_irq_handler UsageFault_Handler def_irq_handler SVC_Handler def_irq_handler DebugMon_Handler def_irq_handler PendSV_Handler def_irq_handler SysTick_Handler def_irq_handler DEF_IRQHandler def_irq_handler WWDG_IRQHandler /* Window WatchDog */ def_irq_handler PVD_IRQHandler /* PVD through EXTI Line detection */ def_irq_handler TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */ def_irq_handler RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */ def_irq_handler FLASH_IRQHandler /* FLASH */ def_irq_handler RCC_IRQHandler /* RCC */ def_irq_handler EXTI0_IRQHandler /* EXTI Line0 */ def_irq_handler EXTI1_IRQHandler /* EXTI Line1 */ def_irq_handler EXTI2_IRQHandler /* EXTI Line2 */ def_irq_handler EXTI3_IRQHandler /* 
EXTI Line3 */ def_irq_handler EXTI4_IRQHandler /* EXTI Line4 */ def_irq_handler DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */ def_irq_handler DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */ def_irq_handler DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */ def_irq_handler DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */ def_irq_handler DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */ def_irq_handler DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */ def_irq_handler DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */ def_irq_handler ADC_IRQHandler /* ADC1, ADC2 and ADC3s */ def_irq_handler CAN1_TX_IRQHandler /* CAN1 TX */ def_irq_handler CAN1_RX0_IRQHandler /* CAN1 RX0 */ def_irq_handler CAN1_RX1_IRQHandler /* CAN1 RX1 */ def_irq_handler CAN1_SCE_IRQHandler /* CAN1 SCE */ def_irq_handler EXTI9_5_IRQHandler /* External Line[9:5]s */ def_irq_handler TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */ def_irq_handler TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */ def_irq_handler TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */ def_irq_handler TIM1_CC_IRQHandler /* TIM1 Capture Compare */ def_irq_handler TIM2_IRQHandler /* TIM2 */ def_irq_handler TIM3_IRQHandler /* TIM3 */ def_irq_handler TIM4_IRQHandler /* TIM4 */ def_irq_handler I2C1_EV_IRQHandler /* I2C1 Event */ def_irq_handler I2C1_ER_IRQHandler /* I2C1 Error */ def_irq_handler I2C2_EV_IRQHandler /* I2C2 Event */ def_irq_handler I2C2_ER_IRQHandler /* I2C2 Error */ def_irq_handler SPI1_IRQHandler /* SPI1 */ def_irq_handler SPI2_IRQHandler /* SPI2 */ def_irq_handler USART1_IRQHandler /* USART1 */ def_irq_handler USART2_IRQHandler /* USART2 */ def_irq_handler USART3_IRQHandler /* USART3 */ def_irq_handler EXTI15_10_IRQHandler /* External Line[15:10]s */ def_irq_handler RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */ def_irq_handler OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */ def_irq_handler TIM8_BRK_TIM12_IRQHandler /* TIM8 Break and TIM12 */ def_irq_handler TIM8_UP_TIM13_IRQHandler /* 
TIM8 Update and TIM13 */ def_irq_handler TIM8_TRG_COM_TIM14_IRQHandler /* TIM8 Trigger and Commutation and TIM14 */ def_irq_handler TIM8_CC_IRQHandler /* TIM8 Capture Compare */ def_irq_handler DMA1_Stream7_IRQHandler /* DMA1 Stream7 */ def_irq_handler FMC_IRQHandler /* FMC */ def_irq_handler SDMMC1_IRQHandler /* SDMMC1 */ def_irq_handler TIM5_IRQHandler /* TIM5 */ def_irq_handler SPI3_IRQHandler /* SPI3 */ def_irq_handler UART4_IRQHandler /* UART4 */ def_irq_handler UART5_IRQHandler /* UART5 */ def_irq_handler TIM6_DAC_IRQHandler /* TIM6 and DAC1&2 underrun errors */ def_irq_handler TIM7_IRQHandler /* TIM7 */ def_irq_handler DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */ def_irq_handler DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */ def_irq_handler DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */ def_irq_handler DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */ def_irq_handler DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */ def_irq_handler ETH_IRQHandler /* Ethernet */ def_irq_handler ETH_WKUP_IRQHandler /* Ethernet Wakeup through EXTI line */ def_irq_handler CAN2_TX_IRQHandler /* CAN2 TX */ def_irq_handler CAN2_RX0_IRQHandler /* CAN2 RX0 */ def_irq_handler CAN2_RX1_IRQHandler /* CAN2 RX1 */ def_irq_handler CAN2_SCE_IRQHandler /* CAN2 SCE */ def_irq_handler OTG_FS_IRQHandler /* USB OTG FS */ def_irq_handler DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */ def_irq_handler DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */ def_irq_handler DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */ def_irq_handler USART6_IRQHandler /* USART6 */ def_irq_handler I2C3_EV_IRQHandler /* I2C3 event */ def_irq_handler I2C3_ER_IRQHandler /* I2C3 error */ def_irq_handler OTG_HS_EP1_OUT_IRQHandler /* USB OTG HS End Point 1 Out */ def_irq_handler OTG_HS_EP1_IN_IRQHandler /* USB OTG HS End Point 1 In */ def_irq_handler OTG_HS_WKUP_IRQHandler /* USB OTG HS Wakeup through EXTI */ def_irq_handler OTG_HS_IRQHandler /* USB OTG HS */ def_irq_handler DCMI_IRQHandler /* DCMI */ def_irq_handler RNG_IRQHandler /* Rng */ def_irq_handler 
FPU_IRQHandler /* FPU */ def_irq_handler UART7_IRQHandler /* UART7 */ def_irq_handler UART8_IRQHandler /* UART8 */ def_irq_handler SPI4_IRQHandler /* SPI4 */ def_irq_handler SPI5_IRQHandler /* SPI5 */ def_irq_handler SPI6_IRQHandler /* SPI6 */ def_irq_handler SAI1_IRQHandler /* SAI1 */ def_irq_handler LTDC_IRQHandler /* LTDC */ def_irq_handler LTDC_ER_IRQHandler /* LTDC error */ def_irq_handler DMA2D_IRQHandler /* DMA2D */ def_irq_handler SAI2_IRQHandler /* SAI2 */ def_irq_handler QUADSPI_IRQHandler /* QUADSPI */ def_irq_handler LPTIM1_IRQHandler /* LPTIM1 */ def_irq_handler CEC_IRQHandler /* HDMI_CEC */ def_irq_handler I2C4_EV_IRQHandler /* I2C4 Event */ def_irq_handler I2C4_ER_IRQHandler /* I2C4 Error */ def_irq_handler SPDIF_RX_IRQHandler /* SPDIF_RX */ .end
Aladdin-Wang/MicroLink
11,418
MicroLink/external/perf_counter/example/RTE/Device/CMSDK_CM3/startup_CMSDK_CM3.s
;/**************************************************************************//** ; * @file startup_CMSDK_CM3.s ; * @brief CMSIS Core Device Startup File for ; * CMSDK_CM3 Device ; * @version V3.05 ; * @date 09. November 2016 ; ******************************************************************************/ ;/* Copyright (c) 2011 - 2016 ARM LIMITED ; ; All rights reserved. ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are met: ; - Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; - Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the distribution. ; - Neither the name of ARM nor the names of its contributors may be used ; to endorse or promote products derived from this software without ; specific prior written permission. ; * ; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ; ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE ; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ; POSSIBILITY OF SUCH DAMAGE. 
; ---------------------------------------------------------------------------*/ ;/* ;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------ ;*/ ; <h> Stack Configuration ; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Stack_Size EQU 0x00000400 AREA STACK, NOINIT, READWRITE, ALIGN=3 Stack_Mem SPACE Stack_Size __initial_sp ; <h> Heap Configuration ; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Heap_Size EQU 0x00000C00 AREA HEAP, NOINIT, READWRITE, ALIGN=3 __heap_base Heap_Mem SPACE Heap_Size __heap_limit PRESERVE8 THUMB ; Vector Table Mapped to Address 0 at Reset AREA RESET, DATA, READONLY EXPORT __Vectors EXPORT __Vectors_End EXPORT __Vectors_Size __Vectors DCD __initial_sp ; Top of Stack DCD Reset_Handler ; Reset Handler DCD NMI_Handler ; NMI Handler DCD HardFault_Handler ; Hard Fault Handler DCD MemManage_Handler ; MPU Fault Handler DCD BusFault_Handler ; Bus Fault Handler DCD UsageFault_Handler ; Usage Fault Handler DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD SVC_Handler ; SVCall Handler DCD DebugMon_Handler ; Debug Monitor Handler DCD 0 ; Reserved DCD PendSV_Handler ; PendSV Handler DCD SysTick_Handler ; SysTick Handler ; External Interrupts DCD UART0RX_Handler ; 0 UART 0 receive interrupt DCD UART0TX_Handler ; 1 UART 0 transmit interrupt DCD UART1RX_Handler ; 2 UART 1 receive interrupt DCD UART1TX_Handler ; 3 UART 1 transmit interrupt DCD UART2RX_Handler ; 4 UART 2 receive interrupt DCD UART2TX_Handler ; 5 UART 2 transmit interrupt DCD GPIO0ALL_Handler ; 6 GPIO 0 combined interrupt DCD GPIO1ALL_Handler ; 7 GPIO 1 combined interrupt DCD TIMER0_Handler ; 8 Timer 0 interrupt DCD TIMER1_Handler ; 9 Timer 1 interrupt DCD DUALTIMER_Handler ; 10 Dual Timer interrupt DCD SPI_0_1_Handler ; 11 SPI #0, #1 interrupt DCD UART_0_1_2_OVF_Handler ; 12 UART overflow (0, 1 & 2) interrupt DCD ETHERNET_Handler ; 13 Ethernet interrupt DCD I2S_Handler ; 14 Audio I2S interrupt DCD TOUCHSCREEN_Handler ; 15 Touch Screen 
interrupt DCD GPIO2_Handler ; 16 GPIO 2 combined interrupt DCD GPIO3_Handler ; 17 GPIO 3 combined interrupt DCD UART3RX_Handler ; 18 UART 3 receive interrupt DCD UART3TX_Handler ; 19 UART 3 transmit interrupt DCD UART4RX_Handler ; 20 UART 4 receive interrupt DCD UART4TX_Handler ; 21 UART 4 transmit interrupt DCD SPI_2_Handler ; 22 SPI #2 interrupt DCD SPI_3_4_Handler ; 23 SPI #3, SPI #4 interrupt DCD GPIO0_0_Handler ; 24 GPIO 0 individual interrupt ( 0) DCD GPIO0_1_Handler ; 25 GPIO 0 individual interrupt ( 1) DCD GPIO0_2_Handler ; 26 GPIO 0 individual interrupt ( 2) DCD GPIO0_3_Handler ; 27 GPIO 0 individual interrupt ( 3) DCD GPIO0_4_Handler ; 28 GPIO 0 individual interrupt ( 4) DCD GPIO0_5_Handler ; 29 GPIO 0 individual interrupt ( 5) DCD GPIO0_6_Handler ; 30 GPIO 0 individual interrupt ( 6) DCD GPIO0_7_Handler ; 31 GPIO 0 individual interrupt ( 7) __Vectors_End __Vectors_Size EQU __Vectors_End - __Vectors AREA |.text|, CODE, READONLY ; Reset Handler Reset_Handler PROC EXPORT Reset_Handler [WEAK] IMPORT SystemInit IMPORT __main LDR R0, =SystemInit BLX R0 LDR R0, =__main BX R0 ENDP ; Dummy Exception Handlers (infinite loops which can be modified) NMI_Handler PROC EXPORT NMI_Handler [WEAK] B . ENDP HardFault_Handler\ PROC EXPORT HardFault_Handler [WEAK] B . ENDP MemManage_Handler\ PROC EXPORT MemManage_Handler [WEAK] B . ENDP BusFault_Handler\ PROC EXPORT BusFault_Handler [WEAK] B . ENDP UsageFault_Handler\ PROC EXPORT UsageFault_Handler [WEAK] B . ENDP SVC_Handler PROC EXPORT SVC_Handler [WEAK] B . ENDP DebugMon_Handler\ PROC EXPORT DebugMon_Handler [WEAK] B . ENDP PendSV_Handler PROC EXPORT PendSV_Handler [WEAK] B . ENDP SysTick_Handler PROC EXPORT SysTick_Handler [WEAK] B . 
ENDP Default_Handler PROC EXPORT UART0RX_Handler [WEAK] EXPORT UART0TX_Handler [WEAK] EXPORT UART1RX_Handler [WEAK] EXPORT UART1TX_Handler [WEAK] EXPORT UART2RX_Handler [WEAK] EXPORT UART2TX_Handler [WEAK] EXPORT GPIO0ALL_Handler [WEAK] EXPORT GPIO1ALL_Handler [WEAK] EXPORT TIMER0_Handler [WEAK] EXPORT TIMER1_Handler [WEAK] EXPORT DUALTIMER_Handler [WEAK] EXPORT SPI_0_1_Handler [WEAK] EXPORT UART_0_1_2_OVF_Handler [WEAK] EXPORT ETHERNET_Handler [WEAK] EXPORT I2S_Handler [WEAK] EXPORT TOUCHSCREEN_Handler [WEAK] EXPORT GPIO2_Handler [WEAK] EXPORT GPIO3_Handler [WEAK] EXPORT UART3RX_Handler [WEAK] EXPORT UART3TX_Handler [WEAK] EXPORT UART4RX_Handler [WEAK] EXPORT UART4TX_Handler [WEAK] EXPORT SPI_2_Handler [WEAK] EXPORT SPI_3_4_Handler [WEAK] EXPORT GPIO0_0_Handler [WEAK] EXPORT GPIO0_1_Handler [WEAK] EXPORT GPIO0_2_Handler [WEAK] EXPORT GPIO0_3_Handler [WEAK] EXPORT GPIO0_4_Handler [WEAK] EXPORT GPIO0_5_Handler [WEAK] EXPORT GPIO0_6_Handler [WEAK] EXPORT GPIO0_7_Handler [WEAK] UART0RX_Handler UART0TX_Handler UART1RX_Handler UART1TX_Handler UART2RX_Handler UART2TX_Handler GPIO0ALL_Handler GPIO1ALL_Handler TIMER0_Handler TIMER1_Handler DUALTIMER_Handler SPI_0_1_Handler UART_0_1_2_OVF_Handler ETHERNET_Handler I2S_Handler TOUCHSCREEN_Handler GPIO2_Handler GPIO3_Handler UART3RX_Handler UART3TX_Handler UART4RX_Handler UART4TX_Handler SPI_2_Handler SPI_3_4_Handler GPIO0_0_Handler GPIO0_1_Handler GPIO0_2_Handler GPIO0_3_Handler GPIO0_4_Handler GPIO0_5_Handler GPIO0_6_Handler GPIO0_7_Handler B . ENDP ALIGN ; User Initial Stack & Heap IF :DEF:__MICROLIB EXPORT __initial_sp EXPORT __heap_base EXPORT __heap_limit ELSE IMPORT __use_two_region_memory EXPORT __user_initial_stackheap __user_initial_stackheap PROC LDR R0, = Heap_Mem LDR R1, =(Stack_Mem + Stack_Size) LDR R2, = (Heap_Mem + Heap_Size) LDR R3, = Stack_Mem BX LR ENDP ALIGN ENDIF END
Aladdin-Wang/MicroLink
10,801
MicroLink/external/perf_counter/example/RTE/Device/CMSDK_CM0/startup_CMSDK_CM0.s
;/**************************************************************************//** ; * @file startup_CMSDK_CM0.s ; * @brief CMSIS Core Device Startup File for ; * CMSDK_CM0 Device ; * @version V3.05 ; * @date 09. November 2016 ; ******************************************************************************/ ;/* Copyright (c) 2011 - 2016 ARM LIMITED ; ; All rights reserved. ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are met: ; - Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; - Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the distribution. ; - Neither the name of ARM nor the names of its contributors may be used ; to endorse or promote products derived from this software without ; specific prior written permission. ; * ; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ; ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE ; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ; POSSIBILITY OF SUCH DAMAGE. 
; ---------------------------------------------------------------------------*/ ;/* ;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------ ;*/ ; <h> Stack Configuration ; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Stack_Size EQU 0x00000400 AREA STACK, NOINIT, READWRITE, ALIGN=3 Stack_Mem SPACE Stack_Size __initial_sp ; <h> Heap Configuration ; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Heap_Size EQU 0x00000C00 AREA HEAP, NOINIT, READWRITE, ALIGN=3 __heap_base Heap_Mem SPACE Heap_Size __heap_limit PRESERVE8 THUMB ; Vector Table Mapped to Address 0 at Reset AREA RESET, DATA, READONLY EXPORT __Vectors EXPORT __Vectors_End EXPORT __Vectors_Size __Vectors DCD __initial_sp ; Top of Stack DCD Reset_Handler ; Reset Handler DCD NMI_Handler ; NMI Handler DCD HardFault_Handler ; Hard Fault Handler DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD SVC_Handler ; SVCall Handler DCD 0 ; Reserved DCD 0 ; Reserved DCD PendSV_Handler ; PendSV Handler DCD SysTick_Handler ; SysTick Handler ; External Interrupts DCD UART0RX_Handler ; 0 UART 0 receive interrupt DCD UART0TX_Handler ; 1 UART 0 transmit interrupt DCD UART1RX_Handler ; 2 UART 1 receive interrupt DCD UART1TX_Handler ; 3 UART 1 transmit interrupt DCD UART2RX_Handler ; 4 UART 2 receive interrupt DCD UART2TX_Handler ; 5 UART 2 transmit interrupt DCD GPIO0ALL_Handler ; 6 GPIO 0 combined interrupt DCD GPIO1ALL_Handler ; 7 GPIO 1 combined interrupt DCD TIMER0_Handler ; 8 Timer 0 interrupt DCD TIMER1_Handler ; 9 Timer 1 interrupt DCD DUALTIMER_Handler ; 10 Dual Timer interrupt DCD SPI_0_1_Handler ; 11 SPI #0, #1 interrupt DCD UART_0_1_2_OVF_Handler ; 12 UART overflow (0, 1 & 2) interrupt DCD ETHERNET_Handler ; 13 Ethernet interrupt DCD I2S_Handler ; 14 Audio I2S interrupt DCD TOUCHSCREEN_Handler ; 15 Touch Screen interrupt DCD GPIO2_Handler ; 16 GPIO 2 combined interrupt DCD GPIO3_Handler ; 17 GPIO 3 combined interrupt 
DCD UART3RX_Handler ; 18 UART 3 receive interrupt DCD UART3TX_Handler ; 19 UART 3 transmit interrupt DCD UART4RX_Handler ; 20 UART 4 receive interrupt DCD UART4TX_Handler ; 21 UART 4 transmit interrupt DCD SPI_2_Handler ; 22 SPI #2 interrupt DCD SPI_3_4_Handler ; 23 SPI #3, SPI #4 interrupt DCD GPIO0_0_Handler ; 24 GPIO 0 individual interrupt ( 0) DCD GPIO0_1_Handler ; 25 GPIO 0 individual interrupt ( 1) DCD GPIO0_2_Handler ; 26 GPIO 0 individual interrupt ( 2) DCD GPIO0_3_Handler ; 27 GPIO 0 individual interrupt ( 3) DCD GPIO0_4_Handler ; 28 GPIO 0 individual interrupt ( 4) DCD GPIO0_5_Handler ; 29 GPIO 0 individual interrupt ( 5) DCD GPIO0_6_Handler ; 30 GPIO 0 individual interrupt ( 6) DCD GPIO0_7_Handler ; 31 GPIO 0 individual interrupt ( 7) __Vectors_End __Vectors_Size EQU __Vectors_End - __Vectors AREA |.text|, CODE, READONLY ; Reset Handler Reset_Handler PROC EXPORT Reset_Handler [WEAK] IMPORT SystemInit IMPORT __main LDR R0, =SystemInit BLX R0 LDR R0, =__main BX R0 ENDP ; Dummy Exception Handlers (infinite loops which can be modified) NMI_Handler PROC EXPORT NMI_Handler [WEAK] B . ENDP HardFault_Handler\ PROC EXPORT HardFault_Handler [WEAK] B . ENDP SVC_Handler PROC EXPORT SVC_Handler [WEAK] B . ENDP PendSV_Handler PROC EXPORT PendSV_Handler [WEAK] B . ENDP SysTick_Handler PROC EXPORT SysTick_Handler [WEAK] B . 
ENDP Default_Handler PROC EXPORT UART0RX_Handler [WEAK] EXPORT UART0TX_Handler [WEAK] EXPORT UART1RX_Handler [WEAK] EXPORT UART1TX_Handler [WEAK] EXPORT UART2RX_Handler [WEAK] EXPORT UART2TX_Handler [WEAK] EXPORT GPIO0ALL_Handler [WEAK] EXPORT GPIO1ALL_Handler [WEAK] EXPORT TIMER0_Handler [WEAK] EXPORT TIMER1_Handler [WEAK] EXPORT DUALTIMER_Handler [WEAK] EXPORT SPI_0_1_Handler [WEAK] EXPORT UART_0_1_2_OVF_Handler [WEAK] EXPORT ETHERNET_Handler [WEAK] EXPORT I2S_Handler [WEAK] EXPORT TOUCHSCREEN_Handler [WEAK] EXPORT GPIO2_Handler [WEAK] EXPORT GPIO3_Handler [WEAK] EXPORT UART3RX_Handler [WEAK] EXPORT UART3TX_Handler [WEAK] EXPORT UART4RX_Handler [WEAK] EXPORT UART4TX_Handler [WEAK] EXPORT SPI_2_Handler [WEAK] EXPORT SPI_3_4_Handler [WEAK] EXPORT GPIO0_0_Handler [WEAK] EXPORT GPIO0_1_Handler [WEAK] EXPORT GPIO0_2_Handler [WEAK] EXPORT GPIO0_3_Handler [WEAK] EXPORT GPIO0_4_Handler [WEAK] EXPORT GPIO0_5_Handler [WEAK] EXPORT GPIO0_6_Handler [WEAK] EXPORT GPIO0_7_Handler [WEAK] UART0RX_Handler UART0TX_Handler UART1RX_Handler UART1TX_Handler UART2RX_Handler UART2TX_Handler GPIO0ALL_Handler GPIO1ALL_Handler TIMER0_Handler TIMER1_Handler DUALTIMER_Handler SPI_0_1_Handler UART_0_1_2_OVF_Handler ETHERNET_Handler I2S_Handler TOUCHSCREEN_Handler GPIO2_Handler GPIO3_Handler UART3RX_Handler UART3TX_Handler UART4RX_Handler UART4TX_Handler SPI_2_Handler SPI_3_4_Handler GPIO0_0_Handler GPIO0_1_Handler GPIO0_2_Handler GPIO0_3_Handler GPIO0_4_Handler GPIO0_5_Handler GPIO0_6_Handler GPIO0_7_Handler B . ENDP ALIGN ; User Initial Stack & Heap IF :DEF:__MICROLIB EXPORT __initial_sp EXPORT __heap_base EXPORT __heap_limit ELSE IMPORT __use_two_region_memory EXPORT __user_initial_stackheap __user_initial_stackheap PROC LDR R0, = Heap_Mem LDR R1, =(Stack_Mem + Stack_Size) LDR R2, = (Heap_Mem + Heap_Size) LDR R3, = Stack_Mem BX LR ENDP ALIGN ENDIF END
Aladdin-Wang/MicroLink
6,481
MicroLink/external/perf_counter/example/RTE/Device/ARMCM0/startup_ARMCM0.s
;/**************************************************************************//** ; * @file startup_ARMCM0.s ; * @brief CMSIS Core Device Startup File for ; * ARMCM0 Device ; * @version V1.0.1 ; * @date 23. July 2019 ; ******************************************************************************/ ;/* ; * Copyright (c) 2009-2019 Arm Limited. All rights reserved. ; * ; * SPDX-License-Identifier: Apache-2.0 ; * ; * Licensed under the Apache License, Version 2.0 (the License); you may ; * not use this file except in compliance with the License. ; * You may obtain a copy of the License at ; * ; * www.apache.org/licenses/LICENSE-2.0 ; * ; * Unless required by applicable law or agreed to in writing, software ; * distributed under the License is distributed on an AS IS BASIS, WITHOUT ; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ; * See the License for the specific language governing permissions and ; * limitations under the License. ; */ ;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------ ;<h> Stack Configuration ; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8> ;</h> ;Stack_Size EQU 0x00000400 ; AREA STACK, NOINIT, READWRITE, ALIGN=3 ;__stack_limit ;Stack_Mem SPACE Stack_Size ;__initial_sp ;;<h> Heap Configuration ;; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8> ;;</h> ;Heap_Size EQU 0x00000C00 ; IF Heap_Size != 0 ; Heap is provided ; AREA HEAP, NOINIT, READWRITE, ALIGN=3 ;__heap_base ;Heap_Mem SPACE Heap_Size ;__heap_limit ; ENDIF PRESERVE8 THUMB ; Vector Table Mapped to Address 0 at Reset AREA RESET, DATA, READONLY EXPORT __Vectors EXPORT __Vectors_End EXPORT __Vectors_Size IMPORT |Image$$ARM_LIB_STACK$$ZI$$Limit| __Vectors DCD |Image$$ARM_LIB_STACK$$ZI$$Limit| ; Top of Stack DCD Reset_Handler ; Reset Handler DCD NMI_Handler ; -14 NMI Handler DCD HardFault_Handler ; -13 Hard Fault Handler DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD SVC_Handler ; 
-5 SVCall Handler DCD 0 ; Reserved DCD 0 ; Reserved DCD PendSV_Handler ; -2 PendSV Handler DCD SysTick_Handler ; -1 SysTick Handler ; Interrupts DCD Interrupt0_Handler ; 0 Interrupt 0 DCD Interrupt1_Handler ; 1 Interrupt 1 DCD Interrupt2_Handler ; 2 Interrupt 2 DCD Interrupt3_Handler ; 3 Interrupt 3 DCD Interrupt4_Handler ; 4 Interrupt 4 DCD Interrupt5_Handler ; 5 Interrupt 5 DCD Interrupt6_Handler ; 6 Interrupt 6 DCD Interrupt7_Handler ; 7 Interrupt 7 DCD Interrupt8_Handler ; 8 Interrupt 8 DCD Interrupt9_Handler ; 9 Interrupt 9 SPACE ( 22 * 4) ; Interrupts 10 .. 31 are left out __Vectors_End __Vectors_Size EQU __Vectors_End - __Vectors AREA |.text|, CODE, READONLY ; Reset Handler Reset_Handler PROC EXPORT Reset_Handler [WEAK] IMPORT SystemInit IMPORT __main LDR R0, =SystemInit BLX R0 LDR R0, =__main BX R0 ENDP ; The default macro is not used for HardFault_Handler ; because this results in a poor debug illusion. HardFault_Handler PROC EXPORT HardFault_Handler [WEAK] B . ENDP ; Macro to define default exception/interrupt handlers. ; Default handler are weak symbols with an endless loop. ; They can be overwritten by real handlers. MACRO Set_Default_Handler $Handler_Name $Handler_Name PROC EXPORT $Handler_Name [WEAK] B . 
ENDP MEND ; Default exception/interrupt handler Set_Default_Handler NMI_Handler Set_Default_Handler SVC_Handler Set_Default_Handler PendSV_Handler Set_Default_Handler SysTick_Handler Set_Default_Handler Interrupt0_Handler Set_Default_Handler Interrupt1_Handler Set_Default_Handler Interrupt2_Handler Set_Default_Handler Interrupt3_Handler Set_Default_Handler Interrupt4_Handler Set_Default_Handler Interrupt5_Handler Set_Default_Handler Interrupt6_Handler Set_Default_Handler Interrupt7_Handler Set_Default_Handler Interrupt8_Handler Set_Default_Handler Interrupt9_Handler ALIGN ; User setup Stack & Heap ; IF :LNOT::DEF:__MICROLIB ; IMPORT __use_two_region_memory ; ENDIF ; EXPORT __stack_limit ; EXPORT __initial_sp ; IF Heap_Size != 0 ; Heap is provided ; EXPORT __heap_base ; EXPORT __heap_limit ; ENDIF END
Aladdin-Wang/MicroLink
11,418
MicroLink/external/perf_counter/example/RTE/Device/CMSDK_CM7_SP/startup_CMSDK_CM7.s
;/**************************************************************************//** ; * @file startup_CMSDK_CM7.s ; * @brief CMSIS Core Device Startup File for ; * CMSDK_CM7 Device ; * @version V3.05 ; * @date 09. November 2016 ; ******************************************************************************/ ;/* Copyright (c) 2011 - 2016 ARM LIMITED ; ; All rights reserved. ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are met: ; - Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; - Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the distribution. ; - Neither the name of ARM nor the names of its contributors may be used ; to endorse or promote products derived from this software without ; specific prior written permission. ; * ; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ; ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE ; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ; POSSIBILITY OF SUCH DAMAGE. 
; ---------------------------------------------------------------------------*/ ;/* ;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------ ;*/ ; <h> Stack Configuration ; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Stack_Size EQU 0x00000400 AREA STACK, NOINIT, READWRITE, ALIGN=3 Stack_Mem SPACE Stack_Size __initial_sp ; <h> Heap Configuration ; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Heap_Size EQU 0x00000C00 AREA HEAP, NOINIT, READWRITE, ALIGN=3 __heap_base Heap_Mem SPACE Heap_Size __heap_limit PRESERVE8 THUMB ; Vector Table Mapped to Address 0 at Reset AREA RESET, DATA, READONLY EXPORT __Vectors EXPORT __Vectors_End EXPORT __Vectors_Size __Vectors DCD __initial_sp ; Top of Stack DCD Reset_Handler ; Reset Handler DCD NMI_Handler ; NMI Handler DCD HardFault_Handler ; Hard Fault Handler DCD MemManage_Handler ; MPU Fault Handler DCD BusFault_Handler ; Bus Fault Handler DCD UsageFault_Handler ; Usage Fault Handler DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD SVC_Handler ; SVCall Handler DCD DebugMon_Handler ; Debug Monitor Handler DCD 0 ; Reserved DCD PendSV_Handler ; PendSV Handler DCD SysTick_Handler ; SysTick Handler ; External Interrupts DCD UART0RX_Handler ; 0 UART 0 receive interrupt DCD UART0TX_Handler ; 1 UART 0 transmit interrupt DCD UART1RX_Handler ; 2 UART 1 receive interrupt DCD UART1TX_Handler ; 3 UART 1 transmit interrupt DCD UART2RX_Handler ; 4 UART 2 receive interrupt DCD UART2TX_Handler ; 5 UART 2 transmit interrupt DCD GPIO0ALL_Handler ; 6 GPIO 0 combined interrupt DCD GPIO1ALL_Handler ; 7 GPIO 1 combined interrupt DCD TIMER0_Handler ; 8 Timer 0 interrupt DCD TIMER1_Handler ; 9 Timer 1 interrupt DCD DUALTIMER_Handler ; 10 Dual Timer interrupt DCD SPI_0_1_Handler ; 11 SPI #0, #1 interrupt DCD UART_0_1_2_OVF_Handler ; 12 UART overflow (0, 1 & 2) interrupt DCD ETHERNET_Handler ; 13 Ethernet interrupt DCD I2S_Handler ; 14 Audio I2S interrupt DCD TOUCHSCREEN_Handler ; 15 Touch Screen 
interrupt DCD GPIO2_Handler ; 16 GPIO 2 combined interrupt DCD GPIO3_Handler ; 17 GPIO 3 combined interrupt DCD UART3RX_Handler ; 18 UART 3 receive interrupt DCD UART3TX_Handler ; 19 UART 3 transmit interrupt DCD UART4RX_Handler ; 20 UART 4 receive interrupt DCD UART4TX_Handler ; 21 UART 4 transmit interrupt DCD SPI_2_Handler ; 22 SPI #2 interrupt DCD SPI_3_4_Handler ; 23 SPI #3, SPI #4 interrupt DCD GPIO0_0_Handler ; 24 GPIO 0 individual interrupt ( 0) DCD GPIO0_1_Handler ; 25 GPIO 0 individual interrupt ( 1) DCD GPIO0_2_Handler ; 26 GPIO 0 individual interrupt ( 2) DCD GPIO0_3_Handler ; 27 GPIO 0 individual interrupt ( 3) DCD GPIO0_4_Handler ; 28 GPIO 0 individual interrupt ( 4) DCD GPIO0_5_Handler ; 29 GPIO 0 individual interrupt ( 5) DCD GPIO0_6_Handler ; 30 GPIO 0 individual interrupt ( 6) DCD GPIO0_7_Handler ; 31 GPIO 0 individual interrupt ( 7) __Vectors_End __Vectors_Size EQU __Vectors_End - __Vectors AREA |.text|, CODE, READONLY ; Reset Handler Reset_Handler PROC EXPORT Reset_Handler [WEAK] IMPORT SystemInit IMPORT __main LDR R0, =SystemInit BLX R0 LDR R0, =__main BX R0 ENDP ; Dummy Exception Handlers (infinite loops which can be modified) NMI_Handler PROC EXPORT NMI_Handler [WEAK] B . ENDP HardFault_Handler\ PROC EXPORT HardFault_Handler [WEAK] B . ENDP MemManage_Handler\ PROC EXPORT MemManage_Handler [WEAK] B . ENDP BusFault_Handler\ PROC EXPORT BusFault_Handler [WEAK] B . ENDP UsageFault_Handler\ PROC EXPORT UsageFault_Handler [WEAK] B . ENDP SVC_Handler PROC EXPORT SVC_Handler [WEAK] B . ENDP DebugMon_Handler\ PROC EXPORT DebugMon_Handler [WEAK] B . ENDP PendSV_Handler PROC EXPORT PendSV_Handler [WEAK] B . ENDP SysTick_Handler PROC EXPORT SysTick_Handler [WEAK] B . 
ENDP Default_Handler PROC EXPORT UART0RX_Handler [WEAK] EXPORT UART0TX_Handler [WEAK] EXPORT UART1RX_Handler [WEAK] EXPORT UART1TX_Handler [WEAK] EXPORT UART2RX_Handler [WEAK] EXPORT UART2TX_Handler [WEAK] EXPORT GPIO0ALL_Handler [WEAK] EXPORT GPIO1ALL_Handler [WEAK] EXPORT TIMER0_Handler [WEAK] EXPORT TIMER1_Handler [WEAK] EXPORT DUALTIMER_Handler [WEAK] EXPORT SPI_0_1_Handler [WEAK] EXPORT UART_0_1_2_OVF_Handler [WEAK] EXPORT ETHERNET_Handler [WEAK] EXPORT I2S_Handler [WEAK] EXPORT TOUCHSCREEN_Handler [WEAK] EXPORT GPIO2_Handler [WEAK] EXPORT GPIO3_Handler [WEAK] EXPORT UART3RX_Handler [WEAK] EXPORT UART3TX_Handler [WEAK] EXPORT UART4RX_Handler [WEAK] EXPORT UART4TX_Handler [WEAK] EXPORT SPI_2_Handler [WEAK] EXPORT SPI_3_4_Handler [WEAK] EXPORT GPIO0_0_Handler [WEAK] EXPORT GPIO0_1_Handler [WEAK] EXPORT GPIO0_2_Handler [WEAK] EXPORT GPIO0_3_Handler [WEAK] EXPORT GPIO0_4_Handler [WEAK] EXPORT GPIO0_5_Handler [WEAK] EXPORT GPIO0_6_Handler [WEAK] EXPORT GPIO0_7_Handler [WEAK] UART0RX_Handler UART0TX_Handler UART1RX_Handler UART1TX_Handler UART2RX_Handler UART2TX_Handler GPIO0ALL_Handler GPIO1ALL_Handler TIMER0_Handler TIMER1_Handler DUALTIMER_Handler SPI_0_1_Handler UART_0_1_2_OVF_Handler ETHERNET_Handler I2S_Handler TOUCHSCREEN_Handler GPIO2_Handler GPIO3_Handler UART3RX_Handler UART3TX_Handler UART4RX_Handler UART4TX_Handler SPI_2_Handler SPI_3_4_Handler GPIO0_0_Handler GPIO0_1_Handler GPIO0_2_Handler GPIO0_3_Handler GPIO0_4_Handler GPIO0_5_Handler GPIO0_6_Handler GPIO0_7_Handler B . ENDP ALIGN ; User Initial Stack & Heap IF :DEF:__MICROLIB EXPORT __initial_sp EXPORT __heap_base EXPORT __heap_limit ELSE IMPORT __use_two_region_memory EXPORT __user_initial_stackheap __user_initial_stackheap PROC LDR R0, = Heap_Mem LDR R1, =(Stack_Mem + Stack_Size) LDR R2, = (Heap_Mem + Heap_Size) LDR R3, = Stack_Mem BX LR ENDP ALIGN ENDIF END
Aladdin-Wang/MicroLink
6,644
MicroLink/external/perf_counter/example/RTE/Device/ARMCM3/startup_ARMCM3.s
;/**************************************************************************//** ; * @file startup_ARMCM3.s ; * @brief CMSIS Core Device Startup File for ; * ARMCM3 Device ; * @version V1.0.1 ; * @date 23. July 2019 ; ******************************************************************************/ ;/* ; * Copyright (c) 2009-2019 Arm Limited. All rights reserved. ; * ; * SPDX-License-Identifier: Apache-2.0 ; * ; * Licensed under the Apache License, Version 2.0 (the License); you may ; * not use this file except in compliance with the License. ; * You may obtain a copy of the License at ; * ; * www.apache.org/licenses/LICENSE-2.0 ; * ; * Unless required by applicable law or agreed to in writing, software ; * distributed under the License is distributed on an AS IS BASIS, WITHOUT ; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ; * See the License for the specific language governing permissions and ; * limitations under the License. ; */ ;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------ ;<h> Stack Configuration ; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8> ;</h> Stack_Size EQU 0x00000400 AREA STACK, NOINIT, READWRITE, ALIGN=3 __stack_limit Stack_Mem SPACE Stack_Size __initial_sp ;<h> Heap Configuration ; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8> ;</h> Heap_Size EQU 0x00000C00 IF Heap_Size != 0 ; Heap is provided AREA HEAP, NOINIT, READWRITE, ALIGN=3 __heap_base Heap_Mem SPACE Heap_Size __heap_limit ENDIF PRESERVE8 THUMB ; Vector Table Mapped to Address 0 at Reset AREA RESET, DATA, READONLY EXPORT __Vectors EXPORT __Vectors_End EXPORT __Vectors_Size __Vectors DCD __initial_sp ; Top of Stack DCD Reset_Handler ; Reset Handler DCD NMI_Handler ; -14 NMI Handler DCD HardFault_Handler ; -13 Hard Fault Handler DCD MemManage_Handler ; -12 MPU Fault Handler DCD BusFault_Handler ; -11 Bus Fault Handler DCD UsageFault_Handler ; -10 Usage Fault Handler DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 
SVC_Handler ; -5 SVCall Handler DCD DebugMon_Handler ; -4 Debug Monitor Handler DCD 0 ; Reserved DCD PendSV_Handler ; -2 PendSV Handler DCD SysTick_Handler ; -1 SysTick Handler ; Interrupts DCD Interrupt0_Handler ; 0 Interrupt 0 DCD Interrupt1_Handler ; 1 Interrupt 1 DCD Interrupt2_Handler ; 2 Interrupt 2 DCD Interrupt3_Handler ; 3 Interrupt 3 DCD Interrupt4_Handler ; 4 Interrupt 4 DCD Interrupt5_Handler ; 5 Interrupt 5 DCD Interrupt6_Handler ; 6 Interrupt 6 DCD Interrupt7_Handler ; 7 Interrupt 7 DCD Interrupt8_Handler ; 8 Interrupt 8 DCD Interrupt9_Handler ; 9 Interrupt 9 SPACE (214 * 4) ; Interrupts 10 .. 224 are left out __Vectors_End __Vectors_Size EQU __Vectors_End - __Vectors AREA |.text|, CODE, READONLY ; Reset Handler Reset_Handler PROC EXPORT Reset_Handler [WEAK] IMPORT SystemInit IMPORT __main LDR R0, =SystemInit BLX R0 LDR R0, =__main BX R0 ENDP ; The default macro is not used for HardFault_Handler ; because this results in a poor debug illusion. HardFault_Handler PROC EXPORT HardFault_Handler [WEAK] B . ENDP ; Macro to define default exception/interrupt handlers. ; Default handler are weak symbols with an endless loop. ; They can be overwritten by real handlers. MACRO Set_Default_Handler $Handler_Name $Handler_Name PROC EXPORT $Handler_Name [WEAK] B . 
ENDP MEND ; Default exception/interrupt handler Set_Default_Handler NMI_Handler Set_Default_Handler MemManage_Handler Set_Default_Handler BusFault_Handler Set_Default_Handler UsageFault_Handler Set_Default_Handler SVC_Handler Set_Default_Handler DebugMon_Handler Set_Default_Handler PendSV_Handler Set_Default_Handler SysTick_Handler Set_Default_Handler Interrupt0_Handler Set_Default_Handler Interrupt1_Handler Set_Default_Handler Interrupt2_Handler Set_Default_Handler Interrupt3_Handler Set_Default_Handler Interrupt4_Handler Set_Default_Handler Interrupt5_Handler Set_Default_Handler Interrupt6_Handler Set_Default_Handler Interrupt7_Handler Set_Default_Handler Interrupt8_Handler Set_Default_Handler Interrupt9_Handler ALIGN ; User setup Stack & Heap IF :LNOT::DEF:__MICROLIB IMPORT __use_two_region_memory ENDIF EXPORT __stack_limit EXPORT __initial_sp IF Heap_Size != 0 ; Heap is provided EXPORT __heap_base EXPORT __heap_limit ENDIF END
Aladdin-Wang/MicroBoot_Demo
18,719
STM32F207_BOOT/MDK-ARM/startup_stm32f207xx.s
******************* (C) COPYRIGHT 2017 STMicroelectronics ******************** ;* File Name : startup_stm32f207xx.s ;* Author : MCD Application Team ;* Description : STM32F207xx devices vector table for MDK-ARM toolchain. ;* This module performs: ;* - Set the initial SP ;* - Set the initial PC == Reset_Handler ;* - Set the vector table entries with the exceptions ISR address ;* - Branches to __main in the C library (which eventually ;* calls main()). ;* After Reset the CortexM3 processor is in Thread mode, ;* priority is Privileged, and the Stack is set to Main. ;* <<< Use Configuration Wizard in Context Menu >>> ;****************************************************************************** ;* @attention ;* ;* Copyright (c) 2017-2021 STMicroelectronics. ;* All rights reserved. ;* ;* This software is licensed under terms that can be found in the LICENSE file ;* in the root directory of this software component. ;* If no LICENSE file comes with this software, it is provided AS-IS. ;* ;****************************************************************************** ; Amount of memory (in bytes) allocated for Stack ; Tailor this value to your application needs ; <h> Stack Configuration ; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Stack_Size EQU 0x400 AREA STACK, NOINIT, READWRITE, ALIGN=3 Stack_Mem SPACE Stack_Size __initial_sp ; <h> Heap Configuration ; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Heap_Size EQU 0x200 AREA HEAP, NOINIT, READWRITE, ALIGN=3 __heap_base Heap_Mem SPACE Heap_Size __heap_limit PRESERVE8 THUMB ; Vector Table Mapped to Address 0 at Reset AREA RESET, DATA, READONLY EXPORT __Vectors EXPORT __Vectors_End EXPORT __Vectors_Size __Vectors DCD __initial_sp ; Top of Stack DCD Reset_Handler ; Reset Handler DCD NMI_Handler ; NMI Handler DCD HardFault_Handler ; Hard Fault Handler DCD MemManage_Handler ; MPU Fault Handler DCD BusFault_Handler ; Bus Fault Handler DCD UsageFault_Handler ; Usage Fault Handler DCD 0 ; Reserved DCD 0 ; Reserved 
DCD 0 ; Reserved DCD 0 ; Reserved DCD SVC_Handler ; SVCall Handler DCD DebugMon_Handler ; Debug Monitor Handler DCD 0 ; Reserved DCD PendSV_Handler ; PendSV Handler DCD SysTick_Handler ; SysTick Handler ; External Interrupts DCD WWDG_IRQHandler ; Window WatchDog DCD PVD_IRQHandler ; PVD through EXTI Line detection DCD TAMP_STAMP_IRQHandler ; Tamper and TimeStamps through the EXTI line DCD RTC_WKUP_IRQHandler ; RTC Wakeup through the EXTI line DCD FLASH_IRQHandler ; FLASH DCD RCC_IRQHandler ; RCC DCD EXTI0_IRQHandler ; EXTI Line0 DCD EXTI1_IRQHandler ; EXTI Line1 DCD EXTI2_IRQHandler ; EXTI Line2 DCD EXTI3_IRQHandler ; EXTI Line3 DCD EXTI4_IRQHandler ; EXTI Line4 DCD DMA1_Stream0_IRQHandler ; DMA1 Stream 0 DCD DMA1_Stream1_IRQHandler ; DMA1 Stream 1 DCD DMA1_Stream2_IRQHandler ; DMA1 Stream 2 DCD DMA1_Stream3_IRQHandler ; DMA1 Stream 3 DCD DMA1_Stream4_IRQHandler ; DMA1 Stream 4 DCD DMA1_Stream5_IRQHandler ; DMA1 Stream 5 DCD DMA1_Stream6_IRQHandler ; DMA1 Stream 6 DCD ADC_IRQHandler ; ADC1, ADC2 and ADC3s DCD CAN1_TX_IRQHandler ; CAN1 TX DCD CAN1_RX0_IRQHandler ; CAN1 RX0 DCD CAN1_RX1_IRQHandler ; CAN1 RX1 DCD CAN1_SCE_IRQHandler ; CAN1 SCE DCD EXTI9_5_IRQHandler ; External Line[9:5]s DCD TIM1_BRK_TIM9_IRQHandler ; TIM1 Break and TIM9 DCD TIM1_UP_TIM10_IRQHandler ; TIM1 Update and TIM10 DCD TIM1_TRG_COM_TIM11_IRQHandler ; TIM1 Trigger and Commutation and TIM11 DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare DCD TIM2_IRQHandler ; TIM2 DCD TIM3_IRQHandler ; TIM3 DCD TIM4_IRQHandler ; TIM4 DCD I2C1_EV_IRQHandler ; I2C1 Event DCD I2C1_ER_IRQHandler ; I2C1 Error DCD I2C2_EV_IRQHandler ; I2C2 Event DCD I2C2_ER_IRQHandler ; I2C2 Error DCD SPI1_IRQHandler ; SPI1 DCD SPI2_IRQHandler ; SPI2 DCD USART1_IRQHandler ; USART1 DCD USART2_IRQHandler ; USART2 DCD USART3_IRQHandler ; USART3 DCD EXTI15_10_IRQHandler ; External Line[15:10]s DCD RTC_Alarm_IRQHandler ; RTC Alarm (A and B) through EXTI Line DCD OTG_FS_WKUP_IRQHandler ; USB OTG FS Wakeup through EXTI line DCD 
TIM8_BRK_TIM12_IRQHandler ; TIM8 Break and TIM12 DCD TIM8_UP_TIM13_IRQHandler ; TIM8 Update and TIM13 DCD TIM8_TRG_COM_TIM14_IRQHandler ; TIM8 Trigger and Commutation and TIM14 DCD TIM8_CC_IRQHandler ; TIM8 Capture Compare DCD DMA1_Stream7_IRQHandler ; DMA1 Stream7 DCD FSMC_IRQHandler ; FSMC DCD SDIO_IRQHandler ; SDIO DCD TIM5_IRQHandler ; TIM5 DCD SPI3_IRQHandler ; SPI3 DCD UART4_IRQHandler ; UART4 DCD UART5_IRQHandler ; UART5 DCD TIM6_DAC_IRQHandler ; TIM6 and DAC1&2 underrun errors DCD TIM7_IRQHandler ; TIM7 DCD DMA2_Stream0_IRQHandler ; DMA2 Stream 0 DCD DMA2_Stream1_IRQHandler ; DMA2 Stream 1 DCD DMA2_Stream2_IRQHandler ; DMA2 Stream 2 DCD DMA2_Stream3_IRQHandler ; DMA2 Stream 3 DCD DMA2_Stream4_IRQHandler ; DMA2 Stream 4 DCD ETH_IRQHandler ; Ethernet DCD ETH_WKUP_IRQHandler ; Ethernet Wakeup through EXTI line DCD CAN2_TX_IRQHandler ; CAN2 TX DCD CAN2_RX0_IRQHandler ; CAN2 RX0 DCD CAN2_RX1_IRQHandler ; CAN2 RX1 DCD CAN2_SCE_IRQHandler ; CAN2 SCE DCD OTG_FS_IRQHandler ; USB OTG FS DCD DMA2_Stream5_IRQHandler ; DMA2 Stream 5 DCD DMA2_Stream6_IRQHandler ; DMA2 Stream 6 DCD DMA2_Stream7_IRQHandler ; DMA2 Stream 7 DCD USART6_IRQHandler ; USART6 DCD I2C3_EV_IRQHandler ; I2C3 event DCD I2C3_ER_IRQHandler ; I2C3 error DCD OTG_HS_EP1_OUT_IRQHandler ; USB OTG HS End Point 1 Out DCD OTG_HS_EP1_IN_IRQHandler ; USB OTG HS End Point 1 In DCD OTG_HS_WKUP_IRQHandler ; USB OTG HS Wakeup through EXTI DCD OTG_HS_IRQHandler ; USB OTG HS DCD DCMI_IRQHandler ; DCMI DCD 0 ; Reserved DCD HASH_RNG_IRQHandler ; Hash and Rng __Vectors_End __Vectors_Size EQU __Vectors_End - __Vectors AREA |.text|, CODE, READONLY ; Reset handler Reset_Handler PROC EXPORT Reset_Handler [WEAK] IMPORT SystemInit IMPORT __main LDR R0, =SystemInit BLX R0 LDR R0, =__main BX R0 ENDP ; Dummy Exception Handlers (infinite loops which can be modified) NMI_Handler PROC EXPORT NMI_Handler [WEAK] B . ENDP HardFault_Handler\ PROC EXPORT HardFault_Handler [WEAK] B . 
ENDP MemManage_Handler\ PROC EXPORT MemManage_Handler [WEAK] B . ENDP BusFault_Handler\ PROC EXPORT BusFault_Handler [WEAK] B . ENDP UsageFault_Handler\ PROC EXPORT UsageFault_Handler [WEAK] B . ENDP SVC_Handler PROC EXPORT SVC_Handler [WEAK] B . ENDP DebugMon_Handler\ PROC EXPORT DebugMon_Handler [WEAK] B . ENDP PendSV_Handler PROC EXPORT PendSV_Handler [WEAK] B . ENDP SysTick_Handler PROC EXPORT SysTick_Handler [WEAK] B . ENDP Default_Handler PROC EXPORT WWDG_IRQHandler [WEAK] EXPORT PVD_IRQHandler [WEAK] EXPORT TAMP_STAMP_IRQHandler [WEAK] EXPORT RTC_WKUP_IRQHandler [WEAK] EXPORT FLASH_IRQHandler [WEAK] EXPORT RCC_IRQHandler [WEAK] EXPORT EXTI0_IRQHandler [WEAK] EXPORT EXTI1_IRQHandler [WEAK] EXPORT EXTI2_IRQHandler [WEAK] EXPORT EXTI3_IRQHandler [WEAK] EXPORT EXTI4_IRQHandler [WEAK] EXPORT DMA1_Stream0_IRQHandler [WEAK] EXPORT DMA1_Stream1_IRQHandler [WEAK] EXPORT DMA1_Stream2_IRQHandler [WEAK] EXPORT DMA1_Stream3_IRQHandler [WEAK] EXPORT DMA1_Stream4_IRQHandler [WEAK] EXPORT DMA1_Stream5_IRQHandler [WEAK] EXPORT DMA1_Stream6_IRQHandler [WEAK] EXPORT ADC_IRQHandler [WEAK] EXPORT CAN1_TX_IRQHandler [WEAK] EXPORT CAN1_RX0_IRQHandler [WEAK] EXPORT CAN1_RX1_IRQHandler [WEAK] EXPORT CAN1_SCE_IRQHandler [WEAK] EXPORT EXTI9_5_IRQHandler [WEAK] EXPORT TIM1_BRK_TIM9_IRQHandler [WEAK] EXPORT TIM1_UP_TIM10_IRQHandler [WEAK] EXPORT TIM1_TRG_COM_TIM11_IRQHandler [WEAK] EXPORT TIM1_CC_IRQHandler [WEAK] EXPORT TIM2_IRQHandler [WEAK] EXPORT TIM3_IRQHandler [WEAK] EXPORT TIM4_IRQHandler [WEAK] EXPORT I2C1_EV_IRQHandler [WEAK] EXPORT I2C1_ER_IRQHandler [WEAK] EXPORT I2C2_EV_IRQHandler [WEAK] EXPORT I2C2_ER_IRQHandler [WEAK] EXPORT SPI1_IRQHandler [WEAK] EXPORT SPI2_IRQHandler [WEAK] EXPORT USART1_IRQHandler [WEAK] EXPORT USART2_IRQHandler [WEAK] EXPORT USART3_IRQHandler [WEAK] EXPORT EXTI15_10_IRQHandler [WEAK] EXPORT RTC_Alarm_IRQHandler [WEAK] EXPORT OTG_FS_WKUP_IRQHandler [WEAK] EXPORT TIM8_BRK_TIM12_IRQHandler [WEAK] EXPORT TIM8_UP_TIM13_IRQHandler [WEAK] EXPORT 
TIM8_TRG_COM_TIM14_IRQHandler [WEAK] EXPORT TIM8_CC_IRQHandler [WEAK] EXPORT DMA1_Stream7_IRQHandler [WEAK] EXPORT FSMC_IRQHandler [WEAK] EXPORT SDIO_IRQHandler [WEAK] EXPORT TIM5_IRQHandler [WEAK] EXPORT SPI3_IRQHandler [WEAK] EXPORT UART4_IRQHandler [WEAK] EXPORT UART5_IRQHandler [WEAK] EXPORT TIM6_DAC_IRQHandler [WEAK] EXPORT TIM7_IRQHandler [WEAK] EXPORT DMA2_Stream0_IRQHandler [WEAK] EXPORT DMA2_Stream1_IRQHandler [WEAK] EXPORT DMA2_Stream2_IRQHandler [WEAK] EXPORT DMA2_Stream3_IRQHandler [WEAK] EXPORT DMA2_Stream4_IRQHandler [WEAK] EXPORT ETH_IRQHandler [WEAK] EXPORT ETH_WKUP_IRQHandler [WEAK] EXPORT CAN2_TX_IRQHandler [WEAK] EXPORT CAN2_RX0_IRQHandler [WEAK] EXPORT CAN2_RX1_IRQHandler [WEAK] EXPORT CAN2_SCE_IRQHandler [WEAK] EXPORT OTG_FS_IRQHandler [WEAK] EXPORT DMA2_Stream5_IRQHandler [WEAK] EXPORT DMA2_Stream6_IRQHandler [WEAK] EXPORT DMA2_Stream7_IRQHandler [WEAK] EXPORT USART6_IRQHandler [WEAK] EXPORT I2C3_EV_IRQHandler [WEAK] EXPORT I2C3_ER_IRQHandler [WEAK] EXPORT OTG_HS_EP1_OUT_IRQHandler [WEAK] EXPORT OTG_HS_EP1_IN_IRQHandler [WEAK] EXPORT OTG_HS_WKUP_IRQHandler [WEAK] EXPORT OTG_HS_IRQHandler [WEAK] EXPORT DCMI_IRQHandler [WEAK] EXPORT HASH_RNG_IRQHandler [WEAK] WWDG_IRQHandler PVD_IRQHandler TAMP_STAMP_IRQHandler RTC_WKUP_IRQHandler FLASH_IRQHandler RCC_IRQHandler EXTI0_IRQHandler EXTI1_IRQHandler EXTI2_IRQHandler EXTI3_IRQHandler EXTI4_IRQHandler DMA1_Stream0_IRQHandler DMA1_Stream1_IRQHandler DMA1_Stream2_IRQHandler DMA1_Stream3_IRQHandler DMA1_Stream4_IRQHandler DMA1_Stream5_IRQHandler DMA1_Stream6_IRQHandler ADC_IRQHandler CAN1_TX_IRQHandler CAN1_RX0_IRQHandler CAN1_RX1_IRQHandler CAN1_SCE_IRQHandler EXTI9_5_IRQHandler TIM1_BRK_TIM9_IRQHandler TIM1_UP_TIM10_IRQHandler TIM1_TRG_COM_TIM11_IRQHandler TIM1_CC_IRQHandler TIM2_IRQHandler TIM3_IRQHandler TIM4_IRQHandler I2C1_EV_IRQHandler I2C1_ER_IRQHandler I2C2_EV_IRQHandler I2C2_ER_IRQHandler SPI1_IRQHandler SPI2_IRQHandler USART1_IRQHandler USART2_IRQHandler USART3_IRQHandler EXTI15_10_IRQHandler 
RTC_Alarm_IRQHandler OTG_FS_WKUP_IRQHandler TIM8_BRK_TIM12_IRQHandler TIM8_UP_TIM13_IRQHandler TIM8_TRG_COM_TIM14_IRQHandler TIM8_CC_IRQHandler DMA1_Stream7_IRQHandler FSMC_IRQHandler SDIO_IRQHandler TIM5_IRQHandler SPI3_IRQHandler UART4_IRQHandler UART5_IRQHandler TIM6_DAC_IRQHandler TIM7_IRQHandler DMA2_Stream0_IRQHandler DMA2_Stream1_IRQHandler DMA2_Stream2_IRQHandler DMA2_Stream3_IRQHandler DMA2_Stream4_IRQHandler ETH_IRQHandler ETH_WKUP_IRQHandler CAN2_TX_IRQHandler CAN2_RX0_IRQHandler CAN2_RX1_IRQHandler CAN2_SCE_IRQHandler OTG_FS_IRQHandler DMA2_Stream5_IRQHandler DMA2_Stream6_IRQHandler DMA2_Stream7_IRQHandler USART6_IRQHandler I2C3_EV_IRQHandler I2C3_ER_IRQHandler OTG_HS_EP1_OUT_IRQHandler OTG_HS_EP1_IN_IRQHandler OTG_HS_WKUP_IRQHandler OTG_HS_IRQHandler DCMI_IRQHandler HASH_RNG_IRQHandler B . ENDP ALIGN ;******************************************************************************* ; User Stack and Heap initialization ;******************************************************************************* IF :DEF:__MICROLIB EXPORT __initial_sp EXPORT __heap_base EXPORT __heap_limit ELSE IMPORT __use_two_region_memory EXPORT __user_initial_stackheap __user_initial_stackheap LDR R0, = Heap_Mem LDR R1, =(Stack_Mem + Stack_Size) LDR R2, = (Heap_Mem + Heap_Size) LDR R3, = Stack_Mem BX LR ALIGN ENDIF END
alambe94/I-CUBE-USBD-Composite
29,821
USBD_Test/Core/Startup/startup_stm32h7b3lihxq.s
/** ****************************************************************************** * @file startup_stm32h7b3xxq.s * @author MCD Application Team * @brief STM32H7B3xx Devices vector table for GCC based toolchain. * This module performs: * - Set the initial SP * - Set the initial PC == Reset_Handler, * - Set the vector table entries with the exceptions ISR address * - Branches to main in the C library (which eventually * calls main()). * After Reset the Cortex-M processor is in Thread mode, * priority is Privileged, and the Stack is set to Main. ****************************************************************************** * @attention * * <h2><center>&copy; Copyright (c) 2019 STMicroelectronics. * All rights reserved.</center></h2> * * This software component is licensed by ST under BSD 3-Clause license, * the "License"; You may not use this file except in compliance with the * License. You may obtain a copy of the License at: * opensource.org/licenses/BSD-3-Clause * ****************************************************************************** */ .syntax unified .cpu cortex-m7 .fpu softvfp .thumb .global g_pfnVectors .global Default_Handler /* start address for the initialization values of the .data section. defined in linker script */ .word _sidata /* start address for the .data section. defined in linker script */ .word _sdata /* end address for the .data section. defined in linker script */ .word _edata /* start address for the .bss section. defined in linker script */ .word _sbss /* end address for the .bss section. defined in linker script */ .word _ebss /* stack used for SystemInit_ExtMemCtl; always internal RAM used */ /** * @brief This is the code that gets called when the processor first * starts execution following a reset event. Only the absolutely * necessary set is performed, after which the application * supplied main() routine is called. 
* @param None * @retval : None */ .section .text.Reset_Handler .weak Reset_Handler .type Reset_Handler, %function Reset_Handler: ldr sp, =_estack /* set stack pointer */ /* Call the clock system initialization function.*/ bl SystemInit /* Copy the data segment initializers from flash to SRAM */ ldr r0, =_sdata ldr r1, =_edata ldr r2, =_sidata movs r3, #0 b LoopCopyDataInit CopyDataInit: ldr r4, [r2, r3] str r4, [r0, r3] adds r3, r3, #4 LoopCopyDataInit: adds r4, r0, r3 cmp r4, r1 bcc CopyDataInit /* Zero fill the bss segment. */ ldr r2, =_sbss ldr r4, =_ebss movs r3, #0 b LoopFillZerobss FillZerobss: str r3, [r2] adds r2, r2, #4 LoopFillZerobss: cmp r2, r4 bcc FillZerobss /* Call static constructors */ bl __libc_init_array /* Call the application's entry point.*/ bl main bx lr .size Reset_Handler, .-Reset_Handler /** * @brief This is the code that gets called when the processor receives an * unexpected interrupt. This simply enters an infinite loop, preserving * the system state for examination by a debugger. * @param None * @retval None */ .section .text.Default_Handler,"ax",%progbits Default_Handler: Infinite_Loop: b Infinite_Loop .size Default_Handler, .-Default_Handler /****************************************************************************** * * The minimal vector table for a Cortex M. Note that the proper constructs * must be placed on this to ensure that it ends up at physical address * 0x0000.0000. 
* *******************************************************************************/ .section .isr_vector,"a",%progbits .type g_pfnVectors, %object .size g_pfnVectors, .-g_pfnVectors g_pfnVectors: .word _estack .word Reset_Handler .word NMI_Handler .word HardFault_Handler .word MemManage_Handler .word BusFault_Handler .word UsageFault_Handler .word 0 .word 0 .word 0 .word 0 .word SVC_Handler .word DebugMon_Handler .word 0 .word PendSV_Handler .word SysTick_Handler /* External Interrupts */ .word WWDG_IRQHandler /* Window WatchDog */ .word PVD_PVM_IRQHandler /* PVD/PVM through EXTI Line detection */ .word RTC_TAMP_STAMP_CSS_LSE_IRQHandler /* Tamper and TimeStamps through the EXTI line */ .word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */ .word FLASH_IRQHandler /* FLASH */ .word RCC_IRQHandler /* RCC */ .word EXTI0_IRQHandler /* EXTI Line0 */ .word EXTI1_IRQHandler /* EXTI Line1 */ .word EXTI2_IRQHandler /* EXTI Line2 */ .word EXTI3_IRQHandler /* EXTI Line3 */ .word EXTI4_IRQHandler /* EXTI Line4 */ .word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */ .word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */ .word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */ .word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */ .word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */ .word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */ .word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */ .word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */ .word FDCAN1_IT0_IRQHandler /* FDCAN1 interrupt line 0 */ .word FDCAN2_IT0_IRQHandler /* FDCAN2 interrupt line 0 */ .word FDCAN1_IT1_IRQHandler /* FDCAN1 interrupt line 1 */ .word FDCAN2_IT1_IRQHandler /* FDCAN2 interrupt line 1 */ .word EXTI9_5_IRQHandler /* External Line[9:5]s */ .word TIM1_BRK_IRQHandler /* TIM1 Break interrupt */ .word TIM1_UP_IRQHandler /* TIM1 Update interrupt */ .word TIM1_TRG_COM_IRQHandler /* TIM1 Trigger and Commutation interrupt */ .word TIM1_CC_IRQHandler /* TIM1 Capture Compare */ .word TIM2_IRQHandler /* TIM2 */ .word TIM3_IRQHandler /* 
TIM3 */ .word TIM4_IRQHandler /* TIM4 */ .word I2C1_EV_IRQHandler /* I2C1 Event */ .word I2C1_ER_IRQHandler /* I2C1 Error */ .word I2C2_EV_IRQHandler /* I2C2 Event */ .word I2C2_ER_IRQHandler /* I2C2 Error */ .word SPI1_IRQHandler /* SPI1 */ .word SPI2_IRQHandler /* SPI2 */ .word USART1_IRQHandler /* USART1 */ .word USART2_IRQHandler /* USART2 */ .word USART3_IRQHandler /* USART3 */ .word EXTI15_10_IRQHandler /* External Line[15:10]s */ .word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */ .word DFSDM2_IRQHandler /* DFSDM2 Interrupt */ .word TIM8_BRK_TIM12_IRQHandler /* TIM8 Break and TIM12 */ .word TIM8_UP_TIM13_IRQHandler /* TIM8 Update and TIM13 */ .word TIM8_TRG_COM_TIM14_IRQHandler /* TIM8 Trigger and Commutation and TIM14 */ .word TIM8_CC_IRQHandler /* TIM8 Capture Compare */ .word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */ .word FMC_IRQHandler /* FMC */ .word SDMMC1_IRQHandler /* SDMMC1 */ .word TIM5_IRQHandler /* TIM5 */ .word SPI3_IRQHandler /* SPI3 */ .word UART4_IRQHandler /* UART4 */ .word UART5_IRQHandler /* UART5 */ .word TIM6_DAC_IRQHandler /* TIM6 and DAC1&2 underrun errors */ .word TIM7_IRQHandler /* TIM7 */ .word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */ .word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */ .word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */ .word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */ .word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word FDCAN_CAL_IRQHandler /* FDCAN calibration unit interrupt*/ .word DFSDM1_FLT4_IRQHandler /* DFSDM Filter4 Interrupt */ .word DFSDM1_FLT5_IRQHandler /* DFSDM Filter5 Interrupt */ .word DFSDM1_FLT6_IRQHandler /* DFSDM Filter6 Interrupt */ .word DFSDM1_FLT7_IRQHandler /* DFSDM Filter7 Interrupt */ .word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */ .word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */ .word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */ .word USART6_IRQHandler /* USART6 */ .word I2C3_EV_IRQHandler /* I2C3 event */ .word 
I2C3_ER_IRQHandler /* I2C3 error */ .word OTG_HS_EP1_OUT_IRQHandler /* USB OTG HS End Point 1 Out */ .word OTG_HS_EP1_IN_IRQHandler /* USB OTG HS End Point 1 In */ .word OTG_HS_WKUP_IRQHandler /* USB OTG HS Wakeup through EXTI */ .word OTG_HS_IRQHandler /* USB OTG HS */ .word DCMI_PSSI_IRQHandler /* DCMI, PSSI */ .word CRYP_IRQHandler /* CRYP crypto global interrupt */ .word HASH_RNG_IRQHandler /* RNG, HASH */ .word FPU_IRQHandler /* FPU */ .word UART7_IRQHandler /* UART7 */ .word UART8_IRQHandler /* UART8 */ .word SPI4_IRQHandler /* SPI4 */ .word SPI5_IRQHandler /* SPI5 */ .word SPI6_IRQHandler /* SPI6 */ .word SAI1_IRQHandler /* SAI1 */ .word LTDC_IRQHandler /* LTDC */ .word LTDC_ER_IRQHandler /* LTDC error */ .word DMA2D_IRQHandler /* DMA2D */ .word SAI2_IRQHandler /* SAI2 */ .word OCTOSPI1_IRQHandler /* OCTOSPI1 */ .word LPTIM1_IRQHandler /* LPTIM1 */ .word CEC_IRQHandler /* HDMI_CEC */ .word I2C4_EV_IRQHandler /* I2C4 Event */ .word I2C4_ER_IRQHandler /* I2C4 Error */ .word SPDIF_RX_IRQHandler /* SPDIF_RX */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word DMAMUX1_OVR_IRQHandler /* DMAMUX1 Overrun interrupt */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word DFSDM1_FLT0_IRQHandler /* DFSDM Filter0 Interrupt */ .word DFSDM1_FLT1_IRQHandler /* DFSDM Filter1 Interrupt */ .word DFSDM1_FLT2_IRQHandler /* DFSDM Filter2 Interrupt */ .word DFSDM1_FLT3_IRQHandler /* DFSDM Filter3 Interrupt */ .word 0 /* Reserved */ .word SWPMI1_IRQHandler /* Serial Wire Interface 1 global interrupt */ .word TIM15_IRQHandler /* TIM15 global Interrupt */ .word TIM16_IRQHandler /* TIM16 global Interrupt */ .word TIM17_IRQHandler /* TIM17 global Interrupt */ .word MDIOS_WKUP_IRQHandler /* MDIOS Wakeup Interrupt */ .word MDIOS_IRQHandler /* MDIOS global Interrupt */ .word JPEG_IRQHandler /* JPEG global Interrupt */ .word 
MDMA_IRQHandler /* MDMA global Interrupt */ .word 0 /* Reserved */ .word SDMMC2_IRQHandler /* SDMMC2 global Interrupt */ .word HSEM1_IRQHandler /* HSEM1 global Interrupt */ .word 0 /* Reserved */ .word DAC2_IRQHandler /* DAC2 global Interrupt */ .word DMAMUX2_OVR_IRQHandler /* DMAMUX Overrun interrupt */ .word BDMA2_Channel0_IRQHandler /* BDMA2 Channel 0 global Interrupt */ .word BDMA2_Channel1_IRQHandler /* BDMA2 Channel 1 global Interrupt */ .word BDMA2_Channel2_IRQHandler /* BDMA2 Channel 2 global Interrupt */ .word BDMA2_Channel3_IRQHandler /* BDMA2 Channel 3 global Interrupt */ .word BDMA2_Channel4_IRQHandler /* BDMA2 Channel 4 global Interrupt */ .word BDMA2_Channel5_IRQHandler /* BDMA2 Channel 5 global Interrupt */ .word BDMA2_Channel6_IRQHandler /* BDMA2 Channel 6 global Interrupt */ .word BDMA2_Channel7_IRQHandler /* BDMA2 Channel 7 global Interrupt */ .word COMP_IRQHandler /* COMP global Interrupt */ .word LPTIM2_IRQHandler /* LP TIM2 global interrupt */ .word LPTIM3_IRQHandler /* LP TIM3 global interrupt */ .word UART9_IRQHandler /* UART9 global interrupt */ .word USART10_IRQHandler /* USART10 global interrupt */ .word LPUART1_IRQHandler /* LP UART1 interrupt */ .word 0 /* Reserved */ .word CRS_IRQHandler /* Clock Recovery Global Interrupt */ .word ECC_IRQHandler /* ECC diagnostic Global Interrupt */ .word 0 /* Reserved */ .word DTS_IRQHandler /* DTS */ .word 0 /* Reserved */ .word WAKEUP_PIN_IRQHandler /* Interrupt for all 6 wake-up pins */ .word OCTOSPI2_IRQHandler /* OCTOSPI2 */ .word OTFDEC1_IRQHandler /* OTFDEC1 */ .word OTFDEC2_IRQHandler /* OTFDEC2 */ .word GFXMMU_IRQHandler /* GFXMMU */ .word BDMA1_IRQHandler /* BDMA1 */ /******************************************************************************* * * Provide weak aliases for each Exception handler to the Default_Handler. * As they are weak aliases, any function with the same name will override * this definition. 
* *******************************************************************************/ .weak NMI_Handler .thumb_set NMI_Handler,Default_Handler .weak HardFault_Handler .thumb_set HardFault_Handler,Default_Handler .weak MemManage_Handler .thumb_set MemManage_Handler,Default_Handler .weak BusFault_Handler .thumb_set BusFault_Handler,Default_Handler .weak UsageFault_Handler .thumb_set UsageFault_Handler,Default_Handler .weak SVC_Handler .thumb_set SVC_Handler,Default_Handler .weak DebugMon_Handler .thumb_set DebugMon_Handler,Default_Handler .weak PendSV_Handler .thumb_set PendSV_Handler,Default_Handler .weak SysTick_Handler .thumb_set SysTick_Handler,Default_Handler .weak WWDG_IRQHandler .thumb_set WWDG_IRQHandler,Default_Handler .weak PVD_PVM_IRQHandler .thumb_set PVD_PVM_IRQHandler,Default_Handler .weak RTC_TAMP_STAMP_CSS_LSE_IRQHandler .thumb_set RTC_TAMP_STAMP_CSS_LSE_IRQHandler,Default_Handler .weak RTC_WKUP_IRQHandler .thumb_set RTC_WKUP_IRQHandler,Default_Handler .weak FLASH_IRQHandler .thumb_set FLASH_IRQHandler,Default_Handler .weak RCC_IRQHandler .thumb_set RCC_IRQHandler,Default_Handler .weak EXTI0_IRQHandler .thumb_set EXTI0_IRQHandler,Default_Handler .weak EXTI1_IRQHandler .thumb_set EXTI1_IRQHandler,Default_Handler .weak EXTI2_IRQHandler .thumb_set EXTI2_IRQHandler,Default_Handler .weak EXTI3_IRQHandler .thumb_set EXTI3_IRQHandler,Default_Handler .weak EXTI4_IRQHandler .thumb_set EXTI4_IRQHandler,Default_Handler .weak DMA1_Stream0_IRQHandler .thumb_set DMA1_Stream0_IRQHandler,Default_Handler .weak DMA1_Stream1_IRQHandler .thumb_set DMA1_Stream1_IRQHandler,Default_Handler .weak DMA1_Stream2_IRQHandler .thumb_set DMA1_Stream2_IRQHandler,Default_Handler .weak DMA1_Stream3_IRQHandler .thumb_set DMA1_Stream3_IRQHandler,Default_Handler .weak DMA1_Stream4_IRQHandler .thumb_set DMA1_Stream4_IRQHandler,Default_Handler .weak DMA1_Stream5_IRQHandler .thumb_set DMA1_Stream5_IRQHandler,Default_Handler .weak DMA1_Stream6_IRQHandler .thumb_set 
DMA1_Stream6_IRQHandler,Default_Handler .weak ADC_IRQHandler .thumb_set ADC_IRQHandler,Default_Handler .weak FDCAN1_IT0_IRQHandler .thumb_set FDCAN1_IT0_IRQHandler,Default_Handler .weak FDCAN2_IT0_IRQHandler .thumb_set FDCAN2_IT0_IRQHandler,Default_Handler .weak FDCAN1_IT1_IRQHandler .thumb_set FDCAN1_IT1_IRQHandler,Default_Handler .weak FDCAN2_IT1_IRQHandler .thumb_set FDCAN2_IT1_IRQHandler,Default_Handler .weak EXTI9_5_IRQHandler .thumb_set EXTI9_5_IRQHandler,Default_Handler .weak TIM1_BRK_IRQHandler .thumb_set TIM1_BRK_IRQHandler,Default_Handler .weak TIM1_UP_IRQHandler .thumb_set TIM1_UP_IRQHandler,Default_Handler .weak TIM1_TRG_COM_IRQHandler .thumb_set TIM1_TRG_COM_IRQHandler,Default_Handler .weak TIM1_CC_IRQHandler .thumb_set TIM1_CC_IRQHandler,Default_Handler .weak TIM2_IRQHandler .thumb_set TIM2_IRQHandler,Default_Handler .weak TIM3_IRQHandler .thumb_set TIM3_IRQHandler,Default_Handler .weak TIM4_IRQHandler .thumb_set TIM4_IRQHandler,Default_Handler .weak I2C1_EV_IRQHandler .thumb_set I2C1_EV_IRQHandler,Default_Handler .weak I2C1_ER_IRQHandler .thumb_set I2C1_ER_IRQHandler,Default_Handler .weak I2C2_EV_IRQHandler .thumb_set I2C2_EV_IRQHandler,Default_Handler .weak I2C2_ER_IRQHandler .thumb_set I2C2_ER_IRQHandler,Default_Handler .weak SPI1_IRQHandler .thumb_set SPI1_IRQHandler,Default_Handler .weak SPI2_IRQHandler .thumb_set SPI2_IRQHandler,Default_Handler .weak USART1_IRQHandler .thumb_set USART1_IRQHandler,Default_Handler .weak USART2_IRQHandler .thumb_set USART2_IRQHandler,Default_Handler .weak USART3_IRQHandler .thumb_set USART3_IRQHandler,Default_Handler .weak EXTI15_10_IRQHandler .thumb_set EXTI15_10_IRQHandler,Default_Handler .weak RTC_Alarm_IRQHandler .thumb_set RTC_Alarm_IRQHandler,Default_Handler .weak DFSDM2_IRQHandler .thumb_set DFSDM2_IRQHandler,Default_Handler .weak TIM8_BRK_TIM12_IRQHandler .thumb_set TIM8_BRK_TIM12_IRQHandler,Default_Handler .weak TIM8_UP_TIM13_IRQHandler .thumb_set TIM8_UP_TIM13_IRQHandler,Default_Handler .weak 
TIM8_TRG_COM_TIM14_IRQHandler .thumb_set TIM8_TRG_COM_TIM14_IRQHandler,Default_Handler .weak TIM8_CC_IRQHandler .thumb_set TIM8_CC_IRQHandler,Default_Handler .weak DMA1_Stream7_IRQHandler .thumb_set DMA1_Stream7_IRQHandler,Default_Handler .weak FMC_IRQHandler .thumb_set FMC_IRQHandler,Default_Handler .weak SDMMC1_IRQHandler .thumb_set SDMMC1_IRQHandler,Default_Handler .weak TIM5_IRQHandler .thumb_set TIM5_IRQHandler,Default_Handler .weak SPI3_IRQHandler .thumb_set SPI3_IRQHandler,Default_Handler .weak UART4_IRQHandler .thumb_set UART4_IRQHandler,Default_Handler .weak UART5_IRQHandler .thumb_set UART5_IRQHandler,Default_Handler .weak TIM6_DAC_IRQHandler .thumb_set TIM6_DAC_IRQHandler,Default_Handler .weak TIM7_IRQHandler .thumb_set TIM7_IRQHandler,Default_Handler .weak DMA2_Stream0_IRQHandler .thumb_set DMA2_Stream0_IRQHandler,Default_Handler .weak DMA2_Stream1_IRQHandler .thumb_set DMA2_Stream1_IRQHandler,Default_Handler .weak DMA2_Stream2_IRQHandler .thumb_set DMA2_Stream2_IRQHandler,Default_Handler .weak DMA2_Stream3_IRQHandler .thumb_set DMA2_Stream3_IRQHandler,Default_Handler .weak DMA2_Stream4_IRQHandler .thumb_set DMA2_Stream4_IRQHandler,Default_Handler .weak FDCAN_CAL_IRQHandler .thumb_set FDCAN_CAL_IRQHandler,Default_Handler .weak DFSDM1_FLT4_IRQHandler .thumb_set DFSDM1_FLT4_IRQHandler,Default_Handler .weak DFSDM1_FLT5_IRQHandler .thumb_set DFSDM1_FLT5_IRQHandler,Default_Handler .weak DFSDM1_FLT6_IRQHandler .thumb_set DFSDM1_FLT6_IRQHandler,Default_Handler .weak DFSDM1_FLT7_IRQHandler .thumb_set DFSDM1_FLT7_IRQHandler,Default_Handler .weak DMA2_Stream5_IRQHandler .thumb_set DMA2_Stream5_IRQHandler,Default_Handler .weak DMA2_Stream6_IRQHandler .thumb_set DMA2_Stream6_IRQHandler,Default_Handler .weak DMA2_Stream7_IRQHandler .thumb_set DMA2_Stream7_IRQHandler,Default_Handler .weak USART6_IRQHandler .thumb_set USART6_IRQHandler,Default_Handler .weak I2C3_EV_IRQHandler .thumb_set I2C3_EV_IRQHandler,Default_Handler .weak I2C3_ER_IRQHandler .thumb_set 
I2C3_ER_IRQHandler,Default_Handler .weak OTG_HS_EP1_OUT_IRQHandler .thumb_set OTG_HS_EP1_OUT_IRQHandler,Default_Handler .weak OTG_HS_EP1_IN_IRQHandler .thumb_set OTG_HS_EP1_IN_IRQHandler,Default_Handler .weak OTG_HS_WKUP_IRQHandler .thumb_set OTG_HS_WKUP_IRQHandler,Default_Handler .weak OTG_HS_IRQHandler .thumb_set OTG_HS_IRQHandler,Default_Handler .weak DCMI_PSSI_IRQHandler .thumb_set DCMI_PSSI_IRQHandler,Default_Handler .weak CRYP_IRQHandler .thumb_set CRYP_IRQHandler,Default_Handler .weak HASH_RNG_IRQHandler .thumb_set HASH_RNG_IRQHandler,Default_Handler .weak FPU_IRQHandler .thumb_set FPU_IRQHandler,Default_Handler .weak UART7_IRQHandler .thumb_set UART7_IRQHandler,Default_Handler .weak UART8_IRQHandler .thumb_set UART8_IRQHandler,Default_Handler .weak SPI4_IRQHandler .thumb_set SPI4_IRQHandler,Default_Handler .weak SPI5_IRQHandler .thumb_set SPI5_IRQHandler,Default_Handler .weak SPI6_IRQHandler .thumb_set SPI6_IRQHandler,Default_Handler .weak SAI1_IRQHandler .thumb_set SAI1_IRQHandler,Default_Handler .weak LTDC_IRQHandler .thumb_set LTDC_IRQHandler,Default_Handler .weak LTDC_ER_IRQHandler .thumb_set LTDC_ER_IRQHandler,Default_Handler .weak DMA2D_IRQHandler .thumb_set DMA2D_IRQHandler,Default_Handler .weak SAI2_IRQHandler .thumb_set SAI2_IRQHandler,Default_Handler .weak OCTOSPI1_IRQHandler .thumb_set OCTOSPI1_IRQHandler,Default_Handler .weak LPTIM1_IRQHandler .thumb_set LPTIM1_IRQHandler,Default_Handler .weak CEC_IRQHandler .thumb_set CEC_IRQHandler,Default_Handler .weak I2C4_EV_IRQHandler .thumb_set I2C4_EV_IRQHandler,Default_Handler .weak I2C4_ER_IRQHandler .thumb_set I2C4_ER_IRQHandler,Default_Handler .weak SPDIF_RX_IRQHandler .thumb_set SPDIF_RX_IRQHandler,Default_Handler .weak DMAMUX1_OVR_IRQHandler .thumb_set DMAMUX1_OVR_IRQHandler,Default_Handler .weak DFSDM1_FLT0_IRQHandler .thumb_set DFSDM1_FLT0_IRQHandler,Default_Handler .weak DFSDM1_FLT1_IRQHandler .thumb_set DFSDM1_FLT1_IRQHandler,Default_Handler .weak DFSDM1_FLT2_IRQHandler .thumb_set 
DFSDM1_FLT2_IRQHandler,Default_Handler .weak DFSDM1_FLT3_IRQHandler .thumb_set DFSDM1_FLT3_IRQHandler,Default_Handler .weak SWPMI1_IRQHandler .thumb_set SWPMI1_IRQHandler,Default_Handler .weak TIM15_IRQHandler .thumb_set TIM15_IRQHandler,Default_Handler .weak TIM16_IRQHandler .thumb_set TIM16_IRQHandler,Default_Handler .weak TIM17_IRQHandler .thumb_set TIM17_IRQHandler,Default_Handler .weak MDIOS_WKUP_IRQHandler .thumb_set MDIOS_WKUP_IRQHandler,Default_Handler .weak MDIOS_IRQHandler .thumb_set MDIOS_IRQHandler,Default_Handler .weak JPEG_IRQHandler .thumb_set JPEG_IRQHandler,Default_Handler .weak MDMA_IRQHandler .thumb_set MDMA_IRQHandler,Default_Handler .weak SDMMC2_IRQHandler .thumb_set SDMMC2_IRQHandler,Default_Handler .weak HSEM1_IRQHandler .thumb_set HSEM1_IRQHandler,Default_Handler .weak DAC2_IRQHandler .thumb_set DAC2_IRQHandler,Default_Handler .weak DMAMUX2_OVR_IRQHandler .thumb_set DMAMUX2_OVR_IRQHandler,Default_Handler .weak BDMA2_Channel0_IRQHandler .thumb_set BDMA2_Channel0_IRQHandler,Default_Handler .weak BDMA2_Channel1_IRQHandler .thumb_set BDMA2_Channel1_IRQHandler,Default_Handler .weak BDMA2_Channel2_IRQHandler .thumb_set BDMA2_Channel2_IRQHandler,Default_Handler .weak BDMA2_Channel3_IRQHandler .thumb_set BDMA2_Channel3_IRQHandler,Default_Handler .weak BDMA2_Channel4_IRQHandler .thumb_set BDMA2_Channel4_IRQHandler,Default_Handler .weak BDMA2_Channel5_IRQHandler .thumb_set BDMA2_Channel5_IRQHandler,Default_Handler .weak BDMA2_Channel6_IRQHandler .thumb_set BDMA2_Channel6_IRQHandler,Default_Handler .weak BDMA2_Channel7_IRQHandler .thumb_set BDMA2_Channel7_IRQHandler,Default_Handler .weak COMP_IRQHandler .thumb_set COMP_IRQHandler,Default_Handler .weak LPTIM2_IRQHandler .thumb_set LPTIM2_IRQHandler,Default_Handler .weak LPTIM3_IRQHandler .thumb_set LPTIM3_IRQHandler,Default_Handler .weak LPTIM4_IRQHandler .thumb_set LPTIM4_IRQHandler,Default_Handler .weak LPTIM5_IRQHandler .thumb_set LPTIM5_IRQHandler,Default_Handler .weak UART9_IRQHandler .thumb_set 
UART9_IRQHandler,Default_Handler .weak USART10_IRQHandler .thumb_set USART10_IRQHandler,Default_Handler .weak LPUART1_IRQHandler .thumb_set LPUART1_IRQHandler,Default_Handler .weak CRS_IRQHandler .thumb_set CRS_IRQHandler,Default_Handler .weak ECC_IRQHandler .thumb_set ECC_IRQHandler,Default_Handler .weak DTS_IRQHandler .thumb_set DTS_IRQHandler,Default_Handler .weak WAKEUP_PIN_IRQHandler .thumb_set WAKEUP_PIN_IRQHandler,Default_Handler .weak OCTOSPI2_IRQHandler .thumb_set OCTOSPI2_IRQHandler,Default_Handler .weak OTFDEC1_IRQHandler .thumb_set OTFDEC1_IRQHandler,Default_Handler .weak OTFDEC2_IRQHandler .thumb_set OTFDEC2_IRQHandler,Default_Handler .weak GFXMMU_IRQHandler .thumb_set GFXMMU_IRQHandler,Default_Handler .weak BDMA1_IRQHandler .thumb_set BDMA1_IRQHandler,Default_Handler /************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
Aladdin-Wang/MicroBoot_Demo
6,431
STM32F7_APP/rt-thread/components/lwp/arch/risc-v/rv64/lwp_gcc.S
/* * Copyright (c) 2006-2020, RT-Thread Development Team * * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2018-12-10 Jesven first version * 2021-02-03 lizhirui port to riscv64 * 2021-02-19 lizhirui port to new version of rt-smart * 2022-11-08 Wangxiaoyao Cleanup codes; * Support new context switch * 2023-07-16 Shell Move part of the codes to C from asm in signal handling */ #include "rtconfig.h" #ifndef __ASSEMBLY__ #define __ASSEMBLY__ #endif /* __ASSEMBLY__ */ #include "cpuport.h" #include "encoding.h" #include "stackframe.h" #include "asm-generic.h" .section .text.lwp /* * void arch_start_umode(args, text, ustack, kstack); */ .global arch_start_umode .type arch_start_umode, % function arch_start_umode: // load kstack for user process csrw sscratch, a3 li t0, SSTATUS_SPP | SSTATUS_SIE // set as user mode, close interrupt csrc sstatus, t0 li t0, SSTATUS_SPIE // enable interrupt when return to user mode csrs sstatus, t0 csrw sepc, a1 mv a3, a2 sret//enter user mode /* * void arch_crt_start_umode(args, text, ustack, kstack); */ .global arch_crt_start_umode .type arch_crt_start_umode, % function arch_crt_start_umode: li t0, SSTATUS_SPP | SSTATUS_SIE // set as user mode, close interrupt csrc sstatus, t0 li t0, SSTATUS_SPIE // enable interrupt when return to user mode csrs sstatus, t0 csrw sepc, a1 mv s0, a0 mv s1, a1 mv s2, a2 mv s3, a3 mv a0, s2 call lwp_copy_return_code_to_user_stack mv a0, s2 call lwp_fix_sp mv sp, a0//user_sp mv ra, a0//return address mv a0, s0//args csrw sscratch, s3 sret//enter user mode /** * Unify exit point from kernel mode to enter user space * we handle following things here: * 1. restoring user mode debug state (not support yet) * 2. handling thread's exit request * 3. handling POSIX signal * 4. restoring user context * 5. 
jump to user mode */ .global arch_ret_to_user arch_ret_to_user: // TODO: we don't support kernel gdb server in risc-v yet // so we don't check debug state here and handle debugging bussiness call lwp_check_exit_request beqz a0, 1f mv a0, x0 call sys_exit 1: mv a0, sp call lwp_thread_signal_catch ret_to_user_exit: RESTORE_ALL // `RESTORE_ALL` also reset sp to user sp, and setup sscratch sret /** * Restore user context from exception frame stroraged in ustack * And handle pending signals; */ arch_signal_quit: LOAD a0, FRAME_OFF_SP(sp) call arch_signal_ucontext_restore /* reset kernel sp to the stack */ STORE sp, FRAME_OFF_SP(a0) /* return value is user sp */ mv sp, a0 /* restore user sp before enter trap */ addi a0, sp, CTX_REG_NR * REGBYTES csrw sscratch, a0 RESTORE_ALL SAVE_ALL j arch_ret_to_user /** * rt_noreturn * void arch_thread_signal_enter( * int signo, -> a0 * siginfo_t *psiginfo, -> a1 * void *exp_frame, -> a2 * void *entry_uaddr, -> a3 * lwp_sigset_t *save_sig_mask, -> a4 * ) */ .global arch_thread_signal_enter arch_thread_signal_enter: mv s3, a2 mv s2, a0 mv s1, a3 LOAD t0, FRAME_OFF_SP(a2) mv a3, t0 call arch_signal_ucontext_save /** restore kernel sp */ addi sp, s3, CTX_REG_NR * REGBYTES /** * set regiter RA to user signal handler * set sp to user sp & save kernel sp in sscratch */ mv ra, a0 csrw sscratch, sp mv sp, a0 /** * s1 is signal_handler, * s1 = !s1 ? 
lwp_sigreturn : s1; */ bnez s1, 1f mv s1, ra 1: /* enter user mode and enable interrupt when return to user mode */ li t0, SSTATUS_SPP csrc sstatus, t0 li t0, SSTATUS_SPIE csrs sstatus, t0 /* sepc <- signal_handler */ csrw sepc, s1 /* a0 <- signal id */ mv a0, s2 /* a1 <- siginfo */ add a1, sp, 16 /* dummy a2 */ mv a2, a1 /** * handler(signo, psi, ucontext); */ sret .align 3 lwp_debugreturn: li a7, 0xff ecall .align 3 .global lwp_sigreturn lwp_sigreturn: li a7, 0xfe ecall .align 3 lwp_sigreturn_end: .align 3 .global lwp_thread_return lwp_thread_return: li a0, 0 li a7, 1 ecall .align 3 .global lwp_thread_return_end lwp_thread_return_end: .globl arch_get_tidr arch_get_tidr: mv a0, tp ret .global arch_set_thread_area arch_set_thread_area: .globl arch_set_tidr arch_set_tidr: mv tp, a0 ret .global arch_clone_exit .global arch_fork_exit arch_fork_exit: arch_clone_exit: j arch_syscall_exit START_POINT(syscall_entry) #ifndef ARCH_USING_NEW_CTX_SWITCH //swap to thread kernel stack csrr t0, sstatus andi t0, t0, 0x100 beqz t0, __restore_sp_from_tcb __restore_sp_from_sscratch: // from kernel csrr t0, sscratch j __move_stack_context __restore_sp_from_tcb: // from user la a0, rt_current_thread LOAD a0, 0(a0) jal get_thread_kernel_stack_top mv t0, a0 __move_stack_context: mv t1, sp//src mv sp, t0//switch stack addi sp, sp, -CTX_REG_NR * REGBYTES //copy context li s0, CTX_REG_NR//cnt mv t2, sp//dst copy_context_loop: LOAD t0, 0(t1) STORE t0, 0(t2) addi s0, s0, -1 addi t1, t1, 8 addi t2, t2, 8 bnez s0, copy_context_loop #endif /* ARCH_USING_NEW_CTX_SWITCH */ /* fetch SYSCALL ID */ LOAD a7, 17 * REGBYTES(sp) addi a7, a7, -0xfe beqz a7, arch_signal_quit #ifdef ARCH_MM_MMU /* save setting when syscall enter */ call rt_thread_self call lwp_user_setting_save #endif mv a0, sp OPEN_INTERRUPT call syscall_handler j arch_syscall_exit START_POINT_END(syscall_entry) .global arch_syscall_exit arch_syscall_exit: CLOSE_INTERRUPT #if defined(ARCH_MM_MMU) LOAD s0, 2 * REGBYTES(sp) andi s0, s0, 
0x100 bnez s0, dont_ret_to_user j arch_ret_to_user #endif dont_ret_to_user: #ifdef ARCH_MM_MMU /* restore setting when syscall exit */ call rt_thread_self call lwp_user_setting_restore /* after restore the reg `tp`, need modify context */ STORE tp, 4 * REGBYTES(sp) #endif //restore context RESTORE_ALL csrw sscratch, zero sret
Aladdin-Wang/MicroBoot_Demo
13,128
STM32F7_APP/rt-thread/components/lwp/arch/aarch64/cortex-a/lwp_gcc.S
/* * Copyright (c) 2006-2023, RT-Thread Development Team * * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2021-05-18 Jesven first version * 2023-07-16 Shell Move part of the codes to C from asm in signal handling * 2023-08-03 Shell Support of syscall restart (SA_RESTART) */ #ifndef __ASSEMBLY__ #define __ASSEMBLY__ #endif #include "rtconfig.h" #include "asm-generic.h" #include "asm-fpu.h" #include "armv8.h" #include "lwp_arch.h" /********************* * SPSR BIT * *********************/ #define SPSR_Mode(v) ((v) << 0) #define SPSR_A64 (0 << 4) #define SPSR_RESEVRED_5 (0 << 5) #define SPSR_FIQ_MASKED(v) ((v) << 6) #define SPSR_IRQ_MASKED(v) ((v) << 7) #define SPSR_SERROR_MASKED(v) ((v) << 8) #define SPSR_D_MASKED(v) ((v) << 9) #define SPSR_RESEVRED_10_19 (0 << 10) #define SPSR_IL(v) ((v) << 20) #define SPSR_SS(v) ((v) << 21) #define SPSR_RESEVRED_22_27 (0 << 22) #define SPSR_V(v) ((v) << 28) #define SPSR_C(v) ((v) << 29) #define SPSR_Z(v) ((v) << 30) #define SPSR_N(v) ((v) << 31) /**************************************************/ .text /* * void arch_start_umode(args, text, ustack, kstack); */ .global arch_start_umode .type arch_start_umode, % function arch_start_umode: mov sp, x3 mov x4, #(SPSR_Mode(0) | SPSR_A64) msr daifset, #3 dsb sy mrs x30, sp_el0 /* user stack top */ msr sp_el0, x2 mov x3, x2 msr spsr_el1, x4 msr elr_el1, x1 eret /* * void arch_crt_start_umode(args, text, ustack, kstack); */ .global arch_crt_start_umode .type arch_crt_start_umode, % function arch_crt_start_umode: sub x4, x2, #0x10 adr x2, lwp_thread_return ldr x5, [x2] str x5, [x4] ldr x5, [x2, #4] str x5, [x4, #4] ldr x5, [x2, #8] str x5, [x4, #8] mov x5, x4 dc cvau, x5 add x5, x5, #8 dc cvau, x5 dsb sy ic ialluis dsb sy msr sp_el0, x4 mov sp, x3 mov x4, #(SPSR_Mode(0) | SPSR_A64) msr daifset, #3 dsb sy mrs x30, sp_el0 msr spsr_el1, x4 msr elr_el1, x1 eret .global arch_get_user_sp arch_get_user_sp: mrs x0, sp_el0 ret .global arch_fork_exit .global 
arch_clone_exit arch_fork_exit: arch_clone_exit: b arch_syscall_exit /* void lwp_exec_user(void *args, void *kernel_stack, void *user_entry) */ .global lwp_exec_user lwp_exec_user: mov sp, x1 mov x4, #(SPSR_Mode(0) | SPSR_A64) ldr x3, =0x0000ffff80000000 msr daifset, #3 msr spsr_el1, x4 msr elr_el1, x2 eret /* * void SVC_Handler(regs); * since this routine reset the SP, we take it as a start point */ START_POINT(SVC_Handler) /* x0 is initial sp */ mov sp, x0 msr daifclr, #3 /* enable interrupt */ bl rt_thread_self bl lwp_user_setting_save ldp x8, x9, [sp, #(CONTEXT_OFFSET_X8)] and x0, x8, #0xf000 cmp x0, #0xe000 beq arch_signal_quit cmp x0, #0xf000 beq ret_from_user uxtb x0, w8 bl lwp_get_sys_api cmp x0, xzr mov x30, x0 beq arch_syscall_exit ldp x0, x1, [sp, #(CONTEXT_OFFSET_X0)] ldp x2, x3, [sp, #(CONTEXT_OFFSET_X2)] ldp x4, x5, [sp, #(CONTEXT_OFFSET_X4)] ldp x6, x7, [sp, #(CONTEXT_OFFSET_X6)] blr x30 /* jump explictly, make this code position independant */ b arch_syscall_exit START_POINT_END(SVC_Handler) .global arch_syscall_exit arch_syscall_exit: /** * @brief back up former x0 which is required to restart syscall, then setup * syscall return value in stack frame */ mov x1, sp bl arch_syscall_prepare_signal msr daifset, #3 ldp x2, x3, [sp], #0x10 /* SPSR and ELR. */ msr spsr_el1, x3 msr elr_el1, x2 ldp x29, x30, [sp], #0x10 msr sp_el0, x29 ldp x28, x29, [sp], #0x10 msr fpcr, x28 msr fpsr, x29 ldp x28, x29, [sp], #0x10 ldp x26, x27, [sp], #0x10 ldp x24, x25, [sp], #0x10 ldp x22, x23, [sp], #0x10 ldp x20, x21, [sp], #0x10 ldp x18, x19, [sp], #0x10 ldp x16, x17, [sp], #0x10 ldp x14, x15, [sp], #0x10 ldp x12, x13, [sp], #0x10 ldp x10, x11, [sp], #0x10 ldp x8, x9, [sp], #0x10 ldp x6, x7, [sp], #0x10 ldp x4, x5, [sp], #0x10 ldp x2, x3, [sp], #0x10 ldp x0, x1, [sp], #0x10 RESTORE_FPU sp /* the sp is reset to the outer most level, irq and fiq are disabled */ START_POINT(arch_ret_to_user) msr daifset, #3 /* save exception frame */ SAVE_FPU sp stp x0, x1, [sp, #-0x10]! 
stp x2, x3, [sp, #-0x10]! stp x4, x5, [sp, #-0x10]! stp x6, x7, [sp, #-0x10]! stp x8, x9, [sp, #-0x10]! stp x10, x11, [sp, #-0x10]! stp x12, x13, [sp, #-0x10]! stp x14, x15, [sp, #-0x10]! stp x16, x17, [sp, #-0x10]! stp x18, x19, [sp, #-0x10]! stp x20, x21, [sp, #-0x10]! stp x22, x23, [sp, #-0x10]! stp x24, x25, [sp, #-0x10]! stp x26, x27, [sp, #-0x10]! stp x28, x29, [sp, #-0x10]! mrs x0, fpcr mrs x1, fpsr stp x0, x1, [sp, #-0x10]! stp x29, x30, [sp, #-0x10]! /* pre-action */ bl lwp_check_debug bl lwp_check_exit_request cbz w0, 1f /* exit on event */ msr daifclr, #3 mov x0, xzr b sys_exit 1: /* check if dbg ops exist */ ldr x0, =rt_dbg_ops ldr x0, [x0] cbz x0, 3f bl dbg_thread_in_debug mov x1, #(1 << 21) mrs x2, spsr_el1 cbz w0, 2f orr x2, x2, x1 msr spsr_el1, x2 b 3f 2: bic x2, x2, x1 msr spsr_el1, x2 3: /** * push 2 dummy words to simulate a exception frame of interrupt * Note: in kernel state, the context switch dont saved the context */ mrs x0, spsr_el1 mrs x1, elr_el1 stp x1, x0, [sp, #-0x10]! mov x0, sp msr daifclr, #3 bl lwp_thread_signal_catch msr daifset, #3 ldp x1, x0, [sp], #0x10 msr spsr_el1, x0 msr elr_el1, x1 /* check debug */ /* restore exception frame */ ldp x29, x30, [sp], #0x10 ldp x0, x1, [sp], #0x10 msr fpcr, x0 msr fpsr, x1 ldp x28, x29, [sp], #0x10 ldp x26, x27, [sp], #0x10 ldp x24, x25, [sp], #0x10 ldp x22, x23, [sp], #0x10 ldp x20, x21, [sp], #0x10 ldp x18, x19, [sp], #0x10 ldp x16, x17, [sp], #0x10 ldp x14, x15, [sp], #0x10 ldp x12, x13, [sp], #0x10 ldp x10, x11, [sp], #0x10 ldp x8, x9, [sp], #0x10 ldp x6, x7, [sp], #0x10 ldp x4, x5, [sp], #0x10 ldp x2, x3, [sp], #0x10 ldp x0, x1, [sp], #0x10 RESTORE_FPU sp stp x0, x1, [sp, #-0x10]! ldr x0, =rt_dbg_ops ldr x0, [x0] cmp x0, xzr ldp x0, x1, [sp], #0x10 beq 1f /* save */ SAVE_FPU sp stp x0, x1, [sp, #-0x10]! stp x2, x3, [sp, #-0x10]! stp x4, x5, [sp, #-0x10]! stp x6, x7, [sp, #-0x10]! stp x8, x9, [sp, #-0x10]! stp x10, x11, [sp, #-0x10]! stp x12, x13, [sp, #-0x10]! stp x14, x15, [sp, #-0x10]! 
stp x16, x17, [sp, #-0x10]! stp x18, x19, [sp, #-0x10]! stp x20, x21, [sp, #-0x10]! stp x22, x23, [sp, #-0x10]! stp x24, x25, [sp, #-0x10]! stp x26, x27, [sp, #-0x10]! stp x28, x29, [sp, #-0x10]! mrs x0, fpcr mrs x1, fpsr stp x0, x1, [sp, #-0x10]! stp x29, x30, [sp, #-0x10]! mrs x0, elr_el1 bl dbg_attach_req /* restore */ ldp x29, x30, [sp], #0x10 ldp x0, x1, [sp], #0x10 msr fpcr, x0 msr fpsr, x1 ldp x28, x29, [sp], #0x10 ldp x26, x27, [sp], #0x10 ldp x24, x25, [sp], #0x10 ldp x22, x23, [sp], #0x10 ldp x20, x21, [sp], #0x10 ldp x18, x19, [sp], #0x10 ldp x16, x17, [sp], #0x10 ldp x14, x15, [sp], #0x10 ldp x12, x13, [sp], #0x10 ldp x10, x11, [sp], #0x10 ldp x8, x9, [sp], #0x10 ldp x6, x7, [sp], #0x10 ldp x4, x5, [sp], #0x10 ldp x2, x3, [sp], #0x10 ldp x0, x1, [sp], #0x10 RESTORE_FPU sp 1: eret START_POINT_END(arch_ret_to_user) .global lwp_check_debug lwp_check_debug: ldr x0, =rt_dbg_ops ldr x0, [x0] cbnz x0, 1f ret 1: stp x29, x30, [sp, #-0x10]! bl dbg_check_suspend cbz w0, lwp_check_debug_quit mrs x2, sp_el0 sub x2, x2, #0x10 mov x3, x2 msr sp_el0, x2 ldr x0, =lwp_debugreturn ldr w1, [x0] str w1, [x2] ldr w1, [x0, #4] str w1, [x2, #4] dc cvau, x2 add x2, x2, #4 dc cvau, x2 dsb sy isb sy ic ialluis isb sy mrs x0, elr_el1 mrs x1, spsr_el1 stp x0, x1, [sp, #-0x10]! 
msr elr_el1, x3 /* lwp_debugreturn */ mov x1, #(SPSR_Mode(0) | SPSR_A64) orr x1, x1, #(1 << 21) msr spsr_el1, x1 eret ret_from_user: /* sp_el0 += 16 for drop ins lwp_debugreturn */ mrs x0, sp_el0 add x0, x0, #0x10 msr sp_el0, x0 /* now is el1, sp is pos(empty) - sizeof(context) */ mov x0, sp add x0, x0, #0x220 mov sp, x0 ldp x0, x1, [sp], #0x10 /* x1 is origin spsr_el1 */ msr elr_el1, x0 /* x0 is origin elr_el1 */ msr spsr_el1, x1 lwp_check_debug_quit: ldp x29, x30, [sp], #0x10 ret .global arch_syscall_restart arch_syscall_restart: msr daifset, 3 mov sp, x1 /* drop exception frame in user stack */ msr sp_el0, x0 /* restore previous exception frame */ msr spsel, #0 ldp x2, x3, [sp], #0x10 msr elr_el1, x2 msr spsr_el1, x3 ldp x29, x30, [sp], #0x10 ldp x28, x29, [sp], #0x10 msr fpcr, x28 msr fpsr, x29 ldp x28, x29, [sp], #0x10 ldp x26, x27, [sp], #0x10 ldp x24, x25, [sp], #0x10 ldp x22, x23, [sp], #0x10 ldp x20, x21, [sp], #0x10 ldp x18, x19, [sp], #0x10 ldp x16, x17, [sp], #0x10 ldp x14, x15, [sp], #0x10 ldp x12, x13, [sp], #0x10 ldp x10, x11, [sp], #0x10 ldp x8, x9, [sp], #0x10 ldp x6, x7, [sp], #0x10 ldp x4, x5, [sp], #0x10 ldp x2, x3, [sp], #0x10 ldp x0, x1, [sp], #0x10 RESTORE_FPU sp msr spsel, #1 b vector_exception arch_signal_quit: /* drop current exception frame */ add sp, sp, #CONTEXT_SIZE mov x1, sp mrs x0, sp_el0 bl arch_signal_ucontext_restore add x0, x0, #-CONTEXT_SIZE msr sp_el0, x0 /** * Note: Since we will reset spsr, but the reschedule will * corrupt the spsr, we diable irq for a short period here */ msr daifset, #3 /* restore previous exception frame */ msr spsel, #0 ldp x2, x3, [sp], #0x10 msr elr_el1, x2 msr spsr_el1, x3 ldp x29, x30, [sp], #0x10 ldp x28, x29, [sp], #0x10 msr fpcr, x28 msr fpsr, x29 ldp x28, x29, [sp], #0x10 ldp x26, x27, [sp], #0x10 ldp x24, x25, [sp], #0x10 ldp x22, x23, [sp], #0x10 ldp x20, x21, [sp], #0x10 ldp x18, x19, [sp], #0x10 ldp x16, x17, [sp], #0x10 ldp x14, x15, [sp], #0x10 ldp x12, x13, [sp], #0x10 ldp x10, x11, [sp], 
#0x10 ldp x8, x9, [sp], #0x10 ldp x6, x7, [sp], #0x10 ldp x4, x5, [sp], #0x10 ldp x2, x3, [sp], #0x10 ldp x0, x1, [sp], #0x10 RESTORE_FPU sp msr spsel, #1 b arch_ret_to_user /** * rt_noreturn * void arch_thread_signal_enter( * int signo, -> x0 * siginfo_t *psiginfo, -> x1 * void *exp_frame, -> x2 * void *entry_uaddr, -> x3 * lwp_sigset_t *save_sig_mask, -> x4 * ) */ .global arch_thread_signal_enter arch_thread_signal_enter: mov x19, x0 mov x20, x2 /* exp_frame */ mov x21, x3 /** * move exception frame to user stack */ mrs x0, sp_el0 mov x3, x4 /* arch_signal_ucontext_save(user_sp, psiginfo, exp_frame, save_sig_mask); */ bl arch_signal_ucontext_save mov x22, x0 /* get and saved pointer to uframe */ bl arch_signal_ucontext_get_frame mov x2, x0 mov x0, x22 dc cvau, x0 dsb sy ic ialluis dsb sy /** * Brief: Prepare the environment for signal handler */ /** * reset the cpsr * and drop exp frame on kernel stack, reset kernel sp * * Note: Since we will reset spsr, but the reschedule will * corrupt the spsr, we diable irq for a short period here */ msr daifset, #3 ldr x1, [x20, #CONTEXT_OFFSET_SPSR_EL1] msr spsr_el1, x1 add sp, x20, #CONTEXT_SIZE /** reset user sp */ msr sp_el0, x0 /** set the return address to the sigreturn */ mov x30, x0 cbnz x21, 1f mov x21, x30 1: /** set the entry address of signal handler */ msr elr_el1, x21 /* siginfo is above the return address */ add x1, x30, UCTX_ABI_OFFSET_TO_SI /* uframe is saved in x2 */ mov x0, x19 /** * handler(signo, psi, ucontext); * */ eret lwp_debugreturn: mov x8, 0xf000 svc #0 .global lwp_sigreturn lwp_sigreturn: mov x8, #0xe000 svc #0 lwp_thread_return: mov x0, xzr mov x8, #0x01 svc #0 .globl arch_get_tidr arch_get_tidr: mrs x0, tpidr_el0 ret .global arch_set_thread_area arch_set_thread_area: .globl arch_set_tidr arch_set_tidr: msr tpidr_el0, x0 ret
Aladdin-Wang/MicroBoot_Demo
9,481
STM32F7_APP/rt-thread/components/lwp/arch/arm/cortex-a/lwp_gcc.S
/* * Copyright (c) 2006-2020, RT-Thread Development Team * * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2018-12-10 Jesven first version * 2023-07-16 Shell Move part of the codes to C from asm in signal handling */ #include "rtconfig.h" #include "asm-generic.h" #define Mode_USR 0x10 #define Mode_FIQ 0x11 #define Mode_IRQ 0x12 #define Mode_SVC 0x13 #define Mode_MON 0x16 #define Mode_ABT 0x17 #define Mode_UDF 0x1B #define Mode_SYS 0x1F #define A_Bit 0x100 #define I_Bit 0x80 @; when I bit is set, IRQ is disabled #define F_Bit 0x40 @; when F bit is set, FIQ is disabled #define T_Bit 0x20 .cpu cortex-a9 .syntax unified .text /* * void arch_start_umode(args, text, ustack, kstack); */ .global arch_start_umode .type arch_start_umode, % function arch_start_umode: mrs r9, cpsr bic r9, #0x1f orr r9, #Mode_USR cpsid i msr spsr, r9 mov sp, r3 /* set user stack top */ cps #Mode_SYS mov sp, r2 cps #Mode_SVC mov r3, r2 /* set data address. */ movs pc, r1 /* * void arch_crt_start_umode(args, text, ustack, kstack); */ .global arch_crt_start_umode .type arch_crt_start_umode, % function arch_crt_start_umode: cps #Mode_SYS sub sp, r2, #16 ldr r2, =lwp_thread_return ldr r4, [r2] str r4, [sp] ldr r4, [r2, #4] str r4, [sp, #4] ldr r4, [r2, #8] str r4, [sp, #8] mov r4, sp mcr p15, 0, r4, c7, c11, 1 ;//dc cmvau add r4, #4 mcr p15, 0, r4, c7, c11, 1 ;//dc cmvau add r4, #4 mcr p15, 0, r4, c7, c11, 1 ;//dc cmvau dsb isb mcr p15, 0, r4, c7, c5, 0 ;//iciallu dsb isb mov lr, sp cps #Mode_SVC mrs r9, cpsr bic r9, #0x1f orr r9, #Mode_USR cpsid i msr spsr, r9 mov sp, r3 /* set data address. 
*/ movs pc, r1 /* void arch_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_stack, void **thread_sp); */ .global arch_set_thread_context arch_set_thread_context: sub r1, #(10 * 4 + 4 * 4) /* {r4 - r12, lr} , {r4, r5, spsr, u_pc} */ stmfd r1!, {r0} mov r12, #0 stmfd r1!, {r12} stmfd r1!, {r1 - r12} stmfd r1!, {r12} /* new thread return value */ mrs r12, cpsr orr r12, #(1 << 7) /* disable irq */ stmfd r1!, {r12} /* spsr */ mov r12, #0 stmfd r1!, {r12} /* now user lr is 0 */ stmfd r1!, {r2} /* user sp */ #ifdef RT_USING_FPU stmfd r1!, {r12} /* not use fpu */ #endif str r1, [r3] mov pc, lr .global arch_get_user_sp arch_get_user_sp: cps #Mode_SYS mov r0, sp cps #Mode_SVC mov pc, lr .global sys_fork .global sys_vfork .global arch_fork_exit sys_fork: sys_vfork: push {r4 - r12, lr} bl _sys_fork arch_fork_exit: pop {r4 - r12, lr} b arch_syscall_exit .global sys_clone .global arch_clone_exit sys_clone: push {r4 - r12, lr} bl _sys_clone arch_clone_exit: pop {r4 - r12, lr} b arch_syscall_exit /* void lwp_exec_user(void *args, void *kernel_stack, void *user_entry) */ .global lwp_exec_user lwp_exec_user: cpsid i mov sp, r1 mov lr, r2 mov r2, #Mode_USR msr spsr_cxsf, r2 ldr r3, =0x80000000 b arch_ret_to_user /* * void SVC_Handler(void); */ .global vector_swi .type vector_swi, % function START_POINT(vector_swi) push {lr} mrs lr, spsr push {r4, r5, lr} cpsie i push {r0 - r3, r12} bl rt_thread_self bl lwp_user_setting_save and r0, r7, #0xf000 cmp r0, #0xe000 beq arch_signal_quit cmp r0, #0xf000 beq ret_from_user and r0, r7, #0xff bl lwp_get_sys_api cmp r0, #0 /* r0 = api */ mov lr, r0 pop {r0 - r3, r12} beq arch_syscall_exit blx lr START_POINT_END(vector_swi) .global arch_syscall_exit arch_syscall_exit: cpsid i pop {r4, r5, lr} msr spsr_cxsf, lr pop {lr} .global arch_ret_to_user arch_ret_to_user: push {r0-r12, lr} bl lwp_check_debug bl lwp_check_exit_request cmp r0, #0 beq 1f mov r0, #0 b sys_exit 1: mov r0, sp /* r0 -> exp frame */ bl lwp_thread_signal_catch 
pop {r0-r12, lr} push {r0} ldr r0, =rt_dbg_ops ldr r0, [r0] cmp r0, #0 pop {r0} beq 2f push {r0-r3, r12, lr} mov r0, lr bl dbg_attach_req pop {r0-r3, r12, lr} 2: movs pc, lr #ifdef RT_USING_SMART .global lwp_check_debug lwp_check_debug: ldr r0, =rt_dbg_ops ldr r0, [r0] cmp r0, #0 bne 1f bx lr 1: push {lr} bl dbg_check_suspend cmp r0, #0 beq lwp_check_debug_quit cps #Mode_SYS sub sp, #8 ldr r0, =lwp_debugreturn ldr r1, [r0] str r1, [sp] ldr r1, [r0, #4] str r1, [sp, #4] mov r1, sp mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau add r1, #4 mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau dsb isb mcr p15, 0, r0, c7, c5, 0 ;//iciallu dsb isb mov r0, sp /* lwp_debugreturn */ cps #Mode_SVC mrs r1, spsr push {r1} mov r1, #Mode_USR msr spsr_cxsf, r1 movs pc, r0 ret_from_user: cps #Mode_SYS add sp, #8 cps #Mode_SVC /* pop {r0 - r3, r12} pop {r4 - r6, lr} */ add sp, #(4*9) pop {r4} msr spsr_cxsf, r4 lwp_check_debug_quit: pop {pc} arch_signal_quit: cpsid i /* drop context of signal handler */ pop {r0 - r3, r12} pop {r4, r5, lr} pop {lr} /* restore context */ cps #Mode_SYS mov r0, sp cps #Mode_SVC bl arch_signal_ucontext_restore /* lr <- *(&frame.ip) */ ldr lr, [r0] cps #Mode_SYS mov sp, r0 /* drop ip in the frame and restore cpsr */ pop {r0} pop {r0} msr spsr_cxsf, r0 pop {r0-r12, lr} cps #Mode_SVC b arch_ret_to_user /** * rt_noreturn * void arch_thread_signal_enter( * int signo, -> r0 * siginfo_t *psiginfo, -> r1 * void *exp_frame, -> r2 * void *entry_uaddr, -> r3 * lwp_sigset_t *save_sig_mask, -> ?? 
* ) */ .global arch_thread_signal_enter arch_thread_signal_enter: mov r4, r0 mov r5, r3 cps #Mode_SYS mov r0, lr mov r3, sp cps #Mode_SVC bl arch_signal_ucontext_save /* reset user sp */ cps #Mode_SYS mov sp, r0 mov lr, r0 cps #Mode_SVC /* r1,r2 <- new_user_sp */ mov r1, r0 mov r2, r0 /* r0 <- signo */ mov r0, r4 mov r1, r0 mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau add r1, #4 mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau dsb isb mcr p15, 0, r0, c7, c5, 0 ;//iciallu dsb isb /* r4 <- &sigreturn */ mov r4, r2 /* lr <- user_handler() */ mov lr, r5 cmp lr, #0 moveq lr, r4 /* r1 <- siginfo */ mov r1, r2 add r1, #8 /* handler(signo, siginfo, ucontext) */ movs pc, lr lwp_debugreturn: mov r7, #0xf000 svc #0 .global lwp_sigreturn lwp_sigreturn: mov r7, #0xe000 svc #0 lwp_thread_return: mov r0, #0 mov r7, #0x01 svc #0 #endif .global check_vfp check_vfp: #ifdef RT_USING_FPU vmrs r0, fpexc ubfx r0, r0, #30, #1 #else mov r0, #0 #endif mov pc, lr .global get_vfp get_vfp: #ifdef RT_USING_FPU vstmia r0!, {d0-d15} vstmia r0!, {d16-d31} vmrs r1, fpscr str r1, [r0] #endif mov pc, lr .globl arch_get_tidr arch_get_tidr: mrc p15, 0, r0, c13, c0, 3 bx lr .global arch_set_thread_area arch_set_thread_area: .globl arch_set_tidr arch_set_tidr: mcr p15, 0, r0, c13, c0, 3 bx lr /* kuser suppurt */ .macro kuser_pad, sym, size .if (. - \sym) & 3 .rept 4 - (. - \sym) & 3 .byte 0 .endr .endif .rept (\size - (. 
- \sym)) / 4 .word 0xe7fddef1 .endr .endm .align 5 .globl __kuser_helper_start __kuser_helper_start: __kuser_cmpxchg64: @ 0xffff0f60 stmfd sp!, {r4, r5, r6, lr} ldmia r0, {r4, r5} @ load old val ldmia r1, {r6, lr} @ load new val 1: ldmia r2, {r0, r1} @ load current val eors r3, r0, r4 @ compare with oldval (1) eorseq r3, r1, r5 @ compare with oldval (2) 2: stmiaeq r2, {r6, lr} @ store newval if eq rsbs r0, r3, #0 @ set return val and C flag ldmfd sp!, {r4, r5, r6, pc} kuser_pad __kuser_cmpxchg64, 64 __kuser_memory_barrier: @ 0xffff0fa0 dmb mov pc, lr kuser_pad __kuser_memory_barrier, 32 __kuser_cmpxchg: @ 0xffff0fc0 1: ldr r3, [r2] @ load current val subs r3, r3, r0 @ compare with oldval 2: streq r1, [r2] @ store newval if eq rsbs r0, r3, #0 @ set return val and C flag mov pc, lr kuser_pad __kuser_cmpxchg, 32 __kuser_get_tls: @ 0xffff0fe0 mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code mov pc, lr ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init kuser_pad __kuser_get_tls, 16 .rep 3 .word 0 @ 0xffff0ff0 software TLS value, then .endr @ pad up to __kuser_helper_version __kuser_helper_version: @ 0xffff0ffc .word ((__kuser_helper_end - __kuser_helper_start) >> 5) .globl __kuser_helper_end __kuser_helper_end:
Aladdin-Wang/MicroBoot_Demo
1,321
STM32F7_APP/rt-thread/components/lwp/arch/x86/i386/lwp_gcc.S
/* * Copyright (c) 2006-2021, RT-Thread Development Team * * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2021-7-14 JasonHu first version */ #include "rtconfig.h" .section .text.lwp /* * void lwp_switch_to_user(frame); */ .global lwp_switch_to_user lwp_switch_to_user: movl 0x4(%esp), %esp addl $4,%esp // skip intr no popal popl %gs popl %fs popl %es popl %ds addl $4, %esp // skip error_code iret // enter to user mode .extern arch_syscall_exit .global sys_fork .global sys_vfork .global arch_fork_exit sys_fork: sys_vfork: jmp _sys_fork arch_fork_exit: jmp arch_syscall_exit .global sys_clone .global arch_clone_exit sys_clone: jmp _sys_clone arch_clone_exit: jmp arch_syscall_exit /** * rt thread return code */ .align 4 .global lwp_thread_return lwp_thread_return: movl $1, %eax // eax = 1, sys_exit movl $0, %ebx int $0x80 .align 4 .global lwp_thread_return_end lwp_thread_return_end: #ifdef RT_USING_SIGNALS /** * signal return code */ .align 4 .global lwp_signal_return lwp_signal_return: movl $0xe000, %eax // special syscall id for return code int $0x80 .align 4 .global lwp_signal_return_end lwp_signal_return_end: #endif /* RT_USING_SIGNALS */
Aladdin-Wang/MicroBoot_Demo
5,653
STM32F7_APP/rt-thread/libcpu/arm/cortex-m3/context_rvds.S
;/* ; * Copyright (c) 2006-2022, RT-Thread Development Team ; * ; * SPDX-License-Identifier: Apache-2.0 ; * ; * Change Logs: ; * Date Author Notes ; * 2009-01-17 Bernard first version ; * 2013-06-18 aozima add restore MSP feature. ; * 2013-07-09 aozima enhancement hard fault exception handler. ; */ ;/** ; * @addtogroup CORTEX-M3 ; */ ;/*@{*/ SCB_VTOR EQU 0xE000ED08 ; Vector Table Offset Register NVIC_INT_CTRL EQU 0xE000ED04 ; interrupt control state register NVIC_SYSPRI2 EQU 0xE000ED20 ; system priority register (2) NVIC_PENDSV_PRI EQU 0xFFFF0000 ; PendSV and SysTick priority value (lowest) NVIC_PENDSVSET EQU 0x10000000 ; value to trigger PendSV exception AREA |.text|, CODE, READONLY, ALIGN=2 THUMB REQUIRE8 PRESERVE8 IMPORT rt_thread_switch_interrupt_flag IMPORT rt_interrupt_from_thread IMPORT rt_interrupt_to_thread ;/* ; * rt_base_t rt_hw_interrupt_disable(); ; */ rt_hw_interrupt_disable PROC EXPORT rt_hw_interrupt_disable MRS r0, PRIMASK CPSID I BX LR ENDP ;/* ; * void rt_hw_interrupt_enable(rt_base_t level); ; */ rt_hw_interrupt_enable PROC EXPORT rt_hw_interrupt_enable MSR PRIMASK, r0 BX LR ENDP ;/* ; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to); ; * r0 --> from ; * r1 --> to ; */ rt_hw_context_switch_interrupt EXPORT rt_hw_context_switch_interrupt rt_hw_context_switch PROC EXPORT rt_hw_context_switch ; set rt_thread_switch_interrupt_flag to 1 LDR r2, =rt_thread_switch_interrupt_flag LDR r3, [r2] CMP r3, #1 BEQ _reswitch MOV r3, #1 STR r3, [r2] LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread STR r0, [r2] _reswitch LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread STR r1, [r2] LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch) LDR r1, =NVIC_PENDSVSET STR r1, [r0] BX LR ENDP ; r0 --> switch from thread stack ; r1 --> switch to thread stack ; psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack PendSV_Handler PROC EXPORT PendSV_Handler ; disable interrupt to protect context switch MRS r2, 
PRIMASK CPSID I ; get rt_thread_switch_interrupt_flag LDR r0, =rt_thread_switch_interrupt_flag LDR r1, [r0] CBZ r1, pendsv_exit ; pendsv already handled ; clear rt_thread_switch_interrupt_flag to 0 MOV r1, #0x00 STR r1, [r0] LDR r0, =rt_interrupt_from_thread LDR r1, [r0] CBZ r1, switch_to_thread ; skip register save at the first time MRS r1, psp ; get from thread stack pointer STMFD r1!, {r4 - r11} ; push r4 - r11 register LDR r0, [r0] STR r1, [r0] ; update from thread stack pointer switch_to_thread LDR r1, =rt_interrupt_to_thread LDR r1, [r1] LDR r1, [r1] ; load thread stack pointer LDMFD r1!, {r4 - r11} ; pop r4 - r11 register MSR psp, r1 ; update stack pointer pendsv_exit ; restore interrupt MSR PRIMASK, r2 ORR lr, lr, #0x04 BX lr ENDP ;/* ; * void rt_hw_context_switch_to(rt_uint32 to); ; * r0 --> to ; * this fucntion is used to perform the first thread switch ; */ rt_hw_context_switch_to PROC EXPORT rt_hw_context_switch_to ; set to thread LDR r1, =rt_interrupt_to_thread STR r0, [r1] ; set from thread to 0 LDR r1, =rt_interrupt_from_thread MOV r0, #0x0 STR r0, [r1] ; set interrupt flag to 1 LDR r1, =rt_thread_switch_interrupt_flag MOV r0, #1 STR r0, [r1] ; set the PendSV and SysTick exception priority LDR r0, =NVIC_SYSPRI2 LDR r1, =NVIC_PENDSV_PRI LDR.W r2, [r0,#0x00] ; read ORR r1,r1,r2 ; modify STR r1, [r0] ; write-back ; trigger the PendSV exception (causes context switch) LDR r0, =NVIC_INT_CTRL LDR r1, =NVIC_PENDSVSET STR r1, [r0] ; restore MSP LDR r0, =SCB_VTOR LDR r0, [r0] LDR r0, [r0] MSR msp, r0 ; enable interrupts at processor level CPSIE F CPSIE I ; ensure PendSV exception taken place before subsequent operation DSB ISB ; never reach here! 
ENDP ; compatible with old version rt_hw_interrupt_thread_switch PROC EXPORT rt_hw_interrupt_thread_switch BX lr ENDP IMPORT rt_hw_hard_fault_exception EXPORT HardFault_Handler HardFault_Handler PROC ; get current context TST lr, #0x04 ; if(!EXC_RETURN[2]) ITE EQ MRSEQ r0, msp ; [2]=0 ==> Z=1, get fault context from handler. MRSNE r0, psp ; [2]=1 ==> Z=0, get fault context from thread. STMFD r0!, {r4 - r11} ; push r4 - r11 register STMFD r0!, {lr} ; push exec_return register TST lr, #0x04 ; if(!EXC_RETURN[2]) ITE EQ MSREQ msp, r0 ; [2]=0 ==> Z=1, update stack pointer to MSP. MSRNE psp, r0 ; [2]=1 ==> Z=0, update stack pointer to PSP. PUSH {lr} BL rt_hw_hard_fault_exception POP {lr} ORR lr, lr, #0x04 BX lr ENDP ALIGN 4 END
Aladdin-Wang/MicroBoot_Demo
6,133
STM32F7_APP/rt-thread/libcpu/arm/cortex-m3/context_gcc.S
/* * Copyright (c) 2006-2022, RT-Thread Development Team * * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2009-10-11 Bernard First version * 2010-12-29 onelife Modify for EFM32 * 2011-06-17 onelife Merge all of the assembly source code into context_gcc.S * 2011-07-12 onelife Add interrupt context check function * 2013-06-18 aozima add restore MSP feature. * 2013-07-09 aozima enhancement hard fault exception handler. */ .cpu cortex-m3 .fpu softvfp .syntax unified .thumb .text .equ SCB_VTOR, 0xE000ED08 /* Vector Table Offset Register */ .equ ICSR, 0xE000ED04 /* interrupt control state register */ .equ PENDSVSET_BIT, 0x10000000 /* value to trigger PendSV exception */ .equ SHPR3, 0xE000ED20 /* system priority register (3) */ .equ PENDSV_PRI_LOWEST, 0xFFFF0000 /* PendSV and SysTick priority value (lowest) */ /* * rt_base_t rt_hw_interrupt_disable(); */ .global rt_hw_interrupt_disable .type rt_hw_interrupt_disable, %function rt_hw_interrupt_disable: MRS R0, PRIMASK CPSID I BX LR /* * void rt_hw_interrupt_enable(rt_base_t level); */ .global rt_hw_interrupt_enable .type rt_hw_interrupt_enable, %function rt_hw_interrupt_enable: MSR PRIMASK, R0 BX LR /* * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to); * R0 --> from * R1 --> to */ .global rt_hw_context_switch_interrupt .type rt_hw_context_switch_interrupt, %function .global rt_hw_context_switch .type rt_hw_context_switch, %function rt_hw_context_switch_interrupt: rt_hw_context_switch: /* set rt_thread_switch_interrupt_flag to 1 */ LDR R2, =rt_thread_switch_interrupt_flag LDR R3, [R2] CMP R3, #1 BEQ _reswitch MOV R3, #1 STR R3, [R2] LDR R2, =rt_interrupt_from_thread /* set rt_interrupt_from_thread */ STR R0, [R2] _reswitch: LDR R2, =rt_interrupt_to_thread /* set rt_interrupt_to_thread */ STR R1, [R2] LDR R0, =ICSR /* trigger the PendSV exception (causes context switch) */ LDR R1, =PENDSVSET_BIT STR R1, [R0] BX LR /* R0 --> switch from thread stack * R1 --> switch to thread stack * psr, 
pc, LR, R12, R3, R2, R1, R0 are pushed into [from] stack */ .global PendSV_Handler .type PendSV_Handler, %function PendSV_Handler: /* disable interrupt to protect context switch */ MRS R2, PRIMASK CPSID I /* get rt_thread_switch_interrupt_flag */ LDR R0, =rt_thread_switch_interrupt_flag LDR R1, [R0] CBZ R1, pendsv_exit /* pendsv already handled */ /* clear rt_thread_switch_interrupt_flag to 0 */ MOV R1, #0 STR R1, [R0] LDR R0, =rt_interrupt_from_thread LDR R1, [R0] CBZ R1, switch_to_thread /* skip register save at the first time */ MRS R1, PSP /* get from thread stack pointer */ STMFD R1!, {R4 - R11} /* push R4 - R11 register */ LDR R0, [R0] STR R1, [R0] /* update from thread stack pointer */ switch_to_thread: LDR R1, =rt_interrupt_to_thread LDR R1, [R1] LDR R1, [R1] /* load thread stack pointer */ LDMFD R1!, {R4 - R11} /* pop R4 - R11 register */ MSR PSP, R1 /* update stack pointer */ pendsv_exit: /* restore interrupt */ MSR PRIMASK, R2 ORR LR, LR, #0x04 BX LR /* * void rt_hw_context_switch_to(rt_uint32 to); * R0 --> to */ .global rt_hw_context_switch_to .type rt_hw_context_switch_to, %function rt_hw_context_switch_to: LDR R1, =rt_interrupt_to_thread STR R0, [R1] /* set from thread to 0 */ LDR R1, =rt_interrupt_from_thread MOV R0, #0 STR R0, [R1] /* set interrupt flag to 1 */ LDR R1, =rt_thread_switch_interrupt_flag MOV R0, #1 STR R0, [R1] /* set the PendSV and SysTick exception priority */ LDR R0, =SHPR3 LDR R1, =PENDSV_PRI_LOWEST LDR.W R2, [R0,#0] /* read */ ORR R1, R1, R2 /* modify */ STR R1, [R0] /* write-back */ LDR R0, =ICSR /* trigger the PendSV exception (causes context switch) */ LDR R1, =PENDSVSET_BIT STR R1, [R0] /* restore MSP */ LDR r0, =SCB_VTOR LDR r0, [r0] LDR r0, [r0] NOP MSR msp, r0 /* enable interrupts at processor level */ CPSIE F CPSIE I /* ensure PendSV exception taken place before subsequent operation */ DSB ISB /* never reach here! 
*/ /* compatible with old version */ .global rt_hw_interrupt_thread_switch .type rt_hw_interrupt_thread_switch, %function rt_hw_interrupt_thread_switch: BX LR NOP .global HardFault_Handler .type HardFault_Handler, %function HardFault_Handler: /* get current context */ MRS r0, msp /* get fault context from handler. */ TST lr, #0x04 /* if(!EXC_RETURN[2]) */ BEQ _get_sp_done MRS r0, psp /* get fault context from thread. */ _get_sp_done: STMFD r0!, {r4 - r11} /* push r4 - r11 register */ STMFD r0!, {lr} /* push exec_return register */ TST lr, #0x04 /* if(!EXC_RETURN[2]) */ BEQ _update_msp MSR psp, r0 /* update stack pointer to PSP. */ B _update_done _update_msp: MSR msp, r0 /* update stack pointer to MSP. */ _update_done: PUSH {LR} BL rt_hw_hard_fault_exception POP {LR} ORR LR, LR, #0x04 BX LR /* * rt_uint32_t rt_hw_interrupt_check(void); * R0 --> state */ .global rt_hw_interrupt_check .type rt_hw_interrupt_check, %function rt_hw_interrupt_check: MRS R0, IPSR BX LR
Aladdin-Wang/MicroBoot_Demo
5,584
STM32F7_APP/rt-thread/libcpu/arm/cortex-m3/context_iar.S
;/* ; * Copyright (c) 2006-2018, RT-Thread Development Team ; * ; * SPDX-License-Identifier: Apache-2.0 ; * ; * Change Logs: ; * Date Author Notes ; * 2009-01-17 Bernard first version ; * 2009-09-27 Bernard add protect when contex switch occurs ; * 2013-06-18 aozima add restore MSP feature. ; * 2013-07-09 aozima enhancement hard fault exception handler. ; */ ;/** ; * @addtogroup cortex-m3 ; */ ;/*@{*/ SCB_VTOR EQU 0xE000ED08 ; Vector Table Offset Register NVIC_INT_CTRL EQU 0xE000ED04 ; interrupt control state register NVIC_SYSPRI2 EQU 0xE000ED20 ; system priority register (2) NVIC_PENDSV_PRI EQU 0xFFFF0000 ; PendSV and SysTick priority value (lowest) NVIC_PENDSVSET EQU 0x10000000 ; value to trigger PendSV exception SECTION .text:CODE(2) THUMB REQUIRE8 PRESERVE8 IMPORT rt_thread_switch_interrupt_flag IMPORT rt_interrupt_from_thread IMPORT rt_interrupt_to_thread ;/* ; * rt_base_t rt_hw_interrupt_disable(); ; */ EXPORT rt_hw_interrupt_disable rt_hw_interrupt_disable: MRS r0, PRIMASK CPSID I BX LR ;/* ; * void rt_hw_interrupt_enable(rt_base_t level); ; */ EXPORT rt_hw_interrupt_enable rt_hw_interrupt_enable: MSR PRIMASK, r0 BX LR ;/* ; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to); ; * r0 --> from ; * r1 --> to ; */ EXPORT rt_hw_context_switch_interrupt EXPORT rt_hw_context_switch rt_hw_context_switch_interrupt: rt_hw_context_switch: ; set rt_thread_switch_interrupt_flag to 1 LDR r2, =rt_thread_switch_interrupt_flag LDR r3, [r2] CMP r3, #1 BEQ _reswitch MOV r3, #1 STR r3, [r2] LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread STR r0, [r2] _reswitch LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread STR r1, [r2] LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch) LDR r1, =NVIC_PENDSVSET STR r1, [r0] BX LR ; r0 --> switch from thread stack ; r1 --> switch to thread stack ; psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack EXPORT PendSV_Handler PendSV_Handler: ; disable interrupt to protect context 
switch MRS r2, PRIMASK CPSID I ; get rt_thread_switch_interrupt_flag LDR r0, =rt_thread_switch_interrupt_flag LDR r1, [r0] CBZ r1, pendsv_exit ; pendsv already handled ; clear rt_thread_switch_interrupt_flag to 0 MOV r1, #0x00 STR r1, [r0] LDR r0, =rt_interrupt_from_thread LDR r1, [r0] CBZ r1, switch_to_thread ; skip register save at the first time MRS r1, psp ; get from thread stack pointer STMFD r1!, {r4 - r11} ; push r4 - r11 register LDR r0, [r0] STR r1, [r0] ; update from thread stack pointer switch_to_thread LDR r1, =rt_interrupt_to_thread LDR r1, [r1] LDR r1, [r1] ; load thread stack pointer LDMFD r1!, {r4 - r11} ; pop r4 - r11 register MSR psp, r1 ; update stack pointer pendsv_exit ; restore interrupt MSR PRIMASK, r2 ORR lr, lr, #0x04 BX lr ;/* ; * void rt_hw_context_switch_to(rt_uint32 to); ; * r0 --> to ; */ EXPORT rt_hw_context_switch_to rt_hw_context_switch_to: LDR r1, =rt_interrupt_to_thread STR r0, [r1] ; set from thread to 0 LDR r1, =rt_interrupt_from_thread MOV r0, #0x0 STR r0, [r1] ; set interrupt flag to 1 LDR r1, =rt_thread_switch_interrupt_flag MOV r0, #1 STR r0, [r1] ; set the PendSV and SysTick exception priority LDR r0, =NVIC_SYSPRI2 LDR r1, =NVIC_PENDSV_PRI LDR.W r2, [r0,#0x00] ; read ORR r1,r1,r2 ; modify STR r1, [r0] ; write-back LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch) LDR r1, =NVIC_PENDSVSET STR r1, [r0] ; restore MSP LDR r0, =SCB_VTOR LDR r0, [r0] LDR r0, [r0] NOP MSR msp, r0 ; enable interrupts at processor level CPSIE F CPSIE I ; ensure PendSV exception taken place before subsequent operation DSB ISB ; never reach here! ; compatible with old version EXPORT rt_hw_interrupt_thread_switch rt_hw_interrupt_thread_switch: BX lr IMPORT rt_hw_hard_fault_exception EXPORT HardFault_Handler HardFault_Handler: ; get current context MRS r0, msp ; get fault context from handler. TST lr, #0x04 ; if(!EXC_RETURN[2]) BEQ _get_sp_done MRS r0, psp ; get fault context from thread. 
_get_sp_done STMFD r0!, {r4 - r11} ; push r4 - r11 register ;STMFD r0!, {lr} ; push exec_return register SUB r0, r0, #0x04 STR lr, [r0] TST lr, #0x04 ; if(!EXC_RETURN[2]) BEQ _update_msp MSR psp, r0 ; update stack pointer to PSP. B _update_done _update_msp MSR msp, r0 ; update stack pointer to MSP. _update_done PUSH {lr} BL rt_hw_hard_fault_exception POP {lr} ORR lr, lr, #0x04 BX lr END
Aladdin-Wang/MicroBoot_Demo
2,590
STM32F7_APP/rt-thread/libcpu/arm/dm36x/context_rvds.S
;/* ; * Copyright (c) 2006-2022, RT-Thread Development Team ; * ; * SPDX-License-Identifier: Apache-2.0 ; * ; * Change Logs: ; * Date Author Notes ; * 2011-08-14 weety copy from mini2440 ; */ NOINT EQU 0xc0 ; disable interrupt in psr AREA |.text|, CODE, READONLY, ALIGN=2 ARM REQUIRE8 PRESERVE8 ;/* ; * rt_base_t rt_hw_interrupt_disable(); ; */ rt_hw_interrupt_disable PROC EXPORT rt_hw_interrupt_disable MRS r0, cpsr ORR r1, r0, #NOINT MSR cpsr_c, r1 BX lr ENDP ;/* ; * void rt_hw_interrupt_enable(rt_base_t level); ; */ rt_hw_interrupt_enable PROC EXPORT rt_hw_interrupt_enable MSR cpsr_c, r0 BX lr ENDP ;/* ; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to); ; * r0 --> from ; * r1 --> to ; */ rt_hw_context_switch PROC EXPORT rt_hw_context_switch STMFD sp!, {lr} ; push pc (lr should be pushed in place of PC) STMFD sp!, {r0-r12, lr} ; push lr & register file MRS r4, cpsr STMFD sp!, {r4} ; push cpsr MRS r4, spsr STMFD sp!, {r4} ; push spsr STR sp, [r0] ; store sp in preempted tasks TCB LDR sp, [r1] ; get new task stack pointer LDMFD sp!, {r4} ; pop new task spsr MSR spsr_cxsf, r4 LDMFD sp!, {r4} ; pop new task cpsr MSR spsr_cxsf, r4 LDMFD sp!, {r0-r12, lr, pc}^ ; pop new task r0-r12, lr & pc ENDP ;/* ; * void rt_hw_context_switch_to(rt_uint32 to); ; * r0 --> to ; */ rt_hw_context_switch_to PROC EXPORT rt_hw_context_switch_to LDR sp, [r0] ; get new task stack pointer LDMFD sp!, {r4} ; pop new task spsr MSR spsr_cxsf, r4 LDMFD sp!, {r4} ; pop new task cpsr MSR cpsr_cxsf, r4 LDMFD sp!, {r0-r12, lr, pc} ; pop new task r0-r12, lr & pc ENDP ;/* ; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to); ; */ IMPORT rt_thread_switch_interrupt_flag IMPORT rt_interrupt_from_thread IMPORT rt_interrupt_to_thread rt_hw_context_switch_interrupt PROC EXPORT rt_hw_context_switch_interrupt LDR r2, =rt_thread_switch_interrupt_flag LDR r3, [r2] CMP r3, #1 BEQ _reswitch MOV r3, #1 ; set rt_thread_switch_interrupt_flag to 1 STR r3, [r2] LDR r2, =rt_interrupt_from_thread ; 
set rt_interrupt_from_thread STR r0, [r2] _reswitch LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread STR r1, [r2] BX lr ENDP END
Aladdin-Wang/MicroBoot_Demo
2,277
STM32F7_APP/rt-thread/libcpu/arm/dm36x/context_gcc.S
/* * Copyright (c) 2006-2018, RT-Thread Development Team * * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2011-01-13 weety */ /*! * \addtogroup DM36X */ /*@{*/ #define NOINT 0xc0 /* * rt_base_t rt_hw_interrupt_disable(); */ .globl rt_hw_interrupt_disable rt_hw_interrupt_disable: mrs r0, cpsr orr r1, r0, #NOINT msr cpsr_c, r1 bx lr /* * void rt_hw_interrupt_enable(rt_base_t level); */ .globl rt_hw_interrupt_enable rt_hw_interrupt_enable: msr cpsr, r0 bx lr /* * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to); * r0 --> from * r1 --> to */ .globl rt_hw_context_switch rt_hw_context_switch: stmfd sp!, {lr} @ push pc (lr should be pushed in place of PC) stmfd sp!, {r0-r12, lr} @ push lr & register file mrs r4, cpsr tst lr, #0x01 orrne r4, r4, #0x20 @ it's thumb code stmfd sp!, {r4} @ push cpsr str sp, [r0] @ store sp in preempted tasks TCB ldr sp, [r1] @ get new task stack pointer ldmfd sp!, {r4} @ pop new task cpsr to spsr msr spsr_cxsf, r4 _do_switch: ldmfd sp!, {r0-r12, lr, pc}^ @ pop new task r0-r12, lr & pc, copy spsr to cpsr /* * void rt_hw_context_switch_to(rt_uint32 to); * r0 --> to */ .globl rt_hw_context_switch_to rt_hw_context_switch_to: ldr sp, [r0] @ get new task stack pointer ldmfd sp!, {r4} @ pop new task spsr msr spsr_cxsf, r4 bic r4, r4, #0x20 @ must be ARM mode msr cpsr_cxsf, r4 ldmfd sp!, {r0-r12, lr, pc}^ @ pop new task r0-r12, lr & pc /* * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to); */ .globl rt_thread_switch_interrupt_flag .globl rt_interrupt_from_thread .globl rt_interrupt_to_thread .globl rt_hw_context_switch_interrupt rt_hw_context_switch_interrupt: ldr r2, =rt_thread_switch_interrupt_flag ldr r3, [r2] cmp r3, #1 beq _reswitch mov r3, #1 @ set rt_thread_switch_interrupt_flag to 1 str r3, [r2] ldr r2, =rt_interrupt_from_thread @ set rt_interrupt_from_thread str r0, [r2] _reswitch: ldr r2, =rt_interrupt_to_thread @ set rt_interrupt_to_thread str r1, [r2] bx lr
Aladdin-Wang/MicroBoot_Demo
3,181
STM32F7_APP/rt-thread/libcpu/arm/cortex-a/cp15_gcc.S
/* * Copyright (c) 2006-2018, RT-Thread Development Team * * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2013-07-05 Bernard the first version */ .globl rt_cpu_get_smp_id rt_cpu_get_smp_id: mrc p15, #0, r0, c0, c0, #5 bx lr .globl rt_cpu_vector_set_base rt_cpu_vector_set_base: /* clear SCTRL.V to customize the vector address */ mrc p15, #0, r1, c1, c0, #0 bic r1, #(1 << 13) mcr p15, #0, r1, c1, c0, #0 /* set up the vector address */ mcr p15, #0, r0, c12, c0, #0 dsb bx lr .globl rt_hw_cpu_dcache_enable rt_hw_cpu_dcache_enable: mrc p15, #0, r0, c1, c0, #0 orr r0, r0, #0x00000004 mcr p15, #0, r0, c1, c0, #0 bx lr .globl rt_hw_cpu_icache_enable rt_hw_cpu_icache_enable: mrc p15, #0, r0, c1, c0, #0 orr r0, r0, #0x00001000 mcr p15, #0, r0, c1, c0, #0 bx lr _FLD_MAX_WAY: .word 0x3ff _FLD_MAX_IDX: .word 0x7fff .globl rt_cpu_dcache_clean_flush rt_cpu_dcache_clean_flush: push {r4-r11} dmb mrc p15, #1, r0, c0, c0, #1 @ read clid register ands r3, r0, #0x7000000 @ get level of coherency mov r3, r3, lsr #23 beq finished mov r10, #0 loop1: add r2, r10, r10, lsr #1 mov r1, r0, lsr r2 and r1, r1, #7 cmp r1, #2 blt skip mcr p15, #2, r10, c0, c0, #0 isb mrc p15, #1, r1, c0, c0, #0 and r2, r1, #7 add r2, r2, #4 ldr r4, _FLD_MAX_WAY ands r4, r4, r1, lsr #3 clz r5, r4 ldr r7, _FLD_MAX_IDX ands r7, r7, r1, lsr #13 loop2: mov r9, r4 loop3: orr r11, r10, r9, lsl r5 orr r11, r11, r7, lsl r2 mcr p15, #0, r11, c7, c14, #2 subs r9, r9, #1 bge loop3 subs r7, r7, #1 bge loop2 skip: add r10, r10, #2 cmp r3, r10 bgt loop1 finished: dsb isb pop {r4-r11} bx lr .globl rt_cpu_icache_flush rt_cpu_icache_flush: mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate dsb isb bx lr .globl rt_hw_cpu_dcache_disable rt_hw_cpu_dcache_disable: push {r4-r11, lr} bl rt_cpu_dcache_clean_flush mrc p15, #0, r0, c1, c0, #0 bic r0, r0, #0x00000004 mcr p15, #0, r0, c1, c0, #0 pop {r4-r11, lr} bx lr .globl rt_hw_cpu_icache_disable rt_hw_cpu_icache_disable: mrc p15, #0, r0, c1, c0, #0 
bic r0, r0, #0x00001000 mcr p15, #0, r0, c1, c0, #0 bx lr .globl rt_cpu_mmu_disable rt_cpu_mmu_disable: mcr p15, #0, r0, c8, c7, #0 @ invalidate tlb mrc p15, #0, r0, c1, c0, #0 bic r0, r0, #1 mcr p15, #0, r0, c1, c0, #0 @ clear mmu bit dsb bx lr .globl rt_cpu_mmu_enable rt_cpu_mmu_enable: mrc p15, #0, r0, c1, c0, #0 orr r0, r0, #0x001 mcr p15, #0, r0, c1, c0, #0 @ set mmu enable bit dsb bx lr .globl rt_cpu_tlb_set rt_cpu_tlb_set: mcr p15, #0, r0, c2, c0, #0 dmb bx lr
Aladdin-Wang/MicroBoot_Demo
16,702
STM32F7_APP/rt-thread/libcpu/arm/cortex-a/start_gcc.S
/* * Copyright (c) 2006-2018, RT-Thread Development Team * * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2013-07-05 Bernard the first version * 2018-11-22 Jesven in the interrupt context, use rt_scheduler_do_irq_switch checks * and switches to a new thread * 2024-01-16 huanghe restructure this code section following the aarch64 architectural style */ #include "rtconfig.h" #define ARM_CPU_STACK_SIZE_OFFSET 12 #define ARM_CPU_STACK_SIZE (1<<ARM_CPU_STACK_SIZE_OFFSET) .equ Mode_USR, 0x10 .equ Mode_FIQ, 0x11 .equ Mode_IRQ, 0x12 .equ Mode_SVC, 0x13 .equ Mode_ABT, 0x17 .equ Mode_UND, 0x1B .equ Mode_SYS, 0x1F .equ I_Bit, 0x80 /* when I bit is set, IRQ is disabled */ .equ F_Bit, 0x40 /* when F bit is set, FIQ is disabled */ /*Load the physical address of a symbol into a register. Through pv_off calculates the offset of the physical address */ .macro get_phy, reg, symbol, _pvoff ldr \reg, =\symbol add \reg, \_pvoff .endm /*Calculate the offset between the physical address and the virtual address of the "_reset".*/ .macro get_pvoff, tmp, out ldr \tmp, =_reset adr \out, _reset sub \out, \out, \tmp .endm pv_off .req r11 /* Used to store the offset between physical address and the virtual address */ cpu_id .req r10 /* Used to store the cpu id */ /* reset entry */ .globl _reset _reset: /* Calculate the offset between the physical address and the virtual address */ get_pvoff r0, pv_off /* exit hyp mode */ bl init_cpu_mode /* clear bss section */ bl init_kernel_bss /* Initializes the assembly environment stack */ bl init_cpu_stack_early /* init mmu */ b init_mmu_page_table_early init_cpu_stack_early: cps #Mode_SVC get_phy r0, svc_stack_top, pv_off mov sp, r0 #ifdef RT_USING_FPU mov r4, #0xfffffff mcr p15, 0, r4, c1, c0, 2 #endif mov pc, lr init_kernel_bss: /* enable I cache + branch prediction */ mrc p15, 0, r0, c1, c0, 0 orr r0, r0, #(1<<12) orr r0, r0, #(1<<11) mcr p15, 0, r0, c1, c0, 0 mov r0,#0 /* get a zero */ get_phy r1, __bss_start, pv_off 
get_phy r2, __bss_end, pv_off bss_loop: cmp r1,r2 /* check if data to clear */ strlo r0,[r1],#4 /* clear 4 bytes */ blo bss_loop /* loop until done */ mov pc, lr init_cpu_mode: #ifdef ARCH_ARMV8 /* Check for HYP mode */ mrs r0, cpsr_all and r0, r0, #0x1F mov r8, #0x1A cmp r0, r8 beq overHyped b continue_exit overHyped: /* Get out of HYP mode */ mov r9, lr /* HYP mode has a dedicated register, called ELR_hyp, to store the exception return address. The lr register needs to be temporarily saved, otherwise "mov pc lr" cannot be used after switching modes. */ adr r1, continue_exit msr ELR_hyp, r1 mrs r1, cpsr_all and r1, r1, #0xFFFFFFE0 /* CPSR_MODE_MASK */ orr r1, r1, #0x13 /* CPSR_MODE_SUPERVISOR */ msr SPSR_hyp, r1 eret continue_exit: mov lr ,r9 #endif #ifdef SOC_BCM283x /* Suspend the other cpu cores */ mrc p15, 0, r0, c0, c0, 5 ands r0, #3 bne _halt /* Disable IRQ & FIQ */ cpsid if /* Check for HYP mode */ mrs r0, cpsr_all and r0, r0, #0x1F mov r8, #0x1A cmp r0, r8 beq overHyped b continue_exit overHyped: /* Get out of HYP mode */ mov r9, lr /* HYP mode has a dedicated register, called ELR_hyp, to store the exception return address. The lr register needs to be temporarily saved, otherwise "mov pc lr" cannot be used after switching modes. 
*/ adr r1, continue_exit msr ELR_hyp, r1 mrs r1, cpsr_all and r1, r1, #0xFFFFFFE0 /* CPSR_MODE_MASK */ orr r1, r1, #0x13 /* CPSR_MODE_SUPERVISOR */ msr SPSR_hyp, r1 eret continue_exit: mov lr ,r9 /* set the cpu to SVC32 mode and disable interrupt */ mrs r0, cpsr bic r0, r0, #0x1f orr r0, r0, #0x13 msr cpsr_c, r0 #endif /* invalid tlb before enable mmu */ mrc p15, 0, r0, c1, c0, 0 bic r0, #1 mcr p15, 0, r0, c1, c0, 0 dsb isb mov r0, #0 mcr p15, 0, r0, c8, c7, 0 mcr p15, 0, r0, c7, c5, 0 /* iciallu */ mcr p15, 0, r0, c7, c5, 6 /* bpiall */ dsb isb mov pc, lr init_mmu_page_table_early: get_phy r0, init_mtbl, pv_off mov r1, pv_off bl rt_hw_mem_setup_early /* get cpu id */ bl rt_hw_cpu_id_early mov cpu_id ,r0 /* enable_mmu_page_table_early is changed to master_core_startup */ ldr lr, =master_core_startup cmp cpu_id, #0 beq enable_mmu_page_table_early #ifdef RT_USING_SMP #ifdef RT_SMP_AUTO_BOOT /* if cpu id > 0, stop or wait */ ldr r0, =secondary_cpu_entry mov r1, #0 str r1, [r0] /* clean secondary_cpu_entry */ #endif #endif secondary_loop: @ cpu core 1 goes into sleep until core 0 wakeup it wfe #ifdef RT_SMP_AUTO_BOOT ldr r1, =secondary_cpu_entry ldr r0, [r1] cmp r0, #0 blxne r0 /* if(secondary_cpu_entry) secondary_cpu_entry(); */ #endif /* RT_SMP_AUTO_BOOT */ b secondary_loop enable_mmu_page_table_early: /* init TTBR0 */ get_phy r0, init_mtbl, pv_off mcr p15, #0, r0, c2, c0, #0 dmb ldr r0,=#0x55555555 mcr p15, #0, r0, c3, c0, #0 /* disable ttbr1 */ mov r0, #(1 << 5) /* PD1=1 */ mcr p15, 0, r0, c2, c0, 2 /* ttbcr */ /* init stack for cpu mod */ cps #Mode_UND ldr r1,=und_stack_top sub sp, r1, cpu_id, asl #ARM_CPU_STACK_SIZE_OFFSET cps #Mode_IRQ ldr r1, =irq_stack_top sub sp, r1, cpu_id, asl #ARM_CPU_STACK_SIZE_OFFSET cps #Mode_FIQ ldr r1, =irq_stack_top sub sp, r1, cpu_id, asl #ARM_CPU_STACK_SIZE_OFFSET cps #Mode_ABT ldr r1, =abt_stack_top sub sp, r1, cpu_id, asl #ARM_CPU_STACK_SIZE_OFFSET cps #Mode_SVC ldr r1, =svc_stack_top sub sp, r1, cpu_id, asl 
#ARM_CPU_STACK_SIZE_OFFSET /* invalid tlb before enable mmu */ mov r0, #0 mcr p15, 0, r0, c8, c7, 0 mcr p15, 0, r0, c7, c5, 0 /* iciallu */ mcr p15, 0, r0, c7, c5, 6 /* bpiall */ mrc p15, 0, r0, c1, c0, 0 bic r0, r0, #0x7 /* clear bit1~3 */ orr r0, #((1 << 12) | (1 << 11)) /* instruction cache, branch prediction */ orr r0, #((1 << 2) | (1 << 0)) /* data cache, mmu enable */ mcr p15, 0, r0, c1, c0, 0 dsb isb mov pc, lr master_core_startup : mov r0 ,pv_off bl rt_kmem_pvoff_set ldr lr, =rtthread_startup mov pc, lr .global rt_hw_mmu_tbl_get rt_hw_mmu_tbl_get: mrc p15, 0, r0, c2, c0, 0 /* ttbr0 */ bic r0, #0x18 mov pc, lr .weak rt_hw_cpu_id_early rt_hw_cpu_id_early: mrc p15, 0, r0, c0, c0, 5 and r0, r0, #0xf mov pc, lr #ifdef RT_USING_SMP .global rt_secondary_cpu_entry rt_secondary_cpu_entry: ldr r0, =_reset adr pv_off, _reset sub pv_off, pv_off, r0 bl init_cpu_stack_early /* init mmu */ bl rt_hw_cpu_id_early mov cpu_id ,r0 ldr lr ,= rt_hw_secondary_cpu_bsp_start b enable_mmu_page_table_early #endif /* exception handlers: undef, swi, padt, dabt, resv, irq, fiq */ .section .text.isr, "ax" .align 5 .globl vector_fiq vector_fiq: stmfd sp!,{r0-r7,lr} bl rt_hw_trap_fiq ldmfd sp!,{r0-r7,lr} subs pc, lr, #4 .globl rt_interrupt_enter .globl rt_interrupt_leave .globl rt_thread_switch_interrupt_flag .globl rt_interrupt_from_thread .globl rt_interrupt_to_thread .globl rt_current_thread .globl vmm_thread .globl vmm_virq_check .align 5 .globl vector_irq vector_irq: #ifdef RT_USING_SMP stmfd sp!, {r0, r1} cps #Mode_SVC mov r0, sp /* svc_sp */ mov r1, lr /* svc_lr */ cps #Mode_IRQ sub lr, #4 stmfd r0!, {r1, lr} /* svc_lr, svc_pc */ stmfd r0!, {r2 - r12} ldmfd sp!, {r1, r2} /* original r0, r1 */ stmfd r0!, {r1 - r2} mrs r1, spsr /* original mode */ stmfd r0!, {r1} #ifdef RT_USING_SMART stmfd r0, {r13, r14}^ /* usr_sp, usr_lr */ sub r0, #8 #endif #ifdef RT_USING_FPU /* fpu context */ vmrs r6, fpexc tst r6, #(1<<30) beq 1f vstmdb r0!, {d0-d15} vstmdb r0!, {d16-d31} vmrs r5, fpscr stmfd 
r0!, {r5} 1: stmfd r0!, {r6} #endif /* now irq stack is clean */ /* r0 is task svc_sp */ /* backup r0 -> r8 */ mov r8, r0 cps #Mode_SVC mov sp, r8 bl rt_interrupt_enter bl rt_hw_trap_irq bl rt_interrupt_leave mov r0, r8 bl rt_scheduler_do_irq_switch b rt_hw_context_switch_exit #else stmfd sp!, {r0-r12,lr} bl rt_interrupt_enter bl rt_hw_trap_irq bl rt_interrupt_leave /* if rt_thread_switch_interrupt_flag set, jump to * rt_hw_context_switch_interrupt_do and don't return */ ldr r0, =rt_thread_switch_interrupt_flag ldr r1, [r0] cmp r1, #1 beq rt_hw_context_switch_interrupt_do #ifdef RT_USING_SMART ldmfd sp!, {r0-r12,lr} cps #Mode_SVC push {r0-r12} mov r7, lr cps #Mode_IRQ mrs r4, spsr sub r5, lr, #4 cps #Mode_SVC and r6, r4, #0x1f cmp r6, #0x10 bne 1f msr spsr_csxf, r4 mov lr, r5 pop {r0-r12} b arch_ret_to_user 1: mov lr, r7 cps #Mode_IRQ msr spsr_csxf, r4 mov lr, r5 cps #Mode_SVC pop {r0-r12} cps #Mode_IRQ movs pc, lr #else ldmfd sp!, {r0-r12,lr} subs pc, lr, #4 #endif rt_hw_context_switch_interrupt_do: mov r1, #0 /* clear flag */ str r1, [r0] mov r1, sp /* r1 point to {r0-r3} in stack */ add sp, sp, #4*4 ldmfd sp!, {r4-r12,lr} /* reload saved registers */ mrs r0, spsr /* get cpsr of interrupt thread */ sub r2, lr, #4 /* save old task's pc to r2 */ /* Switch to SVC mode with no interrupt. If the usr mode guest is * interrupted, this will just switch to the stack of kernel space. * save the registers in kernel space won't trigger data abort. 
*/ msr cpsr_c, #I_Bit|F_Bit|Mode_SVC stmfd sp!, {r2} /* push old task's pc */ stmfd sp!, {r4-r12,lr} /* push old task's lr,r12-r4 */ ldmfd r1, {r1-r4} /* restore r0-r3 of the interrupt thread */ stmfd sp!, {r1-r4} /* push old task's r0-r3 */ stmfd sp!, {r0} /* push old task's cpsr */ #ifdef RT_USING_SMART stmfd sp, {r13, r14}^ /*push usr_sp, usr_lr */ sub sp, #8 #endif #ifdef RT_USING_FPU /* fpu context */ vmrs r6, fpexc tst r6, #(1<<30) beq 1f vstmdb sp!, {d0-d15} vstmdb sp!, {d16-d31} vmrs r5, fpscr stmfd sp!, {r5} 1: stmfd sp!, {r6} #endif ldr r4, =rt_interrupt_from_thread ldr r5, [r4] str sp, [r5] /* store sp in preempted tasks's TCB */ ldr r6, =rt_interrupt_to_thread ldr r6, [r6] ldr sp, [r6] /* get new task's stack pointer */ #ifdef RT_USING_SMART bl rt_thread_self mov r4, r0 bl lwp_aspace_switch mov r0, r4 bl lwp_user_setting_restore #endif #ifdef RT_USING_FPU /* fpu context */ ldmfd sp!, {r6} vmsr fpexc, r6 tst r6, #(1<<30) beq 1f ldmfd sp!, {r5} vmsr fpscr, r5 vldmia sp!, {d16-d31} vldmia sp!, {d0-d15} 1: #endif #ifdef RT_USING_SMART ldmfd sp, {r13, r14}^ /*pop usr_sp, usr_lr */ add sp, #8 #endif ldmfd sp!, {r4} /* pop new task's cpsr to spsr */ msr spsr_cxsf, r4 #ifdef RT_USING_SMART and r4, #0x1f cmp r4, #0x10 bne 1f ldmfd sp!, {r0-r12,lr} ldmfd sp!, {lr} b arch_ret_to_user 1: #endif /* pop new task's r0-r12,lr & pc, copy spsr to cpsr */ ldmfd sp!, {r0-r12,lr,pc}^ #endif .macro push_svc_reg sub sp, sp, #17 * 4 /* Sizeof(struct rt_hw_exp_stack) */ stmia sp, {r0 - r12} /* Calling r0-r12 */ mov r0, sp add sp, sp, #17 * 4 mrs r6, spsr /* Save CPSR */ str lr, [r0, #15*4] /* Push PC */ str r6, [r0, #16*4] /* Push CPSR */ and r1, r6, #0x1f cmp r1, #0x10 cps #Mode_SYS streq sp, [r0, #13*4] /* Save calling SP */ streq lr, [r0, #14*4] /* Save calling PC */ cps #Mode_SVC strne sp, [r0, #13*4] /* Save calling SP */ strne lr, [r0, #14*4] /* Save calling PC */ .endm .align 5 .weak vector_swi vector_swi: push_svc_reg bl rt_hw_trap_swi b . 
.align 5 .globl vector_undef vector_undef: push_svc_reg bl rt_hw_trap_undef #ifdef RT_USING_FPU cps #Mode_UND sub sp, sp, #17 * 4 ldr lr, [sp, #15*4] ldmia sp, {r0 - r12} add sp, sp, #17 * 4 movs pc, lr #endif b . .align 5 .globl vector_pabt vector_pabt: push_svc_reg #ifdef RT_USING_SMART /* cp Mode_ABT stack to SVC */ sub sp, sp, #17 * 4 /* Sizeof(struct rt_hw_exp_stack) */ mov lr, r0 ldmia lr, {r0 - r12} stmia sp, {r0 - r12} add r1, lr, #13 * 4 add r2, sp, #13 * 4 ldmia r1, {r4 - r7} stmia r2, {r4 - r7} mov r0, sp bl rt_hw_trap_pabt /* return to user */ ldr lr, [sp, #16*4] /* orign spsr */ msr spsr_cxsf, lr ldr lr, [sp, #15*4] /* orign pc */ ldmia sp, {r0 - r12} add sp, #17 * 4 b arch_ret_to_user #else bl rt_hw_trap_pabt b . #endif .align 5 .globl vector_dabt vector_dabt: push_svc_reg #ifdef RT_USING_SMART /* cp Mode_ABT stack to SVC */ sub sp, sp, #17 * 4 /* Sizeof(struct rt_hw_exp_stack) */ mov lr, r0 ldmia lr, {r0 - r12} stmia sp, {r0 - r12} add r1, lr, #13 * 4 add r2, sp, #13 * 4 ldmia r1, {r4 - r7} stmia r2, {r4 - r7} mov r0, sp bl rt_hw_trap_dabt /* return to user */ ldr lr, [sp, #16*4] /* orign spsr */ msr spsr_cxsf, lr ldr lr, [sp, #15*4] /* orign pc */ ldmia sp, {r0 - r12} add sp, #17 * 4 b arch_ret_to_user #else bl rt_hw_trap_dabt b . #endif .align 5 .globl vector_resv vector_resv: push_svc_reg bl rt_hw_trap_resv b . .global rt_hw_clz rt_hw_clz: clz r0, r0 bx lr #include "asm-generic.h" START_POINT(_thread_start) mov r10, lr blx r1 blx r10 b . /* never here */ START_POINT_END(_thread_start) .data .align 14 init_mtbl: .space (4*4096) /* The L1 translation table therefore contains 4096 32-bit (word-sized) entries. 
*/ .global rt_hw_mmu_switch rt_hw_mmu_switch: orr r0, #0x18 mcr p15, 0, r0, c2, c0, 0 // ttbr0 //invalid tlb mov r0, #0 mcr p15, 0, r0, c8, c7, 0 mcr p15, 0, r0, c7, c5, 0 //iciallu mcr p15, 0, r0, c7, c5, 6 //bpiall dsb isb mov pc, lr .global rt_hw_set_process_id rt_hw_set_process_id: LSL r0, r0, #8 MCR p15, 0, r0, c13, c0, 1 mov pc, lr .bss .align 3 /* align to 2~3=8 */ .cpus_stack: svc_stack_n: #if defined(RT_USING_SMP) && (RT_CPUS_NR > 1) .space ((RT_CPUS_NR - 1) * ARM_CPU_STACK_SIZE) #endif .space (ARM_CPU_STACK_SIZE) svc_stack_top: irq_stack_n: #if defined(RT_USING_SMP) && (RT_CPUS_NR > 1) .space ((RT_CPUS_NR - 1) * ARM_CPU_STACK_SIZE) #endif .space (ARM_CPU_STACK_SIZE) irq_stack_top: und_stack_n: #if defined(RT_USING_SMP) && (RT_CPUS_NR > 1) .space ((RT_CPUS_NR - 1) * ARM_CPU_STACK_SIZE) #endif .space (ARM_CPU_STACK_SIZE) und_stack_top: abt_stack_n: #if defined(RT_USING_SMP) && (RT_CPUS_NR > 1) .space ((RT_CPUS_NR - 1) * ARM_CPU_STACK_SIZE) #endif .space (ARM_CPU_STACK_SIZE) abt_stack_top:
Aladdin-Wang/MicroBoot_Demo
6,016
STM32F7_APP/rt-thread/libcpu/arm/cortex-a/context_gcc.S
/* * Copyright (c) 2006-2018, RT-Thread Development Team * * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2013-07-05 Bernard the first version */ #include "rtconfig.h" .section .text, "ax" #ifdef RT_USING_SMP #define rt_hw_interrupt_disable rt_hw_local_irq_disable #define rt_hw_interrupt_enable rt_hw_local_irq_enable #endif /* * rt_base_t rt_hw_interrupt_disable(); */ .globl rt_hw_interrupt_disable rt_hw_interrupt_disable: mrs r0, cpsr cpsid i bx lr /* * void rt_hw_interrupt_enable(rt_base_t level); */ .globl rt_hw_interrupt_enable rt_hw_interrupt_enable: msr cpsr, r0 bx lr /* * void rt_hw_context_switch_to(rt_uint32 to, struct rt_thread *to_thread); * r0 --> to (thread stack) * r1 --> to_thread */ .globl rt_hw_context_switch_to rt_hw_context_switch_to: clrex ldr sp, [r0] @ get new task stack pointer #ifdef RT_USING_SMP mov r0, r1 bl rt_cpus_lock_status_restore #ifdef RT_USING_SMART bl rt_thread_self bl lwp_user_setting_restore #endif #else #ifdef RT_USING_SMART bl rt_thread_self mov r4, r0 bl lwp_aspace_switch mov r0, r4 bl lwp_user_setting_restore #endif #endif /*RT_USING_SMP*/ b rt_hw_context_switch_exit .section .bss.share.isr _guest_switch_lvl: .word 0 .globl vmm_virq_update .section .text.isr, "ax" /* * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to, struct rt_thread *to_thread); * r0 --> from (from_thread stack) * r1 --> to (to_thread stack) * r2 --> to_thread */ .globl rt_hw_context_switch rt_hw_context_switch: clrex stmfd sp!, {lr} @ push pc (lr should be pushed in place of PC) stmfd sp!, {r0-r12, lr} @ push lr & register file mrs r4, cpsr tst lr, #0x01 orrne r4, r4, #0x20 @ it's thumb code stmfd sp!, {r4} @ push cpsr #ifdef RT_USING_SMART stmfd sp, {r13, r14}^ @ push usr_sp usr_lr sub sp, #8 #endif #ifdef RT_USING_FPU /* fpu context */ vmrs r6, fpexc tst r6, #(1<<30) beq 1f vstmdb sp!, {d0-d15} vstmdb sp!, {d16-d31} vmrs r5, fpscr stmfd sp!, {r5} 1: stmfd sp!, {r6} #endif str sp, [r0] @ store sp in preempted tasks TCB 
ldr sp, [r1] @ get new task stack pointer #ifdef RT_USING_SMP mov r0, r2 bl rt_cpus_lock_status_restore #ifdef RT_USING_SMART bl rt_thread_self bl lwp_user_setting_restore #endif #else #ifdef RT_USING_SMART bl rt_thread_self mov r4, r0 bl lwp_aspace_switch mov r0, r4 bl lwp_user_setting_restore #endif #endif /*RT_USING_SMP*/ b rt_hw_context_switch_exit /* * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to); */ .equ Mode_USR, 0x10 .equ Mode_FIQ, 0x11 .equ Mode_IRQ, 0x12 .equ Mode_SVC, 0x13 .equ Mode_ABT, 0x17 .equ Mode_UND, 0x1B .equ Mode_SYS, 0x1F .equ I_Bit, 0x80 @ when I bit is set, IRQ is disabled .equ F_Bit, 0x40 @ when F bit is set, FIQ is disabled .globl rt_thread_switch_interrupt_flag .globl rt_interrupt_from_thread .globl rt_interrupt_to_thread .globl rt_hw_context_switch_interrupt rt_hw_context_switch_interrupt: clrex #ifdef RT_USING_SMP /* r0 :svc_mod context * r1 :addr of from_thread's sp * r2 :addr of to_thread's sp * r3 :to_thread's tcb */ #ifdef RT_USING_SMART push {r0 - r3, lr} #ifdef RT_USING_SMART bl rt_thread_self bl lwp_user_setting_save #endif pop {r0 - r3, lr} #endif str r0, [r1] ldr sp, [r2] mov r0, r3 #ifdef RT_USING_SMART mov r4, r0 #endif bl rt_cpus_lock_status_restore #ifdef RT_USING_SMART mov r0, r4 bl lwp_user_setting_restore #endif b rt_hw_context_switch_exit #else /*RT_USING_SMP*/ /* r0 :addr of from_thread's sp * r1 :addr of to_thread's sp * r2 :from_thread's tcb * r3 :to_thread's tcb */ #ifdef RT_USING_SMART /* now to_thread(r3) not used */ ldr ip, =rt_thread_switch_interrupt_flag ldr r3, [ip] cmp r3, #1 beq _reswitch ldr r3, =rt_interrupt_from_thread @ set rt_interrupt_from_thread str r0, [r3] mov r3, #1 @ set rt_thread_switch_interrupt_flag to 1 str r3, [ip] #ifdef RT_USING_SMART push {r1, lr} mov r0, r2 bl lwp_user_setting_save pop {r1, lr} #endif _reswitch: ldr ip, =rt_interrupt_to_thread @ set rt_interrupt_to_thread str r1, [ip] bx lr #else /* now from_thread(r2) to_thread(r3) not used */ ldr ip, 
=rt_thread_switch_interrupt_flag ldr r3, [ip] cmp r3, #1 beq _reswitch ldr r3, =rt_interrupt_from_thread @ set rt_interrupt_from_thread str r0, [r3] mov r3, #1 @ set rt_thread_switch_interrupt_flag to 1 str r3, [ip] _reswitch: ldr ip, =rt_interrupt_to_thread @ set rt_interrupt_to_thread str r1, [ip] bx lr #endif #endif /*RT_USING_SMP*/ .global rt_hw_context_switch_exit rt_hw_context_switch_exit: #ifdef RT_USING_SMP #ifdef RT_USING_SIGNALS mov r0, sp cps #Mode_IRQ bl rt_signal_check cps #Mode_SVC mov sp, r0 #endif #endif #ifdef RT_USING_FPU /* fpu context */ ldmfd sp!, {r6} vmsr fpexc, r6 tst r6, #(1<<30) beq 1f ldmfd sp!, {r5} vmsr fpscr, r5 vldmia sp!, {d16-d31} vldmia sp!, {d0-d15} 1: #endif #ifdef RT_USING_SMART ldmfd sp, {r13, r14}^ /* usr_sp, usr_lr */ add sp, #8 #endif ldmfd sp!, {r1} msr spsr_cxsf, r1 /* original mode */ #ifdef RT_USING_SMART and r1, #0x1f cmp r1, #0x10 bne 1f ldmfd sp!, {r0-r12,lr} ldmfd sp!, {lr} b arch_ret_to_user 1: #endif ldmfd sp!, {r0-r12,lr,pc}^ /* irq return */ #ifdef RT_USING_FPU .global set_fpexc set_fpexc: vmsr fpexc, r0 bx lr #endif
Aladdin-Wang/MicroBoot_Demo
2,589
STM32F7_APP/rt-thread/libcpu/arm/sep4020/context_rvds.S
;/* ; * Copyright (c) 2006-2022, RT-Thread Development Team ; * ; * SPDX-License-Identifier: Apache-2.0 ; * ; * Change Logs: ; * Date Author Notes ; * 2009-01-20 Bernard first version ; */ NOINT EQU 0xc0 ; disable interrupt in psr AREA |.text|, CODE, READONLY, ALIGN=2 ARM REQUIRE8 PRESERVE8 ;/* ; * rt_base_t rt_hw_interrupt_disable(); ; */ rt_hw_interrupt_disable PROC EXPORT rt_hw_interrupt_disable MRS r0, cpsr ORR r1, r0, #NOINT MSR cpsr_c, r1 BX lr ENDP ;/* ; * void rt_hw_interrupt_enable(rt_base_t level); ; */ rt_hw_interrupt_enable PROC EXPORT rt_hw_interrupt_enable MSR cpsr_c, r0 BX lr ENDP ;/* ; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to); ; * r0 --> from ; * r1 --> to ; */ rt_hw_context_switch PROC EXPORT rt_hw_context_switch STMFD sp!, {lr} ; push pc (lr should be pushed in place of PC) STMFD sp!, {r0-r12, lr} ; push lr & register file MRS r4, cpsr STMFD sp!, {r4} ; push cpsr MRS r4, spsr STMFD sp!, {r4} ; push spsr STR sp, [r0] ; store sp in preempted tasks TCB LDR sp, [r1] ; get new task stack pointer LDMFD sp!, {r4} ; pop new task spsr MSR spsr_cxsf, r4 LDMFD sp!, {r4} ; pop new task cpsr MSR cpsr_cxsf, r4 LDMFD sp!, {r0-r12, lr, pc} ; pop new task r0-r12, lr & pc ENDP ;/* ; * void rt_hw_context_switch_to(rt_uint32 to); ; * r0 --> to ; */ rt_hw_context_switch_to PROC EXPORT rt_hw_context_switch_to LDR sp, [r0] ; get new task stack pointer LDMFD sp!, {r4} ; pop new task spsr MSR spsr_cxsf, r4 LDMFD sp!, {r4} ; pop new task cpsr MSR cpsr_cxsf, r4 LDMFD sp!, {r0-r12, lr, pc} ; pop new task r0-r12, lr & pc ENDP ;/* ; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to); ; */ IMPORT rt_thread_switch_interrupt_flag IMPORT rt_interrupt_from_thread IMPORT rt_interrupt_to_thread rt_hw_context_switch_interrupt PROC EXPORT rt_hw_context_switch_interrupt LDR r2, =rt_thread_switch_interrupt_flag LDR r3, [r2] CMP r3, #1 BEQ _reswitch MOV r3, #1 ; set rt_thread_switch_interrupt_flag to 1 STR r3, [r2] LDR r2, =rt_interrupt_from_thread ; set 
rt_interrupt_from_thread STR r0, [r2] _reswitch LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread STR r1, [r2] BX lr ENDP END
Aladdin-Wang/MicroBoot_Demo
11,690
STM32F7_APP/rt-thread/libcpu/arm/sep4020/start_rvds.S
;============================================================================================== ; star_rvds.s for Keil MDK 4.10 ; ; SEP4020 start up code ; ; Change Logs: ; Date Author Notes ; 2010-03-17 zchong ;============================================================================================= PMU_PLTR EQU 0x10001000 ; PLLȶʱ PMU_PMCR EQU 0x10001004 ; ϵͳʱPLLĿƼĴ PMU_PUCR EQU 0x10001008 ; USBʱPLLĿƼĴ PMU_PCSR EQU 0x1000100C ; ڲģʱԴĿƼĴ PMU_PDSLOW EQU 0x10001010 ; SLOW״̬ʱӵķƵ PMU_PMDR EQU 0x10001014 ; оƬģʽĴ PMU_RCTR EQU 0x10001018 ; ResetƼĴ PMU_CLRWAKUP EQU 0x1000101C ; WakeUpĴ RTC_CTR EQU 0x1000200C ; RTCƼĴ INTC_IER EQU 0x10000000 ; IRQжĴ INTC_IMR EQU 0x10000008 ; IRQжμĴ INTC_IFSR EQU 0x10000030 ; IRQж״̬Ĵ INTC_FIER EQU 0x100000C0 ; FIQжĴ INTC_FIMR EQU 0x100000C4 ; FIQжμĴ EMI_CSACONF EQU 0x11000000 ; CSAüĴ EMI_CSECONF EQU 0x11000010 ; CSEüĴ EMI_CSFCONF EQU 0x11000014 ; CSFüĴ EMI_SDCONF1 EQU 0x11000018 ; SDRAMʱüĴ1 EMI_SDCONF2 EQU 0x1100001C ; SDRAMʱüĴ2, SDRAMʼõϢ EMI_REMAPCONF EQU 0x11000020 ; Ƭѡռ估ַӳREMAPüĴ Mode_USR EQU 0x10 Mode_FIQ EQU 0x11 Mode_IRQ EQU 0x12 Mode_SVC EQU 0x13 Mode_ABT EQU 0x17 Mode_UND EQU 0x1B Mode_SYS EQU 0x1F I_Bit EQU 0x80 ; when I bit is set, IRQ is disabled F_Bit EQU 0x40 ; when F bit is set, FIQ is disabled NOINT EQU 0xc0 MASK_MODE EQU 0x0000003F MODE_SVC32 EQU 0x00000013 ; Internal Memory Base Addresses FLASH_BASE EQU 0x20000000 RAM_BASE EQU 0x04000000 SDRAM_BASE EQU 0x30000000 ; Stack Unused_Stack_Size EQU 0x00000100 Svc_Stack_Size EQU 0x00001000 Abt_Stack_Size EQU 0x00000000 Fiq_Stack_Size EQU 0x00000000 Irq_Stack_Size EQU 0x00001000 Usr_Stack_Size EQU 0x00000000 ;SVC STACK AREA STACK, NOINIT, READWRITE, ALIGN=3 Svc_Stack SPACE Svc_Stack_Size __initial_sp Svc_Stack_Top ;IRQ STACK AREA STACK, NOINIT, READWRITE, ALIGN=3 Irq_Stack SPACE Irq_Stack_Size Irq_Stack_Top ;UNUSED STACK AREA STACK, NOINIT, READWRITE, ALIGN=3 Unused_Stack SPACE Unused_Stack_Size Unused_Stack_Top ; Heap Heap_Size EQU 0x0000100 AREA HEAP, NOINIT, READWRITE, ALIGN=3 
EXPORT Heap_Mem __heap_base Heap_Mem SPACE Heap_Size __heap_limit PRESERVE8 ; Area Definition and Entry Point ; Startup Code must be linked first at Address at which it expects to run. AREA RESET, CODE, READONLY ARM ; Exception Vectors ; Mapped to Address 0. ; Absolute addressing mode must be used. ; Dummy Handlers are implemented as infinite loops which can be modified. EXPORT Entry_Point Entry_Point Vectors LDR PC,Reset_Addr LDR PC,Undef_Addr LDR PC,SWI_Addr LDR PC,PAbt_Addr LDR PC,DAbt_Addr NOP ; Reserved Vector LDR PC,IRQ_Addr LDR PC,FIQ_Addr Reset_Addr DCD Reset_Handler Undef_Addr DCD Undef_Handler SWI_Addr DCD SWI_Handler PAbt_Addr DCD PAbt_Handler DAbt_Addr DCD DAbt_Handler DCD 0 ; Reserved Address IRQ_Addr DCD IRQ_Handler FIQ_Addr DCD FIQ_Handler Undef_Handler B Undef_Handler SWI_Handler B SWI_Handler PAbt_Handler B Abort_Handler DAbt_Handler B Abort_Handler FIQ_Handler B FIQ_Handler Abort_Handler PROC ARM EXPORT Abort_Handler DeadLoop BHI DeadLoop ; Abort happened in irq mode, halt system. 
ENDP ; Reset Handler ;IMPORT __user_initial_stackheap EXPORT Reset_Handler Reset_Handler ;**************************************************************** ;* Shutdown watchdog ;**************************************************************** LDR R0,=RTC_CTR LDR R1,=0x0 STR R1,[R0] ;**************************************************************** ;* shutdown interrupts ;**************************************************************** MRS R0, CPSR BIC R0, R0, #MASK_MODE ORR R0, R0, #MODE_SVC32 ORR R0, R0, #I_Bit ORR R0, R0, #F_Bit MSR CPSR_c, r0 LDR R0,=INTC_IER LDR R1,=0x0 STR R1,[R0] LDR R0,=INTC_IMR LDR R1,=0xFFFFFFFF STR R1,[R0] LDR R0,=INTC_FIER LDR R1,=0x0 STR R1,[R0] LDR R0,=INTC_FIMR LDR R1,=0x0F STR R1,[R0] ;**************************************************************** ;* Initialize Stack Pointer ;**************************************************************** LDR SP, =Svc_Stack_Top ;init SP_svc MOV R4, #0xD2 ;chmod to irq and init SP_irq MSR cpsr_c, R4 LDR SP, =Irq_Stack_Top MOV R4, #0XD1 ;chomod to fiq and init SP_fiq MSR cpsr_c, R4 LDR SP, =Unused_Stack_Top MOV R4, #0XD7 ;chomod to abt and init SP_ABT MSR cpsr_c, R4 LDR SP, =Unused_Stack_Top MOV R4, #0XDB ;chomod to undf and init SP_UNDF MSR cpsr_c, R4 LDR SP, =Unused_Stack_Top ;chomod to abt and init SP_sys MOV R4, #0xDF ;all interrupts disabled MSR cpsr_c, R4 ;SYSTEM mode, @32-bit code mode LDR SP, =Unused_Stack_Top MOV R4, #0XD3 ;chmod to svc modle, CPSR IRQ bit is disable MSR cpsr_c, R4 ;**************************************************************** ;* Initialize PMU & System Clock ;**************************************************************** LDR R4, =PMU_PCSR ; ģʱ LDR R5, =0x0001ffff STR R5, [ R4 ] LDR R4, =PMU_PLTR ; PLLȶʱΪֵ50us*100M. 
LDR R5, =0x00fa00fa STR R5, [ R4 ] LDR R4, =PMU_PMDR ; SLOWģʽNORMALģʽ LDR R5, =0x00000001 STR R5, [ R4 ] LDR R4, =PMU_PMCR ; ϵͳʱΪ80MHz LDR R5, =0x00004009 ; 400b -- 88M STR R5, [ R4 ] ;PMU_PMCRĴ15λҪдӵ͵ߵķתܴPLLʱ LDR R4, =PMU_PMCR LDR R5, =0x0000c009 STR R5, [ R4 ] ;**************************************************************** ;* ʼEMI ;**************************************************************** IF :DEF:INIT_EMI LDR R4, =EMI_CSACONF ; CSAƬѡʱ LDR R5, =0x08a6a6a1 STR R5, [ R4 ] LDR R4, =EMI_CSECONF ; CSEƬѡʱ, LDR R5, =0x8cfffff1 STR R5, [ R4 ] LDR R4, =EMI_SDCONF1 ; SDRAM1 LDR R5, =0x1E104177 STR R5, [ R4 ] LDR R4, =EMI_SDCONF2 ; SDRAM2 LDR R5, =0x80001860 STR R5, [ R4 ] ENDIF ; Copy Exception Vectors to Internal RAM IF :DEF:RAM_INTVEC ADR R8, Vectors ; Source LDR R9, =RAM_BASE ; Destination LDMIA R8!, {R0-R7} ; Load Vectors STMIA R9!, {R0-R7} ; Store Vectors LDMIA R8!, {R0-R7} ; Load Handler Addresses STMIA R9!, {R0-R7} ; Store Handler Addresses ENDIF ; Remap on-chip RAM to address 0 IF :DEF:REMAP LDR R0, =EMI_REMAPCONF IF :DEF:RAM_INTVEC MOV R1, #0x80000000 ELSE MOV R1, #0x0000000b ENDIF STR R1, [R0, #0] ; Remap ENDIF ;*************************************************************** ;* Open irq interrupt ;*************************************************************** MRS R4, cpsr BIC R4, R4, #0x80 ; set bit7 to zero MSR cpsr_c, R4 ; Enter the C code IMPORT __main LDR R0,=__main BX R0 IMPORT rt_interrupt_enter IMPORT rt_interrupt_leave IMPORT rt_thread_switch_interrupt_flag IMPORT rt_interrupt_from_thread IMPORT rt_interrupt_to_thread IMPORT rt_hw_trap_irq IRQ_Handler PROC EXPORT IRQ_Handler STMFD sp!, {r0-r12,lr} BL rt_interrupt_enter BL rt_hw_trap_irq BL rt_interrupt_leave ; if rt_thread_switch_interrupt_flag set, jump to ; rt_hw_context_switch_interrupt_do and don't return LDR r0, =rt_thread_switch_interrupt_flag LDR r1, [r0] CMP r1, #1 BEQ rt_hw_context_switch_interrupt_do LDMFD sp!, {r0-r12,lr} SUBS pc, lr, #4 ENDP ; /* ; * void 
rt_hw_context_switch_interrupt_do(rt_base_t flag) ; */ rt_hw_context_switch_interrupt_do PROC EXPORT rt_hw_context_switch_interrupt_do MOV r1, #0 ; clear flag STR r1, [r0] LDMFD sp!, {r0-r12,lr}; reload saved registers STMFD sp!, {r0-r3} ; save r0-r3 MOV r1, sp ADD sp, sp, #16 ; restore sp SUB r2, lr, #4 ; save old task's pc to r2 MRS r3, spsr ; get cpsr of interrupt thread ; switch to SVC mode and no interrupt MSR cpsr_c, #I_Bit :OR F_Bit :OR Mode_SVC STMFD sp!, {r2} ; push old task's pc STMFD sp!, {r4-r12,lr}; push old task's lr,r12-r4 MOV r4, r1 ; Special optimised code below MOV r5, r3 LDMFD r4!, {r0-r3} STMFD sp!, {r0-r3} ; push old task's r3-r0 STMFD sp!, {r5} ; push old task's cpsr MRS r4, spsr STMFD sp!, {r4} ; push old task's spsr LDR r4, =rt_interrupt_from_thread LDR r5, [r4] STR sp, [r5] ; store sp in preempted tasks's TCB LDR r6, =rt_interrupt_to_thread LDR r6, [r6] LDR sp, [r6] ; get new task's stack pointer LDMFD sp!, {r4} ; pop new task's spsr MSR spsr_cxsf, r4 LDMFD sp!, {r4} ; pop new task's psr MSR cpsr_cxsf, r4 LDMFD sp!, {r0-r12,lr,pc} ; pop new task's r0-r12,lr & pc ENDP ALIGN IF :DEF:__MICROLIB EXPORT __heap_base EXPORT __heap_limit EXPORT __initial_sp ELSE ;__MICROLIB ; User Initial Stack & Heap AREA |.text|, CODE, READONLY IMPORT __use_two_region_memory EXPORT __user_initial_stackheap __user_initial_stackheap LDR R0, = Heap_Mem LDR R1, = (Svc_Stack + Svc_Stack_Size) LDR R2, = (Heap_Mem + Heap_Size) LDR R3, = Svc_Stack BX LR ALIGN ENDIF END
Aladdin-Wang/MicroBoot_Demo
2,589
STM32F7_APP/rt-thread/libcpu/arm/s3c44b0/context_rvds.S
;/* ; * Copyright (c) 2006-2022, RT-Thread Development Team ; * ; * SPDX-License-Identifier: Apache-2.0 ; * ; * Change Logs: ; * Date Author Notes ; * 2009-01-20 Bernard first version ; */ NOINT EQU 0xc0 ; disable interrupt in psr AREA |.text|, CODE, READONLY, ALIGN=2 ARM REQUIRE8 PRESERVE8 ;/* ; * rt_base_t rt_hw_interrupt_disable(); ; */ rt_hw_interrupt_disable PROC EXPORT rt_hw_interrupt_disable MRS r0, cpsr ORR r1, r0, #NOINT MSR cpsr_c, r1 BX lr ENDP ;/* ; * void rt_hw_interrupt_enable(rt_base_t level); ; */ rt_hw_interrupt_enable PROC EXPORT rt_hw_interrupt_enable MSR cpsr_c, r0 BX lr ENDP ;/* ; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to); ; * r0 --> from ; * r1 --> to ; */ rt_hw_context_switch PROC EXPORT rt_hw_context_switch STMFD sp!, {lr} ; push pc (lr should be pushed in place of PC) STMFD sp!, {r0-r12, lr} ; push lr & register file MRS r4, cpsr STMFD sp!, {r4} ; push cpsr MRS r4, spsr STMFD sp!, {r4} ; push spsr STR sp, [r0] ; store sp in preempted tasks TCB LDR sp, [r1] ; get new task stack pointer LDMFD sp!, {r4} ; pop new task spsr MSR spsr_cxsf, r4 LDMFD sp!, {r4} ; pop new task cpsr MSR cpsr_cxsf, r4 LDMFD sp!, {r0-r12, lr, pc} ; pop new task r0-r12, lr & pc ENDP ;/* ; * void rt_hw_context_switch_to(rt_uint32 to); ; * r0 --> to ; */ rt_hw_context_switch_to PROC EXPORT rt_hw_context_switch_to LDR sp, [r0] ; get new task stack pointer LDMFD sp!, {r4} ; pop new task spsr MSR spsr_cxsf, r4 LDMFD sp!, {r4} ; pop new task cpsr MSR cpsr_cxsf, r4 LDMFD sp!, {r0-r12, lr, pc} ; pop new task r0-r12, lr & pc ENDP ;/* ; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to); ; */ IMPORT rt_thread_switch_interrupt_flag IMPORT rt_interrupt_from_thread IMPORT rt_interrupt_to_thread rt_hw_context_switch_interrupt PROC EXPORT rt_hw_context_switch_interrupt LDR r2, =rt_thread_switch_interrupt_flag LDR r3, [r2] CMP r3, #1 BEQ _reswitch MOV r3, #1 ; set rt_thread_switch_interrupt_flag to 1 STR r3, [r2] LDR r2, =rt_interrupt_from_thread ; set 
rt_interrupt_from_thread STR r0, [r2] _reswitch LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread STR r1, [r2] BX lr ENDP END
Aladdin-Wang/MicroBoot_Demo
6,112
STM32F7_APP/rt-thread/libcpu/arm/s3c44b0/start_gcc.S
/* * Copyright (c) 2006-2022, RT-Thread Development Team * * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2006-09-06 XuXinming first version * 2006-09-20 Bernard clean the code */ /** * @addtogroup S3C44B0 */ /*@{*/ .section .init, "ax" .code 32 .globl _start _start: b reset ldr pc, _vector_undef ldr pc, _vector_swi ldr pc, _vector_pabt ldr pc, _vector_dabt ldr pc, _vector_resv ldr pc, _vector_irq ldr pc, _vector_fiq _vector_undef: .word vector_undef _vector_swi: .word vector_swi _vector_pabt: .word vector_pabt _vector_dabt: .word vector_dabt _vector_resv: .word vector_resv _vector_irq: .word vector_irq _vector_fiq: .word vector_fiq .text .code 32 /* * rtthread kernel start and end * which are defined in linker script */ .globl _rtthread_start _rtthread_start:.word _start .globl _rtthread_end _rtthread_end: .word _end /* * rtthread bss start and end * which are defined in linker script */ .globl _bss_start _bss_start: .word __bss_start .globl _bss_end _bss_end: .word __bss_end #if defined(__FLASH_BUILD__) /* * TEXT_BASE, * which is defined in macro of make */ _TEXT_BASE: .word TEXT_BASE #endif .equ WTCON, 0x1d30000 .equ INTCON, 0x1e00000 .equ INTMSK, 0x1e0000c /* the system entry */ reset: /* enter svc mode */ msr cpsr_c, #SVCMODE|NOINT /*watch dog disable */ ldr r0,=WTCON ldr r1,=0x0 str r1,[r0] /* all interrupt disable */ ldr r0,=INTMSK ldr r1,=0x07ffffff str r1,[r0] ldr r1, =INTCON ldr r0, =0x05 str r0, [r1] #if defined(__FLASH_BUILD__) /* init lowlevel */ bl lowlevel_init #endif /* setup stack */ bl stack_setup #if defined(__FLASH_BUILD__) mov r0, #0x0 /* r0 <- flash base address */ ldr r1, _TEXT_BASE /* r1 <- the taget address */ ldr r2, _rtthread_start ldr r3, _bss_start sub r2, r3, r2 /* r2 <- size of rtthread kernel */ add r2, r0, r2 /* r2 <- source end address */ copy_loop: ldmia r0!, {r3-r10} /* copy from source address [r0] */ stmia r1!, {r3-r10} /* copy to target address [r1] */ cmp r0, r2 /* until source end address [r2] 
*/ ble copy_loop #endif /* start RT-Thread Kernel */ ldr pc, _rtthread_startup _rtthread_startup: .word rtthread_startup .equ USERMODE, 0x10 .equ FIQMODE, 0x11 .equ IRQMODE, 0x12 .equ SVCMODE, 0x13 .equ ABORTMODE, 0x17 .equ UNDEFMODE, 0x1b .equ MODEMASK, 0x1f .equ NOINT, 0xc0 /* exception handlers */ vector_undef: bl rt_hw_trap_udef vector_swi: bl rt_hw_trap_swi vector_pabt: bl rt_hw_trap_pabt vector_dabt: bl rt_hw_trap_dabt vector_resv: bl rt_hw_trap_resv .globl rt_interrupt_enter .globl rt_interrupt_leave .globl rt_thread_switch_interrupt_flag .globl rt_interrupt_from_thread .globl rt_interrupt_to_thread vector_irq: stmfd sp!, {r0-r12,lr} bl led_off bl rt_interrupt_enter bl rt_hw_trap_irq bl rt_interrupt_leave /* if rt_thread_switch_interrupt_flag set, jump to _interrupt_thread_switch and don't return */ ldr r0, =rt_thread_switch_interrupt_flag ldr r1, [r0] cmp r1, #1 beq _interrupt_thread_switch ldmfd sp!, {r0-r12,lr} subs pc, lr, #4 .align 5 vector_fiq: stmfd sp!,{r0-r7,lr} bl rt_hw_trap_fiq ldmfd sp!,{r0-r7,lr} subs pc,lr,#4 _interrupt_thread_switch: mov r1, #0 @ clear rt_thread_switch_interrupt_flag str r1, [r0] ldmfd sp!, {r0-r12,lr} @ reload saved registers stmfd sp!, {r0-r3} @ save r0-r3 mov r1, sp add sp, sp, #16 @ restore sp sub r2, lr, #4 @ save old task's pc to r2 mrs r3, spsr @ disable interrupt orr r0, r3, #NOINT msr spsr_c, r0 ldr r0, =.+8 @ switch to interrupted task's stack movs pc, r0 stmfd sp!, {r2} @ push old task's pc stmfd sp!, {r4-r12,lr} @ push old task's lr,r12-r4 mov r4, r1 @ Special optimised code below mov r5, r3 ldmfd r4!, {r0-r3} stmfd sp!, {r0-r3} @ push old task's r3-r0 stmfd sp!, {r5} @ push old task's psr mrs r4, spsr stmfd sp!, {r4} @ push old task's spsr ldr r4, =rt_interrupt_from_thread ldr r5, [r4] str sp, [r5] @ store sp in preempted tasks's TCB ldr r6, =rt_interrupt_to_thread ldr r6, [r6] ldr sp, [r6] @ get new task's stack pointer ldmfd sp!, {r4} @ pop new task's spsr msr SPSR_cxsf, r4 ldmfd sp!, {r4} @ pop new task's psr 
msr CPSR_cxsf, r4 ldmfd sp!, {r0-r12,lr,pc} @ pop new task's r0-r12,lr & pc /* each mode stack memory */ UNDSTACK_START: .word _undefined_stack_start + 128 ABTSTACK_START: .word _abort_stack_start + 128 FIQSTACK_START: .word _fiq_stack_start + 1024 IRQSTACK_START: .word _irq_stack_start + 1024 SVCSTACK_START: .word _svc_stack_start + 4096 stack_setup: /* undefined instruction mode */ msr cpsr_c, #UNDEFMODE|NOINT ldr sp, UNDSTACK_START /* abort mode */ msr cpsr_c, #ABORTMODE|NOINT ldr sp, ABTSTACK_START /* FIQ mode */ msr cpsr_c, #FIQMODE|NOINT ldr sp, FIQSTACK_START /* IRQ mode */ msr cpsr_c, #IRQMODE|NOINT ldr sp, IRQSTACK_START /* supervisor mode */ msr cpsr_c, #SVCMODE|NOINT ldr sp, SVCSTACK_START mov pc,lr @ The LR register may be not valid for the mode changes. .globl led_on led_on: ldr r1, =0x1d20014 @ r1<-PDATC ldr r0, [r1] @ r0<-[r1] orr r0, r0, #0x0e @ r0=r0 or 0x0e str r0, [r1] @ r0->[r1] mov pc, lr .globl led_off led_off: ldr r1, =0x1d20010 @ r1<-PCONC ldr r0, =0x5f555555 @ r0<-0x5f555555 str r0, [r1] @ r0->[r1] ldr r1, =0x1d20014 @ r1<-PDATC ldr r0, =0x0 @ r0<-00 str r0, [r1] @ r0->[r1] mov pc, lr
Aladdin-Wang/MicroBoot_Demo
2,336
STM32F7_APP/rt-thread/libcpu/arm/s3c44b0/context_gcc.S
/* * Copyright (c) 2006-2022, RT-Thread Development Team * * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2006-09-06 XuXinming first version */ /*! * \addtogroup S3C44B0 */ /*@{*/ #define NOINT 0xc0 /* * rt_base_t rt_hw_interrupt_disable(); */ .globl rt_hw_interrupt_disable rt_hw_interrupt_disable: mrs r0, cpsr orr r1, r0, #NOINT msr cpsr_c, r1 mov pc, lr /* * void rt_hw_interrupt_enable(rt_base_t level); */ .globl rt_hw_interrupt_enable rt_hw_interrupt_enable: msr cpsr, r0 mov pc, lr /* * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to); * r0 --> from * r1 --> to */ .globl rt_hw_context_switch rt_hw_context_switch: stmfd sp!, {lr} @ push pc (lr should be pushed in place of PC) stmfd sp!, {r0-r12, lr} @ push lr & register file mrs r4, cpsr stmfd sp!, {r4} @ push cpsr mrs r4, spsr stmfd sp!, {r4} @ push spsr str sp, [r0] @ store sp in preempted tasks TCB ldr sp, [r1] @ get new task stack pointer ldmfd sp!, {r4} @ pop new task spsr msr spsr_cxsf, r4 ldmfd sp!, {r4} @ pop new task cpsr msr cpsr_cxsf, r4 ldmfd sp!, {r0-r12, lr, pc} @ pop new task r0-r12, lr & pc /* * void rt_hw_context_switch_to(rt_uint32 to); * r0 --> to */ .globl rt_hw_context_switch_to rt_hw_context_switch_to: ldr sp, [r0] @ get new task stack pointer ldmfd sp!, {r4} @ pop new task spsr msr spsr_cxsf, r4 ldmfd sp!, {r4} @ pop new task cpsr msr cpsr_cxsf, r4 ldmfd sp!, {r0-r12, lr, pc} @ pop new task r0-r12, lr & pc /* * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to); */ .globl rt_thread_switch_interrupt_flag .globl rt_interrupt_from_thread .globl rt_interrupt_to_thread .globl rt_hw_context_switch_interrupt rt_hw_context_switch_interrupt: ldr r2, =rt_thread_switch_interrupt_flag ldr r3, [r2] cmp r3, #1 beq _reswitch mov r3, #1 @ set rt_thread_switch_interrupt_flag to 1 str r3, [r2] ldr r2, =rt_interrupt_from_thread @ set rt_interrupt_from_thread str r0, [r2] _reswitch: ldr r2, =rt_interrupt_to_thread @ set rt_interrupt_to_thread str r1, [r2] mov 
pc, lr
Aladdin-Wang/MicroBoot_Demo
43,817
STM32F7_APP/rt-thread/libcpu/arm/s3c44b0/start_rvds.S
;/*****************************************************************************/ ;/* S3C44B0X.S: Startup file for Samsung S3C44B0X */ ;/*****************************************************************************/ ;/* <<< Use Configuration Wizard in Context Menu >>> */ ;/*****************************************************************************/ ;/* This file is part of the uVision/ARM development tools. */ ;/* Copyright (c) 2005-2006 Keil Software. All rights reserved. */ ;/* This software may only be used under the terms of a valid, current, */ ;/* end user licence from KEIL for a compatible version of KEIL software */ ;/* development tools. Nothing else gives you the right to use this software. */ ;/*****************************************************************************/ ; *** Startup Code (executed after Reset) *** ; Standard definitions of Mode bits and Interrupt (I & F) flags in PSRs Mode_USR EQU 0x10 Mode_FIQ EQU 0x11 Mode_IRQ EQU 0x12 Mode_SVC EQU 0x13 Mode_ABT EQU 0x17 Mode_UND EQU 0x1B Mode_SYS EQU 0x1F I_Bit EQU 0x80 ; when I bit is set, IRQ is disabled F_Bit EQU 0x40 ; when F bit is set, FIQ is disabled ;// <h> Stack Configuration (Stack Sizes in Bytes) ;// <o0> Undefined Mode <0x0-0xFFFFFFFF:8> ;// <o1> Supervisor Mode <0x0-0xFFFFFFFF:8> ;// <o2> Abort Mode <0x0-0xFFFFFFFF:8> ;// <o3> Fast Interrupt Mode <0x0-0xFFFFFFFF:8> ;// <o4> Interrupt Mode <0x0-0xFFFFFFFF:8> ;// <o5> User/System Mode <0x0-0xFFFFFFFF:8> ;// </h> UND_Stack_Size EQU 0x00000000 SVC_Stack_Size EQU 0x00000100 ABT_Stack_Size EQU 0x00000000 FIQ_Stack_Size EQU 0x00000000 IRQ_Stack_Size EQU 0x00000100 USR_Stack_Size EQU 0x00000100 ISR_Stack_Size EQU (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \ FIQ_Stack_Size + IRQ_Stack_Size) AREA STACK, NOINIT, READWRITE, ALIGN=3 Stack_Mem SPACE USR_Stack_Size __initial_sp SPACE ISR_Stack_Size Stack_Top ;// <h> Heap Configuration ;// <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF> ;// </h> Heap_Size EQU 0x00000000 AREA HEAP, NOINIT, 
READWRITE, ALIGN=3 __heap_base Heap_Mem SPACE Heap_Size __heap_limit ; CPU Wrapper and Bus Priorities definitions CPUW_BASE EQU 0x01C00000 ; CPU Wrapper Base Address SYSCFG_OFS EQU 0x00 ; SYSCFG Offset NCACHBE0_OFS EQU 0x04 ; NCACHBE0 Offset NCACHBE1_OFS EQU 0x08 ; NCACHBE0 Offset BUSP_BASE EQU 0x01C40000 ; Bus Priority Base Address SBUSCON_OFS EQU 0x00 ; SBUSCON Offset ;// <e> CPU Wrapper and Bus Priorities ;// <h> CPU Wrapper ;// <o1.0> SE: Stall Enable ;// <o1.1..2> CM: Cache Mode ;// <0=> Disable Cache (8kB SRAM) ;// <1=> Half Cache Enable (4kB Cache, 4kB SRAM) ;// <2=> Reserved ;// <3=> Full Cache Enable (8kB Cache) ;// <o1.3> WE: Write Buffer Enable ;// <o1.4> RSE: Read Stall Enable ;// <o1.5> DA: Data Abort <0=> Enable <1=> Disable ;// <h> Non-cacheable Area 0 ;// <o2.0..15> Start Address <0x0-0x0FFFF000:0x1000><#/0x1000> ;// <i> SA = (Start Address) / 4k ;// <o2.16..31> End Address + 1 <0x0-0x10000000:0x1000><#/0x1000> ;// <i> SE = (End Address + 1) / 4k ;// </h> ;// <h> Non-cacheable Area 1 ;// <o3.0..15> Start Address <0x0-0x0FFFF000:0x1000><#/0x1000> ;// <i> SA = (Start Address) / 4k ;// <o3.16..31> End Address + 1 <0x0-0x10000000:0x1000><#/0x1000> ;// <i> SE = (End Address + 1) / 4k ;// </h> ;// </h> ;// <h> Bus Priorities ;// <o4.31> FIX: Fixed Priorities ;// <o4.6..7> LCD_DMA <0=> 1st <1=> 2nd <2=> 3rd <3=> 4th ;// <o4.4..5> ZDMA <0=> 1st <1=> 2nd <2=> 3rd <3=> 4th ;// <o4.2..3> BDMA <0=> 1st <1=> 2nd <2=> 3rd <3=> 4th ;// <o4.0..1> nBREQ <0=> 1st <1=> 2nd <2=> 3rd <3=> 4th ;// </h> ;// </e> SYS_SETUP EQU 0 SYSCFG_Val EQU 0x00000001 NCACHBE0_Val EQU 0x00000000 NCACHBE1_Val EQU 0x00000000 SBUSCON_Val EQU 0x80001B1B ;// <e> Vectored Interrupt Mode (for IRQ) ;// <o1.25> EINT0 <i> External Interrupt 0 ;// <o1.24> EINT1 <i> External Interrupt 1 ;// <o1.23> EINT2 <i> External Interrupt 2 ;// <o1.22> EINT3 <i> External Interrupt 3 ;// <o1.21> EINT4567 <i> External Interrupt 4/5/6/7 ;// <o1.20> TICK <i> RTC Time Tick Interrupt ;// <o1.19> ZDMA0 <i> General 
DMA0 Interrupt ;// <o1.18> ZDMA1 <i> General DMA1 Interrupt ;// <o1.17> BDMA0 <i> Bridge DMA0 Interrupt ;// <o1.16> BDMA1 <i> Bridge DMA1 Interrupt ;// <o1.15> WDT <i> Watchdog Timer Interrupt ;// <o1.14> UERR01 <i> UART0/1 Error Interrupt ;// <o1.13> TIMER0 <i> Timer0 Interrupt ;// <o1.12> TIMER1 <i> Timer1 Interrupt ;// <o1.11> TIMER2 <i> Timer2 Interrupt ;// <o1.10> TIMER3 <i> Timer3 Interrupt ;// <o1.9> TIMER4 <i> Timer4 Interrupt ;// <o1.8> TIMER5 <i> Timer5 Interrupt ;// <o1.7> URXD0 <i> UART0 Rx Interrupt ;// <o1.6> URXD1 <i> UART1 Rx Interrupt ;// <o1.5> IIC <i> IIC Interrupt ;// <o1.4> SIO <i> SIO Interrupt ;// <o1.3> UTXD0 <i> UART0 Tx Interrupt ;// <o1.2> UTXD1 <i> UART1 Tx Interrupt ;// <o1.1> RTC <i> RTC Alarm Interrupt ;// <o1.0> ADC <i> ADC EOC Interrupt ;// </e> VIM_SETUP EQU 0 VIM_CFG EQU 0x00000000 ; Clock Management definitions CLK_BASE EQU 0x01D80000 ; Clock Base Address PLLCON_OFS EQU 0x00 ; PLLCON Offset CLKCON_OFS EQU 0x04 ; CLKCON Offset CLKSLOW_OFS EQU 0x08 ; CLKSLOW Offset LOCKTIME_OFS EQU 0x0C ; LOCKTIME Offset ;// <e> Clock Management ;// <h> PLL Settings ;// <i> Fpllo = (m * Fin) / (p * 2^s), 20MHz < Fpllo < 66MHz ;// <o1.12..19> MDIV: Main divider <0x0-0xFF> ;// <i> m = MDIV + 8 ;// <o1.4..9> PDIV: Pre-divider <0x0-0x3F> ;// <i> p = PDIV + 2, 1MHz <= Fin/p < 2MHz ;// <o1.0..1> SDIV: Post Divider <0x0-0x03> ;// <i> s = SDIV, Fpllo * 2^s < 170MHz ;// <o4.0..11> LTIME CNT: PLL Lock Time Count <0x0-0x0FFF> ;// </h> ;// <h> Master Clock ;// <i> PLL Clock: Fout = Fpllo ;// <i> Slow Clock: Fout = Fin / (2 * SLOW_VAL), SLOW_VAL > 0 ;// <i> Slow Clock: Fout = Fin, SLOW_VAL = 0 ;// <o3.5> PLL_OFF: PLL Off ;// <i> PLL is turned Off only when SLOW_BIT = 1 ;// <o3.4> SLOW_BIT: Slow Clock ;// <o3.0..3> SLOW_VAL: Slow Clock divider <0x0-0x0F> ;// </h> ;// <h> Clock Generation ;// <o2.14> IIS <0=> Disable <1=> Enable ;// <o2.13> IIC <0=> Disable <1=> Enable ;// <o2.12> ADC <0=> Disable <1=> Enable ;// <o2.11> RTC <0=> Disable <1=> Enable ;// <o2.10> 
GPIO <0=> Disable <1=> Enable ;// <o2.9> UART1 <0=> Disable <1=> Enable ;// <o2.8> UART0 <0=> Disable <1=> Enable ;// <o2.7> BDMA0,1 <0=> Disable <1=> Enable ;// <o2.6> LCDC <0=> Disable <1=> Enable ;// <o2.5> SIO <0=> Disable <1=> Enable ;// <o2.4> ZDMA0,1 <0=> Disable <1=> Enable ;// <o2.3> PWMTIMER <0=> Disable <1=> Enable ;// </h> ;// </e> CLK_SETUP EQU 1 PLLCON_Val EQU 0x00038080 CLKCON_Val EQU 0x00007FF8 CLKSLOW_Val EQU 0x00000009 LOCKTIME_Val EQU 0x00000FFF ; Watchdog Timer definitions WT_BASE EQU 0x01D30000 ; WT Base Address WTCON_OFS EQU 0x00 ; WTCON Offset WTDAT_OFS EQU 0x04 ; WTDAT Offset WTCNT_OFS EQU 0x08 ; WTCNT Offset ;// <e> Watchdog Timer ;// <o1.5> Watchdog Timer Enable/Disable ;// <o1.0> Reset Enable/Disable ;// <o1.2> Interrupt Enable/Disable ;// <o1.3..4> Clock Select ;// <0=> 1/16 <1=> 1/32 <2=> 1/64 <3=> 1/128 ;// <i> Clock Division Factor ;// <o1.8..15> Prescaler Value <0x0-0xFF> ;// <o2.0..15> Time-out Value <0x0-0xFFFF> ;// </e> WT_SETUP EQU 1 WTCON_Val EQU 0x00008000 WTDAT_Val EQU 0x00008000 ; Memory Controller definitions MC_BASE EQU 0x01C80000 ; Memory Controller Base Address ;// <e> Memory Controller MC_SETUP EQU 1 ;// <h> Bank 0 ;// <o0.0..1> PMC: Page Mode Configuration ;// <0=> 1 Data <1=> 4 Data <2=> 8 Data <3=> 16 Data ;// <o0.2..3> Tpac: Page Mode Access Cycle ;// <0=> 2 clks <1=> 3 clks <2=> 4 clks <3=> 6 clks ;// <o0.4..5> Tcah: Address Holding Time after nGCSn ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o0.6..7> Toch: Chip Select Hold on nOE ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o0.8..10> Tacc: Access Cycle ;// <0=> 1 clk <1=> 2 clks <2=> 3 clks <3=> 4 clks ;// <4=> 6 clk <5=> 8 clks <6=> 10 clks <7=> 14 clks ;// <o0.11..12> Tcos: Chip Select Set-up nOE ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o0.13..14> Tacs: Address Set-up before nGCSn ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// </h> ;// ;// <h> Bank 1 ;// <o8.4..5> DW: Data Bus Width ;// <0=> 8-bit <1=> 16-bit <2=> 32-bit 
<3=> Rsrvd ;// <o8.6> WS: WAIT Status ;// <0=> WAIT Disable ;// <1=> WAIT Enable ;// <o8.7> ST: SRAM Type ;// <0=> Not using UB/LB ;// <1=> Using UB/LB ;// <o1.0..1> PMC: Page Mode Configuration ;// <0=> 1 Data <1=> 4 Data <2=> 8 Data <3=> 16 Data ;// <o1.2..3> Tpac: Page Mode Access Cycle ;// <0=> 2 clks <1=> 3 clks <2=> 4 clks <3=> 6 clks ;// <o1.4..5> Tcah: Address Holding Time after nGCSn ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o1.6..7> Toch: Chip Select Hold on nOE ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o1.8..10> Tacc: Access Cycle ;// <0=> 1 clk <1=> 2 clks <2=> 3 clks <3=> 4 clks ;// <4=> 6 clk <5=> 8 clks <6=> 10 clks <7=> 14 clks ;// <o1.11..12> Tcos: Chip Select Set-up nOE ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o1.13..14> Tacs: Address Set-up before nGCSn ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// </h> ;// ;// <h> Bank 2 ;// <o8.8..9> DW: Data Bus Width ;// <0=> 8-bit <1=> 16-bit <2=> 32-bit <3=> Rsrvd ;// <o8.10> WS: WAIT Status ;// <0=> WAIT Disable ;// <1=> WAIT Enable ;// <o8.11> ST: SRAM Type ;// <0=> Not using UB/LB ;// <1=> Using UB/LB ;// <o2.0..1> PMC: Page Mode Configuration ;// <0=> 1 Data <1=> 4 Data <2=> 8 Data <3=> 16 Data ;// <o2.2..3> Tpac: Page Mode Access Cycle ;// <0=> 2 clks <1=> 3 clks <2=> 4 clks <3=> 6 clks ;// <o2.4..5> Tcah: Address Holding Time after nGCSn ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o2.6..7> Toch: Chip Select Hold on nOE ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o2.8..10> Tacc: Access Cycle ;// <0=> 1 clk <1=> 2 clks <2=> 3 clks <3=> 4 clks ;// <4=> 6 clk <5=> 8 clks <6=> 10 clks <7=> 14 clks ;// <o2.11..12> Tcos: Chip Select Set-up nOE ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o2.13..14> Tacs: Address Set-up before nGCSn ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// </h> ;// ;// <h> Bank 3 ;// <o8.12..13> DW: Data Bus Width ;// <0=> 8-bit <1=> 16-bit <2=> 32-bit <3=> Rsrvd ;// <o8.14> WS: WAIT Status ;// <0=> 
WAIT Disable ;// <1=> WAIT Enable ;// <o8.15> ST: SRAM Type ;// <0=> Not using UB/LB ;// <1=> Using UB/LB ;// <o3.0..1> PMC: Page Mode Configuration ;// <0=> 1 Data <1=> 4 Data <2=> 8 Data <3=> 16 Data ;// <o3.2..3> Tpac: Page Mode Access Cycle ;// <0=> 2 clks <1=> 3 clks <2=> 4 clks <3=> 6 clks ;// <o3.4..5> Tcah: Address Holding Time after nGCSn ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o3.6..7> Toch: Chip Select Hold on nOE ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o3.8..10> Tacc: Access Cycle ;// <0=> 1 clk <1=> 2 clks <2=> 3 clks <3=> 4 clks ;// <4=> 6 clk <5=> 8 clks <6=> 10 clks <7=> 14 clks ;// <o3.11..12> Tcos: Chip Select Set-up nOE ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o3.13..14> Tacs: Address Set-up before nGCSn ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// </h> ;// ;// <h> Bank 4 ;// <o8.16..17> DW: Data Bus Width ;// <0=> 8-bit <1=> 16-bit <2=> 32-bit <3=> Rsrvd ;// <o8.18> WS: WAIT Status ;// <0=> WAIT Disable ;// <1=> WAIT Enable ;// <o8.19> ST: SRAM Type ;// <0=> Not using UB/LB ;// <1=> Using UB/LB ;// <o4.0..1> PMC: Page Mode Configuration ;// <0=> 1 Data <1=> 4 Data <2=> 8 Data <3=> 16 Data ;// <o4.2..3> Tpac: Page Mode Access Cycle ;// <0=> 2 clks <1=> 3 clks <2=> 4 clks <3=> 6 clks ;// <o4.4..5> Tcah: Address Holding Time after nGCSn ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o4.6..7> Toch: Chip Select Hold on nOE ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o4.8..10> Tacc: Access Cycle ;// <0=> 1 clk <1=> 2 clks <2=> 3 clks <3=> 4 clks ;// <4=> 6 clk <5=> 8 clks <6=> 10 clks <7=> 14 clks ;// <o4.11..12> Tcos: Chip Select Set-up nOE ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o4.13..14> Tacs: Address Set-up before nGCSn ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// </h> ;// ;// <h> Bank 5 ;// <o8.20..21> DW: Data Bus Width ;// <0=> 8-bit <1=> 16-bit <2=> 32-bit <3=> Rsrvd ;// <o8.22> WS: WAIT Status ;// <0=> WAIT Disable ;// <1=> WAIT Enable ;// <o8.23> 
ST: SRAM Type ;// <0=> Not using UB/LB ;// <1=> Using UB/LB ;// <o5.0..1> PMC: Page Mode Configuration ;// <0=> 1 Data <1=> 4 Data <2=> 8 Data <3=> 16 Data ;// <o5.2..3> Tpac: Page Mode Access Cycle ;// <0=> 2 clks <1=> 3 clks <2=> 4 clks <3=> 6 clks ;// <o5.4..5> Tcah: Address Holding Time after nGCSn ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o5.6..7> Toch: Chip Select Hold on nOE ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o5.8..10> Tacc: Access Cycle ;// <0=> 1 clk <1=> 2 clks <2=> 3 clks <3=> 4 clks ;// <4=> 6 clk <5=> 8 clks <6=> 10 clks <7=> 14 clks ;// <o5.11..12> Tcos: Chip Select Set-up nOE ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o5.13..14> Tacs: Address Set-up before nGCSn ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// </h> ;// ;// <h> Bank 6 ;// <o10.0..2> BK76MAP: Bank 6/7 Memory Map ;// <0=> 32M <4=> 2M <5=> 4M <6=> 8M <7=> 16M ;// <o8.24..25> DW: Data Bus Width ;// <0=> 8-bit <1=> 16-bit <2=> 32-bit <3=> Rsrvd ;// <o8.26> WS: WAIT Status ;// <0=> WAIT Disable ;// <1=> WAIT Enable ;// <o8.27> ST: SRAM Type ;// <0=> Not using UB/LB ;// <1=> Using UB/LB ;// <o6.15..16> MT: Memory Type ;// <0=> ROM or SRAM ;// <1=> FP DRAMP ;// <2=> EDO DRAM ;// <3=> SDRAM ;// <h> ROM or SRAM ;// <o6.0..1> PMC: Page Mode Configuration ;// <0=> 1 Data <1=> 4 Data <2=> 8 Data <3=> 16 Data ;// <o6.2..3> Tpac: Page Mode Access Cycle ;// <0=> 2 clks <1=> 3 clks <2=> 4 clks <3=> 6 clks ;// <o6.4..5> Tcah: Address Holding Time after nGCSn ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o6.6..7> Toch: Chip Select Hold on nOE ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o6.8..10> Tacc: Access Cycle ;// <0=> 1 clk <1=> 2 clks <2=> 3 clks <3=> 4 clks ;// <4=> 6 clk <5=> 8 clks <6=> 10 clks <7=> 14 clks ;// <o6.11..12> Tcos: Chip Select Set-up nOE ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o6.13..14> Tacs: Address Set-up before nGCSn ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// </h> ;// <h> FP DRAM or 
EDO DRAM ;// <o6.0..1> CAN: Columnn Address Number ;// <0=> 8-bit <1=> 9-bit <2=> 10-bit <3=> 11-bit ;// <o6.2> Tcp: CAS Pre-charge ;// <0=> 1 clk <1=> 2 clks ;// <o6.3> Tcas: CAS Pulse Width ;// <0=> 1 clk <1=> 2 clks ;// <o6.4..5> Trcd: RAS to CAS Delay ;// <0=> 1 clk <1=> 2 clks <2=> 3 clks <3=> 4 clks ;// </h> ;// <h> SDRAM ;// <o6.0..1> SCAN: Columnn Address Number ;// <0=> 8-bit <1=> 9-bit <2=> 10-bit <3=> Rsrvd ;// <o6.2..3> Trcd: RAS to CAS Delay ;// <0=> 2 clks <1=> 3 clks <2=> 4 clks <3=> Rsrvd ;// <o10.4> SCLKEN: SCLK Selection (Bank 6/7) ;// <0=> Normal ;// <1=> Reduced Power ;// <o11.0..2> BL: Burst Length ;// <0=> 1 ;// <o11.3> BT: Burst Type ;// <0=> Sequential ;// <o11.4..6> CL: CAS Latency ;// <0=> 1 clk <1=> 2 clks <2=> 3 clks ;// <o11.7..8> TM: Test Mode ;// <0=> Mode Register Set ;// <o11.9> WBL: Write Burst Length ;// <0=> 0 ;// </h> ;// </h> ;// ;// <h> Bank 7 ;// <o10.0..2> BK76MAP: Bank 6/7 Memory Map ;// <0=> 32M <4=> 2M <5=> 4M <6=> 8M <7=> 16M ;// <o8.28..29> DW: Data Bus Width ;// <0=> 8-bit <1=> 16-bit <2=> 32-bit <3=> Rsrvd ;// <o8.30> WS: WAIT Status ;// <0=> WAIT Disable ;// <1=> WAIT Enable ;// <o8.31> ST: SRAM Type ;// <0=> Not using UB/LB ;// <1=> Using UB/LB ;// <o7.15..16> MT: Memory Type ;// <0=> ROM or SRAM ;// <1=> FP DRAMP ;// <2=> EDO DRAM ;// <3=> SDRAM ;// <h> ROM or SRAM ;// <o7.0..1> PMC: Page Mode Configuration ;// <0=> 1 Data <1=> 4 Data <2=> 8 Data <3=> 16 Data ;// <o7.2..3> Tpac: Page Mode Access Cycle ;// <0=> 2 clks <1=> 3 clks <2=> 4 clks <3=> 6 clks ;// <o7.4..5> Tcah: Address Holding Time after nGCSn ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o7.6..7> Toch: Chip Select Hold on nOE ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o7.8..10> Tacc: Access Cycle ;// <0=> 1 clk <1=> 2 clks <2=> 3 clks <3=> 4 clks ;// <4=> 6 clk <5=> 8 clks <6=> 10 clks <7=> 14 clks ;// <o7.11..12> Tcos: Chip Select Set-up nOE ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// <o7.13..14> Tacs: Address Set-up 
before nGCSn ;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks ;// </h> ;// <h> FP DRAM or EDO DRAM ;// <o7.0..1> CAN: Columnn Address Number ;// <0=> 8-bit <1=> 9-bit <2=> 10-bit <3=> 11-bit ;// <o7.2> Tcp: CAS Pre-charge ;// <0=> 1 clk <1=> 2 clks ;// <o7.3> Tcas: CAS Pulse Width ;// <0=> 1 clk <1=> 2 clks ;// <o7.4..5> Trcd: RAS to CAS Delay ;// <0=> 1 clk <1=> 2 clks <2=> 3 clks <3=> 4 clks ;// </h> ;// <h> SDRAM ;// <o7.0..1> SCAN: Columnn Address Number ;// <0=> 8-bit <1=> 9-bit <2=> 10-bit <3=> Rsrvd ;// <o7.2..3> Trcd: RAS to CAS Delay ;// <0=> 2 clks <1=> 3 clks <2=> 4 clks <3=> Rsrvd ;// <o10.4> SCLKEN: SCLK Selection (Bank 6/7) ;// <0=> Normal ;// <1=> Reduced Power ;// <o12.0..2> BL: Burst Length ;// <0=> 1 ;// <o12.3> BT: Burst Type ;// <0=> Sequential ;// <o12.4..6> CL: CAS Latency ;// <0=> 1 clk <1=> 2 clks <2=> 3 clks ;// <o12.7..8> TM: Test Mode ;// <0=> Mode Register Set ;// <o12.9> WBL: Write Burst Length ;// <0=> 0 ;// </h> ;// </h> ;// ;// <h> Refresh ;// <o9.23> REFEN: DRAM/SDRAM Refresh ;// <0=> Disable <1=> Enable ;// <o9.22> TREFMD: DRAM/SDRAM Refresh Mode ;// <0=> CBR/Auto Refresh ;// <1=> Self Refresh ;// <o9.20..21> Trp: DRAM/SDRAM RAS Pre-charge Time ;// <0=> 1.5 clks (DRAM) / 2 clks (SDRAM) ;// <1=> 2.5 clks (DRAM) / 3 clks (SDRAM) ;// <2=> 3.5 clks (DRAM) / 4 clks (SDRAM) ;// <3=> 4.5 clks (DRAM) / Rsrvd (SDRAM) ;// <o9.18..19> Trc: SDRAM RC Min Time ;// <0=> 4 clks <1=> 5 clks <2=> 6 clks <3=> 7 clks ;// <o9.16..17> Tchr: DRAM CAS Hold Time ;// <0=> 1 clks <1=> 2 clks <2=> 3 clks <3=> 4 clks ;// <o9.0..10> Refresh Counter <0x0-0x07FF> ;// <i> Refresh Period = (2^11 - Refresh Count + 1) / MCLK ;// </h> BANKCON0_Val EQU 0x00000700 BANKCON1_Val EQU 0x00000700 BANKCON2_Val EQU 0x00000700 BANKCON3_Val EQU 0x00000700 BANKCON4_Val EQU 0x00000700 BANKCON5_Val EQU 0x00000700 BANKCON6_Val EQU 0x00018008 BANKCON7_Val EQU 0x00018008 BWSCON_Val EQU 0x00000000 REFRESH_Val EQU 0x00AC0000 BANKSIZE_Val EQU 0x00000000 MRSRB6_Val EQU 0x00000000 
MRSRB7_Val EQU 0x00000000 ;// </e> End of MC ; I/O Ports definitions PIO_BASE EQU 0x01D20000 ; PIO Base Address PCONA_OFS EQU 0x00 ; PCONA Offset PCONB_OFS EQU 0x08 ; PCONB Offset PCONC_OFS EQU 0x10 ; PCONC Offset PCOND_OFS EQU 0x1C ; PCOND Offset PCONE_OFS EQU 0x28 ; PCONE Offset PCONF_OFS EQU 0x34 ; PCONF Offset PCONG_OFS EQU 0x40 ; PCONG Offset PUPC_OFS EQU 0x18 ; PUPC Offset PUPD_OFS EQU 0x24 ; PUPD Offset PUPE_OFS EQU 0x30 ; PUPE Offset PUPF_OFS EQU 0x3C ; PUPF Offset PUPG_OFS EQU 0x48 ; PUPG Offset SPUCR_OFS EQU 0x4C ; SPUCR Offset ;// <e> I/O Configuration PIO_SETUP EQU 0 ;// <e> Port A ;// <o1.0> PA0 <0=> Output <1=> ADDR0 ;// <o1.1> PA1 <0=> Output <1=> ADDR16 ;// <o1.2> PA2 <0=> Output <1=> ADDR17 ;// <o1.3> PA3 <0=> Output <1=> ADDR18 ;// <o1.4> PA4 <0=> Output <1=> ADDR19 ;// <o1.5> PA5 <0=> Output <1=> ADDR20 ;// <o1.6> PA6 <0=> Output <1=> ADDR21 ;// <o1.7> PA7 <0=> Output <1=> ADDR22 ;// <o1.8> PA8 <0=> Output <1=> ADDR23 ;// <o1.9> PA9 <0=> Output <1=> ADDR24 ;// </e> PIOA_SETUP EQU 1 PCONA_Val EQU 0x000003FF ;// <e> Port B ;// <o1.0> PB0 <0=> Output <1=> SCKE ;// <o1.1> PB1 <0=> Output <1=> CKLK ;// <o1.2> PB2 <0=> Output <1=> nSCAS/nCAS2 ;// <o1.3> PB3 <0=> Output <1=> nSRAS/nCAS3 ;// <o1.4> PB4 <0=> Output <1=> nWBE2/nBE2/DQM2 ;// <o1.5> PB5 <0=> Output <1=> nWBE3/nBE3/DQM3 ;// <o1.6> PB6 <0=> Output <1=> nGCS1 ;// <o1.7> PB7 <0=> Output <1=> nGCS2 ;// <o1.8> PB8 <0=> Output <1=> nGCS3 ;// <o1.9> PB9 <0=> Output <1=> nGCS4 ;// <o1.10> PB10 <0=> Output <1=> nGCS5 ;// </e> PIOB_SETUP EQU 1 PCONB_Val EQU 0x000007FF ;// <e> Port C ;// <o1.0..1> PC0 <0=> Input <1=> Output <2=> DATA16 <3=> IISLRCK ;// <o1.2..3> PC1 <0=> Input <1=> Output <2=> DATA17 <3=> IISDO ;// <o1.4..5> PC2 <0=> Input <1=> Output <2=> DATA18 <3=> IISDI ;// <o1.6..7> PC3 <0=> Input <1=> Output <2=> DATA19 <3=> IISCLK ;// <o1.8..9> PC4 <0=> Input <1=> Output <2=> DATA20 <3=> VD7 ;// <o1.10..11> PC5 <0=> Input <1=> Output <2=> DATA21 <3=> VD6 ;// <o1.12..13> PC6 <0=> Input <1=> Output 
<2=> DATA22 <3=> VD5 ;// <o1.14..15> PC7 <0=> Input <1=> Output <2=> DATA23 <3=> VD4 ;// <o1.16..17> PC8 <0=> Input <1=> Output <2=> DATA24 <3=> nXDACK1 ;// <o1.18..19> PC9 <0=> Input <1=> Output <2=> DATA25 <3=> nXDREQ1 ;// <o1.20..21> PC10 <0=> Input <1=> Output <2=> DATA26 <3=> nRTS1 ;// <o1.22..23> PC11 <0=> Input <1=> Output <2=> DATA27 <3=> nCTS1 ;// <o1.24..25> PC12 <0=> Input <1=> Output <2=> DATA28 <3=> TxD1 ;// <o1.26..27> PC13 <0=> Input <1=> Output <2=> DATA29 <3=> RxD1 ;// <o1.28..29> PC14 <0=> Input <1=> Output <2=> DATA30 <3=> nRTS0 ;// <o1.30..31> PC15 <0=> Input <1=> Output <2=> DATA31 <3=> nCTS0 ;// <h> Pull-up Resistors ;// <o2.0> PC0 Pull-up <0=> Enabled <1=> Disabled ;// <o2.1> PC1 Pull-up <0=> Enabled <1=> Disabled ;// <o2.2> PC2 Pull-up <0=> Enabled <1=> Disabled ;// <o2.3> PC3 Pull-up <0=> Enabled <1=> Disabled ;// <o2.4> PC4 Pull-up <0=> Enabled <1=> Disabled ;// <o2.5> PC5 Pull-up <0=> Enabled <1=> Disabled ;// <o2.6> PC6 Pull-up <0=> Enabled <1=> Disabled ;// <o2.7> PC7 Pull-up <0=> Enabled <1=> Disabled ;// <o2.8> PC8 Pull-up <0=> Enabled <1=> Disabled ;// <o2.9> PC9 Pull-up <0=> Enabled <1=> Disabled ;// <o2.10> PC10 Pull-up <0=> Enabled <1=> Disabled ;// <o2.11> PC11 Pull-up <0=> Enabled <1=> Disabled ;// <o2.12> PC12 Pull-up <0=> Enabled <1=> Disabled ;// <o2.13> PC13 Pull-up <0=> Enabled <1=> Disabled ;// <o2.14> PC14 Pull-up <0=> Enabled <1=> Disabled ;// <o2.15> PC15 Pull-up <0=> Enabled <1=> Disabled ;// </h> ;// </e> PIOC_SETUP EQU 1 PCONC_Val EQU 0xAAAAAAAA PUPC_Val EQU 0x00000000 ;// <e> Port D ;// <o1.0..1> PD0 <0=> Input <1=> Output <2=> VD0 <3=> Reserved ;// <o1.2..3> PD1 <0=> Input <1=> Output <2=> VD1 <3=> Reserved ;// <o1.4..5> PD2 <0=> Input <1=> Output <2=> VD2 <3=> Reserved ;// <o1.6..7> PD3 <0=> Input <1=> Output <2=> VD3 <3=> Reserved ;// <o1.8..9> PD4 <0=> Input <1=> Output <2=> VCLK <3=> Reserved ;// <o1.10..11> PD5 <0=> Input <1=> Output <2=> VLINE <3=> Reserved ;// <o1.12..13> PD6 <0=> Input <1=> Output <2=> VM 
<3=> Reserved ;// <o1.14..15> PD7 <0=> Input <1=> Output <2=> VFRAME <3=> Reserved ;// <h> Pull-up Resistors ;// <o2.0> PD0 Pull-up <0=> Enabled <1=> Disabled ;// <o2.1> PD1 Pull-up <0=> Enabled <1=> Disabled ;// <o2.2> PD2 Pull-up <0=> Enabled <1=> Disabled ;// <o2.3> PD3 Pull-up <0=> Enabled <1=> Disabled ;// <o2.4> PD4 Pull-up <0=> Enabled <1=> Disabled ;// <o2.5> PD5 Pull-up <0=> Enabled <1=> Disabled ;// <o2.6> PD6 Pull-up <0=> Enabled <1=> Disabled ;// <o2.7> PD7 Pull-up <0=> Enabled <1=> Disabled ;// </h> ;// </e> PIOD_SETUP EQU 1 PCOND_Val EQU 0x00000000 PUPD_Val EQU 0x00000000 ;// <e> Port E ;// <o1.0..1> PE0 <0=> Input <1=> Output <2=> Fpllo <3=> Fout ;// <o1.2..3> PE1 <0=> Input <1=> Output <2=> TxD0 <3=> Reserved ;// <o1.4..5> PE2 <0=> Input <1=> Output <2=> RxD0 <3=> Reserved ;// <o1.6..7> PE3 <0=> Input <1=> Output <2=> TOUT0 <3=> Reserved ;// <o1.8..9> PE4 <0=> Input <1=> Output <2=> TOUT1 <3=> TCLK ;// <o1.10..11> PE5 <0=> Input <1=> Output <2=> TOUT2 <3=> TCLK ;// <o1.12..13> PE6 <0=> Input <1=> Output <2=> TOUT3 <3=> VD6 ;// <o1.14..15> PE7 <0=> Input <1=> Output <2=> TOUT4 <3=> VD7 ;// <o1.16..17> PE8 <0=> Input <1=> Output <2=> CODECLK <3=> Reserved ;// <h> Pull-up Resistors ;// <o2.0> PE0 Pull-up <0=> Enabled <1=> Disabled ;// <o2.1> PE1 Pull-up <0=> Enabled <1=> Disabled ;// <o2.2> PE2 Pull-up <0=> Enabled <1=> Disabled ;// <o2.3> PE3 Pull-up <0=> Enabled <1=> Disabled ;// <o2.4> PE4 Pull-up <0=> Enabled <1=> Disabled ;// <o2.5> PE5 Pull-up <0=> Enabled <1=> Disabled ;// <o2.6> PE6 Pull-up <0=> Enabled <1=> Disabled ;// <o2.7> PE7 Pull-up <0=> Enabled <1=> Disabled ;// <o2.8> PE8 Pull-up <0=> Enabled <1=> Disabled ;// </h> ;// </e> PIOE_SETUP EQU 1 PCONE_Val EQU 0x00000000 PUPE_Val EQU 0x00000000 ;// <e> Port F ;// <o1.0..1> PF0 <0=> Input <1=> Output <2=> IICSCL <3=> Reserved ;// <o1.2..3> PF1 <0=> Input <1=> Output <2=> IICSDA <3=> Reserved ;// <o1.4..5> PF2 <0=> Input <1=> Output <2=> nWAIT <3=> Reserved ;// <o1.6..7> PF3 <0=> Input <1=> 
Output <2=> nXBACK <3=> nXDACK0 ;// <o1.8..9> PF4 <0=> Input <1=> Output <2=> nXBREQ <3=> nXDREQ0 ;// <o1.10..12> PF5 <0=> Input <1=> Output <2=> nRTS1 <3=> SIOTxD ;// <4=> IISLRCK <5=> Reserved <6=> Reserved <7=> Reserved ;// <o1.13..15> PF6 <0=> Input <1=> Output <2=> TxD1 <3=> SIORDY ;// <4=> IISDO <5=> Reserved <6=> Reserved <7=> Reserved ;// <o1.16..18> PF7 <0=> Input <1=> Output <2=> RxD1 <3=> SIORxD ;// <4=> IISDI <5=> Reserved <6=> Reserved <7=> Reserved ;// <o1.19..21> PF8 <0=> Input <1=> Output <2=> nCTS1 <3=> SIOCLK ;// <4=> IISCLK <5=> Reserved <6=> Reserved <7=> Reserved ;// <h> Pull-up Resistors ;// <o2.0> PF0 Pull-up <0=> Enabled <1=> Disabled ;// <o2.1> PF1 Pull-up <0=> Enabled <1=> Disabled ;// <o2.2> PF2 Pull-up <0=> Enabled <1=> Disabled ;// <o2.3> PF3 Pull-up <0=> Enabled <1=> Disabled ;// <o2.4> PF4 Pull-up <0=> Enabled <1=> Disabled ;// <o2.5> PF5 Pull-up <0=> Enabled <1=> Disabled ;// <o2.6> PF6 Pull-up <0=> Enabled <1=> Disabled ;// <o2.7> PF7 Pull-up <0=> Enabled <1=> Disabled ;// <o2.8> PF8 Pull-up <0=> Enabled <1=> Disabled ;// </h> ;// </e> PIOF_SETUP EQU 1 PCONF_Val EQU 0x00000000 PUPF_Val EQU 0x00000000 ;// <e> Port G ;// <o1.0..1> PG0 <0=> Input <1=> Output <2=> VD4 <3=> EINT0 ;// <o1.2..3> PG1 <0=> Input <1=> Output <2=> VD5 <3=> EINT1 ;// <o1.4..5> PG2 <0=> Input <1=> Output <2=> nCTS0 <3=> EINT2 ;// <o1.6..7> PG3 <0=> Input <1=> Output <2=> nRTS0 <3=> EINT3 ;// <o1.8..9> PG4 <0=> Input <1=> Output <2=> IISCLK <3=> EINT4 ;// <o1.10..11> PG5 <0=> Input <1=> Output <2=> IISDI <3=> EINT5 ;// <o1.12..13> PG6 <0=> Input <1=> Output <2=> IISDO <3=> EINT6 ;// <o1.14..15> PG7 <0=> Input <1=> Output <2=> IISLRCK <3=> EINT7 ;// <h> Pull-up Resistors ;// <o2.0> PG0 Pull-up <0=> Enabled <1=> Disabled ;// <o2.1> PG1 Pull-up <0=> Enabled <1=> Disabled ;// <o2.2> PG2 Pull-up <0=> Enabled <1=> Disabled ;// <o2.3> PG3 Pull-up <0=> Enabled <1=> Disabled ;// <o2.4> PG4 Pull-up <0=> Enabled <1=> Disabled ;// <o2.5> PG5 Pull-up <0=> Enabled <1=> 
Disabled ;// <o2.6> PG6 Pull-up <0=> Enabled <1=> Disabled ;// <o2.7> PG7 Pull-up <0=> Enabled <1=> Disabled ;// </h> ;// </e> PIOG_SETUP EQU 1 PCONG_Val EQU 0x00000000 PUPG_Val EQU 0x00000000 ;// <e> Special Pull-up ;// <o1.0> SPUCR0: DATA[7:0] Pull-up Resistor ;// <0=> Enabled <1=> Disabled ;// <o1.1> SPUCR1: DATA[15:8] Pull-up Resistor ;// <0=> Enabled <1=> Disabled ;// <o1.2> HZ@STOP ;// <0=> Prevoius state of PAD ;// <1=> HZ @ Stop ;// </e> PSPU_SETUP EQU 1 SPUCR_Val EQU 0x00000004 ;// </e> PRESERVE8 ; Area Definition and Entry Point ; Startup Code must be linked first at Address at which it expects to run. AREA RESET, CODE, READONLY ARM ; Exception Vectors ; Mapped to Address 0. ; Absolute addressing mode must be used. ; Dummy Handlers are implemented as infinite loops which can be modified. Vectors LDR PC, Reset_Addr LDR PC, Undef_Addr LDR PC, SWI_Addr LDR PC, PAbt_Addr LDR PC, DAbt_Addr NOP ; Reserved Vector LDR PC, IRQ_Addr LDR PC, FIQ_Addr Reset_Addr DCD Reset_Handler Undef_Addr DCD Undef_Handler SWI_Addr DCD SWI_Handler PAbt_Addr DCD PAbt_Handler DAbt_Addr DCD DAbt_Handler DCD 0 ; Reserved Address IRQ_Addr DCD IRQ_Handler FIQ_Addr DCD FIQ_Handler Undef_Handler B Undef_Handler SWI_Handler B SWI_Handler PAbt_Handler B PAbt_Handler DAbt_Handler B DAbt_Handler FIQ_Handler B FIQ_Handler ; CPU Wrapper and Bus Priorities Configuration IF SYS_SETUP <> 0 SYS_CFG DCD CPUW_BASE DCD BUSP_BASE DCD SYSCFG_Val DCD NCACHBE0_Val DCD NCACHBE1_Val DCD SBUSCON_Val ENDIF ; Memory Controller Configuration IF MC_SETUP <> 0 MC_CFG DCD BWSCON_Val DCD BANKCON0_Val DCD BANKCON1_Val DCD BANKCON2_Val DCD BANKCON3_Val DCD BANKCON4_Val DCD BANKCON5_Val DCD BANKCON6_Val DCD BANKCON7_Val DCD REFRESH_Val DCD BANKSIZE_Val DCD MRSRB6_Val DCD MRSRB7_Val ENDIF ; Clock Management Configuration IF CLK_SETUP <> 0 CLK_CFG DCD CLK_BASE DCD PLLCON_Val DCD CLKCON_Val DCD CLKSLOW_Val DCD LOCKTIME_Val ENDIF ; I/O Configuration IF PIO_SETUP <> 0 PIO_CFG DCD PCONA_Val DCD PCONB_Val DCD PCONC_Val DCD 
PCOND_Val DCD PCONE_Val DCD PCONF_Val DCD PCONG_Val DCD PUPC_Val DCD PUPD_Val DCD PUPE_Val DCD PUPF_Val DCD PUPG_Val DCD SPUCR_Val ENDIF ; Reset Handler EXPORT Reset_Handler Reset_Handler IF SYS_SETUP <> 0 ADR R8, SYS_CFG LDMIA R8, {R0-R5} STMIA R0, {R2-R4} STR R5, [R1] ENDIF IF MC_SETUP <> 0 ADR R14, MC_CFG LDMIA R14, {R0-R12} LDR R14, =MC_BASE STMIA R14, {R0-R12} ENDIF IF CLK_SETUP <> 0 ADR R8, CLK_CFG LDMIA R8, {R0-R4} STR R4, [R0, #LOCKTIME_OFS] STR R1, [R0, #PLLCON_OFS] STR R3, [R0, #CLKSLOW_OFS] STR R2, [R0, #CLKCON_OFS] ENDIF IF WT_SETUP <> 0 LDR R0, =WT_BASE LDR R1, =WTCON_Val LDR R2, =WTDAT_Val STR R2, [R0, #WTCNT_OFS] STR R2, [R0, #WTDAT_OFS] STR R1, [R0, #WTCON_OFS] ENDIF IF PIO_SETUP <> 0 ADR R14, PIO_CFG LDMIA R14, {R0-R12} LDR R14, =PIO_BASE IF PIOA_SETUP <> 0 STR R0, [R14, #PCONA_OFS] ENDIF IF PIOB_SETUP <> 0 STR R1, [R14, #PCONB_OFS] ENDIF IF PIOC_SETUP <> 0 STR R2, [R14, #PCONC_OFS] STR R7, [R14, #PUPC_OFS] ENDIF IF PIOD_SETUP <> 0 STR R3, [R14, #PCOND_OFS] STR R8, [R14, #PUPD_OFS] ENDIF IF PIOE_SETUP <> 0 STR R4, [R14, #PCONE_OFS] STR R9, [R14, #PUPE_OFS] ENDIF IF PIOF_SETUP <> 0 STR R5, [R14, #PCONF_OFS] STR R10,[R14, #PUPF_OFS] ENDIF IF PIOG_SETUP <> 0 STR R6, [R14, #PCONG_OFS] STR R11,[R14, #PUPG_OFS] ENDIF IF PSPU_SETUP <> 0 STR R12,[R14, #SPUCR_OFS] ENDIF ENDIF ; Setup Stack for each mode LDR R0, =Stack_Top ; Enter Undefined Instruction Mode and set its Stack Pointer MSR CPSR_c, #Mode_UND:OR:I_Bit:OR:F_Bit MOV SP, R0 SUB R0, R0, #UND_Stack_Size ; Enter Abort Mode and set its Stack Pointer MSR CPSR_c, #Mode_ABT:OR:I_Bit:OR:F_Bit MOV SP, R0 SUB R0, R0, #ABT_Stack_Size ; Enter FIQ Mode and set its Stack Pointer MSR CPSR_c, #Mode_FIQ:OR:I_Bit:OR:F_Bit MOV SP, R0 SUB R0, R0, #FIQ_Stack_Size ; Enter IRQ Mode and set its Stack Pointer MSR CPSR_c, #Mode_IRQ:OR:I_Bit:OR:F_Bit MOV SP, R0 SUB R0, R0, #IRQ_Stack_Size ; Enter Supervisor Mode and set its Stack Pointer MSR CPSR_c, #Mode_SVC:OR:I_Bit:OR:F_Bit MOV SP, R0 SUB R0, R0, #SVC_Stack_Size ; Enter 
User Mode and set its Stack Pointer ; MSR CPSR_c, #Mode_USR IF :DEF:__MICROLIB EXPORT __initial_sp ELSE ; MOV SP, R0 ; SUB SL, SP, #USR_Stack_Size ENDIF ; Enter the C code IMPORT __main LDR R0, =__main BX R0 IMPORT rt_interrupt_enter IMPORT rt_interrupt_leave IMPORT rt_thread_switch_interrupt_flag IMPORT rt_interrupt_from_thread IMPORT rt_interrupt_to_thread IMPORT rt_hw_trap_irq IRQ_Handler PROC EXPORT IRQ_Handler STMFD sp!, {r0-r12,lr} BL rt_interrupt_enter BL rt_hw_trap_irq BL rt_interrupt_leave ; if rt_thread_switch_interrupt_flag set, jump to ; rt_hw_context_switch_interrupt_do and don't return LDR r0, =rt_thread_switch_interrupt_flag LDR r1, [r0] CMP r1, #1 BEQ rt_hw_context_switch_interrupt_do LDMFD sp!, {r0-r12,lr} SUBS pc, lr, #4 ENDP ; /* ; * void rt_hw_context_switch_interrupt_do(rt_base_t flag) ; */ rt_hw_context_switch_interrupt_do PROC EXPORT rt_hw_context_switch_interrupt_do MOV r1, #0 ; clear flag STR r1, [r0] LDMFD sp!, {r0-r12,lr}; reload saved registers STMFD sp!, {r0-r3} ; save r0-r3 MOV r1, sp ADD sp, sp, #16 ; restore sp SUB r2, lr, #4 ; save old task's pc to r2 MRS r3, spsr ; get cpsr of interrupt thread ; switch to SVC mode and no interrupt MSR cpsr_c, #I_Bit|F_Bit|Mode_SVC STMFD sp!, {r2} ; push old task's pc STMFD sp!, {r4-r12,lr}; push old task's lr,r12-r4 MOV r4, r1 ; Special optimised code below MOV r5, r3 LDMFD r4!, {r0-r3} STMFD sp!, {r0-r3} ; push old task's r3-r0 STMFD sp!, {r5} ; push old task's cpsr MRS r4, spsr STMFD sp!, {r4} ; push old task's spsr LDR r4, =rt_interrupt_from_thread LDR r5, [r4] STR sp, [r5] ; store sp in preempted tasks's TCB LDR r6, =rt_interrupt_to_thread LDR r6, [r6] LDR sp, [r6] ; get new task's stack pointer LDMFD sp!, {r4} ; pop new task's spsr MSR spsr_cxsf, r4 LDMFD sp!, {r4} ; pop new task's psr MSR cpsr_cxsf, r4 LDMFD sp!, {r0-r12,lr,pc} ; pop new task's r0-r12,lr & pc ENDP IF :DEF:__MICROLIB EXPORT __heap_base EXPORT __heap_limit ELSE ; User Initial Stack & Heap AREA |.text|, CODE, READONLY IMPORT 
__use_two_region_memory EXPORT __user_initial_stackheap __user_initial_stackheap LDR R0, = Heap_Mem LDR R1, =(Stack_Mem + USR_Stack_Size) LDR R2, = (Heap_Mem + Heap_Size) LDR R3, = Stack_Mem BX LR ENDIF END
Aladdin-Wang/MicroBoot_Demo
10,887
STM32F7_APP/rt-thread/libcpu/arm/cortex-m33/context_rvds.S
;/* ;* Copyright (c) 2006-2018, RT-Thread Development Team ;* ;* SPDX-License-Identifier: Apache-2.0 ;* ; * Change Logs: ; * Date Author Notes ; * 2009-01-17 Bernard first version. ; * 2012-01-01 aozima support context switch load/store FPU register. ; * 2013-06-18 aozima add restore MSP feature. ; * 2013-06-23 aozima support lazy stack optimized. ; * 2018-07-24 aozima enhancement hard fault exception handler. ; */ ;/** ; * @addtogroup cortex-m33 ; */ ;/*@{*/ SCB_VTOR EQU 0xE000ED08 ; Vector Table Offset Register NVIC_INT_CTRL EQU 0xE000ED04 ; interrupt control state register NVIC_SYSPRI2 EQU 0xE000ED20 ; system priority register (2) NVIC_PENDSV_PRI EQU 0xFFFF0000 ; PendSV and SysTick priority value (lowest) NVIC_PENDSVSET EQU 0x10000000 ; value to trigger PendSV exception AREA |.text|, CODE, READONLY, ALIGN=2 THUMB REQUIRE8 PRESERVE8 IMPORT rt_thread_switch_interrupt_flag IMPORT rt_interrupt_from_thread IMPORT rt_interrupt_to_thread IMPORT rt_trustzone_current_context IMPORT rt_trustzone_context_load IMPORT rt_trustzone_context_store ;/* ; * rt_base_t rt_hw_interrupt_disable(); ; */ rt_hw_interrupt_disable PROC EXPORT rt_hw_interrupt_disable MRS r0, PRIMASK CPSID I BX LR ENDP ;/* ; * void rt_hw_interrupt_enable(rt_base_t level); ; */ rt_hw_interrupt_enable PROC EXPORT rt_hw_interrupt_enable MSR PRIMASK, r0 BX LR ENDP ;/* ; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to); ; * r0 --> from ; * r1 --> to ; */ rt_hw_context_switch_interrupt EXPORT rt_hw_context_switch_interrupt rt_hw_context_switch PROC EXPORT rt_hw_context_switch ; set rt_thread_switch_interrupt_flag to 1 LDR r2, =rt_thread_switch_interrupt_flag LDR r3, [r2] CMP r3, #1 BEQ _reswitch MOV r3, #1 STR r3, [r2] LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread STR r0, [r2] _reswitch LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread STR r1, [r2] LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch) LDR r1, =NVIC_PENDSVSET STR r1, [r0] BX LR ENDP ; 
r0 --> switch from thread stack ; r1 --> switch to thread stack ; psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack PendSV_Handler PROC EXPORT PendSV_Handler ; disable interrupt to protect context switch MRS r2, PRIMASK ; R2 = PRIMASK CPSID I ; disable all interrupt ; get rt_thread_switch_interrupt_flag LDR r0, =rt_thread_switch_interrupt_flag ; r0 = &rt_thread_switch_interrupt_flag LDR r1, [r0] ; r1 = *r1 CMP r1, #0x00 ; compare r1 == 0x00 BNE schedule MSR PRIMASK, r2 ; if r1 == 0x00, do msr PRIMASK, r2 BX lr ; if r1 == 0x00, do bx lr schedule PUSH {r2} ; store interrupt state ; clear rt_thread_switch_interrupt_flag to 0 MOV r1, #0x00 ; r1 = 0x00 STR r1, [r0] ; *r0 = r1 ; skip register save at the first time LDR r0, =rt_interrupt_from_thread ; r0 = &rt_interrupt_from_thread LDR r1, [r0] ; r1 = *r0 CBZ r1, switch_to_thread ; if r1 == 0, goto switch_to_thread ; Whether TrustZone thread stack exists LDR r1, =rt_trustzone_current_context ; r1 = &rt_secure_current_context LDR r1, [r1] ; r1 = *r1 CBZ r1, contex_ns_store ; if r1 == 0, goto contex_ns_store ;call TrustZone fun, Save TrustZone stack STMFD sp!, {r0-r1, lr} ; push register MOV r0, r1 ; r0 = rt_secure_current_context BL rt_trustzone_context_store ; call TrustZone store fun LDMFD sp!, {r0-r1, lr} ; pop register ; check break from TrustZone MOV r2, lr ; r2 = lr TST r2, #0x40 ; if EXC_RETURN[6] is 1, TrustZone stack was used BEQ contex_ns_store ; if r2 & 0x40 == 0, goto contex_ns_store ; push PSPLIM CONTROL PSP LR current_context to stack MRS r3, psplim ; r3 = psplim MRS r4, control ; r4 = control MRS r5, psp ; r5 = psp STMFD r5!, {r1-r4} ; push to thread stack ; update from thread stack pointer LDR r0, [r0] ; r0 = rt_thread_switch_interrupt_flag STR r5, [r0] ; *r0 = r5 b switch_to_thread ; goto switch_to_thread contex_ns_store MRS r1, psp ; get from thread stack pointer #if defined (__VFP_FP__) && !defined(__SOFTFP__) TST lr, #0x10 ; if(!EXC_RETURN[4]) VSTMFDEQ r1!, {d8 - d15} ; push FPU register 
s16~s31 #endif STMFD r1!, {r4 - r11} ; push r4 - r11 register LDR r2, =rt_trustzone_current_context ; r2 = &rt_secure_current_context LDR r2, [r2] ; r2 = *r2 MOV r3, lr ; r3 = lr MRS r4, psplim ; r4 = psplim MRS r5, control ; r5 = control STMFD r1!, {r2-r5} ; push to thread stack LDR r0, [r0] STR r1, [r0] ; update from thread stack pointer switch_to_thread LDR r1, =rt_interrupt_to_thread LDR r1, [r1] LDR r1, [r1] ; load thread stack pointer ; update current TrustZone context LDMFD r1!, {r2-r5} ; pop thread stack MSR psplim, r4 ; psplim = r4 MSR control, r5 ; control = r5 MOV lr, r3 ; lr = r3 LDR r6, =rt_trustzone_current_context ; r6 = &rt_secure_current_context STR r2, [r6] ; *r6 = r2 MOV r0, r2 ; r0 = r2 ; Whether TrustZone thread stack exists CBZ r0, contex_ns_load ; if r0 == 0, goto contex_ns_load PUSH {r1, r3} ; push lr, thread_stack BL rt_trustzone_context_load ; call TrustZone load fun POP {r1, r3} ; pop lr, thread_stack MOV lr, r3 ; lr = r1 TST r3, #0x40 ; if EXC_RETURN[6] is 1, TrustZone stack was used BEQ contex_ns_load ; if r1 & 0x40 == 0, goto contex_ns_load B pendsv_exit contex_ns_load LDMFD r1!, {r4 - r11} ; pop r4 - r11 register #if defined (__VFP_FP__) && !defined(__SOFTFP__) TST lr, #0x10 ; if(!EXC_RETURN[4]) VLDMFDEQ r1!, {d8 - d15} ; pop FPU register s16~s31 #endif pendsv_exit MSR psp, r1 ; update stack pointer ; restore interrupt POP {r2} MSR PRIMASK, r2 BX lr ENDP ;/* ; * void rt_hw_context_switch_to(rt_uint32 to); ; * r0 --> to ; * this fucntion is used to perform the first thread switch ; */ rt_hw_context_switch_to PROC EXPORT rt_hw_context_switch_to ; set to thread LDR r1, =rt_interrupt_to_thread STR r0, [r1] #if defined (__VFP_FP__) && !defined(__SOFTFP__) ; CLEAR CONTROL.FPCA MRS r2, CONTROL ; read BIC r2, #0x04 ; modify MSR CONTROL, r2 ; write-back #endif ; set from thread to 0 LDR r1, =rt_interrupt_from_thread MOV r0, #0x0 STR r0, [r1] ; set interrupt flag to 1 LDR r1, =rt_thread_switch_interrupt_flag MOV r0, #1 STR r0, [r1] ; set the 
PendSV and SysTick exception priority LDR r0, =NVIC_SYSPRI2 LDR r1, =NVIC_PENDSV_PRI LDR.W r2, [r0,#0x00] ; read ORR r1,r1,r2 ; modify STR r1, [r0] ; write-back ; trigger the PendSV exception (causes context switch) LDR r0, =NVIC_INT_CTRL LDR r1, =NVIC_PENDSVSET STR r1, [r0] ; restore MSP LDR r0, =SCB_VTOR LDR r0, [r0] LDR r0, [r0] MSR msp, r0 ; enable interrupts at processor level CPSIE F CPSIE I ; ensure PendSV exception taken place before subsequent operation DSB ISB ; never reach here! ENDP ; compatible with old version rt_hw_interrupt_thread_switch PROC EXPORT rt_hw_interrupt_thread_switch BX lr ENDP IMPORT rt_hw_hard_fault_exception EXPORT HardFault_Handler HardFault_Handler PROC ; get current context MRS r0, msp ;get fault context from handler TST lr, #0x04 ;if(!EXC_RETURN[2]) BEQ get_sp_done MRS r0, psp ;get fault context from thread get_sp_done STMFD r0!, {r4 - r11} ; push r4 - r11 register LDR r2, =rt_trustzone_current_context ; r2 = &rt_secure_current_context LDR r2, [r2] ; r2 = *r2 MOV r3, lr ; r3 = lr MRS r4, psplim ; r4 = psplim MRS r5, control ; r5 = control STMFD r0!, {r2-r5} ; push to thread stack STMFD r0!, {lr} ; push exec_return register TST lr, #0x04 ; if(!EXC_RETURN[2]) BEQ update_msp MSR psp, r0 ; update stack pointer to PSP B update_done update_msp MSR msp, r0 ; update stack pointer to MSP update_done PUSH {lr} BL rt_hw_hard_fault_exception POP {lr} ORR lr, lr, #0x04 BX lr ENDP ALIGN 4 END
Aladdin-Wang/MicroBoot_Demo
11,337
STM32F7_APP/rt-thread/libcpu/arm/cortex-m33/context_gcc.S
/* * Copyright (c) 2006-2018, RT-Thread Development Team * * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2009-10-11 Bernard first version * 2012-01-01 aozima support context switch load/store FPU register. * 2013-06-18 aozima add restore MSP feature. * 2013-06-23 aozima support lazy stack optimized. * 2018-07-24 aozima enhancement hard fault exception handler. */ /** * @addtogroup cortex-m4 */ /*@{*/ #include <rtconfig.h> .cpu cortex-m4 .syntax unified .thumb .text .equ SCB_VTOR, 0xE000ED08 /* Vector Table Offset Register */ .equ NVIC_INT_CTRL, 0xE000ED04 /* interrupt control state register */ .equ NVIC_SYSPRI2, 0xE000ED20 /* system priority register (2) */ .equ NVIC_PENDSV_PRI, 0xFFFF0000 /* PendSV and SysTick priority value (lowest) */ .equ NVIC_PENDSVSET, 0x10000000 /* value to trigger PendSV exception */ /* * rt_base_t rt_hw_interrupt_disable(); */ .global rt_hw_interrupt_disable .type rt_hw_interrupt_disable, %function rt_hw_interrupt_disable: MRS r0, PRIMASK CPSID I BX LR /* * void rt_hw_interrupt_enable(rt_base_t level); */ .global rt_hw_interrupt_enable .type rt_hw_interrupt_enable, %function rt_hw_interrupt_enable: MSR PRIMASK, r0 BX LR /* * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to); * r0 --> from * r1 --> to */ .global rt_hw_context_switch_interrupt .type rt_hw_context_switch_interrupt, %function .global rt_hw_context_switch .type rt_hw_context_switch, %function rt_hw_context_switch_interrupt: rt_hw_context_switch: /* set rt_thread_switch_interrupt_flag to 1 */ LDR r2, =rt_thread_switch_interrupt_flag LDR r3, [r2] CMP r3, #1 BEQ _reswitch MOV r3, #1 STR r3, [r2] LDR r2, =rt_interrupt_from_thread /* set rt_interrupt_from_thread */ STR r0, [r2] _reswitch: LDR r2, =rt_interrupt_to_thread /* set rt_interrupt_to_thread */ STR r1, [r2] LDR r0, =NVIC_INT_CTRL /* trigger the PendSV exception (causes context switch) */ LDR r1, =NVIC_PENDSVSET STR r1, [r0] BX LR /* r0 --> switch from thread stack * r1 --> switch to 
thread stack * psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack */ .global PendSV_Handler .type PendSV_Handler, %function PendSV_Handler: /* disable interrupt to protect context switch */ MRS r2, PRIMASK CPSID I /* get rt_thread_switch_interrupt_flag */ LDR r0, =rt_thread_switch_interrupt_flag /* r0 = &rt_thread_switch_interrupt_flag */ LDR r1, [r0] /* r1 = *r1 */ CMP r1, #0x00 /* compare r1 == 0x00 */ BNE schedule MSR PRIMASK, r2 /* if r1 == 0x00, do msr PRIMASK, r2 */ BX lr /* if r1 == 0x00, do bx lr */ schedule: PUSH {r2} /* store interrupt state */ /* clear rt_thread_switch_interrupt_flag to 0 */ MOV r1, #0x00 /* r1 = 0x00 */ STR r1, [r0] /* *r0 = r1 */ /* skip register save at the first time */ LDR r0, =rt_interrupt_from_thread /* r0 = &rt_interrupt_from_thread */ LDR r1, [r0] /* r1 = *r0 */ CBZ r1, switch_to_thread /* if r1 == 0, goto switch_to_thread */ /* Whether TrustZone thread stack exists */ LDR r1, =rt_trustzone_current_context /* r1 = &rt_secure_current_context */ LDR r1, [r1] /* r1 = *r1 */ CBZ r1, contex_ns_store /* if r1 == 0, goto contex_ns_store */ /*call TrustZone fun, Save TrustZone stack */ STMFD sp!, {r0-r1, lr} /* push register */ MOV r0, r1 /* r0 = rt_secure_current_context */ BL rt_trustzone_context_store /* call TrustZone store fun */ LDMFD sp!, {r0-r1, lr} /* pop register */ /* check break from TrustZone */ MOV r2, lr /* r2 = lr */ TST r2, #0x40 /* if EXC_RETURN[6] is 1, TrustZone stack was used */ BEQ contex_ns_store /* if r2 & 0x40 == 0, goto contex_ns_store */ /* push PSPLIM CONTROL PSP LR current_context to stack */ MRS r3, psplim /* r3 = psplim */ MRS r4, control /* r4 = control */ MRS r5, psp /* r5 = psp */ STMFD r5!, {r1-r4} /* push to thread stack */ /* update from thread stack pointer */ LDR r0, [r0] /* r0 = rt_thread_switch_interrupt_flag */ STR r5, [r0] /* *r0 = r5 */ b switch_to_thread /* goto switch_to_thread */ contex_ns_store: MRS r1, psp /* get from thread stack pointer */ #if defined (__VFP_FP__) && 
!defined(__SOFTFP__) TST lr, #0x10 /* if(!EXC_RETURN[4]) */ IT EQ VSTMDBEQ r1!, {d8 - d15} /* push FPU register s16~s31 */ #endif STMFD r1!, {r4 - r11} /* push r4 - r11 register */ LDR r2, =rt_trustzone_current_context /* r2 = &rt_secure_current_context */ LDR r2, [r2] /* r2 = *r2 */ MOV r3, lr /* r3 = lr */ MRS r4, psplim /* r4 = psplim */ MRS r5, control /* r5 = control */ STMFD r1!, {r2-r5} /* push to thread stack */ LDR r0, [r0] STR r1, [r0] /* update from thread stack pointer */ switch_to_thread: LDR r1, =rt_interrupt_to_thread LDR r1, [r1] LDR r1, [r1] /* load thread stack pointer */ /* update current TrustZone context */ LDMFD r1!, {r2-r5} /* pop thread stack */ MSR psplim, r4 /* psplim = r4 */ MSR control, r5 /* control = r5 */ MOV lr, r3 /* lr = r3 */ LDR r6, =rt_trustzone_current_context /* r6 = &rt_secure_current_context */ STR r2, [r6] /* *r6 = r2 */ MOV r0, r2 /* r0 = r2 */ /* Whether TrustZone thread stack exists */ CBZ r0, contex_ns_load /* if r0 == 0, goto contex_ns_load */ PUSH {r1, r3} /* push lr, thread_stack */ BL rt_trustzone_context_load /* call TrustZone load fun */ POP {r1, r3} /* pop lr, thread_stack */ MOV lr, r3 /* lr = r1 */ TST r3, #0x40 /* if EXC_RETURN[6] is 1, TrustZone stack was used */ BEQ contex_ns_load /* if r1 & 0x40 == 0, goto contex_ns_load */ B pendsv_exit contex_ns_load: LDMFD r1!, {r4 - r11} /* pop r4 - r11 register */ #if defined (__VFP_FP__) && !defined(__SOFTFP__) TST lr, #0x10 /* if(!EXC_RETURN[4]) */ IT EQ VLDMIAEQ r1!, {d8 - d15} /* pop FPU register s16~s31 */ #endif #if defined (RT_USING_MEM_PROTECTION) PUSH {r0-r3, r12, lr} LDR r1, =rt_current_thread LDR r0, [r1] BL rt_hw_mpu_table_switch POP {r0-r3, r12, lr} #endif pendsv_exit: MSR psp, r1 /* update stack pointer */ /* restore interrupt */ POP {r2} MSR PRIMASK, r2 BX lr /* * void rt_hw_context_switch_to(rt_uint32 to); * r0 --> to */ .global rt_hw_context_switch_to .type rt_hw_context_switch_to, %function rt_hw_context_switch_to: LDR r1, =rt_interrupt_to_thread STR 
r0, [r1] #if defined (__VFP_FP__) && !defined(__SOFTFP__) /* CLEAR CONTROL.FPCA */ MRS r2, CONTROL /* read */ BIC r2, #0x04 /* modify */ MSR CONTROL, r2 /* write-back */ #endif /* set from thread to 0 */ LDR r1, =rt_interrupt_from_thread MOV r0, #0x0 STR r0, [r1] /* set interrupt flag to 1 */ LDR r1, =rt_thread_switch_interrupt_flag MOV r0, #1 STR r0, [r1] /* set the PendSV and SysTick exception priority */ LDR r0, =NVIC_SYSPRI2 LDR r1, =NVIC_PENDSV_PRI LDR.W r2, [r0,#0x00] /* read */ ORR r1,r1,r2 /* modify */ STR r1, [r0] /* write-back */ LDR r0, =NVIC_INT_CTRL /* trigger the PendSV exception (causes context switch) */ LDR r1, =NVIC_PENDSVSET STR r1, [r0] /* restore MSP */ LDR r0, =SCB_VTOR LDR r0, [r0] LDR r0, [r0] NOP MSR msp, r0 /* enable interrupts at processor level */ CPSIE F CPSIE I /* ensure PendSV exception taken place before subsequent operation */ DSB ISB /* never reach here! */ /* compatible with old version */ .global rt_hw_interrupt_thread_switch .type rt_hw_interrupt_thread_switch, %function rt_hw_interrupt_thread_switch: BX lr NOP .global HardFault_Handler .type HardFault_Handler, %function HardFault_Handler: /* get current context */ MRS r0, msp /* get fault context from handler. */ TST lr, #0x04 /* if(!EXC_RETURN[2]) */ BEQ get_sp_done MRS r0, psp /* get fault context from thread. */ get_sp_done: STMFD r0!, {r4 - r11} /* push r4 - r11 register */ LDR r2, =rt_trustzone_current_context /* r2 = &rt_secure_current_context */ LDR r2, [r2] /* r2 = *r2 */ MOV r3, lr /* r3 = lr */ MRS r4, psplim /* r4 = psplim */ MRS r5, control /* r5 = control */ STMFD r0!, {r2-r5} /* push to thread stack */ STMFD r0!, {lr} /* push exec_return register */ TST lr, #0x04 /* if(!EXC_RETURN[2]) */ BEQ update_msp MSR psp, r0 /* update stack pointer to PSP. */ B update_done update_msp: MSR msp, r0 /* update stack pointer to MSP. */ update_done: PUSH {LR} BL rt_hw_hard_fault_exception POP {LR} ORR lr, lr, #0x04 BX lr
Aladdin-Wang/MicroBoot_Demo
10,762
STM32F7_APP/rt-thread/libcpu/arm/cortex-m33/context_iar.S
;/* ; * Copyright (c) 2006-2018, RT-Thread Development Team ; * ; * SPDX-License-Identifier: Apache-2.0 ; * ; * Change Logs: ; * Date Author Notes ; * 2009-01-17 Bernard first version ; * 2009-09-27 Bernard add protect when contex switch occurs ; * 2012-01-01 aozima support context switch load/store FPU register. ; * 2013-06-18 aozima add restore MSP feature. ; * 2013-06-23 aozima support lazy stack optimized. ; * 2018-07-24 aozima enhancement hard fault exception handler. ; */ ;/** ; * @addtogroup cortex-m33 ; */ ;/*@{*/ SCB_VTOR EQU 0xE000ED08 ; Vector Table Offset Register NVIC_INT_CTRL EQU 0xE000ED04 ; interrupt control state register NVIC_SYSPRI2 EQU 0xE000ED20 ; system priority register (2) NVIC_PENDSV_PRI EQU 0xFFFF0000 ; PendSV and SysTick priority value (lowest) NVIC_PENDSVSET EQU 0x10000000 ; value to trigger PendSV exception SECTION .text:CODE(2) THUMB REQUIRE8 PRESERVE8 IMPORT rt_thread_switch_interrupt_flag IMPORT rt_interrupt_from_thread IMPORT rt_interrupt_to_thread IMPORT rt_trustzone_current_context IMPORT rt_trustzone_context_load IMPORT rt_trustzone_context_store ;/* ; * rt_base_t rt_hw_interrupt_disable(); ; */ EXPORT rt_hw_interrupt_disable rt_hw_interrupt_disable: MRS r0, PRIMASK CPSID I BX LR ;/* ; * void rt_hw_interrupt_enable(rt_base_t level); ; */ EXPORT rt_hw_interrupt_enable rt_hw_interrupt_enable: MSR PRIMASK, r0 BX LR ;/* ; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to); ; * r0 --> from ; * r1 --> to ; */ EXPORT rt_hw_context_switch_interrupt EXPORT rt_hw_context_switch rt_hw_context_switch_interrupt: rt_hw_context_switch: ; set rt_thread_switch_interrupt_flag to 1 LDR r2, =rt_thread_switch_interrupt_flag LDR r3, [r2] CMP r3, #1 BEQ _reswitch MOV r3, #1 STR r3, [r2] LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread STR r0, [r2] _reswitch LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread STR r1, [r2] LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch) LDR r1, =NVIC_PENDSVSET 
STR r1, [r0] BX LR ; r0 --> switch from thread stack ; r1 --> switch to thread stack ; psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack EXPORT PendSV_Handler PendSV_Handler: ; disable interrupt to protect context switch MRS r2, PRIMASK CPSID I ; get rt_thread_switch_interrupt_flag LDR r0, =rt_thread_switch_interrupt_flag ; r0 = &rt_thread_switch_interrupt_flag LDR r1, [r0] ; r1 = *r1 CMP r1, #0x00 ; compare r1 == 0x00 BNE schedule MSR PRIMASK, r2 ; if r1 == 0x00, do msr PRIMASK, r2 BX lr ; if r1 == 0x00, do bx lr schedule PUSH {r2} ; store interrupt state ; clear rt_thread_switch_interrupt_flag to 0 MOV r1, #0x00 ; r1 = 0x00 STR r1, [r0] ; *r0 = r1 ; skip register save at the first time LDR r0, =rt_interrupt_from_thread ; r0 = &rt_interrupt_from_thread LDR r1, [r0] ; r1 = *r0 CBZ r1, switch_to_thread ; if r1 == 0, goto switch_to_thread ; Whether TrustZone thread stack exists LDR r1, =rt_trustzone_current_context ; r1 = &rt_secure_current_context LDR r1, [r1] ; r1 = *r1 CBZ r1, contex_ns_store ; if r1 == 0, goto contex_ns_store ;call TrustZone fun, Save TrustZone stack STMFD sp!, {r0-r1, lr} ; push register MOV r0, r1 ; r0 = rt_secure_current_context BL rt_trustzone_context_store ; call TrustZone store fun LDMFD sp!, {r0-r1, lr} ; pop register ; check break from TrustZone MOV r2, lr ; r2 = lr TST r2, #0x40 ; if EXC_RETURN[6] is 1, TrustZone stack was used BEQ contex_ns_store ; if r2 & 0x40 == 0, goto contex_ns_store ; push PSPLIM CONTROL PSP LR current_context to stack MRS r3, psplim ; r3 = psplim MRS r4, control ; r4 = control MRS r5, psp ; r5 = psp STMFD r5!, {r1-r4} ; push to thread stack ; update from thread stack pointer LDR r0, [r0] ; r0 = rt_thread_switch_interrupt_flag STR r5, [r0] ; *r0 = r5 b switch_to_thread ; goto switch_to_thread contex_ns_store MRS r1, psp ; get from thread stack pointer #if defined ( __ARMVFP__ ) TST lr, #0x10 ; if(!EXC_RETURN[4]) BNE skip_push_fpu VSTMDB r1!, {d8 - d15} ; push FPU register s16~s31 skip_push_fpu #endif 
STMFD r1!, {r4 - r11} ; push r4 - r11 register LDR r2, =rt_trustzone_current_context ; r2 = &rt_secure_current_context LDR r2, [r2] ; r2 = *r2 MOV r3, lr ; r3 = lr MRS r4, psplim ; r4 = psplim MRS r5, control ; r5 = control STMFD r1!, {r2-r5} ; push to thread stack LDR r0, [r0] STR r1, [r0] ; update from thread stack pointer switch_to_thread LDR r1, =rt_interrupt_to_thread LDR r1, [r1] LDR r1, [r1] ; load thread stack pointer ; update current TrustZone context LDMFD r1!, {r2-r5} ; pop thread stack MSR psplim, r4 ; psplim = r4 MSR control, r5 ; control = r5 MOV lr, r3 ; lr = r3 LDR r6, =rt_trustzone_current_context ; r6 = &rt_secure_current_context STR r2, [r6] ; *r6 = r2 MOV r0, r2 ; r0 = r2 ; Whether TrustZone thread stack exists CBZ r0, contex_ns_load ; if r0 == 0, goto contex_ns_load PUSH {r1, r3} ; push lr, thread_stack BL rt_trustzone_context_load ; call TrustZone load fun POP {r1, r3} ; pop lr, thread_stack MOV lr, r3 ; lr = r1 TST r3, #0x40 ; if EXC_RETURN[6] is 1, TrustZone stack was used BEQ contex_ns_load ; if r1 & 0x40 == 0, goto contex_ns_load B pendsv_exit contex_ns_load LDMFD r1!, {r4 - r11} ; pop r4 - r11 register #if defined ( __ARMVFP__ ) TST lr, #0x10 ; if(!EXC_RETURN[4]) BNE skip_pop_fpu VLDMIA r1!, {d8 - d15} ; pop FPU register s16~s31 skip_pop_fpu #endif pendsv_exit MSR psp, r1 ; update stack pointer ; restore interrupt POP {r2} MSR PRIMASK, r2 BX lr ;/* ; * void rt_hw_context_switch_to(rt_uint32 to); ; * r0 --> to ; */ EXPORT rt_hw_context_switch_to rt_hw_context_switch_to: LDR r1, =rt_interrupt_to_thread STR r0, [r1] #if defined ( __ARMVFP__ ) ; CLEAR CONTROL.FPCA MRS r2, CONTROL ; read BIC r2, r2, #0x04 ; modify MSR CONTROL, r2 ; write-back #endif ; set from thread to 0 LDR r1, =rt_interrupt_from_thread MOV r0, #0x0 STR r0, [r1] ; set interrupt flag to 1 LDR r1, =rt_thread_switch_interrupt_flag MOV r0, #1 STR r0, [r1] ; set the PendSV and SysTick exception priority LDR r0, =NVIC_SYSPRI2 LDR r1, =NVIC_PENDSV_PRI LDR.W r2, [r0,#0x00] ; read 
ORR r1,r1,r2 ; modify STR r1, [r0] ; write-back LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch) LDR r1, =NVIC_PENDSVSET STR r1, [r0] ; restore MSP LDR r0, =SCB_VTOR LDR r0, [r0] LDR r0, [r0] NOP MSR msp, r0 ; enable interrupts at processor level CPSIE F CPSIE I ; ensure PendSV exception taken place before subsequent operation DSB ISB ; never reach here! ; compatible with old version EXPORT rt_hw_interrupt_thread_switch rt_hw_interrupt_thread_switch: BX lr IMPORT rt_hw_hard_fault_exception EXPORT HardFault_Handler HardFault_Handler: ; get current context MRS r0, msp ; get fault context from handler. TST lr, #0x04 ; if(!EXC_RETURN[2]) BEQ get_sp_done MRS r0, psp ; get fault context from thread. get_sp_done STMFD r0!, {r4 - r11} ; push r4 - r11 register LDR r2, =rt_trustzone_current_context ; r2 = &rt_secure_current_context LDR r2, [r2] ; r2 = *r2 MOV r3, lr ; r3 = lr MRS r4, psplim ; r4 = psplim MRS r5, control ; r5 = control STMFD r0!, {r2-r5} ; push to thread stack STMFD r0!, {lr} ; push exec_return register TST lr, #0x04 ; if(!EXC_RETURN[2]) BEQ update_msp MSR psp, r0 ; update stack pointer to PSP. B update_done update_msp MSR msp, r0 ; update stack pointer to MSP. update_done PUSH {lr} BL rt_hw_hard_fault_exception POP {lr} ORR lr, lr, #0x04 BX lr END
Aladdin-Wang/MicroBoot_Demo
1,658
STM32F7_APP/rt-thread/libcpu/arm/cortex-m33/syscall_rvds.S
;/* ; * Copyright (c) 2006-2022, RT-Thread Development Team ; * ; * SPDX-License-Identifier: Apache-2.0 ; * ; * Change Logs: ; * Date Author Notes ; * 2019-10-25 tyx first version ; */ AREA |.text|, CODE, READONLY, ALIGN=2 THUMB REQUIRE8 PRESERVE8 IMPORT rt_secure_svc_handle ;/* ; * int tzcall(int id, rt_ubase_t arg0, rt_ubase_t arg1, rt_ubase_t arg2); ; */ tzcall PROC EXPORT tzcall SVC 1 ;call SVC 1 BX LR ENDP tzcall_entry PROC PUSH {R1, R4, LR} MOV R4, R1 ; copy thread SP to R4 LDMFD R4!, {r0 - r3} ; pop user stack, get input arg0, arg1, arg2 STMFD R4!, {r0 - r3} ; push stack, user stack recovery BL rt_secure_svc_handle ; call fun POP {R1, R4, LR} STR R0, [R1] ; update return value BX LR ; return to thread ENDP syscall_entry PROC BX LR ; return to user app ENDP ;/* ; * void SVC_Handler(void); ; */ SVC_Handler PROC EXPORT SVC_Handler ; get SP, save to R1 MRS R1, MSP ;get fault context from handler TST LR, #0x04 ;if(!EXC_RETURN[2]) BEQ get_sp_done MRS R1, PSP ;get fault context from thread get_sp_done ; get svc index LDR R0, [R1, #24] LDRB R0, [R0, #-2] ;if svc == 0, do system call CMP R0, #0x0 BEQ syscall_entry ;if svc == 1, do TrustZone call CMP R0, #0x1 BEQ tzcall_entry ENDP ALIGN END
Aladdin-Wang/MicroBoot_Demo
1,570
STM32F7_APP/rt-thread/libcpu/arm/cortex-m33/syscall_gcc.S
/* * Copyright (c) 2006-2022, RT-Thread Development Team * * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2019-10-25 tyx first version */ .cpu cortex-m4 .syntax unified .thumb .text /* * int tzcall(int id, rt_ubase_t arg0, rt_ubase_t arg1, rt_ubase_t arg2); */ .global tzcall .type tzcall, %function tzcall: SVC 1 /* call SVC 1 */ BX LR tzcall_entry: PUSH {R1, R4, LR} MOV R4, R1 /* copy thread SP to R4 */ LDMFD R4!, {r0 - r3} /* pop user stack, get input arg0, arg1, arg2 */ STMFD R4!, {r0 - r3} /* push stack, user stack recovery */ BL rt_secure_svc_handle /* call fun */ POP {R1, R4, LR} STR R0, [R1] /* update return value */ BX LR /* return to thread */ syscall_entry: BX LR /* return to user app */ .global SVC_Handler .type SVC_Handler, %function SVC_Handler: /* get SP, save to R1 */ MRS R1, MSP /* get fault context from handler. */ TST LR, #0x04 /* if(!EXC_RETURN[2]) */ BEQ get_sp_done MRS R1, PSP /* get fault context from thread. */ get_sp_done: /* get svc index */ LDR R0, [R1, #24] LDRB R0, [R0, #-2] /* if svc == 0, do system call */ CMP R0, #0x0 BEQ syscall_entry /* if svc == 1, do TrustZone call */ CMP R0, #0x1 BEQ tzcall_entry
Aladdin-Wang/MicroBoot_Demo
1,707
STM32F7_APP/rt-thread/libcpu/arm/cortex-m33/syscall_iar.S
;/* ; * Copyright (c) 2006-2022, RT-Thread Development Team ; * ; * SPDX-License-Identifier: Apache-2.0 ; * ; * Change Logs: ; * Date Author Notes ; * 2019-10-25 tyx first version ; * 2021-03-26 lxf modify bad instruction ; */ ;/* ; * @addtogroup cortex-m33 ; */ SECTION .text:CODE(2) THUMB REQUIRE8 PRESERVE8 IMPORT rt_secure_svc_handle ;/* ; * int tzcall(int id, rt_ubase_t arg0, rt_ubase_t arg1, rt_ubase_t arg2); ; */ EXPORT tzcall tzcall: SVC 1 ;/* call SVC 1 */ BX LR tzcall_entry: PUSH {R1, R4, LR} MOV R4, R1 ;/* copy thread SP to R4 */ LDMFD R4!, {r0 - r3} ;/* pop user stack, get input arg0, arg1, arg2 */ STMFD R4!, {r0 - r3} ;/* push stack, user stack recovery */ BL rt_secure_svc_handle ;/* call fun */ POP {R1, R4, LR} STR R0, [R1] ;/* update return value */ BX LR ;/* return to thread */ syscall_entry: BX LR ;/* return to user app */ EXPORT SVC_Handler SVC_Handler: ;/* get SP, save to R1 */ MRS R1, MSP ;/* get fault context from handler. */ TST LR, #0x04 ;/* if(!EXC_RETURN[2]) */ BEQ get_sp_done MRS R1, PSP ;/* get fault context from thread. */ get_sp_done: ;/* get svc index */ LDR R0, [R1, #24] LDRB R0, [R0, #-2] ;/* if svc == 0, do system call */ CMP R0, #0x0 BEQ syscall_entry ;/* if svc == 1, do TrustZone call */ CMP R0, #0x1 BEQ tzcall_entry END
Aladdin-Wang/MicroBoot_Demo
7,204
STM32F7_APP/rt-thread/libcpu/arm/cortex-m85/context_gcc.S
/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2009-10-11     Bernard      first version
 * 2012-01-01     aozima       support context switch load/store FPU register.
 * 2013-06-18     aozima       add restore MSP feature.
 * 2013-06-23     aozima       support lazy stack optimized.
 * 2018-07-24     aozima       enhancement hard fault exception handler.
 */

/**
 * @addtogroup cortex-m85
 */
/*@{*/

.cpu cortex-m7          /* NOTE(review): file is documented as cortex-m85 but selects cortex-m7 -- confirm intended target */
.syntax unified
.thumb
.text

.equ    SCB_VTOR,        0xE000ED08     /* Vector Table Offset Register */
.equ    NVIC_INT_CTRL,   0xE000ED04     /* interrupt control state register */
.equ    NVIC_SYSPRI2,    0xE000ED20     /* system priority register (2) */
.equ    NVIC_PENDSV_PRI, 0xFFFF0000     /* PendSV and SysTick priority value (lowest) */
.equ    NVIC_PENDSVSET,  0x10000000     /* value to trigger PendSV exception */

/*
 * rt_base_t rt_hw_interrupt_disable();
 * Returns the previous PRIMASK value, then masks all configurable interrupts.
 */
.global rt_hw_interrupt_disable
.type rt_hw_interrupt_disable, %function
rt_hw_interrupt_disable:
    MRS     r0, PRIMASK                 /* r0 = previous interrupt state (returned to caller) */
    CPSID   I                           /* disable interrupts */
    BX      LR

/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 * Restores the PRIMASK value previously returned by rt_hw_interrupt_disable().
 */
.global rt_hw_interrupt_enable
.type rt_hw_interrupt_enable, %function
rt_hw_interrupt_enable:
    MSR     PRIMASK, r0
    BX      LR

/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * r0 --> from
 * r1 --> to
 *
 * Records the from/to thread stack-pointer slots and pends PendSV; the
 * actual register save/restore happens in PendSV_Handler.
 */
.global rt_hw_context_switch_interrupt
.type rt_hw_context_switch_interrupt, %function
.global rt_hw_context_switch
.type rt_hw_context_switch, %function

rt_hw_context_switch_interrupt:
rt_hw_context_switch:
    /* set rt_thread_switch_interrupt_flag to 1 */
    LDR     r2, =rt_thread_switch_interrupt_flag
    LDR     r3, [r2]
    CMP     r3, #1
    BEQ     _reswitch                   /* a switch is already pending: keep original "from", update "to" only */
    MOV     r3, #1
    STR     r3, [r2]

    LDR     r2, =rt_interrupt_from_thread   /* set rt_interrupt_from_thread */
    STR     r0, [r2]

_reswitch:
    LDR     r2, =rt_interrupt_to_thread     /* set rt_interrupt_to_thread */
    STR     r1, [r2]

    LDR     r0, =NVIC_INT_CTRL              /* trigger the PendSV exception (causes context switch) */
    LDR     r1, =NVIC_PENDSVSET
    STR     r1, [r0]
    BX      LR

/* r0 --> switch from thread stack
 * r1 --> switch to thread stack
 * psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
 * (hardware-stacked automatically on exception entry)
 */
.global PendSV_Handler
.type PendSV_Handler, %function
PendSV_Handler:
    /* disable interrupt to protect context switch */
    MRS     r2, PRIMASK
    CPSID   I

    /* get rt_thread_switch_interrupt_flag */
    LDR     r0, =rt_thread_switch_interrupt_flag
    LDR     r1, [r0]
    CBZ     r1, pendsv_exit         /* pendsv already handled */

    /* clear rt_thread_switch_interrupt_flag to 0 */
    MOV     r1, #0x00
    STR     r1, [r0]

    LDR     r0, =rt_interrupt_from_thread
    LDR     r1, [r0]
    CBZ     r1, switch_to_thread    /* skip register save at the first time */

    MRS     r1, psp                 /* get from thread stack pointer */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    TST     lr, #0x10               /* if(!EXC_RETURN[4]) -- extended (FP) frame was stacked */
    IT      EQ
    VSTMDBEQ r1!, {d8 - d15}        /* push FPU register s16~s31 */
#endif

    STMFD   r1!, {r4 - r11}         /* push r4 - r11 register */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    MOV     r4, #0x00               /* flag = 0 */

    TST     lr, #0x10               /* if(!EXC_RETURN[4]) */
    IT      EQ
    MOVEQ   r4, #0x01               /* flag = 1: this frame contains FP context */

    STMFD   r1!, {r4}               /* push flag */
#endif

    LDR     r0, [r0]
    STR     r1, [r0]                /* update from thread stack pointer */

switch_to_thread:
    /* set PSPLIM register */
    PUSH    {LR}
    bl      TaskSwitch_StackCheck   /* NOTE(review): external helper, presumably programs PSPLIM for the "to" thread -- confirm */
    POP     {LR}

    LDR     r1, =rt_interrupt_to_thread
    LDR     r1, [r1]
    LDR     r1, [r1]                /* load thread stack pointer */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    LDMFD   r1!, {r3}               /* pop flag */
#endif

    LDMFD   r1!, {r4 - r11}         /* pop r4 - r11 register */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    CMP     r3, #0                  /* if(flag_r3 != 0) */
    IT      NE
    VLDMIANE r1!, {d8 - d15}        /* pop FPU register s16~s31 */
#endif

    MSR     psp, r1                 /* update stack pointer */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    ORR     lr, lr, #0x10           /* lr |=  (1 << 4), clean FPCA. */
    CMP     r3, #0                  /* if(flag_r3 != 0) */
    IT      NE
    BICNE   lr, lr, #0x10           /* lr &= ~(1 << 4), set FPCA. */
#endif

pendsv_exit:
    /* restore interrupt */
    MSR     PRIMASK, r2

    ORR     lr, lr, #0x04           /* exception return uses PSP (EXC_RETURN[2] = 1) */
    BX      lr

/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * r0 --> to
 *
 * Starts the scheduler: switch to the first thread. Never returns.
 */
.global rt_hw_context_switch_to
.type rt_hw_context_switch_to, %function
rt_hw_context_switch_to:
    LDR     r1, =rt_interrupt_to_thread
    STR     r0, [r1]

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    /* CLEAR CONTROL.FPCA */
    MRS     r2, CONTROL             /* read */
    BIC     r2, #0x04               /* modify */
    MSR     CONTROL, r2             /* write-back */
#endif

    /* set from thread to 0 */
    LDR     r1, =rt_interrupt_from_thread
    MOV     r0, #0x0
    STR     r0, [r1]

    /* set interrupt flag to 1 */
    LDR     r1, =rt_thread_switch_interrupt_flag
    MOV     r0, #1
    STR     r0, [r1]

    /* set the PendSV and SysTick exception priority */
    LDR     r0, =NVIC_SYSPRI2
    LDR     r1, =NVIC_PENDSV_PRI
    LDR.W   r2, [r0,#0x00]          /* read */
    ORR     r1,r1,r2                /* modify */
    STR     r1, [r0]                /* write-back */

    LDR     r0, =NVIC_INT_CTRL      /* trigger the PendSV exception (causes context switch) */
    LDR     r1, =NVIC_PENDSVSET
    STR     r1, [r0]

    /* restore MSP from vector table entry 0 (initial stack pointer) */
    LDR     r0, =SCB_VTOR
    LDR     r0, [r0]
    LDR     r0, [r0]
    NOP
    MSR     msp, r0

    /* enable interrupts at processor level */
    CPSIE   F
    CPSIE   I

    /* ensure PendSV exception taken place before subsequent operation */
    DSB
    ISB

    /* never reach here! */

/* compatible with old version */
.global rt_hw_interrupt_thread_switch
.type rt_hw_interrupt_thread_switch, %function
rt_hw_interrupt_thread_switch:
    BX      lr
    NOP

/*
 * Hard fault handler: rebuilds a full rt_hw_exp_stack-style frame on the
 * faulting stack (MSP or PSP, chosen from EXC_RETURN[2]) and calls the C
 * handler rt_hw_hard_fault_exception with it.
 */
.global HardFault_Handler
.type HardFault_Handler, %function
HardFault_Handler:
    /* get current context */
    MRS     r0, msp                 /* get fault context from handler. */
    TST     lr, #0x04               /* if(!EXC_RETURN[2]) */
    BEQ     _get_sp_done
    MRS     r0, psp                 /* get fault context from thread. */
_get_sp_done:

    STMFD   r0!, {r4 - r11}         /* push r4 - r11 register */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    STMFD   r0!, {lr}               /* push dummy for flag */
#endif
    STMFD   r0!, {lr}               /* push exec_return register */

    TST     lr, #0x04               /* if(!EXC_RETURN[2]) */
    BEQ     _update_msp
    MSR     psp, r0                 /* update stack pointer to PSP. */
    B       _update_done
_update_msp:
    MSR     msp, r0                 /* update stack pointer to MSP. */
_update_done:

    PUSH    {LR}
    BL      rt_hw_hard_fault_exception
    POP     {LR}

    ORR     lr, lr, #0x04
    BX      lr
Aladdin-Wang/MicroBoot_Demo
4,835
STM32F7_APP/rt-thread/libcpu/arm/cortex-r52/cp15_gcc.S
/*
 * Copyright (c) 2011-2022, Shanghai Real-Thread Electronic Technology Co.,Ltd
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-08-29     RT-Thread    first version
 */

/* Returns MPIDR (CP15 c0,c0,5): CPU affinity / SMP core id. */
.globl rt_cpu_get_smp_id
rt_cpu_get_smp_id:
    mrc p15, #0, r0, c0, c0, #5
    bx lr

/* void rt_cpu_vector_set_base(addr): route exceptions to a custom vector base (VBAR). */
.globl rt_cpu_vector_set_base
rt_cpu_vector_set_base:
    /* clear SCTRL.V to customize the vector address */
    mrc p15, #0, r1, c1, c0, #0
    bic r1, #(1 << 13)
    mcr p15, #0, r1, c1, c0, #0
    /* set up the vector address */
    mcr p15, #0, r0, c12, c0, #0
    dsb
    bx lr

/* Set SCTLR.C (bit 2) to enable the data cache. */
.globl rt_hw_cpu_dcache_enable
rt_hw_cpu_dcache_enable:
    mrc p15, #0, r0, c1, c0, #0
    orr r0, r0, #0x00000004
    mcr p15, #0, r0, c1, c0, #0
    bx lr

/* Set SCTLR.I (bit 12) to enable the instruction cache. */
.globl rt_hw_cpu_icache_enable
rt_hw_cpu_icache_enable:
    mrc p15, #0, r0, c1, c0, #0
    orr r0, r0, #0x00001000
    mcr p15, #0, r0, c1, c0, #0
    bx lr

/* Field-extraction masks for CCSIDR (max way / max set index). */
_FLD_MAX_WAY:
    .word 0x3ff
_FLD_MAX_IDX:
    .word 0x7fff

/*
 * Clean+invalidate the entire data cache, then invalidate I-cache and
 * branch predictor. Preserves all caller registers.
 */
.globl rt_cpu_dcache_clean_flush
rt_cpu_dcache_clean_flush:
    stmfd sp!, {r0-r12, lr}
    bl v7_flush_dcache_all
    mov r0, #0
    mcr p15, 0, r0, c7, c5, 0       @ I+BTB cache invalidate
    dsb
    isb
    ldmfd sp!, {r0-r12, lr}
    mov pc, lr

/*
 * Walk every cache level reported by CLIDR and clean+invalidate each
 * data/unified cache by set/way (same algorithm as the Linux
 * v7_flush_dcache_all routine). Clobbers r0-r5, r7, r9-r11.
 */
v7_flush_dcache_all:
    dmb                             @ ensure ordering with previous memory accesses
    mrc p15, 1, r0, c0, c0, 1       @ read clidr
    ands r3, r0, #0x7000000         @ extract loc from clidr
    mov r3, r3, lsr #23             @ left align loc bit field
    beq finished                    @ if loc is 0, then no need to clean
    mov r10, #0                     @ start clean at cache level 0
loop1:
    add r2, r10, r10, lsr #1        @ work out 3x current cache level
    mov r1, r0, lsr r2              @ extract cache type bits from clidr
    and r1, r1, #7                  @ mask of the bits for current cache only
    cmp r1, #2                      @ see what cache we have at this level
    blt skip                        @ skip if no cache, or just i-cache
    mcr p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    isb                             @ isb to sych the new cssr&csidr
    mrc p15, 1, r1, c0, c0, 0       @ read the new csidr
    and r2, r1, #7                  @ extract the length of the cache lines
    add r2, r2, #4                  @ add 4 (line length offset)
    ldr r4, =0x3ff
    ands r4, r4, r1, lsr #3         @ find maximum number on the way size
    clz r5, r4                      @ find bit position of way size increment
    ldr r7, =0x7fff
    ands r7, r7, r1, lsr #13        @ extract max number of the index size
loop2:
    mov r9, r4                      @ create working copy of max way size
loop3:
    orr r11, r10, r9, lsl r5        @ factor way and cache number into r11
    orr r11, r11, r7, lsl r2        @ factor index number into r11
    mcr p15, 0, r11, c7, c14, 2     @ clean & invalidate by set/way
    subs r9, r9, #1                 @ decrement the way
    bge loop3
    subs r7, r7, #1                 @ decrement the index
    bge loop2
skip:
    add r10, r10, #2                @ increment cache number
    cmp r3, r10
    bgt loop1
finished:
    mov r10, #0                     @ swith back to cache level 0
    mcr p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    dsb
    isb
    mov pc, lr

/* Disabled alternative implementation of the set/way flush, kept for reference. */
#if 0
    push {r4-r11}
    dmb
    mrc p15, #1, r0, c0, c0, #1     @ read clid register
    ands r3, r0, #0x7000000         @ get level of coherency
    mov r3, r3, lsr #23
    beq finished
    mov r10, #0
loop1:
    add r2, r10, r10, lsr #1
    mov r1, r0, lsr r2
    and r1, r1, #7
    cmp r1, #2
    blt skip
    mcr p15, #2, r10, c0, c0, #0
    isb
    mrc p15, #1, r1, c0, c0, #0
    and r2, r1, #7
    add r2, r2, #4
    ldr r4, _FLD_MAX_WAY
    ands r4, r4, r1, lsr #3
    clz r5, r4
    ldr r7, _FLD_MAX_IDX
    ands r7, r7, r1, lsr #13
loop2:
    mov r9, r4
loop3:
    orr r11, r10, r9, lsl r5
    orr r11, r11, r7, lsl r2
    mcr p15, #0, r11, c7, c14, #2
    subs r9, r9, #1
    bge loop3
    subs r7, r7, #1
    bge loop2
skip:
    add r10, r10, #2
    cmp r3, r10
    bgt loop1
finished:
    dsb
    isb
    pop {r4-r11}
    bx lr
#endif

/* Invalidate the instruction cache and branch predictor. */
.globl rt_cpu_icache_flush
rt_cpu_icache_flush:
    mov r0, #0
    mcr p15, 0, r0, c7, c5, 0       @ I+BTB cache invalidate
    dsb
    isb
    bx lr

/* Flush the data cache, then clear SCTLR.C to disable it. */
.globl rt_hw_cpu_dcache_disable
rt_hw_cpu_dcache_disable:
    push {r4-r11, lr}
    bl rt_cpu_dcache_clean_flush
    mrc p15, #0, r0, c1, c0, #0
    bic r0, r0, #0x00000004
    mcr p15, #0, r0, c1, c0, #0
    pop {r4-r11, lr}
    bx lr

/* Clear SCTLR.I to disable the instruction cache. */
.globl rt_hw_cpu_icache_disable
rt_hw_cpu_icache_disable:
    mrc p15, #0, r0, c1, c0, #0
    bic r0, r0, #0x00001000
    mcr p15, #0, r0, c1, c0, #0
    bx lr
Aladdin-Wang/MicroBoot_Demo
9,508
STM32F7_APP/rt-thread/libcpu/arm/cortex-r52/start_iar.S
;/*
; * Copyright (c) 2006-2022, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date           Author       Notes
; * 2024-03-11     Wangyuqiang  first version
; */

;@-------------------------------------------------------------------------------
;@ sys_core.asm
;@
;@ (c) Texas Instruments 2009-2013, All rights reserved.
;@

; Constants: ARM processor-mode values for the CPSR M field
Mode_USR        EQU     0x10
Mode_FIQ        EQU     0x11
Mode_IRQ        EQU     0x12
Mode_SVC        EQU     0x13
Mode_ABT        EQU     0x17
Mode_UND        EQU     0x1B
Mode_SYS        EQU     0x1F

I_Bit           EQU     0x80    ; when I bit is set, IRQ is disabled
F_Bit           EQU     0x40    ; when F bit is set, FIQ is disabled

; Per-mode stack sizes (bytes); UND/SVC/ABT modes share the SVC stack here
UND_Stack_Size  EQU     0x00000000
SVC_Stack_Size  EQU     0x00000000
ABT_Stack_Size  EQU     0x00000000
FIQ_Stack_Size  EQU     0x00001000
IRQ_Stack_Size  EQU     0x00001000

        IMPORT _c_int00
        IMPORT rt_hw_trap_svc
        IMPORT rt_hw_trap_pabt
        IMPORT rt_hw_trap_dabt
        IMPORT rt_hw_trap_resv
        IMPORT system_init
        IMPORT __iar_program_start

; Define sections
        SECTION .text:CODE:REORDER:NOROOT(2)

; Define stack start and top
        EXPORT stack_start
        EXPORT stack_top

; Align stack start to a 32-byte (2^5) boundary
        ALIGNRAM 5
stack_start:
; Reserve stack memory
        REPT (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + FIQ_Stack_Size + IRQ_Stack_Size)
        DCB 0                   ; Define a byte of data and clear it to zero
        ENDR
; Define stack top label
stack_top:

; Define code section
        SECTION .text:CODE:REORDER:NOROOT(2)
; Assemble in Thumb mode
        THUMB

;@-------------------------------------------------------------------------------
;@ Enable RAM ECC Support
    EXPORT _coreEnableRamEcc_
_coreEnableRamEcc_:
        stmfd sp!, {r0}
        mrc   p15, #0x00, r0, c1, c0, #0x01
        orr   r0,  r0,    #0x0C000000
        mcr   p15, #0x00, r0, c1, c0, #0x01
        ldmfd sp!, {r0}
        bx    lr

;@-------------------------------------------------------------------------------
;@ Disable RAM ECC Support
    EXPORT _coreDisableRamEcc_
_coreDisableRamEcc_:
        stmfd sp!, {r0}
        mrc   p15, #0x00, r0, c1, c0, #0x01
        bic   r0,  r0,    #0x0C000000
        mcr   p15, #0x00, r0, c1, c0, #0x01
        ldmfd sp!, {r0}
        bx    lr

;@-------------------------------------------------------------------------------
;@ Enable Flash ECC Support
    EXPORT _coreEnableFlashEcc_
_coreEnableFlashEcc_:
        stmfd sp!, {r0}
        mrc   p15, #0x00, r0, c1, c0, #0x01
        orr   r0,  r0,    #0x02000000
        dmb
        mcr   p15, #0x00, r0, c1, c0, #0x01
        ldmfd sp!, {r0}
        bx    lr

;@-------------------------------------------------------------------------------
;@ Disable Flash ECC Support
    EXPORT _coreDisableFlashEcc_
_coreDisableFlashEcc_:
        stmfd sp!, {r0}
        mrc   p15, #0x00, r0, c1, c0, #0x01
        bic   r0,  r0,    #0x02000000
        mcr   p15, #0x00, r0, c1, c0, #0x01
        ldmfd sp!, {r0}
        bx    lr

;@-------------------------------------------------------------------------------
;@ Get data fault status register
    EXPORT _coreGetDataFault_
_coreGetDataFault_:
        mrc   p15, #0, r0, c5, c0, #0
        bx    lr

;@-------------------------------------------------------------------------------
;@ Clear data fault status register
    EXPORT _coreClearDataFault_
_coreClearDataFault_:
        stmfd sp!, {r0}
        mov   r0,  #0
        mcr   p15, #0, r0, c5, c0, #0
        ldmfd sp!, {r0}
        bx    lr

;@-------------------------------------------------------------------------------
;@ Get instruction fault status register
    EXPORT _coreGetInstructionFault_
_coreGetInstructionFault_:
        mrc   p15, #0, r0, c5, c0, #1
        bx    lr

;@-------------------------------------------------------------------------------
;@ Clear instruction fault status register
    EXPORT _coreClearInstructionFault_
_coreClearInstructionFault_:
        stmfd sp!, {r0}
        mov   r0,  #0
        mcr   p15, #0, r0, c5, c0, #1
        ldmfd sp!, {r0}
        bx    lr

;@-------------------------------------------------------------------------------
;@ Get data fault address register
    EXPORT _coreGetDataFaultAddress_
_coreGetDataFaultAddress_:
        mrc   p15, #0, r0, c6, c0, #0
        bx    lr

;@-------------------------------------------------------------------------------
;@ Clear data fault address register
    EXPORT _coreClearDataFaultAddress_
_coreClearDataFaultAddress_:
        stmfd sp!, {r0}
        mov   r0,  #0
        mcr   p15, #0, r0, c6, c0, #0
        ldmfd sp!, {r0}
        bx    lr

;@-------------------------------------------------------------------------------
;@ Get instruction fault address register
EXPORT _coreGetInstructionFaultAddress_ _coreGetInstructionFaultAddress_: mrc p15, #0, r0, c6, c0, #2 bx lr ;@------------------------------------------------------------------------------- ;@ Clear instruction fault address register EXPORT _coreClearInstructionFaultAddress_ _coreClearInstructionFaultAddress_: stmfd sp!, {r0} mov r0, #0 mcr p15, #0, r0, c6, c0, #2 ldmfd sp!, {r0} bx lr ;@------------------------------------------------------------------------------- ;@ Get auxiliary data fault status register EXPORT _coreGetAuxiliaryDataFault_ _coreGetAuxiliaryDataFault_: mrc p15, #0, r0, c5, c1, #0 bx lr ;@------------------------------------------------------------------------------- ;@ Clear auxiliary data fault status register EXPORT _coreClearAuxiliaryDataFault_ _coreClearAuxiliaryDataFault_: stmfd sp!, {r0} mov r0, #0 mcr p15, #0, r0, c5, c1, #0 ldmfd sp!, {r0} bx lr ;@------------------------------------------------------------------------------- ;@ Get auxiliary instruction fault status register EXPORT _coreGetAuxiliaryInstructionFault_ _coreGetAuxiliaryInstructionFault_: mrc p15, #0, r0, c5, c1, #1 bx lr ;@------------------------------------------------------------------------------- ;@ Clear auxiliary instruction fault status register EXPORT _coreClearAuxiliaryInstructionFault_ _coreClearAuxiliaryInstructionFault_: stmfd sp!, {r0} mov r0, #0 mrc p15, #0, r0, c5, c1, #1 ldmfd sp!, {r0} bx lr ;@------------------------------------------------------------------------------- ;@ Work Around for Errata CORTEX-R4#57: ;@ ;@ Errata Description: ;@ Conditional VMRS APSR_Nzcv, FPSCR May Evaluate With Incorrect Flags ;@ Workaround: ;@ Disable out-of-order single-precision floating point ;@ multiply-accumulate instruction completion EXPORT _errata_CORTEXR4_57_ _errata_CORTEXR4_57_: push {r0} mrc p15, #0, r0, c15, c0, #0 ;@ Read Secondary Auxiliary Control Register orr r0, r0, #0x10000 ;@ Set BIT 16 (Set DOOFMACS) mcr p15, #0, r0, c15, c0, #0 ;@ Write Secondary 
Auxiliary Control Register pop {r0} bx lr ;@------------------------------------------------------------------------------- ;@ Work Around for Errata CORTEX-R4#66: ;@ ;@ Errata Description: ;@ Register Corruption During A Load-Multiple Instruction At ;@ an Exception Vector ;@ Workaround: ;@ Disable out-of-order completion for divide instructions in ;@ Auxiliary Control register EXPORT _errata_CORTEXR4_66_ _errata_CORTEXR4_66_: push {r0} mrc p15, #0, r0, c1, c0, #1 ;@ Read Auxiliary Control register orr r0, r0, #0x80 ;@ Set BIT 7 (Disable out-of-order completion ;@ for divide instructions.) mcr p15, #0, r0, c1, c0, #1 ;@ Write Auxiliary Control register pop {r0} bx lr EXPORT turnon_VFP turnon_VFP: ;@ Enable FPV stmdb sp!, {r0} fmrx r0, fpexc orr r0, r0, #0x40000000 fmxr fpexc, r0 ldmia sp!, {r0} subs pc, lr, #4 macro push_svc_reg sub sp, sp, #17 * 4 ;@/* Sizeof(struct rt_hw_exp_stack) */ stmia sp, {r0 - r12} ;@/* Calling r0-r12 */ mov r0, sp mrs r6, spsr ;@/* Save CPSR */ str lr, [r0, #15*4] ;@/* Push PC */ str r6, [r0, #16*4] ;@/* Push CPSR */ cps #Mode_SVC str sp, [r0, #13*4] ;@/* Save calling SP */ str lr, [r0, #14*4] ;@/* Save calling PC */ endm EXPORT SVC_Handler SVC_Handler: push_svc_reg bl rt_hw_trap_svc b . EXPORT Prefetch_Handler Prefetch_Handler: push_svc_reg bl rt_hw_trap_pabt b . EXPORT Abort_Handler Abort_Handler: push_svc_reg bl rt_hw_trap_dabt b . EXPORT Reserved_Handler Reserved_Handler: push_svc_reg bl rt_hw_trap_resv b . END
Aladdin-Wang/MicroBoot_Demo
13,975
STM32F7_APP/rt-thread/libcpu/arm/cortex-r52/start_gcc.S
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 */

@-------------------------------------------------------------------------------
@ sys_core.asm
@
@ (c) Texas Instruments 2009-2013, All rights reserved.
@

//#include <rtconfig.h>

@ ARM processor-mode values for the CPSR M field
.equ Mode_USR,        0x10
.equ Mode_FIQ,        0x11
.equ Mode_IRQ,        0x12
.equ Mode_SVC,        0x13
.equ Mode_ABT,        0x17
.equ Mode_UND,        0x1B
.equ Mode_SYS,        0x1F

.equ I_Bit,           0x80    @ when I bit is set, IRQ is disabled
.equ F_Bit,           0x40    @ when F bit is set, FIQ is disabled

@ Per-mode stack sizes (bytes)
.equ UND_Stack_Size,  0x00000000
.equ SVC_Stack_Size,  0x00000000
.equ ABT_Stack_Size,  0x00000000
.equ FIQ_Stack_Size,  0x00001000
.equ IRQ_Stack_Size,  0x00001000

.section .bss.noinit
/* stack */
.globl stack_start
.globl stack_top
.align 3
stack_start:
.rept (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + FIQ_Stack_Size + IRQ_Stack_Size)
.byte 0
.endr
stack_top:

.section .text, "ax"
.text
.arm
.globl _c_int00
.globl _reset
_reset:
@-------------------------------------------------------------------------------
@ Initialize CPU Registers
@ After reset, the CPU is in the Supervisor mode (M = 10011)
    mov r0, #0x0000
    mov r1, #0x0000
    mov r2, #0x0000
    mov r3, #0x0000
    mov r4, #0x0000
    mov r5, #0x0000
    mov r6, #0x0000
    mov r7, #0x0000
    mov r8, #0x0000
    mov r9, #0x0000
    mov r10, #0x0000
    mov r11, #0x0000
    mov r12, #0x0000
    mov r13, #0x0000
    mrs r1, cpsr
    msr spsr_cxsf, r1
    cpsid if, #19               @ stay in SVC mode (0x13) with IRQ/FIQ masked

#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
@ Turn on FPV coprocessor
    mrc p15, #0x00, r2, c1, c0, #0x02
    orr r2, r2, #0xF00000       @ grant full access to CP10/CP11 in CPACR
    mcr p15, #0x00, r2, c1, c0, #0x02

    fmrx r2, fpexc
    orr r2, r2, #0x40000000     @ FPEXC.EN: enable the VFP
    fmxr fpexc, r2
#endif

@-------------------------------------------------------------------------------
@ Initialize Stack Pointers
@ Each banked mode gets its own slice of the reserved stack area, carved
@ downward from stack_top.
    ldr r0, =stack_top          @ Set the startup stack for svc
    mov sp, r0

    @ Enter Undefined Instruction Mode and set its Stack Pointer
    msr cpsr_c, #Mode_UND|I_Bit|F_Bit
    mov sp, r0
    sub r0, r0, #UND_Stack_Size

    @ Enter Abort Mode and set its Stack Pointer
    msr cpsr_c, #Mode_ABT|I_Bit|F_Bit
    mov sp, r0
    sub r0, r0, #ABT_Stack_Size

    @ Enter FIQ Mode and set its Stack Pointer
    msr cpsr_c, #Mode_FIQ|I_Bit|F_Bit
    mov sp, r0
    sub r0, r0, #FIQ_Stack_Size

    @ Enter IRQ Mode and set its Stack Pointer
    msr cpsr_c, #Mode_IRQ|I_Bit|F_Bit
    mov sp, r0
    sub r0, r0, #IRQ_Stack_Size

    @ Switch back to SVC
    msr cpsr_c, #Mode_SVC|I_Bit|F_Bit

    bl next1
next1:
    bl next2
next2:
    bl next3
next3:
    bl next4
next4:
    ldr lr, =_c_int00
    bx lr

@ Copy .data from its load image to SRAM, zero .bss, then run C++ ctors.
.globl data_init
data_init:
    /* copy .data to SRAM */
    ldr r1, =_mdata             /* .data start in image */
    ldr r2, =_data_end          /* .data end in image */
    ldr r3, =_data_start        /* sram data start */
data_loop:
    ldr r0, [r1, #0]
    str r0, [r3]
    add r1, r1, #4
    add r3, r3, #4
    cmp r3, r2                  /* check if data to clear */
                                /* NOTE(review): loop bound compares the DEST pointer (r3)
                                 * against _data_end; this only terminates correctly if
                                 * _data_end is a destination-region (VMA) symbol --
                                 * verify against the linker script. */
    blo data_loop               /* loop until done */

    /* clear .bss */
    mov r0,#0                   /* get a zero */
    ldr r1,=__bss_start__       /* bss start */
    ldr r2,=__bss_end__         /* bss end */
bss_loop:
    cmp r1,r2                   /* check if data to clear */
    strlo r0,[r1],#4            /* clear 4 bytes */
    blo bss_loop                /* loop until done */

    /* call C++ constructors of global objects */
    ldr r0, =__ctors_start__
    ldr r1, =__ctors_end__
ctor_loop:
    cmp r0, r1
    beq ctor_end
    ldr r2, [r0], #4
    stmfd sp!, {r0-r3, ip, lr}
    mov lr, pc
    bx r2
    ldmfd sp!, {r0-r3, ip, lr}
    b ctor_loop
ctor_end:
    bx lr

@-------------------------------------------------------------------------------
@ Enable RAM ECC Support
.globl _coreEnableRamEcc_
_coreEnableRamEcc_:
    stmfd sp!, {r0}
    mrc p15, #0x00, r0, c1, c0, #0x01
    orr r0, r0, #0x0C000000
    mcr p15, #0x00, r0, c1, c0, #0x01
    ldmfd sp!, {r0}
    bx lr

@-------------------------------------------------------------------------------
@ Disable RAM ECC Support
.globl _coreDisableRamEcc_
_coreDisableRamEcc_:
    stmfd sp!, {r0}
    mrc p15, #0x00, r0, c1, c0, #0x01
    bic r0, r0, #0x0C000000
    mcr p15, #0x00, r0, c1, c0, #0x01
    ldmfd sp!, {r0}
    bx lr

@-------------------------------------------------------------------------------
@ Enable Flash ECC Support
.globl _coreEnableFlashEcc_
_coreEnableFlashEcc_:
    stmfd sp!, {r0}
    mrc p15, #0x00, r0, c1, c0, #0x01
    orr r0, r0, #0x02000000
    dmb
    mcr p15, #0x00, r0, c1, c0, #0x01
    ldmfd sp!, {r0}
    bx lr

@-------------------------------------------------------------------------------
@ Disable Flash ECC Support
.globl _coreDisableFlashEcc_
_coreDisableFlashEcc_:
    stmfd sp!, {r0}
    mrc p15, #0x00, r0, c1, c0, #0x01
    bic r0, r0, #0x02000000
    mcr p15, #0x00, r0, c1, c0, #0x01
    ldmfd sp!, {r0}
    bx lr

@-------------------------------------------------------------------------------
@ Get data fault status register
.globl _coreGetDataFault_
_coreGetDataFault_:
    mrc p15, #0, r0, c5, c0, #0
    bx lr

@-------------------------------------------------------------------------------
@ Clear data fault status register
.globl _coreClearDataFault_
_coreClearDataFault_:
    stmfd sp!, {r0}
    mov r0, #0
    mcr p15, #0, r0, c5, c0, #0
    ldmfd sp!, {r0}
    bx lr

@-------------------------------------------------------------------------------
@ Get instruction fault status register
.globl _coreGetInstructionFault_
_coreGetInstructionFault_:
    mrc p15, #0, r0, c5, c0, #1
    bx lr

@-------------------------------------------------------------------------------
@ Clear instruction fault status register
.globl _coreClearInstructionFault_
_coreClearInstructionFault_:
    stmfd sp!, {r0}
    mov r0, #0
    mcr p15, #0, r0, c5, c0, #1
    ldmfd sp!, {r0}
    bx lr

@-------------------------------------------------------------------------------
@ Get data fault address register
.globl _coreGetDataFaultAddress_
_coreGetDataFaultAddress_:
    mrc p15, #0, r0, c6, c0, #0
    bx lr

@-------------------------------------------------------------------------------
@ Clear data fault address register
.globl _coreClearDataFaultAddress_
_coreClearDataFaultAddress_:
    stmfd sp!, {r0}
    mov r0, #0
    mcr p15, #0, r0, c6, c0, #0
    ldmfd sp!, {r0}
    bx lr

@-------------------------------------------------------------------------------
@ Get instruction fault address register
.globl _coreGetInstructionFaultAddress_
_coreGetInstructionFaultAddress_:
    mrc p15, #0, r0, c6, c0, #2
    bx lr

@-------------------------------------------------------------------------------
@ Clear instruction fault address register
.globl _coreClearInstructionFaultAddress_
_coreClearInstructionFaultAddress_:
    stmfd sp!, {r0}
    mov r0, #0
    mcr p15, #0, r0, c6, c0, #2
    ldmfd sp!, {r0}
    bx lr

@-------------------------------------------------------------------------------
@ Get auxiliary data fault status register
.globl _coreGetAuxiliaryDataFault_
_coreGetAuxiliaryDataFault_:
    mrc p15, #0, r0, c5, c1, #0
    bx lr

@-------------------------------------------------------------------------------
@ Clear auxiliary data fault status register
.globl _coreClearAuxiliaryDataFault_
_coreClearAuxiliaryDataFault_:
    stmfd sp!, {r0}
    mov r0, #0
    mcr p15, #0, r0, c5, c1, #0
    ldmfd sp!, {r0}
    bx lr

@-------------------------------------------------------------------------------
@ Get auxiliary instruction fault status register
.globl _coreGetAuxiliaryInstructionFault_
_coreGetAuxiliaryInstructionFault_:
    mrc p15, #0, r0, c5, c1, #1
    bx lr

@-------------------------------------------------------------------------------
@ Clear auxiliary instruction fault status register
.globl _coreClearAuxiliaryInstructionFault_
_coreClearAuxiliaryInstructionFault_:
    stmfd sp!, {r0}
    mov r0, #0
    @ FIX: was "mrc" (read), which left the register uncleared and clobbered
    @ r0; every sibling clear routine writes the zero back with "mcr".
    mcr p15, #0, r0, c5, c1, #1
    ldmfd sp!, {r0}
    bx lr

@-------------------------------------------------------------------------------
@ Clear ESM CCM errorss
.globl _esmCcmErrorsClear_
_esmCcmErrorsClear_:
    stmfd sp!, {r0-r2}
    ldr r0, ESMSR1_REG          @ load the ESMSR1 status register address
    ldr r2, ESMSR1_ERR_CLR
    str r2, [r0]                @ clear the ESMSR1 register

    ldr r0, ESMSR2_REG          @ load the ESMSR2 status register address
    ldr r2, ESMSR2_ERR_CLR
    str r2, [r0]                @ clear the ESMSR2 register

    ldr r0, ESMSSR2_REG         @ load the ESMSSR2 status register address
    ldr r2, ESMSSR2_ERR_CLR
    str r2, [r0]                @ clear the ESMSSR2 register

    ldr r0, ESMKEY_REG          @ load the ESMKEY register address
    mov r2, #0x5                @ load R2 with 0x5
    str r2, [r0]                @ clear the ESMKEY register

    ldr r0, VIM_INTREQ          @ load the INTREQ register address
    ldr r2, VIM_INT_CLR
    str r2, [r0]                @ clear the INTREQ register

    ldr r0, CCMR4_STAT_REG      @ load the CCMR4 status register address
    ldr r2, CCMR4_ERR_CLR
    str r2, [r0]                @ clear the CCMR4 status register

    ldmfd sp!, {r0-r2}
    bx lr

@ Literal pool: memory-mapped register addresses and clear masks.
ESMSR1_REG:        .word 0xFFFFF518
ESMSR2_REG:        .word 0xFFFFF51C
ESMSR3_REG:        .word 0xFFFFF520
ESMKEY_REG:        .word 0xFFFFF538
ESMSSR2_REG:       .word 0xFFFFF53C
CCMR4_STAT_REG:    .word 0xFFFFF600
ERR_CLR_WRD:       .word 0xFFFFFFFF
CCMR4_ERR_CLR:     .word 0x00010000
ESMSR1_ERR_CLR:    .word 0x80000000
ESMSR2_ERR_CLR:    .word 0x00000004
ESMSSR2_ERR_CLR:   .word 0x00000004
VIM_INT_CLR:       .word 0x00000001
VIM_INTREQ:        .word 0xFFFFFE20

@-------------------------------------------------------------------------------
@ Work Around for Errata CORTEX-R4#57:
@
@ Errata Description:
@            Conditional VMRS APSR_Nzcv, FPSCR May Evaluate With Incorrect Flags
@ Workaround:
@            Disable out-of-order single-precision floating point
@            multiply-accumulate instruction completion
.globl _errata_CORTEXR4_57_
_errata_CORTEXR4_57_:
    push {r0}
    mrc p15, #0, r0, c15, c0, #0    @ Read Secondary Auxiliary Control Register
    orr r0, r0, #0x10000            @ Set BIT 16 (Set DOOFMACS)
    mcr p15, #0, r0, c15, c0, #0    @ Write Secondary Auxiliary Control Register
    pop {r0}
    bx lr

@-------------------------------------------------------------------------------
@ Work Around for Errata CORTEX-R4#66:
@
@ Errata Description:
@            Register Corruption During A Load-Multiple Instruction At
@            an Exception Vector
@ Workaround:
@            Disable out-of-order completion for divide instructions in
@            Auxiliary Control register
.globl _errata_CORTEXR4_66_
_errata_CORTEXR4_66_:
    push {r0}
    mrc p15, #0, r0, c1, c0, #1     @ Read Auxiliary Control register
    orr r0, r0, #0x80               @ Set BIT 7 (Disable out-of-order completion
                                    @ for divide instructions.)
    mcr p15, #0, r0, c1, c0, #1     @ Write Auxiliary Control register
    pop {r0}
    bx lr

@ Enable the VFP coprocessor. Intended to be reached from an undefined-
@ instruction trap: "subs pc, lr, #4" re-executes the instruction that
@ trapped, now with the FPU enabled.
.globl turnon_VFP
turnon_VFP:
    @ Enable FPV
    STMDB sp!, {r0}
    fmrx r0, fpexc
    orr r0, r0, #0x40000000         @ FPEXC.EN
    fmxr fpexc, r0
    LDMIA sp!, {r0}
    subs pc, lr, #4

@ Build a struct rt_hw_exp_stack frame on the SVC stack for the C trap handlers.
.macro push_svc_reg
    sub     sp, sp, #17 * 4         @/* Sizeof(struct rt_hw_exp_stack)  */
    stmia   sp, {r0 - r12}          @/* Calling r0-r12                  */
    mov     r0, sp
    mrs     r6, spsr                @/* Save CPSR                       */
    str     lr, [r0, #15*4]         @/* Push PC                         */
    str     r6, [r0, #16*4]         @/* Push CPSR                       */
    cps     #Mode_SVC
    str     sp, [r0, #13*4]         @/* Save calling SP                 */
    str     lr, [r0, #14*4]         @/* Save calling PC                 */
.endm

.globl vector_svc
vector_svc:
    push_svc_reg
    bl      rt_hw_trap_svc
    b       .

.globl vector_pabort
vector_pabort:
    push_svc_reg
    bl      rt_hw_trap_pabt
    b       .

.globl vector_dabort
vector_dabort:
    push_svc_reg
    bl      rt_hw_trap_dabt
    b       .

.globl vector_resv
vector_resv:
    push_svc_reg
    bl      rt_hw_trap_resv
    b       .
Aladdin-Wang/MicroBoot_Demo
7,405
STM32F7_APP/rt-thread/libcpu/arm/cortex-r52/context_gcc.S
/*
 * Copyright (c) 2006-2024, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024-03-01     Wangyuqiang  first version
 */

/**
 * @addtogroup cortex-r52
 */
/*@{*/

//#include <rtconfig.h>

.text
.arm

.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_hw_trap_irq

/*
 * rt_base_t rt_hw_interrupt_disable()
 * Returns the previous CPSR, then masks IRQ and FIQ.
 */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
    MRS     r0, cpsr
    CPSID   IF
    BX      lr

/*
 * void rt_hw_interrupt_enable(rt_base_t level)
 * Restores the CPSR control bits previously returned by rt_hw_interrupt_disable().
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
    MSR     cpsr_c, r0
    BX      lr

/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to)
 * r0 --> from
 * r1 --> to
 *
 * Saves the full register frame (plus optional VFP frame) on the current
 * thread stack, stores sp into *from, then restores the frame from *to.
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
    STMDB   sp!, {lr}           @ push pc (lr should be pushed in place of PC)
    STMDB   sp!, {r0-r12, lr}   @ push lr & register file

    MRS     r4, cpsr
    TST     lr, #0x01
    ORRNE   r4, r4, #0x20       @ it's thumb code

    STMDB   sp!, {r4}           @ push cpsr

#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    VMRS    r4, fpexc
    TST     r4, #0x40000000     @ save VFP frame only if the VFP is enabled
    BEQ     __no_vfp_frame1
    VSTMDB  sp!, {d0-d15}
    VMRS    r5, fpscr
    @ TODO: add support for Common VFPv3.
    @       Save registers like FPINST, FPINST2
    STMDB   sp!, {r5}
__no_vfp_frame1:
    STMDB   sp!, {r4}
#endif

    STR     sp, [r0]            @ store sp in preempted tasks TCB
    LDR     sp, [r1]            @ get new task stack pointer

#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    LDMIA   sp!, {r0}           @ get fpexc
    VMSR    fpexc, r0           @ restore fpexc
    TST     r0, #0x40000000
    BEQ     __no_vfp_frame2
    LDMIA   sp!, {r1}           @ get fpscr
    VMSR    fpscr, r1
    VLDMIA  sp!, {d0-d15}
__no_vfp_frame2:
#endif

    LDMIA   sp!, {r4}           @ pop new task cpsr to spsr
    MSR     spsr_cxsf, r4
    LDMIA   sp!, {r0-r12, lr, pc}^  @ pop new task r0-r12, lr & pc, copy spsr to cpsr

/*
 * void rt_hw_context_switch_to(rt_uint32 to)
 * r0 --> to
 *
 * Restore-only variant used to start the first thread; never returns.
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
    LDR     sp, [r0]            @ get new task stack pointer

#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    LDMIA   sp!, {r0}           @ get fpexc
    VMSR    fpexc, r0
    TST     r0, #0x40000000
    BEQ     __no_vfp_frame_to
    LDMIA   sp!, {r1}           @ get fpscr
    VMSR    fpscr, r1
    VLDMIA  sp!, {d0-d15}
__no_vfp_frame_to:
#endif

    LDMIA   sp!, {r4}           @ pop new task cpsr to spsr
    MSR     spsr_cxsf, r4
    LDMIA   sp!, {r0-r12, lr, pc}^  @ pop new task r0-r12, lr & pc, copy spsr to cpsr

/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to)@
 * Records the pending switch; the actual switch happens on IRQ exit
 * in rt_hw_context_switch_interrupt_do.
 */
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
    LDR     r2, =rt_thread_switch_interrupt_flag
    LDR     r3, [r2]
    CMP     r3, #1
    BEQ     _reswitch               @ switch already pending: keep original "from"
    MOV     r3, #1                  @ set rt_thread_switch_interrupt_flag to 1
    STR     r3, [r2]
    LDR     r2, =rt_interrupt_from_thread   @ set rt_interrupt_from_thread
    STR     r0, [r2]
_reswitch:
    LDR     r2, =rt_interrupt_to_thread     @ set rt_interrupt_to_thread
    STR     r1, [r2]
    BX      lr

/* IRQ entry: save context, dispatch to the C IRQ trap, then either return
 * to the interrupted thread or fall into the deferred context switch. */
.globl IRQ_Handler
IRQ_Handler:
    STMDB   sp!, {r0-r12,lr}

#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    VMRS    r0, fpexc
    TST     r0, #0x40000000
    BEQ     __no_vfp_frame_str_irq
    VSTMDB  sp!, {d0-d15}
    VMRS    r1, fpscr
    @ TODO: add support for Common VFPv3.
    @       Save registers like FPINST, FPINST2
    STMDB   sp!, {r1}
__no_vfp_frame_str_irq:
    STMDB   sp!, {r0}
#endif

    BL      rt_interrupt_enter
    BL      rt_hw_trap_irq
    BL      rt_interrupt_leave

    @ if rt_thread_switch_interrupt_flag set, jump to
    @ rt_hw_context_switch_interrupt_do and don't return
    LDR     r0, =rt_thread_switch_interrupt_flag
    LDR     r1, [r0]
    CMP     r1, #1
    BEQ     rt_hw_context_switch_interrupt_do

#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    LDMIA   sp!, {r0}           @ get fpexc
    VMSR    fpexc, r0
    TST     r0, #0x40000000
    BEQ     __no_vfp_frame_ldr_irq
    LDMIA   sp!, {r1}           @ get fpscr
    VMSR    fpscr, r1
    VLDMIA  sp!, {d0-d15}
__no_vfp_frame_ldr_irq:
#endif

    LDMIA   sp!, {r0-r12,lr}
    SUBS    pc, lr, #4          @ IRQ exception return: pc = lr - 4, restore cpsr

/*
 * void rt_hw_context_switch_interrupt_do(rt_base_t flag)
 *
 * Performs the context switch that was deferred during IRQ handling:
 * transplants the interrupted thread's context from the IRQ stack onto
 * its own SVC-mode stack, then restores the "to" thread.
 */
.globl rt_hw_context_switch_interrupt_do
rt_hw_context_switch_interrupt_do:
    MOV     r1,  #0             @ clear flag
    STR     r1,  [r0]

#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    LDMIA   sp!, {r0}           @ get fpexc
    VMSR    fpexc, r0
    TST     r0, #0x40000000
    BEQ     __no_vfp_frame_do1
    LDMIA   sp!, {r1}           @ get fpscr
    VMSR    fpscr, r1
    VLDMIA  sp!, {d0-d15}
__no_vfp_frame_do1:
#endif

    LDMIA   sp!, {r0-r12,lr}    @ reload saved registers
    STMDB   sp, {r0-r3}         @ save r0-r3. We will restore r0-r3 in the SVC
                                @ mode so there is no need to update SP.
    SUB     r1, sp, #16         @ save the right SP value in r1, so we could restore r0-r3.
    SUB     r2, lr, #4          @ save old task's pc to r2

    MRS     r3, spsr            @ get cpsr of interrupt thread

    @ switch to SVC mode and no interrupt
    CPSID   IF, #0x13

    STMDB   sp!, {r2}           @ push old task's pc
    STMDB   sp!, {r4-r12,lr}    @ push old task's lr,r12-r4
    LDMIA   r1!, {r4-r7}        @ restore r0-r3 of the interrupted thread
    STMDB   sp!, {r4-r7}        @ push old task's r3-r0. We don't need to push/pop them to
                                @ r0-r3 because we just want to transfer the data and don't
                                @ use them here.
    STMDB   sp!, {r3}           @ push old task's cpsr

#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    VMRS    r0, fpexc
    TST     r0, #0x40000000
    BEQ     __no_vfp_frame_do2
    VSTMDB  sp!, {d0-d15}
    VMRS    r1, fpscr
    @ TODO: add support for Common VFPv3.
    @       Save registers like FPINST, FPINST2
    STMDB   sp!, {r1}
__no_vfp_frame_do2:
    STMDB   sp!, {r0}
#endif

    LDR     r4,  =rt_interrupt_from_thread
    LDR     r5,  [r4]
    STR     sp,  [r5]           @ store sp in preempted tasks's TCB

    LDR     r6,  =rt_interrupt_to_thread
    LDR     r6,  [r6]
    LDR     sp,  [r6]           @ get new task's stack pointer

#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    LDMIA   sp!, {r0}           @ get fpexc
    VMSR    fpexc, r0
    TST     r0, #0x40000000
    BEQ     __no_vfp_frame_do3
    LDMIA   sp!, {r1}           @ get fpscr
    VMSR    fpscr, r1
    VLDMIA  sp!, {d0-d15}
__no_vfp_frame_do3:
#endif

    LDMIA   sp!, {r4}           @ pop new task's cpsr to spsr
    MSR     spsr_cxsf, r4
    LDMIA   sp!, {r0-r12,lr,pc}^ @ pop new task's r0-r12,lr & pc, copy spsr to cpsr
Aladdin-Wang/MicroBoot_Demo
7,759
STM32F7_APP/rt-thread/libcpu/arm/cortex-r52/context_iar.S
;/*
; * Copyright (c) 2006-2022, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date           Author       Notes
; * 2009-01-20     Bernard      first version
; * 2011-07-22     Bernard      added thumb mode porting
; * 2013-05-24     Grissiom     port to CCS
; * 2013-05-26     Grissiom     optimize for ARMv7
; * 2013-10-20     Grissiom     port to GCC
; * 2024-03-11     Wangyuqiang  rzn2l adapt
; */

    SECTION .text:CODE(2)
    ARM

    IMPORT rt_thread_switch_interrupt_flag
    IMPORT rt_interrupt_from_thread
    IMPORT rt_interrupt_to_thread
    IMPORT rt_interrupt_enter
    IMPORT rt_interrupt_leave
    IMPORT rt_hw_trap_irq

;/*
; * rt_base_t rt_hw_interrupt_disable();
; *
; * Returns the current CPSR in r0, then masks IRQ.
; */
    EXPORT rt_hw_interrupt_disable
rt_hw_interrupt_disable:
    MRS r0, CPSR
    CPSID I
    BX lr

;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; *
; * Restores the CPSR control field from the value previously returned
; * by rt_hw_interrupt_disable().
; */
    EXPORT rt_hw_interrupt_enable
rt_hw_interrupt_enable:
    MSR CPSR_c, r0
    BX lr

;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to)
; * r0 --> from
; * r1 --> to
; *
; * Saved frame layout, from final stack top downwards:
; *   [fpexc, (fpscr, d0-d15 if VFP frame live),] cpsr, r0-r12, lr, pc
; */
    EXPORT rt_hw_context_switch
rt_hw_context_switch:
    STMDB sp!, {lr}             ; push pc (lr should be pushed in place of PC)
    STMDB sp!, {r0-r12, lr}     ; push lr & register file
    MRS r4, CPSR
    ; If bit0 of the return address is set the caller runs in Thumb state,
    ; so set the T bit (0x20) in the CPSR image that will be restored on
    ; resume.  (The previous CMP r4,#0 / BNE-to-next-line sequence set the
    ; T bit unconditionally, corrupting ARM-mode tasks.)
    TST lr, #0x01
    ORRNE r4, r4, #0x20         ; it's thumb code
    STMDB sp!, {r4}             ; push cpsr
#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    VMRS r4, fpexc
    TST r4, #0x40000000         ; FPEXC.EN: only stack VFP regs if VFP enabled
    BEQ __no_vfp_frame1
    VSTMDB sp!, {d0-d15}
    VMRS r5, fpscr
    ; TODO: add support for Common VFPv3.
    ; Save registers like FPINST, FPINST2
    STMDB sp!, {r5}
__no_vfp_frame1:
    STMDB sp!, {r4}
#endif
    STR sp, [r0]                ; store sp in preempted tasks TCB
    LDR sp, [r1]                ; get new task stack pointer
#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    LDMIA sp!, {r0}             ; get fpexc
    VMSR fpexc, r0              ; restore fpexc
    TST r0, #0x40000000
    BEQ __no_vfp_frame2
    LDMIA sp!, {r1}             ; get fpscr
    VMSR fpscr, r1
    VLDMIA sp!, {d0-d15}
__no_vfp_frame2:
#endif
    LDMIA sp!, {r4}             ; pop new task cpsr to spsr
    MSR spsr_cxsf, r4
    LDMIA sp!, {r0-r12, lr, pc}^ ; pop new task r0-r12, lr & pc, copy spsr to cpsr
    ; NOTE(review): dead code after the exception-return LDM (MOV lr, pc /
    ; STMIA sp!, {lr}) removed - it could never execute.

;/*
; * void rt_hw_context_switch_to(rt_uint32 to)
; * r0 --> to
; *
; * First-thread switch: restores a full frame from *to without saving
; * any previous context.
; */
    EXPORT rt_hw_context_switch_to
rt_hw_context_switch_to:
    LDR sp, [r0]                ; get new task stack pointer
#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    LDMIA sp!, {r0}             ; get fpexc
    VMSR fpexc, r0
    TST r0, #0x40000000
    BEQ __no_vfp_frame_to
    LDMIA sp!, {r1}             ; get fpscr
    VMSR fpscr, r1
    VLDMIA sp!, {d0-d15}
__no_vfp_frame_to:
#endif
    LDMIA sp!, {r4}             ; pop new task cpsr to spsr
    MSR spsr_cxsf, r4
    LDMIA sp!, {r0-r12, lr, pc}^ ; pop new task r0-r12, lr & pc, copy spsr to cpsr

;/*
; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to)
; *
; * Deferred switch requested from interrupt context: records from/to and
; * sets a flag; the actual switch happens in IRQ_Handler on exit.  If a
; * switch is already pending, only the "to" thread is updated.
; */
    EXPORT rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
    LDR r2, =rt_thread_switch_interrupt_flag
    LDR r3, [r2]
    CMP r3, #1
    BEQ _reswitch
    MOV r3, #1                  ; set rt_thread_switch_interrupt_flag to 1
    STR r3, [r2]
    LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
    STR r0, [r2]
_reswitch:
    LDR r2, =rt_interrupt_to_thread   ; set rt_interrupt_to_thread
    STR r1, [r2]
    BX lr

;/*
; * IRQ entry: saves the interrupted context, dispatches rt_hw_trap_irq
; * between enter/leave bookkeeping, then either returns to the interrupted
; * thread or falls into rt_hw_context_switch_interrupt_do.
; */
    EXPORT IRQ_Handler
IRQ_Handler:
    STMDB sp!, {r0-r12,lr}
#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    VMRS r0, fpexc
    TST r0, #0x40000000
    BEQ __no_vfp_frame_str_irq
    VSTMDB sp!, {d0-d15}
    VMRS r1, fpscr
    ; TODO: add support for Common VFPv3.
    ; Save registers like FPINST, FPINST2
    STMDB sp!, {r1}
__no_vfp_frame_str_irq:
    STMDB sp!, {r0}
#endif
    BL rt_interrupt_enter
    BL rt_hw_trap_irq
    BL rt_interrupt_leave

    ; if rt_thread_switch_interrupt_flag set, jump to
    ; rt_hw_context_switch_interrupt_do and don't return
    LDR r0, =rt_thread_switch_interrupt_flag
    LDR r1, [r0]
    CMP r1, #1
    BEQ rt_hw_context_switch_interrupt_do

#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    LDMIA sp!, {r0}             ; get fpexc
    VMSR fpexc, r0
    TST r0, #0x40000000
    BEQ __no_vfp_frame_ldr_irq
    LDMIA sp!, {r1}             ; get fpscr
    VMSR fpscr, r1
    VLDMIA sp!, {d0-d15}
__no_vfp_frame_ldr_irq:
#endif
    LDMIA sp!, {r0-r12,lr}
    SUBS pc, lr, #4             ; IRQ exception return (lr-4 = interrupted insn)

;/*
; * void rt_hw_context_switch_interrupt_do(rt_base_t flag)
; *
; * Entered from IRQ_Handler with r0 = &rt_thread_switch_interrupt_flag and
; * the interrupted thread's registers still on the IRQ stack.  Rebuilds the
; * thread's frame on its own SVC-mode stack, saves sp into the preempted
; * thread's TCB, then restores the "to" thread's frame.
; */
    EXPORT rt_hw_context_switch_interrupt_do
rt_hw_context_switch_interrupt_do:
    MOV r1, #0                  ; clear flag
    STR r1, [r0]
#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    LDMIA sp!, {r0}             ; get fpexc
    VMSR fpexc, r0
    TST r0, #0x40000000
    BEQ __no_vfp_frame_do1
    LDMIA sp!, {r1}             ; get fpscr
    VMSR fpscr, r1
    VLDMIA sp!, {d0-d15}
__no_vfp_frame_do1:
#endif
    LDMIA sp!, {r0-r12,lr}      ; reload saved registers
    STMDB sp, {r0-r3}           ; save r0-r3. We will restore r0-r3 in the SVC
                                ; mode so there is no need to update SP.
    SUB r1, sp, #16             ; save the right SP value in r1, so we could restore r0-r3.
    SUB r2, lr, #4              ; save old task's pc to r2
    MRS r3, spsr                ; get cpsr of interrupt thread

    ; switch to SVC mode and no interrupt
    CPSID IF, #0x13

    STMDB sp!, {r2}             ; push old task's pc
    STMDB sp!, {r4-r12,lr}      ; push old task's lr,r12-r4
    LDMIA r1!, {r4-r7}          ; restore r0-r3 of the interrupted thread
    STMDB sp!, {r4-r7}          ; push old task's r3-r0. We don't need to push/pop them to
                                ; r0-r3 because we just want to transfer the data and don't
                                ; use them here.
    STMDB sp!, {r3}             ; push old task's cpsr
#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    VMRS r0, fpexc
    TST r0, #0x40000000
    BEQ __no_vfp_frame_do2
    VSTMDB sp!, {d0-d15}
    VMRS r1, fpscr
    ; TODO: add support for Common VFPv3.
    ; Save registers like FPINST, FPINST2
    STMDB sp!, {r1}
__no_vfp_frame_do2:
    STMDB sp!, {r0}
#endif
    LDR r4, =rt_interrupt_from_thread
    LDR r5, [r4]
    STR sp, [r5]                ; store sp in preempted tasks's TCB
    LDR r6, =rt_interrupt_to_thread
    LDR r6, [r6]
    LDR sp, [r6]                ; get new task's stack pointer
#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    LDMIA sp!, {r0}             ; get fpexc
    VMSR fpexc, r0
    TST r0, #0x40000000
    BEQ __no_vfp_frame_do3
    LDMIA sp!, {r1}             ; get fpscr
    VMSR fpscr, r1
    VLDMIA sp!, {d0-d15}
__no_vfp_frame_do3:
#endif
    LDMIA sp!, {r4}             ; pop new task's cpsr to spsr
    MSR spsr_cxsf, r4
    LDMIA sp!, {r0-r12,lr,pc}^  ; pop new task's r0-r12,lr & pc, copy spsr to cpsr
    ; NOTE(review): unreachable trailing code (MOV lr, pc / STMIA sp!, {r0-r12, lr} /
    ; STR pc, [sp]) removed - execution never passes the exception return above.

    END
Aladdin-Wang/MicroBoot_Demo
1,072
STM32F7_APP/rt-thread/libcpu/arm/cortex-r52/vector_iar.S
;/*
; * Copyright (c) 2006-2018, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date           Author       Notes
; * 2024-03-11     Wangyuqiang  first version
; */

;@-------------------------------------------------------------------------------
;@ sys_intvecs.asm
;@
;@ (c) Texas Instruments 2009-2013, All rights reserved.
;@

;SECTION .vectors:"ax"

;@-------------------------------------------------------------------------------
;@ import reference for interrupt routines
    IMPORT Reset_Handler
    IMPORT Undefined_Handler
    IMPORT SVC_Handler
    IMPORT Prefetch_Handler
    IMPORT Abort_Handler
    IMPORT Reserved_Handler
    IMPORT IRQ_Handler
    IMPORT FIQ_Handler

;/*
; * int system_vectors(void);
; *
; * ARM exception vector table: one branch per hardware vector slot, in
; * architectural order starting at offset 0x00.
; */
    EXPORT system_vectors

    SECTION .intvec:CODE:NOROOT(2)
system_vectors:
    b Reset_Handler         ; 0x00 reset
    b Undefined_Handler     ; 0x04 undefined instruction
    b SVC_Handler           ; 0x08 supervisor call
    b Prefetch_Handler      ; 0x0C prefetch abort
    b Abort_Handler         ; 0x10 data abort
    b Reserved_Handler      ; 0x14 reserved slot
    b IRQ_Handler           ; 0x18 IRQ
    b FIQ_Handler           ; 0x1C FIQ

    END
Aladdin-Wang/MicroBoot_Demo
12,636
STM32F7_APP/rt-thread/libcpu/arm/zynqmp-r5/start_gcc.S
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2020-03-19     WangHuachen  first version
 * 2021-05-11     WangHuachen  Added call to Xil_InitializeExistingMPURegConfig to
 *                             initialize the MPU configuration table with the MPU
 *                             configurations already set in Init_Mpu function.
 */

/* ARM processor-mode encodings for CPSR[4:0] */
.equ Mode_USR, 0x10
.equ Mode_FIQ, 0x11
.equ Mode_IRQ, 0x12
.equ Mode_SVC, 0x13
.equ Mode_ABT, 0x17
.equ Mode_UND, 0x1B
.equ Mode_SYS, 0x1F

.equ I_Bit, 0x80 @ when I bit is set, IRQ is disabled
.equ F_Bit, 0x40 @ when F bit is set, FIQ is disabled

/* Per-mode stack sizes carved from the shared ISR stack (top-down) */
.equ UND_Stack_Size, 0x00000000
.equ SVC_Stack_Size, 0x00000000
.equ ABT_Stack_Size, 0x00000000
.equ FIQ_Stack_Size, 0x00000200
.equ IRQ_Stack_Size, 0x00000200
.equ USR_Stack_Size, 0x00000000

/* ZynqMP RPU / reset-controller register addresses (memory-mapped) */
.set RPU_GLBL_CNTL, 0xFF9A0000
.set RPU_ERR_INJ, 0xFF9A0020
.set RPU_0_CFG, 0xFF9A0100
.set RPU_1_CFG, 0xFF9A0200
.set RST_LPD_DBG, 0xFF5E0240
.set BOOT_MODE_USER, 0xFF5E0200
.set fault_log_enable, 0x101

#define ISR_Stack_Size (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \
 FIQ_Stack_Size + IRQ_Stack_Size)

.section .data.share.isr
/* stack */
.globl stack_start
.globl stack_top
.align 3
.bss
stack_start:
.rept ISR_Stack_Size
.long 0
.endr
stack_top:

.section .boot,"axS"
/* reset entry */
.globl _reset
_reset:
    /* Initialize processor registers to 0 */
    mov r0,#0
    mov r1,#0
    mov r2,#0
    mov r3,#0
    mov r4,#0
    mov r5,#0
    mov r6,#0
    mov r7,#0
    mov r8,#0
    mov r9,#0
    mov r10,#0
    mov r11,#0
    mov r12,#0

    /* set the cpu to SVC32 mode and disable interrupt */
    cpsid if, #Mode_SVC

    /* setup stack */
    bl stack_setup

    /*
     * Enable access to VFP by enabling access to Coprocessors 10 and 11.
     * Enables Full Access i.e. in both privileged and non privileged modes
     */
    mrc p15, 0, r0, c1, c0, 2       /* Read Coprocessor Access Control Register (CPACR) */
    orr r0, r0, #(0xF << 20)        /* Enable access to CP 10 & 11 */
    mcr p15, 0, r0, c1, c0, 2       /* Write Coprocessor Access Control Register (CPACR) */
    isb

    /* enable fpu access */
    vmrs r3, FPEXC
    orr r1, r3, #(1<<30)
    vmsr FPEXC, r1

    /* clear the floating point register*/
    mov r1,#0
    vmov d0,r1,r1
    vmov d1,r1,r1
    vmov d2,r1,r1
    vmov d3,r1,r1
    vmov d4,r1,r1
    vmov d5,r1,r1
    vmov d6,r1,r1
    vmov d7,r1,r1
    vmov d8,r1,r1
    vmov d9,r1,r1
    vmov d10,r1,r1
    vmov d11,r1,r1
    vmov d12,r1,r1
    vmov d13,r1,r1
    vmov d14,r1,r1
    vmov d15,r1,r1

#ifdef __SOFTFP__
    /* Disable the FPU if SOFTFP is defined*/
    vmsr FPEXC,r3
#endif

    /* Disable MPU and caches */
    mrc p15, 0, r0, c1, c0, 0       /* Read CP15 Control Register*/
    bic r0, r0, #0x05               /* Disable MPU (M bit) and data cache (C bit) */
    bic r0, r0, #0x1000             /* Disable instruction cache (I bit) */
    dsb                             /* Ensure all previous loads/stores have completed */
    mcr p15, 0, r0, c1, c0, 0       /* Write CP15 Control Register */
    isb                             /* Ensure subsequent insts execute wrt new MPU settings */

    /* Disable Branch prediction, TCM ECC checks */
    mrc p15, 0, r0, c1, c0, 1       /* Read ACTLR */
    orr r0, r0, #(0x1 << 17)        /* Enable RSDIS bit 17 to disable the return stack */
    orr r0, r0, #(0x1 << 16)        /* Clear BP bit 15 and set BP bit 16:*/
    bic r0, r0, #(0x1 << 15)        /* Branch always not taken and history table updates disabled*/
    orr r0, r0, #(0x1 << 27)        /* Enable B1TCM ECC check */
    orr r0, r0, #(0x1 << 26)        /* Enable B0TCM ECC check */
    orr r0, r0, #(0x1 << 25)        /* Enable ATCM ECC check */
    bic r0, r0, #(0x1 << 5)         /* Generate abort on parity errors, with [5:3]=b 000*/
    bic r0, r0, #(0x1 << 4)
    bic r0, r0, #(0x1 << 3)
    mcr p15, 0, r0, c1, c0, 1       /* Write ACTLR*/
    dsb                             /* Complete all outstanding explicit memory operations*/

    /* Invalidate caches */
    mov r0,#0                       /* r0 = 0  */
    dsb
    mcr p15, 0, r0, c7, c5, 0       /* invalidate icache */
    mcr p15, 0, r0, c15, c5, 0      /* Invalidate entire data cache*/
    isb

    /* enable fault log for lock step */
    ldr r0,=RPU_GLBL_CNTL
    ldr r1, [r0]
    ands r1, r1, #0x8
    /* branch to initialization if split mode*/
    bne init
    /* check for boot mode if in lock step, branch to init if JTAG boot mode*/
    ldr r0,=BOOT_MODE_USER
    ldr r1, [r0]
    ands r1, r1, #0xF
    beq init
    /* reset the debug logic */
    ldr r0,=RST_LPD_DBG
    ldr r1, [r0]
    orr r1, r1, #(0x1 << 4)
    orr r1, r1, #(0x1 << 5)
    str r1, [r0]
    /* enable fault log */
    ldr r0,=RPU_ERR_INJ
    ldr r1,=fault_log_enable
    ldr r2, [r0]
    orr r2, r2, r1
    str r2, [r0]
    nop
    nop

init:
    bl Init_MPU                     /* Initialize MPU */

    /* Enable Branch prediction */
    mrc p15, 0, r0, c1, c0, 1       /* Read ACTLR*/
    bic r0, r0, #(0x1 << 17)        /* Clear RSDIS bit 17 to enable return stack*/
    bic r0, r0, #(0x1 << 16)        /* Clear BP bit 15 and BP bit 16:*/
    bic r0, r0, #(0x1 << 15)        /* Normal operation, BP is taken from the global history table.*/
    orr r0, r0, #(0x1 << 14)        /* Disable DBWR for errata 780125 */
    mcr p15, 0, r0, c1, c0, 1       /* Write ACTLR*/

    /* Enable icahce and dcache */
    mrc p15,0,r1,c1,c0,0
    ldr r0, =0x1005
    orr r1,r1,r0
    dsb
    mcr p15,0,r1,c1,c0,0            /* Enable cache  */
    isb                             /* isb  flush prefetch buffer */

    /* Set vector table in TCM/LOVEC */
    mrc p15, 0, r0, c1, c0, 0
    mvn r1, #0x2000
    and r0, r0, r1
    mcr p15, 0, r0, c1, c0, 0       /* Clear VINITHI to enable LOVEC on reset */

    /* NOTE(review): selects RPU core 0's config register unconditionally;
     * the #else arm for core 1 is compiled out. */
#if 1
    ldr r0, =RPU_0_CFG
#else
    ldr r0, =RPU_1_CFG
#endif
    ldr r1, [r0]
    bic r1, r1, #(0x1 << 2)
    str r1, [r0]

    /* enable asynchronous abort exception */
    mrs r0, cpsr
    bic r0, r0, #0x100
    msr cpsr_xsf, r0

    /* clear .bss */
    mov r0,#0                       /* get a zero */
    ldr r1,=__bss_start             /* bss start */
    ldr r2,=__bss_end               /* bss end */

bss_loop:
    cmp r1,r2                       /* check if data to clear */
    strlo r0,[r1],#4                /* clear 4 bytes */
    blo bss_loop                    /* loop until done */

    /* call C++ constructors of global objects */
    ldr r0, =__ctors_start__
    ldr r1, =__ctors_end__

ctor_loop:
    cmp r0, r1
    beq ctor_end
    ldr r2, [r0], #4                /* r2 = next constructor, r0 advances */
    stmfd sp!, {r0-r1}
    mov lr, pc
    bx r2                           /* call the constructor */
    ldmfd sp!, {r0-r1}
    b ctor_loop
ctor_end:

    bl Xil_InitializeExistingMPURegConfig   /* Initialize MPU config */

    /* start RT-Thread Kernel */
    ldr pc, _entry

_entry:
    .word entry

/*
 * Assigns a stack to each exception mode, carving them top-down from
 * stack_top, and returns with the CPU back in SVC mode (IRQ/FIQ masked).
 */
stack_setup:
    ldr r0, =stack_top

    @  Set the startup stack for svc
    mov sp, r0

    @  Enter Undefined Instruction Mode and set its Stack Pointer
    msr cpsr_c, #Mode_UND|I_Bit|F_Bit
    mov sp, r0
    sub r0, r0, #UND_Stack_Size

    @  Enter Abort Mode and set its Stack Pointer
    msr cpsr_c, #Mode_ABT|I_Bit|F_Bit
    mov sp, r0
    sub r0, r0, #ABT_Stack_Size

    @  Enter FIQ Mode and set its Stack Pointer
    msr cpsr_c, #Mode_FIQ|I_Bit|F_Bit
    mov sp, r0
    sub r0, r0, #FIQ_Stack_Size

    @  Enter IRQ Mode and set its Stack Pointer
    msr cpsr_c, #Mode_IRQ|I_Bit|F_Bit
    mov sp, r0
    sub r0, r0, #IRQ_Stack_Size

    @  Switch back to SVC
    msr cpsr_c, #Mode_SVC|I_Bit|F_Bit

    bx lr

.section .text.isr, "ax"
/* exception handlers: undef, swi, padt, dabt, resv, irq, fiq */
.align 5
.globl vector_fiq
vector_fiq:
    stmfd sp!,{r0-r7,lr}
    bl rt_hw_trap_fiq
    ldmfd sp!,{r0-r7,lr}
    subs pc,lr,#4

.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread

/*
 * IRQ entry: saves the interrupted context (plus VFP state when hard-float),
 * dispatches rt_hw_trap_irq between enter/leave bookkeeping, then either
 * returns or falls into rt_hw_context_switch_interrupt_do below.
 */
.align 5
.globl vector_irq
vector_irq:
    stmfd sp!, {r0-r12,lr}

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    vstmdb sp!, {d0-d15}        /* Store floating point registers */
    vmrs r1, FPSCR
    stmfd sp!,{r1}
    vmrs r1, FPEXC
    stmfd sp!,{r1}
#endif

    bl rt_interrupt_enter
    bl rt_hw_trap_irq
    bl rt_interrupt_leave

    @ if rt_thread_switch_interrupt_flag set, jump to
    @ rt_hw_context_switch_interrupt_do and don't return
    ldr r0, =rt_thread_switch_interrupt_flag
    ldr r1, [r0]
    cmp r1, #1
    beq rt_hw_context_switch_interrupt_do

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    ldmfd sp!, {r1}             /* Restore floating point registers */
    vmsr FPEXC, r1
    ldmfd sp!, {r1}
    vmsr FPSCR, r1
    vldmia sp!, {d0-d15}
#endif

    ldmfd sp!, {r0-r12,lr}
    subs pc, lr, #4             /* IRQ exception return */

/*
 * Deferred context switch, entered from vector_irq with r0 pointing at
 * rt_thread_switch_interrupt_flag and the interrupted thread's registers
 * still on the IRQ stack.  Rebuilds the frame on the thread's SVC stack,
 * saves sp to the "from" TCB and restores the "to" thread's frame.
 */
rt_hw_context_switch_interrupt_do:
    mov r1,  #0                 @ clear flag
    str r1,  [r0]

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    ldmfd sp!, {r1}             /* Restore floating point registers */
    vmsr FPEXC, r1
    ldmfd sp!, {r1}
    vmsr FPSCR, r1
    vldmia sp!, {d0-d15}
#endif

    mov r1, sp                  @ r1 point to {r0-r3} in stack
    add sp, sp, #4*4
    ldmfd sp!, {r4-r12,lr}      @ reload saved registers
    mrs r0,  spsr               @ get cpsr of interrupt thread
    sub r2,  lr, #4             @ save old task's pc to r2

    @ Switch to SVC mode with no interrupt.
    msr cpsr_c, #I_Bit|F_Bit|Mode_SVC

    stmfd sp!, {r2}             @ push old task's pc
    stmfd sp!, {r4-r12,lr}      @ push old task's lr,r12-r4
    ldmfd r1,  {r1-r4}          @ restore r0-r3 of the interrupt thread
    stmfd sp!, {r1-r4}          @ push old task's r0-r3
    stmfd sp!, {r0}             @ push old task's cpsr

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    vstmdb sp!, {d0-d15}        /* Store floating point registers */
    vmrs r1, FPSCR
    stmfd sp!,{r1}
    vmrs r1, FPEXC
    stmfd sp!,{r1}
#endif

    ldr r4,  =rt_interrupt_from_thread
    ldr r5,  [r4]
    str sp,  [r5]               @ store sp in preempted tasks's TCB

    ldr r6,  =rt_interrupt_to_thread
    ldr r7,  [r6]
    ldr sp,  [r7]               @ get new task's stack pointer

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    ldmfd sp!, {r1}             /* Restore floating point registers */
    vmsr FPEXC, r1
    ldmfd sp!, {r1}
    vmsr FPSCR, r1
    vldmia sp!, {d0-d15}
#endif

    ldmfd sp!, {r4}             @ pop new task's cpsr to spsr
    msr spsr_cxsf, r4
    ldmfd sp!, {r0-r12,lr,pc}^  @ pop new task's r0-r12,lr & pc, copy spsr to cpsr

/*
 * Builds a struct rt_hw_exp_stack snapshot of the faulting context on the
 * current stack and leaves r0 pointing at it for the C trap handlers.
 */
.macro push_svc_reg
    sub sp, sp, #17 * 4         @/* Sizeof(struct rt_hw_exp_stack)  */
    stmia sp, {r0 - r12}        @/* Calling r0-r12                  */
    mov r0, sp
    mrs r6, spsr                @/* Save CPSR                       */
    str lr, [r0, #15*4]         @/* Push PC                         */
    str r6, [r0, #16*4]         @/* Push CPSR                       */
    cps #Mode_SVC
    str sp, [r0, #13*4]         @/* Save calling SP                 */
    str lr, [r0, #14*4]         @/* Save calling PC                 */
.endm

.align 5
.globl vector_swi
vector_swi:
    push_svc_reg
    bl rt_hw_trap_swi
    b .

.align 5
.globl vector_undef
vector_undef:
    push_svc_reg
    bl rt_hw_trap_undef
    b .

.align 5
.globl vector_pabt
vector_pabt:
    push_svc_reg
    bl rt_hw_trap_pabt
    b .

.align 5
.globl vector_dabt
vector_dabt:
    push_svc_reg
    bl rt_hw_trap_dabt
    b .

.align 5
.globl vector_resv
vector_resv:
    push_svc_reg
    bl rt_hw_trap_resv
    b .
Aladdin-Wang/MicroBoot_Demo
2,819
STM32F7_APP/rt-thread/libcpu/arm/zynqmp-r5/context_gcc.S
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2020-03-19     WangHuachen  first version
 */

.section .text, "ax"
/*
 * rt_base_t rt_hw_interrupt_disable();
 *
 * Returns the current CPSR in r0 and masks both IRQ and FIQ.
 */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
    mrs r0, cpsr
    cpsid if
    bx  lr

/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 *
 * Restores the full CPSR from the value previously returned by
 * rt_hw_interrupt_disable().
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
    msr cpsr, r0
    bx  lr

/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * r0 --> to
 *
 * First-thread switch: restores a full frame from *to without saving any
 * previous context (there is no "from" thread yet).
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
    ldr sp, [r0]            @ get new task stack pointer

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    ldmfd sp!, {r1}         /* Restore floating point registers */
    vmsr FPEXC, r1
    ldmfd sp!, {r1}
    vmsr FPSCR, r1
    vldmia sp!, {d0-d15}
#endif

    ldmfd sp!, {r4}         @ pop new task spsr
    msr spsr_cxsf, r4
    ldmfd sp!, {r0-r12, lr, pc}^ @ pop new task r0-r12, lr & pc

.section .text.isr, "ax"
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * r0 --> from
 * r1 --> to
 *
 * Cooperative switch: saves the full frame (pc, lr, r0-r12, cpsr, and VFP
 * state when hard-float) on the current stack, stores sp at *from, then
 * restores the same-shaped frame from *to.
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
    stmfd   sp!, {lr}           @ push pc (lr should be pushed in place of PC)
    stmfd   sp!, {r0-r12, lr}   @ push lr & register file

    mrs     r4, cpsr
    tst     lr, #0x01           @ bit0 of return address set => Thumb caller
    beq     _ARM_MODE
    orr     r4, r4, #0x20       @ it's thumb code

_ARM_MODE:
    stmfd   sp!, {r4}           @ push cpsr

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    vstmdb  sp!, {d0-d15}       /* Store floating point registers */
    vmrs    r4, FPSCR
    stmfd   sp!,{r4}
    vmrs    r4, FPEXC
    stmfd   sp!,{r4}
#endif

    str     sp, [r0]            @ store sp in preempted tasks TCB
    ldr     sp, [r1]            @ get new task stack pointer

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    ldmfd   sp!, {r1}           /* Restore floating point registers */
    vmsr    FPEXC, r1
    ldmfd   sp!, {r1}
    vmsr    FPSCR, r1
    vldmia  sp!, {d0-d15}
#endif

    ldmfd   sp!, {r4}           @ pop new task cpsr to spsr
    msr     spsr_cxsf, r4
    ldmfd   sp!, {r0-r12, lr, pc}^ @ pop new task r0-r12, lr & pc, copy spsr to cpsr

/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 *
 * Deferred switch requested from interrupt context: records from/to and sets
 * a flag; the actual switch is performed on interrupt exit (see vector_irq in
 * the companion start_gcc.S).  If a switch is already pending, only the "to"
 * thread is updated so the original "from" context is the one saved.
 */
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
    ldr r2, =rt_thread_switch_interrupt_flag
    ldr r3, [r2]
    cmp r3, #1
    beq _reswitch
    mov r3, #1                  @ set rt_thread_switch_interrupt_flag to 1
    str r3, [r2]
    ldr r2, =rt_interrupt_from_thread   @ set rt_interrupt_from_thread
    str r0, [r2]
_reswitch:
    ldr r2, =rt_interrupt_to_thread     @ set rt_interrupt_to_thread
    str r1, [r2]
    bx  lr
Aladdin-Wang/MicroBoot_Demo
5,903
STM32F7_APP/rt-thread/libcpu/arm/cortex-m0/context_rvds.S
;/*
; * Copyright (c) 2006-2022, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date           Author       Notes
; * 2010-01-25     Bernard      first version
; * 2012-06-01     aozima       set pendsv priority to 0xFF.
; * 2012-08-17     aozima       fixed bug: store r8 - r11.
; * 2013-06-18     aozima       add restore MSP feature.
; */

;/**
; * @addtogroup CORTEX-M0
; */
;/*@{*/

SCB_VTOR        EQU     0xE000ED08               ; Vector Table Offset Register
NVIC_INT_CTRL   EQU     0xE000ED04               ; interrupt control state register
NVIC_SHPR3      EQU     0xE000ED20               ; system priority register (2)
NVIC_PENDSV_PRI EQU     0xFFFF0000               ; PendSV and SysTick priority value (lowest)
NVIC_PENDSVSET  EQU     0x10000000               ; value to trigger PendSV exception

    AREA |.text|, CODE, READONLY, ALIGN=2
    THUMB
    REQUIRE8
    PRESERVE8

    IMPORT rt_thread_switch_interrupt_flag
    IMPORT rt_interrupt_from_thread
    IMPORT rt_interrupt_to_thread

;/*
; * rt_base_t rt_hw_interrupt_disable();
; * Returns the current PRIMASK in r0, then masks interrupts.
; */
rt_hw_interrupt_disable    PROC
    EXPORT  rt_hw_interrupt_disable
    MRS     r0, PRIMASK
    CPSID   I
    BX      LR
    ENDP

;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; * Restores PRIMASK from the value previously returned by
; * rt_hw_interrupt_disable().
; */
rt_hw_interrupt_enable    PROC
    EXPORT  rt_hw_interrupt_enable
    MSR     PRIMASK, r0
    BX      LR
    ENDP

;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from
; * r1 --> to
; *
; * Records from/to, then triggers PendSV; the actual register save/restore
; * happens in PendSV_Handler at the lowest exception priority.
; */
rt_hw_context_switch_interrupt
    EXPORT rt_hw_context_switch_interrupt
rt_hw_context_switch    PROC
    EXPORT rt_hw_context_switch

    ; set rt_thread_switch_interrupt_flag to 1
    LDR     r2, =rt_thread_switch_interrupt_flag
    LDR     r3, [r2]
    CMP     r3, #1
    BEQ     _reswitch                       ; switch already pending: keep old "from"
    MOVS    r3, #0x01
    STR     r3, [r2]

    LDR     r2, =rt_interrupt_from_thread   ; set rt_interrupt_from_thread
    STR     r0, [r2]

_reswitch
    LDR     r2, =rt_interrupt_to_thread     ; set rt_interrupt_to_thread
    STR     r1, [r2]

    LDR     r0, =NVIC_INT_CTRL              ; trigger the PendSV exception (causes context switch)
    LDR     r1, =NVIC_PENDSVSET
    STR     r1, [r0]
    BX      LR
    ENDP

; r0 --> switch from thread stack
; r1 --> switch to thread stack
; psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
; The M0 has no STMDB and cannot STM high registers, hence the
; copy-through-{r4-r7} dance below.
PendSV_Handler   PROC
    EXPORT PendSV_Handler

    ; disable interrupt to protect context switch
    MRS     r2, PRIMASK
    CPSID   I

    ; get rt_thread_switch_interrupt_flag
    LDR     r0, =rt_thread_switch_interrupt_flag
    LDR     r1, [r0]
    CMP     r1, #0x00
    BEQ     pendsv_exit         ; pendsv already handled

    ; clear rt_thread_switch_interrupt_flag to 0
    MOVS    r1, #0x00
    STR     r1, [r0]

    LDR     r0, =rt_interrupt_from_thread
    LDR     r1, [r0]
    CMP     r1, #0x00
    BEQ     switch_to_thread    ; skip register save at the first time

    MRS     r1, psp             ; get from thread stack pointer
    SUBS    r1, r1, #0x20       ; space for {r4 - r7} and {r8 - r11}
    LDR     r0, [r0]
    STR     r1, [r0]            ; update from thread stack pointer

    STMIA   r1!, {r4 - r7}      ; push thread {r4 - r7} register to thread stack

    MOV     r4, r8              ; mov thread {r8 - r11} to {r4 - r7}
    MOV     r5, r9
    MOV     r6, r10
    MOV     r7, r11
    STMIA   r1!, {r4 - r7}      ; push thread {r8 - r11} high register to thread stack

switch_to_thread
    LDR     r1, =rt_interrupt_to_thread
    LDR     r1, [r1]
    LDR     r1, [r1]            ; load thread stack pointer

    LDMIA   r1!, {r4 - r7}      ; pop thread {r4 - r7} register from thread stack
    PUSH    {r4 - r7}           ; push {r4 - r7} to MSP for copy {r8 - r11}

    LDMIA   r1!, {r4 - r7}      ; pop thread {r8 - r11} high register from thread stack to {r4 - r7}
    MOV     r8, r4              ; mov {r4 - r7} to {r8 - r11}
    MOV     r9, r5
    MOV     r10, r6
    MOV     r11, r7

    POP     {r4 - r7}           ; pop {r4 - r7} from MSP

    MSR     psp, r1             ; update stack pointer

pendsv_exit
    ; restore interrupt
    MSR     PRIMASK, r2

    MOVS    r0, #0x04
    RSBS    r0, r0, #0x00       ; r0 = 0xFFFFFFFC: EXC_RETURN, thread mode, PSP
    BX      r0
    ENDP

;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to
; * this function is used to perform the first thread switch
; */
rt_hw_context_switch_to    PROC
    EXPORT rt_hw_context_switch_to
    ; set to thread
    LDR     r1, =rt_interrupt_to_thread
    STR     r0, [r1]

    ; set from thread to 0
    LDR     r1, =rt_interrupt_from_thread
    MOVS    r0, #0x0
    STR     r0, [r1]

    ; set interrupt flag to 1
    LDR     r1, =rt_thread_switch_interrupt_flag
    MOVS    r0, #1
    STR     r0, [r1]

    ; set the PendSV and SysTick exception priority
    LDR     r0, =NVIC_SHPR3
    LDR     r1, =NVIC_PENDSV_PRI
    LDR     r2, [r0,#0x00]      ; read
    ORRS    r1,r1,r2            ; modify
    STR     r1, [r0]            ; write-back

    ; trigger the PendSV exception (causes context switch)
    LDR     r0, =NVIC_INT_CTRL
    LDR     r1, =NVIC_PENDSVSET
    STR     r1, [r0]

    ; restore MSP
    LDR     r0, =SCB_VTOR
    LDR     r0, [r0]
    LDR     r0, [r0]            ; initial MSP = first word of the vector table
    MSR     msp, r0

    ; enable interrupts at processor level
    CPSIE   I

    ; ensure PendSV exception taken place before subsequent operation
    DSB
    ISB

    ; never reach here!
    ENDP

; compatible with old version
rt_hw_interrupt_thread_switch    PROC
    EXPORT rt_hw_interrupt_thread_switch
    BX      lr
    ENDP

    IMPORT rt_hw_hard_fault_exception

HardFault_Handler    PROC
    EXPORT HardFault_Handler

    ; get current context
    MRS     r0, psp             ; get fault thread stack pointer
    PUSH    {lr}
    BL      rt_hw_hard_fault_exception
    POP     {pc}
    ENDP

    ALIGN   4

    END
Aladdin-Wang/MicroBoot_Demo
6,278
STM32F7_APP/rt-thread/libcpu/arm/cortex-m0/context_gcc.S
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2010-01-25     Bernard      first version
 * 2012-06-01     aozima       set pendsv priority to 0xFF.
 * 2012-08-17     aozima       fixed bug: store r8 - r11.
 * 2013-02-20     aozima       port to gcc.
 * 2013-06-18     aozima       add restore MSP feature.
 * 2013-11-04     bright       fixed hardfault bug for gcc.
 */

.cpu    cortex-m0
.fpu    softvfp
.syntax unified
.thumb
.text

.equ    SCB_VTOR,        0xE000ED08      /* Vector Table Offset Register */
.equ    NVIC_INT_CTRL,   0xE000ED04      /* interrupt control state register */
.equ    NVIC_SHPR3,      0xE000ED20      /* system priority register (3) */
.equ    NVIC_PENDSV_PRI, 0xFFFF0000      /* PendSV and SysTick priority value (lowest) */
.equ    NVIC_PENDSVSET,  0x10000000      /* value to trigger PendSV exception */

/*
 * rt_base_t rt_hw_interrupt_disable();
 * Returns the current PRIMASK in R0, then masks interrupts.
 */
.global rt_hw_interrupt_disable
.type rt_hw_interrupt_disable, %function
rt_hw_interrupt_disable:
    MRS     R0, PRIMASK
    CPSID   I
    BX      LR

/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 * Restores PRIMASK from the value previously returned by
 * rt_hw_interrupt_disable().
 */
.global rt_hw_interrupt_enable
.type rt_hw_interrupt_enable, %function
rt_hw_interrupt_enable:
    MSR     PRIMASK, R0
    BX      LR

/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * R0 --> from
 * R1 --> to
 *
 * Records from/to, then triggers PendSV; the actual register save/restore
 * happens in PendSV_Handler at the lowest exception priority.  If a switch
 * is already pending, only the "to" thread is updated.
 */
.global rt_hw_context_switch_interrupt
.type rt_hw_context_switch_interrupt, %function
.global rt_hw_context_switch
.type rt_hw_context_switch, %function
rt_hw_context_switch_interrupt:
rt_hw_context_switch:
    /* set rt_thread_switch_interrupt_flag to 1 */
    LDR     R2, =rt_thread_switch_interrupt_flag
    LDR     R3, [R2]
    CMP     R3, #1
    BEQ     _reswitch
    MOVS    R3, #1
    STR     R3, [R2]

    LDR     R2, =rt_interrupt_from_thread   /* set rt_interrupt_from_thread */
    STR     R0, [R2]

_reswitch:
    LDR     R2, =rt_interrupt_to_thread     /* set rt_interrupt_to_thread */
    STR     R1, [R2]

    LDR     R0, =NVIC_INT_CTRL              /* trigger the PendSV exception (causes context switch) */
    LDR     R1, =NVIC_PENDSVSET
    STR     R1, [R0]
    BX      LR

/* R0 --> switch from thread stack
 * R1 --> switch to thread stack
 * psr, pc, LR, R12, R3, R2, R1, R0 are pushed into [from] stack
 * The M0 has no STMDB and cannot STM high registers, hence the
 * copy-through-{R4-R7} dance below.
 */
.global PendSV_Handler
.type PendSV_Handler, %function
PendSV_Handler:
    /* disable interrupt to protect context switch */
    MRS     R2, PRIMASK
    CPSID   I

    /* get rt_thread_switch_interrupt_flag */
    LDR     R0, =rt_thread_switch_interrupt_flag
    LDR     R1, [R0]
    CMP     R1, #0x00
    BEQ     pendsv_exit         /* pendsv already handled */

    /* clear rt_thread_switch_interrupt_flag to 0 */
    MOVS    R1, #0
    STR     R1, [R0]

    LDR     R0, =rt_interrupt_from_thread
    LDR     R1, [R0]
    CMP     R1, #0x00
    BEQ     switch_to_thread    /* skip register save at the first time */

    MRS     R1, PSP             /* get from thread stack pointer */
    SUBS    R1, R1, #0x20       /* space for {R4 - R7} and {R8 - R11} */
    LDR     R0, [R0]
    STR     R1, [R0]            /* update from thread stack pointer */

    STMIA   R1!, {R4 - R7}      /* push thread {R4 - R7} register to thread stack */

    MOV     R4, R8              /* mov thread {R8 - R11} to {R4 - R7} */
    MOV     R5, R9
    MOV     R6, R10
    MOV     R7, R11
    STMIA   R1!, {R4 - R7}      /* push thread {R8 - R11} high register to thread stack */

switch_to_thread:
    LDR     R1, =rt_interrupt_to_thread
    LDR     R1, [R1]
    LDR     R1, [R1]            /* load thread stack pointer */

    LDMIA   R1!, {R4 - R7}      /* pop thread {R4 - R7} register from thread stack */
    PUSH    {R4 - R7}           /* push {R4 - R7} to MSP for copy {R8 - R11} */

    LDMIA   R1!, {R4 - R7}      /* pop thread {R8 - R11} high register from thread stack to {R4 - R7} */
    MOV     R8, R4              /* mov {R4 - R7} to {R8 - R11} */
    MOV     R9, R5
    MOV     R10, R6
    MOV     R11, R7

    POP     {R4 - R7}           /* pop {R4 - R7} from MSP */

    MSR     PSP, R1             /* update stack pointer */

pendsv_exit:
    /* restore interrupt */
    MSR     PRIMASK, R2

    MOVS    R0, #0x04
    RSBS    R0, R0, #0x00       /* R0 = 0xFFFFFFFC: EXC_RETURN, thread mode, PSP */
    BX      R0

/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * R0 --> to
 * This function performs the first thread switch: it has no "from" thread,
 * resets MSP from the vector table, and never returns.
 */
.global rt_hw_context_switch_to
.type rt_hw_context_switch_to, %function
rt_hw_context_switch_to:
    LDR     R1, =rt_interrupt_to_thread
    STR     R0, [R1]

    /* set from thread to 0 */
    LDR     R1, =rt_interrupt_from_thread
    MOVS    R0, #0
    STR     R0, [R1]

    /* set interrupt flag to 1 */
    LDR     R1, =rt_thread_switch_interrupt_flag
    MOVS    R0, #1
    STR     R0, [R1]

    /* set the PendSV and SysTick exception priority */
    LDR     R0, =NVIC_SHPR3
    LDR     R1, =NVIC_PENDSV_PRI
    LDR     R2, [R0,#0x00]      /* read */
    ORRS    R1, R1, R2          /* modify */
    STR     R1, [R0]            /* write-back */

    LDR     R0, =NVIC_INT_CTRL  /* trigger the PendSV exception (causes context switch) */
    LDR     R1, =NVIC_PENDSVSET
    STR     R1, [R0]
    NOP

    /* restore MSP */
    LDR     R0, =SCB_VTOR
    LDR     R0, [R0]
    LDR     R0, [R0]            /* initial MSP = first word of the vector table */
    NOP
    MSR     MSP, R0

    /* enable interrupts at processor level */
    CPSIE   I

    /* ensure PendSV exception taken place before subsequent operation */
    DSB
    ISB

    /* never reach here! */

/* compatible with old version */
.global rt_hw_interrupt_thread_switch
.type rt_hw_interrupt_thread_switch, %function
rt_hw_interrupt_thread_switch:
    BX      LR
    NOP

.global HardFault_Handler
.type HardFault_Handler, %function
HardFault_Handler:
    /* get current context */
    MRS     R0, PSP             /* get fault thread stack pointer */
    PUSH    {LR}
    BL      rt_hw_hard_fault_exception
    POP     {PC}

/*
 * rt_uint32_t rt_hw_interrupt_check(void);
 * R0 --> state (non-zero IPSR means we are in exception context)
 */
.global rt_hw_interrupt_check
.type rt_hw_interrupt_check, %function
rt_hw_interrupt_check:
    MRS     R0, IPSR
    BX      LR