repo_id
stringlengths 5
115
| size
int64 590
5.01M
| file_path
stringlengths 4
212
| content
stringlengths 590
5.01M
|
|---|---|---|---|
aixcc-public/challenge-001-exemplar-source
| 1,111
|
arch/sh/lib/udiv_qrnnd.S
|
/* SPDX-License-Identifier: GPL-2.0+ WITH GCC-exception-2.0
Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
2004, 2005, 2006
Free Software Foundation, Inc.
*/
!! libgcc routines for the Renesas / SuperH SH CPUs.
!! Contributed by Steve Chamberlain.
!! sac@cygnus.com
!! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines
!! recoded in assembly by Toshiyasu Morita
!! tm@netcom.com
/* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and
ELF local label prefixes by J"orn Rennecke
amylaar@cygnus.com */
/* r0: rn r1: qn */ /* r0: n1 r4: n0 r5: d r6: d1 */ /* r2: __m */
/* n1 < d, but n1 might be larger than d1. */
.global __udiv_qrnnd_16
.balign 8
__udiv_qrnnd_16:
div0u
cmp/hi r6,r0
bt .Lots
.rept 16
div1 r6,r0
.endr
extu.w r0,r1
bt 0f
add r6,r0
0: rotcl r1
mulu.w r1,r5
xtrct r4,r0
swap.w r0,r0
sts macl,r2
cmp/hs r2,r0
sub r2,r0
bt 0f
addc r5,r0
add #-1,r1
bt 0f
1: add #-1,r1
rts
add r5,r0
.balign 8
.Lots:
sub r5,r0
swap.w r4,r1
xtrct r0,r1
clrt
mov r1,r0
addc r5,r0
mov #-1,r1
bf/s 1b
shlr16 r1
0: rts
nop
|
aixcc-public/challenge-001-exemplar-source
| 4,048
|
arch/sh/lib/memmove.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: memmove.S,v 1.2 2001/07/27 11:51:09 gniibe Exp $
*
* "memmove" implementation of SuperH
*
* Copyright (C) 1999 Niibe Yutaka
*
*/
/*
* void *memmove(void *dst, const void *src, size_t n);
* The memory areas may overlap.
*/
#include <linux/linkage.h>
ENTRY(memmove)
! if dest > src, call memcpy (it copies in decreasing order)
cmp/hi r5,r4
bf 1f
mov.l 2f,r0
jmp @r0
nop
.balign 4
2: .long memcpy
1:
sub r5,r4 ! From here, r4 has the distance to r0
tst r6,r6
bt/s 9f ! if n=0, do nothing
mov r5,r0
add r6,r5
mov #12,r1
cmp/gt r6,r1
bt/s 8f ! if it's too small, copy a byte at once
add #-1,r4
add #1,r4
!
! [ ... ] DST [ ... ] SRC
! [ ... ] [ ... ]
! : :
! r0+r4--> [ ... ] r0 --> [ ... ]
! : :
! [ ... ] [ ... ]
! r5 -->
!
mov r4,r1
mov #3,r2
and r2,r1
shll2 r1
mov r0,r3 ! Save the value on R0 to R3
mova jmptable,r0
add r1,r0
mov.l @r0,r1
jmp @r1
mov r3,r0 ! and back to R0
.balign 4
jmptable:
.long case0
.long case1
.long case2
.long case3
! copy a byte at once
8: mov.b @r0+,r1
cmp/hs r5,r0
bf/s 8b ! while (r0<r5)
mov.b r1,@(r0,r4)
add #1,r4
9:
add r4,r0
rts
sub r6,r0
case_none:
bra 8b
add #-1,r4
case0:
!
! GHIJ KLMN OPQR --> GHIJ KLMN OPQR
!
! First, align to long word boundary
mov r0,r3
and r2,r3
tst r3,r3
bt/s 2f
add #-1,r4
mov #4,r2
sub r3,r2
1: dt r2
mov.b @r0+,r1
bf/s 1b
mov.b r1,@(r0,r4)
!
2: ! Second, copy a long word at once
add #-3,r4
add #-3,r5
3: mov.l @r0+,r1
cmp/hs r5,r0
bf/s 3b
mov.l r1,@(r0,r4)
add #3,r5
!
! Third, copy a byte at once, if necessary
cmp/eq r5,r0
bt/s 9b
add #4,r4
bra 8b
add #-1,r4
case3:
!
! GHIJ KLMN OPQR --> ...G HIJK LMNO PQR.
!
! First, align to long word boundary
mov r0,r3
and r2,r3
tst r3,r3
bt/s 2f
add #-1,r4
mov #4,r2
sub r3,r2
1: dt r2
mov.b @r0+,r1
bf/s 1b
mov.b r1,@(r0,r4)
!
2: ! Second, read a long word and write a long word at once
add #-2,r4
mov.l @(r0,r4),r1
add #-7,r5
add #-4,r4
!
#ifdef __LITTLE_ENDIAN__
shll8 r1
3: mov r1,r3 ! JIHG
shlr8 r3 ! xJIH
mov.l @r0+,r1 ! NMLK
mov r1,r2
shll16 r2
shll8 r2 ! Kxxx
or r2,r3 ! KJIH
cmp/hs r5,r0
bf/s 3b
mov.l r3,@(r0,r4)
#else
shlr8 r1
3: mov r1,r3 ! GHIJ
shll8 r3 ! HIJx
mov.l @r0+,r1 ! KLMN
mov r1,r2
shlr16 r2
shlr8 r2 ! xxxK
or r2,r3 ! HIJK
cmp/hs r5,r0
bf/s 3b
mov.l r3,@(r0,r4)
#endif
add #7,r5
!
! Third, copy a byte at once, if necessary
cmp/eq r5,r0
bt/s 9b
add #7,r4
add #-3,r0
bra 8b
add #-1,r4
case2:
!
! GHIJ KLMN OPQR --> ..GH IJKL MNOP QR..
!
! First, align to word boundary
tst #1,r0
bt/s 2f
add #-1,r4
mov.b @r0+,r1
mov.b r1,@(r0,r4)
!
2: ! Second, read a word and write a word at once
add #-1,r4
add #-1,r5
!
3: mov.w @r0+,r1
cmp/hs r5,r0
bf/s 3b
mov.w r1,@(r0,r4)
add #1,r5
!
! Third, copy a byte at once, if necessary
cmp/eq r5,r0
bt/s 9b
add #2,r4
mov.b @r0,r1
mov.b r1,@(r0,r4)
bra 9b
add #1,r0
case1:
!
! GHIJ KLMN OPQR --> .GHI JKLM NOPQ R...
!
! First, align to long word boundary
mov r0,r3
and r2,r3
tst r3,r3
bt/s 2f
add #-1,r4
mov #4,r2
sub r3,r2
1: dt r2
mov.b @r0+,r1
bf/s 1b
mov.b r1,@(r0,r4)
!
2: ! Second, read a long word and write a long word at once
mov.l @(r0,r4),r1
add #-7,r5
add #-4,r4
!
#ifdef __LITTLE_ENDIAN__
shll16 r1
shll8 r1
3: mov r1,r3 ! JIHG
shlr16 r3
shlr8 r3 ! xxxJ
mov.l @r0+,r1 ! NMLK
mov r1,r2
shll8 r2 ! MLKx
or r2,r3 ! MLKJ
cmp/hs r5,r0
bf/s 3b
mov.l r3,@(r0,r4)
#else
shlr16 r1
shlr8 r1
3: mov r1,r3 ! GHIJ
shll16 r3
shll8 r3 ! Jxxx
mov.l @r0+,r1 ! KLMN
mov r1,r2
shlr8 r2 ! xKLM
or r2,r3 ! JKLM
cmp/hs r5,r0
bf/s 3b ! while(r0<r5)
mov.l r3,@(r0,r4)
#endif
add #7,r5
!
! Third, copy a byte at once, if necessary
cmp/eq r5,r0
bt/s 9b
add #5,r4
add #-3,r0
bra 8b
add #-1,r4
|
aixcc-public/challenge-001-exemplar-source
| 2,315
|
arch/sh/lib/udivsi3_i4i-Os.S
|
/* SPDX-License-Identifier: GPL-2.0+ WITH GCC-exception-2.0
*
* Copyright (C) 2006 Free Software Foundation, Inc.
*/
/* Moderately Space-optimized libgcc routines for the Renesas SH /
STMicroelectronics ST40 CPUs.
Contributed by J"orn Rennecke joern.rennecke@st.com. */
/* Size: 186 bytes jointly for udivsi3_i4i and sdivsi3_i4i
sh4-200 run times:
udiv small divisor: 55 cycles
udiv large divisor: 52 cycles
sdiv small divisor, positive result: 59 cycles
sdiv large divisor, positive result: 56 cycles
sdiv small divisor, negative result: 65 cycles (*)
sdiv large divisor, negative result: 62 cycles (*)
(*): r2 is restored in the rts delay slot and has a lingering latency
of two more cycles. */
.balign 4
.global __udivsi3_i4i
.global __udivsi3_i4
.set __udivsi3_i4, __udivsi3_i4i
.type __udivsi3_i4i, @function
.type __sdivsi3_i4i, @function
__udivsi3_i4i:
sts pr,r1
mov.l r4,@-r15
extu.w r5,r0
cmp/eq r5,r0
swap.w r4,r0
shlr16 r4
bf/s large_divisor
div0u
mov.l r5,@-r15
shll16 r5
sdiv_small_divisor:
div1 r5,r4
bsr div6
div1 r5,r4
div1 r5,r4
bsr div6
div1 r5,r4
xtrct r4,r0
xtrct r0,r4
bsr div7
swap.w r4,r4
div1 r5,r4
bsr div7
div1 r5,r4
xtrct r4,r0
mov.l @r15+,r5
swap.w r0,r0
mov.l @r15+,r4
jmp @r1
rotcl r0
div7:
div1 r5,r4
div6:
div1 r5,r4; div1 r5,r4; div1 r5,r4
div1 r5,r4; div1 r5,r4; rts; div1 r5,r4
divx3:
rotcl r0
div1 r5,r4
rotcl r0
div1 r5,r4
rotcl r0
rts
div1 r5,r4
large_divisor:
mov.l r5,@-r15
sdiv_large_divisor:
xor r4,r0
.rept 4
rotcl r0
bsr divx3
div1 r5,r4
.endr
mov.l @r15+,r5
mov.l @r15+,r4
jmp @r1
rotcl r0
.global __sdivsi3_i4i
.global __sdivsi3_i4
.global __sdivsi3
.set __sdivsi3_i4, __sdivsi3_i4i
.set __sdivsi3, __sdivsi3_i4i
__sdivsi3_i4i:
mov.l r4,@-r15
cmp/pz r5
mov.l r5,@-r15
bt/s pos_divisor
cmp/pz r4
neg r5,r5
extu.w r5,r0
bt/s neg_result
cmp/eq r5,r0
neg r4,r4
pos_result:
swap.w r4,r0
bra sdiv_check_divisor
sts pr,r1
pos_divisor:
extu.w r5,r0
bt/s pos_result
cmp/eq r5,r0
neg r4,r4
neg_result:
mova negate_result,r0
;
mov r0,r1
swap.w r4,r0
lds r2,macl
sts pr,r2
sdiv_check_divisor:
shlr16 r4
bf/s sdiv_large_divisor
div0u
bra sdiv_small_divisor
shll16 r5
.balign 4
negate_result:
neg r0,r0
jmp @r2
sts macl,r2
|
aixcc-public/challenge-001-exemplar-source
| 15,652
|
arch/sh/lib/memcpy-sh4.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* "memcpy" implementation of SuperH
*
* Copyright (C) 1999 Niibe Yutaka
* Copyright (c) 2002 STMicroelectronics Ltd
* Modified from memcpy.S and micro-optimised for SH4
* Stuart Menefy (stuart.menefy@st.com)
*
*/
#include <linux/linkage.h>
/*
* void *memcpy(void *dst, const void *src, size_t n);
*
* It is assumed that there is no overlap between src and dst.
* If there is an overlap, then the results are undefined.
*/
!
! GHIJ KLMN OPQR --> ...G HIJK LMNO PQR.
!
! Size is 16 or greater, and may have trailing bytes
.balign 32
.Lcase1:
! Read a long word and write a long word at once
! At the start of each iteration, r7 contains last long load
add #-1,r5 ! 79 EX
mov r4,r2 ! 5 MT (0 cycles latency)
mov.l @(r0,r5),r7 ! 21 LS (2 cycles latency)
add #-4,r5 ! 50 EX
add #7,r2 ! 79 EX
!
#ifdef CONFIG_CPU_LITTLE_ENDIAN
! 6 cycles, 4 bytes per iteration
3: mov.l @(r0,r5),r1 ! 21 LS (latency=2) ! NMLK
mov r7, r3 ! 5 MT (latency=0) ! RQPO
cmp/hi r2,r0 ! 57 MT
shll16 r3 ! 103 EX
mov r1,r6 ! 5 MT (latency=0)
shll8 r3 ! 102 EX ! Oxxx
shlr8 r6 ! 106 EX ! xNML
mov r1, r7 ! 5 MT (latency=0)
or r6,r3 ! 82 EX ! ONML
bt/s 3b ! 109 BR
mov.l r3,@-r0 ! 30 LS
#else
3: mov.l @(r0,r5),r1 ! 21 LS (latency=2) ! KLMN
mov r7,r3 ! 5 MT (latency=0) ! OPQR
cmp/hi r2,r0 ! 57 MT
shlr16 r3 ! 107 EX
shlr8 r3 ! 106 EX ! xxxO
mov r1,r6 ! 5 MT (latency=0)
shll8 r6 ! 102 EX ! LMNx
mov r1,r7 ! 5 MT (latency=0)
or r6,r3 ! 82 EX ! LMNO
bt/s 3b ! 109 BR
mov.l r3,@-r0 ! 30 LS
#endif
! Finally, copy a byte at once, if necessary
add #4,r5 ! 50 EX
cmp/eq r4,r0 ! 54 MT
add #-6,r2 ! 50 EX
bt 9f ! 109 BR
8: cmp/hi r2,r0 ! 57 MT
mov.b @(r0,r5),r1 ! 20 LS (latency=2)
bt/s 8b ! 109 BR
mov.b r1,@-r0 ! 29 LS
9: rts
nop
!
! GHIJ KLMN OPQR --> .GHI JKLM NOPQ R...
!
! Size is 16 or greater, and may have trailing bytes
.balign 32
.Lcase3:
! Read a long word and write a long word at once
! At the start of each iteration, r7 contains last long load
add #-3,r5 ! 79 EX
mov r4,r2 ! 5 MT (0 cycles latency)
mov.l @(r0,r5),r7 ! 21 LS (2 cycles latency)
add #-4,r5 ! 50 EX
add #7,r2 ! 79 EX
!
#ifdef CONFIG_CPU_LITTLE_ENDIAN
! 6 cycles, 4 bytes per iteration
3: mov.l @(r0,r5),r1 ! 21 LS (latency=2) ! NMLK
mov r7, r3 ! 5 MT (latency=0) ! RQPO
cmp/hi r2,r0 ! 57 MT
shll8 r3 ! 102 EX ! QPOx
mov r1,r6 ! 5 MT (latency=0)
shlr16 r6 ! 107 EX
shlr8 r6 ! 106 EX ! xxxN
mov r1, r7 ! 5 MT (latency=0)
or r6,r3 ! 82 EX ! QPON
bt/s 3b ! 109 BR
mov.l r3,@-r0 ! 30 LS
#else
3: mov r7,r3 ! OPQR
shlr8 r3 ! xOPQ
mov.l @(r0,r5),r7 ! KLMN
mov r7,r6
shll16 r6
shll8 r6 ! Nxxx
or r6,r3 ! NOPQ
cmp/hi r2,r0
bt/s 3b
mov.l r3,@-r0
#endif
! Finally, copy a byte at once, if necessary
add #6,r5 ! 50 EX
cmp/eq r4,r0 ! 54 MT
add #-6,r2 ! 50 EX
bt 9f ! 109 BR
8: cmp/hi r2,r0 ! 57 MT
mov.b @(r0,r5),r1 ! 20 LS (latency=2)
bt/s 8b ! 109 BR
mov.b r1,@-r0 ! 29 LS
9: rts
nop
ENTRY(memcpy)
! Calculate the invariants which will be used in the remainder
! of the code:
!
! r4 --> [ ... ] DST [ ... ] SRC
! [ ... ] [ ... ]
! : :
! r0 --> [ ... ] r0+r5 --> [ ... ]
!
!
! Short circuit the common case of src, dst and len being 32 bit aligned
! and test for zero length move
mov r6, r0 ! 5 MT (0 cycle latency)
or r4, r0 ! 82 EX
or r5, r0 ! 82 EX
tst r6, r6 ! 86 MT
bt/s 99f ! 111 BR (zero len)
tst #3, r0 ! 87 MT
mov r4, r0 ! 5 MT (0 cycle latency)
add r6, r0 ! 49 EX
mov #16, r1 ! 6 EX
bt/s .Lcase00 ! 111 BR (aligned)
sub r4, r5 ! 75 EX
! Arguments are not nicely long word aligned or zero len.
! Check for small copies, and if so do a simple byte at a time copy.
!
! Deciding on an exact value of 'small' is not easy, as the point at which
! using the optimised routines become worthwhile varies (these are the
! cycle counts for differnet sizes using byte-at-a-time vs. optimised):
! size byte-at-time long word byte
! 16 42 39-40 46-50 50-55
! 24 58 43-44 54-58 62-67
! 36 82 49-50 66-70 80-85
! However the penalty for getting it 'wrong' is much higher for long word
! aligned data (and this is more common), so use a value of 16.
cmp/gt r6,r1 ! 56 MT
add #-1,r5 ! 50 EX
bf/s 6f ! 108 BR (not small)
mov r5, r3 ! 5 MT (latency=0)
shlr r6 ! 104 EX
mov.b @(r0,r5),r1 ! 20 LS (latency=2)
bf/s 4f ! 111 BR
add #-1,r3 ! 50 EX
tst r6, r6 ! 86 MT
bt/s 98f ! 110 BR
mov.b r1,@-r0 ! 29 LS
! 4 cycles, 2 bytes per iteration
3: mov.b @(r0,r5),r1 ! 20 LS (latency=2)
4: mov.b @(r0,r3),r2 ! 20 LS (latency=2)
dt r6 ! 67 EX
mov.b r1,@-r0 ! 29 LS
bf/s 3b ! 111 BR
mov.b r2,@-r0 ! 29 LS
98:
rts
nop
99: rts
mov r4, r0
! Size is not small, so its worthwhile looking for optimisations.
! First align destination to a long word boundary.
!
! r5 = normal value -1
6: tst #3, r0 ! 87 MT
mov #3, r3 ! 6 EX
bt/s 2f ! 111 BR
and r0,r3 ! 78 EX
! 3 cycles, 1 byte per iteration
1: dt r3 ! 67 EX
mov.b @(r0,r5),r1 ! 19 LS (latency=2)
add #-1, r6 ! 79 EX
bf/s 1b ! 109 BR
mov.b r1,@-r0 ! 28 LS
2: add #1, r5 ! 79 EX
! Now select the appropriate bulk transfer code based on relative
! alignment of src and dst.
mov r0, r3 ! 5 MT (latency=0)
mov r5, r0 ! 5 MT (latency=0)
tst #1, r0 ! 87 MT
bf/s 1f ! 111 BR
mov #64, r7 ! 6 EX
! bit 0 clear
cmp/ge r7, r6 ! 55 MT
bt/s 2f ! 111 BR
tst #2, r0 ! 87 MT
! small
bt/s .Lcase0
mov r3, r0
bra .Lcase2
nop
! big
2: bt/s .Lcase0b
mov r3, r0
bra .Lcase2b
nop
! bit 0 set
1: tst #2, r0 ! 87 MT
bt/s .Lcase1
mov r3, r0
bra .Lcase3
nop
!
! GHIJ KLMN OPQR --> GHIJ KLMN OPQR
!
! src, dst and size are all long word aligned
! size is non-zero
.balign 32
.Lcase00:
mov #64, r1 ! 6 EX
mov r5, r3 ! 5 MT (latency=0)
cmp/gt r6, r1 ! 56 MT
add #-4, r5 ! 50 EX
bf .Lcase00b ! 108 BR (big loop)
shlr2 r6 ! 105 EX
shlr r6 ! 104 EX
mov.l @(r0, r5), r1 ! 21 LS (latency=2)
bf/s 4f ! 111 BR
add #-8, r3 ! 50 EX
tst r6, r6 ! 86 MT
bt/s 5f ! 110 BR
mov.l r1,@-r0 ! 30 LS
! 4 cycles, 2 long words per iteration
3: mov.l @(r0, r5), r1 ! 21 LS (latency=2)
4: mov.l @(r0, r3), r2 ! 21 LS (latency=2)
dt r6 ! 67 EX
mov.l r1, @-r0 ! 30 LS
bf/s 3b ! 109 BR
mov.l r2, @-r0 ! 30 LS
5: rts
nop
! Size is 16 or greater and less than 64, but may have trailing bytes
.balign 32
.Lcase0:
add #-4, r5 ! 50 EX
mov r4, r7 ! 5 MT (latency=0)
mov.l @(r0, r5), r1 ! 21 LS (latency=2)
mov #4, r2 ! 6 EX
add #11, r7 ! 50 EX
tst r2, r6 ! 86 MT
mov r5, r3 ! 5 MT (latency=0)
bt/s 4f ! 111 BR
add #-4, r3 ! 50 EX
mov.l r1,@-r0 ! 30 LS
! 4 cycles, 2 long words per iteration
3: mov.l @(r0, r5), r1 ! 21 LS (latency=2)
4: mov.l @(r0, r3), r2 ! 21 LS (latency=2)
cmp/hi r7, r0
mov.l r1, @-r0 ! 30 LS
bt/s 3b ! 109 BR
mov.l r2, @-r0 ! 30 LS
! Copy the final 0-3 bytes
add #3,r5 ! 50 EX
cmp/eq r0, r4 ! 54 MT
add #-10, r7 ! 50 EX
bt 9f ! 110 BR
! 3 cycles, 1 byte per iteration
1: mov.b @(r0,r5),r1 ! 19 LS
cmp/hi r7,r0 ! 57 MT
bt/s 1b ! 111 BR
mov.b r1,@-r0 ! 28 LS
9: rts
nop
! Size is at least 64 bytes, so will be going round the big loop at least once.
!
! r2 = rounded up r4
! r3 = rounded down r0
.balign 32
.Lcase0b:
add #-4, r5 ! 50 EX
.Lcase00b:
mov r0, r3 ! 5 MT (latency=0)
mov #(~0x1f), r1 ! 6 EX
and r1, r3 ! 78 EX
mov r4, r2 ! 5 MT (latency=0)
cmp/eq r3, r0 ! 54 MT
add #0x1f, r2 ! 50 EX
bt/s 1f ! 110 BR
and r1, r2 ! 78 EX
! copy initial words until cache line aligned
mov.l @(r0, r5), r1 ! 21 LS (latency=2)
tst #4, r0 ! 87 MT
mov r5, r6 ! 5 MT (latency=0)
add #-4, r6 ! 50 EX
bt/s 4f ! 111 BR
add #8, r3 ! 50 EX
tst #0x18, r0 ! 87 MT
bt/s 1f ! 109 BR
mov.l r1,@-r0 ! 30 LS
! 4 cycles, 2 long words per iteration
3: mov.l @(r0, r5), r1 ! 21 LS (latency=2)
4: mov.l @(r0, r6), r7 ! 21 LS (latency=2)
cmp/eq r3, r0 ! 54 MT
mov.l r1, @-r0 ! 30 LS
bf/s 3b ! 109 BR
mov.l r7, @-r0 ! 30 LS
! Copy the cache line aligned blocks
!
! In use: r0, r2, r4, r5
! Scratch: r1, r3, r6, r7
!
! We could do this with the four scratch registers, but if src
! and dest hit the same cache line, this will thrash, so make
! use of additional registers.
!
! We also need r0 as a temporary (for movca), so 'undo' the invariant:
! r5: src (was r0+r5)
! r1: dest (was r0)
! this can be reversed at the end, so we don't need to save any extra
! state.
!
1: mov.l r8, @-r15 ! 30 LS
add r0, r5 ! 49 EX
mov.l r9, @-r15 ! 30 LS
mov r0, r1 ! 5 MT (latency=0)
mov.l r10, @-r15 ! 30 LS
add #-0x1c, r5 ! 50 EX
mov.l r11, @-r15 ! 30 LS
! 16 cycles, 32 bytes per iteration
2: mov.l @(0x00,r5),r0 ! 18 LS (latency=2)
add #-0x20, r1 ! 50 EX
mov.l @(0x04,r5),r3 ! 18 LS (latency=2)
mov.l @(0x08,r5),r6 ! 18 LS (latency=2)
mov.l @(0x0c,r5),r7 ! 18 LS (latency=2)
mov.l @(0x10,r5),r8 ! 18 LS (latency=2)
mov.l @(0x14,r5),r9 ! 18 LS (latency=2)
mov.l @(0x18,r5),r10 ! 18 LS (latency=2)
mov.l @(0x1c,r5),r11 ! 18 LS (latency=2)
movca.l r0,@r1 ! 40 LS (latency=3-7)
mov.l r3,@(0x04,r1) ! 33 LS
mov.l r6,@(0x08,r1) ! 33 LS
mov.l r7,@(0x0c,r1) ! 33 LS
mov.l r8,@(0x10,r1) ! 33 LS
add #-0x20, r5 ! 50 EX
mov.l r9,@(0x14,r1) ! 33 LS
cmp/eq r2,r1 ! 54 MT
mov.l r10,@(0x18,r1) ! 33 LS
bf/s 2b ! 109 BR
mov.l r11,@(0x1c,r1) ! 33 LS
mov r1, r0 ! 5 MT (latency=0)
mov.l @r15+, r11 ! 15 LS
sub r1, r5 ! 75 EX
mov.l @r15+, r10 ! 15 LS
cmp/eq r4, r0 ! 54 MT
bf/s 1f ! 109 BR
mov.l @r15+, r9 ! 15 LS
rts
1: mov.l @r15+, r8 ! 15 LS
sub r4, r1 ! 75 EX (len remaining)
! number of trailing bytes is non-zero
!
! invariants restored (r5 already decremented by 4)
! also r1=num bytes remaining
mov #4, r2 ! 6 EX
mov r4, r7 ! 5 MT (latency=0)
add #0x1c, r5 ! 50 EX (back to -4)
cmp/hs r2, r1 ! 58 MT
bf/s 5f ! 108 BR
add #11, r7 ! 50 EX
mov.l @(r0, r5), r6 ! 21 LS (latency=2)
tst r2, r1 ! 86 MT
mov r5, r3 ! 5 MT (latency=0)
bt/s 4f ! 111 BR
add #-4, r3 ! 50 EX
cmp/hs r2, r1 ! 58 MT
bt/s 5f ! 111 BR
mov.l r6,@-r0 ! 30 LS
! 4 cycles, 2 long words per iteration
3: mov.l @(r0, r5), r6 ! 21 LS (latency=2)
4: mov.l @(r0, r3), r2 ! 21 LS (latency=2)
cmp/hi r7, r0
mov.l r6, @-r0 ! 30 LS
bt/s 3b ! 109 BR
mov.l r2, @-r0 ! 30 LS
! Copy the final 0-3 bytes
5: cmp/eq r0, r4 ! 54 MT
add #-10, r7 ! 50 EX
bt 9f ! 110 BR
add #3,r5 ! 50 EX
! 3 cycles, 1 byte per iteration
1: mov.b @(r0,r5),r1 ! 19 LS
cmp/hi r7,r0 ! 57 MT
bt/s 1b ! 111 BR
mov.b r1,@-r0 ! 28 LS
9: rts
nop
!
! GHIJ KLMN OPQR --> ..GH IJKL MNOP QR..
!
.balign 32
.Lcase2:
! Size is 16 or greater and less then 64, but may have trailing bytes
2: mov r5, r6 ! 5 MT (latency=0)
add #-2,r5 ! 50 EX
mov r4,r2 ! 5 MT (latency=0)
add #-4,r6 ! 50 EX
add #7,r2 ! 50 EX
3: mov.w @(r0,r5),r1 ! 20 LS (latency=2)
mov.w @(r0,r6),r3 ! 20 LS (latency=2)
cmp/hi r2,r0 ! 57 MT
mov.w r1,@-r0 ! 29 LS
bt/s 3b ! 111 BR
mov.w r3,@-r0 ! 29 LS
bra 10f
nop
.balign 32
.Lcase2b:
! Size is at least 64 bytes, so will be going round the big loop at least once.
!
! r2 = rounded up r4
! r3 = rounded down r0
mov r0, r3 ! 5 MT (latency=0)
mov #(~0x1f), r1 ! 6 EX
and r1, r3 ! 78 EX
mov r4, r2 ! 5 MT (latency=0)
cmp/eq r3, r0 ! 54 MT
add #0x1f, r2 ! 50 EX
add #-2, r5 ! 50 EX
bt/s 1f ! 110 BR
and r1, r2 ! 78 EX
! Copy a short word one at a time until we are cache line aligned
! Normal values: r0, r2, r3, r4
! Unused: r1, r6, r7
! Mod: r5 (=r5-2)
!
add #2, r3 ! 50 EX
2: mov.w @(r0,r5),r1 ! 20 LS (latency=2)
cmp/eq r3,r0 ! 54 MT
bf/s 2b ! 111 BR
mov.w r1,@-r0 ! 29 LS
! Copy the cache line aligned blocks
!
! In use: r0, r2, r4, r5 (=r5-2)
! Scratch: r1, r3, r6, r7
!
! We could do this with the four scratch registers, but if src
! and dest hit the same cache line, this will thrash, so make
! use of additional registers.
!
! We also need r0 as a temporary (for movca), so 'undo' the invariant:
! r5: src (was r0+r5)
! r1: dest (was r0)
! this can be reversed at the end, so we don't need to save any extra
! state.
!
1: mov.l r8, @-r15 ! 30 LS
add r0, r5 ! 49 EX
mov.l r9, @-r15 ! 30 LS
mov r0, r1 ! 5 MT (latency=0)
mov.l r10, @-r15 ! 30 LS
add #-0x1e, r5 ! 50 EX
mov.l r11, @-r15 ! 30 LS
mov.l r12, @-r15 ! 30 LS
! 17 cycles, 32 bytes per iteration
#ifdef CONFIG_CPU_LITTLE_ENDIAN
2: mov.w @r5+, r0 ! 14 LS (latency=2) ..JI
add #-0x20, r1 ! 50 EX
mov.l @r5+, r3 ! 15 LS (latency=2) NMLK
mov.l @r5+, r6 ! 15 LS (latency=2) RQPO
shll16 r0 ! 103 EX JI..
mov.l @r5+, r7 ! 15 LS (latency=2)
xtrct r3, r0 ! 48 EX LKJI
mov.l @r5+, r8 ! 15 LS (latency=2)
xtrct r6, r3 ! 48 EX PONM
mov.l @r5+, r9 ! 15 LS (latency=2)
xtrct r7, r6 ! 48 EX
mov.l @r5+, r10 ! 15 LS (latency=2)
xtrct r8, r7 ! 48 EX
mov.l @r5+, r11 ! 15 LS (latency=2)
xtrct r9, r8 ! 48 EX
mov.w @r5+, r12 ! 15 LS (latency=2)
xtrct r10, r9 ! 48 EX
movca.l r0,@r1 ! 40 LS (latency=3-7)
xtrct r11, r10 ! 48 EX
mov.l r3, @(0x04,r1) ! 33 LS
xtrct r12, r11 ! 48 EX
mov.l r6, @(0x08,r1) ! 33 LS
mov.l r7, @(0x0c,r1) ! 33 LS
mov.l r8, @(0x10,r1) ! 33 LS
add #-0x40, r5 ! 50 EX
mov.l r9, @(0x14,r1) ! 33 LS
cmp/eq r2,r1 ! 54 MT
mov.l r10, @(0x18,r1) ! 33 LS
bf/s 2b ! 109 BR
mov.l r11, @(0x1c,r1) ! 33 LS
#else
2: mov.w @(0x1e,r5), r0 ! 17 LS (latency=2)
add #-2, r5 ! 50 EX
mov.l @(0x1c,r5), r3 ! 18 LS (latency=2)
add #-4, r1 ! 50 EX
mov.l @(0x18,r5), r6 ! 18 LS (latency=2)
shll16 r0 ! 103 EX
mov.l @(0x14,r5), r7 ! 18 LS (latency=2)
xtrct r3, r0 ! 48 EX
mov.l @(0x10,r5), r8 ! 18 LS (latency=2)
xtrct r6, r3 ! 48 EX
mov.l @(0x0c,r5), r9 ! 18 LS (latency=2)
xtrct r7, r6 ! 48 EX
mov.l @(0x08,r5), r10 ! 18 LS (latency=2)
xtrct r8, r7 ! 48 EX
mov.l @(0x04,r5), r11 ! 18 LS (latency=2)
xtrct r9, r8 ! 48 EX
mov.l @(0x00,r5), r12 ! 18 LS (latency=2)
xtrct r10, r9 ! 48 EX
movca.l r0,@r1 ! 40 LS (latency=3-7)
add #-0x1c, r1 ! 50 EX
mov.l r3, @(0x18,r1) ! 33 LS
xtrct r11, r10 ! 48 EX
mov.l r6, @(0x14,r1) ! 33 LS
xtrct r12, r11 ! 48 EX
mov.l r7, @(0x10,r1) ! 33 LS
mov.l r8, @(0x0c,r1) ! 33 LS
add #-0x1e, r5 ! 50 EX
mov.l r9, @(0x08,r1) ! 33 LS
cmp/eq r2,r1 ! 54 MT
mov.l r10, @(0x04,r1) ! 33 LS
bf/s 2b ! 109 BR
mov.l r11, @(0x00,r1) ! 33 LS
#endif
mov.l @r15+, r12
mov r1, r0 ! 5 MT (latency=0)
mov.l @r15+, r11 ! 15 LS
sub r1, r5 ! 75 EX
mov.l @r15+, r10 ! 15 LS
cmp/eq r4, r0 ! 54 MT
bf/s 1f ! 109 BR
mov.l @r15+, r9 ! 15 LS
rts
1: mov.l @r15+, r8 ! 15 LS
add #0x1e, r5 ! 50 EX
! Finish off a short word at a time
! r5 must be invariant - 2
10: mov r4,r2 ! 5 MT (latency=0)
add #1,r2 ! 50 EX
cmp/hi r2, r0 ! 57 MT
bf/s 1f ! 109 BR
add #2, r2 ! 50 EX
3: mov.w @(r0,r5),r1 ! 20 LS
cmp/hi r2,r0 ! 57 MT
bt/s 3b ! 109 BR
mov.w r1,@-r0 ! 29 LS
1:
!
! Finally, copy the last byte if necessary
cmp/eq r4,r0 ! 54 MT
bt/s 9b
add #1,r5
mov.b @(r0,r5),r1
rts
mov.b r1,@-r0
|
aixcc-public/challenge-001-exemplar-source
| 6,226
|
arch/sh/lib/copy_page.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* copy_page, __copy_user_page, __copy_user implementation of SuperH
*
* Copyright (C) 2001 Niibe Yutaka & Kaz Kojima
* Copyright (C) 2002 Toshinobu Sugioka
* Copyright (C) 2006 Paul Mundt
*/
#include <linux/linkage.h>
#include <asm/page.h>
/*
* copy_page
* @to: P1 address
* @from: P1 address
*
* void copy_page(void *to, void *from)
*/
/*
* r0, r1, r2, r3, r4, r5, r6, r7 --- scratch
* r8 --- from + PAGE_SIZE
* r9 --- not used
* r10 --- to
* r11 --- from
*/
ENTRY(copy_page)
mov.l r8,@-r15
mov.l r10,@-r15
mov.l r11,@-r15
mov r4,r10
mov r5,r11
mov r5,r8
mov #(PAGE_SIZE >> 10), r0
shll8 r0
shll2 r0
add r0,r8
!
1: mov.l @r11+,r0
mov.l @r11+,r1
mov.l @r11+,r2
mov.l @r11+,r3
mov.l @r11+,r4
mov.l @r11+,r5
mov.l @r11+,r6
mov.l @r11+,r7
#if defined(CONFIG_CPU_SH4)
movca.l r0,@r10
#else
mov.l r0,@r10
#endif
add #32,r10
mov.l r7,@-r10
mov.l r6,@-r10
mov.l r5,@-r10
mov.l r4,@-r10
mov.l r3,@-r10
mov.l r2,@-r10
mov.l r1,@-r10
cmp/eq r11,r8
bf/s 1b
add #28,r10
!
mov.l @r15+,r11
mov.l @r15+,r10
mov.l @r15+,r8
rts
nop
/*
* __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
* Return the number of bytes NOT copied
*/
#define EX(...) \
9999: __VA_ARGS__ ; \
.section __ex_table, "a"; \
.long 9999b, 6000f ; \
.previous
#define EX_NO_POP(...) \
9999: __VA_ARGS__ ; \
.section __ex_table, "a"; \
.long 9999b, 6005f ; \
.previous
ENTRY(__copy_user)
! Check if small number of bytes
mov #11,r0
mov r4,r3
cmp/gt r0,r6 ! r6 (len) > r0 (11)
bf/s .L_cleanup_loop_no_pop
add r6,r3 ! last destination address
! Calculate bytes needed to align to src
mov.l r11,@-r15
neg r5,r0
mov.l r10,@-r15
add #4,r0
mov.l r9,@-r15
and #3,r0
mov.l r8,@-r15
tst r0,r0
bt 2f
1:
! Copy bytes to long word align src
EX( mov.b @r5+,r1 )
dt r0
add #-1,r6
EX( mov.b r1,@r4 )
bf/s 1b
add #1,r4
! Jump to appropriate routine depending on dest
2: mov #3,r1
mov r6, r2
and r4,r1
shlr2 r2
shll2 r1
mova .L_jump_tbl,r0
mov.l @(r0,r1),r1
jmp @r1
nop
.align 2
.L_jump_tbl:
.long .L_dest00
.long .L_dest01
.long .L_dest10
.long .L_dest11
/*
* Come here if there are less than 12 bytes to copy
*
* Keep the branch target close, so the bf/s callee doesn't overflow
* and result in a more expensive branch being inserted. This is the
* fast-path for small copies, the jump via the jump table will hit the
* default slow-path cleanup. -PFM.
*/
.L_cleanup_loop_no_pop:
tst r6,r6 ! Check explicitly for zero
bt 1f
2:
EX_NO_POP( mov.b @r5+,r0 )
dt r6
EX_NO_POP( mov.b r0,@r4 )
bf/s 2b
add #1,r4
1: mov #0,r0 ! normal return
5000:
# Exception handler:
.section .fixup, "ax"
6005:
mov.l 8000f,r1
mov r3,r0
jmp @r1
sub r4,r0
.align 2
8000: .long 5000b
.previous
rts
nop
! Destination = 00
.L_dest00:
! Skip the large copy for small transfers
mov #(32+32-4), r0
cmp/gt r6, r0 ! r0 (60) > r6 (len)
bt 1f
! Align dest to a 32 byte boundary
neg r4,r0
add #0x20, r0
and #0x1f, r0
tst r0, r0
bt 2f
sub r0, r6
shlr2 r0
3:
EX( mov.l @r5+,r1 )
dt r0
EX( mov.l r1,@r4 )
bf/s 3b
add #4,r4
2:
EX( mov.l @r5+,r0 )
EX( mov.l @r5+,r1 )
EX( mov.l @r5+,r2 )
EX( mov.l @r5+,r7 )
EX( mov.l @r5+,r8 )
EX( mov.l @r5+,r9 )
EX( mov.l @r5+,r10 )
EX( mov.l @r5+,r11 )
#ifdef CONFIG_CPU_SH4
EX( movca.l r0,@r4 )
#else
EX( mov.l r0,@r4 )
#endif
add #-32, r6
EX( mov.l r1,@(4,r4) )
mov #32, r0
EX( mov.l r2,@(8,r4) )
cmp/gt r6, r0 ! r0 (32) > r6 (len)
EX( mov.l r7,@(12,r4) )
EX( mov.l r8,@(16,r4) )
EX( mov.l r9,@(20,r4) )
EX( mov.l r10,@(24,r4) )
EX( mov.l r11,@(28,r4) )
bf/s 2b
add #32,r4
1: mov r6, r0
shlr2 r0
tst r0, r0
bt .L_cleanup
1:
EX( mov.l @r5+,r1 )
dt r0
EX( mov.l r1,@r4 )
bf/s 1b
add #4,r4
bra .L_cleanup
nop
! Destination = 10
.L_dest10:
mov r2,r7
shlr2 r7
shlr r7
tst r7,r7
mov #7,r0
bt/s 1f
and r0,r2
2:
dt r7
#ifdef CONFIG_CPU_LITTLE_ENDIAN
EX( mov.l @r5+,r0 )
EX( mov.l @r5+,r1 )
EX( mov.l @r5+,r8 )
EX( mov.l @r5+,r9 )
EX( mov.l @r5+,r10 )
EX( mov.w r0,@r4 )
add #2,r4
xtrct r1,r0
xtrct r8,r1
xtrct r9,r8
xtrct r10,r9
EX( mov.l r0,@r4 )
EX( mov.l r1,@(4,r4) )
EX( mov.l r8,@(8,r4) )
EX( mov.l r9,@(12,r4) )
EX( mov.l @r5+,r1 )
EX( mov.l @r5+,r8 )
EX( mov.l @r5+,r0 )
xtrct r1,r10
xtrct r8,r1
xtrct r0,r8
shlr16 r0
EX( mov.l r10,@(16,r4) )
EX( mov.l r1,@(20,r4) )
EX( mov.l r8,@(24,r4) )
EX( mov.w r0,@(28,r4) )
bf/s 2b
add #30,r4
#else
EX( mov.l @(28,r5),r0 )
EX( mov.l @(24,r5),r8 )
EX( mov.l @(20,r5),r9 )
EX( mov.l @(16,r5),r10 )
EX( mov.w r0,@(30,r4) )
add #-2,r4
xtrct r8,r0
xtrct r9,r8
xtrct r10,r9
EX( mov.l r0,@(28,r4) )
EX( mov.l r8,@(24,r4) )
EX( mov.l r9,@(20,r4) )
EX( mov.l @(12,r5),r0 )
EX( mov.l @(8,r5),r8 )
xtrct r0,r10
EX( mov.l @(4,r5),r9 )
mov.l r10,@(16,r4)
EX( mov.l @r5,r10 )
xtrct r8,r0
xtrct r9,r8
xtrct r10,r9
EX( mov.l r0,@(12,r4) )
EX( mov.l r8,@(8,r4) )
swap.w r10,r0
EX( mov.l r9,@(4,r4) )
EX( mov.w r0,@(2,r4) )
add #32,r5
bf/s 2b
add #34,r4
#endif
tst r2,r2
bt .L_cleanup
1: ! Read longword, write two words per iteration
EX( mov.l @r5+,r0 )
dt r2
#ifdef CONFIG_CPU_LITTLE_ENDIAN
EX( mov.w r0,@r4 )
shlr16 r0
EX( mov.w r0,@(2,r4) )
#else
EX( mov.w r0,@(2,r4) )
shlr16 r0
EX( mov.w r0,@r4 )
#endif
bf/s 1b
add #4,r4
bra .L_cleanup
nop
! Destination = 01 or 11
.L_dest01:
.L_dest11:
! Read longword, write byte, word, byte per iteration
EX( mov.l @r5+,r0 )
dt r2
#ifdef CONFIG_CPU_LITTLE_ENDIAN
EX( mov.b r0,@r4 )
shlr8 r0
add #1,r4
EX( mov.w r0,@r4 )
shlr16 r0
EX( mov.b r0,@(2,r4) )
bf/s .L_dest01
add #3,r4
#else
EX( mov.b r0,@(3,r4) )
shlr8 r0
swap.w r0,r7
EX( mov.b r7,@r4 )
add #1,r4
EX( mov.w r0,@r4 )
bf/s .L_dest01
add #3,r4
#endif
! Cleanup last few bytes
.L_cleanup:
mov r6,r0
and #3,r0
tst r0,r0
bt .L_exit
mov r0,r6
.L_cleanup_loop:
EX( mov.b @r5+,r0 )
dt r6
EX( mov.b r0,@r4 )
bf/s .L_cleanup_loop
add #1,r4
.L_exit:
mov #0,r0 ! normal return
5000:
# Exception handler:
.section .fixup, "ax"
6000:
mov.l 8000f,r1
mov r3,r0
jmp @r1
sub r4,r0
.align 2
8000: .long 5000b
.previous
mov.l @r15+,r8
mov.l @r15+,r9
mov.l @r15+,r10
rts
mov.l @r15+,r11
|
aixcc-public/challenge-001-exemplar-source
| 5,317
|
arch/sh/lib/mcount.S
|
/* SPDX-License-Identifier: GPL-2.0
*
* arch/sh/lib/mcount.S
*
* Copyright (C) 2008, 2009 Paul Mundt
* Copyright (C) 2008, 2009 Matt Fleming
*/
#include <asm/ftrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#define MCOUNT_ENTER() \
mov.l r4, @-r15; \
mov.l r5, @-r15; \
mov.l r6, @-r15; \
mov.l r7, @-r15; \
sts.l pr, @-r15; \
\
mov.l @(20,r15),r4; \
sts pr, r5
#define MCOUNT_LEAVE() \
lds.l @r15+, pr; \
mov.l @r15+, r7; \
mov.l @r15+, r6; \
mov.l @r15+, r5; \
rts; \
mov.l @r15+, r4
#ifdef CONFIG_STACK_DEBUG
/*
* Perform diagnostic checks on the state of the kernel stack.
*
* Check for stack overflow. If there is less than 1KB free
* then it has overflowed.
*
* Make sure the stack pointer contains a valid address. Valid
* addresses for kernel stacks are anywhere after the bss
* (after __bss_stop) and anywhere in init_thread_union (init_stack).
*/
#define STACK_CHECK() \
mov #(THREAD_SIZE >> 10), r0; \
shll8 r0; \
shll2 r0; \
\
/* r1 = sp & (THREAD_SIZE - 1) */ \
mov #-1, r1; \
add r0, r1; \
and r15, r1; \
\
mov #TI_SIZE, r3; \
mov #(STACK_WARN >> 8), r2; \
shll8 r2; \
add r3, r2; \
\
/* Is the stack overflowing? */ \
cmp/hi r2, r1; \
bf stack_panic; \
\
/* If sp > __bss_stop then we're OK. */ \
mov.l .L_ebss, r1; \
cmp/hi r1, r15; \
bt 1f; \
\
/* If sp < init_stack, we're not OK. */ \
mov.l .L_init_thread_union, r1; \
cmp/hs r1, r15; \
bf stack_panic; \
\
/* If sp > init_stack && sp < __bss_stop, not OK. */ \
add r0, r1; \
cmp/hs r1, r15; \
bt stack_panic; \
1:
#else
#define STACK_CHECK()
#endif /* CONFIG_STACK_DEBUG */
.align 2
.globl _mcount
.type _mcount,@function
.globl mcount
.type mcount,@function
_mcount:
mcount:
STACK_CHECK()
#ifndef CONFIG_FUNCTION_TRACER
rts
nop
#else
MCOUNT_ENTER()
#ifdef CONFIG_DYNAMIC_FTRACE
.globl mcount_call
mcount_call:
mov.l .Lftrace_stub, r6
#else
mov.l .Lftrace_trace_function, r6
mov.l ftrace_stub, r7
cmp/eq r6, r7
bt skip_trace
mov.l @r6, r6
#endif
jsr @r6
nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
mov.l .Lftrace_graph_return, r6
mov.l .Lftrace_stub, r7
cmp/eq r6, r7
bt 1f
mov.l .Lftrace_graph_caller, r0
jmp @r0
nop
1:
mov.l .Lftrace_graph_entry, r6
mov.l .Lftrace_graph_entry_stub, r7
cmp/eq r6, r7
bt skip_trace
mov.l .Lftrace_graph_caller, r0
jmp @r0
nop
.align 2
.Lftrace_graph_return:
.long ftrace_graph_return
.Lftrace_graph_entry:
.long ftrace_graph_entry
.Lftrace_graph_entry_stub:
.long ftrace_graph_entry_stub
.Lftrace_graph_caller:
.long ftrace_graph_caller
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
.globl skip_trace
skip_trace:
MCOUNT_LEAVE()
.align 2
.Lftrace_trace_function:
.long ftrace_trace_function
#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* NOTE: Do not move either ftrace_graph_call or ftrace_caller
* as this will affect the calculation of GRAPH_INSN_OFFSET.
*/
.globl ftrace_graph_call
ftrace_graph_call:
mov.l .Lskip_trace, r0
jmp @r0
nop
.align 2
.Lskip_trace:
.long skip_trace
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
.globl ftrace_caller
ftrace_caller:
MCOUNT_ENTER()
.globl ftrace_call
ftrace_call:
mov.l .Lftrace_stub, r6
jsr @r6
nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
bra ftrace_graph_call
nop
#else
MCOUNT_LEAVE()
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE */
.align 2
/*
* NOTE: From here on the locations of the .Lftrace_stub label and
* ftrace_stub itself are fixed. Adding additional data here will skew
* the displacement for the memory table and break the block replacement.
* Place new labels either after the ftrace_stub body, or before
* ftrace_caller. You have been warned.
*/
.Lftrace_stub:
.long ftrace_stub
.globl ftrace_stub
ftrace_stub:
rts
nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_caller
ftrace_graph_caller:
mov.l 2f, r1
jmp @r1
nop
1:
/*
* MCOUNT_ENTER() pushed 5 registers onto the stack, so
* the stack address containing our return address is
* r15 + 20.
*/
mov #20, r0
add r15, r0
mov r0, r4
mov.l .Lprepare_ftrace_return, r0
jsr @r0
nop
MCOUNT_LEAVE()
.align 2
2: .long skip_trace
.Lprepare_ftrace_return:
.long prepare_ftrace_return
.globl return_to_handler
return_to_handler:
/*
* Save the return values.
*/
mov.l r0, @-r15
mov.l r1, @-r15
mov #0, r4
mov.l .Lftrace_return_to_handler, r0
jsr @r0
nop
/*
* The return value from ftrace_return_handler has the real
* address that we should return to.
*/
lds r0, pr
mov.l @r15+, r1
rts
mov.l @r15+, r0
.align 2
.Lftrace_return_to_handler:
.long ftrace_return_to_handler
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_STACK_DEBUG
! stack_panic: fatal stack-corruption/overflow path. Dumps the stack
! and panics with the "Stack error" message; does not return.
.globl stack_panic
stack_panic:
mov.l .Ldump_stack, r0
jsr @r0			! dump_stack()
nop
mov.l .Lpanic, r0
jsr @r0			! panic(.Lpanic_str)
mov.l .Lpanic_s, r4	! delay slot: arg0 = message pointer
rts			! not expected to be reached after panic()
nop
.align 2
! NOTE(review): .L_init_thread_union / .L_ebss are not referenced in
! this excerpt; presumably used by stack-check code outside this view.
.L_init_thread_union:
.long init_thread_union
.L_ebss:
.long __bss_stop
.Lpanic:
.long panic
.Lpanic_s:
.long .Lpanic_str
.Ldump_stack:
.long dump_stack
.section .rodata
.align 2
.Lpanic_str:
.string "Stack error"
#endif /* CONFIG_STACK_DEBUG */
|
aixcc-public/challenge-001-exemplar-source
| 1,827
|
arch/sh/lib/__clear_user.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* __clear_user_page, __clear_user, clear_page implementation of SuperH
*
* Copyright (C) 2001 Kaz Kojima
* Copyright (C) 2001, 2002 Niibe Yutaka
* Copyright (C) 2006 Paul Mundt
*/
#include <linux/linkage.h>
#include <asm/page.h>
! __clear_user(to = r4, n = r5)
! Zero n bytes of user memory starting at `to`. Returns 0 in r0 on
! success, or the number of bytes left un-cleared if a fault occurs
! (faulting instructions 0:-9: are fixed up via the __ex_table below).
! Strategy: byte-clear up to a 32-byte boundary (area 0), clear the
! aligned middle 32 bytes per iteration (area 1), byte-clear the tail
! (area 2).
ENTRY(__clear_user)
!
mov #0, r0		! r0 = 0, value stored everywhere
mov #0xffffffe0, r1	! r1 = ~31, 32-byte alignment mask
!
! r4..(r4+31)&~32 -------- not aligned [ Area 0 ]
! (r4+31)&~32..(r4+r5)&~32 -------- aligned [ Area 1 ]
! (r4+r5)&~32..r4+r5 -------- not aligned [ Area 2 ]
!
! Clear area 0
mov r4, r2
!
tst r1, r5 ! length < 32
bt .Larea2 ! skip to remainder
!
add #31, r2
and r1, r2		! r2 = (r4+31) & ~31, aligned start
cmp/eq r4, r2
bt .Larea1		! already aligned — no head bytes
mov r2, r3
sub r4, r3		! r3 = head byte count
mov r3, r7		! r7 = head count, used to adjust n later
mov r4, r2
!
.L0: dt r3
0: mov.b r0, @r2	! may fault — fixed up below
bf/s .L0
add #1, r2		! delay slot: advance destination
!
sub r7, r5		! n -= head bytes cleared
mov r2, r4		! r4 = new (aligned) start
.Larea1:
mov r4, r3
add r5, r3
and r1, r3		! r3 = aligned end of area 1
cmp/hi r2, r3
bf .Larea2		! no full 32-byte chunks left
!
! Clear area 1
#if defined(CONFIG_CPU_SH4)
1: movca.l r0, @r2	! movca.l allocates the cache line w/o reading it
#else
1: mov.l r0, @r2
#endif
add #4, r2
2: mov.l r0, @r2
add #4, r2
3: mov.l r0, @r2
add #4, r2
4: mov.l r0, @r2
add #4, r2
5: mov.l r0, @r2
add #4, r2
6: mov.l r0, @r2
add #4, r2
7: mov.l r0, @r2
add #4, r2
8: mov.l r0, @r2
add #4, r2
cmp/hi r2, r3
bt/s 1b			! loop while below the aligned end
nop
!
! Clear area 2
.Larea2:
mov r4, r3
add r5, r3		! r3 = absolute end of the region
cmp/hs r3, r2
bt/s .Ldone		! nothing left
sub r2, r3		! delay slot: r3 = tail byte count
.L2: dt r3
9: mov.b r0, @r2
bf/s .L2
add #1, r2
!
.Ldone: rts
mov #0, r0 ! return 0 as normal return
! return the number of bytes remained
.Lbad_clear_user:
mov r4, r0
add r5, r0		! r0 = end address of the request
rts
sub r2, r0		! delay slot: r0 = end - fault point = bytes left
.section __ex_table,"a"
.align 2
.long 0b, .Lbad_clear_user
.long 1b, .Lbad_clear_user
.long 2b, .Lbad_clear_user
.long 3b, .Lbad_clear_user
.long 4b, .Lbad_clear_user
.long 5b, .Lbad_clear_user
.long 6b, .Lbad_clear_user
.long 7b, .Lbad_clear_user
.long 8b, .Lbad_clear_user
.long 9b, .Lbad_clear_user
.previous
|
aixcc-public/challenge-001-exemplar-source
| 2,366
|
arch/sh/lib/ashiftrt.S
|
/* SPDX-License-Identifier: GPL-2.0+ WITH GCC-exception-2.0
Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
2004, 2005, 2006
Free Software Foundation, Inc.
*/
!! libgcc routines for the Renesas / SuperH SH CPUs.
!! Contributed by Steve Chamberlain.
!! sac@cygnus.com
!! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines
!! recoded in assembly by Toshiyasu Morita
!! tm@netcom.com
/* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and
ELF local label prefixes by J"orn Rennecke
amylaar@cygnus.com */
! Arithmetic right shift of r4 by a fixed count (libgcc helpers).
! __ashiftrt_r4_<n>: r4 = (signed) r4 >> n; r4 is both input and output.
! Most entry points are a single "shar" that falls through into the
! next-lower count; 16 and 24 use shlr16/shlr8 plus sign extension,
! and 31/32 build the result from the sign bit alone.
.global __ashiftrt_r4_0
.global __ashiftrt_r4_1
.global __ashiftrt_r4_2
.global __ashiftrt_r4_3
.global __ashiftrt_r4_4
.global __ashiftrt_r4_5
.global __ashiftrt_r4_6
.global __ashiftrt_r4_7
.global __ashiftrt_r4_8
.global __ashiftrt_r4_9
.global __ashiftrt_r4_10
.global __ashiftrt_r4_11
.global __ashiftrt_r4_12
.global __ashiftrt_r4_13
.global __ashiftrt_r4_14
.global __ashiftrt_r4_15
.global __ashiftrt_r4_16
.global __ashiftrt_r4_17
.global __ashiftrt_r4_18
.global __ashiftrt_r4_19
.global __ashiftrt_r4_20
.global __ashiftrt_r4_21
.global __ashiftrt_r4_22
.global __ashiftrt_r4_23
.global __ashiftrt_r4_24
.global __ashiftrt_r4_25
.global __ashiftrt_r4_26
.global __ashiftrt_r4_27
.global __ashiftrt_r4_28
.global __ashiftrt_r4_29
.global __ashiftrt_r4_30
.global __ashiftrt_r4_31
.global __ashiftrt_r4_32
.align 1
__ashiftrt_r4_32:
__ashiftrt_r4_31:
rotcl r4		! T = sign bit of r4
rts
subc r4,r4		! delay slot: r4 = -T (all ones or zero)
__ashiftrt_r4_30:
shar r4
__ashiftrt_r4_29:
shar r4
__ashiftrt_r4_28:
shar r4
__ashiftrt_r4_27:
shar r4
__ashiftrt_r4_26:
shar r4
__ashiftrt_r4_25:
shar r4
__ashiftrt_r4_24:
shlr16 r4		! logical shift by 24...
shlr8 r4
rts
exts.b r4,r4		! ...then sign-extend the remaining byte
__ashiftrt_r4_23:
shar r4
__ashiftrt_r4_22:
shar r4
__ashiftrt_r4_21:
shar r4
__ashiftrt_r4_20:
shar r4
__ashiftrt_r4_19:
shar r4
__ashiftrt_r4_18:
shar r4
__ashiftrt_r4_17:
shar r4
__ashiftrt_r4_16:
shlr16 r4		! logical shift by 16...
rts
exts.w r4,r4		! ...then sign-extend the remaining word
__ashiftrt_r4_15:
shar r4
__ashiftrt_r4_14:
shar r4
__ashiftrt_r4_13:
shar r4
__ashiftrt_r4_12:
shar r4
__ashiftrt_r4_11:
shar r4
__ashiftrt_r4_10:
shar r4
__ashiftrt_r4_9:
shar r4
__ashiftrt_r4_8:
shar r4
__ashiftrt_r4_7:
shar r4
__ashiftrt_r4_6:
shar r4
__ashiftrt_r4_5:
shar r4
__ashiftrt_r4_4:
shar r4
__ashiftrt_r4_3:
shar r4
__ashiftrt_r4_2:
shar r4
__ashiftrt_r4_1:
rts
shar r4			! delay slot: final single-bit shift
__ashiftrt_r4_0:
rts
nop			! shift by 0: return r4 unchanged
|
aixcc-public/challenge-001-exemplar-source
| 4,348
|
arch/sh/lib/movmem.S
|
/* SPDX-License-Identifier: GPL-2.0+ WITH GCC-exception-2.0
Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
2004, 2005, 2006
Free Software Foundation, Inc.
*/
!! libgcc routines for the Renesas / SuperH SH CPUs.
!! Contributed by Steve Chamberlain.
!! sac@cygnus.com
!! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines
!! recoded in assembly by Toshiyasu Morita
!! tm@netcom.com
/* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and
ELF local label prefixes by J"orn Rennecke
amylaar@cygnus.com */
.text
.balign 4
! __movmem: libgcc bulk copy, dst = r4, src = r5. r6 is scaled to a
! byte count by the shll2 below — NOTE(review): so r6 presumably
! arrives as a longword count; confirm against the compiler's
! movmemsi expansion. Copies 64-byte groups via the __movmemSI* chain,
! then dispatches into the chain for the remainder.
.global __movmem
.global __movstr
.set __movstr, __movmem
/* This would be a lot simpler if r6 contained the byte count
minus 64, and we wouldn't be called here for a byte count of 64. */
__movmem:
sts.l pr,@-r15
shll2 r6		! r6 *= 4 -> byte count
bsr __movmemSI52+2	! copy bytes 48..0 of the first group
mov.l @(48,r5),r0	! delay slot: preload word at offset 48
.balign 4
movmem_loop: /* Reached with rts */
mov.l @(60,r5),r0
add #-64,r6
mov.l r0,@(60,r4)
tst r6,r6		! exactly 64 bytes left?
mov.l @(56,r5),r0
bt movmem_done
mov.l r0,@(56,r4)
cmp/pl r6		! more than a full group remaining?
mov.l @(52,r5),r0
add #64,r5
mov.l r0,@(52,r4)
add #64,r4
bt __movmemSI52
! done all the large groups, do the remainder
! jump to movmem+
mova __movmemSI4+4,r0
add r6,r0		! r6 is negative here: index back into the chain
jmp @r0
movmem_done: ! share slot insn, works out aligned.
lds.l @r15+,pr
mov.l r0,@(56,r4)
mov.l @(52,r5),r0
rts
mov.l r0,@(52,r4)
.balign 4
! __movmemSI<n>: copy n bytes (n = 64, 60, ..., 4) from r5 to r4 with
! longword moves. Each entry stores one word and falls through into
! the next-smaller entry; __movmemSI4 terminates with rts. The
! __movstrSI* names are legacy aliases kept via .set.
.global __movmemSI64
.global __movstrSI64
.set __movstrSI64, __movmemSI64
__movmemSI64:
mov.l @(60,r5),r0
mov.l r0,@(60,r4)
.global __movmemSI60
.global __movstrSI60
.set __movstrSI60, __movmemSI60
__movmemSI60:
mov.l @(56,r5),r0
mov.l r0,@(56,r4)
.global __movmemSI56
.global __movstrSI56
.set __movstrSI56, __movmemSI56
__movmemSI56:
mov.l @(52,r5),r0
mov.l r0,@(52,r4)
.global __movmemSI52
.global __movstrSI52
.set __movstrSI52, __movmemSI52
__movmemSI52:
mov.l @(48,r5),r0
mov.l r0,@(48,r4)
.global __movmemSI48
.global __movstrSI48
.set __movstrSI48, __movmemSI48
__movmemSI48:
mov.l @(44,r5),r0
mov.l r0,@(44,r4)
.global __movmemSI44
.global __movstrSI44
.set __movstrSI44, __movmemSI44
__movmemSI44:
mov.l @(40,r5),r0
mov.l r0,@(40,r4)
.global __movmemSI40
.global __movstrSI40
.set __movstrSI40, __movmemSI40
__movmemSI40:
mov.l @(36,r5),r0
mov.l r0,@(36,r4)
.global __movmemSI36
.global __movstrSI36
.set __movstrSI36, __movmemSI36
__movmemSI36:
mov.l @(32,r5),r0
mov.l r0,@(32,r4)
.global __movmemSI32
.global __movstrSI32
.set __movstrSI32, __movmemSI32
__movmemSI32:
mov.l @(28,r5),r0
mov.l r0,@(28,r4)
.global __movmemSI28
.global __movstrSI28
.set __movstrSI28, __movmemSI28
__movmemSI28:
mov.l @(24,r5),r0
mov.l r0,@(24,r4)
.global __movmemSI24
.global __movstrSI24
.set __movstrSI24, __movmemSI24
__movmemSI24:
mov.l @(20,r5),r0
mov.l r0,@(20,r4)
.global __movmemSI20
.global __movstrSI20
.set __movstrSI20, __movmemSI20
__movmemSI20:
mov.l @(16,r5),r0
mov.l r0,@(16,r4)
.global __movmemSI16
.global __movstrSI16
.set __movstrSI16, __movmemSI16
__movmemSI16:
mov.l @(12,r5),r0
mov.l r0,@(12,r4)
.global __movmemSI12
.global __movstrSI12
.set __movstrSI12, __movmemSI12
__movmemSI12:
mov.l @(8,r5),r0
mov.l r0,@(8,r4)
.global __movmemSI8
.global __movstrSI8
.set __movstrSI8, __movmemSI8
__movmemSI8:
mov.l @(4,r5),r0
mov.l r0,@(4,r4)
.global __movmemSI4
.global __movstrSI4
.set __movstrSI4, __movmemSI4
__movmemSI4:
mov.l @(0,r5),r0
rts
mov.l r0,@(0,r4)	! delay slot: last store
! __movmem_i4_even / __movmem_i4_odd: word-aligned copy loops moving
! 16 bytes per iteration, dst = r4, src = r5, loop counter = r6.
! NOTE(review): "even"/"odd" presumably refers to the longword-count
! parity handled by each entry — confirm against GCC's SH movmem
! expansion. __movmemSI12_i4 is a straight 12-byte copy.
.global __movmem_i4_even
.global __movstr_i4_even
.set __movstr_i4_even, __movmem_i4_even
.global __movmem_i4_odd
.global __movstr_i4_odd
.set __movstr_i4_odd, __movmem_i4_odd
.global __movmemSI12_i4
.global __movstrSI12_i4
.set __movstrSI12_i4, __movmemSI12_i4
.p2align 5
L_movmem_2mod4_end:
mov.l r0,@(16,r4)	! flush the two words loaded before exiting
rts
mov.l r1,@(20,r4)
.p2align 2
__movmem_i4_even:
mov.l @r5+,r0
bra L_movmem_start_even
mov.l @r5+,r1		! delay slot: second preload
__movmem_i4_odd:
mov.l @r5+,r1
add #-4,r4		! bias dst so the (4,..) offsets line up
mov.l @r5+,r2
mov.l @r5+,r3
mov.l r1,@(4,r4)
mov.l r2,@(8,r4)
L_movmem_loop:
mov.l r3,@(12,r4)
dt r6			! decrement count, T=1 when it hits zero
mov.l @r5+,r0
bt/s L_movmem_2mod4_end
mov.l @r5+,r1		! delay slot: keep the pipeline of loads going
add #16,r4
L_movmem_start_even:
mov.l @r5+,r2
mov.l @r5+,r3
mov.l r0,@r4
dt r6
mov.l r1,@(4,r4)
bf/s L_movmem_loop
mov.l r2,@(8,r4)	! delay slot: third store of the group
rts
mov.l r3,@(12,r4)	! delay slot: final store
.p2align 4
__movmemSI12_i4:
mov.l @r5,r0
mov.l @(4,r5),r1
mov.l @(8,r5),r2
mov.l r0,@r4
mov.l r1,@(4,r4)
rts
mov.l r2,@(8,r4)	! delay slot: last of the 12 bytes
|
aixcc-public/challenge-001-exemplar-source
| 1,668
|
arch/sh/lib/memset-sh4.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* "memset" implementation for SH4
*
* Copyright (C) 1999 Niibe Yutaka
* Copyright (c) 2009 STMicroelectronics Limited
* Author: Stuart Menefy <stuart.menefy:st.com>
*/
/*
* void *memset(void *s, int c, size_t n);
*/
#include <linux/linkage.h>
! memset(s = r4, c = r5, n = r6) for SH-4. Fills backwards from s+n
! down to s and returns s in r0. Small fills go byte-at-a-time; larger
! fills replicate c into a full word and, for >= 64 bytes, write whole
! 32-byte cache blocks (movca.l allocates each line without reading it).
ENTRY(memset)
mov #12,r0
add r6,r4		! r4 = s + n; all stores are pre-decrement
cmp/gt r6,r0
bt/s 40f ! if it's too small, set a byte at once
mov r4,r0
and #3,r0		! r0 = misalignment of the end address
cmp/eq #0,r0
bt/s 2f ! It's aligned
sub r0,r6		! account for the head bytes stored below
1:
dt r0
bf/s 1b
mov.b r5,@-r4		! delay slot: store one alignment byte
2: ! make VVVV
extu.b r5,r5
swap.b r5,r0 ! V0
or r0,r5 ! VV
swap.w r5,r0 ! VV00
or r0,r5 ! VVVV
! Check if enough bytes need to be copied to be worth the big loop
mov #0x40, r0 ! (MT)
cmp/gt r6,r0 ! (MT) 64 > len => slow loop
bt/s 22f
mov r6,r0
! align the dst to the cache block size if necessary
mov r4, r3
mov #~(0x1f), r1
and r3, r1
cmp/eq r3, r1
bt/s 11f ! dst is already aligned
sub r1, r3 ! r3-r1 -> r3
shlr2 r3 ! number of loops
10: mov.l r5,@-r4
dt r3
bf/s 10b
add #-4, r6
11: ! dst is 32byte aligned
mov r6,r2
mov #-5,r0
shld r0,r2 ! number of loops
add #-32, r4		! point r4 at the base of the first block
mov r5, r0
12:
movca.l r0,@r4		! allocate+write cache line, no read-for-ownership
mov.l r5,@(4, r4)
mov.l r5,@(8, r4)
mov.l r5,@(12,r4)
mov.l r5,@(16,r4)
mov.l r5,@(20,r4)
add #-0x20, r6
mov.l r5,@(24,r4)
dt r2
mov.l r5,@(28,r4)
bf/s 12b
add #-32, r4		! delay slot: step to the previous block
add #32, r4		! undo the extra decrement from the last pass
mov #8, r0
cmp/ge r0, r6
bf 40f			! < 8 bytes left: finish byte-wise
mov r6,r0
22:
shlr2 r0
shlr r0 ! r0 = r6 >> 3
3:
dt r0
mov.l r5,@-r4 ! set 8-byte at once
bf/s 3b
mov.l r5,@-r4
!
mov #7,r0
and r0,r6		! r6 = remaining tail bytes (0..7)
! fill bytes (length may be zero)
40: tst r6,r6
bt 5f
4:
dt r6
bf/s 4b
mov.b r5,@-r4
5:
rts
mov r4,r0		! delay slot: return original s
|
aixcc-public/challenge-001-exemplar-source
| 1,121
|
arch/sh/kernel/vsyscall/vsyscall-trapa.S
|
/* SPDX-License-Identifier: GPL-2.0 */
.text
! __kernel_vsyscall: vDSO system-call trampoline. Userspace jumps here
! (address supplied via AT_SYSINFO) and enters the kernel with
! trapa #0x10. The .eh_frame below gives unwinders CFI for this stub.
.globl __kernel_vsyscall
.type __kernel_vsyscall,@function
__kernel_vsyscall:
.LSTART_vsyscall:
trapa #0x10
nop
.LEND_vsyscall:
.size __kernel_vsyscall,.-.LSTART_vsyscall
.previous
.section .eh_frame,"a",@progbits
.LCIE:
.ualong .LCIE_end - .LCIE_start
.LCIE_start:
.ualong 0 /* CIE ID */
.byte 0x1 /* Version number */
.string "zR" /* NUL-terminated augmentation string */
.uleb128 0x1 /* Code alignment factor */
.sleb128 -4 /* Data alignment factor */
.byte 0x11 /* Return address register column */
.uleb128 0x1 /* Augmentation length and data */
.byte 0x1b /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */
.byte 0xc,0xf,0x0 /* DW_CFA_def_cfa: r15 ofs 0 */
.align 2
.LCIE_end:
.ualong .LFDE_end-.LFDE_start /* Length FDE */
.LFDE_start:
.ualong .LFDE_start-.LCIE /* CIE pointer */
.ualong .LSTART_vsyscall-. /* PC-relative start address */
.ualong .LEND_vsyscall-.LSTART_vsyscall
.uleb128 0 /* Augmentation */
.align 2
.LFDE_end:
.previous
/* Get the common code for the sigreturn entry points */
#include "vsyscall-sigreturn.S"
|
aixcc-public/challenge-001-exemplar-source
| 2,169
|
arch/sh/kernel/vsyscall/vsyscall.lds.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Linker script for vsyscall DSO. The vsyscall page is an ELF shared
* object prelinked to its virtual address, and with only one read-only
* segment (that fits in one page). This script controls its layout.
*/
#include <asm/asm-offsets.h>
#ifdef CONFIG_CPU_LITTLE_ENDIAN
OUTPUT_FORMAT("elf32-sh-linux", "elf32-sh-linux", "elf32-sh-linux")
#else
OUTPUT_FORMAT("elf32-shbig-linux", "elf32-shbig-linux", "elf32-shbig-linux")
#endif
OUTPUT_ARCH(sh)
/* The ELF entry point can be used to set the AT_SYSINFO value. */
ENTRY(__kernel_vsyscall);
SECTIONS
{
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
.gnu.hash : { *(.gnu.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
/*
* This linker script is used both with -r and with -shared.
* For the layouts to match, we need to skip more than enough
* space for the dynamic symbol table et al. If this amount
* is insufficient, ld -shared will barf. Just increase it here.
*/
. = 0x400;
.text : { *(.text) } :text =0x90909090 /* =0x90909090: fill value for gaps in .text */
.note : { *(.note.*) } :text :note
.eh_frame_hdr : { *(.eh_frame_hdr ) } :text :eh_frame_hdr
.eh_frame : {
KEEP (*(.eh_frame))
LONG (0) /* zero terminator for the .eh_frame list */
} :text
.dynamic : { *(.dynamic) } :text :dynamic
/* Writable/data sections are collected here but share the read-only
   :text segment — the vDSO carries no genuinely writable data. */
.useless : {
*(.got.plt) *(.got)
*(.data .data.* .gnu.linkonce.d.*)
*(.dynbss)
*(.bss .bss.* .gnu.linkonce.b.*)
} :text
}
/*
* Very old versions of ld do not recognize this name token; use the constant.
*/
#define PT_GNU_EH_FRAME 0x6474e550
/*
* We must supply the ELF program headers explicitly to get just one
* PT_LOAD segment, and set the flags explicitly to make segments read-only.
*/
PHDRS
{
text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
note PT_NOTE FLAGS(4); /* PF_R */
eh_frame_hdr PT_GNU_EH_FRAME;
}
/*
* This controls what symbols we export from the DSO.
*/
VERSION
{
LINUX_2.6 {
global:
__kernel_vsyscall;
__kernel_sigreturn;
__kernel_rt_sigreturn;
local: *;
};
}
|
aixcc-public/challenge-001-exemplar-source
| 1,775
|
arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/unistd.h>
.text
! __kernel_sigreturn / __kernel_rt_sigreturn: vDSO signal-return
! trampolines. Each loads the syscall number into r3 and issues
! trapa #0x10; the trailing "or r0, r0" instructions are no-op padding.
! The .eh_frame below (augmentation "zRS" — S marks a signal frame)
! lets unwinders step through signal handlers.
.balign 32
.globl __kernel_sigreturn
.type __kernel_sigreturn,@function
__kernel_sigreturn:
.LSTART_sigreturn:
mov.w 1f, r3		! r3 = __NR_sigreturn
trapa #0x10
or r0, r0
or r0, r0
or r0, r0
or r0, r0
or r0, r0
1: .short __NR_sigreturn
.LEND_sigreturn:
.size __kernel_sigreturn,.-.LSTART_sigreturn
.balign 32
.globl __kernel_rt_sigreturn
.type __kernel_rt_sigreturn,@function
__kernel_rt_sigreturn:
.LSTART_rt_sigreturn:
mov.w 1f, r3		! r3 = __NR_rt_sigreturn
trapa #0x10
or r0, r0
or r0, r0
or r0, r0
or r0, r0
or r0, r0
1: .short __NR_rt_sigreturn
.LEND_rt_sigreturn:
.size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
.previous
.section .eh_frame,"a",@progbits
.LCIE1:
.ualong .LCIE1_end - .LCIE1_start
.LCIE1_start:
.ualong 0 /* CIE ID */
.byte 0x1 /* Version number */
.string "zRS" /* NUL-terminated augmentation string */
.uleb128 0x1 /* Code alignment factor */
.sleb128 -4 /* Data alignment factor */
.byte 0x11 /* Return address register column */
.uleb128 0x1 /* Augmentation length and data */
.byte 0x1b /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */
.byte 0xc, 0xf, 0x0 /* DW_CFA_def_cfa: r15 ofs 0 */
.align 2
.LCIE1_end:
.ualong .LFDE0_end-.LFDE0_start /* Length FDE0 */
.LFDE0_start:
.ualong .LFDE0_start-.LCIE1 /* CIE pointer */
.ualong .LSTART_sigreturn-. /* PC-relative start address */
.ualong .LEND_sigreturn-.LSTART_sigreturn
.uleb128 0 /* Augmentation */
.align 2
.LFDE0_end:
.ualong .LFDE1_end-.LFDE1_start /* Length FDE1 */
.LFDE1_start:
.ualong .LFDE1_start-.LCIE1 /* CIE pointer */
.ualong .LSTART_rt_sigreturn-. /* PC-relative start address */
.ualong .LEND_rt_sigreturn-.LSTART_rt_sigreturn
.uleb128 0 /* Augmentation */
.align 2
.LFDE1_end:
.previous
|
aixcc-public/challenge-001-exemplar-source
| 1,689
|
arch/sh/kernel/cpu/sh3/ex.S
|
/* SPDX-License-Identifier: GPL-2.0
*
* arch/sh/kernel/cpu/sh3/ex.S
*
* The SH-3 and SH-4 exception vector table.
*
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka
* Copyright (C) 2003 - 2008 Paul Mundt
*/
#include <linux/linkage.h>
#if !defined(CONFIG_MMU)
#define tlb_miss_load exception_error
#define tlb_miss_store exception_error
#define initial_page_write exception_error
#define tlb_protection_violation_load exception_error
#define tlb_protection_violation_store exception_error
#define address_error_load exception_error
#define address_error_store exception_error
#endif
#if !defined(CONFIG_SH_FPU)
#define fpu_error_trap_handler exception_error
#endif
#if !defined(CONFIG_KGDB)
#define kgdb_handle_exception exception_error
#endif
.align 2
.data
! SH-3/SH-4 exception dispatch table: one 4-byte handler pointer per
! 0x20 of vector space (the dispatcher indexes it with EXPEVT >> 3,
! see handle_exception_special in entry.S). Unconfigured handlers
! alias exception_error via the #defines above.
ENTRY(exception_handling_table)
.long exception_error /* 000 */
.long exception_error
.long tlb_miss_load /* 040 */
.long tlb_miss_store
.long initial_page_write
.long tlb_protection_violation_load
.long tlb_protection_violation_store
.long address_error_load
.long address_error_store /* 100 */
.long fpu_error_trap_handler /* 120 */
.long exception_error /* 140 */
.long system_call ! Unconditional Trap /* 160 */
.long exception_error ! reserved_instruction (filled by trap_init) /* 180 */
.long exception_error ! illegal_slot_instruction (filled by trap_init) /*1A0*/
.long nmi_trap_handler /* 1C0 */ ! Allow trap to debugger
.long breakpoint_trap_handler /* 1E0 */
/*
* Pad the remainder of the table out, exceptions residing in far
* away offsets can be manually inserted in to their appropriate
* location via set_exception_table_{evt,vec}().
*/
.balign 4096,0,4096
|
aixcc-public/challenge-001-exemplar-source
| 10,714
|
arch/sh/kernel/cpu/sh3/entry.S
|
/* SPDX-License-Identifier: GPL-2.0
*
* arch/sh/kernel/cpu/sh3/entry.S
*
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka
* Copyright (C) 2003 - 2012 Paul Mundt
*/
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <cpu/mmu_context.h>
#include <asm/page.h>
#include <asm/cache.h>
! NOTE:
! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
! to be jumped is too far, but it causes illegal slot exception.
/*
* entry.S contains the system-call and fault low-level handling routines.
* This also contains the timer-interrupt handler, as well as all interrupts
* and faults that can result in a task-switch.
*
* NOTE: This code handles signal-recognition, which happens every time
* after a timer-interrupt and after each system call.
*
* NOTE: This code uses a convention that instructions in the delay slot
* of a transfer-control instruction are indented by an extra space, thus:
*
* jmp @k0 ! control-transfer instruction
* ldc k1, ssr ! delay slot
*
* Stack layout in 'ret_from_syscall':
* ptrace needs to have all regs on the stack.
* if the order here is changed, it needs to be
* updated in ptrace.c and ptrace.h
*
* r0
* ...
* r15 = stack pointer
* spc
* pr
* ssr
* gbr
* mach
* macl
* syscall #
*
*/
/* Offsets to the stack */
OFF_R0 = 0 /* Return value. New ABI also arg4 */
OFF_R1 = 4 /* New ABI: arg5 */
OFF_R2 = 8 /* New ABI: arg6 */
OFF_R3 = 12 /* New ABI: syscall_nr */
OFF_R4 = 16 /* New ABI: arg0 */
OFF_R5 = 20 /* New ABI: arg1 */
OFF_R6 = 24 /* New ABI: arg2 */
OFF_R7 = 28 /* New ABI: arg3 */
OFF_SP = (15*4)
OFF_PC = (16*4)
OFF_SR = (16*4+8)
OFF_TRA = (16*4+6*4)
#define k0 r0
#define k1 r1
#define k2 r2
#define k3 r3
#define k4 r4
#define g_imask r6 /* r6_bank1 */
#define k_g_imask r6_bank /* r6_bank1 */
#define current r7 /* r7_bank1 */
#include <asm/entry-macros.S>
/*
* Kernel mode register usage:
* k0 scratch
* k1 scratch
* k2 scratch (Exception code)
* k3 scratch (Return address)
* k4 scratch
* k5 reserved
* k6 Global Interrupt Mask (0--15 << 4)
* k7 CURRENT_THREAD_INFO (pointer to current thread info)
*/
!
! TLB Miss / Initial Page write exception handling
! _and_
! TLB hits, but the access violate the protection.
! It can be valid access, such as stack grow and/or C-O-W.
!
!
! Find the pmd/pte entry and loadtlb
! If it's not found, cause address error (SEGV)
!
! Although this could be written in assembly language (and it'd be faster),
! this first version depends *much* on C implementation.
!
#if defined(CONFIG_MMU)
! MMU fault entry stubs. Each loads a FAULT_CODE_* value into r5 in
! the branch delay slot, then funnels into a common tail that calls
! handle_tlbmiss()/do_page_fault() with r4 = pt_regs (stack pointer)
! and r6 = faulting address (read from MMU_TEA).
.align 2
ENTRY(tlb_miss_load)
bra call_handle_tlbmiss
mov #0, r5
.align 2
ENTRY(tlb_miss_store)
bra call_handle_tlbmiss
mov #FAULT_CODE_WRITE, r5
.align 2
ENTRY(initial_page_write)
bra call_handle_tlbmiss
mov #FAULT_CODE_INITIAL, r5
.align 2
ENTRY(tlb_protection_violation_load)
bra call_do_page_fault
mov #FAULT_CODE_PROT, r5
.align 2
ENTRY(tlb_protection_violation_store)
bra call_do_page_fault
mov #(FAULT_CODE_PROT | FAULT_CODE_WRITE), r5
call_handle_tlbmiss:
mov.l 1f, r0
mov r5, r8		! preserve fault code across the call
mov.l @r0, r6		! r6 = faulting address from MMU_TEA
mov.l 2f, r0
sts pr, r10		! preserve return address across the call
jsr @r0			! handle_tlbmiss(regs, fault_code, address)
mov r15, r4		! delay slot: r4 = pt_regs
!
tst r0, r0
bf/s 0f			! non-zero: fast path failed, take slow path
lds r10, pr		! delay slot: restore return address
rts			! TLB refilled — return to the faulting insn
nop
0:
mov r8, r5		! restore fault code for do_page_fault()
call_do_page_fault:
mov.l 1f, r0
mov.l @r0, r6		! r6 = faulting address from MMU_TEA
mov.l 3f, r0
mov.l 4f, r1
mov r15, r4		! r4 = pt_regs
jmp @r0			! tail-call do_page_fault(); it returns to...
lds r1, pr		! delay slot: ...ret_from_exception
.align 2
1: .long MMU_TEA
2: .long handle_tlbmiss
3: .long do_page_fault
4: .long ret_from_exception
.align 2
ENTRY(address_error_load)
bra call_dae
mov #0,r5 ! writeaccess = 0
.align 2
ENTRY(address_error_store)
bra call_dae
mov #1,r5 ! writeaccess = 1
.align 2
call_dae:
mov.l 1f, r0
mov.l @r0, r6 ! address
mov.l 2f, r0
jmp @r0			! tail-call do_address_error(regs, rw, address)
mov r15, r4 ! regs
.align 2
1: .long MMU_TEA
2: .long do_address_error
#endif /* CONFIG_MMU */
#if defined(CONFIG_SH_STANDARD_BIOS)
/* Unwind the stack and jmp to the debug entry */
! Pops the full exception frame via restore_regs, then transfers
! control to the debugger entry stored at gdb_vbr_vector.
ENTRY(sh_bios_handler)
mov.l 1f, r8		! r8 = SR bitmask passed to restore_regs
bsr restore_regs
nop
lds k2, pr ! restore pr
mov k4, r15		! restore the original stack pointer
!
mov.l 2f, k0
mov.l @k0, k0		! k0 = *gdb_vbr_vector
jmp @k0			! enter the BIOS/gdb stub
ldc k3, ssr		! delay slot: restore original SR for rte
.align 2
1: .long 0x300000f0
2: .long gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */
! restore_regs()
! - restore r0, r1, r2, r3, r4, r5, r6, r7 from the stack
! - switch bank
! - restore r8, r9, r10, r11, r12, r13, r14, r15 from the stack
! - restore spc, pr*, ssr, gbr, mach, macl, skip default tra
! k2 returns original pr
! k3 returns original sr
! k4 returns original stack pointer
! r8 passes SR bitmask, overwritten with restored data on return
! r9 trashed
! BL=0 on entry, on exit BL=1 (depending on r8).
! Exact inverse of save_regs: pops the exception frame laid out there.
ENTRY(restore_regs)
mov.l @r15+, r0
mov.l @r15+, r1
mov.l @r15+, r2
mov.l @r15+, r3
mov.l @r15+, r4
mov.l @r15+, r5
mov.l @r15+, r6
mov.l @r15+, r7
!
stc sr, r9
or r8, r9		! OR in the caller-supplied SR bits (bank/BL)
ldc r9, sr		! bank switch happens here
!
mov.l @r15+, r8
mov.l @r15+, r9
mov.l @r15+, r10
mov.l @r15+, r11
mov.l @r15+, r12
mov.l @r15+, r13
mov.l @r15+, r14
mov.l @r15+, k4 ! original stack pointer
ldc.l @r15+, spc
mov.l @r15+, k2 ! original PR
mov.l @r15+, k3 ! original SR
ldc.l @r15+, gbr
lds.l @r15+, mach
lds.l @r15+, macl
rts
add #4, r15 ! Skip syscall number
! restore_all: common exception/interrupt exit. Pops the frame, then
! rebuilds SSR so that the restored IMASK is either the interrupted
! context's own mask or the global mask (g_imask), and returns via rte.
restore_all:
mov.l 7f, r8		! SR bits for restore_regs (RB=1, BL=1)
bsr restore_regs
nop
lds k2, pr ! restore pr
!
! Calculate new SR value
mov k3, k2 ! original SR value
mov #0xfffffff0, k1
extu.b k1, k1		! k1 = 0x000000f0 (IMASK field)
not k1, k1
and k1, k2 ! Mask original SR value
!
mov k3, k0 ! Calculate IMASK-bits
shlr2 k0
and #0x3c, k0
cmp/eq #0x3c, k0	! was the original IMASK fully masked (15)?
bt/s 6f
shll2 k0		! delay slot: realign IMASK into position
mov g_imask, k0		! fully masked: use the global interrupt mask
!
6: or k0, k2 ! Set the IMASK-bits
ldc k2, ssr
!
mov k4, r15		! restore original stack pointer
rte
nop
.align 2
! NOTE(review): 5: (DSP bit) is not referenced in this excerpt;
! presumably used by CONFIG_SH_DSP code outside this view.
5: .long 0x00001000 ! DSP
7: .long 0x30000000
! common exception handler
#include "../../entry-common.S"
! Exception Vector Base
!
! Should be aligned page boundary.
!
.balign 4096,0,4096
ENTRY(vbr_base)
.long 0
!
! 0x100: General exception vector
!
.balign 256,0,256
general_exception:
bra handle_exception
sts pr, k3 ! save original pr value in k3
! prepare_stack()
! - roll back gRB
! - switch to kernel stack
! k0 returns original sp (after roll back)
! k1 trashed
! k2 trashed
prepare_stack:
#ifdef CONFIG_GUSA
! Check for roll back gRB (User and Kernel)
! gUSA atomic sequences mark themselves with a negative SP; if the
! saved PC lies inside the sequence, rewind it to the restart point.
mov r15, k0
shll k0			! T = SP bit31 (negative SP => gUSA region)
bf/s 1f
shll k0			! delay slot: T = SP bit30
bf/s 1f
stc spc, k1		! delay slot: k1 = saved PC
stc r0_bank, k0		! k0 = saved r0 (end of the gUSA sequence)
cmp/hs k0, k1 ! test k1 (saved PC) >= k0 (saved r0)
bt/s 2f
stc r1_bank, k1		! delay slot: k1 = saved r1 (real SP)
add #-2, k0
add r15, k0
ldc k0, spc ! PC = saved r0 + r15 - 2
2: mov k1, r15 ! SP = r1
1:
#endif
! Switch to kernel stack if needed
stc ssr, k0 ! Is it from kernel space?
shll k0 ! Check MD bit (bit30) by shifting it into...
shll k0 ! ...the T bit
bt/s 1f ! It's a kernel to kernel transition.
mov r15, k0 ! save original stack to k0
/* User space to kernel */
mov #(THREAD_SIZE >> 10), k1
shll8 k1 ! k1 := THREAD_SIZE
shll2 k1
add current, k1		! top of current thread's kernel stack
mov k1, r15 ! change to kernel stack
!
1:
rts
nop
!
! 0x400: Instruction and Data TLB miss exception vector
!
.balign 1024,0,1024
tlb_miss:
sts pr, k3 ! save original pr value in k3
! handle_exception: common exception body (general_exception branches
! here too). Builds the pt_regs frame, then dispatches through
! exception_handling_table indexed by the EXPEVT vector.
handle_exception:
mova exception_data, k0
! Setup stack and save DSP context (k0 contains original r15 on return)
bsr prepare_stack
PREF(k0)		! delay slot: prefetch the literal pool
! Save registers / Switch to bank 0
mov.l 5f, k2 ! vector register address
mov.l 1f, k4 ! SR bits to clear in k4
bsr save_regs ! needs original pr value in k3
mov.l @k2, k2 ! read out vector and keep in k2
handle_exception_special:
setup_frame_reg
! Setup return address and jump to exception handler
mov.l 7f, r9 ! fetch return address
stc r2_bank, r0 ! k2 (vector)
mov.l 6f, r10
shlr2 r0		! vector >> 3 = byte offset into the table
shlr r0			! (entries are 4 bytes per 0x20 of vector space)
mov.l @(r0, r10), r10
jmp @r10		! enter the handler; it returns to...
lds r9, pr ! put return address in pr
.align L1_CACHE_SHIFT
! save_regs()
! - save default tra, macl, mach, gbr, ssr, pr* and spc on the stack
! - save r15*, r14, r13, r12, r11, r10, r9, r8 on the stack
! - switch bank
! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
! k0 contains original stack pointer*
! k1 trashed
! k3 passes original pr*
! k4 passes SR bitmask
! BL=1 on entry, on exit BL=0.
! This lays out the pt_regs frame that restore_regs later unwinds.
ENTRY(save_regs)
mov #-1, r1
mov.l k1, @-r15 ! set TRA (default: -1)
sts.l macl, @-r15
sts.l mach, @-r15
stc.l gbr, @-r15
stc.l ssr, @-r15
mov.l k3, @-r15 ! original pr in k3
stc.l spc, @-r15
mov.l k0, @-r15 ! original stack pointer in k0
mov.l r14, @-r15
mov.l r13, @-r15
mov.l r12, @-r15
mov.l r11, @-r15
mov.l r10, @-r15
mov.l r9, @-r15
mov.l r8, @-r15
mov.l 0f, k3 ! SR bits to set in k3
! fall-through
! save_low_regs()
! - modify SR for bank switch
! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
! k3 passes bits to set in SR
! k4 passes bits to clear in SR
ENTRY(save_low_regs)
stc sr, r8
or k3, r8		! set requested bits (e.g. FD/IMASK)
and k4, r8		! clear requested bits (e.g. RB/BL)
ldc r8, sr		! bank switch happens here
mov.l r7, @-r15
mov.l r6, @-r15
mov.l r5, @-r15
mov.l r4, @-r15
mov.l r3, @-r15
mov.l r2, @-r15
mov.l r1, @-r15
rts
mov.l r0, @-r15		! delay slot: final push completes the frame
!
! 0x600: Interrupt / NMI vector
!
! Builds the same pt_regs frame as handle_exception, then either
! calls do_IRQ(vector, regs) for real IRQs or dispatches through
! exception_handling_table for events (e.g. NMI) that arrive via
! INTEVT but are not hard IRQs.
.balign 512,0,512
ENTRY(handle_interrupt)
sts pr, k3 ! save original pr value in k3
mova exception_data, k0
! Setup stack and save DSP context (k0 contains original r15 on return)
bsr prepare_stack
PREF(k0)		! delay slot: prefetch the literal pool
! Save registers / Switch to bank 0
mov.l 1f, k4 ! SR bits to clear in k4
bsr save_regs ! needs original pr value in k3
mov #-1, k2 ! default vector kept in k2
setup_frame_reg
stc sr, r0 ! get status register
shlr2 r0
and #0x3c, r0
cmp/eq #0x3c, r0	! only trace irqs-off when IMASK is fully masked
bf 9f
TRACE_IRQS_OFF
9:
! Setup return address and jump to do_IRQ
mov.l 4f, r9 ! fetch return address
lds r9, pr ! put return address in pr
mov.l 2f, r4
mov.l 3f, r9
mov.l @r4, r4 ! pass INTEVT vector as arg0
shlr2 r4
shlr r4			! vector >> 3 = handler-table byte offset
mov r4, r0 ! save vector->jmp table offset for later
shlr2 r4 ! vector to IRQ# conversion
mov #0x10, r5
cmp/hs r5, r4 ! is it a valid IRQ?
bt 10f
/*
 * We got here as a result of taking the INTEVT path for something
 * that isn't a valid hard IRQ, therefore we bypass the do_IRQ()
 * path and special case the event dispatch instead. This is the
 * expected path for the NMI (and any other brilliantly implemented
 * exception), which effectively wants regular exception dispatch
 * but is unfortunately reported through INTEVT rather than
 * EXPEVT. Grr.
 */
mov.l 6f, r9
mov.l @(r0, r9), r9	! r9 = exception_handling_table[vector >> 5]
jmp @r9
mov r15, r8 ! trap handlers take saved regs in r8
10:
jmp @r9 ! Off to do_IRQ() we go.
mov r15, r5 ! pass saved registers as arg1
! exception_none: placeholder handler that simply returns.
ENTRY(exception_none)
rts
nop
.align L1_CACHE_SHIFT
! Literal pool shared by the exception/interrupt entry paths above.
exception_data:
0: .long 0x000080f0 ! FD=1, IMASK=15
1: .long 0xcfffffff ! RB=0, BL=0
2: .long INTEVT
3: .long do_IRQ
4: .long ret_from_irq
5: .long EXPEVT
6: .long exception_handling_table
7: .long ret_from_exception
|
aixcc-public/challenge-001-exemplar-source
| 2,869
|
arch/sh/kernel/cpu/sh3/swsusp.S
|
/* SPDX-License-Identifier: GPL-2.0
*
* arch/sh/kernel/cpu/sh3/swsusp.S
*
* Copyright (C) 2009 Magnus Damm
*/
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#define k0 r0
#define k1 r1
#define k2 r2
#define k3 r3
#define k4 r4
! swsusp_arch_resume()
! - copy restore_pblist pages
! - restore registers from swsusp_arch_regs_cpu0
! swsusp_arch_resume: walk restore_pblist, copying each saved page
! back to its original address, then restore the full register state
! from swsusp_arch_regs_cpu0 (used as a stack) and rte back into the
! resumed kernel.
ENTRY(swsusp_arch_resume)
mov.l 1f, r15		! r15 = swsusp_arch_regs_cpu0 (register save area)
mov.l 2f, r4
mov.l @r4, r4		! r4 = restore_pblist head
swsusp_copy_loop:
mov r4, r0
cmp/eq #0, r0
bt swsusp_restore_regs	! end of list
mov.l @(PBE_ADDRESS, r4), r2	! r2 = source (saved copy)
mov.l @(PBE_ORIG_ADDRESS, r4), r5 ! r5 = destination (original page)
mov #(PAGE_SIZE >> 10), r3
shll8 r3
shlr2 r3 /* PAGE_SIZE / 16 */
swsusp_copy_page:
dt r3
mov.l @r2+,r1 /* 16n+0 */
mov.l r1,@r5
add #4,r5
mov.l @r2+,r1 /* 16n+4 */
mov.l r1,@r5
add #4,r5
mov.l @r2+,r1 /* 16n+8 */
mov.l r1,@r5
add #4,r5
mov.l @r2+,r1 /* 16n+12 */
mov.l r1,@r5
bf/s swsusp_copy_page
add #4,r5
bra swsusp_copy_loop
mov.l @(PBE_NEXT, r4), r4	! delay slot: advance to next pbe
swsusp_restore_regs:
! BL=0: R7->R0 is bank0
mov.l 3f, r8		! SR bits (RB=1) for restore_regs
mov.l 4f, r5
jsr @r5			! restore_regs: pop r8-r14, spc/pr/ssr/gbr/mac*
nop
! BL=1: R7->R0 is bank1
lds k2, pr
ldc k3, ssr
mov.l @r15+, r0		! restore bank0 r0-r7 saved by save_low_regs
mov.l @r15+, r1
mov.l @r15+, r2
mov.l @r15+, r3
mov.l @r15+, r4
mov.l @r15+, r5
mov.l @r15+, r6
mov.l @r15+, r7
rte			! resume at the spc set up by swsusp_arch_suspend
nop
! BL=0: R7->R0 is bank0
.align 2
1: .long swsusp_arch_regs_cpu0
2: .long restore_pblist
3: .long 0x20000000 ! RB=1
4: .long restore_regs
! swsusp_arch_suspend()
! - prepare pc for resume, return from function without swsusp_save on resume
! - save registers in swsusp_arch_regs_cpu0
! - call swsusp_save write suspend image
! On the suspend pass this falls through to swsusp_save(); on resume
! control re-enters at swsusp_call_save (via the spc set below) and
! returns 0 without calling swsusp_save again.
ENTRY(swsusp_arch_suspend)
sts pr, r0 ! save pr in r0
mov r15, r2 ! save sp in r2
mov r8, r5 ! save r8 in r5
stc sr, r1
ldc r1, ssr ! save sr in ssr
mov.l 1f, r1
ldc r1, spc ! setup pc value for resuming
mov.l 5f, r15 ! use swsusp_arch_regs_cpu0 as stack
mov.l 6f, r3
add r3, r15 ! save from top of structure
! BL=0: R7->R0 is bank0
mov.l 2f, r3 ! get new SR value for bank1
mov #0, r4
mov.l 7f, r1
jsr @r1 ! switch to bank1 and save bank1 r7->r0
not r4, r4		! delay slot: r4 = ~0, SR bits to keep (clear none)
! BL=1: R7->R0 is bank1
stc r2_bank, k0 ! fetch old sp from r2_bank0
mov.l 3f, k4 ! SR bits to clear in k4
mov.l 8f, k1
jsr @k1 ! switch to bank0 and save all regs
stc r0_bank, k3 ! fetch old pr from r0_bank0
! BL=0: R7->R0 is bank0
mov r2, r15 ! restore old sp
mov r5, r8 ! restore old r8
stc ssr, r1
ldc r1, sr ! restore old sr
lds r0, pr ! restore old pr
mov.l 4f, r0
jmp @r0			! tail-call swsusp_save() to write the image
nop
swsusp_call_save:
! Resume landing point (reached via rte in swsusp_arch_resume).
mov r2, r15 ! restore old sp
mov r5, r8 ! restore old r8
lds r0, pr ! restore old pr
rts
mov #0, r0		! delay slot: return 0 on the resume path
.align 2
1: .long swsusp_call_save
2: .long 0x20000000 ! RB=1
3: .long 0xdfffffff ! RB=0
4: .long swsusp_save
5: .long swsusp_arch_regs_cpu0
6: .long SWSUSP_ARCH_REGS_SIZE
7: .long save_low_regs
8: .long save_regs
|
aixcc-public/challenge-001-exemplar-source
| 4,993
|
arch/sh/kernel/cpu/sh2a/entry.S
|
/* SPDX-License-Identifier: GPL-2.0
*
* arch/sh/kernel/cpu/sh2a/entry.S
*
* The SH-2A exception entry
*
* Copyright (C) 2008 Yoshinori Sato
* Based on arch/sh/kernel/cpu/sh2/entry.S
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <cpu/mmu_context.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/page.h>
/* Offsets to the stack */
/*
 * Byte offsets of saved registers within the pt_regs frame that
 * exception_handler (below) builds on the kernel stack: r0..r15
 * first (OFF_SP = saved r15), then pc, and sr two words above pc
 * (a pr slot sits between them), with tra further up.
 */
OFF_R0 = 0 /* Return value. New ABI also arg4 */
OFF_R1 = 4 /* New ABI: arg5 */
OFF_R2 = 8 /* New ABI: arg6 */
OFF_R3 = 12 /* New ABI: syscall_nr */
OFF_R4 = 16 /* New ABI: arg0 */
OFF_R5 = 20 /* New ABI: arg1 */
OFF_R6 = 24 /* New ABI: arg2 */
OFF_R7 = 28 /* New ABI: arg3 */
OFF_SP = (15*4)
OFF_PC = (16*4)
OFF_SR = (16*4+2*4)
OFF_TRA = (16*4+6*4)
#include <asm/entry-macros.S>
!-----------------------------------------------------------------------
! exception_handler - common SH-2A exception / interrupt / trap entry.
! On entry the stack already holds r0, r1, pc, sr (see layout below),
! r1 carries a pseudo vector number (EXPEVT / INTEVT / TRA style).
! Builds a pt_regs frame on the kernel stack (switching stacks if the
! CPU was in user mode, tracked in __cpu_mode via SR.MD bit ops),
! then dispatches:  vector >= 64 -> do_IRQ,  31..63 -> system_call,
! otherwise -> handler from exception_handling_table.
!-----------------------------------------------------------------------
ENTRY(exception_handler)
! stack
! r0 <- point sp
! r1
! pc
! sr
! r0 = temporary
! r1 = vector (pseudo EXPEVT / INTEVT / TRA)
mov.l r2,@-sp
cli
mov.l $cpu_mode,r2
bld.b #6,@(0,r2) !previous SR.MD
bst.b #6,@(4*4,r15) !set cpu mode to SR.MD
bt 1f ! T set -> we were already in kernel mode
! switch to kernel mode
bset.b #6,@(0,r2) !set SR.MD
mov.l $current_thread_info,r2
mov.l @r2,r2
mov #(THREAD_SIZE >> 8),r0
shll8 r0 ! r0 = THREAD_SIZE
add r2,r0 ! r0 = kernel stack tail
mov r15,r2 ! r2 = user stack top
mov r0,r15 ! switch kernel stack
mov.l r1,@-r15 ! TRA
sts.l macl, @-r15
sts.l mach, @-r15
stc.l gbr, @-r15
mov.l @(4*4,r2),r0
mov.l r0,@-r15 ! original SR
sts.l pr,@-r15
mov.l @(3*4,r2),r0
mov.l r0,@-r15 ! original PC
mov r2,r0
add #(3+2)*4,r0 ! rewind r0 - r3 + exception frame
lds r0,pr ! pr = original SP
movmu.l r3,@-r15 ! save regs
mov r2,r8 ! r8 = previous stack top
mov r1,r9 ! r9 = interrupt vector
! restore previous stack
mov.l @r8+,r2
mov.l @r8+,r0
mov.l @r8+,r1
bra 2f
movml.l r2,@-r15 ! delay slot: save remaining low regs
1:
! in kernel exception
mov r15,r2
add #-((OFF_TRA + 4) - OFF_PC) + 5*4,r15
movmu.l r3,@-r15
mov r2,r8 ! r8 = previous stack top
mov r1,r9 ! r9 = interrupt vector
! restore exception frame & regs
mov.l @r8+,r2 ! old R2
mov.l @r8+,r0 ! old R0
mov.l @r8+,r1 ! old R1
mov.l @r8+,r10 ! old PC
mov.l @r8+,r11 ! old SR
movml.l r2,@-r15
mov.l r10,@(OFF_PC,r15)
mov.l r11,@(OFF_SR,r15)
mov.l r8,@(OFF_SP,r15) ! save old sp
mov r15,r8
add #OFF_TRA + 4,r8
mov.l r9,@-r8 ! store vector as TRA
sts.l macl,@-r8
sts.l mach,@-r8
stc.l gbr,@-r8
add #-4,r8 ! skip the sr slot (already written above)
sts.l pr,@-r8
2:
! dispatch exception / interrupt
mov #64,r8
cmp/hs r8,r9
bt interrupt_entry ! vec >= 64 is interrupt
mov #31,r8
cmp/hs r8,r9
bt trap_entry ! 64 > vec >= 31 is trap
mov.l 4f,r8
mov r9,r4 ! arg0 = vector
shll2 r9
add r9,r8 ! index exception_handling_table by vector
mov.l @r8,r8 ! exception handler address
tst r8,r8
bf 3f
mov.l 8f,r8 ! unhandled exception
3:
mov.l 5f,r10
jmp @r8 ! call handler; it returns to ret_from_exception
lds r10,pr
interrupt_entry:
mov r9,r4 ! arg0 = irq
mov r15,r5 ! arg1 = pt_regs
mov.l 7f,r8
mov.l 6f,r9
jmp @r8 ! do_IRQ(irq, regs); returns to ret_from_irq
lds r9,pr
.align 2
4: .long exception_handling_table
5: .long ret_from_exception
6: .long ret_from_irq
7: .long do_IRQ
8: .long exception_error
! trap_entry - normalize the trap vector and enter the common
! system call path with r8 = TRA-style value (vector << 2).
trap_entry:
mov #0x30,r8
cmp/ge r8,r9 ! vector 0x1f-0x2f is systemcall
bt 1f
mov #0x1f,r9 ! convert to unified SH2/3/4 trap number
1:
shll2 r9 ! TRA
bra system_call ! jump common systemcall entry
mov r9,r8 ! delay slot: r8 = TRA
#if defined(CONFIG_SH_STANDARD_BIOS)
/* Unwind the stack and jmp to the debug entry */
/*
 * sh_bios_handler - hand the interrupted context to the gdb stub.
 * Restores gbr/mach/macl from the saved frame, pushes the saved SR
 * and SP onto the interrupted context's own stack (r1), rewrites the
 * top of the frame so that rte resumes at *gdb_vbr_vector, then
 * unwinds the general registers and issues rte.
 *
 * BUGFIX: the saved SR/SP were previously loaded via @(OFF_SR,r2) /
 * @(OFF_SP,r2), but r2 is never initialized in this routine; the
 * frame base is r0 (= r15), exactly as the SH-2 variant of this
 * handler reads them.  SH-2A's 12-bit-displacement mov.l encodes
 * @(OFF_SR,r0) directly even though OFF_SR (72) exceeds the classic
 * 4-bit displacement range.
 */
ENTRY(sh_bios_handler)
mov r15,r0
add #(22-4)*4-4,r0
ldc.l @r0+,gbr
lds.l @r0+,mach
lds.l @r0+,macl
mov r15,r0
mov.l @(OFF_SP,r0),r1 ! r1 = interrupted context's stack pointer
mov.l @(OFF_SR,r0),r3 ! was @(OFF_SR,r2): r2 is uninitialized here
mov.l r3,@-r1 ! push saved SR for the debugger
mov.l @(OFF_SP,r0),r3 ! was @(OFF_SP,r2): r2 is uninitialized here
mov.l r3,@-r1 ! push saved SP for the debugger
mov r15,r0
add #(22-4)*4-8,r0
mov.l 1f,r2
mov.l @r2,r2 ! r2 = *gdb_vbr_vector (debug entry point)
stc sr,r3
mov.l r2,@r0 ! rewrite frame: resume pc = debug entry
mov.l r3,@(4,r0) ! resume sr = current sr
mov.l r1,@(8,r0) ! resume sp = adjusted stack
movml.l @r15+,r14 ! reload general registers from the frame
add #8,r15
lds.l @r15+, pr
mov.l @r15+,r15
rte ! jump into the gdb stub
nop
.align 2
1: .long gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */
!-----------------------------------------------------------------------
! address_error_trap_handler - forward an address-error exception to
! the C handler do_address_error with r4 = pt_regs, r5 = 0 (the
! read/write direction cannot be determined here), r6 = faulting pc.
!-----------------------------------------------------------------------
ENTRY(address_error_trap_handler)
mov r15,r4 ! regs
mov.l @(OFF_PC,r15),r6 ! pc
mov.l 1f,r0
jmp @r0
mov #0,r5 ! writeaccess is unknown (delay slot)
.align 2
1: .long do_address_error
!-----------------------------------------------------------------------
! restore_all - return-to-context path: blocks interrupts, restores
! the special registers from the pt_regs frame, records the resumed
! privilege level in __cpu_mode, rebuilds a 2-word exception frame
! (pc, sr) just below the saved SP, reloads the general registers and
! returns with rte.
!-----------------------------------------------------------------------
restore_all:
stc sr,r0
or #0xf0,r0
ldc r0,sr ! all interrupt block (same BL = 1)
! restore special register
! overlap exception frame
mov r15,r0
add #17*4,r0
lds.l @r0+,pr
add #4,r0
ldc.l @r0+,gbr
lds.l @r0+,mach
lds.l @r0+,macl
mov r15,r0
mov.l $cpu_mode,r2
bld.b #6,@(OFF_SR,r15)
bst.b #6,@(0,r2) ! save CPU mode
mov.l @(OFF_SR,r0),r1
shll2 r1
shlr2 r1 ! clear MD bit
mov.l @(OFF_SP,r0),r2
add #-8,r2 ! make room for the pc/sr pair rte will pop
mov.l r2,@(OFF_SP,r0) ! point exception frame top
mov.l r1,@(4,r2) ! set sr
mov.l @(OFF_PC,r0),r1
mov.l r1,@r2 ! set pc
get_current_thread_info r0, r1
mov.l $current_thread_info,r1
mov.l r0,@r1 ! cache thread_info for the next entry
movml.l @r15+,r14
mov.l @r15,r15 ! switch to the rebuilt exception frame
rte
nop
.align 2
$current_thread_info:
.long __current_thread_info
$cpu_mode:
.long __cpu_mode
! common exception handler
#include "../../entry-common.S"
.data
! cpu operation mode
! bit30 = MD (compatible SH3/4)
__cpu_mode:
.long 0x40000000
.section .bss
__current_thread_info:
.long 0
! table of per-vector exception handlers, indexed by vector number
ENTRY(exception_handling_table)
.space 4*32
|
aixcc-public/challenge-001-exemplar-source
| 6,792
|
arch/sh/kernel/cpu/sh2/entry.S
|
/* SPDX-License-Identifier: GPL-2.0
*
* arch/sh/kernel/cpu/sh2/entry.S
*
* The SH-2 exception entry
*
* Copyright (C) 2005-2008 Yoshinori Sato
* Copyright (C) 2005 AXE,Inc.
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <cpu/mmu_context.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/page.h>
/* Offsets to the stack */
/*
 * Byte offsets of saved registers within the pt_regs frame that
 * exception_handler (below) builds on the kernel stack: r0..r15
 * first (OFF_SP = saved r15), then pc, and sr two words above pc,
 * with tra further up.
 */
OFF_R0 = 0 /* Return value. New ABI also arg4 */
OFF_R1 = 4 /* New ABI: arg5 */
OFF_R2 = 8 /* New ABI: arg6 */
OFF_R3 = 12 /* New ABI: syscall_nr */
OFF_R4 = 16 /* New ABI: arg0 */
OFF_R5 = 20 /* New ABI: arg1 */
OFF_R6 = 24 /* New ABI: arg2 */
OFF_R7 = 28 /* New ABI: arg3 */
OFF_SP = (15*4)
OFF_PC = (16*4)
OFF_SR = (16*4+2*4)
OFF_TRA = (16*4+6*4)
#include <asm/entry-macros.S>
!-----------------------------------------------------------------------
! exception_handler - common SH-2 exception / interrupt / trap entry.
! On entry the stack already holds r0, r1, pc, sr; r1 carries a
! pseudo vector number (EXPEVT / INTEVT / TRA style).  The previous
! privilege level is tracked in software via the per-cpu __cpu_mode
! word (SH-2 has no SR.MD).  Builds a pt_regs frame on the kernel
! stack (switching stacks when coming from user mode), then
! dispatches: vector >= 64 -> do_IRQ, 31..63 -> system_call (plus,
! on J2, 16..30 -> do_IRQ), else exception_handling_table.
! Under CONFIG_SMP, $cpuid is double-dereferenced: literal ->
! sh2_cpuid_addr -> cpu id word, used to index the per-cpu arrays.
!-----------------------------------------------------------------------
ENTRY(exception_handler)
! stack
! r0 <- point sp
! r1
! pc
! sr
! r0 = temporary
! r1 = vector (pseudo EXPEVT / INTEVT / TRA)
mov.l r2,@-sp
mov.l r3,@-sp
cli
mov.l $cpu_mode,r2
#ifdef CONFIG_SMP
mov.l $cpuid,r3
mov.l @r3,r3
mov.l @r3,r3
shll2 r3
add r3,r2 ! r2 = &__cpu_mode[cpu]
#endif
mov.l @r2,r0
mov.l @(5*4,r15),r3 ! previous SR
or r0,r3 ! set MD
tst r0,r0
bf/s 1f ! previous mode check
mov.l r3,@(5*4,r15) ! update SR
! switch to kernel mode
mov.l __md_bit,r0
mov.l r0,@r2 ! enter kernel mode
mov.l $current_thread_info,r2
#ifdef CONFIG_SMP
mov.l $cpuid,r0
mov.l @r0,r0
mov.l @r0,r0
shll2 r0
add r0,r2 ! r2 = &__current_thread_info[cpu]
#endif
mov.l @r2,r2
mov #(THREAD_SIZE >> 8),r0
shll8 r0 ! r0 = THREAD_SIZE
add r2,r0 ! r0 = kernel stack tail
mov r15,r2 ! r2 = user stack top
mov r0,r15 ! switch kernel stack
mov.l r1,@-r15 ! TRA
sts.l macl, @-r15
sts.l mach, @-r15
stc.l gbr, @-r15
mov.l @(5*4,r2),r0
mov.l r0,@-r15 ! original SR
sts.l pr,@-r15
mov.l @(4*4,r2),r0
mov.l r0,@-r15 ! original PC
mov r2,r3
add #(4+2)*4,r3 ! rewind r0 - r3 + exception frame
mov.l r3,@-r15 ! original SP
mov.l r14,@-r15
mov.l r13,@-r15
mov.l r12,@-r15
mov.l r11,@-r15
mov.l r10,@-r15
mov.l r9,@-r15
mov.l r8,@-r15
mov.l r7,@-r15
mov.l r6,@-r15
mov.l r5,@-r15
mov.l r4,@-r15
mov r1,r9 ! save TRA
mov r2,r8 ! copy user -> kernel stack
mov.l @(0,r8),r3
mov.l r3,@-r15
mov.l @(4,r8),r2
mov.l r2,@-r15
mov.l @(12,r8),r1
mov.l r1,@-r15
mov.l @(8,r8),r0
bra 2f
mov.l r0,@-r15 ! delay slot: store old r0
1:
! in kernel exception
mov #(22-4-4-1)*4+4,r0
mov r15,r2
sub r0,r15 ! reserve the pt_regs frame in place
mov.l @r2+,r0 ! old R3
mov.l r0,@-r15
mov.l @r2+,r0 ! old R2
mov.l r0,@-r15
mov.l @(4,r2),r0 ! old R1
mov.l r0,@-r15
mov.l @r2,r0 ! old R0
mov.l r0,@-r15
add #8,r2
mov.l @r2+,r3 ! old PC
mov.l @r2+,r0 ! old SR
add #-4,r2 ! exception frame stub (sr)
mov.l r1,@-r2 ! TRA
sts.l macl, @-r2
sts.l mach, @-r2
stc.l gbr, @-r2
mov.l r0,@-r2 ! save old SR
sts.l pr,@-r2
mov.l r3,@-r2 ! save old PC
mov r2,r0
add #8*4,r0
mov.l r0,@-r2 ! save old SP
mov.l r14,@-r2
mov.l r13,@-r2
mov.l r12,@-r2
mov.l r11,@-r2
mov.l r10,@-r2
mov.l r9,@-r2
mov.l r8,@-r2
mov.l r7,@-r2
mov.l r6,@-r2
mov.l r5,@-r2
mov.l r4,@-r2
mov r1,r9 ! r9 = vector
mov.l @(OFF_R0,r15),r0
mov.l @(OFF_R1,r15),r1
mov.l @(OFF_R2,r15),r2
mov.l @(OFF_R3,r15),r3
2:
! dispatch: r9 = vector, frame complete at r15
mov #64,r8
cmp/hs r8,r9
bt interrupt_entry ! vec >= 64 is interrupt
mov #31,r8
cmp/hs r8,r9
bt trap_entry ! 64 > vec >= 31 is trap
#ifdef CONFIG_CPU_J2
mov #16,r8
cmp/hs r8,r9
bt interrupt_entry ! 31 > vec >= 16 is interrupt
#endif
mov.l 4f,r8
mov r9,r4 ! arg0 = vector
shll2 r9
add r9,r8 ! index exception_handling_table
mov.l @r8,r8 ! exception handler address
tst r8,r8
bf 3f
mov.l 8f,r8 ! unhandled exception
3:
mov.l 5f,r10
jmp @r8 ! call handler; returns to ret_from_exception
lds r10,pr
interrupt_entry:
mov r9,r4 ! arg0 = irq
mov r15,r5 ! arg1 = pt_regs
mov.l 6f,r9
mov.l 7f,r8
jmp @r8 ! do_IRQ(irq, regs); returns to ret_from_irq
lds r9,pr
.align 2
4: .long exception_handling_table
5: .long ret_from_exception
6: .long ret_from_irq
7: .long do_IRQ
8: .long exception_error
! trap_entry - normalize the trap vector and enter the common
! system call path with r8 = TRA-style value (vector << 2).
trap_entry:
mov #0x30,r8
cmp/ge r8,r9 ! vector 0x1f-0x2f is systemcall
bt 1f
mov #0x1f,r9 ! convert to unified SH2/3/4 trap number
1:
shll2 r9 ! TRA
bra system_call ! jump common systemcall entry
mov r9,r8 ! delay slot: r8 = TRA
#if defined(CONFIG_SH_STANDARD_BIOS)
/* Unwind the stack and jmp to the debug entry */
/*
 * sh_bios_handler - hand the interrupted context to the gdb stub.
 * Restores gbr/mach/macl from the saved frame, pushes the saved SR
 * and SP onto the interrupted context's own stack (r1), rewrites the
 * top of the frame so that rte resumes at *gdb_vbr_vector, then
 * reloads r0-r14/pr and issues rte.  SH-2 lacks the 12-bit
 * displacement mov.l, so SR/SP are read with the r0-indexed form.
 */
ENTRY(sh_bios_handler)
mov r15,r0
add #(22-4)*4-4,r0
ldc.l @r0+,gbr
lds.l @r0+,mach
lds.l @r0+,macl
mov r15,r0
mov.l @(OFF_SP,r0),r1 ! r1 = interrupted context's stack pointer
mov #OFF_SR,r2
mov.l @(r0,r2),r3 ! saved SR
mov.l r3,@-r1
mov #OFF_SP,r2
mov.l @(r0,r2),r3 ! saved SP
mov.l r3,@-r1
mov r15,r0
add #(22-4)*4-8,r0
mov.l 1f,r2
mov.l @r2,r2 ! r2 = *gdb_vbr_vector (debug entry point)
stc sr,r3
mov.l r2,@r0 ! rewrite frame: resume pc = debug entry
mov.l r3,@(4,r0) ! resume sr = current sr
mov.l r1,@(8,r0) ! resume sp = adjusted stack
mov.l @r15+, r0
mov.l @r15+, r1
mov.l @r15+, r2
mov.l @r15+, r3
mov.l @r15+, r4
mov.l @r15+, r5
mov.l @r15+, r6
mov.l @r15+, r7
mov.l @r15+, r8
mov.l @r15+, r9
mov.l @r15+, r10
mov.l @r15+, r11
mov.l @r15+, r12
mov.l @r15+, r13
mov.l @r15+, r14
add #8,r15
lds.l @r15+, pr
mov.l @r15+,r15
rte ! jump into the gdb stub
nop
.align 2
1: .long gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */
!-----------------------------------------------------------------------
! address_error_trap_handler - forward an address-error exception to
! the C handler do_address_error with r4 = pt_regs, r5 = 0 (the
! read/write direction cannot be determined here), r6 = faulting pc.
! Uses the r0-indexed load since OFF_PC exceeds SH-2's disp4 range.
!-----------------------------------------------------------------------
ENTRY(address_error_trap_handler)
mov r15,r4 ! regs
mov #OFF_PC,r0
mov.l @(r0,r15),r6 ! pc
mov.l 1f,r0
jmp @r0
mov #0,r5 ! writeaccess is unknown (delay slot)
.align 2
1: .long do_address_error
!-----------------------------------------------------------------------
! restore_all - return-to-context path: blocks interrupts, restores
! the special registers from the pt_regs frame, records the resumed
! privilege level in the per-cpu __cpu_mode word (software MD bit on
! SH-2), rebuilds a 2-word exception frame (pc, sr) just below the
! saved SP, reloads r0-r15 and returns with rte.
!-----------------------------------------------------------------------
restore_all:
stc sr,r0
or #0xf0,r0
ldc r0,sr ! all interrupt block (same BL = 1)
! restore special register
! overlap exception frame
mov r15,r0
add #17*4,r0
lds.l @r0+,pr
add #4,r0
ldc.l @r0+,gbr
lds.l @r0+,mach
lds.l @r0+,macl
mov r15,r0
mov.l $cpu_mode,r2
#ifdef CONFIG_SMP
mov.l $cpuid,r3
mov.l @r3,r3 ! literal -> sh2_cpuid_addr
mov.l @r3,r3 ! -> cpu id
shll2 r3
add r3,r2 ! r2 = &__cpu_mode[cpu]
#endif
mov #OFF_SR,r3
mov.l @(r0,r3),r1
mov.l __md_bit,r3
and r1,r3 ! copy MD bit
mov.l r3,@r2
shll2 r1 ! clear MD bit
shlr2 r1
mov.l @(OFF_SP,r0),r2
add #-8,r2 ! make room for the pc/sr pair rte will pop
mov.l r2,@(OFF_SP,r0) ! point exception frame top
mov.l r1,@(4,r2) ! set sr
mov #OFF_PC,r3
mov.l @(r0,r3),r1
mov.l r1,@r2 ! set pc
get_current_thread_info r0, r1
mov.l $current_thread_info,r1
#ifdef CONFIG_SMP
mov.l $cpuid,r3
mov.l @r3,r3
mov.l @r3,r3
shll2 r3
add r3,r1 ! r1 = &__current_thread_info[cpu]
#endif
mov.l r0,@r1 ! cache thread_info for the next entry
mov.l @r15+,r0
mov.l @r15+,r1
mov.l @r15+,r2
mov.l @r15+,r3
mov.l @r15+,r4
mov.l @r15+,r5
mov.l @r15+,r6
mov.l @r15+,r7
mov.l @r15+,r8
mov.l @r15+,r9
mov.l @r15+,r10
mov.l @r15+,r11
mov.l @r15+,r12
mov.l @r15+,r13
mov.l @r15+,r14
mov.l @r15,r15 ! switch to the rebuilt exception frame
rte
nop
.align 2
__md_bit:
.long 0x40000000
$current_thread_info:
.long __current_thread_info
$cpu_mode:
.long __cpu_mode
#ifdef CONFIG_SMP
$cpuid:
.long sh2_cpuid_addr
#endif
! common exception handler
#include "../../entry-common.S"
#ifdef CONFIG_NR_CPUS
#define NR_CPUS CONFIG_NR_CPUS
#else
#define NR_CPUS 1
#endif
.data
! cpu operation mode
! bit30 = MD (compatible SH3/4)
__cpu_mode:
.rept NR_CPUS
.long 0x40000000
.endr
#ifdef CONFIG_SMP
! indirection cell: platform code points this at the hw cpu id word
.global sh2_cpuid_addr
sh2_cpuid_addr:
.long dummy_cpuid
dummy_cpuid:
.long 0
#endif
.section .bss
__current_thread_info:
.rept NR_CPUS
.long 0
.endr
! table of per-vector exception handlers, indexed by vector number
ENTRY(exception_handling_table)
.space 4*32
|
aixcc-public/challenge-001-exemplar-source
| 6,877
|
arch/sh/kernel/cpu/shmobile/sleep.S
|
/* SPDX-License-Identifier: GPL-2.0
*
* arch/sh/kernel/cpu/sh4a/sleep-sh_mobile.S
*
* Sleep mode and Standby modes support for SuperH Mobile
*
* Copyright (C) 2009 Magnus Damm
*/
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/suspend.h>
/*
* Kernel mode register usage, see entry.S:
* k0 scratch
* k1 scratch
*/
#define k0 r0
#define k1 r1
/* k0/k1 aliases match the scratch-register convention noted above;
 * used below while SR selects the kernel register bank. */
!-----------------------------------------------------------------------
! sh_mobile_sleep_enter_start..end - enter sleep/standby.
! r4 = mode flags (SUSP_SH_*), r5 = base of the on-chip save area
! (also installed as vbr so the resume stub can find it).  Saves
! vbr/pr/sr and, when SUSP_SH_REGS is set, both register banks on the
! stack; optionally saves and disables MMU and caches
! (SUSP_SH_MMU); runs the board self-refresh hook (SUSP_SH_SF);
! finally programs STBCR for the selected standby flavor and sleeps.
!-----------------------------------------------------------------------
.balign 4
ENTRY(sh_mobile_sleep_enter_start)
/* save mode flags */
mov.l r4, @(SH_SLEEP_MODE, r5)
/* save original vbr */
stc vbr, r0
mov.l r0, @(SH_SLEEP_VBR, r5)
/* point vbr to our on-chip memory page */
ldc r5, vbr
/* save return address */
sts pr, r0
mov.l r0, @(SH_SLEEP_SPC, r5)
/* save sr */
stc sr, r0
mov.l r0, @(SH_SLEEP_SR, r5)
/* save general purpose registers to stack if needed */
mov.l @(SH_SLEEP_MODE, r5), r0
tst #SUSP_SH_REGS, r0
bt skip_regs_save
sts.l pr, @-r15
mov.l r14, @-r15
mov.l r13, @-r15
mov.l r12, @-r15
mov.l r11, @-r15
mov.l r10, @-r15
mov.l r9, @-r15
mov.l r8, @-r15
/* make sure bank0 is selected, save low registers */
mov.l rb_bit, r9
not r9, r9
bsr set_sr
mov #0, r10
bsr save_low_regs
nop
/* switch to bank 1, save low registers */
mov.l rb_bit, r10
bsr set_sr
mov #-1, r9
bsr save_low_regs
nop
/* switch back to bank 0 */
mov.l rb_bit, r9
not r9, r9
bsr set_sr
mov #0, r10
skip_regs_save:
/* save sp, also set to internal ram */
mov.l r15, @(SH_SLEEP_SP, r5)
mov r5, r15
/* save stbcr */
bsr save_register
mov #SH_SLEEP_REG_STBCR, r0
/* save mmu and cache context if needed */
mov.l @(SH_SLEEP_MODE, r5), r0
tst #SUSP_SH_MMU, r0
bt skip_mmu_save_disable
/* save mmu state */
bsr save_register
mov #SH_SLEEP_REG_PTEH, r0
bsr save_register
mov #SH_SLEEP_REG_PTEL, r0
bsr save_register
mov #SH_SLEEP_REG_TTB, r0
bsr save_register
mov #SH_SLEEP_REG_TEA, r0
bsr save_register
mov #SH_SLEEP_REG_MMUCR, r0
bsr save_register
mov #SH_SLEEP_REG_PTEA, r0
bsr save_register
mov #SH_SLEEP_REG_PASCR, r0
bsr save_register
mov #SH_SLEEP_REG_IRMCR, r0
/* invalidate TLBs and disable the MMU */
bsr get_register
mov #SH_SLEEP_REG_MMUCR, r0
mov #4, r1
mov.l r1, @r0
icbi @r0 ! serialize after touching MMUCR
/* save cache registers and disable caches */
bsr save_register
mov #SH_SLEEP_REG_CCR, r0
bsr save_register
mov #SH_SLEEP_REG_RAMCR, r0
bsr get_register
mov #SH_SLEEP_REG_CCR, r0
mov #0, r1
mov.l r1, @r0
icbi @r0 ! serialize after touching CCR
skip_mmu_save_disable:
/* call self-refresh entering code if needed */
mov.l @(SH_SLEEP_MODE, r5), r0
tst #SUSP_SH_SF, r0
bt skip_set_sf
mov.l @(SH_SLEEP_SF_PRE, r5), r0
jsr @r0 ! board-provided pre-sleep hook
nop
skip_set_sf:
mov.l @(SH_SLEEP_MODE, r5), r0
tst #SUSP_SH_STANDBY, r0
bt test_rstandby
/* set mode to "software standby mode" */
bra do_sleep
mov #0x80, r1
test_rstandby:
tst #SUSP_SH_RSTANDBY, r0
bt test_ustandby
/* setup BAR register */
bsr get_register
mov #SH_SLEEP_REG_BAR, r0
mov.l @(SH_SLEEP_RESUME, r5), r1
mov.l r1, @r0 ! BAR = resume entry point
/* set mode to "r-standby mode" */
bra do_sleep
mov #0x20, r1
test_ustandby:
tst #SUSP_SH_USTANDBY, r0
bt force_sleep
/* set mode to "u-standby mode" */
bra do_sleep
mov #0x10, r1
force_sleep:
/* set mode to "sleep mode" */
mov #0x00, r1
do_sleep:
/* setup and enter selected standby mode */
bsr get_register
mov #SH_SLEEP_REG_STBCR, r0
mov.l r1, @r0 ! write mode bits to STBCR
again:
sleep
bra again ! loop in case of spurious wakeup
nop
! save_register: r0 = SH_SLEEP_REG_* index.  Reads the register via
! the address stored in the save area's ADDR table and stores the
! value into the DATA table.  Clobbers r1.
save_register:
add #SH_SLEEP_BASE_ADDR, r0
mov.l @(r0, r5), r1
add #-SH_SLEEP_BASE_ADDR, r0
mov.l @r1, r1
add #SH_SLEEP_BASE_DATA, r0
mov.l r1, @(r0, r5)
add #-SH_SLEEP_BASE_DATA, r0
rts
nop
! get_register: r0 = SH_SLEEP_REG_* index -> r0 = register address.
get_register:
add #SH_SLEEP_BASE_ADDR, r0
mov.l @(r0, r5), r0
rts
nop
! set_sr: sr = (sr & r9) | r10.  Clobbers r8.
set_sr:
stc sr, r8
and r9, r8
or r10, r8
ldc r8, sr
rts
nop
! save_low_regs: push r7..r0 of the currently selected bank.
save_low_regs:
mov.l r7, @-r15
mov.l r6, @-r15
mov.l r5, @-r15
mov.l r4, @-r15
mov.l r3, @-r15
mov.l r2, @-r15
mov.l r1, @-r15
rts
mov.l r0, @-r15 ! delay slot
.balign 4
rb_bit: .long 0x20000000 ! RB=1
ENTRY(sh_mobile_sleep_enter_end)
!-----------------------------------------------------------------------
! sh_mobile_sleep_resume_start..end - resume from sleep/standby.
! Recovers the save-area base from its own (page-aligned-ish) pc via
! the ~0x7ff mask, restores sr/vbr/spc/sp, re-enables MMU and caches
! if they were saved, restores both register banks when SUSP_SH_REGS
! was used, and finally rte's back to the saved context.
!-----------------------------------------------------------------------
.balign 4
ENTRY(sh_mobile_sleep_resume_start)
/* figure out start address */
bsr 0f
nop
0:
sts pr, k1 ! k1 = address of label 0 above
mov.l 1f, k0
and k0, k1 ! mask low bits -> save-area base
/* store pointer to data area in VBR */
ldc k1, vbr
/* setup sr with saved sr */
mov.l @(SH_SLEEP_SR, k1), k0
ldc k0, sr
/* now: user register set! */
stc vbr, r5
/* setup spc with return address to c code */
mov.l @(SH_SLEEP_SPC, r5), r0
ldc r0, spc
/* restore vbr */
mov.l @(SH_SLEEP_VBR, r5), r0
ldc r0, vbr
/* setup ssr with saved sr */
mov.l @(SH_SLEEP_SR, r5), r0
ldc r0, ssr
/* restore sp */
mov.l @(SH_SLEEP_SP, r5), r15
/* restore sleep mode register */
bsr restore_register
mov #SH_SLEEP_REG_STBCR, r0
/* call self-refresh resume code if needed */
mov.l @(SH_SLEEP_MODE, r5), r0
tst #SUSP_SH_SF, r0
bt skip_restore_sf
mov.l @(SH_SLEEP_SF_POST, r5), r0
jsr @r0 ! board-provided post-sleep hook
nop
skip_restore_sf:
/* restore mmu and cache state if needed */
mov.l @(SH_SLEEP_MODE, r5), r0
tst #SUSP_SH_MMU, r0
bt skip_restore_mmu
/* restore mmu state */
bsr restore_register
mov #SH_SLEEP_REG_PTEH, r0
bsr restore_register
mov #SH_SLEEP_REG_PTEL, r0
bsr restore_register
mov #SH_SLEEP_REG_TTB, r0
bsr restore_register
mov #SH_SLEEP_REG_TEA, r0
bsr restore_register
mov #SH_SLEEP_REG_PTEA, r0
bsr restore_register
mov #SH_SLEEP_REG_PASCR, r0
bsr restore_register
mov #SH_SLEEP_REG_IRMCR, r0
bsr restore_register
mov #SH_SLEEP_REG_MMUCR, r0
icbi @r0 ! serialize after touching MMUCR
/* restore cache settings */
bsr restore_register
mov #SH_SLEEP_REG_RAMCR, r0
icbi @r0
bsr restore_register
mov #SH_SLEEP_REG_CCR, r0
icbi @r0
skip_restore_mmu:
/* restore general purpose registers if needed */
mov.l @(SH_SLEEP_MODE, r5), r0
tst #SUSP_SH_REGS, r0
bt skip_restore_regs
/* switch to bank 1, restore low registers */
mov.l _rb_bit, r10
bsr _set_sr
mov #-1, r9
bsr restore_low_regs
nop
/* switch to bank0, restore low registers */
mov.l _rb_bit, r9
not r9, r9
bsr _set_sr
mov #0, r10
bsr restore_low_regs
nop
/* restore the rest of the registers */
mov.l @r15+, r8
mov.l @r15+, r9
mov.l @r15+, r10
mov.l @r15+, r11
mov.l @r15+, r12
mov.l @r15+, r13
mov.l @r15+, r14
lds.l @r15+, pr
skip_restore_regs:
rte ! return to saved spc/ssr context
nop
! restore_register: r0 = SH_SLEEP_REG_* index.  Writes the value from
! the save area's DATA table to the address in its ADDR table.
restore_register:
add #SH_SLEEP_BASE_DATA, r0
mov.l @(r0, r5), r1
add #-SH_SLEEP_BASE_DATA, r0
add #SH_SLEEP_BASE_ADDR, r0
mov.l @(r0, r5), r0
mov.l r1, @r0
rts
nop
! _set_sr: sr = (sr & r9) | r10.  Clobbers r8.
_set_sr:
stc sr, r8
and r9, r8
or r10, r8
ldc r8, sr
rts
nop
! restore_low_regs: pop r0..r7 of the currently selected bank.
restore_low_regs:
mov.l @r15+, r0
mov.l @r15+, r1
mov.l @r15+, r2
mov.l @r15+, r3
mov.l @r15+, r4
mov.l @r15+, r5
mov.l @r15+, r6
rts
mov.l @r15+, r7 ! delay slot
.balign 4
_rb_bit: .long 0x20000000 ! RB=1
1: .long ~0x7ff
ENTRY(sh_mobile_sleep_resume_end)
|
aixcc-public/challenge-001-exemplar-source
| 2,430
|
arch/sh/boot/compressed/head_32.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/arch/sh/boot/compressed/head.S
*
* Copyright (C) 1999 Stuart Menefy
* Copyright (C) 2003 SUGIOKA Toshinobu
*/
.text
#include <asm/page.h>
!-----------------------------------------------------------------------
! startup - zImage loader entry: set a known SR, relocate this image
! to its linked address if it was loaded elsewhere (copying 32 bytes
! per iteration, downwards from the bss start to the text start),
! clear BSS, set the stack, call decompress_kernel() and jump to the
! decompressed kernel.
!-----------------------------------------------------------------------
.global startup
startup:
/* Load initial status register */
mov.l init_sr, r1
ldc r1, sr
/* Move myself to proper location if necessary */
mova 1f, r0 ! r0 = runtime address of 1f
mov.l 1f, r2 ! r2 = linked address of 1f
cmp/eq r2, r0
bt clear_bss ! already at the right place
sub r0, r2 ! r2 = link - load delta
mov.l bss_start_addr, r0
mov #0xffffffe0, r1
and r1, r0 ! align cache line
mov.l text_start_addr, r3
mov r0, r1
sub r2, r1 ! r1 = source (loaded) address
3:
mov.l @r1, r4
mov.l @(4,r1), r5
mov.l @(8,r1), r6
mov.l @(12,r1), r7
mov.l @(16,r1), r8
mov.l @(20,r1), r9
mov.l @(24,r1), r10
mov.l @(28,r1), r11
mov.l r4, @r0
mov.l r5, @(4,r0)
mov.l r6, @(8,r0)
mov.l r7, @(12,r0)
mov.l r8, @(16,r0)
mov.l r9, @(20,r0)
mov.l r10, @(24,r0)
mov.l r11, @(28,r0)
#ifdef CONFIG_CPU_SH4
ocbwb @r0 ! write back the cache line just written
#endif
cmp/hi r3, r0
add #-32, r0
bt/s 3b
add #-32, r1 ! delay slot
mov.l 2f, r0
jmp @r0 ! continue at the relocated clear_bss
nop
.align 2
1: .long 1b
2: .long clear_bss
text_start_addr:
.long startup
/* Clear BSS */
clear_bss:
mov.l end_addr, r1
mov.l bss_start_addr, r2
mov #0, r0
l1:
mov.l r0, @-r1 ! clear downwards from _end to __bss_start
cmp/eq r1,r2
bf l1
/* Set the initial pointer. */
mov.l init_stack_addr, r0
mov.l @r0, r15
/* Decompress the kernel */
mov.l decompress_kernel_addr, r0
jsr @r0
nop
/* Jump to the start of the decompressed kernel */
mov.l kernel_start_addr, r0
jmp @r0
nop
.align 2
bss_start_addr:
.long __bss_start
end_addr:
.long _end
init_sr:
.long 0x500000F0 /* Privileged mode, Bank=0, Block=1, IMASK=0xF */
kexec_magic:
.long 0x400000F0 /* magic used by kexec to parse zImage format */
init_stack_addr:
.long stack_start
decompress_kernel_addr:
.long decompress_kernel
kernel_start_addr:
#ifdef CONFIG_32BIT
.long ___pa(_text+PAGE_SIZE)
#else
.long _text+PAGE_SIZE
#endif
! Fake bzImage-style header so x86-centric boot tools accept the
! image; values are placeholders, only the signature matters.
.align 9
fake_headers_as_bzImage:
.word 0
.ascii "HdrS" ! header signature
.word 0x0202 ! header version number (>= 0x0105)
! or else old loadlin-1.5 will fail)
.word 0 ! default_switch
.word 0 ! SETUPSEG
.word 0x1000
.word 0 ! pointing to kernel version string
.byte 0 ! = 0, old one (LILO, Loadlin,
! 0xTV: T=0 for LILO
! V = version
.byte 1 ! Load flags bzImage=1
.word 0x8000 ! size to move, when setup is not
.long 0x100000 ! 0x100000 = default for big kernel
.long 0 ! address of loaded ramdisk image
.long 0 # its size in bytes
|
aixcc-public/challenge-001-exemplar-source
| 4,021
|
arch/sh/boot/compressed/head_64.S
|
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* arch/shmedia/boot/compressed/head.S
*
* Copied from
* arch/shmedia/kernel/head.S
* which carried the copyright:
* Copyright (C) 2000, 2001 Paolo Alberelli
*
* Modification for compressed loader:
* Copyright (C) 2002 Stuart Menefy (stuart.menefy@st.com)
*/
#include <asm/cache.h>
#include <asm/tlb.h>
#include <cpu/mmu_context.h>
#include <cpu/registers.h>
/*
 * Fixed TLB entries to identity map the beginning of RAM
 */
#define MMUIR_TEXT_H 0x0000000000000003 | CONFIG_MEMORY_START
/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
#define MMUIR_TEXT_L 0x000000000000009a | CONFIG_MEMORY_START
/* 512 Mb, Cacheable (Write-back), execute, Not User, Ph. Add. */
#define MMUDR_CACHED_H 0x0000000000000003 | CONFIG_MEMORY_START
/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
#define MMUDR_CACHED_L 0x000000000000015a | CONFIG_MEMORY_START
/* 512 Mb, Cacheable (Write-back), read/write, Not User, Ph. Add. */
#define ICCR0_INIT_VAL ICCR0_ON | ICCR0_ICI /* ICE + ICI */
#define ICCR1_INIT_VAL ICCR1_NOLOCK /* No locking */
#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WB /* OCE + OCI + WB */
#define OCCR1_INIT_VAL OCCR1_NOLOCK /* No locking */
.text
/*
 * startup - SH-5 (shmedia) zImage loader entry.  Neutralizes the
 * target registers, clears all variable ITLB/DTLB entries, installs
 * one fixed 512MB identity mapping each for code and data, enables
 * both caches, turns the MMU on via rte, clears BSS, calls
 * decompress_kernel(), then turns the MMU back off and jumps to the
 * decompressed kernel in SHcompact (ISA bit 0 = 1) mode.
 */
.global startup
startup:
/*
 * Prevent speculative fetch on device memory due to
 * uninitialized target registers.
 * This must be executed before the first branch.
 */
ptabs/u r63, tr0
ptabs/u r63, tr1
ptabs/u r63, tr2
ptabs/u r63, tr3
ptabs/u r63, tr4
ptabs/u r63, tr5
ptabs/u r63, tr6
ptabs/u r63, tr7
synci
/*
 * Set initial TLB entries for cached and uncached regions.
 * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
 */
/* Clear ITLBs */
pta 1f, tr1
movi ITLB_FIXED, r21
movi ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP, r22
1: putcfg r21, 0, r63 /* Clear MMUIR[n].PTEH.V */
addi r21, TLB_STEP, r21
bne r21, r22, tr1
/* Clear DTLBs */
pta 1f, tr1
movi DTLB_FIXED, r21
movi DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP, r22
1: putcfg r21, 0, r63 /* Clear MMUDR[n].PTEH.V */
addi r21, TLB_STEP, r21
bne r21, r22, tr1
/* Map one big (512Mb) page for ITLB */
movi ITLB_FIXED, r21
movi MMUIR_TEXT_L, r22 /* PTEL first */
putcfg r21, 1, r22 /* Set MMUIR[0].PTEL */
movi MMUIR_TEXT_H, r22 /* PTEH last */
putcfg r21, 0, r22 /* Set MMUIR[0].PTEH */
/* Map one big CACHED (512Mb) page for DTLB */
movi DTLB_FIXED, r21
movi MMUDR_CACHED_L, r22 /* PTEL first */
putcfg r21, 1, r22 /* Set MMUDR[0].PTEL */
movi MMUDR_CACHED_H, r22 /* PTEH last */
putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */
/* ICache */
movi ICCR_BASE, r21
movi ICCR0_INIT_VAL, r22
movi ICCR1_INIT_VAL, r23
putcfg r21, ICCR_REG0, r22
putcfg r21, ICCR_REG1, r23
synci
/* OCache */
movi OCCR_BASE, r21
movi OCCR0_INIT_VAL, r22
movi OCCR1_INIT_VAL, r23
putcfg r21, OCCR_REG0, r22
putcfg r21, OCCR_REG1, r23
synco
/*
 * Enable the MMU.
 * From here-on code can be non-PIC.
 */
movi SR_HARMLESS | SR_ENABLE_MMU, r22
putcon r22, SSR
movi 1f, r22
putcon r22, SPC
synco
rte /* And now go into the hyperspace ... */
1: /* ... that's the next instruction ! */
/* Set initial stack pointer */
movi datalabel stack_start, r0
ld.l r0, 0, r15
/*
 * Clear bss
 */
pt 1f, tr1
movi datalabel __bss_start, r22
movi datalabel _end, r23
1: st.l r22, 0, r63
addi r22, 4, r22
bne r22, r23, tr1
/*
 * Decompress the kernel.
 */
pt decompress_kernel, tr0
blink tr0, r18
/*
 * Disable the MMU.
 */
movi SR_HARMLESS, r22
putcon r22, SSR
movi 1f, r22
putcon r22, SPC
synco
rte /* And now go into the hyperspace ... */
1: /* ... that's the next instruction ! */
/* Jump into the decompressed kernel */
movi datalabel (CONFIG_MEMORY_START + 0x2000)+1, r19 /* +1: SHcompact mode */
ptabs r19, tr0
blink tr0, r18
/* Shouldn't return here, but just in case, loop forever */
pt 1f, tr0
1: blink tr0, r63
|
aixcc-public/challenge-001-exemplar-source
| 1,637
|
arch/sh/boot/romimage/head.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/arch/sh/boot/romimage/head.S
*
* Board specific setup code, executed before zImage loader
*/
.text
#include <asm/page.h>
!-----------------------------------------------------------------------
! romstart - romImage entry point.  Runs the board-specific setup
! from <mach/romimage.h>, optionally reloads itself from MMC via
! mmcif_loader, copies the empty zero page to where vmlinux expects
! it (_text), then jumps to the zImage entry located one page past
! the extra data.
!-----------------------------------------------------------------------
.global romstart
romstart:
/* include board specific setup code */
#include <mach/romimage.h>
#ifdef CONFIG_ROMIMAGE_MMCIF
/* load the romImage to above the empty zero page */
mov.l empty_zero_page_dst, r4
mov.l empty_zero_page_dst_adj, r5
add r5, r4 ! r4 = load destination
mov.l bytes_to_load, r5
mov.l loader_function, r7
jsr @r7 ! mmcif_loader(dst, len)
mov r4, r15 ! delay slot: give the loader a stack
mov.l empty_zero_page_dst, r4
mov.l empty_zero_page_dst_adj, r5
add r5, r4
mov.l loaded_code_offs, r5
add r5, r4
jmp @r4 ! continue in the freshly loaded copy
nop
.balign 4
empty_zero_page_dst_adj:
.long PAGE_SIZE
bytes_to_load:
.long end_data - romstart
loader_function:
.long mmcif_loader
loaded_code_offs:
.long loaded_code - romstart
loaded_code:
#endif /* CONFIG_ROMIMAGE_MMCIF */
/* copy the empty_zero_page contents to where vmlinux expects it */
mova extra_data_pos, r0
mov.l extra_data_size, r1
add r1, r0 ! r0 = start of zero page data in this image
mov.l empty_zero_page_dst, r1
mov #(PAGE_SHIFT - 4), r4
mov #1, r3
shld r4, r3 /* r3 = PAGE_SIZE / 16 */
1:
mov.l @r0, r4
mov.l @(4, r0), r5
mov.l @(8, r0), r6
mov.l @(12, r0), r7
add #16,r0
mov.l r4, @r1
mov.l r5, @(4, r1)
mov.l r6, @(8, r1)
mov.l r7, @(12, r1)
dt r3
add #16,r1
bf 1b
/* jump to the zImage entry point located after the zero page data */
mov #PAGE_SHIFT, r4
mov #1, r1
shld r4, r1 ! r1 = PAGE_SIZE
mova extra_data_pos, r0
add r1, r0
mov.l extra_data_size, r1
add r1, r0
jmp @r0
nop
.align 2
empty_zero_page_dst:
.long _text
extra_data_pos:
extra_data_size:
.long zero_page_pos - extra_data_pos
|
aixcc-public/challenge-001-exemplar-source
| 2,749
|
arch/sh/boards/mach-ecovec24/sdram.S
|
/* SPDX-License-Identifier: GPL-2.0
*
* Ecovec24 sdram self/auto-refresh setup code
*
* Copyright (C) 2009 Magnus Damm
*/
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/suspend.h>
#include <asm/romimage-macros.h>
/* code to enter and leave self-refresh. must be self-contained.
* this code will be copied to on-chip memory and executed from there.
*/
! ecovec24_sdram_enter: disable the DBSC controller and put SDRAM
! into self-refresh (precharge-all, refresh, then DBRFPDN0 = 1).
! Runs from on-chip memory; must stay self-contained.
.balign 4
ENTRY(ecovec24_sdram_enter_start)
/* DBSC: put memory in self-refresh mode */
ED 0xFD000010, 0x00000000 /* DBEN */
ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
ED 0xFD000040, 0x00000001 /* DBRFPDN0 */
rts
nop
ENTRY(ecovec24_sdram_enter_end)
! ecovec24_sdram_leave: bring SDRAM out of self-refresh.  For plain
! standby, just re-enable auto-refresh; after R-standby (DBSC state
! lost) fully re-initialize the controller and the DDR mode
! registers before switching to auto-refresh.  r5 = sleep save area.
.balign 4
ENTRY(ecovec24_sdram_leave_start)
mov.l @(SH_SLEEP_MODE, r5), r0
tst #SUSP_SH_RSTANDBY, r0
bf resume_rstandby
/* DBSC: put memory in auto-refresh mode */
ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
WAIT 1
ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
ED 0xFD000010, 0x00000001 /* DBEN */
ED 0xFD000040, 0x00010000 /* DBRFPDN0 */
rts
nop
resume_rstandby:
/* DBSC: re-initialize and put in auto-refresh */
ED 0xFD000108, 0x00000181 /* DBPDCNT0 */
ED 0xFD000020, 0x015B0002 /* DBCONF */
ED 0xFD000030, 0x03071502 /* DBTR0 */
ED 0xFD000034, 0x02020102 /* DBTR1 */
ED 0xFD000038, 0x01090405 /* DBTR2 */
ED 0xFD00003C, 0x00000002 /* DBTR3 */
ED 0xFD000008, 0x00000005 /* DBKIND */
ED 0xFD000040, 0x00000001 /* DBRFPDN0 */
ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
ED 0xFD000018, 0x00000001 /* DBCKECNT */
mov #100,r0
WAIT_400NS: ! simple delay loop after enabling CKE
dt r0
bf WAIT_400NS
ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
ED 0xFD000060, 0x00020000 /* DBMRCNT (EMR2) */
ED 0xFD000060, 0x00030000 /* DBMRCNT (EMR3) */
ED 0xFD000060, 0x00010004 /* DBMRCNT (EMR) */
ED 0xFD000060, 0x00000532 /* DBMRCNT (MRS) */
ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
ED 0xFD000060, 0x00000432 /* DBMRCNT (MRS) */
ED 0xFD000060, 0x000103c0 /* DBMRCNT (EMR) */
ED 0xFD000060, 0x00010040 /* DBMRCNT (EMR) */
mov #100,r0
WAIT_400NS_2: ! second settle delay
dt r0
bf WAIT_400NS_2
ED 0xFD000010, 0x00000001 /* DBEN */
ED 0xFD000044, 0x0000050f /* DBRFPDN1 */
ED 0xFD000048, 0x236800e6 /* DBRFPDN2 */
mov.l DUMMY,r0
mov.l @r0, r1 /* force single dummy read */
ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
ED 0xFD000108, 0x00000080 /* DBPDCNT0 */
ED 0xFD000040, 0x00010000 /* DBRFPDN0 */
rts
nop
.balign 4
DUMMY: .long 0xac400000 ! any cached SDRAM address for the dummy read
ENTRY(ecovec24_sdram_leave_end)
|
aixcc-public/challenge-001-exemplar-source
| 1,938
|
arch/sh/boards/mach-kfr2r09/sdram.S
|
/* SPDX-License-Identifier: GPL-2.0
*
* KFR2R09 sdram self/auto-refresh setup code
*
* Copyright (C) 2009 Magnus Damm
*/
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/suspend.h>
#include <asm/romimage-macros.h>
/* code to enter and leave self-refresh. must be self-contained.
* this code will be copied to on-chip memory and executed from there.
*/
! kfr2r09_sdram_enter: disable the DBSC controller and put SDRAM
! into self-refresh (precharge-all, refresh, then DBRFPDN0 = 1).
! Runs from on-chip memory; must stay self-contained.
.balign 4
ENTRY(kfr2r09_sdram_enter_start)
/* DBSC: put memory in self-refresh mode */
ED 0xFD000010, 0x00000000 /* DBEN */
ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
ED 0xFD000040, 0x00000001 /* DBRFPDN0 */
rts
nop
ENTRY(kfr2r09_sdram_enter_end)
! kfr2r09_sdram_leave: bring SDRAM out of self-refresh.  For plain
! standby, just re-enable auto-refresh; after R-standby (DBSC state
! lost) fully re-initialize the controller first.  r5 = sleep save
! area.
.balign 4
ENTRY(kfr2r09_sdram_leave_start)
/* DBSC: put memory in auto-refresh mode */
mov.l @(SH_SLEEP_MODE, r5), r0
tst #SUSP_SH_RSTANDBY, r0
bf resume_rstandby
ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
WAIT 1
ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
ED 0xFD000010, 0x00000001 /* DBEN */
ED 0xFD000040, 0x00010000 /* DBRFPDN0 */
rts
nop
resume_rstandby:
/* DBSC: re-initialize and put in auto-refresh */
ED 0xFD000108, 0x40000301 /* DBPDCNT0 */
ED 0xFD000020, 0x011B0002 /* DBCONF */
ED 0xFD000030, 0x03060E02 /* DBTR0 */
ED 0xFD000034, 0x01020102 /* DBTR1 */
ED 0xFD000038, 0x01090406 /* DBTR2 */
ED 0xFD000008, 0x00000004 /* DBKIND */
ED 0xFD000040, 0x00000001 /* DBRFPDN0 */
ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
ED 0xFD000018, 0x00000001 /* DBCKECNT */
WAIT 1
ED 0xFD000010, 0x00000001 /* DBEN */
ED 0xFD000044, 0x000004AF /* DBRFPDN1 */
ED 0xFD000048, 0x20CF0037 /* DBRFPDN2 */
ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
ED 0xFD000108, 0x40000300 /* DBPDCNT0 */
ED 0xFD000040, 0x00010000 /* DBRFPDN0 */
rts
nop
ENTRY(kfr2r09_sdram_leave_end)
|
aixcc-public/challenge-001-exemplar-source
| 1,210
|
arch/sh/boards/mach-migor/sdram.S
|
/* SPDX-License-Identifier: GPL-2.0
*
* Migo-R sdram self/auto-refresh setup code
*
* Copyright (C) 2009 Magnus Damm
*/
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/suspend.h>
#include <asm/romimage-macros.h>
/* code to enter and leave self-refresh. must be self-contained.
* this code will be copied to on-chip memory and executed from there.
*/
! migor_sdram_enter: put SDRAM into self-refresh via SDCR0
! (set bit 10, clear bit 15).  Runs from on-chip memory; must stay
! self-contained.
.balign 4
ENTRY(migor_sdram_enter_start)
/* SBSC: disable power down and put in self-refresh mode */
mov.l 1f, r4
mov.l 2f, r1
mov.l @r4, r2
or r1, r2 ! set the self-refresh bit
mov.l 3f, r3
and r3, r2 ! clear the power-down bit
mov.l r2, @r4
rts
nop
.balign 4
1: .long 0xfe400008 /* SDCR0 */
2: .long 0x00000400
3: .long 0xffff7fff
ENTRY(migor_sdram_enter_end)
! migor_sdram_leave: switch SDRAM back to auto-refresh: clear the
! self-refresh bit in SDCR0, then write RTCNT with the magic key
! 0xa55a0000 combined with (RTCOR - 1) to restart the refresh timer.
.balign 4
ENTRY(migor_sdram_leave_start)
/* SBSC: set auto-refresh mode */
mov.l 1f, r4
mov.l @r4, r0
mov.l 4f, r1
and r1, r0 ! clear the self-refresh bit
mov.l r0, @r4
mov.l 6f, r4
mov.l 8f, r0
mov.l @r4, r1
mov #-1, r4
add r4, r1 ! r1 = RTCOR - 1
or r1, r0 ! combine with write-enable key
mov.l 7f, r1
mov.l r0, @r1 ! restart refresh counter (RTCNT)
rts
nop
.balign 4
1: .long 0xfe400008 /* SDCR0 */
4: .long 0xfffffbff
6: .long 0xfe40001c /* RTCOR */
7: .long 0xfe400018 /* RTCNT */
8: .long 0xa55a0000
ENTRY(migor_sdram_leave_end)
|
aixcc-public/challenge-001-exemplar-source
| 1,224
|
arch/sh/boards/mach-ap325rxa/sdram.S
|
/* SPDX-License-Identifier: GPL-2.0
*
* AP325RXA sdram self/auto-refresh setup code
*
* Copyright (C) 2009 Magnus Damm
*/
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/suspend.h>
#include <asm/romimage-macros.h>
/* code to enter and leave self-refresh. must be self-contained.
* this code will be copied to on-chip memory and executed from there.
*/
! ap325rxa_sdram_enter: put SDRAM into self-refresh via SDCR0
! (set bit 10, clear bit 15).  Runs from on-chip memory; must stay
! self-contained.
.balign 4
ENTRY(ap325rxa_sdram_enter_start)
/* SBSC: disable power down and put in self-refresh mode */
mov.l 1f, r4
mov.l 2f, r1
mov.l @r4, r2
or r1, r2 ! set the self-refresh bit
mov.l 3f, r3
and r3, r2 ! clear the power-down bit
mov.l r2, @r4
rts
nop
.balign 4
1: .long 0xfe400008 /* SDCR0 */
2: .long 0x00000400
3: .long 0xffff7fff
ENTRY(ap325rxa_sdram_enter_end)
! ap325rxa_sdram_leave: switch SDRAM back to auto-refresh: clear the
! self-refresh bit in SDCR0, then write RTCNT with the magic key
! 0xa55a0000 combined with (RTCOR - 1) to restart the refresh timer.
.balign 4
ENTRY(ap325rxa_sdram_leave_start)
/* SBSC: set auto-refresh mode */
mov.l 1f, r4
mov.l @r4, r0
mov.l 4f, r1
and r1, r0 ! clear the self-refresh bit
mov.l r0, @r4
mov.l 6f, r4
mov.l 8f, r0
mov.l @r4, r1
mov #-1, r4
add r4, r1 ! r1 = RTCOR - 1
or r1, r0 ! combine with write-enable key
mov.l 7f, r1
mov.l r0, @r1 ! restart refresh counter (RTCNT)
rts
nop
.balign 4
1: .long 0xfe400008 /* SDCR0 */
4: .long 0xfffffbff
6: .long 0xfe40001c /* RTCOR */
7: .long 0xfe400018 /* RTCNT */
8: .long 0xa55a0000
ENTRY(ap325rxa_sdram_leave_end)
|
aixcc-public/challenge-001-exemplar-source
| 3,078
|
arch/sh/boards/mach-se/7724/sdram.S
|
/* SPDX-License-Identifier: GPL-2.0
*
* MS7724SE sdram self/auto-refresh setup code
*
* Copyright (C) 2009 Magnus Damm
*/
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/suspend.h>
#include <asm/romimage-macros.h>
/* code to enter and leave self-refresh. must be self-contained.
* this code will be copied to on-chip memory and executed from there.
*/
.balign 4
ENTRY(ms7724se_sdram_enter_start)
	/* DBSC: put memory in self-refresh mode */
	/* ED is the register-write macro from <asm/romimage-macros.h>:
	 * it stores the second operand to the address in the first.
	 * Sequence: disable controller, precharge-all, one refresh,
	 * then enter self-refresh via DBRFPDN0. */
	ED 0xFD000010, 0x00000000 /* DBEN */
	ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
	ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
	ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
	ED 0xFD000040, 0x00000001 /* DBRFPDN0 */
	rts
	 nop
ENTRY(ms7724se_sdram_enter_end)
.balign 4
ENTRY(ms7724se_sdram_leave_start)
	/* DBSC: put memory in auto-refresh mode */
	/* Two resume paths: the simple one re-enables refresh; the
	 * R-standby path must first restore clocks and fully
	 * re-initialise the DDR controller.  ED is the register-write
	 * macro from <asm/romimage-macros.h>. */
	mov.l	@(SH_SLEEP_MODE, r5), r0	! r0 = saved sleep-mode flags
	tst	#SUSP_SH_RSTANDBY, r0
	bf	resume_rstandby			! bit set -> full re-init path

	ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
	WAIT 1
	ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
	ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
	ED 0xFD000010, 0x00000001 /* DBEN */
	ED 0xFD000040, 0x00010000 /* DBRFPDN0 */

	rts
	 nop

resume_rstandby:

	/* CPG: setup clocks before restarting external memory */
	ED 0xA4150024, 0x00004000 /* PLLCR */

	mov.l	FRQCRA, r0
	mov.l	@r0, r3
	mov.l	KICK, r1		! set the KICK bit in FRQCRA
	or	r1, r3
	mov.l	r3, @r0

	mov.l	LSTATS, r0
	mov	#1, r1
WAIT_LSTATS:
	mov.l	@r0, r3			! spin while LSTATS bit 0 is set
	tst	r1, r3			! (frequency change in progress -
	bf	WAIT_LSTATS		!  presumably; see CPG docs)

	/* DBSC: re-initialize and put in auto-refresh */

	ED 0xFD000108, 0x00000181 /* DBPDCNT0 */
	ED 0xFD000020, 0x015B0002 /* DBCONF */
	ED 0xFD000030, 0x03071502 /* DBTR0 */
	ED 0xFD000034, 0x02020102 /* DBTR1 */
	ED 0xFD000038, 0x01090405 /* DBTR2 */
	ED 0xFD00003C, 0x00000002 /* DBTR3 */
	ED 0xFD000008, 0x00000005 /* DBKIND */
	ED 0xFD000040, 0x00000001 /* DBRFPDN0 */
	ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
	ED 0xFD000018, 0x00000001 /* DBCKECNT */

	mov	#100, r0		! ~100-iteration delay loop
WAIT_400NS:
	dt	r0
	bf	WAIT_400NS

	ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
	ED 0xFD000060, 0x00020000 /* DBMRCNT (EMR2) */
	ED 0xFD000060, 0x00030000 /* DBMRCNT (EMR3) */
	ED 0xFD000060, 0x00010004 /* DBMRCNT (EMR) */
	ED 0xFD000060, 0x00000532 /* DBMRCNT (MRS) */
	ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
	ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
	ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
	ED 0xFD000060, 0x00000432 /* DBMRCNT (MRS) */
	ED 0xFD000060, 0x000103c0 /* DBMRCNT (EMR) */
	ED 0xFD000060, 0x00010040 /* DBMRCNT (EMR) */

	mov	#100, r0		! second settle delay
WAIT_400NS_2:
	dt	r0
	bf	WAIT_400NS_2

	ED 0xFD000010, 0x00000001 /* DBEN */
	ED 0xFD000044, 0x0000050f /* DBRFPDN1 */
	ED 0xFD000048, 0x236800e6 /* DBRFPDN2 */

	mov.l	DUMMY, r0
	mov.l	@r0, r1		/* force single dummy read */
	ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
	ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
	ED 0xFD000108, 0x00000080 /* DBPDCNT0 */
	ED 0xFD000040, 0x00010000 /* DBRFPDN0 */

	rts
	 nop

	.balign 4
DUMMY:	.long	0xac400000
FRQCRA:	.long	0xa4150000
KICK:	.long	0x80000000
LSTATS:	.long	0xa4150060
ENTRY(ms7724se_sdram_leave_end)
|
aixcc-public/challenge-001-exemplar-source
| 1,897
|
arch/sh/include/asm/entry-macros.S
|
! SPDX-License-Identifier: GPL-2.0
! entry.S macro define
	/* Disable interrupts by setting IMASK (SR bits 4-7) to 0xf.
	 * NOTE: clobbers r0. */
	.macro	cli
	stc	sr, r0
	or	#0xf0, r0		! raise IMASK to maximum
	ldc	r0, sr
	.endm
	/* Enable interrupts: clear IMASK (SR bits 4-7).
	 * Builds the mask 0xffffff0f in r11 (mov sign-extends the 8-bit
	 * immediate, extu.b keeps 0xf0, not inverts).  With SR_RB, the
	 * saved per-cpu interrupt mask (k_g_imask) is OR-ed back in.
	 * NOTE: clobbers r10 and r11. */
	.macro	sti
	mov	#0xfffffff0, r11
	extu.b	r11, r11		! r11 = 0x000000f0
	not	r11, r11		! r11 = 0xffffff0f
	stc	sr, r10
	and	r11, r10		! clear IMASK bits
#ifdef CONFIG_CPU_HAS_SR_RB
	stc	k_g_imask, r11
	or	r11, r10		! reapply saved interrupt mask
#endif
	ldc	r10, sr
	.endm
	/* Load the current thread_info pointer into \ti.
	 * With SR_RB it is kept in the r7 register bank; otherwise it is
	 * derived by masking the stack pointer: \tmp is built up to
	 * ~(THREAD_SIZE - 1) via sign-extended mov + shll8 + shll2. */
	.macro	get_current_thread_info, ti, tmp
#ifdef CONFIG_CPU_HAS_SR_RB
	stc	r7_bank, \ti
#else
	mov	#((THREAD_SIZE - 1) >> 10) ^ 0xff, \tmp
	shll8	\tmp
	shll2	\tmp			! \tmp = ~(THREAD_SIZE - 1)
	mov	r15, \ti
	and	\tmp, \ti		! round sp down to thread_info base
#endif
	.endm
#ifdef CONFIG_TRACE_IRQFLAGS
.macro TRACE_IRQS_ON
mov.l r0, @-r15
mov.l r1, @-r15
mov.l r2, @-r15
mov.l r3, @-r15
mov.l r4, @-r15
mov.l r5, @-r15
mov.l r6, @-r15
mov.l r7, @-r15
mov.l 7834f, r0
jsr @r0
nop
mov.l @r15+, r7
mov.l @r15+, r6
mov.l @r15+, r5
mov.l @r15+, r4
mov.l @r15+, r3
mov.l @r15+, r2
mov.l @r15+, r1
mov.l @r15+, r0
mov.l 7834f, r0
bra 7835f
nop
.balign 4
7834: .long trace_hardirqs_on
7835:
.endm
.macro TRACE_IRQS_OFF
mov.l r0, @-r15
mov.l r1, @-r15
mov.l r2, @-r15
mov.l r3, @-r15
mov.l r4, @-r15
mov.l r5, @-r15
mov.l r6, @-r15
mov.l r7, @-r15
mov.l 7834f, r0
jsr @r0
nop
mov.l @r15+, r7
mov.l @r15+, r6
mov.l @r15+, r5
mov.l @r15+, r4
mov.l @r15+, r3
mov.l @r15+, r2
mov.l @r15+, r1
mov.l @r15+, r0
mov.l 7834f, r0
bra 7835f
nop
.balign 4
7834: .long trace_hardirqs_off
7835:
.endm
#else
.macro TRACE_IRQS_ON
.endm
.macro TRACE_IRQS_OFF
.endm
#endif
#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4)
# define PREF(x) pref @x
#else
# define PREF(x) nop
#endif
/*
* Macro for use within assembly. Because the DWARF unwinder
* needs to use the frame register to unwind the stack, we
* need to setup r14 with the value of the stack pointer as
* the return address is usually on the stack somewhere.
*/
	/* Copy sp into r14 so the DWARF unwinder has a frame register;
	 * no-op when the unwinder is not configured. */
	.macro	setup_frame_reg
#ifdef CONFIG_DWARF_UNWINDER
	mov	r15, r14
#endif
	.endm
|
aixcc-public/challenge-001-exemplar-source
| 4,939
|
arch/arm/mach-davinci/sleep.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* (C) Copyright 2009, Texas Instruments, Inc. https://www.ti.com/
*/
/* replicated define because linux/bitops.h cannot be included in assembly */
#define BIT(nr) (1 << (nr))
#include <linux/linkage.h>
#include <asm/assembler.h>
#include "psc.h"
#include "ddr2.h"
#include "clock.h"
/* Arbitrary, hardware currently does not update PHYRDY correctly */
#define PHYRDY_CYCLES 0x1000
/* Assume 25 MHz speed for the cycle conversions since PLLs are bypassed */
#define PLL_BYPASS_CYCLES (PLL_BYPASS_TIME * 25)
#define PLL_RESET_CYCLES (PLL_RESET_TIME * 25)
#define PLL_LOCK_CYCLES (PLL_LOCK_TIME * 25)
#define DEEPSLEEP_SLEEPENABLE_BIT BIT(31)
.text
.arch armv5te
/*
* Move DaVinci into deep sleep state
*
* Note: This code is copied to internal SRAM by PM code. When the DaVinci
* wakes up it continues execution at the point it went to sleep.
* Register Usage:
* r0: contains virtual base for DDR2 controller
* r1: contains virtual base for DDR2 Power and Sleep controller (PSC)
* r2: contains PSC number for DDR2
* r3: contains virtual base DDR2 PLL controller
* r4: contains virtual address of the DEEPSLEEP register
*/
ENTRY(davinci_cpu_suspend)
	@ See header comment for register contract: r0-r4 are loaded
	@ below from the parameter block passed in r0.
	stmfd	sp!, {r0-r12, lr}		@ save registers on stack

	ldr	ip, CACHE_FLUSH
	blx	ip				@ flush caches before cutting DDR

	ldmia	r0, {r0-r4}			@ unpack DDR2/PSC/PLL/DEEPSLEEP bases

	/*
	 * Switch DDR to self-refresh mode.
	 */

	/* calculate SDRCR address */
	ldr	ip, [r0, #DDR2_SDRCR_OFFSET]
	bic	ip, ip, #DDR2_SRPD_BIT
	orr	ip, ip, #DDR2_LPMODEN_BIT
	str	ip, [r0, #DDR2_SDRCR_OFFSET]
	ldr	ip, [r0, #DDR2_SDRCR_OFFSET]
	orr	ip, ip, #DDR2_MCLKSTOPEN_BIT
	str	ip, [r0, #DDR2_SDRCR_OFFSET]

	mov	ip, #PHYRDY_CYCLES		@ busy-wait; hardware PHYRDY
1:	subs	ip, ip, #0x1			@ is not reliable (see define)
	bne	1b

	/* Disable DDR2 LPSC */
	mov	r7, r0				@ preserve r0 across the call
	mov	r0, #0x2
	bl	davinci_ddr_psc_config
	mov	r0, r7

	/* Disable clock to DDR PHY */
	ldr	ip, [r3, #PLLDIV1]
	bic	ip, ip, #PLLDIV_EN
	str	ip, [r3, #PLLDIV1]

	/* Put the DDR PLL in bypass and power down */
	ldr	ip, [r3, #PLLCTL]
	bic	ip, ip, #PLLCTL_PLLENSRC
	bic	ip, ip, #PLLCTL_PLLEN
	str	ip, [r3, #PLLCTL]

	/* Wait for PLL to switch to bypass */
	mov	ip, #PLL_BYPASS_CYCLES
2:	subs	ip, ip, #0x1
	bne	2b

	/* Power down the PLL */
	ldr	ip, [r3, #PLLCTL]
	orr	ip, ip, #PLLCTL_PLLPWRDN
	str	ip, [r3, #PLLCTL]

	/* Go to deep sleep */
	ldr	ip, [r4]
	orr	ip, ip, #DEEPSLEEP_SLEEPENABLE_BIT
	/* System goes to sleep beyond after this instruction */
	str	ip, [r4]

	/* Wake up from sleep */

	/* Clear sleep enable */
	ldr	ip, [r4]
	bic	ip, ip, #DEEPSLEEP_SLEEPENABLE_BIT
	str	ip, [r4]

	/* initialize the DDR PLL controller */

	/* Put PLL in reset */
	ldr	ip, [r3, #PLLCTL]
	bic	ip, ip, #PLLCTL_PLLRST
	str	ip, [r3, #PLLCTL]

	/* Clear PLL power down */
	ldr	ip, [r3, #PLLCTL]
	bic	ip, ip, #PLLCTL_PLLPWRDN
	str	ip, [r3, #PLLCTL]

	mov	ip, #PLL_RESET_CYCLES
3:	subs	ip, ip, #0x1
	bne	3b

	/* Bring PLL out of reset */
	ldr	ip, [r3, #PLLCTL]
	orr	ip, ip, #PLLCTL_PLLRST
	str	ip, [r3, #PLLCTL]

	/* Wait for PLL to lock (assume prediv = 1, 25MHz OSCIN) */
	mov	ip, #PLL_LOCK_CYCLES
4:	subs	ip, ip, #0x1
	bne	4b

	/* Remove PLL from bypass mode */
	ldr	ip, [r3, #PLLCTL]
	bic	ip, ip, #PLLCTL_PLLENSRC
	orr	ip, ip, #PLLCTL_PLLEN
	str	ip, [r3, #PLLCTL]

	/* Start 2x clock to DDR2 */
	ldr	ip, [r3, #PLLDIV1]
	orr	ip, ip, #PLLDIV_EN
	str	ip, [r3, #PLLDIV1]

	/* Enable VCLK */

	/* Enable DDR2 LPSC */
	mov	r7, r0				@ preserve r0 across the call
	mov	r0, #0x3
	bl	davinci_ddr_psc_config
	mov	r0, r7

	/* clear MCLKSTOPEN */
	ldr	ip, [r0, #DDR2_SDRCR_OFFSET]
	bic	ip, ip, #DDR2_MCLKSTOPEN_BIT
	str	ip, [r0, #DDR2_SDRCR_OFFSET]

	ldr	ip, [r0, #DDR2_SDRCR_OFFSET]
	bic	ip, ip, #DDR2_LPMODEN_BIT
	str	ip, [r0, #DDR2_SDRCR_OFFSET]

	/* Restore registers and return */
	ldmfd	sp!, {r0-r12, pc}

ENDPROC(davinci_cpu_suspend)
/*
* Disables or Enables DDR2 LPSC
* Register Usage:
* r0: Enable or Disable LPSC r0 = 0x3 => Enable, r0 = 0x2 => Disable LPSC
* r1: contains virtual base for DDR2 Power and Sleep controller (PSC)
* r2: contains PSC number for DDR2
*/
ENTRY(davinci_ddr_psc_config)
	@ See header comment: r0 = 0x3 enable / 0x2 disable, r1 = PSC
	@ base, r2 = DDR2 PSC number.  Clobbers r6 and ip.
	/* Set next state in mdctl for DDR2 */
	mov	r6, #MDCTL
	add	r6, r6, r2, lsl #2		@ r6 = MDCTL + 4 * psc_num
	ldr	ip, [r1, r6]
	bic	ip, ip, #MDSTAT_STATE_MASK
	orr	ip, ip, r0			@ program requested next state
	str	ip, [r1, r6]

	/* Enable the Power Domain Transition Command */
	ldr	ip, [r1, #PTCMD]
	orr	ip, ip, #0x1
	str	ip, [r1, #PTCMD]

	/* Check for Transition Complete (PTSTAT) */
ptstat_done:
	ldr	ip, [r1, #PTSTAT]
	and	ip, ip, #0x1
	cmp	ip, #0x0			@ spin until PTSTAT bit 0 clears
	bne	ptstat_done

	/* Check for DDR2 clock disable completion; */
	mov	r6, #MDSTAT
	add	r6, r6, r2, lsl #2		@ r6 = MDSTAT + 4 * psc_num
ddr2clk_stop_done:
	ldr	ip, [r1, r6]
	and	ip, ip, #MDSTAT_STATE_MASK
	cmp	ip, r0				@ spin until state == requested
	bne	ddr2clk_stop_done

	ret	lr
ENDPROC(davinci_ddr_psc_config)
/* Literal holding the cache-flush routine address, selected per CPU. */
CACHE_FLUSH:
#ifdef CONFIG_CPU_V6
	.word	v6_flush_kern_cache_all
#else
	.word	arm926_flush_kern_cache_all
#endif

/* Byte size of davinci_cpu_suspend, used when copying it to SRAM. */
ENTRY(davinci_cpu_suspend_sz)
	.word	. - davinci_cpu_suspend
ENDPROC(davinci_cpu_suspend_sz)
|
aixcc-public/challenge-001-exemplar-source
| 2,012
|
arch/arm/mach-sunxi/headsmp.S
|
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (c) 2018 Chen-Yu Tsai
* Copyright (c) 2018 Bootlin
*
* Chen-Yu Tsai <wens@csie.org>
* Mylène Josserand <mylene.josserand@bootlin.com>
*
* SMP support for sunxi based systems with Cortex A7/A15
*
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/cputype.h>
ENTRY(sunxi_mc_smp_cluster_cache_enable)
	.arch   armv7-a
	/*
	 * Enable cluster-level coherency, in preparation for turning on the MMU.
	 *
	 * Also enable regional clock gating and L2 data latency settings for
	 * Cortex-A15. These settings are from the vendor kernel.
	 */
	mrc	p15, 0, r1, c0, c0, 0	@ MIDR -> r1
	movw	r2, #(ARM_CPU_PART_MASK & 0xffff)
	movt	r2, #(ARM_CPU_PART_MASK >> 16)
	and	r1, r1, r2		@ isolate CPU part number
	movw	r2, #(ARM_CPU_PART_CORTEX_A15 & 0xffff)
	movt	r2, #(ARM_CPU_PART_CORTEX_A15 >> 16)
	cmp	r1, r2
	bne	not_a15

	/* The following is Cortex-A15 specific */

	/* ACTLR2: Enable CPU regional clock gates */
	mrc	p15, 1, r1, c15, c0, 4
	orr	r1, r1, #(0x1 << 31)
	mcr	p15, 1, r1, c15, c0, 4

	/* L2ACTLR */
	mrc	p15, 1, r1, c15, c0, 0
	/* Enable L2, GIC, and Timer regional clock gates */
	orr	r1, r1, #(0x1 << 26)
	/* Disable clean/evict from being pushed to external */
	orr	r1, r1, #(0x1<<3)
	mcr	p15, 1, r1, c15, c0, 0

	/* L2CTRL: L2 data RAM latency */
	mrc	p15, 1, r1, c9, c0, 2
	bic	r1, r1, #(0x7 << 0)
	orr	r1, r1, #(0x3 << 0)
	mcr	p15, 1, r1, c9, c0, 2

	/* End of Cortex-A15 specific setup */
not_a15:

	/* Get value of sunxi_mc_smp_first_comer */
	adr	r1, first
	ldr	r0, [r1]		@ r0 = PC-relative offset to flag
	ldr	r0, [r1, r0]		@ r0 = *sunxi_mc_smp_first_comer

	/* Skip cci_enable_port_for_self if not first comer */
	cmp	r0, #0
	bxeq	lr
	b	cci_enable_port_for_self	@ tail-call; returns to caller

	.align 2
first:	.word sunxi_mc_smp_first_comer - .
ENDPROC(sunxi_mc_smp_cluster_cache_enable)
/* Secondary-CPU boot entry: enable cluster caches/coherency, sync the
 * virtual counter offset, then fall into the generic SMP startup path. */
ENTRY(sunxi_mc_smp_secondary_startup)
	bl	sunxi_mc_smp_cluster_cache_enable
	bl	secure_cntvoff_init
	b	secondary_startup
ENDPROC(sunxi_mc_smp_secondary_startup)
/* Resume entry: re-enable cluster caches/coherency, then the generic
 * ARM cpu_resume path restores the suspended state. */
ENTRY(sunxi_mc_smp_resume)
	bl	sunxi_mc_smp_cluster_cache_enable
	b	cpu_resume
ENDPROC(sunxi_mc_smp_resume)
|
aixcc-public/challenge-001-exemplar-source
| 3,796
|
arch/arm/xen/hypercall.S
|
/******************************************************************************
* hypercall.S
*
* Xen hypercall wrappers
*
* Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/*
* The Xen hypercall calling convention is very similar to the ARM
* procedure calling convention: the first paramter is passed in r0, the
* second in r1, the third in r2 and the fourth in r3. Considering that
* Xen hypercalls have 5 arguments at most, the fifth paramter is passed
* in r4, differently from the procedure calling convention of using the
* stack for that case.
*
* The hypercall number is passed in r12.
*
* The return value is in r0.
*
* The hvc ISS is required to be 0xEA1, that is the Xen specific ARM
* hypercall tag.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/opcodes-virt.h>
#include <xen/interface/xen.h>
#define XEN_IMM 0xEA1
/*
 * HYPERCALL_SIMPLE: wrapper for hypercalls with up to 4 arguments,
 * which already sit in r0-r3 per the AAPCS.  Only the hypercall
 * number needs loading into r12 before the hvc (see file header).
 */
#define HYPERCALL_SIMPLE(hypercall)		\
ENTRY(HYPERVISOR_##hypercall)			\
	mov r12, #__HYPERVISOR_##hypercall;	\
	__HVC(XEN_IMM);				\
	ret lr;					\
ENDPROC(HYPERVISOR_##hypercall)

#define HYPERCALL0 HYPERCALL_SIMPLE
#define HYPERCALL1 HYPERCALL_SIMPLE
#define HYPERCALL2 HYPERCALL_SIMPLE
#define HYPERCALL3 HYPERCALL_SIMPLE
#define HYPERCALL4 HYPERCALL_SIMPLE

/*
 * HYPERCALL5: the fifth argument arrives on the stack (AAPCS) but Xen
 * expects it in r4, so r4 is saved, loaded from the stack, and
 * restored around the hvc.
 */
#define HYPERCALL5(hypercall)			\
ENTRY(HYPERVISOR_##hypercall)			\
	stmdb sp!, {r4}						\
	ldr r4, [sp, #4]					\
	mov r12, #__HYPERVISOR_##hypercall;	\
	__HVC(XEN_IMM);						\
	ldm sp!, {r4}						\
	ret lr							\
ENDPROC(HYPERVISOR_##hypercall)
                .text

/* Instantiate one stub per hypercall; the numeric suffix is the
 * argument count and selects HYPERCALL_SIMPLE or HYPERCALL5 above. */
HYPERCALL2(xen_version);
HYPERCALL3(console_io);
HYPERCALL3(grant_table_op);
HYPERCALL2(sched_op);
HYPERCALL2(event_channel_op);
HYPERCALL2(hvm_op);
HYPERCALL2(memory_op);
HYPERCALL2(physdev_op);
HYPERCALL3(vcpu_op);
HYPERCALL1(platform_op_raw);
HYPERCALL2(multicall);
HYPERCALL2(vm_assist);
HYPERCALL3(dm_op);
/*
 * privcmd_call(nr, a1, a2, a3, a4, a5)
 * r0 = hypercall number, r1-r3 = first three args, 4th and 5th args
 * arrive on the stack.  Shifts everything into the Xen convention
 * (nr in r12, args in r0-r4) and issues the hvc.
 */
ENTRY(privcmd_call)
	stmdb sp!, {r4}
	mov r12, r0			@ hypercall number -> r12
	mov r0, r1			@ shift args down one slot
	mov r1, r2
	mov r2, r3
	ldr r3, [sp, #8]		@ 4th arg (past the pushed r4)

	/*
	 * Privcmd calls are issued by the userspace. We need to allow the
	 * kernel to access the userspace memory before issuing the hypercall.
	 */
	uaccess_enable r4

	/* r4 is loaded now as we use it as scratch register before */
	ldr r4, [sp, #4]		@ 5th arg
	__HVC(XEN_IMM)

	/*
	 * Disable userspace access from kernel. This is fine to do it
	 * unconditionally as no set_fs(KERNEL_DS) is called before.
	 */
	uaccess_disable r4

	ldm sp!, {r4}
	ret lr
ENDPROC(privcmd_call);
|
aixcc-public/challenge-001-exemplar-source
| 1,635
|
arch/arm/vdso/vdso.lds.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Adapted from arm64 version.
*
* GNU linker script for the VDSO library.
*
* Copyright (C) 2012 ARM Limited
*
* Author: Will Deacon <will.deacon@arm.com>
* Heavily based on the vDSO linker scripts for other archs.
*/
#include <linux/const.h>
#include <asm/page.h>
#include <asm/vdso.h>
OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", "elf32-littlearm")
OUTPUT_ARCH(arm)

SECTIONS
{
	PROVIDE(_start = .);

	. = SIZEOF_HEADERS;

	.hash		: { *(.hash) }			:text
	.gnu.hash	: { *(.gnu.hash) }
	.dynsym		: { *(.dynsym) }
	.dynstr		: { *(.dynstr) }
	.gnu.version	: { *(.gnu.version) }
	.gnu.version_d	: { *(.gnu.version_d) }
	.gnu.version_r	: { *(.gnu.version_r) }

	.note		: { *(.note.*) }		:text	:note

	.eh_frame_hdr	: { *(.eh_frame_hdr) }		:text	:eh_frame_hdr
	.eh_frame	: { KEEP (*(.eh_frame)) }	:text

	.dynamic	: { *(.dynamic) }		:text	:dynamic

	.rodata		: { *(.rodata*) }		:text

	/* 0xe7f001f2 fills padding between input sections — presumably a
	 * trapping instruction pattern; confirm against arch convention. */
	.text		: { *(.text*) }			:text	=0xe7f001f2

	.got		: { *(.got) }
	.rel.plt	: { *(.rel.plt) }

	/* The vDSO is mapped read-only: discard writable/BSS sections. */
	/DISCARD/	: {
		*(.note.GNU-stack)
		*(.data .data.* .gnu.linkonce.d.* .sdata*)
		*(.bss .sbss .dynbss .dynsbss)
	}
}

/*
 * We must supply the ELF program headers explicitly to get just one
 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
 */
PHDRS
{
	text		PT_LOAD		FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
	dynamic		PT_DYNAMIC	FLAGS(4);		/* PF_R */
	note		PT_NOTE		FLAGS(4);		/* PF_R */
	eh_frame_hdr	PT_GNU_EH_FRAME;
}

VERSION
{
	LINUX_2.6 {
	global:
		__vdso_clock_gettime;
		__vdso_gettimeofday;
		__vdso_clock_getres;
		__vdso_clock_gettime64;
	local: *;
	};
}
|
aixcc-public/challenge-001-exemplar-source
| 1,994
|
arch/arm/mach-s3c/sleep-s3c64xx.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* linux/arch/arm/plat-s3c64xx/sleep.S
*
* Copyright 2008 Openmoko, Inc.
* Copyright 2008 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
* http://armlinux.simtec.co.uk/
*
* S3C64XX CPU sleep code
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include "map.h"
#undef S3C64XX_VA_GPIO
#define S3C64XX_VA_GPIO (0x0)
#include "regs-gpio.h"
#define LL_UART (S3C_PA_UART + (0x400 * CONFIG_S3C_LOWLEVEL_UART_PORT))
.text
/* Sleep magic, the word before the resume entry point so that the
* bootloader can check for a resumeable image. */
.word 0x2bedf00d
/* s3c_cpu_reusme
*
* This is the entry point, stored by whatever method the bootloader
* requires to get the kernel runnign again. This code expects to be
* entered with no caches live and the MMU disabled. It will then
* restore the MMU and other basic CP registers saved and restart
* the kernel C code to finish the resume code.
*/
ENTRY(s3c_cpu_resume)
	@ Entered from the bootloader with MMU off and caches dead
	@ (see comment above): mask interrupts, then hand over to the
	@ generic cpu_resume path.
	msr	cpsr_c, #PSR_I_BIT | PSR_F_BIT | SVC_MODE
	ldr	r2, =LL_UART		/* for debug */

#ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK

#define S3C64XX_GPNCON			(S3C64XX_GPN_BASE + 0x00)
#define S3C64XX_GPNDAT			(S3C64XX_GPN_BASE + 0x04)

#define S3C64XX_GPN_CONMASK(__gpio)	(0x3 << ((__gpio) * 2))
#define S3C64XX_GPN_OUTPUT(__gpio)	(0x1 << ((__gpio) * 2))

	/* Initialise the GPIO state if we are debugging via the SMDK LEDs,
	 * as the uboot version supplied resets these to inputs during the
	 * resume checks.
	 */

	ldr	r3, =S3C64XX_PA_GPIO
	ldr	r0, [ r3, #S3C64XX_GPNCON ]
	bic	r0, r0, #(S3C64XX_GPN_CONMASK(12) | S3C64XX_GPN_CONMASK(13) | \
			  S3C64XX_GPN_CONMASK(14) | S3C64XX_GPN_CONMASK(15))
	orr	r0, r0, #(S3C64XX_GPN_OUTPUT(12) | S3C64XX_GPN_OUTPUT(13) | \
			  S3C64XX_GPN_OUTPUT(14) | S3C64XX_GPN_OUTPUT(15))
	str	r0, [ r3, #S3C64XX_GPNCON ]

	ldr	r0, [ r3, #S3C64XX_GPNDAT ]
	bic	r0, r0, #0xf << 12			@ GPN12..15
	orr	r0, r0, #1 << 15			@ GPN15
	str	r0, [ r3, #S3C64XX_GPNDAT ]

#endif
	b	cpu_resume
|
aixcc-public/challenge-001-exemplar-source
| 1,470
|
arch/arm/mach-s3c/sleep-s3c24xx.S
|
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 2004 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C2410 Power Manager (Suspend-To-RAM) support
*
* Based on PXA/SA1100 sleep code by:
* Nicolas Pitre, (c) 2002 Monta Vista Software Inc
* Cliff Brake, (c) 2001
*/
#include <linux/linkage.h>
#include <linux/serial_s3c.h>
#include <asm/assembler.h>
#include "map.h"
#include "regs-gpio.h"
#include "regs-clock.h"
/*
* S3C24XX_DEBUG_RESUME is dangerous if your bootloader does not
* reset the UART configuration, only enable if you really need this!
*/
//#define S3C24XX_DEBUG_RESUME
.text
/* sleep magic, to allow the bootloader to check for an valid
* image to resume to. Must be the first word before the
* s3c_cpu_resume entry.
*/
.word 0x2bedf00d
/* s3c_cpu_resume
*
* resume code entry for bootloader to call
*/
ENTRY(s3c_cpu_resume)
	@ Bootloader resume entry: mask interrupts and enter SVC mode,
	@ then hand over to the generic cpu_resume path.
	mov	r0, #PSR_I_BIT | PSR_F_BIT | SVC_MODE
	msr	cpsr_c, r0

	@@ load UART to allow us to print the two characters for
	@@ resume debug
	mov	r2, #S3C24XX_PA_UART & 0xff000000
	orr	r2, r2, #S3C24XX_PA_UART & 0xff000

#if 0
	/* SMDK2440 LED set */
	mov	r14, #S3C24XX_PA_GPIO
	ldr	r12, [ r14, #0x54 ]
	bic	r12, r12, #3<<4
	orr	r12, r12, #1<<7
	str	r12, [ r14, #0x54 ]
#endif

#ifdef S3C24XX_DEBUG_RESUME
	mov	r3, #'L'
	strb	r3, [ r2, #S3C2410_UTXH ]
1001:
	@ Fix: poll UTRSTAT relative to the UART base in r2.  The
	@ original used r3 as the base, but r3 holds the character 'L',
	@ so the load hit a bogus address instead of the status register.
	ldrb	r14, [ r2, #S3C2410_UTRSTAT ]
	tst	r14, #S3C2410_UTRSTAT_TXE
	beq	1001b
#endif /* S3C24XX_DEBUG_RESUME */

	b	cpu_resume
|
aixcc-public/challenge-001-exemplar-source
| 2,786
|
arch/arm/mach-s3c/irq-s3c24xx-fiq.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/* linux/drivers/spi/spi_s3c24xx_fiq.S
*
* Copyright 2009 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C24XX SPI - FIQ pseudo-DMA transfer code
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include "map.h"
#include "regs-irq.h"
#include <linux/spi/s3c24xx-fiq.h>
#define S3C2410_SPTDAT (0x10)
#define S3C2410_SPRDAT (0x14)
.text
@ entry to these routines is as follows, with the register names
@ defined in fiq.h so that they can be shared with the C files which
@ setup the calling registers.
@
@ fiq_rirq The base of the IRQ registers to find S3C2410_SRCPND
@ fiq_rtmp Temporary register to hold tx/rx data
@ fiq_rspi The base of the SPI register block
@ fiq_rtx The tx buffer pointer
@ fiq_rrx The rx buffer pointer
@ fiq_rcount The number of bytes to move
@ each entry starts with a word entry of how long it is
@ and an offset to the irq acknowledgment word
ENTRY(s3c24xx_spi_fiq_rx)
@ Receive-only FIQ handler: ack the IRQ, read one byte from the SPI
@ data register into the rx buffer, write a 0xff dummy byte to keep
@ the clock running, then FIQ-return (subs pc, lr, #4 restores CPSR).
.word	fiq_rx_end - fiq_rx_start
.word	fiq_rx_irq_ack - fiq_rx_start
fiq_rx_start:
	ldr	fiq_rtmp, fiq_rx_irq_ack
	str	fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]

	ldrb	fiq_rtmp, [ fiq_rspi, #  S3C2410_SPRDAT ]
	strb	fiq_rtmp, [ fiq_rrx ], #1

	mov	fiq_rtmp, #0xff			@ dummy tx byte
	strb	fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]

	subs	fiq_rcount, fiq_rcount, #1
	subsne	pc, lr, #4		@@ return, still have work to do

	@@ set IRQ controller so that next op will trigger IRQ
	mov	fiq_rtmp, #0
	str	fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD  - S3C24XX_VA_IRQ ]
	subs	pc, lr, #4

fiq_rx_irq_ack:
	.word	0			@ patched with the ack mask at setup
fiq_rx_end:
ENTRY(s3c24xx_spi_fiq_txrx)
@ Full-duplex FIQ handler: store the received byte, ack the IRQ, then
@ send the next byte from the tx buffer.
.word	fiq_txrx_end - fiq_txrx_start
.word	fiq_txrx_irq_ack - fiq_txrx_start
fiq_txrx_start:
	ldrb	fiq_rtmp, [ fiq_rspi, #  S3C2410_SPRDAT ]
	strb	fiq_rtmp, [ fiq_rrx ], #1

	ldr	fiq_rtmp, fiq_txrx_irq_ack
	str	fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]

	ldrb	fiq_rtmp, [ fiq_rtx ], #1
	strb	fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]

	subs	fiq_rcount, fiq_rcount, #1
	subsne	pc, lr, #4		@@ return, still have work to do

	mov	fiq_rtmp, #0
	str	fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD  - S3C24XX_VA_IRQ ]
	subs	pc, lr, #4

fiq_txrx_irq_ack:
	.word	0			@ patched with the ack mask at setup
fiq_txrx_end:
ENTRY(s3c24xx_spi_fiq_tx)
@ Transmit-only FIQ handler: drain the rx register (value discarded),
@ ack the IRQ, then send the next byte from the tx buffer.
.word	fiq_tx_end - fiq_tx_start
.word	fiq_tx_irq_ack - fiq_tx_start
fiq_tx_start:
	ldrb	fiq_rtmp, [ fiq_rspi, #  S3C2410_SPRDAT ]

	ldr	fiq_rtmp, fiq_tx_irq_ack
	str	fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]

	ldrb	fiq_rtmp, [ fiq_rtx ], #1
	strb	fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]

	subs	fiq_rcount, fiq_rcount, #1
	subsne	pc, lr, #4		@@ return, still have work to do

	mov	fiq_rtmp, #0
	str	fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD  - S3C24XX_VA_IRQ ]
	subs	pc, lr, #4

fiq_tx_irq_ack:
	.word	0			@ patched with the ack mask at setup
fiq_tx_end:
.end
|
aixcc-public/challenge-001-exemplar-source
| 1,108
|
arch/arm/mach-s3c/sleep-s3c2412.S
|
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 2007 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C2412 Power Manager low-level sleep support
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include "map.h"
#include "regs-irq.h"
.text
	.global	s3c2412_sleep_enter

s3c2412_sleep_enter:
	mov	r0, #0			/* argument for coprocessors */
	ldr	r1, =S3C2410_INTPND
	ldr	r2, =S3C2410_SRCPND
	ldr	r3, =S3C2410_EINTPEND

	teq	r0, r0			@ first call: Z set -> cache warm-up
	bl	s3c2412_sleep_enter1
	teq	pc, r0			@ second call: Z clear -> real suspend
	bl	s3c2412_sleep_enter1

	.align	5

	/* this is called twice, first with the Z flag to ensure that the
	 * instructions have been loaded into the cache, and the second
	 * time to try and suspend the system.
	 */
s3c2412_sleep_enter1:
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcrne	p15, 0, r0, c7, c0, 4		@ wait-for-interrupt (real pass)

	/* if we return from here, it is because an interrupt was
	 * active when we tried to shutdown. Try and ack the IRQ and
	 * retry, as simply returning causes the system to lock.
	 */

	ldrne	r9, [r1]		@ ack INTPND by writing it back
	strne	r9, [r1]
	ldrne	r9, [r2]		@ ack SRCPND
	strne	r9, [r2]
	ldrne	r9, [r3]		@ ack EINTPEND
	strne	r9, [r3]

	bne	s3c2412_sleep_enter1

	ret	lr
|
aixcc-public/challenge-001-exemplar-source
| 1,322
|
arch/arm/mach-s3c/sleep-s3c2410.S
|
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 2004 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C2410 Power Manager (Suspend-To-RAM) support
*
* Based on PXA/SA1100 sleep code by:
* Nicolas Pitre, (c) 2002 Monta Vista Software Inc
* Cliff Brake, (c) 2001
*/
#include <linux/linkage.h>
#include <linux/serial_s3c.h>
#include <asm/assembler.h>
#include "map.h"
#include "regs-gpio.h"
#include "regs-clock.h"
#include "regs-mem-s3c24xx.h"
/* s3c2410_cpu_suspend
*
* put the cpu into sleep mode
*/
ENTRY(s3c2410_cpu_suspend)
	@@ prepare cpu to sleep
	@ Pre-compute all three register writes before sleeping so the
	@ final store sequence touches no new TLB/cache lines.
	ldr	r4, =S3C2410_REFRESH
	ldr	r5, =S3C24XX_MISCCR
	ldr	r6, =S3C2410_CLKCON
	ldr	r7, [r4]		@ get REFRESH (and ensure in TLB)
	ldr	r8, [r5]		@ get MISCCR (and ensure in TLB)
	ldr	r9, [r6]		@ get CLKCON (and ensure in TLB)

	orr	r7, r7, #S3C2410_REFRESH_SELF	@ SDRAM sleep command
	orr	r8, r8, #S3C2410_MISCCR_SDSLEEP @ SDRAM power-down signals
	orr	r9, r9, #S3C2410_CLKCON_POWER	@ power down command

	teq	pc, #0			@ first as a trial-run to load cache
	bl	s3c2410_do_sleep
	teq	r0, r0			@ now do it for real
	b	s3c2410_do_sleep	@

	@@ align next bit of code to cache line
	.align	5

s3c2410_do_sleep:
	@ Stores are conditional on EQ: no-ops on the cache-warming pass,
	@ executed on the real pass.
	streq	r7, [r4]			@ SDRAM sleep command
	streq	r8, [r5]			@ SDRAM power-down config
	streq	r9, [r6]			@ CPU sleep
1:	beq	1b				@ spin until power actually drops
	ret	lr
|
aixcc-public/challenge-001-exemplar-source
| 2,657
|
arch/arm/mach-pxa/standby.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* PXA27x standby mode
*
* Author: David Burrage
*
* 2005 (c) MontaVista Software, Inc.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include "pxa2xx-regs.h"
.text
#ifdef CONFIG_PXA27x
ENTRY(pxa_cpu_standby)
	@ Enter PXA27x standby via cp14; the memory-context read at
	@ physical 0 and the .align 5 keep the final two instructions in
	@ one cache line so no fetch happens while entering standby.
	ldr	r0, =PSSR
	mov	r1, #(PSSR_PH | PSSR_STS)
	mov	r2, #PWRMODE_STANDBY
	mov	r3, #UNCACHED_PHYS_0	@ Read mem context in.
	ldr	ip, [r3]
	b	1f

	.align	5
1:	mcr	p14, 0, r2, c7, c0, 0	@ put the system into Standby
	str	r1, [r0]		@ make sure PSSR_PH/STS are clear
	ret	lr
#endif
#ifdef CONFIG_PXA3xx
#define PXA3_MDCNFG 0x0000
#define PXA3_MDCNFG_DMCEN (1 << 30)
#define PXA3_DDR_HCAL 0x0060
#define PXA3_DDR_HCAL_HCRNG 0x1f
#define PXA3_DDR_HCAL_HCPROG (1 << 28)
#define PXA3_DDR_HCAL_HCEN (1 << 31)
#define PXA3_DMCIER 0x0070
#define PXA3_DMCIER_EDLP (1 << 29)
#define PXA3_DMCISR 0x0078
#define PXA3_RCOMP 0x0100
#define PXA3_RCOMP_SWEVAL (1 << 31)
ENTRY(pm_enter_standby_start)
	mov	r1, #0xf6000000			@ DMEMC_REG_BASE (PXA3_MDCNFG)
	add	r1, r1, #0x00100000

	/*
	 * Preload the TLB entry for accessing the dynamic memory
	 * controller registers.  Note that page table lookups will
	 * fail until the dynamic memory controller has been
	 * reinitialised - and that includes MMU page table walks.
	 * This also means that only the dynamic memory controller
	 * can be reliably accessed in the code following standby.
	 */
	ldr	r2, [r1]			@ Dummy read PXA3_MDCNFG

	mcr	p14, 0, r0, c7, c0, 0		@ enter standby (cp14)
	.rept	8
	nop					@ settle after wakeup
	.endr

	ldr	r0, [r1, #PXA3_DDR_HCAL]	@ Clear (and wait for) HCEN
	bic	r0, r0, #PXA3_DDR_HCAL_HCEN
	str	r0, [r1, #PXA3_DDR_HCAL]
1:	ldr	r0, [r1, #PXA3_DDR_HCAL]
	tst	r0, #PXA3_DDR_HCAL_HCEN
	bne	1b

	ldr	r0, [r1, #PXA3_RCOMP]		@ Initiate RCOMP
	orr	r0, r0, #PXA3_RCOMP_SWEVAL
	str	r0, [r1, #PXA3_RCOMP]

	mov	r0, #~0				@ Clear interrupts
	str	r0, [r1, #PXA3_DMCISR]

	ldr	r0, [r1, #PXA3_DMCIER]		@ set DMIER[EDLP]
	orr	r0, r0, #PXA3_DMCIER_EDLP
	str	r0, [r1, #PXA3_DMCIER]

	ldr	r0, [r1, #PXA3_DDR_HCAL]	@ clear HCRNG, set HCPROG, HCEN
	bic	r0, r0, #PXA3_DDR_HCAL_HCRNG
	orr	r0, r0, #PXA3_DDR_HCAL_HCEN | PXA3_DDR_HCAL_HCPROG
	str	r0, [r1, #PXA3_DDR_HCAL]

1:	ldr	r0, [r1, #PXA3_DMCISR]		@ wait for calibration done
	tst	r0, #PXA3_DMCIER_EDLP
	beq	1b

	ldr	r0, [r1, #PXA3_MDCNFG]		@ set PXA3_MDCNFG[DMCEN]
	orr	r0, r0, #PXA3_MDCNFG_DMCEN
	str	r0, [r1, #PXA3_MDCNFG]
1:	ldr	r0, [r1, #PXA3_MDCNFG]		@ wait for controller enable
	tst	r0, #PXA3_MDCNFG_DMCEN
	beq	1b

	ldr	r0, [r1, #PXA3_DDR_HCAL]	@ set PXA3_DDR_HCAL[HCRNG]
	orr	r0, r0, #2			@ HCRNG
	str	r0, [r1, #PXA3_DDR_HCAL]

	ldr	r0, [r1, #PXA3_DMCIER]		@ Clear the interrupt
	bic	r0, r0, #0x20000000
	str	r0, [r1, #PXA3_DMCIER]

	ret	lr
ENTRY(pm_enter_standby_end)
#endif
|
aixcc-public/challenge-001-exemplar-source
| 4,305
|
arch/arm/mach-pxa/sleep.S
|
/*
* Low-level PXA250/210 sleep/wakeUp support
*
* Initial SA1110 code:
* Copyright (c) 2001 Cliff Brake <cbrake@accelent.com>
*
* Adapted for PXA by Nicolas Pitre:
* Copyright (c) 2002 Monta Vista Software, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include "smemc.h"
#include "pxa2xx-regs.h"
#define MDREFR_KDIV 0x200a4000 // all banks
#define CCCR_SLEEP 0x00000107 // L=7 2N=2 A=0 PPDIS=0 CPDIS=0
#define CCCR_N_MASK 0x00000380
#define CCCR_M_MASK 0x00000060
#define CCCR_L_MASK 0x0000001f
.text
#ifdef CONFIG_PXA3xx
/*
* pxa3xx_finish_suspend() - forces CPU into sleep state (S2D3C4)
*/
ENTRY(pxa3xx_finish_suspend)
	mov	r0, #0x06		@ S2D3C4 mode
	mcr	p14, 0, r0, c7, c0, 0	@ enter sleep
20:	b	20b			@ waiting for sleep; never returns
#endif /* CONFIG_PXA3xx */
#ifdef CONFIG_PXA27x
/*
* pxa27x_finish_suspend()
*
* Forces CPU into sleep state.
*
* r0 = value for PWRMODE M field for desired sleep state
*/
ENTRY(pxa27x_finish_suspend)
	@ Put the processor to sleep
	@ (also workaround for sighting 28071)
	@ Stages everything in registers, then tail-branches into the
	@ shared cache-line-aligned pxa_cpu_do_suspend sequence.

	@ prepare value for sleep mode
	mov	r1, r0				@ sleep mode

	@ prepare pointer to physical address 0 (virtual mapping in generic.c)
	mov	r2, #UNCACHED_PHYS_0

	@ prepare SDRAM refresh settings
	ldr	r4, =MDREFR
	ldr	r5, [r4]

	@ enable SDRAM self-refresh mode
	orr	r5, r5, #MDREFR_SLFRSH

	@ set SDCLKx divide-by-2 bits (this is part of a workaround for Errata 50)
	ldr	r6, =MDREFR_KDIV
	orr	r5, r5, r6

	@ Intel PXA270 Specification Update notes problems sleeping
	@ with core operating above 91 MHz
	@ (see Errata 50, ...processor does not exit from sleep...)

	ldr	r6, =CCCR
	ldr	r8, [r6]		@ keep original value for resume

	ldr	r7, =CCCR_SLEEP		@ prepare CCCR sleep value
	mov	r0, #0x2		@ prepare value for CLKCFG

	@ align execution to a cache line
	b	pxa_cpu_do_suspend
#endif
#ifdef CONFIG_PXA25x
/*
* pxa25x_finish_suspend()
*
* Forces CPU into sleep state.
*
* r0 = value for PWRMODE M field for desired sleep state
*/
ENTRY(pxa25x_finish_suspend)
	@ Stages sleep-mode, MDREFR and CCCR values in registers, then
	@ tail-branches into the shared pxa_cpu_do_suspend sequence.

	@ prepare value for sleep mode
	mov	r1, r0				@ sleep mode

	@ prepare pointer to physical address 0 (virtual mapping in generic.c)
	mov	r2, #UNCACHED_PHYS_0

	@ prepare SDRAM refresh settings
	ldr	r4, =MDREFR
	ldr	r5, [r4]

	@ enable SDRAM self-refresh mode
	orr	r5, r5, #MDREFR_SLFRSH

	@ Intel PXA255 Specification Update notes problems
	@ about suspending with PXBus operating above 133MHz
	@ (see Errata 31, GPIO output signals, ... unpredictable in sleep
	@
	@ We keep the change-down close to the actual suspend on SDRAM
	@ as possible to eliminate messing about with the refresh clock
	@ as the system will restore with the original speed settings
	@
	@ Ben Dooks, 13-Sep-2004

	ldr	r6, =CCCR
	ldr	r8, [r6]		@ keep original value for resume

	@ ensure x1 for run and turbo mode with memory clock
	bic	r7, r8, #CCCR_M_MASK | CCCR_N_MASK
	orr	r7, r7, #(1<<5) | (2<<7)	@ M=1, N=2

	@ check that the memory frequency is within limits
	and	r14, r7, #CCCR_L_MASK
	teq	r14, #1
	bicne	r7, r7, #CCCR_L_MASK
	orrne	r7, r7, #1		@@ 99.53MHz

	@ get ready for the change

	@ note, turbo is not preserved over sleep so there is no
	@ point in preserving it here. we save it on the stack with the
	@ other CP registers instead.
	mov	r0, #0
	mcr	p14, 0, r0, c6, c0, 0	@ clear CLKCFG
	orr	r0, r0, #2		@ initiate change bit
	b	pxa_cpu_do_suspend
#endif
	.ltorg
	.align	5
pxa_cpu_do_suspend:
	@ Shared final suspend sequence for PXA25x/27x.  Expects:
	@  r1 = PWRMODE value, r2 = phys-0 mapping, r4 = &MDREFR,
	@  r5 = MDREFR sleep value, r6 = &CCCR, r7 = CCCR sleep value,
	@  r8 = original CCCR (for resume), r0 = CLKCFG command.

	@ All needed values are now in registers.
	@ These last instructions should be in cache

	@ initiate the frequency change...
	str	r7, [r6]
	mcr	p14, 0, r0, c6, c0, 0	@ write CLKCFG

	@ restore the original cpu speed value for resume
	str	r8, [r6]

	@ need 6 13-MHz cycles before changing PWRMODE
	@ just set frequency to 91-MHz... 6*91/13 = 42

	mov	r0, #42
10:	subs	r0, r0, #1
	bne	10b

	@ Do not reorder...
	@ Intel PXA270 Specification Update notes problems performing
	@ external accesses after SDRAM is put in self-refresh mode
	@ (see Errata 38 ...hangs when entering self-refresh mode)

	@ force address lines low by reading at physical address 0
	ldr	r3, [r2]

	@ put SDRAM into self-refresh
	str	r5, [r4]

	@ enter sleep mode
	mcr	p14, 0, r1, c7, c0, 0	@ PWRMODE

20:	b	20b			@ loop waiting for sleep
|
aixcc-public/challenge-001-exemplar-source
| 3,545
|
arch/arm/mach-omap2/omap-headsmp.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Secondary CPU startup routine source file.
*
* Copyright (C) 2009-2014 Texas Instruments, Inc.
*
* Author:
* Santosh Shilimkar <santosh.shilimkar@ti.com>
*
* Interface functions needed for the SMP. This file is based on arm
* realview smp platform.
* Copyright (c) 2003 ARM Limited.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include "omap44xx.h"
/* Physical address needed since MMU not enabled yet on secondary core */
#define AUX_CORE_BOOT0_PA 0x48281800
#define API_HYP_ENTRY 0x102
/*
 * Generic secondary-CPU entry point.  With SMP enabled, hand off to the
 * common ARM secondary_startup; otherwise a secondary core should never
 * be running, so park it in a low-power wfi loop forever.
 */
ENTRY(omap_secondary_startup)
#ifdef CONFIG_SMP
b secondary_startup
#else
/* Should never get here */
again: wfi
b again
#endif
ENDPROC(omap_secondary_startup)
/*
* OMAP5 specific entry point for secondary CPU to jump from ROM
* code. This routine also provides a holding flag into which
* secondary core is held until we're ready for it to initialise.
* The primary core will update this flag using a hardware
* register AuxCoreBoot0.
*/
@ Spin until the primary core writes this CPU's id into the release
@ field of AuxCoreBoot0, then fall into the generic startup path.
ENTRY(omap5_secondary_startup)
wait: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
ldr r0, [r2]
mov r0, r0, lsr #5 @ isolate the core-release field of AuxCoreBoot0
mrc p15, 0, r4, c0, c0, 5 @ read MPIDR
and r4, r4, #0x0f @ this CPU's id (affinity level 0)
cmp r0, r4 @ released yet?
bne wait
b omap_secondary_startup
ENDPROC(omap5_secondary_startup)
/*
* Same as omap5_secondary_startup except we call into the ROM to
* enable HYP mode first. This is called instead of
* omap5_secondary_startup if the primary CPU was put into HYP mode by
* the boot loader.
*/
.arch armv7-a
.arch_extension sec
@ As omap5_secondary_startup, but first asks the ROM (via smc) to switch
@ this core into HYP mode; the ROM resumes execution at hyp_boot.
ENTRY(omap5_secondary_hyp_startup)
wait_2: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
ldr r0, [r2]
mov r0, r0, lsr #5 @ isolate the core-release field of AuxCoreBoot0
mrc p15, 0, r4, c0, c0, 5 @ read MPIDR
and r4, r4, #0x0f @ this CPU's id (affinity level 0)
cmp r0, r4 @ released yet?
bne wait_2
ldr r12, =API_HYP_ENTRY @ ROM service id: enter HYP mode
badr r0, hyp_boot @ address the ROM returns to, now in HYP
smc #0
hyp_boot:
b omap_secondary_startup
ENDPROC(omap5_secondary_hyp_startup)
/*
* OMAP4 specific entry point for secondary CPU to jump from ROM
* code. This routine also provides a holding flag into which
* secondary core is held until we're ready for it to initialise.
* The primary core will update this flag using a hardware
* register AuxCoreBoot0.
*/
@ Spin reading AuxCoreBoot0 through the secure monitor (service 0x103)
@ until the primary writes this CPU's id, then enter generic startup.
ENTRY(omap4_secondary_startup)
hold: ldr r12,=0x103 @ secure monitor service: read AuxCoreBoot0
dsb
smc #0 @ read from AuxCoreBoot0
mov r0, r0, lsr #9 @ isolate the core-release field
mrc p15, 0, r4, c0, c0, 5 @ read MPIDR
and r4, r4, #0x0f @ this CPU's id (affinity level 0)
cmp r0, r4
bne hold
/*
 * we've been released from the wait loop, secondary_stack
 * should now contain the SVC stack for this core
 */
b omap_secondary_startup
ENDPROC(omap4_secondary_startup)
@ OMAP4460 variant of omap4_secondary_startup: additionally re-enables
@ the GIC distributor on the wakeup path (see workaround note below).
ENTRY(omap4460_secondary_startup)
hold_2: ldr r12,=0x103 @ secure monitor service: read AuxCoreBoot0
dsb
smc #0 @ read from AuxCoreBoot0
mov r0, r0, lsr #9 @ isolate the core-release field
mrc p15, 0, r4, c0, c0, 5 @ read MPIDR
and r4, r4, #0x0f @ this CPU's id (affinity level 0)
cmp r0, r4
bne hold_2
/*
 * GIC distributor control register has changed between
 * CortexA9 r1pX and r2pX. The Control Register secure
 * banked version is now composed of 2 bits:
 * bit 0 == Secure Enable
 * bit 1 == Non-Secure Enable
 * The Non-Secure banked register has not changed
 * Because the ROM Code is based on the r1pX GIC, the CPU1
 * GIC restoration will cause a problem to CPU0 Non-Secure SW.
 * The workaround must be:
 * 1) Before doing the CPU1 wakeup, CPU0 must disable
 * the GIC distributor
 * 2) CPU1 must re-enable the GIC distributor on
 * it's wakeup path.
 */
ldr r1, =OMAP44XX_GIC_DIST_BASE
ldr r0, [r1]
orr r0, #1 @ set the distributor enable bit
str r0, [r1]
/*
 * we've been released from the wait loop, secondary_stack
 * should now contain the SVC stack for this core
 */
b omap_secondary_startup
ENDPROC(omap4460_secondary_startup)
|
aixcc-public/challenge-001-exemplar-source
| 2,448
|
arch/arm/mach-omap2/omap-smc.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* OMAP34xx and OMAP44xx secure APIs file.
*
* Copyright (C) 2010 Texas Instruments, Inc.
* Written by Santosh Shilimkar <santosh.shilimkar@ti.com>
*
* Copyright (C) 2012 Ivaylo Dimitrov <freemangordon@abv.bg>
* Copyright (C) 2013 Pali Rohár <pali@kernel.org>
*/
#include <linux/linkage.h>
/*
* This is common routine to manage secure monitor API
* used to modify the PL310 secure registers.
* 'r0' contains the value to be modified and 'r12' contains
* the monitor API number. It uses few CPU registers
* internally and hence they need be backed up including
* link register "lr".
* Function signature : void _omap_smc1(u32 fn, u32 arg)
*/
.arch armv7-a
.arch_extension sec
@ void _omap_smc1(u32 fn, u32 arg) — see header comment above.
ENTRY(_omap_smc1)
stmfd sp!, {r2-r12, lr} @ monitor may clobber these; save them
mov r12, r0 @ r12 = monitor API number
mov r0, r1 @ r0 = value to be modified
dsb @ settle memory before entering the monitor
smc #0
ldmfd sp!, {r2-r12, pc}
ENDPROC(_omap_smc1)
/**
* u32 omap_smc2(u32 id, u32 flag, u32 pargs)
* Low level common routine for secure HAL and PPA APIs.
* @id: Application ID of HAL APIs
* @flag: Flag to indicate the criticality of operation
* @pargs: Physical address of parameter list starting
* with number of parameters
*/
@ u32 omap_smc2(u32 id, u32 flag, u32 pargs) — see header comment above.
@ Shuffles C args into the register layout the secure ROM expects.
ENTRY(omap_smc2)
stmfd sp!, {r4-r12, lr}
mov r3, r2 @ r3 = physical address of parameter list
mov r2, r1 @ r2 = criticality flag
mov r1, #0x0 @ Process ID
mov r6, #0xff @ indicate new task call
mov r12, #0x00 @ Secure Service ID
mov r7, #0
mcr p15, 0, r7, c7, c5, 6 @ invalidate branch predictor array (BPIALL)
dsb
dmb
smc #0
ldmfd sp!, {r4-r12, pc}
ENDPROC(omap_smc2)
/**
* u32 omap_smc3(u32 service_id, u32 process_id, u32 flag, u32 pargs)
* Low level common routine for secure HAL and PPA APIs via smc #1
* r0 - @service_id: Secure Service ID
* r1 - @process_id: Process ID
* r2 - @flag: Flag to indicate the criticality of operation
* r3 - @pargs: Physical address of parameter list
*/
@ u32 omap_smc3(service_id, process_id, flag, pargs) — see header above.
@ r1-r3 already hold process id, flag and pargs in ROM-expected order.
ENTRY(omap_smc3)
stmfd sp!, {r4-r11, lr}
mov r12, r0 @ Copy the secure service ID
mov r6, #0xff @ Indicate new Task call
dsb @ Memory Barrier (not sure if needed, copied from omap_smc2)
smc #1 @ Call PPA service
ldmfd sp!, {r4-r11, pc}
ENDPROC(omap_smc3)
@ Write AuxCoreBoot0 through the secure monitor (service 0x104);
@ r0/r1 are passed through to the monitor unchanged.
ENTRY(omap_modify_auxcoreboot0)
stmfd sp!, {r1-r12, lr}
ldr r12, =0x104 @ secure monitor service id
dsb
smc #0
ldmfd sp!, {r1-r12, pc}
ENDPROC(omap_modify_auxcoreboot0)
@ Set the secondary-CPU boot address via the secure monitor
@ (service 0x105); r0/r1 are passed through unchanged.
ENTRY(omap_auxcoreboot_addr)
stmfd sp!, {r2-r12, lr}
ldr r12, =0x105 @ secure monitor service id
dsb
smc #0
ldmfd sp!, {r2-r12, pc}
ENDPROC(omap_auxcoreboot_addr)
@ Read AuxCoreBoot0 through the secure monitor (service 0x103);
@ the monitor's result is returned in r0.
ENTRY(omap_read_auxcoreboot0)
stmfd sp!, {r2-r12, lr}
ldr r12, =0x103 @ secure monitor service id
dsb
smc #0
ldmfd sp!, {r2-r12, pc}
ENDPROC(omap_read_auxcoreboot0)
|
aixcc-public/challenge-001-exemplar-source
| 5,678
|
arch/arm/mach-omap2/sleep33xx.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Low level suspend code for AM33XX SoCs
*
* Copyright (C) 2012-2018 Texas Instruments Incorporated - https://www.ti.com/
* Dave Gerlach, Vaibhav Bedia
*/
#include <linux/linkage.h>
#include <linux/platform_data/pm33xx.h>
#include <linux/ti-emif-sram.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include "iomap.h"
#include "cm33xx.h"
#include "pm-asm-offsets.h"
#define AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED 0x00030000
#define AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE 0x0003
#define AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE 0x0002
/* replicated define because linux/bitops.h cannot be included in assembly */
#define BIT(nr) (1 << (nr))
.arm
.arch armv7-a
.align 3
@ int am33xx_do_wfi(unsigned long wfi_flags)
@ Executes the low-power WFI sequence selected by the WFI_FLAG_* bits
@ in r0 (cache flush, EMIF self-refresh/save/disable, wake-M3 handshake).
@ Falls through past the wfi only on an abort (late interrupt); the
@ abort path undoes the changes and returns 1 in r0.
@ Register roles: r4 = wfi_flags (reloaded after the cache flush),
@ r9 = base of am33xx_emif_sram_table (EMIF function pointers).
ENTRY(am33xx_do_wfi)
stmfd sp!, {r4 - r11, lr} @ save registers on stack
/* Save wfi_flags arg to data space */
mov r4, r0
adr r3, am33xx_pm_ro_sram_data
ldr r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
str r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]
/* Only flush cache if we know we are losing MPU context */
tst r4, #WFI_FLAG_FLUSH_CACHE
beq cache_skip_flush
/*
* Flush all data from the L1 and L2 data cache before disabling
* SCTLR.C bit.
*/
ldr r1, kernel_flush
blx r1
/*
* Clear the SCTLR.C bit to prevent further data cache
* allocation. Clearing SCTLR.C would make all the data accesses
* strongly ordered and would not hit the cache.
*/
mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #(1 << 2) @ Disable the C bit
mcr p15, 0, r0, c1, c0, 0
isb
/*
* Invalidate L1 and L2 data cache.
*/
ldr r1, kernel_flush
blx r1
adr r3, am33xx_pm_ro_sram_data
ldr r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
ldr r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]
cache_skip_flush:
/* Check if we want self refresh */
tst r4, #WFI_FLAG_SELF_REFRESH
beq emif_skip_enter_sr
adr r9, am33xx_emif_sram_table
ldr r3, [r9, #EMIF_PM_ENTER_SR_OFFSET]
blx r3
emif_skip_enter_sr:
/* Only necessary if PER is losing context */
tst r4, #WFI_FLAG_SAVE_EMIF
beq emif_skip_save
ldr r3, [r9, #EMIF_PM_SAVE_CONTEXT_OFFSET]
blx r3
emif_skip_save:
/* Only can disable EMIF if we have entered self refresh */
tst r4, #WFI_FLAG_SELF_REFRESH
beq emif_skip_disable
/* Disable EMIF */
ldr r1, virt_emif_clkctrl
ldr r2, [r1]
bic r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
str r2, [r1]
ldr r1, virt_emif_clkctrl
wait_emif_disable:
ldr r2, [r1]
mov r3, #AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED
cmp r2, r3
bne wait_emif_disable
emif_skip_disable:
tst r4, #WFI_FLAG_WAKE_M3
beq wkup_m3_skip
/*
* For the MPU WFI to be registered as an interrupt
* to WKUP_M3, MPU_CLKCTRL.MODULEMODE needs to be set
* to DISABLED
*/
ldr r1, virt_mpu_clkctrl
ldr r2, [r1]
bic r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
str r2, [r1]
wkup_m3_skip:
/*
* Execute an ISB instruction to ensure that all of the
* CP15 register changes have been committed.
*/
isb
/*
* Execute a barrier instruction to ensure that all cache,
* TLB and branch predictor maintenance operations issued
* have completed.
*/
dsb
dmb
/*
* Execute a WFI instruction and wait until the
* STANDBYWFI output is asserted to indicate that the
* CPU is in idle and low power state. CPU can speculatively
* prefetch the instructions so add NOPs after WFI. Thirteen
* NOPs as per Cortex-A8 pipeline.
*/
wfi
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
/* We come here in case of an abort due to a late interrupt */
/* Set MPU_CLKCTRL.MODULEMODE back to ENABLE */
ldr r1, virt_mpu_clkctrl
mov r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
str r2, [r1]
/* Re-enable EMIF */
ldr r1, virt_emif_clkctrl
mov r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
str r2, [r1]
wait_emif_enable:
ldr r3, [r1]
cmp r2, r3
bne wait_emif_enable
/* Only necessary if PER is losing context */
tst r4, #WFI_FLAG_SELF_REFRESH
beq emif_skip_exit_sr_abt
adr r9, am33xx_emif_sram_table
ldr r1, [r9, #EMIF_PM_ABORT_SR_OFFSET]
blx r1
emif_skip_exit_sr_abt:
tst r4, #WFI_FLAG_FLUSH_CACHE
beq cache_skip_restore
/*
* Set SCTLR.C bit to allow data cache allocation
*/
mrc p15, 0, r0, c1, c0, 0
orr r0, r0, #(1 << 2) @ Enable the C bit
mcr p15, 0, r0, c1, c0, 0
isb
cache_skip_restore:
/* Let the suspend code know about the abort */
mov r0, #1
ldmfd sp!, {r4 - r11, pc} @ restore regs and return
ENDPROC(am33xx_do_wfi)
.align
@ Byte offset of the resume entry point within the copied SRAM blob.
ENTRY(am33xx_resume_offset)
.word . - am33xx_do_wfi
@ Deep-sleep resume path: bring the EMIF module back, restore its
@ context and exit self-refresh via the SRAM EMIF function table,
@ then jump to the generic cpu_resume (physical address, MMU off).
ENTRY(am33xx_resume_from_deep_sleep)
/* Re-enable EMIF */
ldr r0, phys_emif_clkctrl
mov r1, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
str r1, [r0]
wait_emif_enable1:
ldr r2, [r0]
cmp r1, r2
bne wait_emif_enable1
adr r9, am33xx_emif_sram_table
ldr r1, [r9, #EMIF_PM_RESTORE_CONTEXT_OFFSET]
blx r1
ldr r1, [r9, #EMIF_PM_EXIT_SR_OFFSET]
blx r1
resume_to_ddr:
/* We are back. Branch to the common CPU resume routine */
mov r0, #0
ldr pc, resume_addr
ENDPROC(am33xx_resume_from_deep_sleep)
/*
 * Local variables
 */
.align
kernel_flush:
.word v7_flush_dcache_all
virt_mpu_clkctrl:
.word AM33XX_CM_MPU_MPU_CLKCTRL
virt_emif_clkctrl:
.word AM33XX_CM_PER_EMIF_CLKCTRL
@ Physical EMIF CLKCTRL address, used on the MMU-off resume path.
phys_emif_clkctrl:
.word (AM33XX_CM_BASE + AM33XX_CM_PER_MOD + \
AM33XX_CM_PER_EMIF_CLKCTRL_OFFSET)
.align 3
/* DDR related defines */
@ Reserved space the EMIF driver fills with its PM function pointers.
am33xx_emif_sram_table:
.space EMIF_PM_FUNCTIONS_SIZE
@ Descriptor handed to the C code: addresses/sizes of the SRAM blob.
ENTRY(am33xx_pm_sram)
.word am33xx_do_wfi
.word am33xx_do_wfi_sz
.word am33xx_resume_offset
.word am33xx_emif_sram_table
.word am33xx_pm_ro_sram_data
resume_addr:
.word cpu_resume - PAGE_OFFSET + 0x80000000
.align 3
ENTRY(am33xx_pm_ro_sram_data)
.space AMX3_PM_RO_SRAM_DATA_SIZE
@ Total size of the code/data to copy into SRAM.
ENTRY(am33xx_do_wfi_sz)
.word . - am33xx_do_wfi
|
aixcc-public/challenge-001-exemplar-source
| 9,890
|
arch/arm/mach-omap2/sram243x.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* linux/arch/arm/mach-omap2/sram243x.S
*
* Omap2 specific functions that need to be run in internal SRAM
*
* (C) Copyright 2004
* Texas Instruments, <www.ti.com>
* Richard Woodruff <r-woodruff2@ti.com>
*
* Richard Woodruff notes that any changes to this code must be carefully
* audited and tested to ensure that they don't cause a TLB miss while
* the SDRAM is inaccessible. Such a situation will crash the system
* since it will cause the ARM MMU to attempt to walk the page tables.
* These crashes may be intermittent.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include "soc.h"
#include "iomap.h"
#include "prm2xxx.h"
#include "cm2xxx.h"
#include "sdrc.h"
.text
.align 3
@ omap243x_sram_ddr_init(r0 = &dll_status out, r1 = DLL ctrl value,
@                        r2 = CS1-vs-CS0 flag, r3 = force-unlock flag)
@ Runs from SRAM: drops frequency/voltage, briefly locks the SDRC DLL
@ to sample a locked value, then restores frequency/voltage and the
@ caller-supplied DLL control value.
ENTRY(omap243x_sram_ddr_init)
stmfd sp!, {r0 - r12, lr} @ save registers on stack
mov r12, r2 @ capture CS1 vs CS0
mov r8, r3 @ capture force parameter
/* frequency shift down */
ldr r2, omap243x_sdi_cm_clksel2_pll @ get address of dpllout reg
mov r3, #0x1 @ value for 1x operation
str r3, [r2] @ go to L1-freq operation
/* voltage shift down */
mov r9, #0x1 @ set up for L1 voltage call
bl voltage_shift @ go drop voltage
/* dll lock mode */
ldr r11, omap243x_sdi_sdrc_dlla_ctrl @ addr of dlla ctrl
ldr r10, [r11] @ get current val
cmp r12, #0x1 @ cs1 base (2422 es2.05/1)
addeq r11, r11, #0x8 @ if cs1 base, move to DLLB
mvn r9, #0x4 @ mask to get clear bit2
and r10, r10, r9 @ clear bit2 for lock mode.
orr r10, r10, #0x8 @ make sure DLL on (es2 bit pos)
orr r10, r10, #0x2 @ 90 degree phase for all below 133MHz
str r10, [r11] @ commit to DLLA_CTRL
bl i_dll_wait @ wait for dll to lock
/* get dll value */
add r11, r11, #0x4 @ get addr of status reg
ldr r10, [r11] @ get locked value
/* voltage shift up */
mov r9, #0x0 @ shift back to L0-voltage
bl voltage_shift @ go raise voltage
/* frequency shift up */
mov r3, #0x2 @ value for 2x operation
str r3, [r2] @ go to L0-freq operation
/* reset entry mode for dllctrl */
sub r11, r11, #0x4 @ move from status to ctrl
cmp r12, #0x1 @ normalize if cs1 based
subeq r11, r11, #0x8 @ possibly back to DLLA
cmp r8, #0x1 @ if forced unlock exit
orreq r1, r1, #0x4 @ make sure exit with unlocked value
str r1, [r11] @ restore DLLA_CTRL high value
add r11, r11, #0x8 @ move to DLLB_CTRL addr
str r1, [r11] @ set value DLLB_CTRL
bl i_dll_wait @ wait for possible lock
/* set up for return, DDR should be good */
str r10, [r0] @ write dll_status and return counter
ldmfd sp!, {r0 - r12, pc} @ restore regs and return
/* ensure the DLL has relocked (busy-wait; clobbers r4) */
i_dll_wait:
mov r4, #0x800 @ delay DLL relock, min 0x400 L3 clocks
i_dll_delay:
subs r4, r4, #0x1
bne i_dll_delay
ret lr
/*
 * shift up or down voltage, use R9 as input to tell level.
 * wait for it to finish, use 32k sync counter, 1tick=31uS.
 */
voltage_shift:
ldr r4, omap243x_sdi_prcm_voltctrl @ get addr of volt ctrl.
ldr r5, [r4] @ get value.
ldr r6, prcm_mask_val @ get value of mask
and r5, r5, r6 @ apply mask to clear bits
orr r5, r5, r9 @ build value for L0/L1-volt operation.
str r5, [r4] @ set up for change.
mov r3, #0x4000 @ get val for force
orr r5, r5, r3 @ build value for force
str r5, [r4] @ Force transition to L1
ldr r3, omap243x_sdi_timer_32ksynct_cr @ get addr of counter
ldr r5, [r3] @ get value
add r5, r5, #0x3 @ give it at most 93uS
volt_delay:
ldr r7, [r3] @ get timer value
cmp r5, r7 @ time up?
bhi volt_delay @ not yet->branch
ret lr @ back to caller.
@ Literal pool: register addresses resolved at link time.
omap243x_sdi_cm_clksel2_pll:
.word OMAP2430_CM_REGADDR(PLL_MOD, CM_CLKSEL2)
omap243x_sdi_sdrc_dlla_ctrl:
.word OMAP243X_SDRC_REGADDR(SDRC_DLLA_CTRL)
omap243x_sdi_prcm_voltctrl:
.word OMAP2430_PRCM_VOLTCTRL
prcm_mask_val:
.word 0xFFFF3FFC
omap243x_sdi_timer_32ksynct_cr:
.word OMAP2_L4_IO_ADDRESS(OMAP2430_32KSYNCT_BASE + 0x010)
@ Size of the blob to copy into SRAM.
ENTRY(omap243x_sram_ddr_init_sz)
.word . - omap243x_sram_ddr_init
/*
* Reprograms memory timings.
* r0 = [PRCM_FULL | PRCM_HALF] r1 = SDRC_DLLA_CTRL value r2 = [DDR | SDR]
* PRCM_FULL = 2, PRCM_HALF = 1, DDR = 1, SDR = 0
*/
.align 3
@ See the header comment above: r0 = PRCM_FULL/PRCM_HALF,
@ r1 = SDRC_DLLA_CTRL value, r2 = DDR(1)/SDR(0).
@ Scales the SDRC refresh rate with the new core speed, switches the
@ PLL divider, shifts voltage in the right order, and (for DDR only)
@ rewrites the DLL controls and waits for relock.  Runs from SRAM.
ENTRY(omap243x_sram_reprogram_sdrc)
stmfd sp!, {r0 - r10, lr} @ save registers on stack
mov r3, #0x0 @ clear for mrc call
mcr p15, 0, r3, c7, c10, 4 @ memory barrier, finish ARM SDR/DDR
nop
nop
ldr r6, omap243x_srs_sdrc_rfr_ctrl @ get addr of refresh reg
ldr r5, [r6] @ get value
mov r5, r5, lsr #8 @ isolate rfr field and drop burst
cmp r0, #0x1 @ going to half speed?
movne r9, #0x0 @ if up set flag up for pre up, hi volt
blne voltage_shift_c @ adjust voltage
cmp r0, #0x1 @ going to half speed (post branch link)
moveq r5, r5, lsr #1 @ divide by 2 if to half
movne r5, r5, lsl #1 @ mult by 2 if to full
mov r5, r5, lsl #8 @ put rfr field back into place
add r5, r5, #0x1 @ turn on burst of 1
ldr r4, omap243x_srs_cm_clksel2_pll @ get address of out reg
ldr r3, [r4] @ get curr value
orr r3, r3, #0x3
bic r3, r3, #0x3 @ clear lower bits
orr r3, r3, r0 @ new state value
str r3, [r4] @ set new state (pll/x, x=1 or 2)
nop
nop
moveq r9, #0x1 @ if speed down, post down, drop volt
bleq voltage_shift_c
mcr p15, 0, r3, c7, c10, 4 @ memory barrier
str r5, [r6] @ set new RFR_1 value
add r6, r6, #0x30 @ get RFR_2 addr
str r5, [r6] @ set RFR_2
nop
cmp r2, #0x1 @ (SDR or DDR) do we need to adjust DLL
bne freq_out @ leave if SDR, no DLL function
/* With DDR, we need to take care of the DLL for the frequency change */
ldr r2, omap243x_srs_sdrc_dlla_ctrl @ addr of dlla ctrl
str r1, [r2] @ write out new SDRC_DLLA_CTRL
add r2, r2, #0x8 @ addr to SDRC_DLLB_CTRL
str r1, [r2] @ commit to SDRC_DLLB_CTRL
mov r1, #0x2000 @ wait DLL relock, min 0x400 L3 clocks
dll_wait:
subs r1, r1, #0x1
bne dll_wait
freq_out:
ldmfd sp!, {r0 - r10, pc} @ restore regs and return
/*
 * shift up or down voltage, use R9 as input to tell level.
 * wait for it to finish, use 32k sync counter, 1tick=31uS.
 */
voltage_shift_c:
ldr r10, omap243x_srs_prcm_voltctrl @ get addr of volt ctrl
ldr r8, [r10] @ get value
ldr r7, ddr_prcm_mask_val @ get value of mask
and r8, r8, r7 @ apply mask to clear bits
orr r8, r8, r9 @ build value for L0/L1-volt operation.
str r8, [r10] @ set up for change.
mov r7, #0x4000 @ get val for force
orr r8, r8, r7 @ build value for force
str r8, [r10] @ Force transition to L1
ldr r10, omap243x_srs_timer_32ksynct @ get addr of counter
ldr r8, [r10] @ get value
add r8, r8, #0x2 @ give it at most 62uS (min 31+)
volt_delay_c:
ldr r7, [r10] @ get timer value
cmp r8, r7 @ time up?
bhi volt_delay_c @ not yet->branch
ret lr @ back to caller
@ Literal pool: register addresses resolved at link time.
omap243x_srs_cm_clksel2_pll:
.word OMAP2430_CM_REGADDR(PLL_MOD, CM_CLKSEL2)
omap243x_srs_sdrc_dlla_ctrl:
.word OMAP243X_SDRC_REGADDR(SDRC_DLLA_CTRL)
omap243x_srs_sdrc_rfr_ctrl:
.word OMAP243X_SDRC_REGADDR(SDRC_RFR_CTRL_0)
omap243x_srs_prcm_voltctrl:
.word OMAP2430_PRCM_VOLTCTRL
ddr_prcm_mask_val:
.word 0xFFFF3FFC
omap243x_srs_timer_32ksynct:
.word OMAP2_L4_IO_ADDRESS(OMAP2430_32KSYNCT_BASE + 0x010)
@ Size of the blob to copy into SRAM.
ENTRY(omap243x_sram_reprogram_sdrc_sz)
.word . - omap243x_sram_reprogram_sdrc
/*
* Set dividers and pll. Also recalculate DLL value for DDR and unlock mode.
*/
.align 3
@ omap243x_sram_set_prcm(r0 = DPLL divider value, r1 = SDRC RFR value,
@                        r2 = 1 to stay in bypass)
@ Runs from SRAM.  Preloads itself into the I-cache (pbegin..pend),
@ drops the DPLL into fast-relock bypass, programs the new dividers,
@ optionally relocks the DPLL, then updates the SDRC refresh timing
@ and briefly locks the DLLs.
ENTRY(omap243x_sram_set_prcm)
stmfd sp!, {r0-r12, lr} @ regs to stack
adr r4, pbegin @ addr of preload start
adr r8, pend @ addr of preload end
mcrr p15, 1, r8, r4, c12 @ preload into icache
pbegin:
/* move into fast relock bypass */
ldr r8, omap243x_ssp_pll_ctl @ get addr
ldr r5, [r8] @ get val
mvn r6, #0x3 @ clear mask
and r5, r5, r6 @ clear field
orr r7, r5, #0x2 @ fast relock val
str r7, [r8] @ go to fast relock
ldr r4, omap243x_ssp_pll_stat @ addr of stat
block:
/* wait for bypass */
ldr r8, [r4] @ stat value
and r8, r8, #0x3 @ mask for stat
cmp r8, #0x1 @ there yet
bne block @ loop if not
/* set new dpll dividers _after_ in bypass */
ldr r4, omap243x_ssp_pll_div @ get addr
str r0, [r4] @ set dpll ctrl val
ldr r4, omap243x_ssp_set_config @ get addr
mov r8, #1 @ valid cfg msk
str r8, [r4] @ make dividers take
mov r4, #100 @ dead spin a bit
wait_a_bit:
subs r4, r4, #1 @ dec loop
bne wait_a_bit @ delay done?
/* check if staying in bypass */
cmp r2, #0x1 @ stay in bypass?
beq pend @ jump over dpll relock
/* relock DPLL with new vals */
ldr r5, omap243x_ssp_pll_stat @ get addr
ldr r4, omap243x_ssp_pll_ctl @ get addr
orr r8, r7, #0x3 @ val for lock dpll
str r8, [r4] @ set val
mov r0, #1000 @ dead spin a bit
wait_more:
subs r0, r0, #1 @ dec loop
bne wait_more @ delay done?
wait_lock:
ldr r8, [r5] @ get lock val
and r8, r8, #3 @ isolate field
cmp r8, #2 @ locked?
bne wait_lock @ wait if not
pend:
/* update memory timings & briefly lock dll */
ldr r4, omap243x_ssp_sdrc_rfr @ get addr
str r1, [r4] @ update refresh timing
ldr r11, omap243x_ssp_dlla_ctrl @ get addr of DLLA ctrl
ldr r10, [r11] @ get current val
mvn r9, #0x4 @ mask to get clear bit2
and r10, r10, r9 @ clear bit2 for lock mode
orr r10, r10, #0x8 @ make sure DLL on (es2 bit pos)
str r10, [r11] @ commit to DLLA_CTRL
add r11, r11, #0x8 @ move to dllb
str r10, [r11] @ hit DLLB also
mov r4, #0x800 @ relock time (min 0x400 L3 clocks)
wait_dll_lock:
subs r4, r4, #0x1
bne wait_dll_lock
nop
ldmfd sp!, {r0-r12, pc} @ restore regs and return
@ Literal pool: register addresses resolved at link time.
omap243x_ssp_set_config:
.word OMAP2430_PRCM_CLKCFG_CTRL
omap243x_ssp_pll_ctl:
.word OMAP2430_CM_REGADDR(PLL_MOD, CM_CLKEN)
omap243x_ssp_pll_stat:
.word OMAP2430_CM_REGADDR(PLL_MOD, CM_IDLEST)
omap243x_ssp_pll_div:
.word OMAP2430_CM_REGADDR(PLL_MOD, CM_CLKSEL1)
omap243x_ssp_sdrc_rfr:
.word OMAP243X_SDRC_REGADDR(SDRC_RFR_CTRL_0)
omap243x_ssp_dlla_ctrl:
.word OMAP243X_SDRC_REGADDR(SDRC_DLLA_CTRL)
@ Size of the blob to copy into SRAM.
ENTRY(omap243x_sram_set_prcm_sz)
.word . - omap243x_sram_set_prcm
|
aixcc-public/challenge-001-exemplar-source
| 2,784
|
arch/arm/mach-omap2/sleep24xx.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* linux/arch/arm/mach-omap2/sleep.S
*
* (C) Copyright 2004
* Texas Instruments, <www.ti.com>
* Richard Woodruff <r-woodruff2@ti.com>
*
* (C) Copyright 2006 Nokia Corporation
* Fixed idle loop sleep
* Igor Stoppa <igor.stoppa@nokia.com>
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include "omap24xx.h"
#include "sdrc.h"
/* First address of reserved address space? apparently valid for OMAP2 & 3 */
#define A_SDRC0_V (0xC0000000)
.text
/*
* omap24xx_cpu_suspend() - Forces OMAP into deep sleep state by completing
* SDRC shutdown then ARM shutdown. Upon wake MPU is back on so just restore
* SDRC.
*
* Input:
* R0 : DLL ctrl value pre-Sleep
* R1 : SDRC_DLLA_CTRL
* R2 : SDRC_POWER
*
* If the DPLL is going to AutoIdle: it seems like the DPLL may be back on
* when we get called, but the DLL probably isn't. We will wait a bit more in
* case the DPLL isn't quite there yet. The code will wait on DLL for DDR even
* if in unlocked mode.
*
* For less than 242x-ES2.2 upon wake from a sleep mode where the external
* oscillator was stopped, a timing bug exists where a non-stabilized 12MHz
* clock can pass into the PRCM can cause problems at DSP and IVA.
* To work around this the code will switch to the 32kHz source prior to sleep.
* Post sleep we will shift back to using the DPLL. Apparently,
* CM_IDLEST_CLKGEN does not reflect the full clock change so you need to wait
* 3x12MHz + 3x32kHz clocks for a full switch.
*
* The DLL load value is not kept in RETENTION or OFF. It needs to be restored
* at wake
*/
.align 3
@ See the header comment above for the full contract:
@ r0 = DLL ctrl value pre-sleep, r1 = SDRC_DLLA_CTRL, r2 = SDRC_POWER.
ENTRY(omap24xx_cpu_suspend)
stmfd sp!, {r0 - r12, lr} @ save registers on stack
mov r3, #0x0 @ clear for mcr call
mcr p15, 0, r3, c7, c10, 4 @ memory barrier, hope SDR/DDR finished
nop
nop
ldr r4, [r2] @ read SDRC_POWER
orr r4, r4, #0x40 @ enable self refresh on idle req
mov r5, #0x2000 @ set delay (DPLL relock + DLL relock)
str r4, [r2] @ make it so
nop
mcr p15, 0, r3, c7, c0, 4 @ wait for interrupt
nop
loop:
subs r5, r5, #0x1 @ awake, wait just a bit
bne loop
/* The DPLL has to be on before we take the DDR out of self refresh */
bic r4, r4, #0x40 @ now clear self refresh bit.
str r4, [r2] @ write to SDRC_POWER
ldr r4, A_SDRC0 @ make a clock happen
ldr r4, [r4] @ read A_SDRC0
nop @ start auto refresh only after clk ok
movs r0, r0 @ see if DDR or SDR
strne r0, [r1] @ rewrite DLLA to force DLL reload
addne r1, r1, #0x8 @ move to DLLB
strne r0, [r1] @ rewrite DLLB to force DLL reload
mov r5, #0x1000 @ spin to let the DLLs relock
loop2:
subs r5, r5, #0x1
bne loop2
/* resume */
ldmfd sp!, {r0 - r12, pc} @ restore regs and return
A_SDRC0:
.word A_SDRC0_V
@ Size of the blob to copy into SRAM.
ENTRY(omap24xx_cpu_suspend_sz)
.word . - omap24xx_cpu_suspend
|
aixcc-public/challenge-001-exemplar-source
| 14,885
|
arch/arm/mach-omap2/sleep34xx.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* (C) Copyright 2007
* Texas Instruments
* Karthik Dasu <karthik-dp@ti.com>
*
* (C) Copyright 2004
* Texas Instruments, <www.ti.com>
* Richard Woodruff <r-woodruff2@ti.com>
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include "omap34xx.h"
#include "iomap.h"
#include "cm3xxx.h"
#include "prm3xxx.h"
#include "sdrc.h"
#include "sram.h"
#include "control.h"
/*
* Registers access definitions
*/
#define SDRC_SCRATCHPAD_SEM_OFFS 0xc
#define SDRC_SCRATCHPAD_SEM_V OMAP343X_SCRATCHPAD_REGADDR\
(SDRC_SCRATCHPAD_SEM_OFFS)
#define PM_PREPWSTST_CORE_P OMAP3430_PRM_BASE + CORE_MOD +\
OMAP3430_PM_PREPWSTST
#define PM_PWSTCTRL_MPU_P OMAP3430_PRM_BASE + MPU_MOD + OMAP2_PM_PWSTCTRL
#define CM_IDLEST1_CORE_V OMAP34XX_CM_REGADDR(CORE_MOD, CM_IDLEST1)
#define CM_IDLEST_CKGEN_V OMAP34XX_CM_REGADDR(PLL_MOD, CM_IDLEST)
#define SRAM_BASE_P OMAP3_SRAM_PA
#define CONTROL_STAT OMAP343X_CTRL_BASE + OMAP343X_CONTROL_STATUS
#define CONTROL_MEM_RTA_CTRL (OMAP343X_CTRL_BASE +\
OMAP36XX_CONTROL_MEM_RTA_CTRL)
/* Move this as correct place is available */
#define SCRATCHPAD_MEM_OFFS 0x310
#define SCRATCHPAD_BASE_P (OMAP343X_CTRL_BASE +\
OMAP343X_CONTROL_MEM_WKUP +\
SCRATCHPAD_MEM_OFFS)
#define SDRC_POWER_V OMAP34XX_SDRC_REGADDR(SDRC_POWER)
#define SDRC_SYSCONFIG_P (OMAP343X_SDRC_BASE + SDRC_SYSCONFIG)
#define SDRC_MR_0_P (OMAP343X_SDRC_BASE + SDRC_MR_0)
#define SDRC_EMR2_0_P (OMAP343X_SDRC_BASE + SDRC_EMR2_0)
#define SDRC_MANUAL_0_P (OMAP343X_SDRC_BASE + SDRC_MANUAL_0)
#define SDRC_MR_1_P (OMAP343X_SDRC_BASE + SDRC_MR_1)
#define SDRC_EMR2_1_P (OMAP343X_SDRC_BASE + SDRC_EMR2_1)
#define SDRC_MANUAL_1_P (OMAP343X_SDRC_BASE + SDRC_MANUAL_1)
#define SDRC_DLLA_STATUS_V OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS)
#define SDRC_DLLA_CTRL_V OMAP34XX_SDRC_REGADDR(SDRC_DLLA_CTRL)
/*
* This file needs be built unconditionally as ARM to interoperate correctly
* with non-Thumb-2-capable firmware.
*/
.arm
/*
* API functions
*/
.text
/*
* L2 cache needs to be toggled for stable OFF mode functionality on 3630.
* This function sets up a flag that will allow for this toggling to take
* place on 3630. Hopefully some version in the future may not need this.
*/
@ Sets the l2dis_3630 flag (stored in .data) via a PC-relative offset
@ so the store works regardless of where this code executes from.
ENTRY(enable_omap3630_toggle_l2_on_restore)
stmfd sp!, {lr} @ save registers on stack
/* Setup so that we will disable and enable l2 */
mov r1, #0x1
adr r3, l2dis_3630_offset @ r3 = address of the offset word
ldr r2, [r3] @ value for offset
str r1, [r2, r3] @ write to l2dis_3630
ldmfd sp!, {pc} @ restore regs and return
ENDPROC(enable_omap3630_toggle_l2_on_restore)
/*
* Function to call rom code to save secure ram context.
*
* r0 = physical address of the parameters
*/
.arch armv7-a
.arch_extension sec
@ r0 = physical address of the parameter block passed to the ROM.
@ Invokes PPA service 25 via smc #1 to save the secure RAM context.
ENTRY(save_secure_ram_context)
stmfd sp!, {r4 - r11, lr} @ save registers on stack
mov r3, r0 @ physical address of parameters
mov r0, #25 @ set service ID for PPA
mov r12, r0 @ copy secure service ID in r12
mov r1, #0 @ set task id for ROM code in r1
mov r2, #4 @ set some flags in r2, r6
mov r6, #0xff
dsb @ data write barrier
dmb @ data memory barrier
smc #1 @ call SMI monitor (smi #1)
nop
nop
nop
nop
ldmfd sp!, {r4 - r11, pc}
ENDPROC(save_secure_ram_context)
/*
* ======================
* == Idle entry point ==
* ======================
*/
/*
* Forces OMAP into idle state
*
* omap34xx_cpu_suspend() - This bit of code saves the CPU context if needed
* and executes the WFI instruction. Calling WFI effectively changes the
* power domains states to the desired target power states.
*
*
* Notes:
* - only the minimum set of functions gets copied to internal SRAM at boot
* and after wake-up from OFF mode, cf. omap_push_sram_idle. The function
* pointers in SDRAM or SRAM are called depending on the desired low power
* target state.
* - when the OMAP wakes up it continues at different execution points
* depending on the low power mode (non-OFF vs OFF modes),
* cf. 'Resume path for xxx mode' comments.
*/
.align 3
@ Suspend entry point — see the long comment above for the overall
@ flow.  r0 selects whether context must be saved before WFI.
ENTRY(omap34xx_cpu_suspend)
stmfd sp!, {r4 - r11, lr} @ save registers on stack
/*
* r0 contains information about saving context:
* 0 - No context lost
* 1 - Only L1 and logic lost
* 2 - Only L2 lost (Even L1 is retained we clean it along with L2)
* 3 - Both L1 and L2 lost and logic lost
*/
/*
* For OFF mode: save context and jump to WFI in SDRAM (omap3_do_wfi)
* For non-OFF modes: jump to the WFI code in SRAM (omap3_do_wfi_sram)
*/
ldr r4, omap3_do_wfi_sram_addr
ldr r5, [r4]
cmp r0, #0x0 @ If no context save required,
bxeq r5 @ jump to the WFI code in SRAM
/* Otherwise fall through to the save context code */
save_context_wfi:
/*
* jump out to kernel flush routine
* - reuse that code is better
* - it executes in a cached space so is faster than refetch per-block
* - should be faster and will change with kernel
* - 'might' have to copy address, load and jump to it
* Flush all data from the L1 data cache before disabling
* SCTLR.C bit.
*/
ldr r1, kernel_flush
mov lr, pc @ manual branch-and-link (keeps ARM state)
bx r1
/*
* Clear the SCTLR.C bit to prevent further data cache
* allocation. Clearing SCTLR.C would make all the data accesses
* strongly ordered and would not hit the cache.
*/
mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #(1 << 2) @ Disable the C bit
mcr p15, 0, r0, c1, c0, 0
isb
/*
* Invalidate L1 data cache. Even though only invalidate is
* necessary exported flush API is used here. Doing clean
* on already clean cache would be almost NOP.
*/
ldr r1, kernel_flush
blx r1
b omap3_do_wfi
ENDPROC(omap34xx_cpu_suspend)
omap3_do_wfi_sram_addr:
.word omap3_do_wfi_sram
kernel_flush:
.word v7_flush_dcache_all
/* ===================================
* == WFI instruction => Enter idle ==
* ===================================
*/
/*
* Do WFI instruction
* Includes the resume path for non-OFF modes
*
* This code gets copied to internal SRAM and is accessible
* from both SDRAM and SRAM:
* - executed from SRAM for non-off modes (omap3_do_wfi_sram),
* - executed from SDRAM for OFF mode (omap3_do_wfi).
*/
.align 3
@ WFI sequence plus the non-OFF resume path — see the comment above.
@ Copied to SRAM (omap3_do_wfi_sram) for non-OFF modes; executed from
@ SDRAM for OFF mode.
ENTRY(omap3_do_wfi)
ldr r4, sdrc_power @ read the SDRC_POWER register
ldr r5, [r4] @ read the contents of SDRC_POWER
orr r5, r5, #0x40 @ enable self refresh on idle req
str r5, [r4] @ write back to SDRC_POWER register
/* Data memory barrier and Data sync barrier */
dsb
dmb
/*
* ===================================
* == WFI instruction => Enter idle ==
* ===================================
*/
wfi @ wait for interrupt
/*
* ===================================
* == Resume path for non-OFF modes ==
* ===================================
*/
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
/*
* This function implements the erratum ID i581 WA:
* SDRC state restore before accessing the SDRAM
*
* Only used at return from non-OFF mode. For OFF
* mode the ROM code configures the SDRC and
* the DPLL before calling the restore code directly
* from DDR.
*/
/* Make sure SDRC accesses are ok */
wait_sdrc_ok:
/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */
ldr r4, cm_idlest_ckgen
wait_dpll3_lock:
ldr r5, [r4]
tst r5, #1
beq wait_dpll3_lock
ldr r4, cm_idlest1_core
wait_sdrc_ready:
ldr r5, [r4]
tst r5, #0x2
bne wait_sdrc_ready
/* allow DLL powerdown upon hw idle req */
ldr r4, sdrc_power
ldr r5, [r4]
bic r5, r5, #0x40
str r5, [r4]
is_dll_in_lock_mode:
/* Is dll in lock mode? */
ldr r4, sdrc_dlla_ctrl
ldr r5, [r4]
tst r5, #0x4
bne exit_nonoff_modes @ Return if locked
/* wait till dll locks */
wait_dll_lock_timed:
ldr r4, sdrc_dlla_status
/* Wait 20uS for lock */
mov r6, #8
wait_dll_lock:
subs r6, r6, #0x1
beq kick_dll @ timed out: toggle the DLL and retry
ldr r5, [r4]
and r5, r5, #0x4
cmp r5, #0x4
bne wait_dll_lock
b exit_nonoff_modes @ Return when locked
/* disable/reenable DLL if not locked */
kick_dll:
ldr r4, sdrc_dlla_ctrl
ldr r5, [r4]
mov r6, r5
bic r6, #(1<<3) @ disable dll
str r6, [r4]
dsb
orr r6, r6, #(1<<3) @ enable dll
str r6, [r4]
dsb
b wait_dll_lock_timed
exit_nonoff_modes:
/* Re-enable C-bit if needed */
mrc p15, 0, r0, c1, c0, 0
tst r0, #(1 << 2) @ Check C bit enabled?
orreq r0, r0, #(1 << 2) @ Enable the C bit if cleared
mcreq p15, 0, r0, c1, c0, 0
isb
/*
* ===================================
* == Exit point from non-OFF modes ==
* ===================================
*/
ldmfd sp!, {r4 - r11, pc} @ restore regs and return
ENDPROC(omap3_do_wfi)
@ Literal pool: register addresses resolved at link time.
sdrc_power:
.word SDRC_POWER_V
cm_idlest1_core:
.word CM_IDLEST1_CORE_V
cm_idlest_ckgen:
.word CM_IDLEST_CKGEN_V
sdrc_dlla_status:
.word SDRC_DLLA_STATUS_V
sdrc_dlla_ctrl:
.word SDRC_DLLA_CTRL_V
@ Size of the WFI code copied to SRAM.
ENTRY(omap3_do_wfi_sz)
.word . - omap3_do_wfi
/*
* ==============================
* == Resume path for OFF mode ==
* ==============================
*/
/*
* The restore_* functions are called by the ROM code
* when back from WFI in OFF mode.
* Cf. the get_*restore_pointer functions.
*
* restore_es3: applies to 34xx >= ES3.0
* restore_3630: applies to 36xx
* restore: common code for 3xxx
*
* Note: when back from CORE and MPU OFF mode we are running
* from SDRAM, without MMU, without the caches and prediction.
* Also the SRAM content has been cleared.
*/
@ 34xx >= ES3.0 restore entry: if CORE was OFF, copy the i443 SDRC
@ fix (es3_sdrc_fix) into SRAM and run it before the common restore.
ENTRY(omap3_restore_es3)
ldr r5, pm_prepwstst_core_p
ldr r4, [r5]
and r4, r4, #0x3
cmp r4, #0x0 @ Check if previous power state of CORE is OFF
bne omap3_restore @ Fall through to OMAP3 common code
adr r0, es3_sdrc_fix
ldr r1, sram_base
ldr r2, es3_sdrc_fix_sz
mov r2, r2, ror #2 @ byte size -> word count (size is a multiple of 4, so ror == lsr here)
copy_to_sram:
ldmia r0!, {r3} @ val = *src
stmia r1!, {r3} @ *dst = val
subs r2, r2, #0x1 @ num_words--
bne copy_to_sram
ldr r1, sram_base
blx r1 @ run the SDRC fix from SRAM
b omap3_restore @ Fall through to OMAP3 common code
ENDPROC(omap3_restore_es3)
@ 36xx restore entry: if CORE was OFF, disable RTA before falling
@ through to the common restore code.
ENTRY(omap3_restore_3630)
ldr r1, pm_prepwstst_core_p
ldr r2, [r1]
and r2, r2, #0x3
cmp r2, #0x0 @ Check if previous power state of CORE is OFF
bne omap3_restore @ Fall through to OMAP3 common code
/* Disable RTA before giving control */
ldr r1, control_mem_rta
mov r2, #OMAP36XX_RTA_DISABLE
str r2, [r1]
ENDPROC(omap3_restore_3630)
/* Fall through to common code for the remaining logic */
/*
 * omap3_restore: common OMAP3 wakeup-from-OFF/RET entry point.
 * Restores L2 cache state — GP devices use raw SMI calls (l2_inv_gp),
 * HS/EMU devices go through secure PPA services — then branches to the
 * generic cpu_resume.  Never returns to the caller.
 */
ENTRY(omap3_restore)
/*
 * Read the pwstctrl register to check the reason for mpu reset.
 * This tells us what was lost.
 */
ldr r1, pm_pwstctrl_mpu
ldr r2, [r1]
and r2, r2, #0x3
cmp r2, #0x0 @ Check if target power state was OFF or RET
bne logic_l1_restore
adr r1, l2dis_3630_offset @ address for offset
ldr r0, [r1] @ value for offset
ldr r0, [r1, r0] @ value at l2dis_3630
cmp r0, #0x1 @ should we disable L2 on 3630?
bne skipl2dis
mrc p15, 0, r0, c1, c0, 1
bic r0, r0, #2 @ disable L2 cache
mcr p15, 0, r0, c1, c0, 1
skipl2dis:
ldr r0, control_stat
ldr r1, [r0]
and r1, #0x700
cmp r1, #0x300
beq l2_inv_gp @ GP device: use plain SMI calls
adr r0, l2_inv_api_params_offset
ldr r3, [r0]
add r3, r3, r0 @ r3 points to dummy parameters
mov r0, #40 @ set service ID for PPA
mov r12, r0 @ copy secure Service ID in r12
mov r1, #0 @ set task id for ROM code in r1
mov r2, #4 @ set some flags in r2, r6
mov r6, #0xff
dsb @ data write barrier
dmb @ data memory barrier
smc #1 @ call SMI monitor (smi #1)
/* Write to Aux control register to set some bits */
mov r0, #42 @ set service ID for PPA
mov r12, r0 @ copy secure Service ID in r12
mov r1, #0 @ set task id for ROM code in r1
mov r2, #4 @ set some flags in r2, r6
mov r6, #0xff
ldr r4, scratchpad_base
ldr r3, [r4, #0xBC] @ r3 points to parameters
dsb @ data write barrier
dmb @ data memory barrier
smc #1 @ call SMI monitor (smi #1)
#ifdef CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE
/* Restore L2 aux control register */
@ set service ID for PPA
mov r0, #CONFIG_OMAP3_L2_AUX_SECURE_SERVICE_SET_ID
mov r12, r0 @ copy service ID in r12
mov r1, #0 @ set task ID for ROM code in r1
mov r2, #4 @ set some flags in r2, r6
mov r6, #0xff
ldr r4, scratchpad_base
ldr r3, [r4, #0xBC]
adds r3, r3, #8 @ r3 points to parameters
dsb @ data write barrier
dmb @ data memory barrier
smc #1 @ call SMI monitor (smi #1)
#endif
b logic_l1_restore
.align
l2_inv_api_params_offset:
.long l2_inv_api_params - . @ PC-relative so it works pre-MMU
l2_inv_gp:
/* Execute smi to invalidate L2 cache */
mov r12, #0x1 @ set up to invalidate L2
smc #0 @ Call SMI monitor (smieq)
/* Write to Aux control register to set some bits */
ldr r4, scratchpad_base
ldr r3, [r4,#0xBC]
ldr r0, [r3,#4]
mov r12, #0x3
smc #0 @ Call SMI monitor (smieq)
ldr r4, scratchpad_base
ldr r3, [r4,#0xBC]
ldr r0, [r3,#12]
mov r12, #0x2
smc #0 @ Call SMI monitor (smieq)
logic_l1_restore:
adr r0, l2dis_3630_offset @ address for offset
ldr r1, [r0] @ value for offset
ldr r1, [r0, r1] @ value at l2dis_3630
cmp r1, #0x1 @ Test if L2 re-enable needed on 3630
bne skipl2reen
mrc p15, 0, r1, c1, c0, 1
orr r1, r1, #2 @ re-enable L2 cache
mcr p15, 0, r1, c1, c0, 1
skipl2reen:
/* Now branch to the common CPU resume function */
b cpu_resume
ENDPROC(omap3_restore)
.ltorg
/*
 * Local variables: addresses of PRCM/control registers used by the
 * restore paths above.  l2dis_3630 and l2_inv_api_params are placed in
 * .data (writable) because they are modified/read at run time.
 */
pm_prepwstst_core_p:
.word PM_PREPWSTST_CORE_P
pm_pwstctrl_mpu:
.word PM_PWSTCTRL_MPU_P
scratchpad_base:
.word SCRATCHPAD_BASE_P
sram_base:
.word SRAM_BASE_P + 0x8000
control_stat:
.word CONTROL_STAT
control_mem_rta:
.word CONTROL_MEM_RTA_CTRL
l2dis_3630_offset:
.long l2dis_3630 - . @ PC-relative offset, usable before MMU is on
.data
.align 2
l2dis_3630:
.word 0
.data
.align 2
l2_inv_api_params:
.word 0x1, 0x00 @ dummy parameter block for the PPA L2-invalidate call
/*
* Internal functions
*/
/*
* This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0
* Copied to and run from SRAM in order to reconfigure the SDRC parameters.
*/
.text
.align 3
/*
 * es3_sdrc_fix: erratum i443 workaround (34xx >= ES3.0).  Runs from
 * SRAM (copied there by the caller); unblocks SDRC part access if
 * blocked, rewrites MR/EMR2 for both chip selects and issues manual
 * autorefresh commands.  The exact register sequence is mandated by
 * the erratum — do not reorder.
 * Clobbers: r4, r5.
 */
ENTRY(es3_sdrc_fix)
ldr r4, sdrc_syscfg @ get config addr
ldr r5, [r4] @ get value
tst r5, #0x100 @ is part access blocked
it eq
biceq r5, r5, #0x100 @ clear bit if set
str r5, [r4] @ write back change
ldr r4, sdrc_mr_0 @ get config addr
ldr r5, [r4] @ get value
str r5, [r4] @ write back change
ldr r4, sdrc_emr2_0 @ get config addr
ldr r5, [r4] @ get value
str r5, [r4] @ write back change
ldr r4, sdrc_manual_0 @ get config addr
mov r5, #0x2 @ autorefresh command
str r5, [r4] @ kick off refreshes
ldr r4, sdrc_mr_1 @ get config addr
ldr r5, [r4] @ get value
str r5, [r4] @ write back change
ldr r4, sdrc_emr2_1 @ get config addr
ldr r5, [r4] @ get value
str r5, [r4] @ write back change
ldr r4, sdrc_manual_1 @ get config addr
mov r5, #0x2 @ autorefresh command
str r5, [r4] @ kick off refreshes
bx lr
/*
 * Local variables: SDRC register addresses; kept adjacent so they are
 * copied to SRAM together with the code above.
 */
.align
sdrc_syscfg:
.word SDRC_SYSCONFIG_P
sdrc_mr_0:
.word SDRC_MR_0_P
sdrc_emr2_0:
.word SDRC_EMR2_0_P
sdrc_manual_0:
.word SDRC_MANUAL_0_P
sdrc_mr_1:
.word SDRC_MR_1_P
sdrc_emr2_1:
.word SDRC_EMR2_1_P
sdrc_manual_1:
.word SDRC_MANUAL_1_P
ENDPROC(es3_sdrc_fix)
ENTRY(es3_sdrc_fix_sz)
.word . - es3_sdrc_fix @ size of the SRAM-copied region
|
aixcc-public/challenge-001-exemplar-source
| 9,890
|
arch/arm/mach-omap2/sram242x.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* linux/arch/arm/mach-omap2/sram242x.S
*
* Omap2 specific functions that need to be run in internal SRAM
*
* (C) Copyright 2004
* Texas Instruments, <www.ti.com>
* Richard Woodruff <r-woodruff2@ti.com>
*
* Richard Woodruff notes that any changes to this code must be carefully
* audited and tested to ensure that they don't cause a TLB miss while
* the SDRAM is inaccessible. Such a situation will crash the system
* since it will cause the ARM MMU to attempt to walk the page tables.
* These crashes may be intermittent.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include "soc.h"
#include "iomap.h"
#include "prm2xxx.h"
#include "cm2xxx.h"
#include "sdrc.h"
.text
.align 3
/*
 * omap242x_sram_ddr_init: run-from-SRAM DDR DLL init for OMAP2420.
 * In:  r0 = address to store DLL status, r2 = CS1-vs-CS0 select,
 *      r3 = force-unlock flag, r1 = DLLA_CTRL exit value.
 * Sequence: drop frequency and voltage, lock the DLL, capture the
 * locked value, raise voltage/frequency back, restore DLL control.
 * Must not touch SDRAM while the DLL is being retrained.
 */
ENTRY(omap242x_sram_ddr_init)
stmfd sp!, {r0 - r12, lr} @ save registers on stack
mov r12, r2 @ capture CS1 vs CS0
mov r8, r3 @ capture force parameter
/* frequency shift down */
ldr r2, omap242x_sdi_cm_clksel2_pll @ get address of dpllout reg
mov r3, #0x1 @ value for 1x operation
str r3, [r2] @ go to L1-freq operation
/* voltage shift down */
mov r9, #0x1 @ set up for L1 voltage call
bl voltage_shift @ go drop voltage
/* dll lock mode */
ldr r11, omap242x_sdi_sdrc_dlla_ctrl @ addr of dlla ctrl
ldr r10, [r11] @ get current val
cmp r12, #0x1 @ cs1 base (2422 es2.05/1)
addeq r11, r11, #0x8 @ if cs1 base, move to DLLB
mvn r9, #0x4 @ mask to get clear bit2
and r10, r10, r9 @ clear bit2 for lock mode.
orr r10, r10, #0x8 @ make sure DLL on (es2 bit pos)
orr r10, r10, #0x2 @ 90 degree phase for all below 133MHz
str r10, [r11] @ commit to DLLA_CTRL
bl i_dll_wait @ wait for dll to lock
/* get dll value */
add r11, r11, #0x4 @ get addr of status reg
ldr r10, [r11] @ get locked value
/* voltage shift up */
mov r9, #0x0 @ shift back to L0-voltage
bl voltage_shift @ go raise voltage
/* frequency shift up */
mov r3, #0x2 @ value for 2x operation
str r3, [r2] @ go to L0-freq operation
/* reset entry mode for dllctrl */
sub r11, r11, #0x4 @ move from status to ctrl
cmp r12, #0x1 @ normalize if cs1 based
subeq r11, r11, #0x8 @ possibly back to DLLA
cmp r8, #0x1 @ if forced unlock exit
orreq r1, r1, #0x4 @ make sure exit with unlocked value
str r1, [r11] @ restore DLLA_CTRL high value
add r11, r11, #0x8 @ move to DLLB_CTRL addr
str r1, [r11] @ set value DLLB_CTRL
bl i_dll_wait @ wait for possible lock
/* set up for return, DDR should be good */
str r10, [r0] @ write dll_status and return counter
ldmfd sp!, {r0 - r12, pc} @ restore regs and return
/* ensure the DLL has relocked (busy-wait; min 0x400 L3 clocks) */
i_dll_wait:
mov r4, #0x800 @ delay DLL relock, min 0x400 L3 clocks
i_dll_delay:
subs r4, r4, #0x1
bne i_dll_delay
ret lr
/*
 * shift up or down voltage, use R9 as input to tell level.
 * wait for it to finish, use 32k sync counter, 1tick=31uS.
 */
voltage_shift:
ldr r4, omap242x_sdi_prcm_voltctrl @ get addr of volt ctrl.
ldr r5, [r4] @ get value.
ldr r6, prcm_mask_val @ get value of mask
and r5, r5, r6 @ apply mask to clear bits
orr r5, r5, r9 @ build value for L0/L1-volt operation.
str r5, [r4] @ set up for change.
mov r3, #0x4000 @ get val for force
orr r5, r5, r3 @ build value for force
str r5, [r4] @ Force transition to L1
ldr r3, omap242x_sdi_timer_32ksynct_cr @ get addr of counter
ldr r5, [r3] @ get value
add r5, r5, #0x3 @ give it at most 93uS
volt_delay:
ldr r7, [r3] @ get timer value
cmp r5, r7 @ time up?
bhi volt_delay @ not yet->branch
ret lr @ back to caller.
/* Local variables: register addresses, copied to SRAM with the code */
omap242x_sdi_cm_clksel2_pll:
.word OMAP2420_CM_REGADDR(PLL_MOD, CM_CLKSEL2)
omap242x_sdi_sdrc_dlla_ctrl:
.word OMAP242X_SDRC_REGADDR(SDRC_DLLA_CTRL)
omap242x_sdi_prcm_voltctrl:
.word OMAP2420_PRCM_VOLTCTRL
prcm_mask_val:
.word 0xFFFF3FFC
omap242x_sdi_timer_32ksynct_cr:
.word OMAP2_L4_IO_ADDRESS(OMAP2420_32KSYNCT_BASE + 0x010)
ENTRY(omap242x_sram_ddr_init_sz)
.word . - omap242x_sram_ddr_init @ size of the SRAM-copied region
/*
* Reprograms memory timings.
* r0 = [PRCM_FULL | PRCM_HALF] r1 = SDRC_DLLA_CTRL value r2 = [DDR | SDR]
* PRCM_FULL = 2, PRCM_HALF = 1, DDR = 1, SDR = 0
*/
.align 3
/*
 * omap242x_sram_reprogram_sdrc: change SDRC timings at a new CPU speed.
 * In: r0 = PRCM_FULL(2)|PRCM_HALF(1), r1 = SDRC_DLLA_CTRL value,
 *     r2 = DDR(1)|SDR(0).  Runs from SRAM; scales the refresh rate
 * with the frequency change, shifts voltage in the matching direction,
 * and for DDR re-locks the DLLs afterwards.
 */
ENTRY(omap242x_sram_reprogram_sdrc)
stmfd sp!, {r0 - r10, lr} @ save registers on stack
mov r3, #0x0 @ clear for mrc call
mcr p15, 0, r3, c7, c10, 4 @ memory barrier, finish ARM SDR/DDR
nop
nop
ldr r6, omap242x_srs_sdrc_rfr_ctrl @ get addr of refresh reg
ldr r5, [r6] @ get value
mov r5, r5, lsr #8 @ isolate rfr field and drop burst
cmp r0, #0x1 @ going to half speed?
movne r9, #0x0 @ if up set flag up for pre up, hi volt
blne voltage_shift_c @ adjust voltage
cmp r0, #0x1 @ going to half speed (post branch link)
moveq r5, r5, lsr #1 @ divide by 2 if to half
movne r5, r5, lsl #1 @ mult by 2 if to full
mov r5, r5, lsl #8 @ put rfr field back into place
add r5, r5, #0x1 @ turn on burst of 1
ldr r4, omap242x_srs_cm_clksel2_pll @ get address of out reg
ldr r3, [r4] @ get curr value
orr r3, r3, #0x3
bic r3, r3, #0x3 @ clear lower bits
orr r3, r3, r0 @ new state value
str r3, [r4] @ set new state (pll/x, x=1 or 2)
nop
nop
moveq r9, #0x1 @ if speed down, post down, drop volt
bleq voltage_shift_c
mcr p15, 0, r3, c7, c10, 4 @ memory barrier
str r5, [r6] @ set new RFR_1 value
add r6, r6, #0x30 @ get RFR_2 addr
str r5, [r6] @ set RFR_2
nop
cmp r2, #0x1 @ (SDR or DDR) do we need to adjust DLL
bne freq_out @ leave if SDR, no DLL function
/* With DDR, we need to take care of the DLL for the frequency change */
ldr r2, omap242x_srs_sdrc_dlla_ctrl @ addr of dlla ctrl
str r1, [r2] @ write out new SDRC_DLLA_CTRL
add r2, r2, #0x8 @ addr to SDRC_DLLB_CTRL
str r1, [r2] @ commit to SDRC_DLLB_CTRL
mov r1, #0x2000 @ wait DLL relock, min 0x400 L3 clocks
dll_wait:
subs r1, r1, #0x1
bne dll_wait
freq_out:
ldmfd sp!, {r0 - r10, pc} @ restore regs and return
/*
 * shift up or down voltage, use R9 as input to tell level.
 * wait for it to finish, use 32k sync counter, 1tick=31uS.
 */
voltage_shift_c:
ldr r10, omap242x_srs_prcm_voltctrl @ get addr of volt ctrl
ldr r8, [r10] @ get value
ldr r7, ddr_prcm_mask_val @ get value of mask
and r8, r8, r7 @ apply mask to clear bits
orr r8, r8, r9 @ build value for L0/L1-volt operation.
str r8, [r10] @ set up for change.
mov r7, #0x4000 @ get val for force
orr r8, r8, r7 @ build value for force
str r8, [r10] @ Force transition to L1
ldr r10, omap242x_srs_timer_32ksynct @ get addr of counter
ldr r8, [r10] @ get value
add r8, r8, #0x2 @ give it at most 62uS (min 31+)
volt_delay_c:
ldr r7, [r10] @ get timer value
cmp r8, r7 @ time up?
bhi volt_delay_c @ not yet->branch
ret lr @ back to caller
/* Local variables: register addresses, copied to SRAM with the code */
omap242x_srs_cm_clksel2_pll:
.word OMAP2420_CM_REGADDR(PLL_MOD, CM_CLKSEL2)
omap242x_srs_sdrc_dlla_ctrl:
.word OMAP242X_SDRC_REGADDR(SDRC_DLLA_CTRL)
omap242x_srs_sdrc_rfr_ctrl:
.word OMAP242X_SDRC_REGADDR(SDRC_RFR_CTRL_0)
omap242x_srs_prcm_voltctrl:
.word OMAP2420_PRCM_VOLTCTRL
ddr_prcm_mask_val:
.word 0xFFFF3FFC
omap242x_srs_timer_32ksynct:
.word OMAP2_L4_IO_ADDRESS(OMAP2420_32KSYNCT_BASE + 0x010)
ENTRY(omap242x_sram_reprogram_sdrc_sz)
.word . - omap242x_sram_reprogram_sdrc @ size of the SRAM-copied region
/*
* Set dividers and pll. Also recalculate DLL value for DDR and unlock mode.
*/
.align 3
/*
 * omap242x_sram_set_prcm: set PLL dividers and relock the DPLL.
 * In: r0 = new CM_CLKSEL1 divider value, r1 = new SDRC refresh value,
 *     r2 = 1 to stay in bypass (skip relock).
 * The whole routine is preloaded into the I-cache first so that no
 * instruction fetch hits SDRAM while the DPLL is in bypass/relock.
 */
ENTRY(omap242x_sram_set_prcm)
stmfd sp!, {r0-r12, lr} @ regs to stack
adr r4, pbegin @ addr of preload start
adr r8, pend @ addr of preload end
mcrr p15, 1, r8, r4, c12 @ preload into icache
pbegin:
/* move into fast relock bypass */
ldr r8, omap242x_ssp_pll_ctl @ get addr
ldr r5, [r8] @ get val
mvn r6, #0x3 @ clear mask
and r5, r5, r6 @ clear field
orr r7, r5, #0x2 @ fast relock val
str r7, [r8] @ go to fast relock
ldr r4, omap242x_ssp_pll_stat @ addr of stat
block:
/* wait for bypass */
ldr r8, [r4] @ stat value
and r8, r8, #0x3 @ mask for stat
cmp r8, #0x1 @ there yet
bne block @ loop if not
/* set new dpll dividers _after_ in bypass */
ldr r4, omap242x_ssp_pll_div @ get addr
str r0, [r4] @ set dpll ctrl val
ldr r4, omap242x_ssp_set_config @ get addr
mov r8, #1 @ valid cfg msk
str r8, [r4] @ make dividers take
mov r4, #100 @ dead spin a bit
wait_a_bit:
subs r4, r4, #1 @ dec loop
bne wait_a_bit @ delay done?
/* check if staying in bypass */
cmp r2, #0x1 @ stay in bypass?
beq pend @ jump over dpll relock
/* relock DPLL with new vals */
ldr r5, omap242x_ssp_pll_stat @ get addr
ldr r4, omap242x_ssp_pll_ctl @ get addr
orr r8, r7, #0x3 @ val for lock dpll
str r8, [r4] @ set val
mov r0, #1000 @ dead spin a bit
wait_more:
subs r0, r0, #1 @ dec loop
bne wait_more @ delay done?
wait_lock:
ldr r8, [r5] @ get lock val
and r8, r8, #3 @ isolate field
cmp r8, #2 @ locked?
bne wait_lock @ wait if not
pend:
/* update memory timings & briefly lock dll */
ldr r4, omap242x_ssp_sdrc_rfr @ get addr
str r1, [r4] @ update refresh timing
ldr r11, omap242x_ssp_dlla_ctrl @ get addr of DLLA ctrl
ldr r10, [r11] @ get current val
mvn r9, #0x4 @ mask to get clear bit2
and r10, r10, r9 @ clear bit2 for lock mode
orr r10, r10, #0x8 @ make sure DLL on (es2 bit pos)
str r10, [r11] @ commit to DLLA_CTRL
add r11, r11, #0x8 @ move to dllb
str r10, [r11] @ hit DLLB also
mov r4, #0x800 @ relock time (min 0x400 L3 clocks)
wait_dll_lock:
subs r4, r4, #0x1
bne wait_dll_lock
nop
ldmfd sp!, {r0-r12, pc} @ restore regs and return
/* Local variables: register addresses, copied to SRAM with the code */
omap242x_ssp_set_config:
.word OMAP2420_PRCM_CLKCFG_CTRL
omap242x_ssp_pll_ctl:
.word OMAP2420_CM_REGADDR(PLL_MOD, CM_CLKEN)
omap242x_ssp_pll_stat:
.word OMAP2420_CM_REGADDR(PLL_MOD, CM_IDLEST)
omap242x_ssp_pll_div:
.word OMAP2420_CM_REGADDR(PLL_MOD, CM_CLKSEL1)
omap242x_ssp_sdrc_rfr:
.word OMAP242X_SDRC_REGADDR(SDRC_RFR_CTRL_0)
omap242x_ssp_dlla_ctrl:
.word OMAP242X_SDRC_REGADDR(SDRC_DLLA_CTRL)
ENTRY(omap242x_sram_set_prcm_sz)
.word . - omap242x_sram_set_prcm @ size of the SRAM-copied region
|
aixcc-public/challenge-001-exemplar-source
| 10,937
|
arch/arm/mach-omap2/sleep43xx.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Low level suspend code for AM43XX SoCs
*
* Copyright (C) 2013-2018 Texas Instruments Incorporated - https://www.ti.com/
* Dave Gerlach, Vaibhav Bedia
*/
#include <linux/linkage.h>
#include <linux/ti-emif-sram.h>
#include <linux/platform_data/pm33xx.h>
#include <asm/assembler.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/memory.h>
#include "cm33xx.h"
#include "common.h"
#include "iomap.h"
#include "omap-secure.h"
#include "omap44xx.h"
#include "pm-asm-offsets.h"
#include "prm33xx.h"
#include "prcm43xx.h"
/* replicated define because linux/bitops.h cannot be included in assembly */
#define BIT(nr) (1 << (nr))
#define AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED 0x00030000
#define AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE 0x0003
#define AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE 0x0002
#define AM43XX_EMIF_POWEROFF_ENABLE 0x1
#define AM43XX_EMIF_POWEROFF_DISABLE 0x0
#define AM43XX_CM_CLKSTCTRL_CLKTRCTRL_SW_SLEEP 0x1
#define AM43XX_CM_CLKSTCTRL_CLKTRCTRL_HW_AUTO 0x3
#define AM43XX_CM_BASE 0x44DF0000
#define AM43XX_CM_REGADDR(inst, reg) \
AM33XX_L4_WK_IO_ADDRESS(AM43XX_CM_BASE + (inst) + (reg))
#define AM43XX_CM_MPU_CLKSTCTRL AM43XX_CM_REGADDR(AM43XX_CM_MPU_INST, \
AM43XX_CM_MPU_MPU_CDOFFS)
#define AM43XX_CM_MPU_MPU_CLKCTRL AM43XX_CM_REGADDR(AM43XX_CM_MPU_INST, \
AM43XX_CM_MPU_MPU_CLKCTRL_OFFSET)
#define AM43XX_CM_PER_EMIF_CLKCTRL AM43XX_CM_REGADDR(AM43XX_CM_PER_INST, \
AM43XX_CM_PER_EMIF_CLKCTRL_OFFSET)
#define AM43XX_PRM_EMIF_CTRL_OFFSET 0x0030
#define RTC_SECONDS_REG 0x0
#define RTC_PMIC_REG 0x98
#define RTC_PMIC_POWER_EN BIT(16)
#define RTC_PMIC_EXT_WAKEUP_STS BIT(12)
#define RTC_PMIC_EXT_WAKEUP_POL BIT(4)
#define RTC_PMIC_EXT_WAKEUP_EN BIT(0)
.arm
.arch armv7-a
.arch_extension sec
.align 3
/*
 * am43xx_do_wfi: AM43xx low-level suspend path, runs from SRAM.
 * In:  r0 = wfi_flags (WFI_FLAG_* bitmask) selecting which steps to
 *      perform (cache flush, EMIF self-refresh/save/disable, RTC-only
 *      mode, wake-M3 handshake).
 * Out: r0 = 1 when we return here — i.e. the suspend was aborted by a
 *      late interrupt (deep sleep itself resumes via
 *      am43xx_resume_from_deep_sleep instead).
 * r4 holds wfi_flags throughout; r8 caches the L2 controller base.
 */
ENTRY(am43xx_do_wfi)
stmfd sp!, {r4 - r11, lr} @ save registers on stack
/* Save wfi_flags arg to data space */
mov r4, r0
adr r3, am43xx_pm_ro_sram_data
ldr r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
str r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]
#ifdef CONFIG_CACHE_L2X0
/* Retrieve l2 cache virt address BEFORE we shut off EMIF */
ldr r1, get_l2cache_base
blx r1
mov r8, r0
#endif
/* Only flush cache if we know we are losing MPU context */
tst r4, #WFI_FLAG_FLUSH_CACHE
beq cache_skip_flush
/*
 * Flush all data from the L1 and L2 data cache before disabling
 * SCTLR.C bit.
 */
ldr r1, kernel_flush
blx r1
/*
 * Clear the SCTLR.C bit to prevent further data cache
 * allocation. Clearing SCTLR.C would make all the data accesses
 * strongly ordered and would not hit the cache.
 */
mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #(1 << 2) @ Disable the C bit
mcr p15, 0, r0, c1, c0, 0
isb
dsb
/*
 * Invalidate L1 and L2 data cache.
 */
ldr r1, kernel_flush
blx r1
#ifdef CONFIG_CACHE_L2X0
/*
 * Clean and invalidate the L2 cache.
 */
#ifdef CONFIG_PL310_ERRATA_727915
mov r0, #0x03
mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
dsb
smc #0
dsb
#endif
mov r0, r8
adr r4, am43xx_pm_ro_sram_data
ldr r3, [r4, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
mov r2, r0
/* Stash L2 AUX/PREFETCH ctrl values so resume can restore them */
ldr r0, [r2, #L2X0_AUX_CTRL]
str r0, [r3, #AMX3_PM_L2_AUX_CTRL_VAL_OFFSET]
ldr r0, [r2, #L310_PREFETCH_CTRL]
str r0, [r3, #AMX3_PM_L2_PREFETCH_CTRL_VAL_OFFSET]
ldr r0, l2_val
str r0, [r2, #L2X0_CLEAN_INV_WAY]
wait:
ldr r0, [r2, #L2X0_CLEAN_INV_WAY]
ldr r1, l2_val
ands r0, r0, r1
bne wait @ spin until all ways cleaned
#ifdef CONFIG_PL310_ERRATA_727915
mov r0, #0x00
mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
dsb
smc #0
dsb
#endif
l2x_sync:
mov r0, r8
mov r2, r0
mov r0, #0x0
str r0, [r2, #L2X0_CACHE_SYNC]
sync:
ldr r0, [r2, #L2X0_CACHE_SYNC]
ands r0, r0, #0x1
bne sync
#endif
/* Restore wfi_flags (r4 was reused as scratch above) */
adr r3, am43xx_pm_ro_sram_data
ldr r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
ldr r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]
cache_skip_flush:
/*
 * If we are trying to enter RTC+DDR mode we must perform
 * a read from the rtc address space to ensure translation
 * presence in the TLB to avoid page table walk after DDR
 * is unavailable.
 */
tst r4, #WFI_FLAG_RTC_ONLY
beq skip_rtc_va_refresh
adr r3, am43xx_pm_ro_sram_data
ldr r1, [r3, #AMX3_PM_RTC_BASE_VIRT_OFFSET]
ldr r0, [r1]
skip_rtc_va_refresh:
/* Check if we want self refresh */
tst r4, #WFI_FLAG_SELF_REFRESH
beq emif_skip_enter_sr
adr r9, am43xx_emif_sram_table
ldr r3, [r9, #EMIF_PM_ENTER_SR_OFFSET]
blx r3
emif_skip_enter_sr:
/* Only necessary if PER is losing context */
tst r4, #WFI_FLAG_SAVE_EMIF
beq emif_skip_save
ldr r3, [r9, #EMIF_PM_SAVE_CONTEXT_OFFSET]
blx r3
emif_skip_save:
/* Only can disable EMIF if we have entered self refresh */
tst r4, #WFI_FLAG_SELF_REFRESH
beq emif_skip_disable
/* Disable EMIF */
ldr r1, am43xx_virt_emif_clkctrl
ldr r2, [r1]
bic r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
str r2, [r1]
wait_emif_disable:
ldr r2, [r1]
mov r3, #AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED
cmp r2, r3
bne wait_emif_disable
emif_skip_disable:
tst r4, #WFI_FLAG_RTC_ONLY
beq skip_rtc_only
/* RTC-only: arm the RTC PMIC power-off and external wakeup bits */
adr r3, am43xx_pm_ro_sram_data
ldr r1, [r3, #AMX3_PM_RTC_BASE_VIRT_OFFSET]
ldr r0, [r1, #RTC_PMIC_REG]
orr r0, r0, #RTC_PMIC_POWER_EN
orr r0, r0, #RTC_PMIC_EXT_WAKEUP_STS
orr r0, r0, #RTC_PMIC_EXT_WAKEUP_EN
orr r0, r0, #RTC_PMIC_EXT_WAKEUP_POL
str r0, [r1, #RTC_PMIC_REG]
ldr r0, [r1, #RTC_PMIC_REG]
/* Wait for 2 seconds to lose power */
mov r3, #2
ldr r2, [r1, #RTC_SECONDS_REG]
rtc_loop:
ldr r0, [r1, #RTC_SECONDS_REG]
cmp r0, r2
beq rtc_loop @ wait for the seconds register to tick
mov r2, r0
subs r3, r3, #1
bne rtc_loop
b re_enable_emif @ power-off did not happen; abort path
skip_rtc_only:
tst r4, #WFI_FLAG_WAKE_M3
beq wkup_m3_skip
/*
 * For the MPU WFI to be registered as an interrupt
 * to WKUP_M3, MPU_CLKCTRL.MODULEMODE needs to be set
 * to DISABLED
 */
ldr r1, am43xx_virt_mpu_clkctrl
ldr r2, [r1]
bic r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
str r2, [r1]
/*
 * Put MPU CLKDM to SW_SLEEP
 */
ldr r1, am43xx_virt_mpu_clkstctrl
mov r2, #AM43XX_CM_CLKSTCTRL_CLKTRCTRL_SW_SLEEP
str r2, [r1]
wkup_m3_skip:
/*
 * Execute a barrier instruction to ensure that all cache,
 * TLB and branch predictor maintenance operations issued
 * have completed.
 */
dsb
dmb
/*
 * Execute a WFI instruction and wait until the
 * STANDBYWFI output is asserted to indicate that the
 * CPU is in idle and low power state. CPU can speculatively
 * prefetch the instructions so add NOPs after WFI. Sixteen
 * NOPs as per Cortex-A9 pipeline.
 */
wfi
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
/* We come here in case of an abort due to a late interrupt */
ldr r1, am43xx_virt_mpu_clkstctrl
mov r2, #AM43XX_CM_CLKSTCTRL_CLKTRCTRL_HW_AUTO
str r2, [r1]
/* Set MPU_CLKCTRL.MODULEMODE back to ENABLE */
ldr r1, am43xx_virt_mpu_clkctrl
mov r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
str r2, [r1]
re_enable_emif:
/* Re-enable EMIF */
ldr r1, am43xx_virt_emif_clkctrl
mov r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
str r2, [r1]
wait_emif_enable:
ldr r3, [r1]
cmp r2, r3
bne wait_emif_enable
tst r4, #WFI_FLAG_FLUSH_CACHE
beq cache_skip_restore
/*
 * Set SCTLR.C bit to allow data cache allocation
 */
mrc p15, 0, r0, c1, c0, 0
orr r0, r0, #(1 << 2) @ Enable the C bit
mcr p15, 0, r0, c1, c0, 0
isb
cache_skip_restore:
/* Only necessary if PER is losing context */
tst r4, #WFI_FLAG_SELF_REFRESH
beq emif_skip_exit_sr_abt
adr r9, am43xx_emif_sram_table
ldr r1, [r9, #EMIF_PM_ABORT_SR_OFFSET]
blx r1
emif_skip_exit_sr_abt:
/* Let the suspend code know about the abort */
mov r0, #1
ldmfd sp!, {r4 - r11, pc} @ restore regs and return
ENDPROC(am43xx_do_wfi)
.align
ENTRY(am43xx_resume_offset)
.word . - am43xx_do_wfi @ offset of the resume handler within the SRAM copy
/*
 * am43xx_resume_from_deep_sleep: wakeup entry after deep sleep, runs
 * with the MMU off (physical addresses).  Re-enables EMIF, restores
 * its context, exits self-refresh, re-runs HW leveling, restores and
 * re-enables the L2 cache via secure monitor calls, then jumps to the
 * generic cpu_resume (physical address in resume_addr).
 */
ENTRY(am43xx_resume_from_deep_sleep)
/* Set MPU CLKSTCTRL to HW AUTO so that CPUidle works properly */
ldr r1, am43xx_virt_mpu_clkstctrl
mov r2, #AM43XX_CM_CLKSTCTRL_CLKTRCTRL_HW_AUTO
str r2, [r1]
/* For AM43xx, use EMIF power down until context is restored */
ldr r2, am43xx_phys_emif_poweroff
mov r1, #AM43XX_EMIF_POWEROFF_ENABLE
str r1, [r2, #0x0]
/* Re-enable EMIF */
ldr r1, am43xx_phys_emif_clkctrl
mov r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
str r2, [r1]
wait_emif_enable1:
ldr r3, [r1]
cmp r2, r3
bne wait_emif_enable1
/* Restore EMIF context and leave self-refresh */
adr r9, am43xx_emif_sram_table
ldr r1, [r9, #EMIF_PM_RESTORE_CONTEXT_OFFSET]
blx r1
ldr r1, [r9, #EMIF_PM_EXIT_SR_OFFSET]
blx r1
ldr r2, am43xx_phys_emif_poweroff
mov r1, #AM43XX_EMIF_POWEROFF_DISABLE
str r1, [r2, #0x0]
ldr r1, [r9, #EMIF_PM_RUN_HW_LEVELING]
blx r1
#ifdef CONFIG_CACHE_L2X0
ldr r2, l2_cache_base
ldr r0, [r2, #L2X0_CTRL]
and r0, #0x0f
cmp r0, #1
beq skip_l2en @ Skip if already enabled
/* Restore L2 prefetch ctrl saved by am43xx_do_wfi */
adr r4, am43xx_pm_ro_sram_data
ldr r3, [r4, #AMX3_PM_RO_SRAM_DATA_PHYS_OFFSET]
ldr r0, [r3, #AMX3_PM_L2_PREFETCH_CTRL_VAL_OFFSET]
ldr r12, l2_smc1
dsb
smc #0
dsb
set_aux_ctrl:
ldr r0, [r3, #AMX3_PM_L2_AUX_CTRL_VAL_OFFSET]
ldr r12, l2_smc2
dsb
smc #0
dsb
/* L2 invalidate on resume */
ldr r0, l2_val
ldr r2, l2_cache_base
str r0, [r2, #L2X0_INV_WAY]
wait2:
ldr r0, [r2, #L2X0_INV_WAY]
ldr r1, l2_val
ands r0, r0, r1
bne wait2 @ spin until all ways invalidated
#ifdef CONFIG_PL310_ERRATA_727915
mov r0, #0x00
mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
dsb
smc #0
dsb
#endif
l2x_sync2:
ldr r2, l2_cache_base
mov r0, #0x0
str r0, [r2, #L2X0_CACHE_SYNC]
sync2:
ldr r0, [r2, #L2X0_CACHE_SYNC]
ands r0, r0, #0x1
bne sync2
/* Finally enable the L2 cache through the secure monitor */
mov r0, #0x1
ldr r12, l2_smc3
dsb
smc #0
dsb
#endif
skip_l2en:
/* We are back. Branch to the common CPU resume routine */
mov r0, #0
ldr pc, resume_addr
ENDPROC(am43xx_resume_from_deep_sleep)
/*
 * Local variables: function pointers, physical/virtual register
 * addresses and the SRAM data tables shared between am43xx_do_wfi and
 * the deep-sleep resume path.  These words are copied to SRAM together
 * with the code, so they must stay adjacent to it.
 */
.align
kernel_flush:
.word v7_flush_dcache_all
ddr_start:
.word PAGE_OFFSET
am43xx_phys_emif_poweroff:
.word (AM43XX_CM_BASE + AM43XX_PRM_DEVICE_INST + \
AM43XX_PRM_EMIF_CTRL_OFFSET)
am43xx_virt_mpu_clkstctrl:
.word (AM43XX_CM_MPU_CLKSTCTRL)
am43xx_virt_mpu_clkctrl:
.word (AM43XX_CM_MPU_MPU_CLKCTRL)
am43xx_virt_emif_clkctrl:
.word (AM43XX_CM_PER_EMIF_CLKCTRL)
am43xx_phys_emif_clkctrl:
.word (AM43XX_CM_BASE + AM43XX_CM_PER_INST + \
AM43XX_CM_PER_EMIF_CLKCTRL_OFFSET)
#ifdef CONFIG_CACHE_L2X0
/* L2 cache related defines for AM437x */
get_l2cache_base:
.word omap4_get_l2cache_base
l2_cache_base:
.word OMAP44XX_L2CACHE_BASE
l2_smc1:
.word OMAP4_MON_L2X0_PREFETCH_INDEX
l2_smc2:
.word OMAP4_MON_L2X0_AUXCTRL_INDEX
l2_smc3:
.word OMAP4_MON_L2X0_CTRL_INDEX
l2_val:
.word 0xffff @ way mask used for clean/invalidate-by-way
#endif
.align 3
/* DDR related defines */
ENTRY(am43xx_emif_sram_table)
.space EMIF_PM_FUNCTIONS_SIZE
ENTRY(am43xx_pm_sram)
.word am43xx_do_wfi
.word am43xx_do_wfi_sz
.word am43xx_resume_offset
.word am43xx_emif_sram_table
.word am43xx_pm_ro_sram_data
resume_addr:
.word cpu_resume - PAGE_OFFSET + 0x80000000 @ physical address of cpu_resume
.align 3
ENTRY(am43xx_pm_ro_sram_data)
.space AMX3_PM_RO_SRAM_DATA_SIZE
ENTRY(am43xx_do_wfi_sz)
.word . - am43xx_do_wfi @ size of the SRAM-copied region
|
aixcc-public/challenge-001-exemplar-source
| 10,202
|
arch/arm/mach-omap2/sleep44xx.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* OMAP44xx sleep code.
*
* Copyright (C) 2011 Texas Instruments, Inc.
* Santosh Shilimkar <santosh.shilimkar@ti.com>
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/smp_scu.h>
#include <asm/memory.h>
#include <asm/hardware/cache-l2x0.h>
#include "omap-secure.h"
#include "common.h"
#include "omap44xx.h"
#include "omap4-sar-layout.h"
.arch armv7-a
#if defined(CONFIG_SMP) && defined(CONFIG_PM)
.arch_extension sec
.macro DO_SMC
dsb
smc #0
dsb
.endm
#ifdef CONFIG_ARCH_OMAP4
/*
* =============================
* == CPU suspend finisher ==
* =============================
*
* void omap4_finish_suspend(unsigned long cpu_state)
*
* This function code saves the CPU context and performs the CPU
* power down sequence. Calling WFI effectively changes the CPU
* power domains states to the desired target power state.
*
* @cpu_state : contains context save state (r0)
* 0 - No context lost
* 1 - CPUx L1 and logic lost: MPUSS CSWR
* 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
* 3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
* @return: This function never returns for CPU OFF and DORMANT power states.
* Post WFI, CPU transitions to DORMANT or OFF power state and on wake-up
* from this follows a full CPU reset path via ROM code to CPU restore code.
* The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
* It returns to the caller for CPU INACTIVE and ON power states or in case
* CPU failed to transition to targeted OFF/DORMANT state.
*
* omap4_finish_suspend() calls v7_flush_dcache_all() which doesn't save
* stack frame and it expects the caller to take care of it. Hence the entire
* stack frame is saved to avoid possible stack corruption.
*/
/*
 * omap4_finish_suspend(cpu_state): see the block comment above for the
 * full contract.  r0 selects the target state; HS devices route SCU
 * power-mode and L2 maintenance through the secure monitor (DO_SMC),
 * GP devices program the SCU/PL310 directly.
 */
ENTRY(omap4_finish_suspend)
stmfd sp!, {r4-r12, lr}
cmp r0, #0x0
beq do_WFI @ No lowpower state, jump to WFI
/*
 * Flush all data from the L1 data cache before disabling
 * SCTLR.C bit.
 */
bl omap4_get_sar_ram_base
ldr r9, [r0, #OMAP_TYPE_OFFSET]
cmp r9, #0x1 @ Check for HS device
bne skip_secure_l1_clean
mov r0, #SCU_PM_NORMAL
mov r1, #0xFF @ clean secure L1
stmfd r13!, {r4-r12, r14}
ldr r12, =OMAP4_MON_SCU_PWR_INDEX
DO_SMC
ldmfd r13!, {r4-r12, r14}
skip_secure_l1_clean:
bl v7_flush_dcache_all
/*
 * Clear the SCTLR.C bit to prevent further data cache
 * allocation. Clearing SCTLR.C would make all the data accesses
 * strongly ordered and would not hit the cache.
 */
mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #(1 << 2) @ Disable the C bit
mcr p15, 0, r0, c1, c0, 0
isb
bl v7_invalidate_l1
/*
 * Switch the CPU from Symmetric Multiprocessing (SMP) mode
 * to AsymmetricMultiprocessing (AMP) mode by programming
 * the SCU power status to DORMANT or OFF mode.
 * This enables the CPU to be taken out of coherency by
 * preventing the CPU from receiving cache, TLB, or BTB
 * maintenance operations broadcast by other CPUs in the cluster.
 */
bl omap4_get_sar_ram_base
mov r8, r0
ldr r9, [r8, #OMAP_TYPE_OFFSET]
cmp r9, #0x1 @ Check for HS device
bne scu_gp_set
mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
ands r0, r0, #0x0f @ CPU id selects which saved SCU value
ldreq r0, [r8, #SCU_OFFSET0]
ldrne r0, [r8, #SCU_OFFSET1]
mov r1, #0x00
stmfd r13!, {r4-r12, r14}
ldr r12, =OMAP4_MON_SCU_PWR_INDEX
DO_SMC
ldmfd r13!, {r4-r12, r14}
b skip_scu_gp_set
scu_gp_set:
mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
ands r0, r0, #0x0f
ldreq r1, [r8, #SCU_OFFSET0]
ldrne r1, [r8, #SCU_OFFSET1]
bl omap4_get_scu_base
bl scu_power_mode
skip_scu_gp_set:
mrc p15, 0, r0, c1, c1, 2 @ Read NSACR data
tst r0, #(1 << 18) @ NS access to ACTLR.SMP allowed?
mrcne p15, 0, r0, c1, c0, 1
bicne r0, r0, #(1 << 6) @ Disable SMP bit
mcrne p15, 0, r0, c1, c0, 1
isb
dsb
#ifdef CONFIG_CACHE_L2X0
/*
 * Clean and invalidate the L2 cache.
 * Common cache-l2x0.c functions can't be used here since it
 * uses spinlocks. We are out of coherency here with data cache
 * disabled. The spinlock implementation uses exclusive load/store
 * instruction which can fail without data cache being enabled.
 * OMAP4 hardware doesn't support exclusive monitor which can
 * overcome exclusive access issue. Because of this, CPU can
 * lead to deadlock.
 */
bl omap4_get_sar_ram_base
mov r8, r0
mrc p15, 0, r5, c0, c0, 5 @ Read MPIDR
ands r5, r5, #0x0f
ldreq r0, [r8, #L2X0_SAVE_OFFSET0] @ Retrieve L2 state from SAR
ldrne r0, [r8, #L2X0_SAVE_OFFSET1] @ memory.
cmp r0, #3 @ only MPUSS OFF loses L2
bne do_WFI
#ifdef CONFIG_PL310_ERRATA_727915
mov r0, #0x03
mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
DO_SMC
#endif
bl omap4_get_l2cache_base
mov r2, r0
ldr r0, =0xffff
str r0, [r2, #L2X0_CLEAN_INV_WAY]
wait:
ldr r0, [r2, #L2X0_CLEAN_INV_WAY]
ldr r1, =0xffff
ands r0, r0, r1
bne wait @ spin until all ways cleaned
#ifdef CONFIG_PL310_ERRATA_727915
mov r0, #0x00
mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
DO_SMC
#endif
l2x_sync:
bl omap4_get_l2cache_base
mov r2, r0
mov r0, #0x0
str r0, [r2, #L2X0_CACHE_SYNC]
sync:
ldr r0, [r2, #L2X0_CACHE_SYNC]
ands r0, r0, #0x1
bne sync
#endif
do_WFI:
bl omap_do_wfi
/*
 * CPU is here when it failed to enter OFF/DORMANT or
 * no low power state was attempted.
 */
mrc p15, 0, r0, c1, c0, 0
tst r0, #(1 << 2) @ Check C bit enabled?
orreq r0, r0, #(1 << 2) @ Enable the C bit
mcreq p15, 0, r0, c1, c0, 0
isb
/*
 * Ensure the CPU power state is set to NORMAL in
 * SCU power state so that CPU is back in coherency.
 * In non-coherent mode CPU can lock-up and lead to
 * system deadlock.
 */
mrc p15, 0, r0, c1, c0, 1
tst r0, #(1 << 6) @ Check SMP bit enabled?
orreq r0, r0, #(1 << 6)
mcreq p15, 0, r0, c1, c0, 1
isb
bl omap4_get_sar_ram_base
mov r8, r0
ldr r9, [r8, #OMAP_TYPE_OFFSET]
cmp r9, #0x1 @ Check for HS device
bne scu_gp_clear
mov r0, #SCU_PM_NORMAL
mov r1, #0x00
stmfd r13!, {r4-r12, r14}
ldr r12, =OMAP4_MON_SCU_PWR_INDEX
DO_SMC
ldmfd r13!, {r4-r12, r14}
b skip_scu_gp_clear
scu_gp_clear:
bl omap4_get_scu_base
mov r1, #SCU_PM_NORMAL
bl scu_power_mode
skip_scu_gp_clear:
isb
dsb
ldmfd sp!, {r4-r12, pc}
ENDPROC(omap4_finish_suspend)
/*
* ============================
* == CPU resume entry point ==
* ============================
*
* void omap4_cpu_resume(void)
*
* ROM code jumps to this function while waking up from CPU
* OFF or DORMANT state. Physical address of the function is
* stored in the SAR RAM while entering to OFF or DORMANT mode.
* The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
*/
ENTRY(omap4_cpu_resume)
/*
 * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device.
 * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
 * init and for CPU1, a secure PPA API provided. CPU0 must be ON
 * while executing NS_SMP API on CPU1 and PPA version must be 1.4.0+.
 * OMAP443X GP devices- SMP bit isn't accessible.
 * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
 */
ldr r8, =OMAP44XX_SAR_RAM_BASE
ldr r9, [r8, #OMAP_TYPE_OFFSET]
cmp r9, #0x1 @ Skip if GP device
bne skip_ns_smp_enable
mrc p15, 0, r0, c0, c0, 5
ands r0, r0, #0x0f @ CPU0 already handled by PPA init
beq skip_ns_smp_enable
ppa_actrl_retry:
mov r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
adr r1, ppa_zero_params_offset
ldr r3, [r1]
add r3, r3, r1 @ Pointer to ppa_zero_params
mov r1, #0x0 @ Process ID
mov r2, #0x4 @ Flag
mov r6, #0xff
mov r12, #0x00 @ Secure Service ID
DO_SMC
cmp r0, #0x0 @ API returns 0 on success.
beq enable_smp_bit
b ppa_actrl_retry @ retry until the PPA call succeeds
enable_smp_bit:
mrc p15, 0, r0, c1, c0, 1
tst r0, #(1 << 6) @ Check SMP bit enabled?
orreq r0, r0, #(1 << 6)
mcreq p15, 0, r0, c1, c0, 1
isb
skip_ns_smp_enable:
#ifdef CONFIG_CACHE_L2X0
/*
 * Restore the L2 AUXCTRL and enable the L2 cache.
 * OMAP4_MON_L2X0_AUXCTRL_INDEX = Program the L2X0 AUXCTRL
 * OMAP4_MON_L2X0_CTRL_INDEX = Enable the L2 using L2X0 CTRL
 * register r0 contains value to be programmed.
 * L2 cache is already invalidate by ROM code as part
 * of MPUSS OFF wakeup path.
 */
ldr r2, =OMAP44XX_L2CACHE_BASE
ldr r0, [r2, #L2X0_CTRL]
and r0, #0x0f
cmp r0, #1
beq skip_l2en @ Skip if already enabled
ldr r3, =OMAP44XX_SAR_RAM_BASE
ldr r1, [r3, #OMAP_TYPE_OFFSET]
cmp r1, #0x1 @ Check for HS device
bne set_gp_por
/* HS device: set L2 POR (prefetch) via the PPA service */
ldr r0, =OMAP4_PPA_L2_POR_INDEX
ldr r1, =OMAP44XX_SAR_RAM_BASE
ldr r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
adr r1, ppa_por_params_offset
ldr r3, [r1]
add r3, r3, r1 @ Pointer to ppa_por_params
str r4, [r3, #0x04]
mov r1, #0x0 @ Process ID
mov r2, #0x4 @ Flag
mov r6, #0xff
mov r12, #0x00 @ Secure Service ID
DO_SMC
b set_aux_ctrl
set_gp_por:
ldr r1, =OMAP44XX_SAR_RAM_BASE
ldr r0, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
ldr r12, =OMAP4_MON_L2X0_PREFETCH_INDEX @ Setup L2 PREFETCH
DO_SMC
set_aux_ctrl:
ldr r1, =OMAP44XX_SAR_RAM_BASE
ldr r0, [r1, #L2X0_AUXCTRL_OFFSET]
ldr r12, =OMAP4_MON_L2X0_AUXCTRL_INDEX @ Setup L2 AUXCTRL
DO_SMC
mov r0, #0x1
ldr r12, =OMAP4_MON_L2X0_CTRL_INDEX @ Enable L2 cache
DO_SMC
skip_l2en:
#endif
b cpu_resume @ Jump to generic resume
ppa_por_params_offset:
.long ppa_por_params - . @ PC-relative so it works pre-MMU
ENDPROC(omap4_cpu_resume)
#endif /* CONFIG_ARCH_OMAP4 */
#endif /* defined(CONFIG_SMP) && defined(CONFIG_PM) */
/*
 * omap_do_wfi: drain write buffers, issue barriers and execute WFI.
 * Returns to the caller if/when the CPU is woken without losing
 * context.  Clobbers whatever omap_interconnect_sync clobbers.
 */
ENTRY(omap_do_wfi)
stmfd sp!, {lr}
#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
/* Drain interconnect write buffers. */
bl omap_interconnect_sync
#endif
/*
 * Execute an ISB instruction to ensure that all of the
 * CP15 register changes have been committed.
 */
isb
/*
 * Execute a barrier instruction to ensure that all cache,
 * TLB and branch predictor maintenance operations issued
 * by any CPU in the cluster have completed.
 */
dsb
dmb
/*
 * Execute a WFI instruction and wait until the
 * STANDBYWFI output is asserted to indicate that the
 * CPU is in idle and low power state. CPU can speculatively
 * prefetch the instructions so add NOPs after WFI. Sixteen
 * NOPs as per Cortex-A9 pipeline.
 */
wfi @ Wait For Interrupt
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
ldmfd sp!, {pc}
ppa_zero_params_offset:
.long ppa_zero_params - . @ PC-relative so it works pre-MMU
ENDPROC(omap_do_wfi)
.data
.align 2
@ Argument blocks passed to the secure-world PPA services by the
@ resume path above (referenced via the *_offset words in .text).
ppa_zero_params:
.word 0 @ single zero argument
ppa_por_params:
.word 1, 0 @ count = 1; slot filled in at run time (see str r4, [r3, #0x04])
|
aixcc-public/challenge-001-exemplar-source
| 34,195
|
arch/arm/kernel/entry-armv.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/kernel/entry-armv.S
*
* Copyright (C) 1996,1997,1998 Russell King.
* ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
* nommu support by Hyok S. Choi (hyok.choi@samsung.com)
*
* Low-level vector interface routines
*
* Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
* that causes it to save wrong values... Be aware!
*/
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>
#include <asm/uaccess-asm.h>
#include "entry-header.S"
#include <asm/probes.h>
/*
* Interrupt handling.
*/
@ irq_handler: common IRQ dispatch.  \from_user says whether the
@ exception was taken from user (1) or kernel (0) context.  Passes the
@ pt_regs pointer (old SP) to generic_handle_arch_irq, switching to the
@ per-CPU IRQ stack via call_with_stack unless already running on it.
.macro irq_handler, from_user:req
mov r1, sp @ r1 = pt_regs (current SP)
ldr_this_cpu r2, irq_stack_ptr, r2, r3 @ r2 = this CPU's IRQ stack
.if \from_user == 0
@
@ If we took the interrupt while running in the kernel, we may already
@ be using the IRQ stack, so revert to the original value in that case.
@
subs r3, r2, r1 @ SP above bottom of IRQ stack?
rsbscs r3, r3, #THREAD_SIZE @ ... and below the top?
#ifdef CONFIG_VMAP_STACK
ldr_va r3, high_memory, cc @ End of the linear region
cmpcc r3, r1 @ Stack pointer was below it?
#endif
bcc 0f @ If not, switch to the IRQ stack
mov r0, r1 @ already on IRQ stack: call directly
bl generic_handle_arch_irq
b 1f
0:
.endif
mov_l r0, generic_handle_arch_irq
bl call_with_stack @ run handler (r0) on stack r2, arg r1
1:
.endm
@ pabt_helper / dabt_helper: invoke the CPU-specific prefetch/data
@ abort handler, either indirectly through the global processor
@ struct (MULTI_* kernels) or as a direct call.
.macro pabt_helper
@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
ldr_va ip, processor, offset=PROCESSOR_PABT_FUNC
bl_r ip
#else
bl CPU_PABORT_HANDLER
#endif
.endm
.macro dabt_helper
@
@ Call the processor-specific abort handler:
@
@ r2 - pt_regs
@ r4 - aborted context pc
@ r5 - aborted context psr
@
@ The abort handler must return the aborted address in r0, and
@ the fault status register in r1. r9 must be preserved.
@
#ifdef MULTI_DABORT
ldr_va ip, processor, offset=PROCESSOR_DABT_FUNC
bl_r ip
#else
bl CPU_DABORT_HANDLER
#endif
.endm
.section .entry.text,"ax",%progbits
/*
* Invalid mode handlers
*/
@ inv_entry: build a minimal pt_regs frame for an exception taken from
@ a mode we do not handle, recording \reason in r1 for bad_mode.
.macro inv_entry, reason
sub sp, sp, #PT_REGS_SIZE
ARM( stmib sp, {r1 - lr} )
THUMB( stmia sp, {r0 - r12} )
THUMB( str sp, [sp, #S_SP] )
THUMB( str lr, [sp, #S_LR] )
mov r1, #\reason
.endm
__pabt_invalid:
inv_entry BAD_PREFETCH
b common_invalid
ENDPROC(__pabt_invalid)
__dabt_invalid:
inv_entry BAD_DATA
b common_invalid
ENDPROC(__dabt_invalid)
__irq_invalid:
inv_entry BAD_IRQ
b common_invalid
ENDPROC(__irq_invalid)
__und_invalid:
inv_entry BAD_UNDEFINSTR
@
@ XXX fall through to common_invalid
@
@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
zero_fp
ldmia r0, {r4 - r6} @ r0 = mode stack: saved r0, lr_<exc>, spsr_<exc>
add r0, sp, #S_PC @ here for interlock avoidance
mov r7, #-1 @ "" "" "" ""
str r4, [sp] @ save preserved r0
stmia r0, {r5 - r7} @ lr_<exception>,
@ cpsr_<exception>, "old_r0"
mov r0, sp @ pt_regs argument for bad_mode
b bad_mode
ENDPROC(__und_invalid)
/*
* SVC mode handlers
*/
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif
@ svc_entry: build a full pt_regs frame for an exception taken in SVC
@ mode.  \stack_hole reserves extra space below the frame (kprobes),
@ \uaccess selects whether user access is disabled on entry, \trace
@ controls irq-tracing, \overflow_check guards vmap'd stack overflow.
@ On entry r0 points at the mode stack holding {r0, lr_<exc>, spsr}.
.macro svc_entry, stack_hole=0, trace=1, uaccess=1, overflow_check=1
UNWIND(.fnstart )
sub sp, sp, #(SVC_REGS_SIZE + \stack_hole)
THUMB( add sp, r1 ) @ get SP in a GPR without
THUMB( sub r1, sp, r1 ) @ using a temp register
.if \overflow_check
UNWIND(.save {r0 - pc} )
do_overflow_check (SVC_REGS_SIZE + \stack_hole)
.endif
#ifdef CONFIG_THUMB2_KERNEL
tst r1, #4 @ test stack pointer alignment
sub r1, sp, r1 @ restore original R1
sub sp, r1 @ restore original SP
#else
SPFIX( tst sp, #4 )
#endif
SPFIX( subne sp, sp, #4 ) @ keep the EABI frame 8-byte aligned
ARM( stmib sp, {r1 - r12} )
THUMB( stmia sp, {r0 - r12} ) @ No STMIB in Thumb-2
ldmia r0, {r3 - r5} @ r3 = saved r0, r4 = lr_<exc>, r5 = spsr
add r7, sp, #S_SP @ here for interlock avoidance
mov r6, #-1 @ "" "" "" ""
add r2, sp, #(SVC_REGS_SIZE + \stack_hole)
SPFIX( addne r2, r2, #4 )
str r3, [sp] @ save the "real" r0 copied
@ from the exception stack
mov r3, lr
@
@ We are now ready to fill in the remaining blanks on the stack:
@
@ r2 - sp_svc
@ r3 - lr_svc
@ r4 - lr_<exception>, already fixed up for correct return/restart
@ r5 - spsr_<exception>
@ r6 - orig_r0 (see pt_regs definition in ptrace.h)
@
stmia r7, {r2 - r6}
get_thread_info tsk
uaccess_entry tsk, r0, r1, r2, \uaccess
.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
.endif
.endm
.align 5
@ Data abort taken in SVC mode: build the frame, run the CPU-specific
@ abort handler, then return to the interrupted kernel code.
__dabt_svc:
svc_entry uaccess=0
mov r2, sp @ pt_regs for the abort handler
dabt_helper
THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR
svc_exit r5 @ return from exception
UNWIND(.fnend )
ENDPROC(__dabt_svc)
.align 5
@ IRQ taken in SVC mode.  After handling, preempt the interrupted
@ context if its preempt count is zero and TIF_NEED_RESCHED is set.
__irq_svc:
svc_entry
irq_handler from_user=0
#ifdef CONFIG_PREEMPTION
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
ldr r0, [tsk, #TI_FLAGS] @ get flags
teq r8, #0 @ if preempt count != 0
movne r0, #0 @ force flags to 0
tst r0, #_TIF_NEED_RESCHED
blne svc_preempt
#endif
svc_exit r5, irq = 1 @ return from exception
UNWIND(.fnend )
ENDPROC(__irq_svc)
.ltorg
#ifdef CONFIG_PREEMPTION
@ svc_preempt: keep calling preempt_schedule_irq until no further
@ reschedule is pending, then return to __irq_svc via saved lr (r8).
svc_preempt:
mov r8, lr
1: bl preempt_schedule_irq @ irq en/disable is done inside
ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
tst r0, #_TIF_NEED_RESCHED
reteq r8 @ done: nothing more pending
b 1b @ go again
#endif
@ __und_fault: r0 = pt_regs, r1 = PC correction (4 for ARM, 2 for
@ Thumb).  Rewinds regs->ARM_pc onto the faulting instruction and
@ tail-calls do_undefinstr to deliver the fault.
__und_fault:
@ Correct the PC such that it is pointing at the instruction
@ which caused the fault. If the faulting instruction was ARM
@ the PC will be pointing at the next instruction, and have to
@ subtract 4. Otherwise, it is Thumb, and the PC will be
@ pointing at the second half of the Thumb instruction. We
@ have to subtract 2.
ldr r2, [r0, #S_PC]
sub r2, r2, r1
str r2, [r0, #S_PC]
b do_undefinstr @ tail call; returns to caller's lr
ENDPROC(__und_fault)
.align 5
@ Undefined instruction taken in SVC mode (e.g. kprobe breakpoints).
__und_svc:
#ifdef CONFIG_KPROBES
@ If a kprobe is about to simulate a "stmdb sp..." instruction,
@ it obviously needs free stack space which then will belong to
@ the saved context.
svc_entry MAX_STACK_SIZE
#else
svc_entry
#endif
mov r1, #4 @ PC correction to apply
THUMB( tst r5, #PSR_T_BIT ) @ exception taken in Thumb mode?
THUMB( movne r1, #2 ) @ if so, fix up PC correction
mov r0, sp @ struct pt_regs *regs
bl __und_fault
__und_svc_finish:
get_thread_info tsk
ldr r5, [sp, #S_PSR] @ Get SVC cpsr
svc_exit r5 @ return from exception
UNWIND(.fnend )
ENDPROC(__und_svc)
.align 5
@ Prefetch abort taken in SVC mode.
__pabt_svc:
svc_entry
mov r2, sp @ regs
pabt_helper
svc_exit r5 @ return from exception
UNWIND(.fnend )
ENDPROC(__pabt_svc)
.align 5
@ FIQ taken in SVC mode: treated as an NMI; no irq tracing (trace=0).
__fiq_svc:
svc_entry trace=0
mov r0, sp @ struct pt_regs *regs
bl handle_fiq_as_nmi
svc_exit_via_fiq
UNWIND(.fnend )
ENDPROC(__fiq_svc)
/*
 * Abort mode handlers
 */
@
@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
@ and reuses the same macros. However in abort mode we must also
@ save/restore lr_abt and spsr_abt to make nested aborts safe.
@
.align 5
__fiq_abt:
svc_entry trace=0
@ Switch briefly to ABT mode to capture its banked lr/spsr ...
ARM( msr cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
THUMB( mov r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
THUMB( msr cpsr_c, r0 )
mov r1, lr @ Save lr_abt
mrs r2, spsr @ Save spsr_abt, abort is now safe
ARM( msr cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
THUMB( mov r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
THUMB( msr cpsr_c, r0 )
stmfd sp!, {r1 - r2} @ keep them across the C call
add r0, sp, #8 @ struct pt_regs *regs
bl handle_fiq_as_nmi
ldmfd sp!, {r1 - r2}
@ ... and restore them before leaving via the FIQ exit path.
ARM( msr cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
THUMB( mov r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
THUMB( msr cpsr_c, r0 )
mov lr, r1 @ Restore lr_abt, abort is unsafe
msr spsr_cxsf, r2 @ Restore spsr_abt
ARM( msr cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
THUMB( mov r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
THUMB( msr cpsr_c, r0 )
svc_exit_via_fiq
UNWIND(.fnend )
ENDPROC(__fiq_abt)
/*
* User mode handlers
*
* EABI note: sp_svc is always 64-bit aligned here, so should PT_REGS_SIZE
*/
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif
@ usr_entry: build a full pt_regs frame for an exception taken in USR
@ mode.  \uaccess selects whether user access is disabled on entry;
@ \trace controls irq-tracing/context-tracking.  On entry r0 points at
@ the mode stack holding {r0, lr_<exc>, spsr_<exc>}.
.macro usr_entry, trace=1, uaccess=1
UNWIND(.fnstart )
UNWIND(.cantunwind ) @ don't unwind the user space
sub sp, sp, #PT_REGS_SIZE
ARM( stmib sp, {r1 - r12} )
THUMB( stmia sp, {r0 - r12} )
ATRAP( mrc p15, 0, r7, c1, c0, 0) @ current SCTLR
ATRAP( ldr_va r8, cr_alignment) @ expected kernel-mode SCTLR
ldmia r0, {r3 - r5} @ r3 = saved r0, r4 = lr_<exc>, r5 = spsr
add r0, sp, #S_PC @ here for interlock avoidance
mov r6, #-1 @ "" "" "" ""
str r3, [sp] @ save the "real" r0 copied
@ from the exception stack
@
@ We are now ready to fill in the remaining blanks on the stack:
@
@ r4 - lr_<exception>, already fixed up for correct return/restart
@ r5 - spsr_<exception>
@ r6 - orig_r0 (see pt_regs definition in ptrace.h)
@
@ Also, separately save sp_usr and lr_usr
@
stmia r0, {r4 - r6}
ARM( stmdb r0, {sp, lr}^ )
THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
.if \uaccess
uaccess_disable ip
.endif
@ Enable the alignment trap while in kernel mode
ATRAP( teq r8, r7)
ATRAP( mcrne p15, 0, r8, c1, c0, 0)
reload_current r7, r8
@
@ Clear FP to mark the first stack frame
@
zero_fp
.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
ct_user_exit save = 0
.endif
.endm
@ kuser_cmpxchg_check: on pre-v6K SMP-less kernels with kuser helpers,
@ detect an exception taken inside the cmpxchg helper's critical
@ section (r4 = faulting PC) and restart it.  No-op otherwise.
.macro kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
@ Make sure our user space atomic helper is restarted
@ if it was interrupted in a critical region. Here we
@ perform a quick test inline since it should be false
@ 99.9999% of the time. The rest is done out of line.
ldr r0, =TASK_SIZE
cmp r4, r0 @ PC in the high-vector helper area?
blhs kuser_cmpxchg64_fixup
#endif
#endif
.endm
.align 5
@ Data abort taken in USR mode.
__dabt_usr:
usr_entry uaccess=0
kuser_cmpxchg_check
mov r2, sp @ pt_regs for the abort handler
dabt_helper
b ret_from_exception
UNWIND(.fnend )
ENDPROC(__dabt_usr)
.align 5
@ IRQ taken in USR mode.
__irq_usr:
usr_entry
kuser_cmpxchg_check
irq_handler from_user=1
get_thread_info tsk
mov why, #0
b ret_to_user_from_irq
UNWIND(.fnend )
ENDPROC(__irq_usr)
.ltorg
.align 5
@ Undefined instruction taken in USR mode.  Fetches the faulting
@ instruction from user space (ARM or one/two Thumb halfwords) and
@ falls through to call_fpe, which may emulate it (FP/NEON/iWMMXt)
@ or deliver SIGILL via the __und_usr_fault_* paths.
__und_usr:
usr_entry uaccess=0
mov r2, r4
mov r3, r5
@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
@ faulting instruction depending on Thumb mode.
@ r3 = regs->ARM_cpsr
@
@ The emulation code returns using r9 if it has emulated the
@ instruction, or the more conventional lr if we are to treat
@ this as a real undefined instruction
@
badr r9, ret_from_exception
@ IRQs must be enabled before attempting to read the instruction from
@ user space since that could cause a page/translation fault if the
@ page table was modified by another CPU.
enable_irq
tst r3, #PSR_T_BIT @ Thumb mode?
bne __und_usr_thumb
sub r4, r2, #4 @ ARM instr at LR - 4
1: ldrt r0, [r4] @ user-space load; fixup at 4: below
ARM_BE8(rev r0, r0) @ little endian instruction
uaccess_disable ip
@ r0 = 32-bit ARM instruction which caused the exception
@ r2 = PC value for the following instruction (:= regs->ARM_pc)
@ r4 = PC value for the faulting instruction
@ lr = 32-bit undefined instruction function
badr lr, __und_usr_fault_32
b call_fpe
__und_usr_thumb:
@ Thumb instruction
sub r4, r2, #2 @ First half of thumb instr at LR - 2
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling. Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6. This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
ldr_va r5, cpu_architecture
cmp r5, #CPU_ARCH_ARMv7
blo __und_usr_fault_16 @ 16bit undefined instruction
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless. Temporarily
 * override the assembler target arch with the minimum required instead:
 */
.arch armv6t2
#endif
2: ldrht r5, [r4] @ first halfword; fixup at 4: below
ARM_BE8(rev16 r5, r5) @ little endian instruction
cmp r5, #0xe800 @ 32bit instruction if xx != 0
blo __und_usr_fault_16_pan @ 16bit undefined instruction
3: ldrht r0, [r2] @ second halfword; fixup at 4: below
ARM_BE8(rev16 r0, r0) @ little endian instruction
uaccess_disable ip
add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
orr r0, r0, r5, lsl #16 @ combine halves into one 32-bit opcode
badr lr, __und_usr_fault_32
@ r0 = the two 16-bit Thumb instructions which caused the exception
@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
@ r4 = PC value for the first 16-bit Thumb instruction
@ lr = 32bit undefined instruction function
#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
.arch armv6k
#else
.arch armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
b __und_usr_fault_16
#endif
UNWIND(.fnend)
ENDPROC(__und_usr)
/*
 * The out of line fixup for the ldrt instructions above.
 */
.pushsection .text.fixup, "ax"
.align 2
4: str r4, [sp, #S_PC] @ retry current instruction
ret r9
.popsection
.pushsection __ex_table,"a"
.long 1b, 4b
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
.long 2b, 4b
.long 3b, 4b
#endif
.popsection
/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined. The only instructions that should fault are the
 * co-processor instructions. However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them. Plus, there's
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 * r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
 * r2 = PC value to resume execution after successful emulation
 * r9 = normal "successful" return address
 * r10 = this threads thread_info structure
 * lr = unrecognised instruction return address
 * IRQs enabled, FIQs enabled.
 */
@
@ Fall-through from Thumb-2 __und_usr
@
#ifdef CONFIG_NEON
@ Thumb path enters here so the NEON match below uses the Thumb table.
get_thread_info r10 @ get current thread
adr r6, .LCneon_thumb_opcodes
b 2f
#endif
call_fpe:
get_thread_info r10 @ get current thread
#ifdef CONFIG_NEON
adr r6, .LCneon_arm_opcodes
@ Walk the {mask, opcode} pairs; a zero mask terminates the table.
2: ldr r5, [r6], #4 @ mask value
ldr r7, [r6], #4 @ opcode bits matching in mask
cmp r5, #0 @ end mask?
beq 1f
and r8, r0, r5
cmp r8, r7 @ NEON instruction?
bne 2b
mov r7, #1
strb r7, [r10, #TI_USED_CP + 10] @ mark CP#10 as used
strb r7, [r10, #TI_USED_CP + 11] @ mark CP#11 as used
b do_vfp @ let VFP handler handle this
1:
#endif
tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27
tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2
reteq lr
and r8, r0, #0x00000f00 @ mask out CP number
mov r7, #1
add r6, r10, r8, lsr #8 @ add used_cp[] array offset first
strb r7, [r6, #TI_USED_CP] @ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
@ Test if we need to give access to iWMMXt coprocessors
ldr r5, [r10, #TI_FLAGS]
rsbs r7, r8, #(1 << 8) @ CP 0 or 1 only
movscs r7, r5, lsr #(TIF_USING_IWMMXT + 1)
bcs iwmmxt_task_enable
#endif
@ Computed branch into the per-coprocessor table below (one word each).
ARM( add pc, pc, r8, lsr #6 )
THUMB( lsr r8, r8, #6 )
THUMB( add pc, r8 )
nop
ret.w lr @ CP#0
W(b) do_fpe @ CP#1 (FPE)
W(b) do_fpe @ CP#2 (FPE)
ret.w lr @ CP#3
ret.w lr @ CP#4
ret.w lr @ CP#5
ret.w lr @ CP#6
ret.w lr @ CP#7
ret.w lr @ CP#8
ret.w lr @ CP#9
#ifdef CONFIG_VFP
W(b) do_vfp @ CP#10 (VFP)
W(b) do_vfp @ CP#11 (VFP)
#else
ret.w lr @ CP#10 (VFP)
ret.w lr @ CP#11 (VFP)
#endif
ret.w lr @ CP#12
ret.w lr @ CP#13
ret.w lr @ CP#14 (Debug)
ret.w lr @ CP#15 (Control)
#ifdef CONFIG_NEON
.align 6
@ {mask, opcode} pairs identifying NEON data/load-store encodings;
@ zero mask terminates each table.
.LCneon_arm_opcodes:
.word 0xfe000000 @ mask
.word 0xf2000000 @ opcode
.word 0xff100000 @ mask
.word 0xf4000000 @ opcode
.word 0x00000000 @ mask
.word 0x00000000 @ opcode
.LCneon_thumb_opcodes:
.word 0xef000000 @ mask
.word 0xef000000 @ opcode
.word 0xff100000 @ mask
.word 0xf9000000 @ opcode
.word 0x00000000 @ mask
.word 0x00000000 @ opcode
#endif
@ do_fpe: dispatch an FPA (CP#1/CP#2) instruction to the FP emulator
@ entry point stored in fp_enter (defaults to no_fp below until an
@ emulator module installs itself).
do_fpe:
add r10, r10, #TI_FPSTATE @ r10 = workspace
ldr_va pc, fp_enter, tmp=r4 @ Call FP module USR entry point
/*
 * The FP module is called with these registers set:
 * r0 = instruction
 * r2 = PC+4
 * r9 = normal "successful" return address
 * r10 = FP workspace
 * lr = unrecognised FP instruction return address
 */
.pushsection .data
.align 2
ENTRY(fp_enter)
.word no_fp @ patched by the FP emulator when it loads
.popsection
ENTRY(no_fp)
ret lr @ no emulator: treat as undefined (return via lr)
ENDPROC(no_fp)
@ Deliver an undefined-instruction fault for a 32-bit (r1=4) or 16-bit
@ (r1=2) opcode; the _16_pan variant first re-disables user access.
__und_usr_fault_32:
mov r1, #4
b 1f
__und_usr_fault_16_pan:
uaccess_disable ip
__und_usr_fault_16:
mov r1, #2
1: mov r0, sp @ pt_regs
badr lr, ret_from_exception
b __und_fault
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)
.align 5
@ Prefetch abort taken in USR mode; falls through to the common
@ user-mode exception return below.
__pabt_usr:
usr_entry
mov r2, sp @ regs
pabt_helper
UNWIND(.fnend )
/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
UNWIND(.fnstart )
UNWIND(.cantunwind )
get_thread_info tsk
mov why, #0
b ret_to_user
UNWIND(.fnend )
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)
.align 5
@ FIQ taken in USR mode: handled as an NMI, then a full (slow-path)
@ user register restore.
__fiq_usr:
usr_entry trace=0
kuser_cmpxchg_check
mov r0, sp @ struct pt_regs *regs
bl handle_fiq_as_nmi
get_thread_info tsk
restore_user_regs fast = 0, offset = 0
UNWIND(.fnend )
ENDPROC(__fiq_usr)
/*
* Register switch for ARMv3 and ARMv4 processors
* r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
* previous and next are guaranteed not to be the same.
*/
@ __switch_to(prev_task=r0, prev_thread_info=r1, next_thread_info=r2):
@ save callee-saved state into prev's TI_CPU_SAVE area, switch TLS,
@ domain, stack canary and 'current', notify the thread_notify chain,
@ then load next's saved registers (the SP/PC load is the actual
@ context switch).  Returns prev task_struct in r0.
ENTRY(__switch_to)
UNWIND(.fnstart )
UNWIND(.cantunwind )
add ip, r1, #TI_CPU_SAVE
ARM( stmia ip!, {r4 - sl, fp, sp, lr} ) @ Store most regs on stack
THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
THUMB( str sp, [ip], #4 )
THUMB( str lr, [ip], #4 )
ldr r4, [r2, #TI_TP_VALUE]
ldr r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
mrc p15, 0, r6, c3, c0, 0 @ Get domain register
str r6, [r1, #TI_CPU_DOMAIN] @ Save old domain register
ldr r6, [r2, #TI_CPU_DOMAIN]
#endif
switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
!defined(CONFIG_STACKPROTECTOR_PER_TASK)
ldr r8, =__stack_chk_guard
@ TSK_STACK_CANARY may exceed the 12-bit immediate offset range.
.if (TSK_STACK_CANARY > IMM12_MASK)
add r9, r2, #TSK_STACK_CANARY & ~IMM12_MASK
ldr r9, [r9, #TSK_STACK_CANARY & IMM12_MASK]
.else
ldr r9, [r2, #TSK_STACK_CANARY & IMM12_MASK]
.endif
#endif
mov r7, r2 @ Preserve 'next'
#ifdef CONFIG_CPU_USE_DOMAINS
mcr p15, 0, r6, c3, c0, 0 @ Set domain register
#endif
mov r5, r0 @ preserve prev across the call
add r4, r2, #TI_CPU_SAVE
ldr r0, =thread_notify_head
mov r1, #THREAD_NOTIFY_SWITCH
bl atomic_notifier_call_chain
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
!defined(CONFIG_STACKPROTECTOR_PER_TASK)
str r9, [r8] @ publish next's stack canary
#endif
mov r0, r5 @ return value: prev task_struct
#if !defined(CONFIG_THUMB2_KERNEL) && !defined(CONFIG_VMAP_STACK)
set_current r7, r8
ldmia r4, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
#else
mov r1, r7
ldmia r4, {r4 - sl, fp, ip, lr} @ Load all regs saved previously
#ifdef CONFIG_VMAP_STACK
@
@ Do a dummy read from the new stack while running from the old one so
@ that we can rely on do_translation_fault() to fix up any stale PMD
@ entries covering the vmalloc region.
@
ldr r2, [ip]
#endif
@ When CONFIG_THREAD_INFO_IN_TASK=n, the update of SP itself is what
@ effectuates the task switch, as that is what causes the observable
@ values of current and current_thread_info to change. When
@ CONFIG_THREAD_INFO_IN_TASK=y, setting current (and therefore
@ current_thread_info) is done explicitly, and the update of SP just
@ switches us to another stack, with few other side effects. In order
@ to prevent this distinction from causing any inconsistencies, let's
@ keep the 'set_current' call as close as we can to the update of SP.
set_current r1, r2
mov sp, ip
ret lr
#endif
UNWIND(.fnend )
ENDPROC(__switch_to)
#ifdef CONFIG_VMAP_STACK
.text
.align 2
@ __bad_stack: vmap'd-stack overflow path.  Switch to this CPU's
@ overflow stack, reconstruct a frame record and pt_regs, and hand
@ off to handle_bad_stack (which does not come back here).
__bad_stack:
@
@ We've just detected an overflow. We need to load the address of this
@ CPU's overflow stack into the stack pointer register. We have only one
@ scratch register so let's use a sequence of ADDs including one
@ involving the PC, and decorate them with PC-relative group
@ relocations. As these are ARM only, switch to ARM mode first.
@
@ We enter here with IP clobbered and its value stashed on the mode
@ stack.
@
THUMB( bx pc )
THUMB( nop )
THUMB( .arm )
ldr_this_cpu_armv6 ip, overflow_stack_ptr
str sp, [ip, #-4]! @ Preserve original SP value
mov sp, ip @ Switch to overflow stack
pop {ip} @ Original SP in IP
#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
mov ip, ip @ mov expected by unwinder
push {fp, ip, lr, pc} @ GCC flavor frame record
#else
str ip, [sp, #-8]! @ store original SP
push {fpreg, lr} @ Clang flavor frame record
#endif
UNWIND( ldr ip, [r0, #4] ) @ load exception LR
UNWIND( str ip, [sp, #12] ) @ store in the frame record
ldr ip, [r0, #12] @ reload IP
@ Store the original GPRs to the new stack.
svc_entry uaccess=0, overflow_check=0
UNWIND( .save {sp, pc} )
UNWIND( .save {fpreg, lr} )
UNWIND( .setfp fpreg, sp )
ldr fpreg, [sp, #S_SP] @ Add our frame record
@ to the linked list
#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
ldr r1, [fp, #4] @ reload SP at entry
add fp, fp, #12
#else
ldr r1, [fpreg, #8]
#endif
str r1, [sp, #S_SP] @ store in pt_regs
@ Stash the regs for handle_bad_stack
mov r0, sp
@ Time to die
bl handle_bad_stack
nop
UNWIND( .fnend )
ENDPROC(__bad_stack)
#endif
__INIT
/*
* User helpers.
*
* Each segment is 32-byte aligned and will be moved to the top of the high
* vector page. New segments (if ever needed) must be added in front of
* existing ones. This mechanism should be used only for things that are
* really small and justified, and not be abused freely.
*
* See Documentation/arm/kernel_user_helpers.rst for formal definitions.
*/
THUMB( .arm )
@ usr_ret: return to user space via \reg, using bx when Thumb
@ interworking must be honoured.
.macro usr_ret, reg
#ifdef CONFIG_ARM_THUMB
bx \reg
#else
ret \reg
#endif
.endm
@ kuser_pad: pad from symbol \sym up to \size bytes, first word-aligning
@ with zero bytes, then filling with 0xe7fddef1 (an undefined
@ instruction) so a stray jump into the padding traps.
.macro kuser_pad, sym, size
.if (. - \sym) & 3
.rept 4 - (. - \sym) & 3
.byte 0
.endr
.endif
.rept (\size - (. - \sym)) / 4
.word 0xe7fddef1
.endr
.endm
#ifdef CONFIG_KUSER_HELPERS
@ User-space helper segment, copied to fixed addresses at the top of
@ the high vector page (entry points at 0xffff0f60/0fa0/0fc0/0fe0).
@ Layout and padding are therefore ABI: each slot is padded with
@ kuser_pad to its fixed size.
.align 5
.globl __kuser_helper_start
__kuser_helper_start:
/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */
__kuser_cmpxchg64: @ 0xffff0f60
#if defined(CONFIG_CPU_32v6K)
@ v6K+: genuine atomics via ldrexd/strexd.
stmfd sp!, {r4, r5, r6, r7}
ldrd r4, r5, [r0] @ load old val
ldrd r6, r7, [r1] @ load new val
smp_dmb arm
1: ldrexd r0, r1, [r2] @ load current val
eors r3, r0, r4 @ compare with oldval (1)
eorseq r3, r1, r5 @ compare with oldval (2)
strexdeq r3, r6, r7, [r2] @ store newval if eq
teqeq r3, #1 @ success?
beq 1b @ if no then retry
smp_dmb arm
rsbs r0, r3, #0 @ set returned val and C flag
ldmfd sp!, {r4, r5, r6, r7}
usr_ret lr
#elif !defined(CONFIG_SMP)
#ifdef CONFIG_MMU
/*
 * The only thing that can break atomicity in this cmpxchg64
 * implementation is either an IRQ or a data abort exception
 * causing another process/thread to be scheduled in the middle of
 * the critical sequence. The same strategy as for cmpxchg is used.
 */
stmfd sp!, {r4, r5, r6, lr}
ldmia r0, {r4, r5} @ load old val
ldmia r1, {r6, lr} @ load new val
1: ldmia r2, {r0, r1} @ load current val
eors r3, r0, r4 @ compare with oldval (1)
eorseq r3, r1, r5 @ compare with oldval (2)
2: stmiaeq r2, {r6, lr} @ store newval if eq
rsbs r0, r3, #0 @ set return val and C flag
ldmfd sp!, {r4, r5, r6, pc}
.text
kuser_cmpxchg64_fixup:
@ Called from kuser_cmpxchg_check macro.
@ r4 = address of interrupted insn (must be preserved).
@ sp = saved regs. r7 and r8 are clobbered.
@ 1b = first critical insn, 2b = last critical insn.
@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
mov r7, #0xffff0fff
sub r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
subs r8, r4, r7
rsbscs r8, r8, #(2b - 1b)
strcs r7, [sp, #S_PC] @ restart the critical section
#if __LINUX_ARM_ARCH__ < 6
bcc kuser_cmpxchg32_fixup @ not in 64-bit helper: try 32-bit one
#endif
ret lr
.previous
#else
#warning "NPTL on non MMU needs fixing"
mov r0, #-1
adds r0, r0, #0
usr_ret lr
#endif
#else
#error "incoherent kernel configuration"
#endif
kuser_pad __kuser_cmpxchg64, 64
__kuser_memory_barrier: @ 0xffff0fa0
smp_dmb arm
usr_ret lr
kuser_pad __kuser_memory_barrier, 32
__kuser_cmpxchg: @ 0xffff0fc0
#if __LINUX_ARM_ARCH__ < 6
#ifdef CONFIG_MMU
/*
 * The only thing that can break atomicity in this cmpxchg
 * implementation is either an IRQ or a data abort exception
 * causing another process/thread to be scheduled in the middle
 * of the critical sequence. To prevent this, code is added to
 * the IRQ and data abort exception handlers to set the pc back
 * to the beginning of the critical section if it is found to be
 * within that critical section (see kuser_cmpxchg_fixup).
 */
1: ldr r3, [r2] @ load current val
subs r3, r3, r0 @ compare with oldval
2: streq r1, [r2] @ store newval if eq
rsbs r0, r3, #0 @ set return val and C flag
usr_ret lr
.text
kuser_cmpxchg32_fixup:
@ Called from kuser_cmpxchg_check macro.
@ r4 = address of interrupted insn (must be preserved).
@ sp = saved regs. r7 and r8 are clobbered.
@ 1b = first critical insn, 2b = last critical insn.
@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
mov r7, #0xffff0fff
sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
subs r8, r4, r7
rsbscs r8, r8, #(2b - 1b)
strcs r7, [sp, #S_PC] @ restart the critical section
ret lr
.previous
#else
#warning "NPTL on non MMU needs fixing"
mov r0, #-1
adds r0, r0, #0
usr_ret lr
#endif
#else
@ v6+: genuine atomics via ldrex/strex.
smp_dmb arm
1: ldrex r3, [r2]
subs r3, r3, r0
strexeq r3, r1, [r2]
teqeq r3, #1
beq 1b
rsbs r0, r3, #0
/* beware -- each __kuser slot must be 8 instructions max */
ALT_SMP(b __kuser_memory_barrier)
ALT_UP(usr_ret lr)
#endif
kuser_pad __kuser_cmpxchg, 32
__kuser_get_tls: @ 0xffff0fe0
ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
usr_ret lr
mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
kuser_pad __kuser_get_tls, 16
.rep 3
.word 0 @ 0xffff0ff0 software TLS value, then
.endr @ pad up to __kuser_helper_version
__kuser_helper_version: @ 0xffff0ffc
.word ((__kuser_helper_end - __kuser_helper_start) >> 5)
.globl __kuser_helper_end
__kuser_helper_end:
#endif
THUMB( .thumb )
/*
* Vector stubs.
*
* This code is copied to 0xffff1000 so we can use branches in the
* vectors, rather than ldr's. Note that this code must not exceed
* a page size.
*
* Common stub entry macro:
* Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
*
* SP points to a minimal amount of processor-private memory, the address
* of which is copied into r0 for the mode specific abort handler.
*/
@ vector_stub: emit the per-exception entry stub.  \mode is the mode
@ the exception is taken in, \correction the lr adjustment needed to
@ point at the parent PC.  Saves {r0, lr, spsr} on the mode-private
@ stack, then switches to SVC32 via "movs pc, lr" through the branch
@ table that must immediately follow each invocation, indexed by the
@ parent mode (SPSR bits [3:0]).
.macro vector_stub, name, mode, correction=0
.align 5
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
@ Spectre-BHB variant: flush branch predictor first, then fall through.
vector_bhb_bpiall_\name:
mcr p15, 0, r0, c7, c5, 6 @ BPIALL
@ isb not needed due to "movs pc, lr" in the vector stub
@ which gives a "context synchronisation".
#endif
vector_\name:
.if \correction
sub lr, lr, #\correction
.endif
@ Save r0, lr_<exception> (parent PC)
stmia sp, {r0, lr} @ save r0, lr
@ Save spsr_<exception> (parent CPSR)
.Lvec_\name:
mrs lr, spsr
str lr, [sp, #8] @ save spsr
@
@ Prepare for SVC32 mode. IRQs remain disabled.
@
mrs r0, cpsr
eor r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
msr spsr_cxsf, r0
@
@ the branch table must immediately follow this code
@
and lr, lr, #0x0f @ index by parent mode bits
THUMB( adr r0, 1f )
THUMB( ldr lr, [r0, lr, lsl #2] )
mov r0, sp @ pass mode stack to the handler
ARM( ldr lr, [pc, lr, lsl #2] )
movs pc, lr @ branch to handler in SVC mode
ENDPROC(vector_\name)
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
.subsection 1
.align 5
@ Spectre-BHB variant: run the 8-iteration branch loop before joining
@ the common path at .Lvec_\name.
vector_bhb_loop8_\name:
.if \correction
sub lr, lr, #\correction
.endif
@ Save r0, lr_<exception> (parent PC)
stmia sp, {r0, lr}
@ bhb workaround
mov r0, #8
3: W(b) . + 4
subs r0, r0, #1
bne 3b
dsb nsh
@ isb not needed due to "movs pc, lr" in the vector stub
@ which gives a "context synchronisation".
b .Lvec_\name
ENDPROC(vector_bhb_loop8_\name)
.previous
#endif
.align 2
@ handler addresses follow this label
1:
.endm
.section .stubs, "ax", %progbits
@ These need to remain at the start of the section so that
@ they are in range of the 'SWI' entries in the vector tables
@ located 4k down.
.L__vector_swi:
.word vector_swi
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
.L__vector_bhb_loop8_swi:
.word vector_bhb_loop8_swi
.L__vector_bhb_bpiall_swi:
.word vector_bhb_bpiall_swi
#endif
vector_rst:
ARM( swi SYS_ERROR0 )
THUMB( svc #0 )
THUMB( nop )
b vector_und
/*
* Interrupt dispatcher
*/
vector_stub irq, IRQ_MODE, 4
.long __irq_usr @ 0 (USR_26 / USR_32)
.long __irq_invalid @ 1 (FIQ_26 / FIQ_32)
.long __irq_invalid @ 2 (IRQ_26 / IRQ_32)
.long __irq_svc @ 3 (SVC_26 / SVC_32)
.long __irq_invalid @ 4
.long __irq_invalid @ 5
.long __irq_invalid @ 6
.long __irq_invalid @ 7
.long __irq_invalid @ 8
.long __irq_invalid @ 9
.long __irq_invalid @ a
.long __irq_invalid @ b
.long __irq_invalid @ c
.long __irq_invalid @ d
.long __irq_invalid @ e
.long __irq_invalid @ f
/*
* Data abort dispatcher
* Enter in ABT mode, spsr = USR CPSR, lr = USR PC
*/
vector_stub dabt, ABT_MODE, 8
.long __dabt_usr @ 0 (USR_26 / USR_32)
.long __dabt_invalid @ 1 (FIQ_26 / FIQ_32)
.long __dabt_invalid @ 2 (IRQ_26 / IRQ_32)
.long __dabt_svc @ 3 (SVC_26 / SVC_32)
.long __dabt_invalid @ 4
.long __dabt_invalid @ 5
.long __dabt_invalid @ 6
.long __dabt_invalid @ 7
.long __dabt_invalid @ 8
.long __dabt_invalid @ 9
.long __dabt_invalid @ a
.long __dabt_invalid @ b
.long __dabt_invalid @ c
.long __dabt_invalid @ d
.long __dabt_invalid @ e
.long __dabt_invalid @ f
/*
* Prefetch abort dispatcher
* Enter in ABT mode, spsr = USR CPSR, lr = USR PC
*/
vector_stub pabt, ABT_MODE, 4
.long __pabt_usr @ 0 (USR_26 / USR_32)
.long __pabt_invalid @ 1 (FIQ_26 / FIQ_32)
.long __pabt_invalid @ 2 (IRQ_26 / IRQ_32)
.long __pabt_svc @ 3 (SVC_26 / SVC_32)
.long __pabt_invalid @ 4
.long __pabt_invalid @ 5
.long __pabt_invalid @ 6
.long __pabt_invalid @ 7
.long __pabt_invalid @ 8
.long __pabt_invalid @ 9
.long __pabt_invalid @ a
.long __pabt_invalid @ b
.long __pabt_invalid @ c
.long __pabt_invalid @ d
.long __pabt_invalid @ e
.long __pabt_invalid @ f
/*
* Undef instr entry dispatcher
* Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
*/
vector_stub und, UND_MODE
.long __und_usr @ 0 (USR_26 / USR_32)
.long __und_invalid @ 1 (FIQ_26 / FIQ_32)
.long __und_invalid @ 2 (IRQ_26 / IRQ_32)
.long __und_svc @ 3 (SVC_26 / SVC_32)
.long __und_invalid @ 4
.long __und_invalid @ 5
.long __und_invalid @ 6
.long __und_invalid @ 7
.long __und_invalid @ 8
.long __und_invalid @ 9
.long __und_invalid @ a
.long __und_invalid @ b
.long __und_invalid @ c
.long __und_invalid @ d
.long __und_invalid @ e
.long __und_invalid @ f
.align 5
/*=============================================================================
* Address exception handler
*-----------------------------------------------------------------------------
* These aren't too critical.
* (they're not supposed to happen, and won't happen in 32-bit data mode).
*/
vector_addrexcptn:
b vector_addrexcptn
/*=============================================================================
* FIQ "NMI" handler
*-----------------------------------------------------------------------------
* Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86
* systems. This must be the last vector stub, so lets place it in its own
* subsection.
*/
.subsection 2
vector_stub fiq, FIQ_MODE, 4
.long __fiq_usr @ 0 (USR_26 / USR_32)
.long __fiq_svc @ 1 (FIQ_26 / FIQ_32)
.long __fiq_svc @ 2 (IRQ_26 / IRQ_32)
.long __fiq_svc @ 3 (SVC_26 / SVC_32)
.long __fiq_svc @ 4
.long __fiq_svc @ 5
.long __fiq_svc @ 6
.long __fiq_abt @ 7
.long __fiq_svc @ 8
.long __fiq_svc @ 9
.long __fiq_svc @ a
.long __fiq_svc @ b
.long __fiq_svc @ c
.long __fiq_svc @ d
.long __fiq_svc @ e
.long __fiq_svc @ f
.globl vector_fiq
	@ The actual exception vector pages. Each slot is a single (wide)
	@ branch to the corresponding vector_* stub. The SWI slot instead
	@ loads PC from a literal: the .reloc directives below redirect that
	@ PC-relative load to .L__vector_swi (presumably patched/placed by
	@ the vector-page setup code -- the stub itself is outside this file
	@ chunk, so confirm against traps.c).
	.section .vectors, "ax", %progbits
	W(b)	vector_rst
	W(b)	vector_und
ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_swi	)
THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_swi	)
	W(ldr)	pc, .
	W(b)	vector_pabt
	W(b)	vector_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_irq
	W(b)	vector_fiq

#ifdef CONFIG_HARDEN_BRANCH_HISTORY
	@ Spectre-BHB hardened vector pages: identical layout, but each
	@ entry dispatches to a variant that scrubs branch history first
	@ (loop8 workaround or BPIALL, chosen at runtime per-CPU).
	.section .vectors.bhb.loop8, "ax", %progbits
	W(b)	vector_rst
	W(b)	vector_bhb_loop8_und
ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_loop8_swi	)
THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_bhb_loop8_swi	)
	W(ldr)	pc, .
	W(b)	vector_bhb_loop8_pabt
	W(b)	vector_bhb_loop8_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_bhb_loop8_irq
	W(b)	vector_bhb_loop8_fiq

	.section .vectors.bhb.bpiall, "ax", %progbits
	W(b)	vector_rst
	W(b)	vector_bhb_bpiall_und
ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_bpiall_swi	)
THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_bhb_bpiall_swi	)
	W(ldr)	pc, .
	W(b)	vector_bhb_bpiall_pabt
	W(b)	vector_bhb_bpiall_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_bhb_bpiall_irq
	W(b)	vector_bhb_bpiall_fiq
#endif
	.data
	.align	2
	.globl	cr_alignment
cr_alignment:
	.space	4	@ saved CP15 c1 control register value; written by
			@ __mmap_switched (via __mmap_switched_data slot r3)
|
aixcc-public/challenge-001-exemplar-source
| 16,571
|
arch/arm/kernel/head.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/kernel/head.S
*
* Copyright (C) 1994-2002 Russell King
* Copyright (c) 2003 ARM Limited
* All Rights Reserved
*
* Kernel startup code for all 32-bit CPUs
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/cp15.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
#endif
/*
* swapper_pg_dir is the virtual address of the initial page table.
* We place the page tables 16K below KERNEL_RAM_VADDR. Therefore, we must
* make sure that KERNEL_RAM_VADDR is correctly set. Currently, we expect
* the least significant 16 bits to be 0x8000, but we could probably
* relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
*/
#define KERNEL_RAM_VADDR (KERNEL_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif
#ifdef CONFIG_ARM_LPAE
/* LPAE requires an additional page for the PGD */
#define PG_DIR_SIZE 0x5000
#define PMD_ENTRY_ORDER 3 /* PMD entry size is 2^PMD_ENTRY_ORDER */
#else
#define PG_DIR_SIZE 0x4000
#define PMD_ENTRY_ORDER 2
#endif
	@ The initial page table lives PG_DIR_SIZE bytes below the kernel image.
	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE

/*
 * This needs to be assigned at runtime when the linker symbols are
 * resolved. These are unsigned 64bit really, but in this assembly code
 * We store them as 32bit.
 */
	.pushsection .data
	.align	2
	.globl	kernel_sec_start
	.globl	kernel_sec_end
kernel_sec_start:			@ physical start of kernel (2 x 32-bit words)
	.long	0
	.long	0
kernel_sec_end:				@ physical end of kernel (2 x 32-bit words)
	.long	0
	.long	0
	.popsection

	@ pgtbl: compute the physical page table base into \rd:
	@ \rd = \phys + TEXT_OFFSET - PG_DIR_SIZE
	.macro	pgtbl, rd, phys
	add	\rd, \phys, #TEXT_OFFSET
	sub	\rd, \rd, #PG_DIR_SIZE
	.endm
/*
* Kernel startup entry point.
* ---------------------------
*
* This is normally called from the decompressor code. The requirements
* are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
* r1 = machine nr, r2 = atags or dtb pointer.
*
* This code is mostly position independent, so if you link the kernel at
* 0xc0008000, you call this at __pa(0xc0008000).
*
* See linux/arch/arm/tools/mach-types for the complete list of machine
* numbers for r1.
*
* We're trying to keep crap to a minimum; DO NOT add any machine specific
* crap here - that's what the boot loader (or in extreme, well justified
* circumstances, zImage) is for.
*/
	.arm

	__HEAD
ENTRY(stext)
 ARM_BE8(setend	be )			@ ensure we are in BE8 mode

 THUMB(	badr	r9, 1f		)	@ Kernel is always entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install
#endif
	@ ensure svc mode and all interrupts masked
	safe_svcmode_maskall r9

	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
 THUMB( it	eq )		@ force fixup-able long branch encoding
	beq	__error_p			@ yes, error 'p'

#ifdef CONFIG_ARM_LPAE
	mrc	p15, 0, r3, c0, c1, 4		@ read ID_MMFR0
	and	r3, r3, #0xf			@ extract VMSA support
	cmp	r3, #5				@ long-descriptor translation table format?
 THUMB( it	lo )				@ force fixup-able long branch encoding
	blo	__error_lpae			@ only classic page table format
#endif

#ifndef CONFIG_XIP_KERNEL
	adr_l	r8, _text			@ __pa(_text)
	sub	r8, r8, #TEXT_OFFSET		@ PHYS_OFFSET
#else
	ldr	r8, =PLAT_PHYS_OFFSET		@ always constant in this case
#endif

	/*
	 * r1 = machine no, r2 = atags or dtb,
	 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
	 */
	bl	__vet_atags			@ validate r2; zeroed if bogus
#ifdef CONFIG_SMP_ON_UP
	bl	__fixup_smp
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
	bl	__fixup_pv_table
#endif
	bl	__create_page_tables

	/*
	 * The following calls CPU specific code in a position independent
	 * manner. See arch/arm/mm/proc-*.S for details. r10 = base of
	 * xxx_proc_info structure selected by __lookup_processor_type
	 * above.
	 *
	 * The processor init function will be called with:
	 * r1 - machine type
	 * r2 - boot data (atags/dt) pointer
	 * r4 - translation table base (low word)
	 * r5 - translation table base (high word, if LPAE)
	 * r8 - translation table base 1 (pfn if LPAE)
	 * r9 - cpuid
	 * r13 - virtual address for __enable_mmu -> __turn_mmu_on
	 *
	 * On return, the CPU will be ready for the MMU to be turned on,
	 * r0 will hold the CPU control register value, r1, r2, r4, and
	 * r9 will be preserved.  r5 will also be preserved if LPAE.
	 */
	ldr	r13, =__mmap_switched		@ address to jump to after
						@ mmu has been enabled
	badr	lr, 1f				@ return (PIC) address
#ifdef CONFIG_ARM_LPAE
	mov	r5, #0				@ high TTBR0
	mov	r8, r4, lsr #12			@ TTBR1 is swapper_pg_dir pfn
#else
	mov	r8, r4				@ set TTBR1 to swapper_pg_dir
#endif
	ldr	r12, [r10, #PROCINFO_INITFUNC]	@ proc init is stored as an offset...
	add	r12, r12, r10			@ ...relative to the procinfo base
	ret	r12
1:	b	__enable_mmu
ENDPROC(stext)
	.ltorg
/*
* Setup the initial page tables. We only setup the barest
* amount which are required to get the kernel running, which
* generally means mapping in the kernel code.
*
* r8 = phys_offset, r9 = cpuid, r10 = procinfo
*
* Returns:
* r0, r3, r5-r7 corrupted
* r4 = physical page table address
*/
__create_page_tables:
	pgtbl	r4, r8				@ page table address

	/*
	 * Clear the swapper page table
	 */
	mov	r0, r4
	mov	r3, #0
	add	r6, r0, #PG_DIR_SIZE
1:	str	r3, [r0], #4			@ zero 16 bytes per iteration
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	teq	r0, r6
	bne	1b

#ifdef CONFIG_ARM_LPAE
	/*
	 * Build the PGD table (first level) to point to the PMD table. A PGD
	 * entry is 64-bit wide.
	 */
	mov	r0, r4
	add	r3, r4, #0x1000			@ first PMD table address
	orr	r3, r3, #3			@ PGD block type
	mov	r6, #4				@ PTRS_PER_PGD
	mov	r7, #1 << (55 - 32)		@ L_PGD_SWAPPER
1:
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4			@ set top PGD entry bits
	str	r3, [r0], #4			@ set bottom PGD entry bits
#else
	str	r3, [r0], #4			@ set bottom PGD entry bits
	str	r7, [r0], #4			@ set top PGD entry bits
#endif
	add	r3, r3, #0x1000			@ next PMD table
	subs	r6, r6, #1
	bne	1b

	add	r4, r4, #0x1000			@ point to the PMD tables
#ifdef CONFIG_CPU_ENDIAN_BE8
	add	r4, r4, #4			@ we only write the bottom word
#endif
#endif

	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

	/*
	 * Create identity mapping to cater for __enable_mmu.
	 * This identity mapping will be removed by paging_init().
	 */
	adr_l	r5, __turn_mmu_on		@ _pa(__turn_mmu_on)
	adr_l	r6, __turn_mmu_on_end		@ _pa(__turn_mmu_on_end)
	mov	r5, r5, lsr #SECTION_SHIFT	@ r5/r6 = section indices
	mov	r6, r6, lsr #SECTION_SHIFT

1:	orr	r3, r7, r5, lsl #SECTION_SHIFT	@ flags + kernel base
	str	r3, [r4, r5, lsl #PMD_ENTRY_ORDER]	@ identity mapping
	cmp	r5, r6
	addlo	r5, r5, #1			@ next section
	blo	1b

	/*
	 * The main matter: map in the kernel using section mappings, and
	 * set two variables to indicate the physical start and end of the
	 * kernel.
	 */
	add	r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
	ldr	r6, =(_end - 1)
	adr_l	r5, kernel_sec_start		@ _pa(kernel_sec_start)
#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
	str	r8, [r5, #4]			@ Save physical start of kernel (BE)
#else
	str	r8, [r5]			@ Save physical start of kernel (LE)
#endif
	orr	r3, r8, r7			@ Add the MMU flags
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
1:	str	r3, [r0], #1 << PMD_ENTRY_ORDER	@ fill section entries up to _end
	add	r3, r3, #1 << SECTION_SHIFT
	cmp	r0, r6
	bls	1b
	eor	r3, r3, r7			@ Remove the MMU flags
	adr_l	r5, kernel_sec_end		@ _pa(kernel_sec_end)
#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
	str	r3, [r5, #4]			@ Save physical end of kernel (BE)
#else
	str	r3, [r5]			@ Save physical end of kernel (LE)
#endif

#ifdef CONFIG_XIP_KERNEL
	/*
	 * Map the kernel image separately as it is not located in RAM.
	 */
#define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
	mov	r3, pc
	mov	r3, r3, lsr #SECTION_SHIFT
	orr	r3, r7, r3, lsl #SECTION_SHIFT
	add	r0, r4,  #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
	str	r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ENTRY_ORDER]!
	ldr	r6, =(_edata_loc - 1)
	add	r0, r0, #1 << PMD_ENTRY_ORDER
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
1:	cmp	r0, r6
	add	r3, r3, #1 << SECTION_SHIFT
	strls	r3, [r0], #1 << PMD_ENTRY_ORDER
	bls	1b
#endif

	/*
	 * Then map boot params address in r2 if specified.
	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
	 */
	mov	r0, r2, lsr #SECTION_SHIFT
	cmp	r2, #0				@ all strne/addne below are skipped if r2 == 0
	ldrne	r3, =FDT_FIXED_BASE >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
	addne	r3, r3, r4
	orrne	r6, r7, r0, lsl #SECTION_SHIFT
	strne	r6, [r3], #1 << PMD_ENTRY_ORDER
	addne	r6, r6, #1 << SECTION_SHIFT
	strne	r6, [r3]

#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
	sub	r4, r4, #4			@ Fixup page table pointer
						@ for 64-bit descriptors
#endif

#ifdef CONFIG_DEBUG_LL
#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
	/*
	 * Map in IO space for serial debugging.
	 * This allows debug messages to be output
	 * via a serial console before paging_init.
	 */
	addruart r7, r3, r0
	mov	r3, r3, lsr #SECTION_SHIFT
	mov	r3, r3, lsl #PMD_ENTRY_ORDER
	add	r0, r4, r3
	mov	r3, r7, lsr #SECTION_SHIFT
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
	orr	r3, r7, r3, lsl #SECTION_SHIFT
#ifdef CONFIG_ARM_LPAE
	mov	r7, #1 << (54 - 32)		@ XN
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4
	str	r3, [r0], #4
#else
	str	r3, [r0], #4
	str	r7, [r0], #4
#endif
#else
	orr	r3, r3, #PMD_SECT_XN
	str	r3, [r0], #4
#endif

#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
	/* we don't need any serial debugging mappings */
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
#endif

#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
	/*
	 * If we're using the NetWinder or CATS, we also need to map
	 * in the 16550-type serial port for the debug messages
	 */
	add	r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
	orr	r3, r7, #0x7c000000
	str	r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
	/*
	 * Map in screen at 0x02000000 & SCREEN2_BASE
	 * Similar reasons here - for debug.  This is
	 * only for Acorn RiscPC architectures.
	 */
	add	r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
	orr	r3, r7, #0x02000000
	str	r3, [r0]
	add	r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
	str	r3, [r0]
#endif
#endif
#ifdef CONFIG_ARM_LPAE
	sub	r4, r4, #0x1000		@ point to the PGD table
#endif
	ret	lr
ENDPROC(__create_page_tables)
	.ltorg
#if defined(CONFIG_SMP)
	.text
	.arm
ENTRY(secondary_startup_arm)
 THUMB(	badr	r9, 1f		)	@ Kernel is entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 *
	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
	 * the processor type - there is no need to check the machine type
	 * as it has already been validated by the primary processor.
	 */

 ARM_BE8(setend	be)				@ ensure we are in BE8 mode

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install_secondary
#endif
	safe_svcmode_maskall r9

	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type
	movs	r10, r5				@ invalid processor?
	moveq	r0, #'p'			@ yes, error 'p'
 THUMB( it	eq )		@ force fixup-able long branch encoding
	beq	__error_p

	/*
	 * Use the page tables supplied from  __cpu_up.
	 */
	adr_l	r3, secondary_data
	mov_l	r12, __secondary_switched
	ldrd	r4, r5, [r3, #0]		@ get secondary_data.pgdir
ARM_BE8(eor	r4, r4, r5)			@ Swap r5 and r4 in BE:
ARM_BE8(eor	r5, r4, r5)			@ it can be done in 3 steps
ARM_BE8(eor	r4, r4, r5)			@ without using a temp reg.
	ldr	r8, [r3, #8]			@ get secondary_data.swapper_pg_dir
	badr	lr, __enable_mmu		@ return address
	mov	r13, r12			@ __secondary_switched address
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10			@ initialise processor
						@ (return control reg)
	ret	r12
ENDPROC(secondary_startup)
ENDPROC(secondary_startup_arm)
ENTRY(__secondary_switched)
	@ Runs with the MMU on; install this CPU's stack/task and enter C code.
#if defined(CONFIG_VMAP_STACK) && !defined(CONFIG_ARM_LPAE)
	@ Before using the vmap'ed stack, we have to switch to swapper_pg_dir
	@ as the ID map does not cover the vmalloc region.
	mrc	p15, 0, ip, c2, c0, 1	@ read TTBR1
	mcr	p15, 0, ip, c2, c0, 0	@ set TTBR0
	instr_sync
#endif
	adr_l	r7, secondary_data + 12		@ get secondary_data.stack
	ldr	sp, [r7]
	ldr	r0, [r7, #4]			@ get secondary_data.task
	mov	fp, #0				@ clear frame pointer
	b	secondary_start_kernel
ENDPROC(__secondary_switched)
/*
* Setup common bits before finally enabling the MMU. Essentially
* this is just loading the page table pointer and domain access
* registers. All these registers need to be preserved by the
* processor setup function (or set in the case of r0)
*
* r0 = cp#15 control register
* r1 = machine ID
* r2 = atags or dtb pointer
* r4 = TTBR pointer (low word)
* r5 = TTBR pointer (high word if LPAE)
* r9 = processor ID
* r13 = *virtual* address to jump to upon completion
*/
__enable_mmu:
	@ Adjust the control register value in r0, program the translation
	@ table base(s) and domain register, then jump to the final enable.
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
	orr	r0, r0, #CR_A			@ pre-v6: enable alignment faulting
#else
	bic	r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
#endif
#ifdef CONFIG_ARM_LPAE
	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
#else
	mov	r5, #DACR_INIT
	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
#endif
	b	__turn_mmu_on
ENDPROC(__enable_mmu)
/*
* Enable the MMU. This completely changes the structure of the visible
* memory space. You will not be able to trace execution through this.
* If you have an enquiry about this, *please* check the linux-arm-kernel
* mailing list archives BEFORE sending another post to the list.
*
* r0 = cp#15 control register
* r1 = machine ID
* r2 = atags or dtb pointer
* r9 = processor ID
* r13 = *virtual* address to jump to upon completion
*
* other registers depend on the function called upon completion
*/
	.align	5
	.pushsection	.idmap.text, "ax"	@ must be identity-mapped (see
						@ __create_page_tables)
ENTRY(__turn_mmu_on)
	mov	r0, r0				@ deliberate nop
	instr_sync
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
	instr_sync
	mov	r3, r3				@ deliberate nop
	mov	r3, r13				@ r13 holds the *virtual* resume address
	ret	r3
__turn_mmu_on_end:
ENDPROC(__turn_mmu_on)
	.popsection
#ifdef CONFIG_SMP_ON_UP
	__HEAD
__fixup_smp:
	@ Decide whether this SMP kernel is actually running on a UP system;
	@ if so, branch to __fixup_smp_on_up to patch out the SMP-only code.
	and	r3, r9, #0x000f0000	@ architecture version
	teq	r3, #0x000f0000		@ CPU ID supported?
	bne	__fixup_smp_on_up	@ no, assume UP

	bic	r3, r9, #0x00ff0000
	bic	r3, r3, #0x0000000f	@ mask 0xff00fff0
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000b000
	orr	r4, r4, #0x00000020	@ val 0x4100b020
	teq	r3, r4			@ ARM 11MPCore?
	reteq	lr			@ yes, assume SMP

	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
	and	r0, r0, #0xc0000000	@ multiprocessing extensions and
	teq	r0, #0x80000000		@ not part of a uniprocessor system?
	bne    __fixup_smp_on_up	@ no, assume UP

	@ Core indicates it is SMP. Check for Aegis SOC where a single
	@ Cortex-A9 CPU is present but SMP operations fault.
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000c000
	orr	r4, r4, #0x00000090
	teq	r3, r4			@ Check for ARM Cortex-A9
	retne	lr			@ Not ARM Cortex-A9,

	@ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
	@ below address check will need to be #ifdef'd or equivalent
	@ for the Aegis platform.
	mrc	p15, 4, r0, c15, c0	@ get SCU base address
	teq	r0, #0x0		@ '0' on actual UP A9 hardware
	beq	__fixup_smp_on_up	@ So its an A9 UP
	ldr	r0, [r0, #4]		@ read SCU Config
ARM_BE8(rev	r0, r0)			@ byteswap if big endian
	and	r0, r0, #0x3		@ number of CPUs
	teq	r0, #0x0		@ is 1?
	retne	lr

__fixup_smp_on_up:
	@ Hand the SMP-alternatives patch table to the shared patch loop.
	adr_l	r4, __smpalt_begin
	adr_l	r5, __smpalt_end
	b	__do_fixup_smp_on_up
ENDPROC(__fixup_smp)

	.pushsection .data
	.align	2
	.globl	smp_on_up
smp_on_up:				@ runtime flag: 1 = SMP, 0 = patched for UP
	ALT_SMP(.long	1)
	ALT_UP(.long	0)
	.popsection
#endif
	.text
__do_fixup_smp_on_up:
	@ Patch loop: r4 = current table entry, r5 = table end.
	@ Each 8-byte entry is {offset, replacement instruction}.
	cmp	r4, r5
	reths	lr			@ done when r4 >= r5
	ldmia	r4, {r0, r6}		@ r0 = offset, r6 = replacement insn
 ARM(	str	r6, [r0, r4]	)	@ store at offset relative to the entry
 THUMB(	add	r0, r0, r4	)
	add	r4, r4, #8		@ advance to the next entry
#ifdef __ARMEB__
 THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
#endif
 THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
 THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r0.
 THUMB(	strh	r6, [r0]	)
	b	__do_fixup_smp_on_up
ENDPROC(__do_fixup_smp_on_up)
ENTRY(fixup_smp)
	@ C-callable wrapper: r0 = table start, r1 = table size in bytes.
	stmfd	sp!, {r4 - r6, lr}
	mov	r4, r0			@ r4 = start of patch table
	add	r5, r0, r1		@ r5 = end of patch table
	bl	__do_fixup_smp_on_up
	ldmfd	sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)
#include "head-common.S"
|
aixcc-public/challenge-001-exemplar-source
| 12,962
|
arch/arm/kernel/entry-header.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>
#include <asm/v7m.h>
@ Bad Abort numbers
@ -----------------
@
#define BAD_PREFETCH 0
#define BAD_DATA 1
#define BAD_ADDREXCPTN 2
#define BAD_IRQ 3
#define BAD_UNDEFINSTR 4
@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@ This _must_ remain a multiple of 8 for EABI.
@
#define S_OFF 8
/*
* The SWI code relies on the fact that R0 is at the bottom of the stack
* (due to slow/fast restore user regs).
*/
#if S_R0 != 0
#error "Please fix"
#endif
	@ zero_fp: clear the frame pointer on kernel entry (frame-pointer
	@ unwinding stops here) when CONFIG_FRAME_POINTER is enabled.
	.macro	zero_fp
#ifdef CONFIG_FRAME_POINTER
	mov	fp, #0
#endif
	.endm

#ifdef CONFIG_ALIGNMENT_TRAP
#define ATRAP(x...) x
#else
#define ATRAP(x...)
#endif

	@ alignment_trap: re-sync the CP15 c1 control register with the value
	@ stored at \label, writing it back only when it differs.
	@ Clobbers \rtmp1 and \rtmp2.
	.macro	alignment_trap, rtmp1, rtmp2, label
#ifdef CONFIG_ALIGNMENT_TRAP
	mrc	p15, 0, \rtmp2, c1, c0, 0	@ current control register
	ldr_va	\rtmp1, \label			@ wanted value
	teq	\rtmp1, \rtmp2
	mcrne	p15, 0, \rtmp1, c1, c0, 0	@ update only if changed
#endif
	.endm
#ifdef CONFIG_CPU_V7M
/*
* ARMv7-M exception entry/exit macros.
*
* xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
* automatically saved on the current stack (32 words) before
* switching to the exception stack (SP_main).
*
* If exception is taken while in user mode, SP_main is
* empty. Otherwise, SP_main is aligned to 64 bit automatically
* (CCR.STKALIGN set).
*
* Linux assumes that the interrupts are disabled when entering an
* exception handler and it may BUG if this is not the case. Interrupts
* are disabled during entry and reenabled in the exit macro.
*
* v7m_exception_slow_exit is used when returning from SVC or PendSV.
* When returning to kernel mode, we don't return from exception.
*/
	.macro	v7m_exception_entry
	@ determine the location of the registers saved by the core during
	@ exception entry. Depending on the mode the cpu was in when the
	@ exception happend that is either on the main or the process stack.
	@ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
	@ was used.
	tst	lr, #EXC_RET_STACK_MASK
	mrsne	r12, psp
	moveq	r12, sp

	@ we cannot rely on r0-r3 and r12 matching the value saved in the
	@ exception frame because of tail-chaining. So these have to be
	@ reloaded.
	ldmia	r12!, {r0-r3}

	@ Linux expects to have irqs off. Do it here before taking stack space
	cpsid	i

	sub	sp, #PT_REGS_SIZE-S_IP		@ reserve pt_regs slots not yet filled
	stmdb	sp!, {r0-r11}

	@ load saved r12, lr, return address and xPSR.
	@ r0-r7 are used for signals and never touched from now on. Clobbering
	@ r8-r12 is OK.
	mov	r9, r12
	ldmia	r9!, {r8, r10-r12}

	@ calculate the original stack pointer value.
	@ r9 currently points to the memory location just above the auto saved
	@ xPSR.
	@ The cpu might automatically 8-byte align the stack. Bit 9
	@ of the saved xPSR specifies if stack aligning took place. In this case
	@ another 32-bit value is included in the stack.

	tst	r12, V7M_xPSR_FRAMEPTRALIGN
	addne	r9, r9, #4

	@ store saved r12 using str to have a register to hold the base for stm
	str	r8, [sp, #S_IP]
	add	r8, sp, #S_SP
	@ store r13-r15, xPSR
	stmia	r8!, {r9-r12}
	@ store old_r0
	str	r0, [r8]
	.endm
/*
* PENDSV and SVCALL are configured to have the same exception
* priorities. As a kernel thread runs at SVCALL execution priority it
* can never be preempted and so we will never have to return to a
* kernel thread here.
*/
	.macro	v7m_exception_slow_exit ret_r0
	cpsid	i
	ldr	lr, =exc_ret			@ reload the saved EXC_RETURN
	ldr	lr, [lr]			@ value for the bx below

	@ read original r12, sp, lr, pc and xPSR
	add	r12, sp, #S_IP
	ldmia	r12, {r1-r5}

	@ an exception frame is always 8-byte aligned. To tell the hardware if
	@ the sp to be restored is aligned or not set bit 9 of the saved xPSR
	@ accordingly.
	tst	r2, #4
	subne	r2, r2, #4
	orrne	r5, V7M_xPSR_FRAMEPTRALIGN
	biceq	r5, V7M_xPSR_FRAMEPTRALIGN

	@ ensure bit 0 is cleared in the PC, otherwise behaviour is
	@ unpredictable
	bic	r4, #1

	@ write basic exception frame
	stmdb	r2!, {r1, r3-r5}
	ldmia	sp, {r1, r3-r5}
	.if	\ret_r0
	stmdb	r2!, {r0, r3-r5}	@ keep the current r0 as the return value
	.else
	stmdb	r2!, {r1, r3-r5}	@ restore the original saved r0
	.endif

	@ restore process sp
	msr	psp, r2

	@ restore original r4-r11
	ldmia	sp!, {r0-r11}

	@ restore main sp
	add	sp, sp, #PT_REGS_SIZE-S_IP

	cpsie	i
	bx	lr
	.endm
#endif /* CONFIG_CPU_V7M */
@
@ Store/load the USER SP and LR registers by switching to the SYS
@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
@ available. Should only be called from SVC mode
@
	@ Save the user-mode sp/lr at [\rd, #\offset]. Clobbers \rtemp.
	.macro	store_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	str	sp, [\rd, #\offset]		@ save sp_usr
	str	lr, [\rd, #\offset + 4]		@ save lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm

	@ Restore the user-mode sp/lr from [\rd, #\offset]. Clobbers \rtemp.
	.macro	load_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	ldr	sp, [\rd, #\offset]		@ load sp_usr
	ldr	lr, [\rd, #\offset + 4]		@ load lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm
	@ svc_exit: return from an exception taken in SVC mode.
	@ \rpsr = register holding the saved SPSR to restore;
	@ \irq  = nonzero when the caller already has IRQs disabled.
	.macro	svc_exit, rpsr, irq = 0
	.if	\irq != 0
	@ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
#endif
	.else
	@ IRQs off again before pulling preserved data off the stack
	disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	\rpsr, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	\rpsr, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	.endif
	uaccess_exit tsk, r0, r1

#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode SVC restore
	msr	spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
	@ We must avoid clrex due to Cortex-A15 erratum #830321
	sub	r0, sp, #4			@ uninhabited address
	strex	r1, r2, [r0]			@ clear the exclusive monitor
#endif
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#else
	@ Thumb mode SVC restore
	ldr	lr, [sp, #S_SP]			@ top of the stack
	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc

	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor

	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
	ldmia	sp, {r0 - r12}
	mov	sp, lr
	ldr	lr, [sp], #4
	rfeia	sp!
#endif
	.endm
@
@ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
@
@ This macro acts in a similar manner to svc_exit but switches to FIQ
@ mode to restore the final part of the register state.
@
@ We cannot use the normal svc_exit procedure because that would
@ clobber spsr_svc (FIQ could be delivered during the first few
@ instructions of vector_swi meaning its contents have not been
@ saved anywhere).
@
@ Note that, unlike svc_exit, this macro also does not allow a caller
@ supplied rpsr. This is because the FIQ exceptions are not re-entrant
@ and the handlers cannot call into the scheduler (meaning the value
@ on the stack remains correct).
@
	.macro	svc_exit_via_fiq
	uaccess_exit tsk, r0, r1
#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode restore
	mov	r0, sp
	ldmib	r0, {r1 - r14}	@ abort is deadly from here onward (it will
				@ clobber state restored below)
	msr	cpsr_c, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
	add	r8, r0, #S_PC
	ldr	r9, [r0, #S_PSR]
	msr	spsr_cxsf, r9		@ restore into spsr_fiq, not spsr_svc
	ldr	r0, [r0, #S_R0]
	ldmia	r8, {pc}^
#else
	@ Thumb mode restore
	add	r0, sp, #S_R2
	ldr	lr, [sp, #S_LR]
	ldr	sp, [sp, #S_SP] @ abort is deadly from here onward (it will
			        @ clobber state restored below)
	ldmia	r0, {r2 - r12}
	mov	r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
	msr	cpsr_c, r1
	sub	r0, #S_R2
	add	r8, r0, #S_PC
	ldmia	r0, {r0 - r1}
	rfeia	r8
#endif
	.endm
	@ restore_user_regs: return to user space from the kernel.
	@ \fast   = nonzero to skip restoring r0 (syscall return path keeps r0);
	@ \offset = extra stack offset above the saved pt_regs.
	.macro	restore_user_regs, fast = 0, offset = 0
#if defined(CONFIG_CPU_32v6K) && \
    (!defined(CONFIG_CPU_V6) || defined(CONFIG_SMP))
#ifdef CONFIG_CPU_V6
ALT_SMP(nop)
ALT_UP_B(.L1_\@)
#endif
	@ The TLS register update is deferred until return to user space so we
	@ can use it for other things while running in the kernel
	mrc	p15, 0, r1, c13, c0, 3		@ get current_thread_info pointer
	ldr	r1, [r1, #TI_TP_VALUE]
	mcr	p15, 0, r1, c13, c0, 3		@ set TLS register
.L1_\@:
#endif

	uaccess_enable r1, isb=0
#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode restore
	mov	r2, sp
	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
	tst	r1, #PSR_I_BIT | 0x0f		@ IRQs must be on, mode must be USR
	bne	1f
	msr	spsr_cxsf, r1			@ save in spsr_svc
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r1, r2, [r2]			@ clear the exclusive monitor
#endif
	.if	\fast
	ldmdb	r2, {r1 - lr}^			@ get calling r1 - lr
	.else
	ldmdb	r2, {r0 - lr}^			@ get calling r0 - lr
	.endif
	mov	r0, r0				@ ARMv5T and earlier require a nop
						@ after ldm {}^
	add	sp, sp, #\offset + PT_REGS_SIZE
	movs	pc, lr				@ return & move spsr_svc into cpsr
1:	bug	"Returning to usermode but unexpected PSR bits set?", \@
#elif defined(CONFIG_CPU_V7M)
	@ V7M restore.
	@ Note that we don't need to do clrex here as clearing the local
	@ monitor is part of the exception entry and exit sequence.
	.if	\offset
	add	sp, #\offset
	.endif
	v7m_exception_slow_exit ret_r0 = \fast
#else
	@ Thumb mode restore
	mov	r2, sp
	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]	@ get pc
	add	sp, sp, #\offset + S_SP
	tst	r1, #PSR_I_BIT | 0x0f		@ IRQs must be on, mode must be USR
	bne	1f
	msr	spsr_cxsf, r1			@ save in spsr_svc

	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r1, r2, [sp]			@ clear the exclusive monitor

	.if	\fast
	ldmdb	sp, {r1 - r12}			@ get calling r1 - r12
	.else
	ldmdb	sp, {r0 - r12}			@ get calling r0 - r12
	.endif
	add	sp, sp, #PT_REGS_SIZE - S_SP
	movs	pc, lr				@ return & move spsr_svc into cpsr
1:	bug	"Returning to usermode but unexpected PSR bits set?", \@
#endif	/* !CONFIG_THUMB2_KERNEL */
	.endm
/*
* Context tracking subsystem. Used to instrument transitions
* between user and kernel mode.
*/
	@ ct_user_exit: tell context tracking we left user mode.
	@ \save = 1 preserves the caller-clobbered registers around the C call.
	.macro ct_user_exit, save = 1
#ifdef CONFIG_CONTEXT_TRACKING_USER
	.if	\save
	stmdb   sp!, {r0-r3, ip, lr}
	bl	user_exit_callable
	ldmia	sp!, {r0-r3, ip, lr}
	.else
	bl	user_exit_callable
	.endif
#endif
	.endm

	@ ct_user_enter: tell context tracking we are entering user mode.
	@ \save as above.
	.macro ct_user_enter, save = 1
#ifdef CONFIG_CONTEXT_TRACKING_USER
	.if	\save
	stmdb   sp!, {r0-r3, ip, lr}
	bl	user_enter_callable
	ldmia	sp!, {r0-r3, ip, lr}
	.else
	bl	user_enter_callable
	.endif
#endif
	.endm
	@ invoke_syscall: dispatch syscall \nr through \table, returning to \ret.
	@ \reload reloads r0-r6 from the saved pt_regs before the call.
	@ Falls through (no call) when \nr >= NR_syscalls.
	.macro	invoke_syscall, table, nr, tmp, ret, reload=0
#ifdef CONFIG_CPU_SPECTRE
	mov	\tmp, \nr
	cmp	\tmp, #NR_syscalls		@ check upper syscall limit
	movcs	\tmp, #0			@ clamp out-of-range nr before the
	csdb					@ speculation barrier (Spectre v1)
	badr	lr, \ret			@ return address
	.if	\reload
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmiacc	r1, {r0 - r6}			@ reload r0-r6
	stmiacc	sp, {r4, r5}			@ update stack arguments
	.endif
	ldrcc	pc, [\table, \tmp, lsl #2]	@ call sys_* routine
#else
	cmp	\nr, #NR_syscalls		@ check upper syscall limit
	badr	lr, \ret			@ return address
	.if	\reload
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmiacc	r1, {r0 - r6}			@ reload r0-r6
	stmiacc	sp, {r4, r5}			@ update stack arguments
	.endif
	ldrcc	pc, [\table, \nr, lsl #2]	@ call sys_* routine
#endif
	.endm
/*
* These are the registers used in the syscall handler, and allow us to
* have in theory up to 7 arguments to a function - r0 to r6.
*
* r7 is reserved for the system call number for thumb mode.
*
* Note that tbl == why is intentional.
*
* We must set at least "tsk" and "why" when calling ret_with_reschedule.
*/
scno	.req	r7		@ syscall number
tbl	.req	r8		@ syscall table pointer
why	.req	r8		@ Linux syscall (!= 0)
tsk	.req	r9		@ current thread_info

	@ do_overflow_check: detect vmap'ed-stack overflow on kernel entry.
	@ \frame_size = SP adjustment already made by the caller (svc_entry),
	@ undone before diverting to __bad_stack.
	.macro	do_overflow_check, frame_size:req
#ifdef CONFIG_VMAP_STACK
	@
	@ Test whether the SP has overflowed. Task and IRQ stacks are aligned
	@ so that SP & BIT(THREAD_SIZE_ORDER + PAGE_SHIFT) should always be
	@ zero.
	@
ARM(	tst	sp, #1 << (THREAD_SIZE_ORDER + PAGE_SHIFT)	)
THUMB(	tst	r1, #1 << (THREAD_SIZE_ORDER + PAGE_SHIFT)	)
THUMB(	it	ne						)
	bne	.Lstack_overflow_check\@

	.pushsection	.text
.Lstack_overflow_check\@:
	@
	@ The stack pointer is not pointing to a valid vmap'ed stack, but it
	@ may be pointing into the linear map instead, which may happen if we
	@ are already running from the overflow stack. We cannot detect overflow
	@ in such cases so just carry on.
	@
	str	ip, [r0, #12]			@ Stash IP on the mode stack
	ldr_va	ip, high_memory			@ Start of VMALLOC space
ARM(	cmp	sp, ip			)	@ SP in vmalloc space?
THUMB(	cmp	r1, ip			)
THUMB(	itt	lo			)
	ldrlo	ip, [r0, #12]			@ Restore IP
	blo	.Lout\@				@ Carry on

THUMB(	sub	r1, sp, r1		)	@ Restore original R1
THUMB(	sub	sp, r1			)	@ Restore original SP
	add	sp, sp, #\frame_size		@ Undo svc_entry's SP change
	b	__bad_stack			@ Handle VMAP stack overflow
	.popsection
.Lout\@:
#endif
	.endm
|
aixcc-public/challenge-001-exemplar-source
| 5,606
|
arch/arm/kernel/head-common.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/kernel/head-common.S
*
* Copyright (C) 1994-2002 Russell King
* Copyright (c) 2003 ARM Limited
* All Rights Reserved
*/
#include <asm/assembler.h>
#define ATAG_CORE 0x54410001
#define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2)
#define ATAG_CORE_SIZE_EMPTY ((2*4) >> 2)
#ifdef CONFIG_CPU_BIG_ENDIAN
#define OF_DT_MAGIC 0xd00dfeed
#else
#define OF_DT_MAGIC 0xedfe0dd0 /* 0xd00dfeed in big-endian */
#endif
/*
* Exception handling. Something went wrong and we can't proceed. We
* ought to tell the user, but since we don't have any guarantee that
* we're even running on the right architecture, we do virtually nothing.
*
* If CONFIG_DEBUG_LL is set we try to print out something about the error
* and hope for the best (useful if bootloader fails to pass a proper
* machine ID for example).
*/
	__HEAD

/* Determine validity of the r2 atags pointer.  The heuristic requires
 * that the pointer be aligned, in the first 16k of physical RAM and
 * that the ATAG_CORE marker is first and present.  If CONFIG_OF_FLATTREE
 * is selected, then it will also accept a dtb pointer.  Future revisions
 * of this function may be more lenient with the physical address and
 * may also be able to move the ATAGS block if necessary.
 *
 * Returns:
 *  r2 either valid atags pointer, valid dtb pointer, or zero
 *  r5, r6 corrupted
 */
__vet_atags:
	tst	r2, #0x3			@ aligned?
	bne	1f

	ldr	r5, [r2, #0]
#ifdef CONFIG_OF_FLATTREE
	ldr	r6, =OF_DT_MAGIC		@ is it a DTB?
	cmp	r5, r6
	beq	2f
#endif
	cmp	r5, #ATAG_CORE_SIZE		@ is first tag ATAG_CORE?
	cmpne	r5, #ATAG_CORE_SIZE_EMPTY
	bne	1f
	ldr	r5, [r2, #4]
	ldr	r6, =ATAG_CORE
	cmp	r5, r6
	bne	1f

2:	ret	lr				@ atag/dtb pointer is ok

1:	mov	r2, #0				@ invalid: hand back a NULL pointer
	ret	lr
ENDPROC(__vet_atags)
/*
* The following fragment of code is executed with the MMU on in MMU mode,
* and uses absolute addresses; this is not position independent.
*
* r0 = cp#15 control register (exc_ret for M-class)
* r1 = machine ID
* r2 = atags/dtb pointer
* r9 = processor ID
*/
	__INIT
__mmap_switched:
	@ First code run with the MMU on. Stash the boot registers in
	@ callee-preserved regs so they survive the memset/memcpy calls below,
	@ then publish them to the C-visible variables and enter start_kernel.
	mov	r7, r1				@ preserve machine ID
	mov	r8, r2				@ preserve atags/dtb pointer
	mov	r10, r0				@ preserve control register value

	adr	r4, __mmap_switched_data
	mov	fp, #0				@ clear frame pointer

#if defined(CONFIG_XIP_DEFLATED_DATA)
   ARM(	ldr	sp, [r4], #4 )
 THUMB(	ldr	sp, [r4] )
 THUMB(	add	r4, #4 )
	bl	__inflate_kernel_data		@ decompress .data to RAM
	teq	r0, #0				@ Check for errors
	bne	__error
#elif defined(CONFIG_XIP_KERNEL)
   ARM(	ldmia	r4!, {r0, r1, r2, sp} )
 THUMB(	ldmia	r4!, {r0, r1, r2, r3} )
 THUMB(	mov	sp, r3 )
	sub	r2, r2, r1
	bl	__memcpy			@ copy .data to RAM
#endif

   ARM(	ldmia	r4!, {r0, r1, sp} )
 THUMB(	ldmia	r4!, {r0, r1, r3} )
 THUMB(	mov	sp, r3 )
	sub	r2, r1, r0
	mov	r1, #0
	bl	__memset			@ clear .bss

	adr_l	r0, init_task			@ get swapper task_struct
	set_current r0, r1

	ldmia	r4, {r0, r1, r2, r3}
	str	r9, [r0]			@ Save processor ID
	str	r7, [r1]			@ Save machine type
	str	r8, [r2]			@ Save atags pointer
	cmp	r3, #0
	strne	r10, [r3]			@ Save control register values
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
	mov	lr, #0
	b	start_kernel
ENDPROC(__mmap_switched)
	/*
	 * Pointer table consumed by __mmap_switched; the number and order
	 * of entries must match the ldmia sequences there exactly.
	 */
	.align	2
	.type	__mmap_switched_data, %object
__mmap_switched_data:
#ifdef CONFIG_XIP_KERNEL
#ifndef CONFIG_XIP_DEFLATED_DATA
	.long	_sdata				@ r0
	.long	__data_loc			@ r1
	.long	_edata_loc			@ r2
#endif
	.long	__bss_stop			@ sp (temporary stack in .bss)
#endif
	.long	__bss_start			@ r0
	.long	__bss_stop			@ r1
	.long	init_thread_union + THREAD_START_SP @ sp
	.long	processor_id			@ r0
	.long	__machine_arch_type		@ r1
	.long	__atags_pointer			@ r2
#ifdef CONFIG_CPU_CP15
	.long	cr_alignment			@ r3
#else
M_CLASS(.long	exc_ret)			@ r3
AR_CLASS(.long	0)				@ r3 (no store performed)
#endif
	.size	__mmap_switched_data, . - __mmap_switched_data
	__FINIT
	.text
/*
 * This provides a C-API version of __lookup_processor_type
 *
 *  r0 = cpuid
 * Returns:
 *  r0 = proc_info pointer, or 0 if the cpuid is not recognised
 */
ENTRY(lookup_processor_type)
	stmfd	sp!, {r4 - r6, r9, lr}		@ save regs __lookup_processor_type corrupts
	mov	r9, r0				@ __lookup_processor_type takes cpuid in r9
	bl	__lookup_processor_type
	mov	r0, r5				@ result comes back in r5
	ldmfd	sp!, {r4 - r6, r9, pc}
ENDPROC(lookup_processor_type)
/*
 * Read processor ID register (CP#15, CR0), and look up in the linker-built
 * supported processor list.  Note that we can't use the absolute addresses
 * for the __proc_info lists since we aren't running with the MMU on
 * (and therefore, we are not in the correct address space).  We have to
 * calculate the offset.
 *
 *	r9 = cpuid
 * Returns:
 *	r3, r4, r6 corrupted
 *	r5 = proc_info pointer in physical address space
 *	r9 = cpuid (preserved)
 */
__lookup_processor_type:
	/*
	 * Look in <asm/procinfo.h> for information about the __proc_info
	 * structure.
	 */
	adr_l	r5, __proc_info_begin
	adr_l	r6, __proc_info_end
1:	ldmia	r5, {r3, r4}			@ value, mask
	and	r4, r4, r9			@ mask wanted bits
	teq	r3, r4				@ does this entry match the cpuid?
	beq	2f
	add	r5, r5, #PROC_INFO_SZ		@ sizeof(proc_info_list)
	cmp	r5, r6				@ end of table reached?
	blo	1b
	mov	r5, #0				@ unknown processor
2:	ret	lr
ENDPROC(__lookup_processor_type)
/*
 * Fatal: kernel built with LPAE support but the CPU has no LPAE.
 * Report over the low-level debug channel when available, then hang.
 */
__error_lpae:
#ifdef CONFIG_DEBUG_LL
	adr	r0, str_lpae
	bl	printascii
	b	__error
str_lpae: .asciz "\nError: Kernel with LPAE support, but CPU does not support LPAE.\n"
#else
	b	__error
#endif
	.align
ENDPROC(__error_lpae)
/*
 * Fatal: unrecognized processor.  r9 holds the offending cpuid; print it
 * over the low-level debug channel when available, then hang in __error.
 */
__error_p:
#ifdef CONFIG_DEBUG_LL
	adr	r0, str_p1
	bl	printascii
	mov	r0, r9				@ print the cpuid that failed to match
	bl	printhex8
	adr	r0, str_p2
	bl	printascii
	b	__error
str_p1:	.asciz	"\nError: unrecognized/unsupported processor variant (0x"
str_p2:	.asciz	").\n"
	.align
#endif
ENDPROC(__error_p)
/*
 * Terminal failure path: optionally flag the error visually (RiscPC only),
 * then spin forever - nothing else can safely be done this early in boot.
 */
__error:
#ifdef CONFIG_ARCH_RPC
/*
 * Turn the screen red on a error - RiscPC only.
 */
	mov	r0, #0x02000000			@ screen memory base
	mov	r3, #0x11
	orr	r3, r3, r3, lsl #8		@ replicate 0x11 into all four bytes
	orr	r3, r3, r3, lsl #16
	str	r3, [r0], #4			@ paint the first words of the screen
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
#endif
1:	mov	r0, r0				@ architectural nop
	b	1b				@ dead loop
ENDPROC(__error)
|
aixcc-public/challenge-001-exemplar-source
| 14,239
|
arch/arm/kernel/head-nommu.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/kernel/head-nommu.S
*
* Copyright (C) 1994-2002 Russell King
* Copyright (C) 2003-2006 Hyok S. Choi
*
* Common kernel startup code (non-paged MM)
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/cp15.h>
#include <asm/thread_info.h>
#include <asm/v7m.h>
#include <asm/mpu.h>
#include <asm/page.h>
/*
* Kernel startup entry point.
* ---------------------------
*
* This is normally called from the decompressor code. The requirements
* are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
* r1 = machine nr.
*
* See linux/arch/arm/tools/mach-types for the complete list of machine
* numbers for r1.
*
*/
__HEAD
#ifdef CONFIG_CPU_THUMBONLY
	.thumb
ENTRY(stext)
#else
	.arm
ENTRY(stext)
 THUMB(	badr	r9, 1f		)	@ Kernel is always entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
#endif
#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install
#endif
	@ ensure svc mode and all interrupts masked
	safe_svcmode_maskall r9
						@ and irqs disabled
#if defined(CONFIG_CPU_CP15)
	mrc	p15, 0, r9, c0, c0		@ get processor id
#elif defined(CONFIG_CPU_V7M)
	ldr	r9, =BASEADDR_V7M_SCB		@ M-class: CPUID lives in the SCB
	ldr	r9, [r9, V7M_SCB_CPUID]
#else
	ldr	r9, =CONFIG_PROCESSOR_ID	@ no way to probe: use build-time id
#endif
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
	beq	__error_p			@ yes, error 'p'
#ifdef CONFIG_ARM_MPU
	bl	__setup_mpu
#endif
	badr	lr, 1f				@ return (PIC) address
	ldr	r12, [r10, #PROCINFO_INITFUNC]	@ CPU init function (offset...)
	add	r12, r12, r10			@ ...relative to the procinfo entry
	ret	r12
1:	ldr	lr, =__mmap_switched		@ __after_proc_init returns there
	b	__after_proc_init
ENDPROC(stext)
#ifdef CONFIG_SMP
	.text
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 *
	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
	 * the processor type - there is no need to check the machine type
	 * as it has already been validated by the primary processor.
	 */
#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install_secondary
#endif
	safe_svcmode_maskall r9
#ifndef CONFIG_CPU_CP15
	ldr	r9, =CONFIG_PROCESSOR_ID
#else
	mrc	p15, 0, r9, c0, c0		@ get processor id
#endif
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor?
	beq	__error_p			@ yes, error 'p'
	ldr	r7, __secondary_data		@ &secondary_data, filled in by the boot CPU
#ifdef CONFIG_ARM_MPU
	bl      __secondary_setup_mpu		@ Initialize the MPU
#endif
	badr	lr, 1f				@ return (PIC) address
	ldr	r12, [r10, #PROCINFO_INITFUNC]	@ CPU init function (offset...)
	add	r12, r12, r10			@ ...relative to the procinfo entry
	ret	r12
1:	bl	__after_proc_init
	ldr	r7, __secondary_data		@ reload r7 (clobbered above)
	ldr	sp, [r7, #12]			@ set up the stack pointer
	ldr	r0, [r7, #16]			@ set up task pointer
	mov	fp, #0				@ terminate the frame pointer chain
	b	secondary_start_kernel
ENDPROC(secondary_startup)
	.type	__secondary_data, %object
__secondary_data:
	.long	secondary_data
#endif /* CONFIG_SMP */
/*
 * Set the Control Register and Read the process ID.
 *
 * Entered with r0 = SCTLR/CCR value proposed by the per-CPU init function
 * (exc_ret for M-class) and lr = continuation address.  Applies the
 * Kconfig-selected cache/branch-predictor/alignment overrides, enables
 * the MPU if one was programmed, then returns via lr.
 */
	.text
__after_proc_init:
M_CLASS(movw	r12, #:lower16:BASEADDR_V7M_SCB)
M_CLASS(movt	r12, #:upper16:BASEADDR_V7M_SCB)
#ifdef CONFIG_ARM_MPU
M_CLASS(ldr	r3, [r12, 0x50])		@ memory model feature register
AR_CLASS(mrc	p15, 0, r3, c0, c1, 4)		@ Read ID_MMFR0
	and	r3, r3, #(MMFR0_PMSA)		@ PMSA field
	teq	r3, #(MMFR0_PMSAv7)		@ PMSA v7
	beq	1f				@ PMSAv7 needs no MAIR setup
	teq	r3, #(MMFR0_PMSAv8)		@ PMSA v8
	/*
	 * Memory region attributes for PMSAv8:
	 *
	 *   n = AttrIndx[2:0]
	 *                      n       MAIR
	 *   DEVICE_nGnRnE      000     00000000
	 *   NORMAL             001     11111111
	 */
	ldreq	r3, =PMSAv8_MAIR(0x00, PMSAv8_RGN_DEVICE_nGnRnE) | \
		     PMSAv8_MAIR(0xff, PMSAv8_RGN_NORMAL)
AR_CLASS(mcreq	p15, 0, r3, c10, c2, 0)		@ MAIR 0
M_CLASS(streq	r3, [r12, #PMSAv8_MAIR0])
	moveq	r3, #0				@ attribute indices 4-7 unused
AR_CLASS(mcreq	p15, 0, r3, c10, c2, 1)		@ MAIR 1
M_CLASS(streq	r3, [r12, #PMSAv8_MAIR1])
1:
#endif
#ifdef CONFIG_CPU_CP15
	/*
	 * CP15 system control register value returned in r0 from
	 * the CPU init function.
	 */
#ifdef CONFIG_ARM_MPU
	/* "eq" condition here: PMSAv8 was detected by the MMFR0 test above */
	biceq	r0, r0, #CR_BR			@ Disable the 'default mem-map'
	orreq	r0, r0, #CR_M			@ Set SCTRL.M (MPU on)
#endif
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
	orr	r0, r0, #CR_A			@ pre-v6: trap unaligned accesses
#else
	bic	r0, r0, #CR_A			@ v6+: hardware handles unaligned accesses
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
#endif
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
	instr_sync
#elif defined (CONFIG_CPU_V7M)
#ifdef CONFIG_ARM_MPU
	/* "eq" condition here: PMSAv8 was detected by the MMFR0 test above */
	ldreq	r3, [r12, MPU_CTRL]
	biceq	r3, #MPU_CTRL_PRIVDEFENA	@ no implicit background mapping
	orreq	r3, #MPU_CTRL_ENABLE
	streq	r3, [r12, MPU_CTRL]
	isb
#endif
	/* For V7M systems we want to modify the CCR similarly to the SCTLR */
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #V7M_SCB_CCR_DC
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #V7M_SCB_CCR_BP
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #V7M_SCB_CCR_IC
#endif
	str	r0, [r12, V7M_SCB_CCR]
	/* Pass exc_ret to __mmap_switched */
	mov	r0, r10
#endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */
	ret	lr
ENDPROC(__after_proc_init)
	.ltorg
#ifdef CONFIG_ARM_MPU
#ifndef CONFIG_CPU_V7M
/* Set which MPU region should be programmed */
.macro set_region_nr tmp, rgnr, unused
	mov	\tmp, \rgnr			@ Use static region numbers
	mcr	p15, 0, \tmp, c6, c2, 0		@ Write RGNR
.endm
/* Setup a single MPU region, either D or I side (D-side for unified) */
.macro setup_region bar, acr, sr, side = PMSAv7_DATA_SIDE, unused
	mcr	p15, 0, \bar, c6, c1, (0 + \side)	@ I/DRBAR
	mcr	p15, 0, \acr, c6, c1, (4 + \side)	@ I/DRACR
	mcr	p15, 0, \sr, c6, c1, (2 + \side)	@ I/DRSR
.endm
#else
/* M-class equivalents: MPU is programmed via memory-mapped regs at \base */
.macro set_region_nr tmp, rgnr, base
	mov	\tmp, \rgnr
	str	\tmp, [\base, #PMSAv7_RNR]
.endm
.macro setup_region bar, acr, sr, unused, base
	lsl	\acr, \acr, #16
	orr	\acr, \acr, \sr			@ RASR combines ACR (hi) and RSR (lo)
	str	\bar, [\base, #PMSAv7_RBAR]
	str	\acr, [\base, #PMSAv7_RASR]
.endm
#endif
/*
 * Setup the MPU and initial MPU Regions. We create the following regions:
 * Region 0: Use this for probing the MPU details, so leave disabled.
 * Region 1: Background region - covers the whole of RAM as strongly ordered
 * Region 2: Normal, Shared, cacheable for RAM. From PHYS_OFFSET, size from r6
 * Region 3: Normal, shared, inaccessible from PL0 to protect the vectors page
 *
 * r6: Value to be written to DRSR (and IRSR if required) for PMSAv7_RAM_REGION
 */
	__HEAD
ENTRY(__setup_mpu)
	/* Probe for v7 PMSA compliance */
M_CLASS(movw	r12, #:lower16:BASEADDR_V7M_SCB)
M_CLASS(movt	r12, #:upper16:BASEADDR_V7M_SCB)
AR_CLASS(mrc	p15, 0, r0, c0, c1, 4)		@ Read ID_MMFR0
M_CLASS(ldr	r0, [r12, 0x50])
	and	r0, r0, #(MMFR0_PMSA)		@ PMSA field
	teq	r0, #(MMFR0_PMSAv7)		@ PMSA v7
	beq	__setup_pmsa_v7
	teq	r0, #(MMFR0_PMSAv8)		@ PMSA v8
	beq	__setup_pmsa_v8
	ret	lr				@ no supported MPU: leave it off
ENDPROC(__setup_mpu)
/*
 * Program the boot CPU's PMSAv7 MPU regions:
 *   PMSAv7_RAM_REGION: normal, cacheable region covering the kernel image
 *   PMSAv7_BG_REGION:  4GB strongly-ordered, XN background region
 *   PMSAv7_ROM_REGION: the XIP ROM (XIP kernels only)
 * Returns early (MPU left off) if the MPU reports zero D-side regions.
 *
 * In:  r12 = SCB base (M-class only)
 * Out: r0, r5, r6 corrupted
 *
 * Fix: the PMSAv7_INSTR_SIDE invocation for the background region was
 * missing the comma before r12 (unlike every other setup_region call
 * site); restore it so the macro arguments bind unambiguously.
 */
ENTRY(__setup_pmsa_v7)
	/* Calculate the size of a region covering just the kernel */
	ldr	r5, =PLAT_PHYS_OFFSET		@ Region start: PHYS_OFFSET
	ldr	r6, =(_end)			@ Cover whole kernel
	sub	r6, r6, r5			@ Minimum size of region to map
	clz	r6, r6				@ Region size must be 2^N...
	rsb	r6, r6, #31			@ ...so round up region size
	lsl	r6, r6, #PMSAv7_RSR_SZ		@ Put size in right field
	orr	r6, r6, #(1 << PMSAv7_RSR_EN)	@ Set region enabled bit
	/* Determine whether the D/I-side memory map is unified. We set the
	 * flags here and continue to use them for the rest of this function */
AR_CLASS(mrc	p15, 0, r0, c0, c0, 4)		@ MPUIR
M_CLASS(ldr	r0, [r12, #MPU_TYPE])
	ands	r5, r0, #MPUIR_DREGION_SZMASK	@ 0 size d region => No MPU
	bxeq	lr
	tst	r0, #MPUIR_nU			@ MPUIR_nU = 0 for unified
	/* Setup second region first to free up r6 */
	set_region_nr r0, #PMSAv7_RAM_REGION, r12
	isb
	/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
	ldr	r0, =PLAT_PHYS_OFFSET		@ RAM starts at PHYS_OFFSET
	ldr	r5,=(PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL)
	setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12	@ PHYS_OFFSET, shared, enabled
	beq	1f				@ Memory-map not unified
	setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12	@ PHYS_OFFSET, shared, enabled
1:	isb
	/* First/background region */
	set_region_nr r0, #PMSAv7_BG_REGION, r12
	isb
	/* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */
	mov	r0, #0				@ BG region starts at 0x0
	ldr	r5,=(PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0NA)
	mov	r6, #PMSAv7_RSR_ALL_MEM		@ 4GB region, enabled
	setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12	@ 0x0, BG region, enabled
	beq	2f				@ Memory-map not unified
	setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12	@ 0x0, BG region, enabled
2:	isb
#ifdef CONFIG_XIP_KERNEL
	set_region_nr r0, #PMSAv7_ROM_REGION, r12
	isb
	ldr	r5,=(PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL)
	ldr	r0, =CONFIG_XIP_PHYS_ADDR	@ ROM start
	ldr	r6, =(_exiprom)			@ ROM end
	sub	r6, r6, r0			@ Minimum size of region to map
	clz	r6, r6				@ Region size must be 2^N...
	rsb	r6, r6, #31			@ ...so round up region size
	lsl	r6, r6, #PMSAv7_RSR_SZ		@ Put size in right field
	orr	r6, r6, #(1 << PMSAv7_RSR_EN)	@ Set region enabled bit
	setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12	@ XIP_PHYS_ADDR, shared, enabled
	beq	3f				@ Memory-map not unified
	setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12	@ XIP_PHYS_ADDR, shared, enabled
3:	isb
#endif
	ret	lr
ENDPROC(__setup_pmsa_v7)
/*
 * Program the boot CPU's PMSAv8 MPU regions (PRBARn/PRLARn pairs):
 *   region 0: XIP ROM (XIP kernels only)
 *   region 1: kernel image, normal memory
 *   region 2: background below the kernel (device, XN), if any
 *   region 3: background above the kernel (device, XN)
 *   region 4: background gap between ROM and RAM (XIP kernels only)
 *
 * In:  r12 = SCB base (M-class only)
 * Out: r0, r5, r6 corrupted
 */
ENTRY(__setup_pmsa_v8)
	mov	r0, #0
AR_CLASS(mcr	p15, 0, r0, c6, c2, 1)		@ PRSEL
M_CLASS(str	r0, [r12, #PMSAv8_RNR])
	isb
#ifdef CONFIG_XIP_KERNEL
	ldr	r5, =CONFIG_XIP_PHYS_ADDR	@ ROM start
	ldr	r6, =(_exiprom)			@ ROM end
	sub	r6, r6, #1			@ limit register holds the last address
	bic	r6, r6, #(PMSAv8_MINALIGN - 1)
	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)
AR_CLASS(mcr	p15, 0, r5, c6, c8, 0)		@ PRBAR0
AR_CLASS(mcr	p15, 0, r6, c6, c8, 1)		@ PRLAR0
M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(0)])
M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(0)])
#endif
	ldr	r5, =KERNEL_START
	ldr	r6, =KERNEL_END
	sub	r6, r6, #1
	bic	r6, r6, #(PMSAv8_MINALIGN - 1)
	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)
AR_CLASS(mcr	p15, 0, r5, c6, c8, 4)		@ PRBAR1
AR_CLASS(mcr	p15, 0, r6, c6, c8, 5)		@ PRLAR1
M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(1)])
M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(1)])
	/* Setup Background: 0x0 - min(KERNEL_START, XIP_PHYS_ADDR) */
#ifdef CONFIG_XIP_KERNEL
	ldr	r6, =KERNEL_START
	ldr	r5, =CONFIG_XIP_PHYS_ADDR
	cmp	r6, r5
	movcs	r6, r5				@ r6 = min(KERNEL_START, XIP_PHYS_ADDR)
#else
	ldr	r6, =KERNEL_START
#endif
	cmp	r6, #0
	beq	1f				@ nothing below the kernel: skip region 2
	mov	r5, #0
	sub	r6, r6, #1
	bic	r6, r6, #(PMSAv8_MINALIGN - 1)
	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
AR_CLASS(mcr	p15, 0, r5, c6, c9, 0)		@ PRBAR2
AR_CLASS(mcr	p15, 0, r6, c6, c9, 1)		@ PRLAR2
M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(2)])
M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(2)])
1:
	/* Setup Background: max(KERNEL_END, _exiprom) - 0xffffffff */
#ifdef CONFIG_XIP_KERNEL
	ldr	r5, =KERNEL_END
	ldr	r6, =(_exiprom)
	cmp	r5, r6
	movcc	r5, r6				@ r5 = max(KERNEL_END, _exiprom)
#else
	ldr	r5, =KERNEL_END
#endif
	mov	r6, #0xffffffff
	bic	r6, r6, #(PMSAv8_MINALIGN - 1)
	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
AR_CLASS(mcr	p15, 0, r5, c6, c9, 4)		@ PRBAR3
AR_CLASS(mcr	p15, 0, r6, c6, c9, 5)		@ PRLAR3
M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(3)])
M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(3)])
#ifdef CONFIG_XIP_KERNEL
	/* Setup Background: min(_exiprom, KERNEL_END) - max(KERNEL_START, XIP_PHYS_ADDR) */
	ldr	r5, =(_exiprom)
	ldr	r6, =KERNEL_END
	cmp	r5, r6
	movcs	r5, r6				@ r5 = min(_exiprom, KERNEL_END)
	ldr	r6, =KERNEL_START
	ldr	r0, =CONFIG_XIP_PHYS_ADDR
	cmp	r6, r0
	movcc	r6, r0				@ r6 = max(KERNEL_START, XIP_PHYS_ADDR)
	sub	r6, r6, #1
	bic	r6, r6, #(PMSAv8_MINALIGN - 1)
	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
#ifdef CONFIG_CPU_V7M
	/* There is no alias for n == 4 */
	mov	r0, #4
	str	r0, [r12, #PMSAv8_RNR]		@ PRSEL
	isb
	str	r5, [r12, #PMSAv8_RBAR_A(0)]
	str	r6, [r12, #PMSAv8_RLAR_A(0)]
#else
	mcr	p15, 0, r5, c6, c10, 0		@ PRBAR4
	mcr	p15, 0, r6, c6, c10, 1		@ PRLAR4
#endif
#endif
	ret	lr
ENDPROC(__setup_pmsa_v8)
#ifdef CONFIG_SMP
/*
 * Replay the MPU layout recorded by the boot CPU onto a secondary CPU,
 * dispatching on the PMSA level this CPU reports.
 *
 * r6: pointer at mpu_rgn_info
 */
	.text
ENTRY(__secondary_setup_mpu)
	/* Use MPU region info supplied by __cpu_up */
	ldr	r6, [r7]			@ get secondary_data.mpu_rgn_info
	/* Probe for v7 PMSA compliance */
	mrc	p15, 0, r0, c0, c1, 4		@ Read ID_MMFR0
	and	r0, r0, #(MMFR0_PMSA)		@ PMSA field
	teq	r0, #(MMFR0_PMSAv7)		@ PMSA v7
	beq	__secondary_setup_pmsa_v7
	teq	r0, #(MMFR0_PMSAv8)		@ PMSA v8
	beq	__secondary_setup_pmsa_v8
	b	__error_p			@ unknown PMSA level: fatal
ENDPROC(__secondary_setup_mpu)
/*
 * Walk the saved PMSAv7 region table backwards and program each region
 * into the hardware, doing both D and I sides when the map is not unified.
 *
 * r6: pointer at mpu_rgn_info
 */
ENTRY(__secondary_setup_pmsa_v7)
	/* Determine whether the D/I-side memory map is unified. We set the
	 * flags here and continue to use them for the rest of this function */
	mrc	p15, 0, r0, c0, c0, 4		@ MPUIR
	ands	r5, r0, #MPUIR_DREGION_SZMASK	@ 0 size d region => No MPU
	beq	__error_p			@ boot CPU had an MPU; we must too
	ldr	r4, [r6, #MPU_RNG_INFO_USED]	@ number of saved regions
	mov	r5, #MPU_RNG_SIZE
	add	r3, r6, #MPU_RNG_INFO_RNGS
	mla	r3, r4, r5, r3			@ r3 = one past the last saved region
1:
	tst	r0, #MPUIR_nU			@ MPUIR_nU = 0 for unified
	sub	r3, r3, #MPU_RNG_SIZE		@ step back to the previous region
	sub	r4, r4, #1
	set_region_nr r0, r4
	isb
	ldr	r0, [r3, #MPU_RGN_DRBAR]
	ldr	r6, [r3, #MPU_RGN_DRSR]
	ldr	r5, [r3, #MPU_RGN_DRACR]
	setup_region r0, r5, r6, PMSAv7_DATA_SIDE
	beq	2f				@ unified map: no I-side to program
	setup_region r0, r5, r6, PMSAv7_INSTR_SIDE
2:	isb
	mrc	p15, 0, r0, c0, c0, 4		@ Reevaluate the MPUIR
	cmp	r4, #0
	bgt	1b				@ more regions to restore
	ret	lr
ENDPROC(__secondary_setup_pmsa_v7)
/*
 * Restore the PMSAv8 PRBAR/PRLAR pairs saved by the boot CPU, walking
 * the region table backwards.
 *
 * r6: pointer at mpu_rgn_info
 */
ENTRY(__secondary_setup_pmsa_v8)
	ldr	r4, [r6, #MPU_RNG_INFO_USED]
#ifndef CONFIG_XIP_KERNEL
	add	r4, r4, #1	@ NOTE(review): non-XIP programs one extra
				@ region - confirm against mpu_rgn_info layout
#endif
	mov	r5, #MPU_RNG_SIZE
	add	r3, r6, #MPU_RNG_INFO_RNGS
	mla	r3, r4, r5, r3			@ r3 = one past the last saved region
1:
	sub	r3, r3, #MPU_RNG_SIZE		@ step back to the previous region
	sub	r4, r4, #1
	mcr	p15, 0, r4, c6, c2, 1		@ PRSEL
	isb
	ldr	r5, [r3, #MPU_RGN_PRBAR]
	ldr	r6, [r3, #MPU_RGN_PRLAR]
	mcr	p15, 0, r5, c6, c3, 0		@ PRBAR
	mcr	p15, 0, r6, c6, c3, 1		@ PRLAR
	cmp	r4, #0
	bgt	1b				@ more regions to restore
	ret	lr
ENDPROC(__secondary_setup_pmsa_v8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_ARM_MPU */
#include "head-common.S"
|
aixcc-public/challenge-001-exemplar-source
| 7,924
|
arch/arm/kernel/phys2virt.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 1994-2002 Russell King
* Copyright (c) 2003, 2020 ARM Limited
* All Rights Reserved
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/page.h>
#ifdef __ARMEB__
#define LOW_OFFSET 0x4
#define HIGH_OFFSET 0x0
#else
#define LOW_OFFSET 0x0
#define HIGH_OFFSET 0x4
#endif
/*
 * __fixup_pv_table - patch the stub instructions with the delta between
 *                    PHYS_OFFSET and PAGE_OFFSET, which is assumed to be
 *                    2 MiB aligned.
 *
 * Called from head.S, which expects the following registers to be preserved:
 *   r1 = machine no, r2 = atags or dtb,
 *   r8 = phys_offset, r9 = cpuid, r10 = procinfo
 */
	__HEAD
ENTRY(__fixup_pv_table)
	mov	r0, r8, lsr #PAGE_SHIFT	@ convert to PFN
	str_l	r0, __pv_phys_pfn_offset, r3
	adr_l	r0, __pv_offset
	subs	r3, r8, #PAGE_OFFSET	@ PHYS_OFFSET - PAGE_OFFSET
	mvn	ip, #0
	strcc	ip, [r0, #HIGH_OFFSET]	@ save to __pv_offset high bits
					@ (cc: negative delta, sign-extend)
	str	r3, [r0, #LOW_OFFSET]	@ save to __pv_offset low bits
	mov	r0, r3, lsr #21		@ constant for add/sub instructions
	teq	r3, r0, lsl #21 	@ must be 2 MiB aligned
	bne	0f
	adr_l	r4, __pv_table_begin
	adr_l	r5, __pv_table_end
	b	__fixup_a_pv_table
0:	mov	r0, r0			@ deadloop on error (misaligned delta)
	b	0b
ENDPROC(__fixup_pv_table)
	.text
/*
 * Patch every pv stub listed in [r4, r5).
 *
 * In:  r4 = pv table cursor, r5 = pv table end
 * Out: r4 advanced to r5; clobbers r0, r3, r6, r7, ip, flags
 */
__fixup_a_pv_table:
	adr_l	r6, __pv_offset
	ldr	r0, [r6, #HIGH_OFFSET]	@ pv_offset high word
	ldr	r6, [r6, #LOW_OFFSET]	@ pv_offset low word
	cmn	r0, #1			@ eq: high word is ~0 (negative offset)
#ifdef CONFIG_THUMB2_KERNEL
	@
	@ The Thumb-2 versions of the patchable sequences are
	@
	@ phys-to-virt:			movw	<reg>, #offset<31:21>
	@				lsl	<reg>, #21
	@				sub	<VA>, <PA>, <reg>
	@
	@ virt-to-phys (non-LPAE):	movw	<reg>, #offset<31:21>
	@				lsl	<reg>, #21
	@				add	<PA>, <VA>, <reg>
	@
	@ virt-to-phys (LPAE):		movw	<reg>, #offset<31:21>
	@				lsl	<reg>, #21
	@				adds	<PAlo>, <VA>, <reg>
	@				mov	<PAhi>, #offset<39:32>
	@				adc	<PAhi>, <PAhi>, #0
	@
	@ In the non-LPAE case, all patchable instructions are MOVW
	@ instructions, where we need to patch in the offset into the
	@ second halfword of the opcode (the 16-bit immediate is encoded
	@ as imm4:i:imm3:imm8)
	@
	@       15       11 10  9           4    3  0    15  14  12 11 8 7    0
	@      +-----------+---+-------------+------++---+------+----+------+
	@ MOVW | 1 1 1 1 0 | i | 1 0 0 1 0 0 | imm4 || 0 | imm3 | Rd | imm8 |
	@      +-----------+---+-------------+------++---+------+----+------+
	@
	@ In the LPAE case, we also need to patch in the high word of the
	@ offset into the immediate field of the MOV instruction, or patch it
	@ to a MVN instruction if the offset is negative. In this case, we
	@ need to inspect the first halfword of the opcode, to check whether
	@ it is MOVW or MOV/MVN, and to perform the MOV to MVN patching if
	@ needed. The encoding of the immediate is rather complex for values
	@ of i:imm3 != 0b0000, but fortunately, we never need more than 8 lower
	@ order bits, which can be patched into imm8 directly (and i:imm3
	@ cleared)
	@
	@       15       11 10  9        5         0    15  14  12 11 8 7    0
	@      +-----------+---+---------------------++---+------+----+------+
	@ MOV  | 1 1 1 1 0 | i | 0 0 0 1 0 0 1 1 1 1 || 0 | imm3 | Rd | imm8 |
	@ MVN  | 1 1 1 1 0 | i | 0 0 0 1 1 0 1 1 1 1 || 0 | imm3 | Rd | imm8 |
	@      +-----------+---+---------------------++---+------+----+------+
	@
	moveq	r0, #0x200000		@ set bit 21, mov to mvn instruction
	lsrs	r3, r6, #29		@ isolate top 3 bits of displacement
	ubfx	r6, r6, #21, #8		@ put bits 28:21 into the MOVW imm8 field
	bfi	r6, r3, #12, #3		@ put bits 31:29 into the MOVW imm3 field
	b	.Lnext
.Lloop:	add	r7, r4			@ r7 = address of instruction to patch
	adds	r4, #4			@ clears Z flag
#ifdef CONFIG_ARM_LPAE
	ldrh	ip, [r7]
ARM_BE8(rev16	ip, ip)
	tst	ip, #0x200		@ MOVW has bit 9 set, MVN has it clear
	bne	0f			@ skip to MOVW handling (Z flag is clear)
	bic	ip, #0x20		@ clear bit 5 (MVN -> MOV)
	orr	ip, ip, r0, lsr #16	@ MOV -> MVN if offset < 0
ARM_BE8(rev16	ip, ip)
	strh	ip, [r7]
					@ Z flag is set
0:
#endif
	ldrh	ip, [r7, #2]
ARM_BE8(rev16	ip, ip)
	and	ip, #0xf00		@ clear everything except Rd field
	orreq	ip, r0			@ Z flag set -> MOV/MVN -> patch in high bits
	orrne	ip, r6			@ Z flag clear -> MOVW -> patch in low bits
ARM_BE8(rev16	ip, ip)
	strh	ip, [r7, #2]
#else
#ifdef CONFIG_CPU_ENDIAN_BE8
@ in BE8, we load data in BE, but instructions still in LE
#define PV_BIT24	0x00000001
#define PV_IMM8_MASK	0xff000000
#define PV_IMMR_MSB	0x00080000
#else
#define PV_BIT24	0x01000000
#define PV_IMM8_MASK	0x000000ff
#define PV_IMMR_MSB	0x00000800
#endif
	@
	@ The ARM versions of the patchable sequences are
	@
	@ phys-to-virt:			sub	<VA>, <PA>, #offset<31:24>, lsl #24
	@				sub	<VA>, <PA>, #offset<23:16>, lsl #16
	@
	@ virt-to-phys (non-LPAE):	add	<PA>, <VA>, #offset<31:24>, lsl #24
	@				add	<PA>, <VA>, #offset<23:16>, lsl #16
	@
	@ virt-to-phys (LPAE):		movw	<reg>, #offset<31:20>
	@				adds	<PAlo>, <VA>, <reg>, lsl #20
	@				mov	<PAhi>, #offset<39:32>
	@				adc	<PAhi>, <PAhi>, #0
	@
	@ In the non-LPAE case, all patchable instructions are ADD or SUB
	@ instructions, where we need to patch in the offset into the
	@ immediate field of the opcode, which is emitted with the correct
	@ rotation value. (The effective value of the immediate is imm12<7:0>
	@ rotated right by [2 * imm12<11:8>] bits)
	@
	@      31   28 27      23 22  20 19  16 15  12 11    0
	@      +------+-----------------+------+------+-------+
	@  ADD | cond | 0 0 1 0 1 0 0 0 | Rn   | Rd   | imm12 |
	@  SUB | cond | 0 0 1 0 0 1 0 0 | Rn   | Rd   | imm12 |
	@  MOV | cond | 0 0 1 1 1 0 1 0 | Rn   | Rd   | imm12 |
	@  MVN | cond | 0 0 1 1 1 1 1 0 | Rn   | Rd   | imm12 |
	@      +------+-----------------+------+------+-------+
	@
	@ In the LPAE case, we use a MOVW instruction to carry the low offset
	@ word, and patch in the high word of the offset into the immediate
	@ field of the subsequent MOV instruction, or patch it to a MVN
	@ instruction if the offset is negative. We can distinguish MOVW
	@ instructions based on bits 23:22 of the opcode, and ADD/SUB can be
	@ distinguished from MOV/MVN (all using the encodings above) using
	@ bit 24.
	@
	@      31   28 27      23 22  20 19  16 15  12 11    0
	@      +------+-----------------+------+------+-------+
	@ MOVW | cond | 0 0 1 1 0 0 0 0 | imm4 | Rd   | imm12 |
	@      +------+-----------------+------+------+-------+
	@
	moveq	r0, #0x400000		@ set bit 22, mov to mvn instruction
	mov	r3, r6, lsr #16		@ put offset bits 31-16 into r3
	mov	r6, r6, lsr #24		@ put offset bits 31-24 into r6
	and	r3, r3, #0xf0		@ only keep offset bits 23-20 in r3
	b	.Lnext
.Lloop:	ldr	ip, [r7, r4]
#ifdef CONFIG_ARM_LPAE
	tst	ip, #PV_BIT24		@ ADD/SUB have bit 24 clear
	beq	1f
ARM_BE8(rev	ip, ip)
	tst	ip, #0xc00000		@ MOVW has bits 23:22 clear
	bic	ip, ip, #0x400000	@ clear bit 22
	bfc	ip, #0, #12		@ clear imm12 field of MOV[W] instruction
	orreq	ip, ip, r6, lsl #4	@ MOVW -> mask in offset bits 31-24
	orreq	ip, ip, r3, lsr #4	@ MOVW -> mask in offset bits 23-20
	orrne	ip, ip, r0		@ MOV -> mask in offset bits 7-0 (or bit 22)
ARM_BE8(rev	ip, ip)
	b	2f
1:
#endif
	tst	ip, #PV_IMMR_MSB		@ rotation value >= 16 ?
	bic	ip, ip, #PV_IMM8_MASK
	orreq	ip, ip, r6 ARM_BE8(, lsl #24)	@ mask in offset bits 31-24
	orrne	ip, ip, r3 ARM_BE8(, lsl #24)	@ mask in offset bits 23-20
2:
	str	ip, [r7, r4]
	add	r4, r4, #4
#endif
.Lnext:
	cmp	r4, r5			@ more table entries left?
	ldrcc	r7, [r4]		@ use branch for delay slot
	bcc	.Lloop
	ret	lr
ENDPROC(__fixup_a_pv_table)
/*
 * C-callable entry: patch the pv stubs listed in [r0, r0 + r1).
 *  r0 = table start, r1 = table size in bytes
 */
ENTRY(fixup_pv_table)
	stmfd	sp!, {r4 - r7, lr}
	mov	r4, r0			@ r0 = table start
	add	r5, r0, r1		@ r1 = table size
	bl	__fixup_a_pv_table
	ldmfd	sp!, {r4 - r7, pc}
ENDPROC(fixup_pv_table)
	.data
	.align	2
	.globl	__pv_phys_pfn_offset
	.type	__pv_phys_pfn_offset, %object
__pv_phys_pfn_offset:			@ PHYS_OFFSET >> PAGE_SHIFT, set by __fixup_pv_table
	.word	0
	.size	__pv_phys_pfn_offset, . -__pv_phys_pfn_offset
	.globl	__pv_offset
	.type	__pv_offset, %object
__pv_offset:				@ 64-bit PHYS_OFFSET - PAGE_OFFSET delta
	.quad	0
	.size	__pv_offset, . -__pv_offset
|
aixcc-public/challenge-001-exemplar-source
| 6,287
|
arch/arm/kernel/entry-ftrace.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
#include <asm/assembler.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#include "entry-header.S"
/*
* When compiling with -pg, gcc inserts a call to the mcount routine at the
* start of every function. In mcount, apart from the function's address (in
* lr), we need to get hold of the function's caller's address.
*
* Newer GCCs (4.4+) solve this problem by using a version of mcount with call
* sites like:
*
* push {lr}
* bl __gnu_mcount_nc
*
* With these compilers, frame pointers are not necessary.
*
* mcount can be thought of as a function called in the middle of a subroutine
* call. As such, it needs to be transparent for both the caller and the
* callee: the original lr needs to be restored when leaving mcount, and no
* registers should be clobbered.
*
* When using dynamic ftrace, we patch out the mcount call by a "add sp, #4"
* instead of the __gnu_mcount_nc call (see arch/arm/kernel/ftrace.c).
*/
/* Strip the Thumb bit and rewind past the mcount call sequence to get
 * the instrumented function's own address. */
.macro mcount_adjust_addr rd, rn
	bic	\rd, \rn, #1		@ clear the Thumb bit if present
	sub	\rd, \rd, #MCOUNT_INSN_SIZE
.endm

/* Non-dynamic tracing entry: dispatch to the installed trace function
 * (or the graph tracer) unless only the stubs are installed. */
.macro __mcount suffix
	mcount_enter
	ldr_va	r2, ftrace_trace_function
	badr	r0, .Lftrace_stub
	cmp	r0, r2			@ stub installed means "no tracer"
	bne	1f
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr_va	r2, ftrace_graph_return
	cmp	r0, r2			@ graph return hook installed?
	bne	ftrace_graph_caller\suffix
	ldr_va	r2, ftrace_graph_entry
	mov_l	r0, ftrace_graph_entry_stub
	cmp	r0, r2			@ graph entry hook installed?
	bne	ftrace_graph_caller\suffix
#endif
	mcount_exit
1: 	mcount_get_lr	r1		@ lr of instrumented func
	mcount_adjust_addr	r0, lr	@ instrumented function
	badr	lr, 2f
	mov	pc, r2			@ call tracer(func, parent)
2:	mcount_exit
.endm
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/* Build a struct pt_regs on the stack and call the patched-in tracer with
 * (ip, parent_ip, &function_trace_op, struct pt_regs *). */
.macro __ftrace_regs_caller

	str	lr, [sp, #-8]!	@ store LR as PC and make space for CPSR/OLD_R0,
				@ OLD_R0 will overwrite previous LR

	ldr	lr, [sp, #8]    @ get previous LR

	str	r0, [sp, #8]	@ write r0 as OLD_R0 over previous LR

	str	lr, [sp, #-4]!	@ store previous LR as LR

	add 	lr, sp, #16	@ move in LR the value of SP as it was
				@ before the push {lr} of the mcount mechanism

	push	{r0-r11, ip, lr}

	@ stack content at this point:
	@ 0  4          48   52       56            60  64    68       72
	@ R0 | R1 | ... | IP | SP + 4 | previous LR | LR | PSR | OLD_R0 |

	mov	r3, sp				@ struct pt_regs*

	ldr_va	r2, function_trace_op		@ pointer to the current
						@ function tracing op

	ldr	r1, [sp, #S_LR]			@ lr of instrumented func

	ldr	lr, [sp, #S_PC]			@ get LR

	mcount_adjust_addr	r0, lr		@ instrumented function

	.globl ftrace_regs_call
ftrace_regs_call:
	bl	ftrace_stub			@ patched to the tracer at runtime

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_regs_call
ftrace_graph_regs_call:
ARM(	mov	r0, r0	)			@ patched to branch to the graph caller
THUMB(	nop.w		)
#endif

	@ pop saved regs
	pop	{r0-r11, ip, lr}		@ restore r0 through r12
	ldr	lr, [sp], #4			@ restore LR
	ldr	pc, [sp], #12
.endm

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Graph-tracer tail of __ftrace_regs_caller: record the return address so
 * return_to_handler can intercept it, then unwind the pt_regs frame. */
.macro __ftrace_graph_regs_caller

#ifdef CONFIG_UNWINDER_FRAME_POINTER
	sub	r0, fp, #4		@ lr of instrumented routine (parent)
#else
	add	r0, sp, #S_LR
#endif

	@ called from __ftrace_regs_caller
	ldr	r1, [sp, #S_PC]		@ instrumented routine (func)
	mcount_adjust_addr	r1, r1

	mov	r2, fpreg		@ frame pointer
	add	r3, sp, #PT_REGS_SIZE	@ SP as it was on entry to the func
	bl	prepare_ftrace_return

	@ pop registers saved in ftrace_regs_caller
	pop	{r0-r11, ip, lr}	@ restore r0 through r12
	ldr	lr, [sp], #4		@ restore LR
	ldr	pc, [sp], #12
.endm
#endif
#endif
/* Dynamic-ftrace entry without pt_regs: call the patched-in tracer with
 * (ip, parent_ip[, &function_trace_op, NULL]). */
.macro __ftrace_caller suffix
	mcount_enter
	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	ldr_va	r2, function_trace_op		@ pointer to the current
						@ function tracing op
	mov	r3, #0				@ regs is NULL
#endif
	.globl ftrace_call\suffix
ftrace_call\suffix:
	bl	ftrace_stub			@ patched to the tracer at runtime
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
ARM(	mov	r0, r0	)			@ patched to branch to the graph caller
THUMB(	nop.w		)
#endif
	mcount_exit
.endm

/* Hand the instrumented function's return-address slot to the graph
 * tracer so it can be redirected to return_to_handler. */
.macro __ftrace_graph_caller
#ifdef CONFIG_UNWINDER_FRAME_POINTER
	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
#else
	add	r0, sp, #20
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
	@ called from __ftrace_caller, saved in mcount_enter
	ldr	r1, [sp, #16]		@ instrumented routine (func)
	mcount_adjust_addr	r1, r1
#else
	@ called from __mcount, untouched in lr
	mcount_adjust_addr	r1, lr	@ instrumented routine (func)
#endif
	mov	r2, fpreg		@ frame pointer
	add	r3, sp, #24		@ SP as it was on entry to the func
	bl	prepare_ftrace_return
	mcount_exit
.endm
/*
 * __gnu_mcount_nc
 */

.macro mcount_enter
/*
 * This pad compensates for the push {lr} at the call site. Note that we are
 * unable to unwind through a function which does not otherwise save its lr.
 */
 UNWIND(.pad	#4)
	stmdb	sp!, {r0-r3, lr}
 UNWIND(.save	{r0-r3, lr})
.endm

.macro mcount_get_lr reg
	ldr	\reg, [sp, #20]		@ caller's lr, pushed at the call site
.endm

.macro mcount_exit
	ldmia	sp!, {r0-r3}
	ldr	lr, [sp, #4]		@ restore the caller's original lr
	ldr	pc, [sp], #8		@ return to the caller, dropping both slots
.endm
ENTRY(__gnu_mcount_nc)
UNWIND(.fnstart)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ default (untraced) path: undo the call-site push {lr} and return;
	@ the call site is patched when tracing is enabled
	push	{lr}
	ldr	lr, [sp, #4]		@ restore the caller's original lr
	ldr	pc, [sp], #8		@ return, dropping both stack slots
#else
	__mcount
#endif
UNWIND(.fnend)
ENDPROC(__gnu_mcount_nc)
/* Concrete entry points instantiating the macros defined above */
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
UNWIND(.fnstart)
	__ftrace_caller
UNWIND(.fnend)
ENDPROC(ftrace_caller)

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
ENTRY(ftrace_regs_caller)
UNWIND(.fnstart)
	__ftrace_regs_caller
UNWIND(.fnend)
ENDPROC(ftrace_regs_caller)
#endif
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
UNWIND(.fnstart)
	__ftrace_graph_caller
UNWIND(.fnend)
ENDPROC(ftrace_graph_caller)

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
ENTRY(ftrace_graph_regs_caller)
UNWIND(.fnstart)
	__ftrace_graph_regs_caller
UNWIND(.fnend)
ENDPROC(ftrace_graph_regs_caller)
#endif
#endif

/* The helper macros are single-use; drop them here */
.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Substituted return target for graph-traced functions: recover the real
 * return address from ftrace_return_to_handler() and resume there.
 */
ENTRY(return_to_handler)
	stmdb	sp!, {r0-r3}		@ preserve the function's return values
	add	r0, sp, #16		@ sp at exit of instrumented routine
	bl	ftrace_return_to_handler
	mov	lr, r0			@ r0 has real ret addr
	ldmia	sp!, {r0-r3}
	ret	lr
ENDPROC(return_to_handler)
#endif
/* No-op tracer target; also the "nothing installed" sentinel in __mcount */
ENTRY(ftrace_stub)
.Lftrace_stub:
	ret	lr
ENDPROC(ftrace_stub)
#ifdef CONFIG_DYNAMIC_FTRACE
	__INIT

	/*
	 * Emit a <dst>_from_init trampoline in __INIT that forwards to the
	 * real entry point via a literal-pool load (presumably so __init
	 * call sites can reach .text tracers despite branch range limits
	 * - confirm against ftrace.c usage).
	 */
	.macro	init_tramp, dst:req
ENTRY(\dst\()_from_init)
	ldr	pc, =\dst
ENDPROC(\dst\()_from_init)
	.endm

	init_tramp	ftrace_caller
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	init_tramp	ftrace_regs_caller
#endif
#endif
|
aixcc-public/challenge-001-exemplar-source
| 4,815
|
arch/arm/kernel/vmlinux-xip.lds.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* ld script to make ARM Linux kernel
* taken from the i386 version by Russell King
* Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
*/
/* No __ro_after_init data in the .rodata section - which will always be ro */
#define RO_AFTER_INIT_DATA
#include <linux/sizes.h>
#include <asm/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/mpu.h>
#include <asm/page.h>
OUTPUT_ARCH(arm)
ENTRY(stext)

/* Alias the low word of the 64-bit jiffies counter as "jiffies" */
#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif

SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name.  There is no documented
	 * order of matching.
	 *
	 * unwind exit sections must be discarded before the rest of the
	 * unwind sections get included.
	 */
	/DISCARD/ : {
		ARM_DISCARD
		*(.alt.smp.init)
		*(.pv_table)
#ifndef CONFIG_ARM_UNWIND
		*(.ARM.exidx) *(.ARM.exidx.*)
		*(.ARM.extab) *(.ARM.extab.*)
#endif
	}

	. = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
	_xiprom = .;			/* XIP ROM area to be mapped */

	.head.text : {
		_text = .;
		HEAD_TEXT
	}

	.text : {			/* Real text segment		*/
		_stext = .;		/* Text and read-only data	*/
		ARM_TEXT
	}

	RO_DATA(PAGE_SIZE)

	. = ALIGN(4);
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
		ARM_MMU_KEEP(*(__ex_table))
		__stop___ex_table = .;
	}

#ifdef CONFIG_ARM_UNWIND
	ARM_UNWIND_SECTIONS
#endif

	_etext = .;			/* End of text and rodata section */

	ARM_VECTORS
	INIT_TEXT_SECTION(8)
	.exit.text : {
		ARM_EXIT_KEEP(EXIT_TEXT)
	}
	.init.proc.info : {
		ARM_CPU_DISCARD(PROC_INFO)
	}
	.init.arch.info : {
		__arch_info_begin = .;
		*(.arch.info.init)
		__arch_info_end = .;
	}
	.init.tagtable : {
		__tagtable_begin = .;
		*(.taglist.init)
		__tagtable_end = .;
	}
	.init.rodata : {
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		INIT_RAM_FS
	}

#ifdef CONFIG_ARM_MPU
	. = ALIGN(SZ_128K);
#endif
	_exiprom = .;			/* End of XIP ROM area */

/*
 * From this point, stuff is considered writable and will be copied to RAM
 */
	__data_loc = ALIGN(4);		/* location in file */
	. = PAGE_OFFSET + TEXT_OFFSET;	/* location in memory */
#undef LOAD_OFFSET
#define LOAD_OFFSET (PAGE_OFFSET + TEXT_OFFSET - __data_loc)

	. = ALIGN(THREAD_SIZE);
	_sdata = .;
	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
	.data.ro_after_init : AT(ADDR(.data.ro_after_init) - LOAD_OFFSET) {
		*(.data..ro_after_init)
	}
	_edata = .;

	. = ALIGN(PAGE_SIZE);
	__init_begin = .;
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
		INIT_DATA
	}
	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		ARM_EXIT_KEEP(EXIT_DATA)
	}

#ifdef CONFIG_SMP
	PERCPU_SECTION(L1_CACHE_BYTES)
#endif

#ifdef CONFIG_HAVE_TCM
	ARM_TCM
#endif

	/*
	 * End of copied data. We need a dummy section to get its LMA.
	 * Also located before final ALIGN() as trailing padding is not stored
	 * in the resulting binary file and useless to copy.
	 */
	.data.endmark : AT(ADDR(.data.endmark) - LOAD_OFFSET) { }
	_edata_loc = LOADADDR(.data.endmark);

	. = ALIGN(PAGE_SIZE);
	__init_end = .;

	BSS_SECTION(0, 0, 8)
#ifdef CONFIG_ARM_MPU
	. = ALIGN(PMSAv8_MINALIGN);
#endif
	_end = .;

	STABS_DEBUG
	DWARF_DEBUG
	ARM_DETAILS

	ARM_ASSERTS
}

/*
 * These must never be empty
 * If you have to comment these two assert statements out, your
 * binutils is too old (for other reasons as well)
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
#ifndef CONFIG_COMPILE_TEST
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
#endif

#ifdef CONFIG_XIP_DEFLATED_DATA
/*
 * The .bss is used as a stack area for __inflate_kernel_data() whose stack
 * frame is 9568 bytes. Make sure it has extra room left.
 */
ASSERT((_end - __bss_start) >= 12288, ".bss too small for CONFIG_XIP_DEFLATED_DATA")
#endif
#if defined(CONFIG_ARM_MPU) && !defined(CONFIG_COMPILE_TEST)
/*
* Due to PMSAv7 restriction on base address and size we have to
* enforce minimal alignment restrictions. It was seen that weaker
* alignment restriction on _xiprom will likely force XIP address
* space spawns multiple MPU regions thus it is likely we run in
* situation when we are reprogramming MPU region we run on with
* something which doesn't cover reprogramming code itself, so as soon
* as we update MPU settings we'd immediately try to execute straight
* from background region which is XN.
* It seem that alignment in 1M should suit most users.
* _exiprom is aligned as 1/8 of 1M so can be covered by subregion
* disable
*/
ASSERT(!(_xiprom & (SZ_1M - 1)), "XIP start address may cause MPU programming issues")
ASSERT(!(_exiprom & (SZ_128K - 1)), "XIP end address may cause MPU programming issues")
#endif
|
aixcc-public/challenge-001-exemplar-source
| 1,295
|
arch/arm/kernel/relocate_kernel.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* relocate_kernel.S - put the kernel image in place to boot
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/kexec.h>
.align 3 /* not needed for this code, but keeps fncpy() happy */
ENTRY(relocate_new_kernel)
	@ r7 = base of the parameter block appended after this routine;
	@ fncpy() copies code and data together, so offsets are relative
	@ to relocate_new_kernel_end.
	adr	r7, relocate_new_kernel_end
	ldr	r0, [r7, #KEXEC_INDIR_PAGE]	@ r0 = kimage indirection list
	ldr	r1, [r7, #KEXEC_START_ADDR]	@ r1 = new kernel entry point
	/*
	 * If there is no indirection page (we are doing crashdumps)
	 * skip any relocation.
	 */
	cmp	r0, #0
	beq	2f
 0:	/* top, read another word for the indirection page */
	ldr	r3, [r0],#4
	/* Is it a destination page. Put destination address to r4 */
	tst	r3,#1				@ IND_DESTINATION
	beq	1f
	bic	r4,r3,#1			@ r4 = destination address
	b	0b
1:
	/* Is it an indirection page */
	tst	r3,#2				@ IND_INDIRECTION
	beq	1f
	bic	r0,r3,#2			@ chain to next indirection page
	b	0b
1:
	/* are we done ? */
	tst	r3,#4				@ IND_DONE
	beq	1f
	b	2f
1:
	/* is it source ? */
	tst	r3,#8				@ IND_SOURCE
	beq	0b
	bic	r3,r3,#8			@ r3 = source page address
	mov	r6,#1024			@ 1024 words = one 4K page
9:
	ldr	r5,[r3],#4			@ copy source page ...
	str	r5,[r4],#4			@ ... to destination page
	subs	r6,r6,#1
	bne	9b
	b	0b
2:
	/* Jump to relocated kernel */
	mov	lr, r1				@ lr = new kernel entry
	mov	r0, #0				@ r0 = 0 (boot protocol)
	ldr	r1, [r7, #KEXEC_MACH_TYPE]	@ r1 = machine type
	ldr	r2, [r7, #KEXEC_R2]		@ r2 = atags/dtb pointer
 ARM(	ret	lr	)
 THUMB(	bx	lr	)
ENDPROC(relocate_new_kernel)
	.align	3
relocate_new_kernel_end:
	.globl relocate_new_kernel_size
relocate_new_kernel_size:
	.long relocate_new_kernel_end - relocate_new_kernel
|
aixcc-public/challenge-001-exemplar-source
| 8,427
|
arch/arm/kernel/iwmmxt.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/kernel/iwmmxt.S
*
* XScale iWMMXt (Concan) context switching and handling
*
* Initial code:
* Copyright (c) 2003, Intel Corporation
*
* Full lazy switching support, optimizations and more, by Nicolas Pitre
* Copyright (c) 2003-2004, MontaVista Software, Inc.
*/
#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include "iwmmxt.h"
#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
#define PJ4(code...) code
#define XSC(code...)
#elif defined(CONFIG_CPU_MOHAWK) || \
defined(CONFIG_CPU_XSC3) || \
defined(CONFIG_CPU_XSCALE)
#define PJ4(code...)
#define XSC(code...) code
#else
#error "Unsupported iWMMXt architecture"
#endif
#define MMX_WR0 (0x00)
#define MMX_WR1 (0x08)
#define MMX_WR2 (0x10)
#define MMX_WR3 (0x18)
#define MMX_WR4 (0x20)
#define MMX_WR5 (0x28)
#define MMX_WR6 (0x30)
#define MMX_WR7 (0x38)
#define MMX_WR8 (0x40)
#define MMX_WR9 (0x48)
#define MMX_WR10 (0x50)
#define MMX_WR11 (0x58)
#define MMX_WR12 (0x60)
#define MMX_WR13 (0x68)
#define MMX_WR14 (0x70)
#define MMX_WR15 (0x78)
#define MMX_WCSSF (0x80)
#define MMX_WCASF (0x84)
#define MMX_WCGR0 (0x88)
#define MMX_WCGR1 (0x8C)
#define MMX_WCGR2 (0x90)
#define MMX_WCGR3 (0x94)
#define MMX_SIZE (0x98)
.text
.arm
/*
* Lazy switching of Concan coprocessor context
*
* r10 = struct thread_info pointer
* r9 = ret_from_exception
* lr = undefined instr exit
*
* called from prefetch exception handler with interrupts enabled
*/
ENTRY(iwmmxt_task_enable)
	inc_preempt_count r10, r3
	XSC(mrc	p15, 0, r2, c15, c1, 0)
	PJ4(mrc p15, 0, r2, c1, c0, 2)
	@ CP0 and CP1 accessible?
	XSC(tst	r2, #0x3)
	PJ4(tst	r2, #0xf)
	bne	4f				@ if so no business here
	@ enable access to CP0 and CP1
	XSC(orr	r2, r2, #0x3)
	XSC(mcr	p15, 0, r2, c15, c1, 0)
	PJ4(orr	r2, r2, #0xf)
	PJ4(mcr	p15, 0, r2, c1, c0, 2)
	ldr	r3, =concan_owner
	add	r0, r10, #TI_IWMMXT_STATE	@ get task Concan save area
	ldr	r2, [sp, #60]			@ current task pc value
	ldr	r1, [r3]			@ get current Concan owner
	str	r0, [r3]			@ this task now owns Concan regs
	sub	r2, r2, #4			@ adjust pc back
	str	r2, [sp, #60]			@ so the faulting insn is retried
	mrc	p15, 0, r2, c2, c0, 0
	mov	r2, r2				@ cpwait
	bl	concan_save
#ifdef CONFIG_PREEMPT_COUNT
	get_thread_info r10
#endif
4:	dec_preempt_count r10, r3
	ret	r9				@ normal exit from exception

@ concan_save: save previous owner's state, then fall into concan_load.
@ In: r0 = save area to load from (or 0), r1 = previous owner's save
@ area (or 0 if none).  Only wCon-flagged register groups are saved.
concan_save:
	teq	r1, #0				@ test for last ownership
	beq	concan_load			@ no owner, skip save
	tmrc	r2, wCon
	@ CUP? wCx
	tst	r2, #0x1
	beq	1f
@ concan_dump: unconditionally store all control + data regs to [r1].
concan_dump:
	wstrw	wCSSF, r1, MMX_WCSSF
	wstrw	wCASF, r1, MMX_WCASF
	wstrw	wCGR0, r1, MMX_WCGR0
	wstrw	wCGR1, r1, MMX_WCGR1
	wstrw	wCGR2, r1, MMX_WCGR2
	wstrw	wCGR3, r1, MMX_WCGR3
1:	@ MUP? wRn
	tst	r2, #0x2
	beq	2f
	wstrd	wR0, r1, MMX_WR0
	wstrd	wR1, r1, MMX_WR1
	wstrd	wR2, r1, MMX_WR2
	wstrd	wR3, r1, MMX_WR3
	wstrd	wR4, r1, MMX_WR4
	wstrd	wR5, r1, MMX_WR5
	wstrd	wR6, r1, MMX_WR6
	wstrd	wR7, r1, MMX_WR7
	wstrd	wR8, r1, MMX_WR8
	wstrd	wR9, r1, MMX_WR9
	wstrd	wR10, r1, MMX_WR10
	wstrd	wR11, r1, MMX_WR11
	wstrd	wR12, r1, MMX_WR12
	wstrd	wR13, r1, MMX_WR13
	wstrd	wR14, r1, MMX_WR14
	wstrd	wR15, r1, MMX_WR15
2:	teq	r0, #0				@ anything to load?
	reteq	lr				@ if not, return
@ concan_load: restore full Concan state from [r0]; clears CUP/MUP
@ afterwards only when r1 != 0 (i.e. when a save was performed).
concan_load:
	@ Load wRn
	wldrd	wR0, r0, MMX_WR0
	wldrd	wR1, r0, MMX_WR1
	wldrd	wR2, r0, MMX_WR2
	wldrd	wR3, r0, MMX_WR3
	wldrd	wR4, r0, MMX_WR4
	wldrd	wR5, r0, MMX_WR5
	wldrd	wR6, r0, MMX_WR6
	wldrd	wR7, r0, MMX_WR7
	wldrd	wR8, r0, MMX_WR8
	wldrd	wR9, r0, MMX_WR9
	wldrd	wR10, r0, MMX_WR10
	wldrd	wR11, r0, MMX_WR11
	wldrd	wR12, r0, MMX_WR12
	wldrd	wR13, r0, MMX_WR13
	wldrd	wR14, r0, MMX_WR14
	wldrd	wR15, r0, MMX_WR15
	@ Load wCx
	wldrw	wCSSF, r0, MMX_WCSSF
	wldrw	wCASF, r0, MMX_WCASF
	wldrw	wCGR0, r0, MMX_WCGR0
	wldrw	wCGR1, r0, MMX_WCGR1
	wldrw	wCGR2, r0, MMX_WCGR2
	wldrw	wCGR3, r0, MMX_WCGR3
	@ clear CUP/MUP (only if r1 != 0)
	teq	r1, #0
	mov 	r2, #0
	reteq	lr
	tmcr	wCon, r2
	ret	lr
ENDPROC(iwmmxt_task_enable)
/*
* Back up Concan regs to save area and disable access to them
* (mainly for gdb or sleep mode usage)
*
* r0 = struct thread_info pointer of target task or NULL for any
*/
ENTRY(iwmmxt_task_disable)
	stmfd	sp!, {r4, lr}
	mrs	ip, cpsr
	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
	msr	cpsr_c, r2
	ldr	r3, =concan_owner
	add	r2, r0, #TI_IWMMXT_STATE	@ get task Concan save area
	ldr	r1, [r3]			@ get current Concan owner
	teq	r1, #0				@ any current owner?
	beq	1f				@ no: quit
	teq	r0, #0				@ any owner?
	teqne	r1, r2				@ or specified one?
	bne	1f				@ no: quit
	@ enable access to CP0 and CP1
	XSC(mrc	p15, 0, r4, c15, c1, 0)
	XSC(orr	r4, r4, #0x3)
	XSC(mcr	p15, 0, r4, c15, c1, 0)
	PJ4(mrc p15, 0, r4, c1, c0, 2)
	PJ4(orr	r4, r4, #0xf)
	PJ4(mcr	p15, 0, r4, c1, c0, 2)
	mov	r0, #0				@ nothing to load
	str	r0, [r3]			@ no more current owner
	mrc	p15, 0, r2, c2, c0, 0
	mov	r2, r2				@ cpwait
	bl	concan_save			@ r1 = owner save area, r0 = 0
	@ disable access to CP0 and CP1
	XSC(bic	r4, r4, #0x3)
	XSC(mcr	p15, 0, r4, c15, c1, 0)
	PJ4(bic	r4, r4, #0xf)
	PJ4(mcr	p15, 0, r4, c1, c0, 2)
	mrc	p15, 0, r2, c2, c0, 0
	mov	r2, r2				@ cpwait
1:	msr	cpsr_c, ip			@ restore interrupt mode
	ldmfd	sp!, {r4, pc}
ENDPROC(iwmmxt_task_disable)
/*
* Copy Concan state to given memory address
*
* r0 = struct thread_info pointer of target task
* r1 = memory address where to store Concan state
*
* this is called mainly in the creation of signal stack frames
*/
ENTRY(iwmmxt_task_copy)
	mrs	ip, cpsr
	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
	msr	cpsr_c, r2
	ldr	r3, =concan_owner
	add	r2, r0, #TI_IWMMXT_STATE	@ get task Concan save area
	ldr	r3, [r3]			@ get current Concan owner
	teq	r2, r3				@ does this task own it...
	beq	1f
	@ current Concan values are in the task save area
	msr	cpsr_c, ip			@ restore interrupt mode
	mov	r0, r1				@ memcpy(dest, save area, MMX_SIZE)
	mov	r1, r2
	mov	r2, #MMX_SIZE
	b	memcpy				@ tail-call; memcpy returns to caller
1:	@ this task owns Concan regs -- grab a copy from there
	mov	r0, #0				@ nothing to load
	mov	r2, #3				@ save all regs (CUP|MUP)
	mov	r3, lr				@ preserve return address
	bl	concan_dump			@ dump live regs to [r1] (dest buffer)
	msr	cpsr_c, ip			@ restore interrupt mode
	ret	r3
ENDPROC(iwmmxt_task_copy)
/*
* Restore Concan state from given memory address
*
* r0 = struct thread_info pointer of target task
* r1 = memory address where to get Concan state from
*
* this is used to restore Concan state when unwinding a signal stack frame
*/
ENTRY(iwmmxt_task_restore)
	mrs	ip, cpsr
	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
	msr	cpsr_c, r2
	ldr	r3, =concan_owner
	add	r2, r0, #TI_IWMMXT_STATE	@ get task Concan save area
	ldr	r3, [r3]			@ get current Concan owner
	bic	r2, r2, #0x7			@ 64-bit alignment
	teq	r2, r3				@ does this task own it...
	beq	1f
	@ this task doesn't own Concan regs -- use its save area
	msr	cpsr_c, ip			@ restore interrupt mode
	mov	r0, r2				@ memcpy(save area, src, MMX_SIZE)
	mov	r2, #MMX_SIZE
	b	memcpy				@ tail-call; memcpy returns to caller
1:	@ this task owns Concan regs -- load them directly
	mov	r0, r1				@ load state from user-supplied buffer
	mov	r1, #0				@ don't clear CUP/MUP
	mov	r3, lr				@ preserve return address
	bl	concan_load
	msr	cpsr_c, ip			@ restore interrupt mode
	ret	r3
ENDPROC(iwmmxt_task_restore)
/*
* Concan handling on task switch
*
* r0 = next thread_info pointer
*
* Called only from the iwmmxt notifier with task preemption disabled.
*/
ENTRY(iwmmxt_task_switch)
	XSC(mrc	p15, 0, r1, c15, c1, 0)
	PJ4(mrc	p15, 0, r1, c1, c0, 2)
	@ CP0 and CP1 accessible?
	XSC(tst	r1, #0x3)
	PJ4(tst	r1, #0xf)
	bne	1f				@ yes: block them for next task
	ldr	r2, =concan_owner
	add	r3, r0, #TI_IWMMXT_STATE	@ get next task Concan save area
	ldr	r2, [r2]			@ get current Concan owner
	teq	r2, r3				@ next task owns it?
	retne	lr				@ no: leave Concan disabled
1:	@ flip Concan access
	XSC(eor	r1, r1, #0x3)
	XSC(mcr	p15, 0, r1, c15, c1, 0)
	PJ4(eor	r1, r1, #0xf)
	PJ4(mcr	p15, 0, r1, c1, c0, 2)
	mrc	p15, 0, r1, c2, c0, 0
	sub	pc, lr, r1, lsr #32	@ cpwait and return (lsr #32 yields 0)
ENDPROC(iwmmxt_task_switch)
/*
* Remove Concan ownership of given task
*
* r0 = struct thread_info pointer
*/
ENTRY(iwmmxt_task_release)
	mrs	r2, cpsr
	orr	ip, r2, #PSR_I_BIT		@ disable interrupts
	msr	cpsr_c, ip
	ldr	r3, =concan_owner
	add	r0, r0, #TI_IWMMXT_STATE	@ get task Concan save area
	ldr	r1, [r3]			@ get current Concan owner
	eors	r0, r0, r1			@ if equal...
	streq	r0, [r3]			@ then clear ownership (r0 == 0 here)
	msr	cpsr_c, r2			@ restore interrupts
	ret	lr
ENDPROC(iwmmxt_task_release)

	.data
	.align	2
@ concan_owner: pointer to the thread_info Concan save area of the task
@ currently owning the iWMMXt register file, or 0 if none.
concan_owner:
	.word	0
|
aixcc-public/challenge-001-exemplar-source
| 3,750
|
arch/arm/kernel/entry-v7m.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/kernel/entry-v7m.S
*
* Copyright (C) 2008 ARM Ltd.
*
* Low-level vector interface routines for the ARMv7-M architecture
*/
#include <asm/memory.h>
#include <asm/glue.h>
#include <asm/thread_notify.h>
#include <asm/v7m.h>
#include "entry-header.S"
#ifdef CONFIG_TRACE_IRQFLAGS
#error "CONFIG_TRACE_IRQFLAGS not supported on the current ARMv7M implementation"
#endif
@ Catch-all handler for unexpected ARMv7-M exceptions: report and hang.
__invalid_entry:
	v7m_exception_entry
#ifdef CONFIG_PRINTK
	adr	r0, strerr			@ format string
	mrs	r1, ipsr			@ active exception number
	mov	r2, lr				@ EXC_RETURN value
	bl	_printk
#endif
	mov	r0, sp				@ struct pt_regs *
	bl	show_regs
1:	b	1b				@ dead loop; never returns
ENDPROC(__invalid_entry)

strerr:	.asciz	"\nUnhandled exception: IPSR = %08lx LR = %08lx\n"

	.align	2
@ Common external-interrupt entry for ARMv7-M: runs the handler on the
@ per-CPU IRQ stack and raises PendSV if work is pending on user return.
__irq_entry:
	v7m_exception_entry

	@
	@ Invoke the IRQ handler
	@
	mov	r0, sp				@ struct pt_regs *
	ldr_this_cpu sp, irq_stack_ptr, r1, r2

	@
	@ If we took the interrupt while running in the kernel, we may already
	@ be using the IRQ stack, so revert to the original value in that case.
	@
	subs	r2, sp, r0			@ SP above bottom of IRQ stack?
	rsbscs	r2, r2, #THREAD_SIZE		@ ... and below the top?
	movcs	sp, r0				@ If so, revert to original SP.

	push	{r0, lr}			@ preserve LR and original SP
	@ routine called with r0 = struct pt_regs *
	bl	generic_handle_arch_irq
	pop	{r0, lr}
	mov	sp, r0

	@
	@ Check for any pending work if returning to user
	@
	ldr	r1, =BASEADDR_V7M_SCB
	ldr	r0, [r1, V7M_SCB_ICSR]
	tst	r0, V7M_SCB_ICSR_RETTOBASE	@ clear => nested exception
	beq	2f
	get_thread_info tsk
	ldr	r2, [tsk, #TI_FLAGS]
	movs	r2, r2, lsl #16			@ test low 16 TI_FLAGS bits
	beq	2f				@ no work pending
	mov	r0, #V7M_SCB_ICSR_PENDSVSET
	str	r0, [r1, V7M_SCB_ICSR]		@ raise PendSV
2:
	@ registers r0-r3 and r12 are automatically restored on exception
	@ return. r4-r7 were not clobbered in v7m_exception_entry so for
	@ correctness they don't need to be restored. So only r8-r11 must be
	@ restored here. The easiest way to do so is to restore r0-r7, too.
	ldmia	sp!, {r0-r11}
	add	sp, #PT_REGS_SIZE-S_IP
	cpsie	i
	bx	lr
ENDPROC(__irq_entry)
@ PendSV handler: runs deferred work (signals, reschedule) queued by
@ __irq_entry before returning to user mode.
__pendsv_entry:
	v7m_exception_entry

	ldr	r1, =BASEADDR_V7M_SCB
	mov	r0, #V7M_SCB_ICSR_PENDSVCLR
	str	r0, [r1, V7M_SCB_ICSR]		@ clear PendSV

	@ execute the pending work, including reschedule
	get_thread_info tsk
	mov	why, #0				@ not a syscall return
	b	ret_to_user_from_irq
ENDPROC(__pendsv_entry)
/*
* Register switch for ARMv7-M processors.
* r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
* previous and next are guaranteed not to be the same.
*/
ENTRY(__switch_to)
	.fnstart
	.cantunwind
	add	ip, r1, #TI_CPU_SAVE
	stmia	ip!, {r4 - r11}		@ Store most regs on stack
	str	sp, [ip], #4
	str	lr, [ip], #4
	mov	r5, r0			@ preserve 'prev' task_struct across call
	mov	r6, r2			@ Preserve 'next'
	add	r4, r2, #TI_CPU_SAVE	@ r4 = next's saved-register area
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
	mov	r0, r5			@ return value: previous task_struct
	mov	r1, r6
	ldmia	r4, {r4 - r12, lr}	@ Load all regs saved previously
	set_current r1, r2
	mov	sp, ip			@ ip holds next's saved sp from ldmia
	bx	lr
	.fnend
ENDPROC(__switch_to)
	.data
#if CONFIG_CPU_V7M_NUM_IRQ <= 112
	.align	9			@ 16 + 112 entries * 4 bytes <= 512
#else
	.align	10
#endif

/*
 * Vector table (Natural alignment need to be ensured)
 */
ENTRY(vector_table)
	.long	0			@ 0 - Reset stack pointer
	.long	__invalid_entry		@ 1 - Reset
	.long	__invalid_entry		@ 2 - NMI
	.long	__invalid_entry		@ 3 - HardFault
	.long	__invalid_entry		@ 4 - MemManage
	.long	__invalid_entry		@ 5 - BusFault
	.long	__invalid_entry		@ 6 - UsageFault
	.long	__invalid_entry		@ 7 - Reserved
	.long	__invalid_entry		@ 8 - Reserved
	.long	__invalid_entry		@ 9 - Reserved
	.long	__invalid_entry		@ 10 - Reserved
	.long	vector_swi		@ 11 - SVCall
	.long	__invalid_entry		@ 12 - Debug Monitor
	.long	__invalid_entry		@ 13 - Reserved
	.long	__pendsv_entry		@ 14 - PendSV
	.long	__invalid_entry		@ 15 - SysTick
	.rept	CONFIG_CPU_V7M_NUM_IRQ
	.long	__irq_entry		@ External Interrupts
	.endr
	.align	2
	.globl	exc_ret
@ exc_ret: storage for the EXC_RETURN value captured on exception entry.
exc_ret:
	.space	4
|
aixcc-public/challenge-001-exemplar-source
| 3,511
|
arch/arm/kernel/sigreturn_codes.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* sigreturn_codes.S - code sinpets for sigreturn syscalls
*
* Created by: Victor Kamensky, 2013-08-13
* Copyright: (C) 2013 Linaro Limited
*/
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
/*
* For ARM syscalls, we encode the syscall number into the instruction.
* With EABI, the syscall number has to be loaded into r7. As result
* ARM syscall sequence snippet will have move and svc in .arm encoding
*
* For Thumb syscalls, we pass the syscall number via r7. We therefore
* need two 16-bit instructions in .thumb encoding
*
* Please note sigreturn_codes code are not executed in place. Instead
* they just copied by kernel into appropriate places. Code inside of
* arch/arm/kernel/signal.c is very sensitive to layout of these code
* snippets.
*/
/*
* In CPU_THUMBONLY case kernel arm opcodes are not allowed.
* Note in this case codes skips those instructions but it uses .org
* directive to keep correct layout of sigreturn_codes array.
*/
#ifndef CONFIG_CPU_THUMBONLY
#define ARM_OK(code...) code
#else
#define ARM_OK(code...)
#endif
/*
 * Slot macros: each ARM/Thumb sigreturn pair occupies a fixed 12-byte
 * slot (ARM snippet at offset 0, Thumb snippet at offset 8); the FDPIC
 * bounce snippets use 20-byte slots starting at offset 24 (ARM at 0,
 * Thumb at 12 within the slot).  signal.c relies on this exact layout.
 */
.macro arm_slot n
	.org	sigreturn_codes + 12 * (\n)
ARM_OK(	.arm	)
.endm

.macro thumb_slot n
	.org	sigreturn_codes + 12 * (\n) + 8
	.thumb
.endm

.macro arm_fdpic_slot n
	.org	sigreturn_codes + 24 + 20 * (\n)
ARM_OK(	.arm	)
.endm

.macro thumb_fdpic_slot n
	.org	sigreturn_codes + 24 + 20 * (\n) + 12
	.thumb
.endm
#if __LINUX_ARM_ARCH__ <= 4
	/*
	 * Note we manually set minimally required arch that supports
	 * required thumb opcodes for early arch versions. It is OK
	 * for this file to be used in combination with other
	 * lower arch variants, since these code snippets are only
	 * used as input data.
	 */
	.arch armv4t
#endif

	.section .rodata
	.global sigreturn_codes
	.type	sigreturn_codes, #object

	.align

@ Read-only table of code snippets copied by the kernel onto the user
@ stack (or into the vector page) to perform sigreturn syscalls.
sigreturn_codes:

	/* ARM sigreturn syscall code snippet */
	arm_slot 0
ARM_OK(	mov	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)	)
ARM_OK(	swi	#(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)	)

	/* Thumb sigreturn syscall code snippet */
	thumb_slot 0
	movs	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
	swi	#0

	/* ARM sigreturn_rt syscall code snippet */
	arm_slot 1
ARM_OK(	mov	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)	)
ARM_OK(	swi	#(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)	)

	/* Thumb sigreturn_rt syscall code snippet */
	thumb_slot 1
	movs	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
	swi	#0

	/* ARM sigreturn restorer FDPIC bounce code snippet */
	arm_fdpic_slot 0
ARM_OK(	ldr	r3, [sp, #SIGFRAME_RC3_OFFSET]	)
ARM_OK(	ldmia	r3, {r3, r9}	)
#ifdef CONFIG_ARM_THUMB
ARM_OK(	bx	r3	)
#else
ARM_OK(	ret	r3	)
#endif

	/* Thumb sigreturn restorer FDPIC bounce code snippet */
	thumb_fdpic_slot 0
	ldr	r3, [sp, #SIGFRAME_RC3_OFFSET]
	ldmia	r3, {r2, r3}
	mov	r9, r3			@ r9 = FDPIC data base, r2 = entry
	bx	r2

	/* ARM sigreturn_rt restorer FDPIC bounce code snippet */
	arm_fdpic_slot 1
ARM_OK(	ldr	r3, [sp, #RT_SIGFRAME_RC3_OFFSET]	)
ARM_OK(	ldmia	r3, {r3, r9}	)
#ifdef CONFIG_ARM_THUMB
ARM_OK(	bx	r3	)
#else
ARM_OK(	ret	r3	)
#endif

	/* Thumb sigreturn_rt restorer FDPIC bounce code snippet */
	thumb_fdpic_slot 1
	ldr	r3, [sp, #RT_SIGFRAME_RC3_OFFSET]
	ldmia	r3, {r2, r3}
	mov	r9, r3			@ r9 = FDPIC data base, r2 = entry
	bx	r2

	/*
	 * Note on additional space: setup_return in signal.c
	 * always copies the same number of words regardless whether
	 * it is thumb case or not, so we need one additional padding
	 * word after the last entry.
	 */
	.space	4

	.size	sigreturn_codes, . - sigreturn_codes
|
aixcc-public/challenge-001-exemplar-source
| 1,482
|
arch/arm/kernel/smccc-call.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015, Linaro Limited
*/
#include <linux/linkage.h>
#include <linux/arm-smccc.h>
#include <asm/asm-offsets.h>
#include <asm/opcodes-sec.h>
#include <asm/opcodes-virt.h>
#include <asm/unwind.h>
/*
* Wrap c macros in asm macros to delay expansion until after the
* SMCCC asm macro is expanded.
*/
	.macro SMCCC_SMC
	__SMC(0)
	.endm

	.macro SMCCC_HVC
	__HVC(0)
	.endm

/*
 * Common SMCCC call sequence.  On entry a0-a3 are in r0-r3; a4-a7 are
 * on the stack at [sp, #0..12] (loaded into r4-r7 via r12).  After the
 * push of {r4-r7} (16 bytes), the res pointer is at [sp, #16] and the
 * optional quirk pointer at [sp, #36].
 */
	.macro SMCCC instr
UNWIND(	.fnstart)
	mov	r12, sp
	push	{r4-r7}
UNWIND(	.save	{r4-r7})
	ldm	r12, {r4-r7}		@ a4-a7 into r4-r7
	\instr				@ issue the SMC/HVC
	ldr	r4, [sp, #36]		@ quirk pointer (arg 10)
	cmp	r4, #0
	beq	1f			// No quirk structure
	ldr	r5, [r4, #ARM_SMCCC_QUIRK_ID_OFFS]
	cmp	r5, #ARM_SMCCC_QUIRK_QCOM_A6
	bne	1f			// No quirk present
	str	r6, [r4, #ARM_SMCCC_QUIRK_STATE_OFFS]
1:	pop	{r4-r7}
	ldr	r12, [sp, #(4 * 4)]	@ res pointer (arg 9)
	stm	r12, {r0-r3}		@ store result registers
	bx	lr
UNWIND(	.fnend)
	.endm
/*
* void smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
* unsigned long a3, unsigned long a4, unsigned long a5,
* unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
* struct arm_smccc_quirk *quirk)
*/
@ Issue an SMCCC call via the SMC conduit (secure monitor).
ENTRY(__arm_smccc_smc)
	SMCCC SMCCC_SMC
ENDPROC(__arm_smccc_smc)
/*
* void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
* unsigned long a3, unsigned long a4, unsigned long a5,
* unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
* struct arm_smccc_quirk *quirk)
*/
@ Issue an SMCCC call via the HVC conduit (hypervisor).
ENTRY(__arm_smccc_hvc)
	SMCCC SMCCC_HVC
ENDPROC(__arm_smccc_hvc)
|
aixcc-public/challenge-001-exemplar-source
| 1,250
|
arch/arm/kernel/fiqasm.S
|
/*
* linux/arch/arm/kernel/fiqasm.S
*
* Derived from code originally in linux/arch/arm/kernel/fiq.c:
*
* Copyright (C) 1998 Russell King
* Copyright (C) 1998, 1999 Phil Blundell
* Copyright (C) 2011, Linaro Limited
*
* FIQ support written by Philip Blundell <philb@gnu.org>, 1998.
*
* FIQ support re-written by Russell King to be more generic
*
* v7/Thumb-2 compatibility modifications by Linaro Limited, 2011.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
* Taking an interrupt in FIQ mode is death, so both these functions
* disable irqs for the duration.
*/
@ void __set_fiq_regs(unsigned long *regs)
@ Load the banked FIQ-mode registers (r8-r12, sp, lr) from *regs.
@ Runs briefly in FIQ mode with both IRQ and FIQ masked.
ENTRY(__set_fiq_regs)
	mov	r2, #PSR_I_BIT | PSR_F_BIT | FIQ_MODE
	mrs	r1, cpsr		@ save caller's mode/flags
	msr	cpsr_c, r2	@ select FIQ mode
	mov	r0, r0		@ avoid hazard prior to ARMv4
	ldmia	r0!, {r8 - r12}
	ldr	sp, [r0], #4
	ldr	lr, [r0]
	msr	cpsr_c, r1	@ return to SVC mode
	mov	r0, r0		@ avoid hazard prior to ARMv4
	ret	lr
ENDPROC(__set_fiq_regs)
@ void __get_fiq_regs(unsigned long *regs)
@ Store the banked FIQ-mode registers (r8-r12, sp, lr) into *regs.
@ Runs briefly in FIQ mode with both IRQ and FIQ masked.
ENTRY(__get_fiq_regs)
	mov	r2, #PSR_I_BIT | PSR_F_BIT | FIQ_MODE
	mrs	r1, cpsr		@ save caller's mode/flags
	msr	cpsr_c, r2	@ select FIQ mode
	mov	r0, r0		@ avoid hazard prior to ARMv4
	stmia	r0!, {r8 - r12}
	str	sp, [r0], #4
	str	lr, [r0]
	msr	cpsr_c, r1	@ return to SVC mode
	mov	r0, r0		@ avoid hazard prior to ARMv4
	ret	lr
ENDPROC(__get_fiq_regs)
|
aixcc-public/challenge-001-exemplar-source
| 11,725
|
arch/arm/kernel/entry-common.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/kernel/entry-common.S
*
* Copyright (C) 2000 Russell King
*/
#include <asm/assembler.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#include <asm/memory.h>
#ifdef CONFIG_AEABI
#include <asm/unistd-oabi.h>
#endif
.equ NR_syscalls, __NR_syscalls
@ Architecture-specific fixups performed just before returning to user
@ mode.  Only IOP32x needs work here (revoking kernel cp6 access).
	.macro	arch_ret_to_user, tmp
#ifdef CONFIG_ARCH_IOP32X
	mrc	p15, 0, \tmp, c15, c1, 0
	tst	\tmp, #(1 << 6)
	bicne	\tmp, \tmp, #(1 << 6)
	mcrne	p15, 0, \tmp, c15, c1, 0	@ Disable cp6 access
#endif
	.endm
#include "entry-header.S"
saved_psr .req r8
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING_USER)
saved_pc .req r9
#define TRACE(x...) x
#else
saved_pc .req lr
#define TRACE(x...)
#endif
.section .entry.text,"ax",%progbits
.align 5
#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING_USER) || \
	IS_ENABLED(CONFIG_DEBUG_RSEQ))
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * such as avoiding writing r0 to the stack.  We only use this path if we
 * have tracing, context tracking and rseq debug disabled - the overheads
 * from those features make this path too inefficient.
 */
ret_fast_syscall:
__ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq_notrace			@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	movs	r1, r1, lsl #16			@ test low 16 TI_FLAGS bits
	bne	fast_work_pending

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Ok, we need to do extra processing, enter the slow path. */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
	/* fall through to work_pending */
#else
/*
 * The "replacement" ret_fast_syscall for when tracing, context tracking,
 * or rseq debug is enabled.  As we will need to call out to some C functions,
 * we save r0 first to avoid needing to save registers around each C function
 * call.
 */
ret_fast_syscall:
__ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
	/* do_rseq_syscall needs interrupts enabled. */
	mov	r0, sp				@ 'regs'
	bl	do_rseq_syscall
#endif
	disable_irq_notrace			@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	movs	r1, r1, lsl #16			@ test low 16 TI_FLAGS bits
	beq	no_work_pending
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Slower path - fall through to work_pending */
#endif

	tst	r1, #_TIF_SYSCALL_WORK		@ syscall tracing in effect?
	bne	__sys_trace_return_nosave
slow_work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	str	scno, [tsk, #TI_ABI_SYSCALL]	@ make sure tracers see update
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
ENDPROC(ret_fast_syscall)
/*
* "slow" syscall return path. "why" tells us if this was a real syscall.
* IRQs may be enabled here, so always disable them. Note that we use the
* "notrace" version to avoid calling into the tracing code unnecessarily.
* do_work_pending() will update this state if necessary.
*/
ENTRY(ret_to_user)
ret_slow_syscall:
#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
	/* do_rseq_syscall needs interrupts enabled. */
	enable_irq_notrace			@ enable interrupts
	mov	r0, sp				@ 'regs'
	bl	do_rseq_syscall
#endif
	disable_irq_notrace			@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	movs	r1, r1, lsl #16			@ test low 16 TI_FLAGS bits
	bne	slow_work_pending
no_work_pending:
	asm_trace_hardirqs_on save = 0

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1
	ct_user_enter save = 0

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)
/*
* This is how we return from a fork.
*/
ENTRY(ret_from_fork)
	bl	schedule_tail
	cmp	r5, #0			@ kernel thread? (r5 = fn set by copy_thread)
	movne	r0, r4			@ yes: r0 = thread argument
	badrne	lr, 1f			@ set return address for the fn
	retne	r5			@ call the kernel thread function
1:	get_thread_info tsk
	b	ret_slow_syscall	@ userspace child: return 0 via slow path
ENDPROC(ret_from_fork)
/*=============================================================================
* SWI handler
*-----------------------------------------------------------------------------
*/
.align 5
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
@ Spectre-BHB hardened SWI entries: scrub branch history before joining
@ the common vector_swi path at label 3.
ENTRY(vector_bhb_loop8_swi)
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}
	mov	r8, #8			@ 8 dummy branches flush the BHB
1:	b	2f
2:	subs	r8, r8, #1
	bne	1b
	dsb	nsh
	isb
	b	3f			@ continue in vector_swi
ENDPROC(vector_bhb_loop8_swi)

	.align	5
ENTRY(vector_bhb_bpiall_swi)
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}
	mcr	p15, 0, r8, c7, c5, 6	@ BPIALL
	isb
	b	3f			@ continue in vector_swi
ENDPROC(vector_bhb_bpiall_swi)
#endif
.align 5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
	v7m_exception_entry
#else
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
3:						@ joined by the BHB-hardened entries
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	saved_psr, spsr			@ called from non-FIQ mode, so ok.
 TRACE(	mov	saved_pc, lr		)
	str	saved_pc, [sp, #S_PC]		@ Save calling PC
	str	saved_psr, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
#endif
	reload_current r10, ip
	zero_fp
	alignment_trap r10, ip, cr_alignment
	asm_trace_hardirqs_on save=0
	enable_irq_notrace
	ct_user_exit save=0

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	saved_psr, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
 USER(	ldreq	r10, [saved_pc, #-4]	)	@ get SWI instruction
#else
 USER(	ldr	r10, [saved_pc, #-4]	)	@ get SWI instruction
#endif
 ARM_BE8(rev	r10, r10)			@ little endian instruction

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always put syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	saved_psr, #PSR_T_BIT		@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
 USER(	ldreq	scno, [saved_pc, #-4]	)

#else
	/* Legacy ABI only. */
 USER(	ldr	scno, [saved_pc, #-4]	)	@ get SWI instruction
#endif

	/* saved_psr and saved_pc are now dead */

	uaccess_disable tbl
	get_thread_info tsk

	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000		@ strip SWI opcode bits
	strne	r10, [tsk, #TI_ABI_SYSCALL]
	streq	scno, [tsk, #TI_ABI_SYSCALL]
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	str	scno, [tsk, #TI_ABI_SYSCALL]
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#else
	str	scno, [tsk, #TI_ABI_SYSCALL]
#endif
	/*
	 * Reload the registers that may have been corrupted on entry to
	 * the syscall assembly (by tracing or context tracking.)
	 */
 TRACE(	ldmia	sp, {r0 - r3}		)

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	invoke_syscall tbl, scno, r10, __ret_fast_syscall

	add	r1, sp, #S_OFF
2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall			@ private ARM syscall range
	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func

#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
	/*
	 * We failed to handle a fault trying to access the page
	 * containing the swi instruction, but we're not really in a
	 * position to return -EFAULT. Instead, return back to the
	 * instruction and re-enter the user fault handling path trying
	 * to page it in. This will likely result in sending SEGV to the
	 * current task.
	 */
9001:
	sub	lr, saved_pc, #4
	str	lr, [sp, #S_PC]
	get_thread_info tsk
	b	ret_fast_syscall
#endif
ENDPROC(vector_swi)
	.ltorg
/*
* This is the really slow path. We're going to be doing
* context switches, and waiting for our parent to respond.
*/
@ Traced-syscall path: let the tracer see (and possibly rewrite) the
@ syscall before and after invocation.
__sys_trace:
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter
	mov	scno, r0			@ tracer may have changed the number
	invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
	cmp	scno, #-1			@ skip the syscall?
	bne	2b				@ out-of-range: private/ni handling
	add	sp, sp, #S_OFF			@ restore stack

__sys_trace_return_nosave:
	enable_irq_notrace
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall
@ Helpers to build a dense syscall table: gaps between declared numbers
@ are padded with sys_ni_syscall, and the table is padded to NR_syscalls.
	.macro	syscall_table_start, sym
	.equ	__sys_nr, 0
	.type	\sym, #object
ENTRY(\sym)
	.endm

	.macro	syscall, nr, func
	.ifgt	__sys_nr - \nr
	.error	"Duplicated/unorded system call entry"
	.endif
	.rept	\nr - __sys_nr
	.long	sys_ni_syscall		@ fill holes with "not implemented"
	.endr
	.long	\func
	.equ	__sys_nr, \nr + 1
	.endm

	.macro	syscall_table_end, sym
	.ifgt	__sys_nr - __NR_syscalls
	.error	"System call table too big"
	.endif
	.rept	__NR_syscalls - __sys_nr
	.long	sys_ni_syscall		@ pad table to full size
	.endr
	.size	\sym, . - \sym
	.endm
#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
#define __SYSCALL(nr, func) syscall nr, func

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
 */
	syscall_table_start sys_call_table
#ifdef CONFIG_AEABI
#include <calls-eabi.S>
#else
#include <calls-oabi.S>
#endif
	syscall_table_end sys_call_table
/*============================================================================
* Special system call wrappers
*/
@ sys_syscall: indirect syscall - dispatch syscall number given in r0,
@ shifting the real arguments down by one register.
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
		cmpne	scno, #NR_syscalls	@ check range
#ifdef CONFIG_CPU_SPECTRE
		movhs	scno, #0		@ clamp to prevent speculative OOB
		csdb
#endif
		stmialo	sp, {r5, r6}		@ shuffle args
		movlo	r0, r1
		movlo	r1, r2
		movlo	r2, r3
		movlo	r3, r4
		ldrlo	pc, [tbl, scno, lsl #2]	@ jump to real handler
		b	sys_ni_syscall		@ out of range or recursive
ENDPROC(sys_syscall)
@ Wrappers passing struct pt_regs * and disabling restart handling.
sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		mov	why, #0		@ prevent syscall restart handling
		b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		mov	why, #0		@ prevent syscall restart handling
		b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

@ Legacy userspace passes sizeof(struct statfs64) == 88 including
@ padding; the kernel expects 84.
sys_statfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
		str	r5, [sp, #4]	@ pass 6th arg (pgoff) on the stack
		b	sys_mmap_pgoff
ENDPROC(sys_mmap2)
#ifdef CONFIG_OABI_COMPAT
/*
 * These are syscalls with argument register differences
 */
@ OABI carries the 64-bit offset in r3/r4; spill the pair to the stack
@ argument area where sys_pread64 expects it, then tail-call.
sys_oabi_pread64:
stmia sp, {r3, r4}
b sys_pread64
ENDPROC(sys_oabi_pread64)
@ Same r3/r4 -> stack offset shuffle as sys_oabi_pread64, for pwrite64.
sys_oabi_pwrite64:
stmia sp, {r3, r4}
b sys_pwrite64
ENDPROC(sys_oabi_pwrite64)
@ Shift the 64-bit length from r1/r2 up into the r2/r3 pair expected
@ by sys_truncate64 (order matters: copy r2 before overwriting it).
sys_oabi_truncate64:
mov r3, r2
mov r2, r1
b sys_truncate64
ENDPROC(sys_oabi_truncate64)
@ Same r1/r2 -> r2/r3 shuffle as sys_oabi_truncate64, for ftruncate64.
sys_oabi_ftruncate64:
mov r3, r2
mov r2, r1
b sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)
@ Move the count (r3) to the stack argument slot, then shift the 64-bit
@ offset from r1/r2 into r2/r3 to match sys_readahead's layout.
sys_oabi_readahead:
str r3, [sp]
mov r3, r2
mov r2, r1
b sys_readahead
ENDPROC(sys_oabi_readahead)
/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
syscall_table_start sys_oabi_call_table
/* Re-point __SYSCALL_WITH_COMPAT at the compat entry for OABI tasks. */
#undef __SYSCALL_WITH_COMPAT
#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat)
#include <calls-oabi.S>
syscall_table_end sys_oabi_call_table
#endif
|
aixcc-public/challenge-001-exemplar-source
| 3,513
|
arch/arm/kernel/vmlinux.lds.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* ld script to make ARM Linux kernel
* taken from the i386 version by Russell King
* Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
*/
#ifdef CONFIG_XIP_KERNEL
#include "vmlinux-xip.lds.S"
#else
#include <linux/pgtable.h>
#include <asm/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/mpu.h>
#include <asm/page.h>
OUTPUT_ARCH(arm)
ENTRY(stext)
/*
 * Alias the 32-bit "jiffies" symbol onto the least-significant word of
 * the 64-bit jiffies_64: offset 0 on little-endian, 4 on big-endian.
 */
#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif
SECTIONS
{
/*
* XXX: The linker does not define how output sections are
* assigned to input sections when there are multiple statements
* matching the same input section name. There is no documented
* order of matching.
*
* unwind exit sections must be discarded before the rest of the
* unwind sections get included.
*/
/DISCARD/ : {
ARM_DISCARD
#ifndef CONFIG_SMP_ON_UP
*(.alt.smp.init)
#endif
#ifndef CONFIG_ARM_UNWIND
*(.ARM.exidx) *(.ARM.exidx.*)
*(.ARM.extab) *(.ARM.extab.*)
#endif
}
/* The kernel image begins TEXT_OFFSET bytes into the kernel mapping. */
. = KERNEL_OFFSET + TEXT_OFFSET;
.head.text : {
_text = .;
HEAD_TEXT
}
/* Strict RWX changes permissions at section granularity, so align. */
#ifdef CONFIG_STRICT_KERNEL_RWX
. = ALIGN(1<<SECTION_SHIFT);
#endif
#ifdef CONFIG_ARM_MPU
. = ALIGN(PMSAv8_MINALIGN);
#endif
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
ARM_TEXT
}
#ifdef CONFIG_DEBUG_ALIGN_RODATA
. = ALIGN(1<<SECTION_SHIFT);
#endif
_etext = .; /* End of text section */
RO_DATA(PAGE_SIZE)
. = ALIGN(4);
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
ARM_MMU_KEEP(*(__ex_table))
__stop___ex_table = .;
}
#ifdef CONFIG_ARM_UNWIND
ARM_UNWIND_SECTIONS
#endif
#ifdef CONFIG_STRICT_KERNEL_RWX
. = ALIGN(1<<SECTION_SHIFT);
#else
. = ALIGN(PAGE_SIZE);
#endif
/* Everything between __init_begin and __init_end is freed after boot. */
__init_begin = .;
ARM_VECTORS
INIT_TEXT_SECTION(8)
.exit.text : {
ARM_EXIT_KEEP(EXIT_TEXT)
}
.init.proc.info : {
ARM_CPU_DISCARD(PROC_INFO)
}
.init.arch.info : {
__arch_info_begin = .;
*(.arch.info.init)
__arch_info_end = .;
}
.init.tagtable : {
__tagtable_begin = .;
*(.taglist.init)
__tagtable_end = .;
}
#ifdef CONFIG_SMP_ON_UP
.init.smpalt : {
__smpalt_begin = .;
*(.alt.smp.init)
__smpalt_end = .;
}
#endif
.init.pv_table : {
__pv_table_begin = .;
*(.pv_table)
__pv_table_end = .;
}
INIT_DATA_SECTION(16)
.exit.data : {
ARM_EXIT_KEEP(EXIT_DATA)
}
#ifdef CONFIG_SMP
PERCPU_SECTION(L1_CACHE_BYTES)
#endif
#ifdef CONFIG_HAVE_TCM
ARM_TCM
#endif
#ifdef CONFIG_STRICT_KERNEL_RWX
. = ALIGN(1<<SECTION_SHIFT);
#else
. = ALIGN(THREAD_ALIGN);
#endif
__init_end = .;
/* Read-write data, then BSS and the end-of-image marker. */
_sdata = .;
RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
_edata = .;
BSS_SECTION(0, 0, 0)
#ifdef CONFIG_ARM_MPU
. = ALIGN(PMSAv8_MINALIGN);
#endif
_end = .;
STABS_DEBUG
DWARF_DEBUG
ARM_DETAILS
ARM_ASSERTS
}
#ifdef CONFIG_STRICT_KERNEL_RWX
/*
* Without CONFIG_DEBUG_ALIGN_RODATA, __start_rodata_section_aligned will
* be the first section-aligned location after __start_rodata. Otherwise,
* it will be equal to __start_rodata.
*/
__start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
#endif
/* Link-time sanity checks, evaluated by the linker itself. */
/*
* These must never be empty
* If you have to comment these two assert statements out, your
* binutils is too old (for other reasons as well)
*/
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
#ifndef CONFIG_COMPILE_TEST
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
#endif
#endif /* CONFIG_XIP_KERNEL */
|
aixcc-public/challenge-001-exemplar-source
| 5,579
|
arch/arm/kernel/sleep.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/glue-cache.h>
#include <asm/glue-proc.h>
.text
/*
* Implementation of MPIDR hash algorithm through shifting
* and OR'ing.
*
* @dst: register containing hash result
* @rs0: register containing affinity level 0 bit shift
* @rs1: register containing affinity level 1 bit shift
* @rs2: register containing affinity level 2 bit shift
* @mpidr: register containing MPIDR value
* @mask: register containing MPIDR mask
*
* Pseudo C-code:
*
*u32 dst;
*
*compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 mpidr, u32 mask) {
* u32 aff0, aff1, aff2;
* u32 mpidr_masked = mpidr & mask;
* aff0 = mpidr_masked & 0xff;
* aff1 = mpidr_masked & 0xff00;
* aff2 = mpidr_masked & 0xff0000;
* dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2);
*}
* Input registers: rs0, rs1, rs2, mpidr, mask
* Output register: dst
* Note: input and output registers must be disjoint register sets
(eg: a macro instance with mpidr = r1 and dst = r1 is invalid)
*/
@ NOTE: \mpidr and \mask are reused as scratch and are clobbered.
.macro compute_mpidr_hash dst, rs0, rs1, rs2, mpidr, mask
and \mpidr, \mpidr, \mask @ mask out MPIDR bits
and \dst, \mpidr, #0xff @ mask=aff0
ARM( mov \dst, \dst, lsr \rs0 ) @ dst=aff0>>rs0
THUMB( lsr \dst, \dst, \rs0 ) @ Thumb-2 has no shifted-operand mov form
and \mask, \mpidr, #0xff00 @ mask = aff1
ARM( orr \dst, \dst, \mask, lsr \rs1 ) @ dst|=(aff1>>rs1)
THUMB( lsr \mask, \mask, \rs1 ) @ Thumb-2: shift then OR in two steps
THUMB( orr \dst, \dst, \mask )
and \mask, \mpidr, #0xff0000 @ mask = aff2
ARM( orr \dst, \dst, \mask, lsr \rs2 ) @ dst|=(aff2>>rs2)
THUMB( lsr \mask, \mask, \rs2 )
THUMB( orr \dst, \dst, \mask )
.endm
/*
* Save CPU state for a suspend. This saves the CPU general purpose
* registers, and allocates space on the kernel stack to save the CPU
* specific registers and some other data for resume.
* r0 = suspend function arg0
* r1 = suspend function
* r2 = MPIDR value the resuming CPU will use
*/
ENTRY(__cpu_suspend)
stmfd sp!, {r4 - r11, lr}
#ifdef MULTI_CPU
ldr r10, =processor
ldr r4, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
#else
ldr r4, =cpu_suspend_size
#endif
mov r5, sp @ current virtual SP
#ifdef CONFIG_VMAP_STACK
@ Run the suspend code from the overflow stack so we don't have to rely
@ on vmalloc-to-phys conversions anywhere in the arch suspend code.
@ The original SP value captured in R5 will be restored on the way out.
ldr_this_cpu sp, overflow_stack_ptr, r6, r7
#endif
add r4, r4, #12 @ Space for pgd, virt sp, phys resume fn
sub sp, sp, r4 @ allocate CPU state on stack
ldr r3, =sleep_save_sp
stmfd sp!, {r0, r1} @ save suspend func arg and pointer
ldr r3, [r3, #SLEEP_SAVE_SP_VIRT]
ALT_SMP(W(nop)) @ don't use adr_l inside ALT_SMP()
ALT_UP_B(1f)
adr_l r0, mpidr_hash
/* This ldmia relies on the memory layout of the mpidr_hash struct */
ldmia r0, {r1, r6-r8} @ r1 = mpidr mask (r6,r7,r8) = l[0,1,2] shifts
compute_mpidr_hash r0, r6, r7, r8, r2, r1
add r3, r3, r0, lsl #2 @ r3 = &sleep_save_sp[hash] (one word per CPU slot)
1: mov r2, r5 @ virtual SP
mov r1, r4 @ size of save block
add r0, sp, #8 @ pointer to save block
bl __cpu_suspend_save
badr lr, cpu_suspend_abort @ if the suspend fn ever returns, abort
ldmfd sp!, {r0, pc} @ call suspend fn
ENDPROC(__cpu_suspend)
.ltorg
@ Reached only when the suspend function returns instead of powering
@ down: discard the save block and return a non-zero failure code.
cpu_suspend_abort:
ldmia sp!, {r1 - r3} @ pop phys pgd, virt SP, phys resume fn
teq r0, #0
moveq r0, #1 @ force non-zero value
mov sp, r2 @ restore the original virtual SP
ldmfd sp!, {r4 - r11, pc}
ENDPROC(cpu_suspend_abort)
/*
* r0 = control register value
*/
.align 5
.pushsection .idmap.text,"ax" @ runs from the identity map until the MMU is on
ENTRY(cpu_resume_mmu)
ldr r3, =cpu_resume_after_mmu @ virtual continuation address
instr_sync
mcr p15, 0, r0, c1, c0, 0 @ turn on MMU, I-cache, etc
mrc p15, 0, r0, c0, c0, 0 @ read id reg
instr_sync
mov r0, r0 @ nop padding after enabling the MMU
mov r0, r0
ret r3 @ jump to virtual address
ENDPROC(cpu_resume_mmu)
.popsection
@ Virtual-address continuation of cpu_resume_mmu: re-initialise banked
@ modes and return 0 to __cpu_suspend's caller via the saved registers.
cpu_resume_after_mmu:
#if defined(CONFIG_VMAP_STACK) && !defined(CONFIG_ARM_LPAE)
@ Before using the vmap'ed stack, we have to switch to swapper_pg_dir
@ as the ID map does not cover the vmalloc region.
mrc p15, 0, ip, c2, c0, 1 @ read TTBR1
mcr p15, 0, ip, c2, c0, 0 @ set TTBR0
instr_sync
#endif
bl cpu_init @ restore the und/abt/irq banked regs
mov r0, #0 @ return zero on success
ldmfd sp!, {r4 - r11, pc}
ENDPROC(cpu_resume_after_mmu)
.text
.align
#ifdef CONFIG_MCPM
.arm
THUMB( .thumb )
@ MCPM entry: resume without touching the HYP stub.
ENTRY(cpu_resume_no_hyp)
ARM_BE8(setend be) @ ensure we are in BE mode
b no_hyp
#endif
#ifdef CONFIG_MMU
.arm
@ Entry used when the platform enters in ARM state regardless of the
@ kernel's instruction set.
ENTRY(cpu_resume_arm)
THUMB( badr r9, 1f ) @ Kernel is entered in ARM.
THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
THUMB( .thumb ) @ switch to Thumb now.
THUMB(1: )
#endif
ENTRY(cpu_resume)
ARM_BE8(setend be) @ ensure we are in BE mode
#ifdef CONFIG_ARM_VIRT_EXT
bl __hyp_stub_install_secondary
#endif
safe_svcmode_maskall r1
no_hyp:
mov r1, #0 @ UP default: save-block index 0
ALT_SMP(mrc p15, 0, r0, c0, c0, 5) @ SMP: r0 = this CPU's MPIDR
ALT_UP_B(1f)
adr_l r2, mpidr_hash @ r2 = struct mpidr_hash phys address
/*
* This ldmia relies on the memory layout of the mpidr_hash
* struct mpidr_hash.
*/
ldmia r2, { r3-r6 } @ r3 = mpidr mask (r4,r5,r6) = l[0,1,2] shifts
compute_mpidr_hash r1, r4, r5, r6, r0, r3
1:
ldr_l r0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
ldr r0, [r0, r1, lsl #2] @ r0 = this CPU's save block (phys)
@ load phys pgd, stack, resume fn
ARM( ldmia r0!, {r1, sp, pc} )
THUMB( ldmia r0!, {r1, r2, r3} ) @ Thumb-2 ldmia cannot load SP/PC directly
THUMB( mov sp, r2 )
THUMB( bx r3 )
ENDPROC(cpu_resume)
#ifdef CONFIG_MMU
ENDPROC(cpu_resume_arm)
#endif
#ifdef CONFIG_MCPM
ENDPROC(cpu_resume_no_hyp)
#endif
.data
.align 2
.type sleep_save_sp, #object
@ Storage for struct sleep_save_sp (layout defined on the C side;
@ SLEEP_SAVE_SP_VIRT/SLEEP_SAVE_SP_PHYS index into it above).
ENTRY(sleep_save_sp)
.space SLEEP_SAVE_SP_SZ @ struct sleep_save_sp
|
aixcc-public/challenge-001-exemplar-source
| 5,923
|
arch/arm/kernel/hyp-stub.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 2012 Linaro Limited.
*/
#include <linux/init.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/virt.h>
#ifndef ZIMAGE
/*
* For the kernel proper, we need to find out the CPU boot mode long after
* boot, so we need to store it in a writable variable.
*
* This is not in .bss, because we set it sufficiently early that the boot-time
* zeroing of .bss would clobber it.
*/
.data
.align 2
ENTRY(__boot_cpu_mode)
.long 0 @ no mode recorded yet
.text
/*
* Save the primary CPU boot mode. Requires 2 scratch registers.
*/
@ \reg1 ends up holding the mode bits; \reg2 is scratch for str_l.
.macro store_primary_cpu_mode reg1, reg2
mrs \reg1, cpsr
and \reg1, \reg1, #MODE_MASK
str_l \reg1, __boot_cpu_mode, \reg2
.endm
/*
* Compare the current mode with the one saved on the primary CPU.
* If they don't match, record that fact. The Z bit indicates
* if there's a match or not.
* Requires 2 additional scratch registers.
*/
.macro compare_cpu_mode_with_primary mode, reg1, reg2
adr_l \reg2, __boot_cpu_mode
ldr \reg1, [\reg2]
cmp \mode, \reg1 @ matches primary CPU boot mode?
orrne \reg1, \reg1, #BOOT_CPU_MODE_MISMATCH
strne \reg1, [\reg2] @ record what happened and give up
.endm
#else /* ZIMAGE */
.macro store_primary_cpu_mode reg1:req, reg2:req
.endm
/*
* The zImage loader only runs on one CPU, so we don't bother with mult-CPU
* consistency checking:
*/
.macro compare_cpu_mode_with_primary mode, reg1, reg2
cmp \mode, \mode @ compare with itself: Z always set ("match")
.endm
#endif /* ZIMAGE */
/*
* Hypervisor stub installation functions.
*
* These must be called with the MMU and D-cache off.
* They are not ABI compliant and are only intended to be called from the kernel
* entry points in head.S.
*/
@ Call this from the primary CPU
ENTRY(__hyp_stub_install)
store_primary_cpu_mode r4, r5
ENDPROC(__hyp_stub_install)
@ fall through...
@ Secondary CPUs should call here
ENTRY(__hyp_stub_install_secondary)
mrs r4, cpsr
and r4, r4, #MODE_MASK
/*
* If the secondary has booted with a different mode, give up
* immediately.
*/
compare_cpu_mode_with_primary r4, r5, r6
retne lr
/*
* Once we have given up on one CPU, we do not try to install the
* stub hypervisor on the remaining ones: because the saved boot mode
* is modified, it can't compare equal to the CPSR mode field any
* more.
*
* Otherwise...
*/
cmp r4, #HYP_MODE
retne lr @ give up if the CPU is not in HYP mode
/*
* Configure HSCTLR to set correct exception endianness/instruction set
* state etc.
* Turn off all traps
* Eventually, CPU-specific code might be needed -- assume not for now
*
* This code relies on the "eret" instruction to synchronize the
* various coprocessor accesses. This is done when we switch to SVC
* (see safe_svcmode_maskall).
*/
@ Now install the hypervisor stub:
W(adr) r7, __hyp_stub_vectors
mcr p15, 4, r7, c12, c0, 0 @ set hypervisor vector base (HVBAR)
@ Disable all traps, so we don't get any nasty surprise
mov r7, #0
mcr p15, 4, r7, c1, c1, 0 @ HCR
mcr p15, 4, r7, c1, c1, 2 @ HCPTR
mcr p15, 4, r7, c1, c1, 3 @ HSTR
THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE
ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE
mcr p15, 4, r7, c1, c0, 0 @ HSCTLR
mrc p15, 4, r7, c1, c1, 1 @ HDCR
and r7, #0x1f @ Preserve HPMN
mcr p15, 4, r7, c1, c1, 1 @ HDCR
@ Make sure NS-SVC is initialised appropriately
mrc p15, 0, r7, c1, c0, 0 @ SCTLR
orr r7, #(1 << 5) @ CP15 barriers enabled
bic r7, #(3 << 7) @ Clear SED/ITD for v8 (RES0 for v7)
bic r7, #(3 << 19) @ WXN and UWXN disabled
mcr p15, 0, r7, c1, c0, 0 @ SCTLR
@ Mirror the real ID registers into their virtualized counterparts.
mrc p15, 0, r7, c0, c0, 0 @ MIDR
mcr p15, 4, r7, c0, c0, 0 @ VPIDR
mrc p15, 0, r7, c0, c0, 5 @ MPIDR
mcr p15, 4, r7, c0, c0, 5 @ VMPIDR
#if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER)
@ make CNTP_* and CNTPCT accessible from PL1
mrc p15, 0, r7, c0, c1, 1 @ ID_PFR1
ubfx r7, r7, #16, #4 @ ID_PFR1[19:16] = generic timer field
teq r7, #0
beq 1f @ no architected timer: skip
mrc p15, 4, r7, c14, c1, 0 @ CNTHCTL
orr r7, r7, #3 @ PL1PCEN | PL1PCTEN
mcr p15, 4, r7, c14, c1, 0 @ CNTHCTL
mov r7, #0
mcrr p15, 4, r7, r7, c14 @ CNTVOFF
@ Disable virtual timer in case it was counting
mrc p15, 0, r7, c14, c3, 1 @ CNTV_CTL
bic r7, #1 @ Clear ENABLE
mcr p15, 0, r7, c14, c3, 1 @ CNTV_CTL
1:
#endif
#ifdef CONFIG_ARM_GIC_V3
@ Check whether GICv3 system registers are available
mrc p15, 0, r7, c0, c1, 1 @ ID_PFR1
ubfx r7, r7, #28, #4 @ ID_PFR1[31:28] = GIC field
teq r7, #0
beq 2f
@ Enable system register accesses
mrc p15, 4, r7, c12, c9, 5 @ ICC_HSRE
orr r7, r7, #(ICC_SRE_EL2_ENABLE | ICC_SRE_EL2_SRE)
mcr p15, 4, r7, c12, c9, 5 @ ICC_HSRE
isb
@ SRE bit could be forced to 0 by firmware.
@ Check whether it sticks before accessing any other sysreg
mrc p15, 4, r7, c12, c9, 5 @ ICC_HSRE
tst r7, #ICC_SRE_EL2_SRE
beq 2f
mov r7, #0
mcr p15, 4, r7, c12, c11, 0 @ ICH_HCR
2:
#endif
bx lr @ The boot CPU mode is left in r4.
ENDPROC(__hyp_stub_install_secondary)
@ HVC trap handler for the stub: a tiny hypercall "API" keyed on r0.
__hyp_stub_do_trap:
#ifdef ZIMAGE
teq r0, #HVC_SET_VECTORS
bne 1f
/* Only the ZIMAGE stubs can change the HYP vectors */
mcr p15, 4, r1, c12, c0, 0 @ set HVBAR
b __hyp_stub_exit
#endif
1: teq r0, #HVC_SOFT_RESTART
bne 2f
bx r1 @ jump to the requested restart address, staying in HYP
2: ldr r0, =HVC_STUB_ERR @ unknown function: return the error code
__ERET
__hyp_stub_exit:
mov r0, #0 @ success
__ERET
ENDPROC(__hyp_stub_do_trap)
/*
* __hyp_set_vectors is only used when ZIMAGE must bounce between HYP
* and SVC. For the kernel itself, the vectors are set once and for
* all by the stubs.
*/
ENTRY(__hyp_set_vectors)
mov r1, r0 @ r1 = new HYP vector base
mov r0, #HVC_SET_VECTORS
__HVC(0) @ trap into __hyp_stub_do_trap
ret lr
ENDPROC(__hyp_set_vectors)
@ Ask the stub to branch to the address in r0 while in HYP mode.
ENTRY(__hyp_soft_restart)
mov r1, r0 @ r1 = restart target address
mov r0, #HVC_SOFT_RESTART
__HVC(0) @ trap into __hyp_stub_do_trap
ret lr
ENDPROC(__hyp_soft_restart)
.align 5
@ HYP exception vector table: every entry spins in place except the
@ HVC trap, which dispatches to __hyp_stub_do_trap.
ENTRY(__hyp_stub_vectors)
__hyp_stub_reset: W(b) .
__hyp_stub_und: W(b) .
__hyp_stub_svc: W(b) .
__hyp_stub_pabort: W(b) .
__hyp_stub_dabort: W(b) .
__hyp_stub_trap: W(b) __hyp_stub_do_trap
__hyp_stub_irq: W(b) .
__hyp_stub_fiq: W(b) .
ENDPROC(__hyp_stub_vectors)
|
aixcc-public/challenge-001-exemplar-source
| 2,657
|
arch/arm/kernel/debug.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/kernel/debug.S
*
* Copyright (C) 1994-1999 Russell King
*
* 32-bit debugging code
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
/*
* Some debugging routines (useful if you've got MM problems and
* printk isn't working). For DEBUGGING ONLY!!! Do not leave
* references to these in a production kernel!
*/
#if !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
#endif
#ifdef CONFIG_MMU
@ Load the UART base into \rx, picking the physical or virtual address
@ depending on whether the MMU is currently enabled (SCTLR bit 0), so
@ the print routines work both before and after the MMU comes up.
.macro addruart_current, rx, tmp1, tmp2
addruart \tmp1, \tmp2, \rx @ \tmp1 = phys base, \tmp2 = virt base
mrc p15, 0, \rx, c1, c0 @ read SCTLR
tst \rx, #1 @ MMU enabled?
moveq \rx, \tmp1 @ no: use the physical base
movne \rx, \tmp2 @ yes: use the virtual base
.endm
#else /* !CONFIG_MMU */
.macro addruart_current, rx, tmp1, tmp2
addruart \rx, \tmp1, \tmp2
.endm
#endif /* CONFIG_MMU */
/*
* Useful debugging routines
*/
@ printhex8/4/2: print r0 as a zero-padded hex number of 8/4/2 digits.
ENTRY(printhex8)
mov r1, #8
b printhex
ENDPROC(printhex8)
ENTRY(printhex4)
mov r1, #4
b printhex
ENDPROC(printhex4)
ENTRY(printhex2)
mov r1, #2
@ Common tail: r0 = value, r1 = digit count.  Builds a NUL-terminated
@ string in hexbuf_addr, low nibble last, then prints via printascii.
printhex: adr r2, hexbuf_rel
ldr r3, [r2]
add r2, r2, r3 @ r2 = &hexbuf_addr (position-independent)
add r3, r2, r1 @ r3 = one past the last digit
mov r1, #0
strb r1, [r3] @ NUL terminator
1: and r1, r0, #15 @ take the low nibble
mov r0, r0, lsr #4
cmp r1, #10
addlt r1, r1, #'0'
addge r1, r1, #'a' - 10
strb r1, [r3, #-1]! @ store digits from the end backwards
teq r3, r2
bne 1b
mov r0, r2
b printascii
ENDPROC(printhex2)
.pushsection .bss
hexbuf_addr: .space 16 @ scratch buffer for printhex/printch output
.popsection
.align
hexbuf_rel: .long hexbuf_addr - . @ PC-relative offset to the buffer
.ltorg
#ifndef CONFIG_DEBUG_SEMIHOSTING
@ printascii: send the NUL-terminated string at r0 to the debug UART,
@ expanding '\n' into "\r\n".  A NULL r0 prints nothing.
ENTRY(printascii)
addruart_current r3, r1, r2
1: teq r0, #0 @ NULL string pointer?
ldrbne r1, [r0], #1 @ fetch next character
teqne r1, #0 @ end of string?
reteq lr
2: teq r1, #'\n' @ LF: emit a CR first
bne 3f
mov r1, #'\r'
#ifdef CONFIG_DEBUG_UART_FLOW_CONTROL
waituartcts r2, r3
#endif
waituarttxrdy r2, r3
senduart r1, r3
busyuart r2, r3
mov r1, #'\n'
3:
#ifdef CONFIG_DEBUG_UART_FLOW_CONTROL
waituartcts r2, r3
#endif
waituarttxrdy r2, r3
senduart r1, r3
busyuart r2, r3
b 1b
ENDPROC(printascii)
@ printch: print the single character in r0.
ENTRY(printch)
addruart_current r3, r1, r2
mov r1, r0 @ character to transmit
mov r0, #0 @ fake NULL string so printascii's loop terminates
b 2b @ join printascii's transmit path
ENDPROC(printch)
#ifdef CONFIG_MMU
@ debug_ll_addr: store the UART physical base at [r0], virtual at [r1].
ENTRY(debug_ll_addr)
addruart r2, r3, ip
str r2, [r0]
str r3, [r1]
ret lr
ENDPROC(debug_ll_addr)
#endif
#else
@ Semihosting variants: hand the output to the attached debugger or
@ monitor via the ARM semihosting SVC/BKPT interface.
ENTRY(printascii)
mov r1, r0 @ r1 = string pointer (semihosting argument)
mov r0, #0x04 @ SYS_WRITE0
ARM( svc #0x123456 )
#ifdef CONFIG_CPU_V7M
THUMB( bkpt #0xab ) @ v7-M has no SVC-based semihosting trap
#else
THUMB( svc #0xab )
#endif
ret lr
ENDPROC(printascii)
ENTRY(printch)
adr r1, hexbuf_rel
ldr r2, [r1]
add r1, r1, r2 @ r1 = &hexbuf_addr
strb r0, [r1] @ SYS_WRITEC takes a pointer to the character
mov r0, #0x03 @ SYS_WRITEC
ARM( svc #0x123456 )
#ifdef CONFIG_CPU_V7M
THUMB( bkpt #0xab )
#else
THUMB( svc #0xab )
#endif
ret lr
ENDPROC(printch)
@ No UART in the semihosting configuration: report 0 for both addresses.
ENTRY(debug_ll_addr)
mov r2, #0
str r2, [r0]
str r2, [r1]
ret lr
ENDPROC(debug_ll_addr)
#endif
|
aixcc-public/challenge-001-exemplar-source
| 20,508
|
arch/arm/crypto/sha1-armv7-neon.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* sha1-armv7-neon.S - ARM/NEON accelerated SHA-1 transform function
*
* Copyright © 2013-2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.syntax unified
.fpu neon
.text
/* Context structure */
/* Byte offsets of the five SHA-1 chaining words in the state struct. */
#define state_h0 0
#define state_h1 4
#define state_h2 8
#define state_h3 12
#define state_h4 16
/* Constants */
/* SHA-1 round constants, one per group of 20 rounds. */
#define K1 0x5A827999
#define K2 0x6ED9EBA1
#define K3 0x8F1BBCDC
#define K4 0xCA62C1D6
.align 4
.LK_VEC:
.LK1: .long K1, K1, K1, K1
.LK2: .long K2, K2, K2, K2
.LK3: .long K3, K3, K3, K3
.LK4: .long K4, K4, K4, K4
/* Register macros */
#define RSTATE r0
#define RDATA r1
#define RNBLKS r2
#define ROLDSTACK r3
#define RWK lr
#define _a r4
#define _b r5
#define _c r6
#define _d r7
#define _e r8
#define RT0 r9
#define RT1 r10
#define RT2 r11
#define RT3 r12
/* W0-W7: rotating window over the message schedule, four words each. */
#define W0 q0
#define W1 q7
#define W2 q2
#define W3 q3
#define W4 q4
#define W5 q6
#define W6 q5
#define W7 q1
#define tmp0 q8
#define tmp1 q9
#define tmp2 q10
#define tmp3 q11
/* qK1-qK4: the four round constants broadcast across vector lanes. */
#define qK1 q12
#define qK2 q13
#define qK3 q14
#define qK4 q15
/*
 * ARM_LE(): emit its code only on little-endian kernels.  The vrev32.8
 * uses below convert the big-endian message words ("big => little").
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ARM_LE(code...)
#else
#define ARM_LE(code...) code
#endif
/* Round function macros. */
/* WK_offs(i): byte offset of W[i]+K in the 16-entry ring kept on the stack. */
#define WK_offs(i) (((i) & 15) * 4)
/*
 * _R_F1: rounds 0-19, f = Ch(b,c,d) = (b & c) | (~b & d).
 * Each round macro interleaves three "pre" steps of the NEON message
 * schedule precalculation for later rounds (see the WPRECALC macros).
 */
#define _R_F1(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
ldr RT3, [sp, WK_offs(i)]; \
pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
bic RT0, d, b; \
add e, e, a, ror #(32 - 5); \
and RT1, c, b; \
pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
add RT0, RT0, RT3; \
add e, e, RT1; \
ror b, #(32 - 30); \
pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
add e, e, RT0;
/* _R_F2: rounds 20-39 (and, via _R_F4, 60-79), f = b ^ c ^ d. */
#define _R_F2(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
ldr RT3, [sp, WK_offs(i)]; \
pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
eor RT0, d, b; \
add e, e, a, ror #(32 - 5); \
eor RT0, RT0, c; \
pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
add e, e, RT3; \
ror b, #(32 - 30); \
pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
add e, e, RT0; \
#define _R_F3(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
ldr RT3, [sp, WK_offs(i)]; \
pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
eor RT0, b, c; \
and RT1, b, c; \
add e, e, a, ror #(32 - 5); \
pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
and RT0, RT0, d; \
add RT1, RT1, RT3; \
add e, e, RT0; \
ror b, #(32 - 30); \
pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
add e, e, RT1;
/* _R_F4 (rounds 60-79) reuses the parity round of _R_F2. */
#define _R_F4(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
_R_F2(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
/* _R/R: dispatch on f; R() passes dummy pre-steps (no precalc work). */
#define _R(a,b,c,d,e,f,i,pre1,pre2,pre3,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
_R_##f(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
#define R(a,b,c,d,e,f,i) \
_R_##f(a,b,c,d,e,i,dummy,dummy,dummy,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
/* dummy(): no-op placeholder for unused precalc slots. */
#define dummy(...)
/* Input expansion macros. */
/* Each WPRECALC_x_y_N macro below is one pipeline step of the schedule
 * computation, designed to be interleaved into the scalar rounds. */
/********* Precalc macros for rounds 0-15 *************************************/
#define W_PRECALC_00_15() \
add RWK, sp, #(WK_offs(0)); \
\
vld1.32 {W0, W7}, [RDATA]!; \
ARM_LE(vrev32.8 W0, W0; ) /* big => little */ \
vld1.32 {W6, W5}, [RDATA]!; \
vadd.u32 tmp0, W0, curK; \
ARM_LE(vrev32.8 W7, W7; ) /* big => little */ \
ARM_LE(vrev32.8 W6, W6; ) /* big => little */ \
vadd.u32 tmp1, W7, curK; \
ARM_LE(vrev32.8 W5, W5; ) /* big => little */ \
vadd.u32 tmp2, W6, curK; \
vst1.32 {tmp0, tmp1}, [RWK]!; \
vadd.u32 tmp3, W5, curK; \
vst1.32 {tmp2, tmp3}, [RWK]; \
#define WPRECALC_00_15_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vld1.32 {W0, W7}, [RDATA]!; \
#define WPRECALC_00_15_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
add RWK, sp, #(WK_offs(0)); \
#define WPRECALC_00_15_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
ARM_LE(vrev32.8 W0, W0; ) /* big => little */ \
#define WPRECALC_00_15_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vld1.32 {W6, W5}, [RDATA]!; \
#define WPRECALC_00_15_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vadd.u32 tmp0, W0, curK; \
#define WPRECALC_00_15_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
ARM_LE(vrev32.8 W7, W7; ) /* big => little */ \
#define WPRECALC_00_15_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
ARM_LE(vrev32.8 W6, W6; ) /* big => little */ \
#define WPRECALC_00_15_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vadd.u32 tmp1, W7, curK; \
#define WPRECALC_00_15_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
ARM_LE(vrev32.8 W5, W5; ) /* big => little */ \
#define WPRECALC_00_15_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vadd.u32 tmp2, W6, curK; \
#define WPRECALC_00_15_10(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vst1.32 {tmp0, tmp1}, [RWK]!; \
#define WPRECALC_00_15_11(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vadd.u32 tmp3, W5, curK; \
#define WPRECALC_00_15_12(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vst1.32 {tmp2, tmp3}, [RWK]; \
/********* Precalc macros for rounds 16-31 ************************************/
#define WPRECALC_16_31_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor tmp0, tmp0; \
vext.8 W, W_m16, W_m12, #8; \
#define WPRECALC_16_31_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
add RWK, sp, #(WK_offs(i)); \
vext.8 tmp0, W_m04, tmp0, #4; \
#define WPRECALC_16_31_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor tmp0, tmp0, W_m16; \
veor.32 W, W, W_m08; \
#define WPRECALC_16_31_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor tmp1, tmp1; \
veor W, W, tmp0; \
#define WPRECALC_16_31_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vshl.u32 tmp0, W, #1; \
#define WPRECALC_16_31_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vext.8 tmp1, tmp1, W, #(16-12); \
vshr.u32 W, W, #31; \
#define WPRECALC_16_31_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vorr tmp0, tmp0, W; \
vshr.u32 W, tmp1, #30; \
#define WPRECALC_16_31_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vshl.u32 tmp1, tmp1, #2; \
#define WPRECALC_16_31_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor tmp0, tmp0, W; \
#define WPRECALC_16_31_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor W, tmp0, tmp1; \
#define WPRECALC_16_31_10(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vadd.u32 tmp0, W, curK; \
#define WPRECALC_16_31_11(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vst1.32 {tmp0}, [RWK];
/********* Precalc macros for rounds 32-79 ************************************/
/* Computes four new schedule words at once from older W vectors, then
 * stores W+K into the on-stack ring at WK_offs(i & ~3). */
#define WPRECALC_32_79_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor W, W_m28; \
#define WPRECALC_32_79_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vext.8 tmp0, W_m08, W_m04, #8; \
#define WPRECALC_32_79_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor W, W_m16; \
#define WPRECALC_32_79_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor W, tmp0; \
#define WPRECALC_32_79_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
add RWK, sp, #(WK_offs(i&~3)); \
#define WPRECALC_32_79_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vshl.u32 tmp1, W, #2; \
#define WPRECALC_32_79_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vshr.u32 tmp0, W, #30; \
#define WPRECALC_32_79_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vorr W, tmp0, tmp1; \
#define WPRECALC_32_79_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vadd.u32 tmp0, W, curK; \
#define WPRECALC_32_79_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vst1.32 {tmp0}, [RWK];
/*
* Transform nblks*64 bytes (nblks*16 32-bit words) at DATA.
*
* unsigned int
* sha1_transform_neon (void *ctx, const unsigned char *data,
* unsigned int nblks)
*/
.align 3
ENTRY(sha1_transform_neon)
/* input:
* r0: ctx, CTX
* r1: data (64*nblks bytes)
* r2: nblks
*/
cmp RNBLKS, #0;
beq .Ldo_nothing;
push {r4-r12, lr};
/*vpush {q4-q7};*/
adr RT3, .LK_VEC;
mov ROLDSTACK, sp;
/* Align stack. */
sub RT0, sp, #(16*4);
and RT0, #(~(16-1));
mov sp, RT0;
vld1.32 {qK1-qK2}, [RT3]!; /* Load K1,K2 */
/* Get the values of the chaining variables. */
ldm RSTATE, {_a-_e};
vld1.32 {qK3-qK4}, [RT3]; /* Load K3,K4 */
#undef curK
#define curK qK1
/* Precalc 0-15. */
W_PRECALC_00_15();
.Loop:
/* Transform 0-15 + Precalc 16-31. */
_R( _a, _b, _c, _d, _e, F1, 0,
WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 16,
W4, W5, W6, W7, W0, _, _, _ );
_R( _e, _a, _b, _c, _d, F1, 1,
WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 16,
W4, W5, W6, W7, W0, _, _, _ );
_R( _d, _e, _a, _b, _c, F1, 2,
WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 16,
W4, W5, W6, W7, W0, _, _, _ );
_R( _c, _d, _e, _a, _b, F1, 3,
WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,16,
W4, W5, W6, W7, W0, _, _, _ );
#undef curK
#define curK qK2
_R( _b, _c, _d, _e, _a, F1, 4,
WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 20,
W3, W4, W5, W6, W7, _, _, _ );
_R( _a, _b, _c, _d, _e, F1, 5,
WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 20,
W3, W4, W5, W6, W7, _, _, _ );
_R( _e, _a, _b, _c, _d, F1, 6,
WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 20,
W3, W4, W5, W6, W7, _, _, _ );
_R( _d, _e, _a, _b, _c, F1, 7,
WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,20,
W3, W4, W5, W6, W7, _, _, _ );
_R( _c, _d, _e, _a, _b, F1, 8,
WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 24,
W2, W3, W4, W5, W6, _, _, _ );
_R( _b, _c, _d, _e, _a, F1, 9,
WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 24,
W2, W3, W4, W5, W6, _, _, _ );
_R( _a, _b, _c, _d, _e, F1, 10,
WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 24,
W2, W3, W4, W5, W6, _, _, _ );
_R( _e, _a, _b, _c, _d, F1, 11,
WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,24,
W2, W3, W4, W5, W6, _, _, _ );
_R( _d, _e, _a, _b, _c, F1, 12,
WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 28,
W1, W2, W3, W4, W5, _, _, _ );
_R( _c, _d, _e, _a, _b, F1, 13,
WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 28,
W1, W2, W3, W4, W5, _, _, _ );
_R( _b, _c, _d, _e, _a, F1, 14,
WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 28,
W1, W2, W3, W4, W5, _, _, _ );
_R( _a, _b, _c, _d, _e, F1, 15,
WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,28,
W1, W2, W3, W4, W5, _, _, _ );
/* Transform 16-63 + Precalc 32-79. */
_R( _e, _a, _b, _c, _d, F1, 16,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 32,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _d, _e, _a, _b, _c, F1, 17,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 32,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _c, _d, _e, _a, _b, F1, 18,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 32,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _b, _c, _d, _e, _a, F1, 19,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 32,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _a, _b, _c, _d, _e, F2, 20,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 36,
W7, W0, W1, W2, W3, W4, W5, W6);
_R( _e, _a, _b, _c, _d, F2, 21,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 36,
W7, W0, W1, W2, W3, W4, W5, W6);
_R( _d, _e, _a, _b, _c, F2, 22,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 36,
W7, W0, W1, W2, W3, W4, W5, W6);
_R( _c, _d, _e, _a, _b, F2, 23,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 36,
W7, W0, W1, W2, W3, W4, W5, W6);
#undef curK
#define curK qK3
_R( _b, _c, _d, _e, _a, F2, 24,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 40,
W6, W7, W0, W1, W2, W3, W4, W5);
_R( _a, _b, _c, _d, _e, F2, 25,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 40,
W6, W7, W0, W1, W2, W3, W4, W5);
_R( _e, _a, _b, _c, _d, F2, 26,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 40,
W6, W7, W0, W1, W2, W3, W4, W5);
_R( _d, _e, _a, _b, _c, F2, 27,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 40,
W6, W7, W0, W1, W2, W3, W4, W5);
_R( _c, _d, _e, _a, _b, F2, 28,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 44,
W5, W6, W7, W0, W1, W2, W3, W4);
_R( _b, _c, _d, _e, _a, F2, 29,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 44,
W5, W6, W7, W0, W1, W2, W3, W4);
_R( _a, _b, _c, _d, _e, F2, 30,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 44,
W5, W6, W7, W0, W1, W2, W3, W4);
_R( _e, _a, _b, _c, _d, F2, 31,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 44,
W5, W6, W7, W0, W1, W2, W3, W4);
_R( _d, _e, _a, _b, _c, F2, 32,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 48,
W4, W5, W6, W7, W0, W1, W2, W3);
_R( _c, _d, _e, _a, _b, F2, 33,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 48,
W4, W5, W6, W7, W0, W1, W2, W3);
_R( _b, _c, _d, _e, _a, F2, 34,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 48,
W4, W5, W6, W7, W0, W1, W2, W3);
_R( _a, _b, _c, _d, _e, F2, 35,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 48,
W4, W5, W6, W7, W0, W1, W2, W3);
_R( _e, _a, _b, _c, _d, F2, 36,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 52,
W3, W4, W5, W6, W7, W0, W1, W2);
_R( _d, _e, _a, _b, _c, F2, 37,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 52,
W3, W4, W5, W6, W7, W0, W1, W2);
_R( _c, _d, _e, _a, _b, F2, 38,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 52,
W3, W4, W5, W6, W7, W0, W1, W2);
_R( _b, _c, _d, _e, _a, F2, 39,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 52,
W3, W4, W5, W6, W7, W0, W1, W2);
_R( _a, _b, _c, _d, _e, F3, 40,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 56,
W2, W3, W4, W5, W6, W7, W0, W1);
_R( _e, _a, _b, _c, _d, F3, 41,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 56,
W2, W3, W4, W5, W6, W7, W0, W1);
_R( _d, _e, _a, _b, _c, F3, 42,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 56,
W2, W3, W4, W5, W6, W7, W0, W1);
_R( _c, _d, _e, _a, _b, F3, 43,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 56,
W2, W3, W4, W5, W6, W7, W0, W1);
#undef curK
#define curK qK4
_R( _b, _c, _d, _e, _a, F3, 44,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 60,
W1, W2, W3, W4, W5, W6, W7, W0);
_R( _a, _b, _c, _d, _e, F3, 45,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 60,
W1, W2, W3, W4, W5, W6, W7, W0);
_R( _e, _a, _b, _c, _d, F3, 46,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 60,
W1, W2, W3, W4, W5, W6, W7, W0);
_R( _d, _e, _a, _b, _c, F3, 47,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 60,
W1, W2, W3, W4, W5, W6, W7, W0);
_R( _c, _d, _e, _a, _b, F3, 48,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 64,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _b, _c, _d, _e, _a, F3, 49,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 64,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _a, _b, _c, _d, _e, F3, 50,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 64,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _e, _a, _b, _c, _d, F3, 51,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 64,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _d, _e, _a, _b, _c, F3, 52,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 68,
W7, W0, W1, W2, W3, W4, W5, W6);
_R( _c, _d, _e, _a, _b, F3, 53,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 68,
W7, W0, W1, W2, W3, W4, W5, W6);
_R( _b, _c, _d, _e, _a, F3, 54,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 68,
W7, W0, W1, W2, W3, W4, W5, W6);
_R( _a, _b, _c, _d, _e, F3, 55,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 68,
W7, W0, W1, W2, W3, W4, W5, W6);
_R( _e, _a, _b, _c, _d, F3, 56,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 72,
W6, W7, W0, W1, W2, W3, W4, W5);
_R( _d, _e, _a, _b, _c, F3, 57,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 72,
W6, W7, W0, W1, W2, W3, W4, W5);
_R( _c, _d, _e, _a, _b, F3, 58,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 72,
W6, W7, W0, W1, W2, W3, W4, W5);
_R( _b, _c, _d, _e, _a, F3, 59,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 72,
W6, W7, W0, W1, W2, W3, W4, W5);
subs RNBLKS, #1;
_R( _a, _b, _c, _d, _e, F4, 60,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 76,
W5, W6, W7, W0, W1, W2, W3, W4);
_R( _e, _a, _b, _c, _d, F4, 61,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 76,
W5, W6, W7, W0, W1, W2, W3, W4);
_R( _d, _e, _a, _b, _c, F4, 62,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 76,
W5, W6, W7, W0, W1, W2, W3, W4);
_R( _c, _d, _e, _a, _b, F4, 63,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 76,
W5, W6, W7, W0, W1, W2, W3, W4);
beq .Lend;
/* Transform 64-79 + Precalc 0-15 of next block. */
#undef curK
#define curK qK1
_R( _b, _c, _d, _e, _a, F4, 64,
WPRECALC_00_15_0, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _a, _b, _c, _d, _e, F4, 65,
WPRECALC_00_15_1, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _e, _a, _b, _c, _d, F4, 66,
WPRECALC_00_15_2, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _d, _e, _a, _b, _c, F4, 67,
WPRECALC_00_15_3, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _c, _d, _e, _a, _b, F4, 68,
dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _b, _c, _d, _e, _a, F4, 69,
dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _a, _b, _c, _d, _e, F4, 70,
WPRECALC_00_15_4, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _e, _a, _b, _c, _d, F4, 71,
WPRECALC_00_15_5, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _d, _e, _a, _b, _c, F4, 72,
dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _c, _d, _e, _a, _b, F4, 73,
dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _b, _c, _d, _e, _a, F4, 74,
WPRECALC_00_15_6, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _a, _b, _c, _d, _e, F4, 75,
WPRECALC_00_15_7, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _e, _a, _b, _c, _d, F4, 76,
WPRECALC_00_15_8, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _d, _e, _a, _b, _c, F4, 77,
WPRECALC_00_15_9, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _c, _d, _e, _a, _b, F4, 78,
WPRECALC_00_15_10, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _b, _c, _d, _e, _a, F4, 79,
WPRECALC_00_15_11, dummy, WPRECALC_00_15_12, _, _, _, _, _, _, _, _, _ );
/* Update the chaining variables. */
ldm RSTATE, {RT0-RT3};
add _a, RT0;
ldr RT0, [RSTATE, #state_h4];
add _b, RT1;
add _c, RT2;
add _d, RT3;
add _e, RT0;
stm RSTATE, {_a-_e};
b .Loop;
.Lend:
/* Transform 64-79 */
R( _b, _c, _d, _e, _a, F4, 64 );
R( _a, _b, _c, _d, _e, F4, 65 );
R( _e, _a, _b, _c, _d, F4, 66 );
R( _d, _e, _a, _b, _c, F4, 67 );
R( _c, _d, _e, _a, _b, F4, 68 );
R( _b, _c, _d, _e, _a, F4, 69 );
R( _a, _b, _c, _d, _e, F4, 70 );
R( _e, _a, _b, _c, _d, F4, 71 );
R( _d, _e, _a, _b, _c, F4, 72 );
R( _c, _d, _e, _a, _b, F4, 73 );
R( _b, _c, _d, _e, _a, F4, 74 );
R( _a, _b, _c, _d, _e, F4, 75 );
R( _e, _a, _b, _c, _d, F4, 76 );
R( _d, _e, _a, _b, _c, F4, 77 );
R( _c, _d, _e, _a, _b, F4, 78 );
R( _b, _c, _d, _e, _a, F4, 79 );
mov sp, ROLDSTACK;
/* Update the chaining variables. */
ldm RSTATE, {RT0-RT3};
add _a, RT0;
ldr RT0, [RSTATE, #state_h4];
add _b, RT1;
add _c, RT2;
add _d, RT3;
/*vpop {q4-q7};*/
add _e, RT0;
stm RSTATE, {_a-_e};
pop {r4-r12, pc};
.Ldo_nothing:
bx lr
ENDPROC(sha1_transform_neon)
|
aixcc-public/challenge-001-exemplar-source
| 10,472
|
arch/arm/crypto/chacha-scalar-core.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2018 Google, Inc.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
* Design notes:
*
* 16 registers would be needed to hold the state matrix, but only 14 are
* available because 'sp' and 'pc' cannot be used. So we spill the elements
* (x8, x9) to the stack and swap them out with (x10, x11). This adds one
* 'ldrd' and one 'strd' instruction per round.
*
* All rotates are performed using the implicit rotate operand accepted by the
* 'add' and 'eor' instructions. This is faster than using explicit rotate
* instructions. To make this work, we allow the values in the second and last
* rows of the ChaCha state matrix (rows 'b' and 'd') to temporarily have the
* wrong rotation amount. The rotation amount is then fixed up just in time
* when the values are used. 'brot' is the number of bits the values in row 'b'
* need to be rotated right to arrive at the correct values, and 'drot'
* similarly for row 'd'. (brot, drot) start out as (0, 0) but we make it such
* that they end up as (25, 24) after every round.
*/
// ChaCha state registers
X0 .req r0
X1 .req r1
X2 .req r2
X3 .req r3
X4 .req r4
X5 .req r5
X6 .req r6
X7 .req r7
X8_X10 .req r8 // shared by x8 and x10
X9_X11 .req r9 // shared by x9 and x11
X12 .req r10
X13 .req r11
X14 .req r12
X15 .req r14
// Byte-swap the four 32-bit words \a-\d on big-endian builds, so the
// generated keystream words are always in ChaCha's little-endian order.
// Compiles to nothing on little-endian kernels.  \tmp is a scratch
// register for the rev_l helper macro.
.macro _le32_bswap_4x a, b, c, d, tmp
#ifdef __ARMEB__
rev_l \a, \tmp
rev_l \b, \tmp
rev_l \c, \tmp
rev_l \d, \tmp
#endif
.endm
// Load the register pair (\a, \b) from [\src + \offset].  Uses a single
// ldrd on ARMv6+; on older cores it falls back to two ldr instructions
// (pre-v6 ldrd has stricter register-pair/alignment constraints).
.macro __ldrd a, b, src, offset
#if __LINUX_ARM_ARCH__ >= 6
ldrd \a, \b, [\src, #\offset]
#else
ldr \a, [\src, #\offset]
ldr \b, [\src, #\offset + 4]
#endif
.endm
// Store the register pair (\a, \b) to [\dst + \offset].  Single strd on
// ARMv6+, two str instructions otherwise; mirror of __ldrd above.
.macro __strd a, b, dst, offset
#if __LINUX_ARM_ARCH__ >= 6
strd \a, \b, [\dst, #\offset]
#else
str \a, [\dst, #\offset]
str \b, [\dst, #\offset + 4]
#endif
.endm
// Perform two interleaved ChaCha quarterrounds:
//   (\a1, \b1, \c1, \d1) and (\a2, \b2, \c2, \d2).
// All rotations are deferred into the shifter operand of 'add'/'eor':
// on entry the 'b' inputs are rotated right by 'brot' and the 'd'
// inputs by 'drot'; on exit they are left rotated by 20 and 24
// respectively (see the design notes at the top of the file).
.macro _halfround a1, b1, c1, d1, a2, b2, c2, d2
// a += b; d ^= a; d = rol(d, 16);
add \a1, \a1, \b1, ror #brot
add \a2, \a2, \b2, ror #brot
eor \d1, \a1, \d1, ror #drot
eor \d2, \a2, \d2, ror #drot
// drot == 32 - 16 == 16
// c += d; b ^= c; b = rol(b, 12);
add \c1, \c1, \d1, ror #16
add \c2, \c2, \d2, ror #16
eor \b1, \c1, \b1, ror #brot
eor \b2, \c2, \b2, ror #brot
// brot == 32 - 12 == 20
// a += b; d ^= a; d = rol(d, 8);
add \a1, \a1, \b1, ror #20
add \a2, \a2, \b2, ror #20
eor \d1, \a1, \d1, ror #16
eor \d2, \a2, \d2, ror #16
// drot == 32 - 8 == 24
// c += d; b ^= c; b = rol(b, 7);
add \c1, \c1, \d1, ror #24
add \c2, \c2, \d2, ror #24
eor \b1, \c1, \b1, ror #20
eor \b2, \c2, \b2, ror #20
// brot == 32 - 7 == 25
.endm
// One ChaCha double round (column round followed by diagonal round).
// Because x8/x10 and x9/x11 share registers, each half swaps the
// resident pair with the pair spilled at sp+0 (x8-x9) / sp+8 (x10-x11).
// On exit (brot, drot) == (25, 24); the caller's .set values pick up
// where the previous round left off.
.macro _doubleround
// column round
// quarterrounds: (x0, x4, x8, x12) and (x1, x5, x9, x13)
_halfround X0, X4, X8_X10, X12, X1, X5, X9_X11, X13
// save (x8, x9); restore (x10, x11)
__strd X8_X10, X9_X11, sp, 0
__ldrd X8_X10, X9_X11, sp, 8
// quarterrounds: (x2, x6, x10, x14) and (x3, x7, x11, x15)
_halfround X2, X6, X8_X10, X14, X3, X7, X9_X11, X15
.set brot, 25
.set drot, 24
// diagonal round
// quarterrounds: (x0, x5, x10, x15) and (x1, x6, x11, x12)
_halfround X0, X5, X8_X10, X15, X1, X6, X9_X11, X12
// save (x10, x11); restore (x8, x9)
__strd X8_X10, X9_X11, sp, 8
__ldrd X8_X10, X9_X11, sp, 0
// quarterrounds: (x2, x7, x8, x13) and (x3, x4, x9, x14)
_halfround X2, X7, X8_X10, X13, X3, X4, X9_X11, X14
.endm
// Run the full ChaCha permutation (\nrounds rounds, i.e. \nrounds/2
// double rounds) over the state.  Starts with no deferred rotations;
// after the macro, rows 'b' and 'd' are left rotated by (25, 24), which
// the caller must undo (or fold into subsequent arithmetic).
.macro _chacha_permute nrounds
.set brot, 0
.set drot, 0
.rept \nrounds / 2
_doubleround
.endr
.endm
// Generate keystream and XOR it over the whole input, \nrounds rounds
// per 64-byte block.  On entry the stack holds the layout set up by
// chacha_doarm: two spill slots, a copy of x10-x11, the initial state
// x0-x15, then the saved OUT, IN and LEN arguments.  Falls back to a
// bounce-buffer path for partial blocks or unaligned pointers.
.macro _chacha nrounds
.Lnext_block\@:
// Stack: unused0-unused1 x10-x11 x0-x15 OUT IN LEN
// Registers contain x0-x9,x12-x15.
// Do the core ChaCha permutation to update x0-x15.
_chacha_permute \nrounds
add sp, #8
// Stack: x10-x11 orig_x0-orig_x15 OUT IN LEN
// Registers contain x0-x9,x12-x15.
// x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'.
// Free up some registers (r8-r12,r14) by pushing (x8-x9,x12-x15).
push {X8_X10, X9_X11, X12, X13, X14, X15}
// Load (OUT, IN, LEN).
ldr r14, [sp, #96]
ldr r12, [sp, #100]
ldr r11, [sp, #104]
orr r10, r14, r12
// Use slow path if fewer than 64 bytes remain.
cmp r11, #64
blt .Lxor_slowpath\@
// Use slow path if IN and/or OUT isn't 4-byte aligned. Needed even on
// ARMv6+, since ldmia and stmia (used below) still require alignment.
tst r10, #3
bne .Lxor_slowpath\@
// Fast path: XOR 64 bytes of aligned data.
// Stack: x8-x9 x12-x15 x10-x11 orig_x0-orig_x15 OUT IN LEN
// Registers: r0-r7 are x0-x7; r8-r11 are free; r12 is IN; r14 is OUT.
// x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'.
// x0-x3
__ldrd r8, r9, sp, 32
__ldrd r10, r11, sp, 40
add X0, X0, r8
add X1, X1, r9
add X2, X2, r10
add X3, X3, r11
_le32_bswap_4x X0, X1, X2, X3, r8
ldmia r12!, {r8-r11}
eor X0, X0, r8
eor X1, X1, r9
eor X2, X2, r10
eor X3, X3, r11
stmia r14!, {X0-X3}
// x4-x7 (feed-forward add undoes the pending 'brot' rotation)
__ldrd r8, r9, sp, 48
__ldrd r10, r11, sp, 56
add X4, r8, X4, ror #brot
add X5, r9, X5, ror #brot
ldmia r12!, {X0-X3}
add X6, r10, X6, ror #brot
add X7, r11, X7, ror #brot
_le32_bswap_4x X4, X5, X6, X7, r8
eor X4, X4, X0
eor X5, X5, X1
eor X6, X6, X2
eor X7, X7, X3
stmia r14!, {X4-X7}
// x8-x15
pop {r0-r7} // (x8-x9,x12-x15,x10-x11)
__ldrd r8, r9, sp, 32
__ldrd r10, r11, sp, 40
add r0, r0, r8 // x8
add r1, r1, r9 // x9
add r6, r6, r10 // x10
add r7, r7, r11 // x11
_le32_bswap_4x r0, r1, r6, r7, r8
ldmia r12!, {r8-r11}
eor r0, r0, r8 // x8
eor r1, r1, r9 // x9
eor r6, r6, r10 // x10
eor r7, r7, r11 // x11
stmia r14!, {r0,r1,r6,r7}
ldmia r12!, {r0,r1,r6,r7}
__ldrd r8, r9, sp, 48
__ldrd r10, r11, sp, 56
add r2, r8, r2, ror #drot // x12
add r3, r9, r3, ror #drot // x13
add r4, r10, r4, ror #drot // x14
add r5, r11, r5, ror #drot // x15
_le32_bswap_4x r2, r3, r4, r5, r9
ldr r9, [sp, #72] // load LEN
eor r2, r2, r0 // x12
eor r3, r3, r1 // x13
eor r4, r4, r6 // x14
eor r5, r5, r7 // x15
subs r9, #64 // decrement and check LEN
stmia r14!, {r2-r5}
beq .Ldone\@
.Lprepare_for_next_block\@:
// Stack: x0-x15 OUT IN LEN
// Increment block counter (x12)
add r8, #1
// Store updated (OUT, IN, LEN)
str r14, [sp, #64]
str r12, [sp, #68]
str r9, [sp, #72]
mov r14, sp
// Store updated block counter (x12)
str r8, [sp, #48]
// Re-create the two spill slots + x10-x11 copy below the state
sub sp, #16
// Reload state and do next block
ldmia r14!, {r0-r11} // load x0-x11
__strd r10, r11, sp, 8 // store x10-x11 before state
ldmia r14, {r10-r12,r14} // load x12-x15
b .Lnext_block\@
.Lxor_slowpath\@:
// Slow path: < 64 bytes remaining, or unaligned input or output buffer.
// We handle it by storing the 64 bytes of keystream to the stack, then
// XOR-ing the needed portion with the data.
// Allocate keystream buffer
sub sp, #64
mov r14, sp
// Stack: ks0-ks15 x8-x9 x12-x15 x10-x11 orig_x0-orig_x15 OUT IN LEN
// Registers: r0-r7 are x0-x7; r8-r11 are free; r12 is IN; r14 is &ks0.
// x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'.
// Save keystream for x0-x3
__ldrd r8, r9, sp, 96
__ldrd r10, r11, sp, 104
add X0, X0, r8
add X1, X1, r9
add X2, X2, r10
add X3, X3, r11
_le32_bswap_4x X0, X1, X2, X3, r8
stmia r14!, {X0-X3}
// Save keystream for x4-x7
__ldrd r8, r9, sp, 112
__ldrd r10, r11, sp, 120
add X4, r8, X4, ror #brot
add X5, r9, X5, ror #brot
add X6, r10, X6, ror #brot
add X7, r11, X7, ror #brot
_le32_bswap_4x X4, X5, X6, X7, r8
add r8, sp, #64
stmia r14!, {X4-X7}
// Save keystream for x8-x15
ldm r8, {r0-r7} // (x8-x9,x12-x15,x10-x11)
__ldrd r8, r9, sp, 128
__ldrd r10, r11, sp, 136
add r0, r0, r8 // x8
add r1, r1, r9 // x9
add r6, r6, r10 // x10
add r7, r7, r11 // x11
_le32_bswap_4x r0, r1, r6, r7, r8
stmia r14!, {r0,r1,r6,r7}
__ldrd r8, r9, sp, 144
__ldrd r10, r11, sp, 152
add r2, r8, r2, ror #drot // x12
add r3, r9, r3, ror #drot // x13
add r4, r10, r4, ror #drot // x14
add r5, r11, r5, ror #drot // x15
_le32_bswap_4x r2, r3, r4, r5, r9
stmia r14, {r2-r5}
// Stack: ks0-ks15 unused0-unused7 x0-x15 OUT IN LEN
// Registers: r8 is block counter, r12 is IN.
ldr r9, [sp, #168] // LEN
ldr r14, [sp, #160] // OUT
cmp r9, #64
mov r0, sp
movle r1, r9
movgt r1, #64
// r1 is number of bytes to XOR, in range [1, 64]
.if __LINUX_ARM_ARCH__ < 6
orr r2, r12, r14
tst r2, #3 // IN or OUT misaligned?
bne .Lxor_next_byte\@
.endif
// XOR a word at a time
.rept 16
subs r1, #4
blt .Lxor_words_done\@
ldr r2, [r12], #4
ldr r3, [r0], #4
eor r2, r2, r3
str r2, [r14], #4
.endr
b .Lxor_slowpath_done\@
.Lxor_words_done\@:
ands r1, r1, #3
beq .Lxor_slowpath_done\@
// XOR a byte at a time
.Lxor_next_byte\@:
ldrb r2, [r12], #1
ldrb r3, [r0], #1
eor r2, r2, r3
strb r2, [r14], #1
subs r1, #1
bne .Lxor_next_byte\@
.Lxor_slowpath_done\@:
subs r9, #64
// Pop keystream buffer (64) + saved x8-x9,x12-x15 (24) + x10-x11 (8)
add sp, #96
bgt .Lprepare_for_next_block\@
.Ldone\@:
.endm // _chacha
/*
* void chacha_doarm(u8 *dst, const u8 *src, unsigned int bytes,
* const u32 *state, int nrounds);
*/
ENTRY(chacha_doarm)
// r0 = dst, r1 = src, r2 = bytes, r3 = state, [sp] = nrounds (12 or 20)
cmp r2, #0 // len == 0?
reteq lr
ldr ip, [sp]
cmp ip, #12 // ChaCha12?  Flags survive until 'beq 1f' below:
// none of the intervening instructions set flags.
push {r0-r2,r4-r11,lr}
// Push state x0-x15 onto stack.
// Also store an extra copy of x10-x11 just before the state.
add X12, r3, #48
ldm X12, {X12,X13,X14,X15}
push {X12,X13,X14,X15}
sub sp, sp, #64
__ldrd X8_X10, X9_X11, r3, 40
__strd X8_X10, X9_X11, sp, 8 // x10-x11 copy used by _doubleround
__strd X8_X10, X9_X11, sp, 56 // x10-x11 inside the state copy
ldm r3, {X0-X9_X11}
__strd X0, X1, sp, 16
__strd X2, X3, sp, 24
__strd X4, X5, sp, 32
__strd X6, X7, sp, 40
__strd X8_X10, X9_X11, sp, 48
beq 1f
_chacha 20
0: add sp, #76 // drop spill slots, x10-x11 copy and x0-x12
pop {r4-r11, pc}
1: _chacha 12
b 0b
ENDPROC(chacha_doarm)
/*
* void hchacha_block_arm(const u32 state[16], u32 out[8], int nrounds);
*/
ENTRY(hchacha_block_arm)
// r0 = state (16 words), r1 = out (8 words), r2 = nrounds (12 or 20)
push {r1,r4-r11,lr}
cmp r2, #12 // ChaCha12 ?
mov r14, r0
ldmia r14!, {r0-r11} // load x0-x11
push {r10-r11} // store x10-x11 to stack
ldm r14, {r10-r12,r14} // load x12-x15
sub sp, #8 // spill slots used by _doubleround
beq 1f
_chacha_permute 20
// Skip over (unused0-unused1, x10-x11)
0: add sp, #16
// Fix up rotations of x12-x15 (row 'd' is left rotated by 'drot';
// rows 'a'/'c' carry no deferred rotation, so x0-x3 need no fix-up)
ror X12, X12, #drot
ror X13, X13, #drot
pop {r4} // load 'out'
ror X14, X14, #drot
ror X15, X15, #drot
// Store (x0-x3,x12-x15) to 'out'
stm r4, {X0,X1,X2,X3,X12,X13,X14,X15}
pop {r4-r11,pc}
1: _chacha_permute 12
b 0b
ENDPROC(hchacha_block_arm)
|
aixcc-public/challenge-001-exemplar-source
| 2,566
|
arch/arm/crypto/sha1-ce-core.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* sha1-ce-core.S - SHA-1 secure hash using ARMv8 Crypto Extensions
*
* Copyright (C) 2015 Linaro Ltd.
* Author: Ard Biesheuvel <ard.biesheuvel@linaro.org>
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
.arch armv8-a
.fpu crypto-neon-fp-armv8
k0 .req q0
k1 .req q1
k2 .req q2
k3 .req q3
ta0 .req q4
ta1 .req q5
tb0 .req q5
tb1 .req q4
dga .req q6
dgb .req q7
dgbs .req s28
dg0 .req q12
dg1a0 .req q13
dg1a1 .req q14
dg1b0 .req q14
dg1b1 .req q13
/*
 * Perform 4 SHA-1 rounds with the crypto-extension instruction selected
 * by \op (c = sha1c/choose, p = sha1p/parity, m = sha1m/majority).
 * \ev (0/1) selects one of the two ping-ponged register sets: ta0/tb0
 * and dg1a0/dg1b0 alias the opposite q registers to ta1/tb1 and
 * dg1a1/dg1b1, so consecutive even/odd invocations interleave without
 * moves.  If \s0 is non-blank, also pre-add round constant \rc to
 * message word q\s0 for an upcoming round group.  \dg1, if given,
 * supplies the second digest operand explicitly (used for the first
 * group, where it comes from dgb rather than a prior sha1h result).
 */
.macro add_only, op, ev, rc, s0, dg1
.ifnb \s0
vadd.u32 tb\ev, q\s0, \rc
.endif
sha1h.32 dg1b\ev, dg0
.ifb \dg1
sha1\op\().32 dg0, dg1a\ev, ta\ev
.else
sha1\op\().32 dg0, \dg1, ta\ev
.endif
.endm
/*
 * Like add_only, but also advance the message schedule: sha1su0/sha1su1
 * derive the next message word block q\s0 from q\s0, q\s1, q\s2, q\s3.
 */
.macro add_update, op, ev, rc, s0, s1, s2, s3, dg1
sha1su0.32 q\s0, q\s1, q\s2
add_only \op, \ev, \rc, \s1, \dg1
sha1su1.32 q\s0, q\s3
.endm
.align 6
.Lsha1_rcon:
.word 0x5a827999, 0x5a827999, 0x5a827999, 0x5a827999
.word 0x6ed9eba1, 0x6ed9eba1, 0x6ed9eba1, 0x6ed9eba1
.word 0x8f1bbcdc, 0x8f1bbcdc, 0x8f1bbcdc, 0x8f1bbcdc
.word 0xca62c1d6, 0xca62c1d6, 0xca62c1d6, 0xca62c1d6
/*
* void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
* int blocks);
*/
ENTRY(sha1_ce_transform)
/*
 * r0 = sst (digest: words a-d at offset 0, e at offset 16)
 * r1 = src (64-byte blocks), r2 = block count
 * dga = digest words a-d, dgbs (s28) = digest word e.
 */
/* load round constants */
adr ip, .Lsha1_rcon
vld1.32 {k0-k1}, [ip, :128]!
vld1.32 {k2-k3}, [ip, :128]
/* load state */
vld1.32 {dga}, [r0]
vldr dgbs, [r0, #16]
/* load input: one 64-byte block into q8-q11 */
0: vld1.32 {q8-q9}, [r1]!
vld1.32 {q10-q11}, [r1]!
subs r2, r2, #1
#ifndef CONFIG_CPU_BIG_ENDIAN
/* SHA-1 message words are big-endian; swap on little-endian CPUs */
vrev32.8 q8, q8
vrev32.8 q9, q9
vrev32.8 q10, q10
vrev32.8 q11, q11
#endif
vadd.u32 ta0, q8, k0
vmov dg0, dga
/* 80 rounds: 5 macros x 4 rounds each of choose/parity/majority/parity */
add_update c, 0, k0, 8, 9, 10, 11, dgb
add_update c, 1, k0, 9, 10, 11, 8
add_update c, 0, k0, 10, 11, 8, 9
add_update c, 1, k0, 11, 8, 9, 10
add_update c, 0, k1, 8, 9, 10, 11
add_update p, 1, k1, 9, 10, 11, 8
add_update p, 0, k1, 10, 11, 8, 9
add_update p, 1, k1, 11, 8, 9, 10
add_update p, 0, k1, 8, 9, 10, 11
add_update p, 1, k2, 9, 10, 11, 8
add_update m, 0, k2, 10, 11, 8, 9
add_update m, 1, k2, 11, 8, 9, 10
add_update m, 0, k2, 8, 9, 10, 11
add_update m, 1, k2, 9, 10, 11, 8
add_update m, 0, k3, 10, 11, 8, 9
add_update p, 1, k3, 11, 8, 9, 10
add_only p, 0, k3, 9
add_only p, 1, k3, 10
add_only p, 0, k3, 11
add_only p, 1
/* update state (feed-forward; dg1a0 holds the final 'e') */
vadd.u32 dga, dga, dg0
vadd.u32 dgb, dgb, dg1a0
bne 0b
/* store new state */
vst1.32 {dga}, [r0]
vstr dgbs, [r0, #16]
bx lr
ENDPROC(sha1_ce_transform)
|
aixcc-public/challenge-001-exemplar-source
| 13,769
|
arch/arm/crypto/sha1-armv4-large.S
|
#define __ARM_ARCH__ __LINUX_ARM_ARCH__
@ SPDX-License-Identifier: GPL-2.0
@ This code is taken from the OpenSSL project but the author (Andy Polyakov)
@ has relicensed it under the GPLv2. Therefore this program is free software;
@ you can redistribute it and/or modify it under the terms of the GNU General
@ Public License version 2 as published by the Free Software Foundation.
@
@ The original headers, including the original license headers, are
@ included below for completeness.
@ ====================================================================
@ Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
@ project. The module is, however, dual licensed under OpenSSL and
@ CRYPTOGAMS licenses depending on where you obtain it. For further
@ details see https://www.openssl.org/~appro/cryptogams/.
@ ====================================================================
@ sha1_block procedure for ARMv4.
@
@ January 2007.
@ Size/performance trade-off
@ ====================================================================
@ impl size in bytes comp cycles[*] measured performance
@ ====================================================================
@ thumb 304 3212 4420
@ armv4-small 392/+29% 1958/+64% 2250/+96%
@ armv4-compact 740/+89% 1552/+26% 1840/+22%
@ armv4-large 1420/+92% 1307/+19% 1370/+34%[***]
@ full unroll ~5100/+260% ~1260/+4% ~1300/+5%
@ ====================================================================
@ thumb = same as 'small' but in Thumb instructions[**] and
@ with recurring code in two private functions;
@ small = detached Xload/update, loops are folded;
@ compact = detached Xload/update, 5x unroll;
@ large = interleaved Xload/update, 5x unroll;
@ full unroll = interleaved Xload/update, full unroll, estimated[!];
@
@ [*] Manually counted instructions in "grand" loop body. Measured
@ performance is affected by prologue and epilogue overhead,
@ i-cache availability, branch penalties, etc.
@ [**] While each Thumb instruction is twice smaller, they are not as
@ diverse as ARM ones: e.g., there are only two arithmetic
@ instructions with 3 arguments, no [fixed] rotate, addressing
@ modes are limited. As result it takes more instructions to do
@ the same job in Thumb, therefore the code is never twice as
@ small and always slower.
@ [***] which is also ~35% better than compiler generated code. Dual-
@ issue Cortex A8 core was measured to process input block in
@ ~990 cycles.
@ August 2010.
@
@ Rescheduling for dual-issue pipeline resulted in 13% improvement on
@ Cortex A8 core and in absolute terms ~870 cycles per input block
@ [or 13.6 cycles per byte].
@ February 2011.
@
@ Profiler-assisted and platform-specific optimization resulted in 10%
@ improvement on Cortex A8 core and 12.2 cycles per byte.
#include <linux/linkage.h>
.text
.align 2
@ void sha1_block_data_order(u32 *state, const u8 *data, size_t num)
@ r0 = state (5 words a-e), r1 = input, r2 = number of 64-byte blocks.
@ Digest words live in r3-r7 (a,b,c,d,e).  Rotations of c,d,e are
@ deferred: they are pre-rotated by 30 at loop entry, folded into the
@ round arithmetic as ',ror#2' operands, and undone in .L_done.
@ The 16-word message schedule X[] lives on the stack as a sliding
@ window; r14 points just above it and words are pushed downward.
ENTRY(sha1_block_data_order)
stmdb sp!,{r4-r12,lr}
add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
ldmia r0,{r3,r4,r5,r6,r7}
.Lloop:
ldr r8,.LK_00_19
mov r14,sp
sub sp,sp,#15*4
mov r5,r5,ror#30
mov r6,r6,ror#30
mov r7,r7,ror#30 @ [6]
@ Rounds 0-14: load big-endian message words, F = Ch(b,c,d).
@ Five rounds per iteration; loop ends when 15 words have been pushed.
.L_00_15:
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r7,r8,r7,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r5,r6 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r7,r8,r7,ror#2 @ E+=K_00_19
eor r10,r5,r6 @ F_xx_xx
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r4,r10,ror#2
add r7,r7,r9 @ E+=X[i]
eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r7,r7,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r6,r8,r6,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r4,r5 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r6,r8,r6,ror#2 @ E+=K_00_19
eor r10,r4,r5 @ F_xx_xx
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r3,r10,ror#2
add r6,r6,r9 @ E+=X[i]
eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r6,r6,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r5,r8,r5,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r3,r4 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r5,r8,r5,ror#2 @ E+=K_00_19
eor r10,r3,r4 @ F_xx_xx
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r7,r10,ror#2
add r5,r5,r9 @ E+=X[i]
eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r5,r5,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r4,r8,r4,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r7,r3 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r4,r8,r4,ror#2 @ E+=K_00_19
eor r10,r7,r3 @ F_xx_xx
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r6,r10,ror#2
add r4,r4,r9 @ E+=X[i]
eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r4,r4,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r3,r8,r3,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r6,r7 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r3,r8,r3,ror#2 @ E+=K_00_19
eor r10,r6,r7 @ F_xx_xx
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r5,r10,ror#2
add r3,r3,r9 @ E+=X[i]
eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r3,r3,r10 @ E+=F_00_19(B,C,D)
cmp r14,sp
bne .L_00_15 @ [((11+4)*5+2)*3]
@ Round 15 (last loaded word), then rounds 16-19 switch to the
@ schedule recurrence X[i] = ROL(X[i-3]^X[i-8]^X[i-14]^X[i-16], 1).
sub sp,sp,#25*4
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r7,r8,r7,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r5,r6 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r7,r8,r7,ror#2 @ E+=K_00_19
eor r10,r5,r6 @ F_xx_xx
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r4,r10,ror#2
add r7,r7,r9 @ E+=X[i]
eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r7,r7,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r6,r8,r6,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r4,r5 @ F_xx_xx
mov r9,r9,ror#31
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r3,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r6,r6,r9 @ E+=X[i]
eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
add r6,r6,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r5,r8,r5,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r3,r4 @ F_xx_xx
mov r9,r9,ror#31
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r7,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r5,r5,r9 @ E+=X[i]
eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
add r5,r5,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r4,r8,r4,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r7,r3 @ F_xx_xx
mov r9,r9,ror#31
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r6,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r4,r4,r9 @ E+=X[i]
eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
add r4,r4,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r3,r8,r3,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r6,r7 @ F_xx_xx
mov r9,r9,ror#31
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r5,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r3,r3,r9 @ E+=X[i]
eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
add r3,r3,r10 @ E+=F_00_19(B,C,D)
ldr r8,.LK_20_39 @ [+15+16*4]
cmn sp,#0 @ [+3], clear carry to denote 20_39
@ Shared loop for rounds 20-39 and 60-79 (F = B^C^D).  The carry flag,
@ preserved across the loop body, distinguishes the two passes.
.L_20_39_or_60_79:
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r7,r8,r7,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r5,r6 @ F_xx_xx
mov r9,r9,ror#31
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r4,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r7,r7,r9 @ E+=X[i]
add r7,r7,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r6,r8,r6,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r4,r5 @ F_xx_xx
mov r9,r9,ror#31
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r3,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r6,r6,r9 @ E+=X[i]
add r6,r6,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r5,r8,r5,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r3,r4 @ F_xx_xx
mov r9,r9,ror#31
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r7,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r5,r5,r9 @ E+=X[i]
add r5,r5,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r4,r8,r4,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r7,r3 @ F_xx_xx
mov r9,r9,ror#31
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r6,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r4,r4,r9 @ E+=X[i]
add r4,r4,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r3,r8,r3,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r6,r7 @ F_xx_xx
mov r9,r9,ror#31
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r5,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r3,r3,r9 @ E+=X[i]
add r3,r3,r10 @ E+=F_20_39(B,C,D)
ARM( teq r14,sp ) @ preserve carry
THUMB( mov r11,sp )
THUMB( teq r14,r11 ) @ preserve carry
bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4]
bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes
@ Rounds 40-59: F = Maj(B,C,D) computed as (B&(C^D)) + (C&D).
ldr r8,.LK_40_59
sub sp,sp,#20*4 @ [+2]
.L_40_59:
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r7,r8,r7,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r5,r6 @ F_xx_xx
mov r9,r9,ror#31
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r4,r10,ror#2 @ F_xx_xx
and r11,r5,r6 @ F_xx_xx
add r7,r7,r9 @ E+=X[i]
add r7,r7,r10 @ E+=F_40_59(B,C,D)
add r7,r7,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r6,r8,r6,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r4,r5 @ F_xx_xx
mov r9,r9,ror#31
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r3,r10,ror#2 @ F_xx_xx
and r11,r4,r5 @ F_xx_xx
add r6,r6,r9 @ E+=X[i]
add r6,r6,r10 @ E+=F_40_59(B,C,D)
add r6,r6,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r5,r8,r5,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r3,r4 @ F_xx_xx
mov r9,r9,ror#31
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r7,r10,ror#2 @ F_xx_xx
and r11,r3,r4 @ F_xx_xx
add r5,r5,r9 @ E+=X[i]
add r5,r5,r10 @ E+=F_40_59(B,C,D)
add r5,r5,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r4,r8,r4,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r7,r3 @ F_xx_xx
mov r9,r9,ror#31
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r6,r10,ror#2 @ F_xx_xx
and r11,r7,r3 @ F_xx_xx
add r4,r4,r9 @ E+=X[i]
add r4,r4,r10 @ E+=F_40_59(B,C,D)
add r4,r4,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r3,r8,r3,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r6,r7 @ F_xx_xx
mov r9,r9,ror#31
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r5,r10,ror#2 @ F_xx_xx
and r11,r6,r7 @ F_xx_xx
add r3,r3,r9 @ E+=X[i]
add r3,r3,r10 @ E+=F_40_59(B,C,D)
add r3,r3,r11,ror#2
cmp r14,sp
bne .L_40_59 @ [+((12+5)*5+2)*4]
ldr r8,.LK_60_79
sub sp,sp,#20*4
cmp sp,#0 @ set carry to denote 60_79
b .L_20_39_or_60_79 @ [+4], spare 300 bytes
@ Feed-forward: add saved state, undoing the deferred ror#2 on c,d,e.
.L_done:
add sp,sp,#80*4 @ "deallocate" stack frame
ldmia r0,{r8,r9,r10,r11,r12}
add r3,r8,r3
add r4,r9,r4
add r5,r10,r5,ror#2
add r6,r11,r6,ror#2
add r7,r12,r7,ror#2
stmia r0,{r3,r4,r5,r6,r7}
teq r1,r2 @ more blocks?
bne .Lloop @ [+18], total 1307
ldmia sp!,{r4-r12,pc}
.align 2
.LK_00_19: .word 0x5a827999
.LK_20_39: .word 0x6ed9eba1
.LK_40_59: .word 0x8f1bbcdc
.LK_60_79: .word 0xca62c1d6
ENDPROC(sha1_block_data_order)
.asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
.align 2
|
aixcc-public/challenge-001-exemplar-source
| 10,122
|
arch/arm/crypto/blake2b-neon-core.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* BLAKE2b digest algorithm, NEON accelerated
*
* Copyright 2020 Google LLC
*
* Author: Eric Biggers <ebiggers@google.com>
*/
#include <linux/linkage.h>
.text
.fpu neon
// The arguments to blake2b_compress_neon()
STATE .req r0
BLOCK .req r1
NBLOCKS .req r2
INC .req r3
// Pointers to the rotation tables
ROR24_TABLE .req r4
ROR16_TABLE .req r5
// The original stack pointer
ORIG_SP .req r6
// NEON registers which contain the message words of the current block.
// M_0-M_3 are occasionally used for other purposes too.
M_0 .req d16
M_1 .req d17
M_2 .req d18
M_3 .req d19
M_4 .req d20
M_5 .req d21
M_6 .req d22
M_7 .req d23
M_8 .req d24
M_9 .req d25
M_10 .req d26
M_11 .req d27
M_12 .req d28
M_13 .req d29
M_14 .req d30
M_15 .req d31
.align 4
// Tables for computing ror64(x, 24) and ror64(x, 16) using the vtbl.8
// instruction. This is the most efficient way to implement these
// rotation amounts with NEON. (On Cortex-A53 it's the same speed as
// vshr.u64 + vsli.u64, while on Cortex-A7 it's faster.)
.Lror24_table:
.byte 3, 4, 5, 6, 7, 0, 1, 2
.Lror16_table:
.byte 2, 3, 4, 5, 6, 7, 0, 1
// The BLAKE2b initialization vector
.Lblake2b_IV:
.quad 0x6a09e667f3bcc908, 0xbb67ae8584caa73b
.quad 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1
.quad 0x510e527fade682d1, 0x9b05688c2b3e6c1f
.quad 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179
// Execute one round of BLAKE2b by updating the state matrix v[0..15] in the
// NEON registers q0-q7.  The message block is in q8..q15 (M_0-M_15).  The stack
// pointer points to a 32-byte aligned buffer containing a copy of q8 and q9
// (M_0-M_3), so that they can be reloaded if they are used as temporary
// registers.  The macro arguments s0-s15 give the order in which the message
// words are used in this round.  'final' is 1 if this is the final round.
//
// The column half of the round operates on whole q registers (two 64-bit
// lanes at a time); the diagonal half operates on individual d registers so
// that no cross-lane shuffling ('vext') is needed.
.macro	_blake2b_round	s0, s1, s2, s3, s4, s5, s6, s7, \
			s8, s9, s10, s11, s12, s13, s14, s15, final=0

	// Mix the columns:
	// (v[0], v[4], v[8], v[12]), (v[1], v[5], v[9], v[13]),
	// (v[2], v[6], v[10], v[14]), and (v[3], v[7], v[11], v[15]).

	// a += b + m[blake2b_sigma[r][2*i + 0]];
	vadd.u64	q0, q0, q2
	vadd.u64	q1, q1, q3
	vadd.u64	d0, d0, M_\s0
	vadd.u64	d1, d1, M_\s2
	vadd.u64	d2, d2, M_\s4
	vadd.u64	d3, d3, M_\s6

	// d = ror64(d ^ a, 32);
	veor		q6, q6, q0
	veor		q7, q7, q1
	// vrev64.32 swaps the 32-bit halves of each 64-bit lane == ror64(x, 32)
	vrev64.32	q6, q6
	vrev64.32	q7, q7

	// c += d;
	vadd.u64	q4, q4, q6
	vadd.u64	q5, q5, q7

	// b = ror64(b ^ c, 24);
	// M_0 (d16) is borrowed here to hold the vtbl permutation; it is
	// reloaded from the stack copy below when needed.
	vld1.8		{M_0}, [ROR24_TABLE, :64]
	veor		q2, q2, q4
	veor		q3, q3, q5
	vtbl.8		d4, {d4}, M_0
	vtbl.8		d5, {d5}, M_0
	vtbl.8		d6, {d6}, M_0
	vtbl.8		d7, {d7}, M_0

	// a += b + m[blake2b_sigma[r][2*i + 1]];
	//
	// M_0 got clobbered above, so we have to reload it if any of the four
	// message words this step needs happens to be M_0.  Otherwise we don't
	// need to reload it here, as it will just get clobbered again below.
.if \s1 == 0 || \s3 == 0 || \s5 == 0 || \s7 == 0
	vld1.8		{M_0}, [sp, :64]
.endif
	vadd.u64	q0, q0, q2
	vadd.u64	q1, q1, q3
	vadd.u64	d0, d0, M_\s1
	vadd.u64	d1, d1, M_\s3
	vadd.u64	d2, d2, M_\s5
	vadd.u64	d3, d3, M_\s7

	// d = ror64(d ^ a, 16);
	vld1.8		{M_0}, [ROR16_TABLE, :64]
	veor		q6, q6, q0
	veor		q7, q7, q1
	vtbl.8		d12, {d12}, M_0
	vtbl.8		d13, {d13}, M_0
	vtbl.8		d14, {d14}, M_0
	vtbl.8		d15, {d15}, M_0

	// c += d;
	vadd.u64	q4, q4, q6
	vadd.u64	q5, q5, q7

	// b = ror64(b ^ c, 63);
	//
	// This rotation amount isn't a multiple of 8, so it has to be
	// implemented using a pair of shifts, which requires temporary
	// registers.  Use q8-q9 (M_0-M_3) for this, and reload them afterwards.
	veor		q8, q2, q4
	veor		q9, q3, q5
	vshr.u64	q2, q8, #63
	vshr.u64	q3, q9, #63
	vsli.u64	q2, q8, #1
	vsli.u64	q3, q9, #1
	vld1.8		{q8-q9}, [sp, :256]

	// Mix the diagonals:
	// (v[0], v[5], v[10], v[15]), (v[1], v[6], v[11], v[12]),
	// (v[2], v[7], v[8], v[13]), and (v[3], v[4], v[9], v[14]).
	//
	// There are two possible ways to do this: use 'vext' instructions to
	// shift the rows of the matrix so that the diagonals become columns,
	// and undo it afterwards; or just use 64-bit operations on 'd'
	// registers instead of 128-bit operations on 'q' registers.  We use the
	// latter approach, as it performs much better on Cortex-A7.

	// a += b + m[blake2b_sigma[r][2*i + 0]];
	vadd.u64	d0, d0, d5
	vadd.u64	d1, d1, d6
	vadd.u64	d2, d2, d7
	vadd.u64	d3, d3, d4
	vadd.u64	d0, d0, M_\s8
	vadd.u64	d1, d1, M_\s10
	vadd.u64	d2, d2, M_\s12
	vadd.u64	d3, d3, M_\s14

	// d = ror64(d ^ a, 32);
	veor		d15, d15, d0
	veor		d12, d12, d1
	veor		d13, d13, d2
	veor		d14, d14, d3
	vrev64.32	d15, d15
	vrev64.32	d12, d12
	vrev64.32	d13, d13
	vrev64.32	d14, d14

	// c += d;
	vadd.u64	d10, d10, d15
	vadd.u64	d11, d11, d12
	vadd.u64	d8, d8, d13
	vadd.u64	d9, d9, d14

	// b = ror64(b ^ c, 24);
	vld1.8		{M_0}, [ROR24_TABLE, :64]
	veor		d5, d5, d10
	veor		d6, d6, d11
	veor		d7, d7, d8
	veor		d4, d4, d9
	vtbl.8		d5, {d5}, M_0
	vtbl.8		d6, {d6}, M_0
	vtbl.8		d7, {d7}, M_0
	vtbl.8		d4, {d4}, M_0

	// a += b + m[blake2b_sigma[r][2*i + 1]];
.if \s9 == 0 || \s11 == 0 || \s13 == 0 || \s15 == 0
	vld1.8		{M_0}, [sp, :64]
.endif
	vadd.u64	d0, d0, d5
	vadd.u64	d1, d1, d6
	vadd.u64	d2, d2, d7
	vadd.u64	d3, d3, d4
	vadd.u64	d0, d0, M_\s9
	vadd.u64	d1, d1, M_\s11
	vadd.u64	d2, d2, M_\s13
	vadd.u64	d3, d3, M_\s15

	// d = ror64(d ^ a, 16);
	vld1.8		{M_0}, [ROR16_TABLE, :64]
	veor		d15, d15, d0
	veor		d12, d12, d1
	veor		d13, d13, d2
	veor		d14, d14, d3
	vtbl.8		d12, {d12}, M_0
	vtbl.8		d13, {d13}, M_0
	vtbl.8		d14, {d14}, M_0
	vtbl.8		d15, {d15}, M_0

	// c += d;
	vadd.u64	d10, d10, d15
	vadd.u64	d11, d11, d12
	vadd.u64	d8, d8, d13
	vadd.u64	d9, d9, d14

	// b = ror64(b ^ c, 63);
	veor		d16, d4, d9
	veor		d17, d5, d10
	veor		d18, d6, d11
	veor		d19, d7, d8
	vshr.u64	q2, q8, #63
	vshr.u64	q3, q9, #63
	vsli.u64	q2, q8, #1
	vsli.u64	q3, q9, #1
	// Reloading q8-q9 can be skipped on the final round.
.if ! \final
	vld1.8		{q8-q9}, [sp, :256]
.endif
.endm
//
// void blake2b_compress_neon(struct blake2b_state *state,
//			      const u8 *block, size_t nblocks, u32 inc);
//
// Only the first three fields of struct blake2b_state are used:
//	u64 h[8];	(inout)
//	u64 t[2];	(inout)
//	u64 f[2];	(in)
//
// Clobbers r4-r10, ip, and all NEON registers.  Callee-saved r4-r10 are
// preserved via push/pop; sp is temporarily redirected to an aligned buffer
// and restored from ORIG_SP (r6) on exit.
//
	.align		5
ENTRY(blake2b_compress_neon)
	push		{r4-r10}

	// Allocate a 32-byte stack buffer that is 32-byte aligned.
	// It holds a copy of q8-q9 (message words M_0-M_3) for _blake2b_round.
	mov		ORIG_SP, sp
	sub		ip, sp, #32
	bic		ip, ip, #31
	mov		sp, ip

	adr		ROR24_TABLE, .Lror24_table
	adr		ROR16_TABLE, .Lror16_table

	mov		ip, STATE
	vld1.64		{q0-q1}, [ip]!		// Load h[0..3]
	vld1.64		{q2-q3}, [ip]!		// Load h[4..7]
.Lnext_block:
	  adr		r10, .Lblake2b_IV
	  vld1.64	{q14-q15}, [ip]		// Load t[0..1] and f[0..1]
	  vld1.64	{q4-q5}, [r10]!		// Load IV[0..3]
	  vmov		r7, r8, d28		// Copy t[0] to (r7, r8)
	  vld1.64	{q6-q7}, [r10]		// Load IV[4..7]
	  adds		r7, r7, INC		// Increment counter
	  // Carry set => low 32 bits of t[0] overflowed; take the slow path
	  // that propagates the carry through the full 128-bit counter.
	  bcs		.Lslow_inc_ctr
	  vmov.i32	d28[0], r7
	  vst1.64	{d28}, [ip]		// Update t[0]
.Linc_ctr_done:

	// Load the next message block and finish initializing the state matrix
	// 'v'.  Fortunately, there are exactly enough NEON registers to fit the
	// entire state matrix in q0-q7 and the entire message block in q8-15.
	//
	// However, _blake2b_round also needs some extra registers for rotates,
	// so we have to spill some registers.  It's better to spill the message
	// registers than the state registers, as the message doesn't change.
	// Therefore we store a copy of the first 32 bytes of the message block
	// (q8-q9) in an aligned buffer on the stack so that they can be
	// reloaded when needed.  (We could just reload directly from the
	// message buffer, but it's faster to use aligned loads.)
	vld1.8		{q8-q9}, [BLOCK]!
	veor		q6, q6, q14	// v[12..13] = IV[4..5] ^ t[0..1]
	vld1.8		{q10-q11}, [BLOCK]!
	veor		q7, q7, q15	// v[14..15] = IV[6..7] ^ f[0..1]
	vld1.8		{q12-q13}, [BLOCK]!
	vst1.8		{q8-q9}, [sp, :256]
	mov		ip, STATE
	vld1.8		{q14-q15}, [BLOCK]!

	// Execute the rounds.  Each round is provided the order in which it
	// needs to use the message words.
	_blake2b_round	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
	_blake2b_round	14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3
	_blake2b_round	11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4
	_blake2b_round	7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8
	_blake2b_round	9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13
	_blake2b_round	2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9
	_blake2b_round	12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11
	_blake2b_round	13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10
	_blake2b_round	6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5
	_blake2b_round	10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0
	_blake2b_round	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
	_blake2b_round	14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 \
			final=1

	// Fold the final state matrix into the hash chaining value:
	//
	//	for (i = 0; i < 8; i++)
	//		h[i] ^= v[i] ^ v[i + 8];
	//
	vld1.64		{q8-q9}, [ip]!		// Load old h[0..3]
	veor		q0, q0, q4		// v[0..1] ^= v[8..9]
	veor		q1, q1, q5		// v[2..3] ^= v[10..11]
	vld1.64		{q10-q11}, [ip]		// Load old h[4..7]
	veor		q2, q2, q6		// v[4..5] ^= v[12..13]
	veor		q3, q3, q7		// v[6..7] ^= v[14..15]
	veor		q0, q0, q8		// v[0..1] ^= h[0..1]
	veor		q1, q1, q9		// v[2..3] ^= h[2..3]
	mov		ip, STATE
	subs		NBLOCKS, NBLOCKS, #1	// nblocks--
	vst1.64		{q0-q1}, [ip]!		// Store new h[0..3]
	veor		q2, q2, q10		// v[4..5] ^= h[4..5]
	veor		q3, q3, q11		// v[6..7] ^= h[6..7]
	vst1.64		{q2-q3}, [ip]!		// Store new h[4..7]

	// Advance to the next block, if there is one.
	bne		.Lnext_block		// nblocks != 0?

	mov		sp, ORIG_SP
	pop		{r4-r10}
	mov		pc, lr

.Lslow_inc_ctr:
	// Handle the case where the counter overflowed its low 32 bits, by
	// carrying the overflow bit into the full 128-bit counter.
	vmov		r9, r10, d29
	adcs		r8, r8, #0
	adcs		r9, r9, #0
	adc		r10, r10, #0
	vmov		d28, r7, r8
	vmov		d29, r9, r10
	vst1.64		{q14}, [ip]		// Update t[0] and t[1]
	b		.Linc_ctr_done
ENDPROC(blake2b_compress_neon)
|
aixcc-public/challenge-001-exemplar-source
| 9,627
|
arch/arm/crypto/blake2s-core.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* BLAKE2s digest algorithm, ARM scalar implementation
*
* Copyright 2020 Google LLC
*
* Author: Eric Biggers <ebiggers@google.com>
*/
#include <linux/linkage.h>
#include <asm/assembler.h>

	// Registers used to hold message words temporarily.  There aren't
	// enough ARM registers to hold the whole message block, so we have to
	// load the words on-demand.
	M_0		.req	r12
	M_1		.req	r14

// The BLAKE2s initialization vector
.Lblake2s_IV:
	.word		0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A
	.word		0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19

// Load the two 32-bit words at [\src + \offset] into \a and \b, using a
// single ldrd where the architecture provides it.
.macro __ldrd		a, b, src, offset
#if __LINUX_ARM_ARCH__ >= 6
	ldrd		\a, \b, [\src, #\offset]
#else
	ldr		\a, [\src, #\offset]
	ldr		\b, [\src, #\offset + 4]
#endif
.endm

// Store \a and \b to the two 32-bit words at [\dst + \offset]; strd
// counterpart of __ldrd above.
.macro __strd		a, b, dst, offset
#if __LINUX_ARM_ARCH__ >= 6
	strd		\a, \b, [\dst, #\offset]
#else
	str		\a, [\dst, #\offset]
	str		\b, [\dst, #\offset + 4]
#endif
.endm

// Byte-swap a 32-bit word on big-endian builds; no-op on little-endian,
// since BLAKE2s message words are little-endian.
.macro _le32_bswap	a, tmp
#ifdef __ARMEB__
	rev_l		\a, \tmp
#endif
.endm

// Apply _le32_bswap to eight registers.
.macro _le32_bswap_8x	a, b, c, d, e, f, g, h,  tmp
	_le32_bswap	\a, \tmp
	_le32_bswap	\b, \tmp
	_le32_bswap	\c, \tmp
	_le32_bswap	\d, \tmp
	_le32_bswap	\e, \tmp
	_le32_bswap	\f, \tmp
	_le32_bswap	\g, \tmp
	_le32_bswap	\h, \tmp
.endm
// Execute a quarter-round of BLAKE2s by mixing two columns or two diagonals.
// (a0, b0, c0, d0) and (a1, b1, c1, d1) give the registers containing the two
// columns/diagonals.  s0-s1 are the word offsets to the message words the first
// column/diagonal needs, and likewise s2-s3 for the second column/diagonal.
// M_0 and M_1 are free to use, and the message block can be found at sp + 32.
//
// Note that to save instructions, the rotations don't happen when the
// pseudocode says they should, but rather they are delayed until the values are
// used.  See the comment above _blake2s_round().
//
// On entry, 'b' values are pending a right-rotate by brot and 'd' values by
// drot; on exit, 'b' values are pending ror #7 and 'd' values ror #8,
// matching the (brot, drot) = (7, 8) convention set by _blake2s_round.
.macro _blake2s_quarterround  a0, b0, c0, d0,  a1, b1, c1, d1,  s0, s1, s2, s3
	ldr		M_0, [sp, #32 + 4 * \s0]
	ldr		M_1, [sp, #32 + 4 * \s2]

	// a += b + m[blake2s_sigma[r][2*i + 0]];
	// (the 'ror #brot' applies the rotate still owed on 'b')
	add		\a0, \a0, \b0, ror #brot
	add		\a1, \a1, \b1, ror #brot
	add		\a0, \a0, M_0
	add		\a1, \a1, M_1

	// d = ror32(d ^ a, 16);
	eor		\d0, \a0, \d0, ror #drot
	eor		\d1, \a1, \d1, ror #drot

	// c += d;
	add		\c0, \c0, \d0, ror #16
	add		\c1, \c1, \d1, ror #16

	// b = ror32(b ^ c, 12);
	eor		\b0, \c0, \b0, ror #brot
	eor		\b1, \c1, \b1, ror #brot

	ldr		M_0, [sp, #32 + 4 * \s1]
	ldr		M_1, [sp, #32 + 4 * \s3]

	// a += b + m[blake2s_sigma[r][2*i + 1]];
	add		\a0, \a0, \b0, ror #12
	add		\a1, \a1, \b1, ror #12
	add		\a0, \a0, M_0
	add		\a1, \a1, M_1

	// d = ror32(d ^ a, 8);
	eor		\d0, \a0, \d0, ror#16
	eor		\d1, \a1, \d1, ror#16

	// c += d;
	add		\c0, \c0, \d0, ror#8
	add		\c1, \c1, \d1, ror#8

	// b = ror32(b ^ c, 7);
	eor		\b0, \c0, \b0, ror#12
	eor		\b1, \c1, \b1, ror#12
.endm
// Execute one round of BLAKE2s by updating the state matrix v[0..15].  v[0..9]
// are in r0..r9.  The stack pointer points to 8 bytes of scratch space for
// spilling v[8..9], then to v[9..15], then to the message block.  r10-r12 and
// r14 are free to use.  The macro arguments s0-s15 give the order in which the
// message words are used in this round.
//
// All rotates are performed using the implicit rotate operand accepted by the
// 'add' and 'eor' instructions.  This is faster than using explicit rotate
// instructions.  To make this work, we allow the values in the second and last
// rows of the BLAKE2s state matrix (rows 'b' and 'd') to temporarily have the
// wrong rotation amount.  The rotation amount is then fixed up just in time
// when the values are used.  'brot' is the number of bits the values in row 'b'
// need to be rotated right to arrive at the correct values, and 'drot'
// similarly for row 'd'.  (brot, drot) start out as (0, 0) but we make it such
// that they end up as (7, 8) after every round.
.macro	_blake2s_round	s0, s1, s2, s3, s4, s5, s6, s7, \
			s8, s9, s10, s11, s12, s13, s14, s15

	// Mix first two columns:
	// (v[0], v[4], v[8], v[12]) and (v[1], v[5], v[9], v[13]).
	__ldrd		r10, r11, sp, 16	// load v[12] and v[13]
	_blake2s_quarterround	r0, r4, r8, r10,  r1, r5, r9, r11, \
				\s0, \s1, \s2, \s3
	__strd		r8, r9, sp, 0		// spill v[8..9] so r8-r9 free up
	__strd		r10, r11, sp, 16

	// Mix second two columns:
	// (v[2], v[6], v[10], v[14]) and (v[3], v[7], v[11], v[15]).
	__ldrd		r8, r9, sp, 8		// load v[10] and v[11]
	__ldrd		r10, r11, sp, 24	// load v[14] and v[15]
	_blake2s_quarterround	r2, r6, r8, r10,  r3, r7, r9, r11, \
				\s4, \s5, \s6, \s7
	str		r10, [sp, #24]		// store v[14]
	// v[10], v[11], and v[15] are used below, so no need to store them yet.

	// After the column half, rows 'b' and 'd' owe these rotates:
	.set brot, 7
	.set drot, 8

	// Mix first two diagonals:
	// (v[0], v[5], v[10], v[15]) and (v[1], v[6], v[11], v[12]).
	ldr		r10, [sp, #16]		// load v[12]
	_blake2s_quarterround	r0, r5, r8, r11,  r1, r6, r9, r10, \
				\s8, \s9, \s10, \s11
	__strd		r8, r9, sp, 8
	str		r11, [sp, #28]
	str		r10, [sp, #16]

	// Mix second two diagonals:
	// (v[2], v[7], v[8], v[13]) and (v[3], v[4], v[9], v[14]).
	__ldrd		r8, r9, sp, 0		// load v[8] and v[9]
	__ldrd		r10, r11, sp, 20	// load v[13] and v[14]
	_blake2s_quarterround	r2, r7, r8, r10,  r3, r4, r9, r11, \
				\s12, \s13, \s14, \s15
	__strd		r10, r11, sp, 20
.endm
//
// void blake2s_compress(struct blake2s_state *state,
//			 const u8 *block, size_t nblocks, u32 inc);
//
// Only the first three fields of struct blake2s_state are used:
//	u32 h[8];	(inout)
//	u32 t[2];	(inout)
//	u32 f[2];	(in)
//
// Scalar (non-NEON) implementation; uses nearly every core register, so the
// (state, block, nblocks) arguments are kept on the stack across rounds.
//
	.align		5
ENTRY(blake2s_compress)
	push		{r0-r2,r4-r11,lr}	// keep this an even number

.Lnext_block:
	// r0 is 'state'
	// r1 is 'block'
	// r3 is 'inc'

	// Load and increment the counter t[0..1].
	__ldrd		r10, r11, r0, 32
	adds		r10, r10, r3
	adc		r11, r11, #0
	__strd		r10, r11, r0, 32

	  // _blake2s_round is very short on registers, so copy the message block
	  // to the stack to save a register during the rounds.  This also has
	  // the advantage that misalignment only needs to be dealt with in one
	  // place.
	  sub		sp, sp, #64
	  mov		r12, sp
	  tst		r1, #3
	  bne		.Lcopy_block_misaligned
	  ldmia		r1!, {r2-r9}
	  _le32_bswap_8x	r2, r3, r4, r5, r6, r7, r8, r9,  r14
	  stmia		r12!, {r2-r9}
	  ldmia		r1!, {r2-r9}
	  _le32_bswap_8x	r2, r3, r4, r5, r6, r7, r8, r9,  r14
	  stmia		r12, {r2-r9}
.Lcopy_block_done:
	str		r1, [sp, #68]		// Update message pointer
						// (spilled r1 slot sits just
						// above the 64-byte copy)

	// Calculate v[8..15].  Push v[9..15] onto the stack, and leave space
	// for spilling v[8..9].  Leave v[8..9] in r8-r9.
	mov		r14, r0		// r14 = state
	adr		r12, .Lblake2s_IV
	ldmia		r12!, {r8-r9}		// load IV[0..1]
	__ldrd		r0, r1, r14, 40		// load f[0..1]
	ldm		r12, {r2-r7}		// load IV[3..7]
	eor		r4, r4, r10		// v[12] = IV[4] ^ t[0]
	eor		r5, r5, r11		// v[13] = IV[5] ^ t[1]
	eor		r6, r6, r0		// v[14] = IV[6] ^ f[0]
	eor		r7, r7, r1		// v[15] = IV[7] ^ f[1]
	push		{r2-r7}			// push v[9..15]
	sub		sp, sp, #8		// leave space for v[8..9]

	// Load h[0..7] == v[0..7].
	ldm		r14, {r0-r7}

	// Execute the rounds.  Each round is provided the order in which it
	// needs to use the message words.
	.set brot, 0
	.set drot, 0
	_blake2s_round	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
	_blake2s_round	14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3
	_blake2s_round	11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4
	_blake2s_round	7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8
	_blake2s_round	9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13
	_blake2s_round	2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9
	_blake2s_round	12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11
	_blake2s_round	13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10
	_blake2s_round	6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5
	_blake2s_round	10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0

	// Fold the final state matrix into the hash chaining value:
	//
	//	for (i = 0; i < 8; i++)
	//		h[i] ^= v[i] ^ v[i + 8];
	//
	ldr		r14, [sp, #96]		// r14 = &h[0]
	add		sp, sp, #8		// v[8..9] are already loaded.
	pop		{r10-r11}		// load v[10..11]
	eor		r0, r0, r8
	eor		r1, r1, r9
	eor		r2, r2, r10
	eor		r3, r3, r11
	ldm		r14, {r8-r11}		// load h[0..3]
	eor		r0, r0, r8
	eor		r1, r1, r9
	eor		r2, r2, r10
	eor		r3, r3, r11
	stmia		r14!, {r0-r3}		// store new h[0..3]
	ldm		r14, {r0-r3}		// load old h[4..7]
	pop		{r8-r11}		// load v[12..15]
	// v[4..7] and v[12..15] still owe the brot/drot rotates from the
	// last round; apply them here while folding.
	eor		r0, r0, r4, ror #brot
	eor		r1, r1, r5, ror #brot
	eor		r2, r2, r6, ror #brot
	eor		r3, r3, r7, ror #brot
	eor		r0, r0, r8, ror #drot
	eor		r1, r1, r9, ror #drot
	eor		r2, r2, r10, ror #drot
	eor		r3, r3, r11, ror #drot
	  add		sp, sp, #64		// skip copy of message block
	stm		r14, {r0-r3}		// store new h[4..7]

	// Advance to the next block, if there is one.  Note that if there are
	// multiple blocks, then 'inc' (the counter increment amount) must be
	// 64.  So we can simply set it to 64 without re-loading it.
	ldm		sp, {r0, r1, r2}	// load (state, block, nblocks)
	mov		r3, #64			// set 'inc'
	subs		r2, r2, #1		// nblocks--
	str		r2, [sp, #8]
	bne		.Lnext_block		// nblocks != 0?

	pop		{r0-r2,r4-r11,pc}

	// The next message block (pointed to by r1) isn't 4-byte aligned, so it
	// can't be loaded using ldmia.  Copy it to the stack buffer (pointed to
	// by r12) using an alternative method.  r2-r9 are free to use.
.Lcopy_block_misaligned:
	mov		r2, #64
1:
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	ldr		r3, [r1], #4
	_le32_bswap	r3, r4
#else
	ldrb		r3, [r1, #0]
	ldrb		r4, [r1, #1]
	ldrb		r5, [r1, #2]
	ldrb		r6, [r1, #3]
	add		r1, r1, #4
	orr		r3, r3, r4, lsl #8
	orr		r3, r3, r5, lsl #16
	orr		r3, r3, r6, lsl #24
#endif
	subs		r2, r2, #4
	str		r3, [r12], #4
	bne		1b
	b		.Lcopy_block_done
ENDPROC(blake2s_compress)
|
aixcc-public/challenge-001-exemplar-source
| 6,891
|
arch/arm/crypto/ghash-ce-core.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Accelerated GHASH implementation with NEON/ARMv8 vmull.p8/64 instructions.
*
* Copyright (C) 2015 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
*/
#include <linux/linkage.h>
#include <asm/assembler.h>

	.arch		armv8-a
	.fpu		crypto-neon-fp-armv8

	// NOTE: several aliases deliberately share a register (e.g. XH/IN1 =
	// q4, MASK/SHASH2_p8 = d28, k48/SHASH2_p64 = d31); the p64 and p8 code
	// paths use disjoint subsets of these names.
	SHASH		.req	q0
	T1		.req	q1
	XL		.req	q2
	XM		.req	q3
	XH		.req	q4
	IN1		.req	q4

	SHASH_L		.req	d0
	SHASH_H		.req	d1
	T1_L		.req	d2
	T1_H		.req	d3
	XL_L		.req	d4
	XL_H		.req	d5
	XM_L		.req	d6
	XM_H		.req	d7
	XH_L		.req	d8

	t0l		.req	d10
	t0h		.req	d11
	t1l		.req	d12
	t1h		.req	d13
	t2l		.req	d14
	t2h		.req	d15
	t3l		.req	d16
	t3h		.req	d17
	t4l		.req	d18
	t4h		.req	d19

	t0q		.req	q5
	t1q		.req	q6
	t2q		.req	q7
	t3q		.req	q8
	t4q		.req	q9
	T2		.req	q9

	s1l		.req	d20
	s1h		.req	d21
	s2l		.req	d22
	s2h		.req	d23
	s3l		.req	d24
	s3h		.req	d25
	s4l		.req	d26
	s4h		.req	d27

	MASK		.req	d28
	SHASH2_p8	.req	d28

	k16		.req	d29
	k32		.req	d30
	k48		.req	d31
	SHASH2_p64	.req	d31

	HH		.req	q10
	HH3		.req	q11
	HH4		.req	q12
	HH34		.req	q13

	HH_L		.req	d20
	HH_H		.req	d21
	HH3_L		.req	d22
	HH3_H		.req	d23
	HH4_L		.req	d24
	HH4_H		.req	d25
	HH34_L		.req	d26
	HH34_H		.req	d27
	SHASH2_H	.req	d29

	XL2		.req	q5
	XM2		.req	q6
	XH2		.req	q7
	T3		.req	q8

	XL2_L		.req	d10
	XL2_H		.req	d11
	XM2_L		.req	d12
	XM2_H		.req	d13
	T3_L		.req	d16
	T3_H		.req	d17

	.text

	// 64x64 -> 128 bit carryless multiply using the single vmull.p64
	// instruction; the b1-b4 arguments exist only for signature
	// compatibility with __pmull_p8 and are unused here.
	.macro		__pmull_p64, rd, rn, rm, b1, b2, b3, b4
	vmull.p64	\rd, \rn, \rm
	.endm
	/*
	 * This implementation of 64x64 -> 128 bit polynomial multiplication
	 * using vmull.p8 instructions (8x8 -> 16) is taken from the paper
	 * "Fast Software Polynomial Multiplication on ARM Processors Using
	 * the NEON Engine" by Danilo Camara, Conrado Gouvea, Julio Lopez and
	 * Ricardo Dahab (https://hal.inria.fr/hal-01506572)
	 *
	 * It has been slightly tweaked for in-order performance, and to allow
	 * 'rq' to overlap with 'ad' or 'bd'.
	 *
	 * When b1-b4 are passed as something other than the t4l/t3l defaults
	 * (as pmull_ghash_update_p8 does with s1l..s4h), the byte rotations
	 * of 'bd' are assumed precomputed in those registers and the vext.8
	 * steps below are skipped.
	 */
	.macro		__pmull_p8, rq, ad, bd, b1=t4l, b2=t3l, b3=t4l, b4=t3l
	vext.8		t0l, \ad, \ad, #1	@ A1
	.ifc		\b1, t4l
	vext.8		t4l, \bd, \bd, #1	@ B1
	.endif
	vmull.p8	t0q, t0l, \bd		@ F = A1*B
	vext.8		t1l, \ad, \ad, #2	@ A2
	vmull.p8	t4q, \ad, \b1		@ E = A*B1
	.ifc		\b2, t3l
	vext.8		t3l, \bd, \bd, #2	@ B2
	.endif
	vmull.p8	t1q, t1l, \bd		@ H = A2*B
	vext.8		t2l, \ad, \ad, #3	@ A3
	vmull.p8	t3q, \ad, \b2		@ G = A*B2
	veor		t0q, t0q, t4q		@ L = E + F
	.ifc		\b3, t4l
	vext.8		t4l, \bd, \bd, #3	@ B3
	.endif
	vmull.p8	t2q, t2l, \bd		@ J = A3*B
	veor		t0l, t0l, t0h		@ t0 = (L) (P0 + P1) << 8
	veor		t1q, t1q, t3q		@ M = G + H
	.ifc		\b4, t3l
	vext.8		t3l, \bd, \bd, #4	@ B4
	.endif
	vmull.p8	t4q, \ad, \b3		@ I = A*B3
	veor		t1l, t1l, t1h		@ t1 = (M) (P2 + P3) << 16
	vmull.p8	t3q, \ad, \b4		@ K = A*B4
	@ k48/k32/k16 mask off the high parts that would be double-counted
	vand		t0h, t0h, k48
	vand		t1h, t1h, k32
	veor		t2q, t2q, t4q		@ N = I + J
	veor		t0l, t0l, t0h
	veor		t1l, t1l, t1h
	veor		t2l, t2l, t2h		@ t2 = (N) (P4 + P5) << 24
	vand		t2h, t2h, k16
	veor		t3l, t3l, t3h		@ t3 = (K) (P6 + P7) << 32
	vmov.i64	t3h, #0
	@ shift each partial product into its byte position
	vext.8		t0q, t0q, t0q, #15
	veor		t2l, t2l, t2h
	vext.8		t1q, t1q, t1q, #14
	vmull.p8	\rq, \ad, \bd		@ D = A*B
	vext.8		t2q, t2q, t2q, #13
	vext.8		t3q, t3q, t3q, #12
	veor		t0q, t0q, t1q
	veor		t2q, t2q, t3q
	veor		\rq, \rq, t0q
	veor		\rq, \rq, t2q
	.endm
	//
	// PMULL (64x64->128) based reduction for CPUs that can do
	// it in a single instruction.
	//
	// Folds the 256-bit product in XH:XL (with the middle part in XM)
	// modulo the GHASH polynomial, using MASK = 0xe1 << 57.
	//
	.macro		__pmull_reduce_p64
	vmull.p64	T1, XL_L, MASK

	veor		XH_L, XH_L, XM_H
	vext.8		T1, T1, T1, #8
	veor		XL_H, XL_H, XM_L
	veor		T1, T1, XL

	vmull.p64	XL, T1_H, MASK
	.endm

	//
	// Alternative reduction for CPUs that lack support for the
	// 64x64->128 PMULL instruction
	//
	// Same reduction expressed with shifts/XORs only (the multiply by
	// the sparse polynomial becomes shifts by 57/62/63 and 1/2/7).
	//
	.macro		__pmull_reduce_p8
	veor		XL_H, XL_H, XM_L
	veor		XH_L, XH_L, XM_H

	vshl.i64	T1, XL, #57
	vshl.i64	T2, XL, #62
	veor		T1, T1, T2
	vshl.i64	T2, XL, #63
	veor		T1, T1, T2
	veor		XL_H, XL_H, T1_L
	veor		XH_L, XH_L, T1_H

	vshr.u64	T1, XL, #1
	veor		XH, XH, XL
	veor		XL, XL, T1
	vshr.u64	T1, T1, #6
	vshr.u64	XL, XL, #1
	.endm
	// Main GHASH loop, instantiated once per multiplication backend
	// (\pn = p64 or p8).  Register arguments match pmull_ghash_update():
	// r0 = #blocks, r1 = dg (the 128-bit accumulator), r2 = src,
	// [sp] = optional head block pointer.  The p64 variant additionally
	// processes blocks 4 at a time using the precomputed powers of H
	// (HH, HH3, HH4) while #blocks remains a multiple of 4.
	.macro		ghash_update, pn
	vld1.64		{XL}, [r1]

	/* do the head block first, if supplied */
	ldr		ip, [sp]
	teq		ip, #0
	beq		0f
	vld1.64		{T1}, [ip]
	teq		r0, #0			// Z flag tells label 4 whether
	b		3f			// to loop back for more blocks

0:	.ifc		\pn, p64
	tst		r0, #3			// skip until #blocks is a
	bne		2f			// round multiple of 4

	vld1.8		{XL2-XM2}, [r2]!
1:	vld1.8		{T3-T2}, [r2]!
	vrev64.8	XL2, XL2
	vrev64.8	XM2, XM2

	subs		r0, r0, #4

	vext.8		T1, XL2, XL2, #8
	veor		XL2_H, XL2_H, XL_L
	veor		XL, XL, T1

	vrev64.8	T3, T3
	vrev64.8	T1, T2

	// Karatsuba-style multiply of the four blocks by H^4, H^3, H^2, H
	// respectively; the three partial products accumulate in XH/XL/XM.
	vmull.p64	XH, HH4_H, XL_H			// a1 * b1
	veor		XL2_H, XL2_H, XL_H
	vmull.p64	XL, HH4_L, XL_L			// a0 * b0
	vmull.p64	XM, HH34_H, XL2_H		// (a1 + a0)(b1 + b0)

	vmull.p64	XH2, HH3_H, XM2_L		// a1 * b1
	veor		XM2_L, XM2_L, XM2_H
	vmull.p64	XL2, HH3_L, XM2_H		// a0 * b0
	vmull.p64	XM2, HH34_L, XM2_L		// (a1 + a0)(b1 + b0)

	veor		XH, XH, XH2
	veor		XL, XL, XL2
	veor		XM, XM, XM2

	vmull.p64	XH2, HH_H, T3_L			// a1 * b1
	veor		T3_L, T3_L, T3_H
	vmull.p64	XL2, HH_L, T3_H			// a0 * b0
	vmull.p64	XM2, SHASH2_H, T3_L		// (a1 + a0)(b1 + b0)

	veor		XH, XH, XH2
	veor		XL, XL, XL2
	veor		XM, XM, XM2

	vmull.p64	XH2, SHASH_H, T1_L		// a1 * b1
	veor		T1_L, T1_L, T1_H
	vmull.p64	XL2, SHASH_L, T1_H		// a0 * b0
	vmull.p64	XM2, SHASH2_p64, T1_L		// (a1 + a0)(b1 + b0)

	veor		XH, XH, XH2
	veor		XL, XL, XL2
	veor		XM, XM, XM2

	beq		4f

	vld1.8		{XL2-XM2}, [r2]!

	veor		T1, XL, XH
	veor		XM, XM, T1

	__pmull_reduce_p64

	veor		T1, T1, XH
	veor		XL, XL, T1

	b		1b
	.endif

	// One-block-at-a-time path (and the p8 path's only path)
2:	vld1.64		{T1}, [r2]!
	subs		r0, r0, #1

3:	/* multiply XL by SHASH in GF(2^128) */
#ifndef CONFIG_CPU_BIG_ENDIAN
	vrev64.8	T1, T1
#endif
	vext.8		IN1, T1, T1, #8
	veor		T1_L, T1_L, XL_H
	veor		XL, XL, IN1

	__pmull_\pn	XH, XL_H, SHASH_H, s1h, s2h, s3h, s4h	@ a1 * b1
	veor		T1, T1, XL
	__pmull_\pn	XL, XL_L, SHASH_L, s1l, s2l, s3l, s4l	@ a0 * b0
	__pmull_\pn	XM, T1_L, SHASH2_\pn			@ (a1+a0)(b1+b0)

4:	veor		T1, XL, XH
	veor		XM, XM, T1

	__pmull_reduce_\pn

	veor		T1, T1, XH
	veor		XL, XL, T1

	bne		0b

	vst1.64		{XL}, [r1]
	bx		lr
	.endm
	/*
	 * void pmull_ghash_update(int blocks, u64 dg[], const char *src,
	 *			   struct ghash_key const *k, const char *head)
	 *
	 * Two entry points sharing the ghash_update loop: the _p64 variant
	 * requires the vmull.p64 (PMULL) instruction and loads the full set
	 * of precomputed powers of H from *k; the _p8 variant works on any
	 * NEON CPU by synthesizing the carryless multiply from vmull.p8,
	 * precomputing here the byte-rotated copies of H (s1l..s4h) and the
	 * bit masks (k16/k32/k48) that __pmull_p8 consumes.
	 */
ENTRY(pmull_ghash_update_p64)
	vld1.64		{SHASH}, [r3]!
	vld1.64		{HH}, [r3]!
	vld1.64		{HH3-HH4}, [r3]

	// Precompute (hi ^ lo) halves used by the Karatsuba middle products
	veor		SHASH2_p64, SHASH_L, SHASH_H
	veor		SHASH2_H, HH_L, HH_H
	veor		HH34_L, HH3_L, HH3_H
	veor		HH34_H, HH4_L, HH4_H

	// MASK = 0xe1 << 57: the GHASH reduction polynomial constant
	vmov.i8		MASK, #0xe1
	vshl.u64	MASK, MASK, #57

	ghash_update	p64
ENDPROC(pmull_ghash_update_p64)

ENTRY(pmull_ghash_update_p8)
	vld1.64		{SHASH}, [r3]
	veor		SHASH2_p8, SHASH_L, SHASH_H

	// Byte-rotated copies of SHASH, hoisted out of __pmull_p8
	vext.8		s1l, SHASH_L, SHASH_L, #1
	vext.8		s2l, SHASH_L, SHASH_L, #2
	vext.8		s3l, SHASH_L, SHASH_L, #3
	vext.8		s4l, SHASH_L, SHASH_L, #4
	vext.8		s1h, SHASH_H, SHASH_H, #1
	vext.8		s2h, SHASH_H, SHASH_H, #2
	vext.8		s3h, SHASH_H, SHASH_H, #3
	vext.8		s4h, SHASH_H, SHASH_H, #4

	vmov.i64	k16, #0xffff
	vmov.i64	k32, #0xffffffff
	vmov.i64	k48, #0xffffffffffff

	ghash_update	p8
ENDPROC(pmull_ghash_update_p8)
|
aixcc-public/challenge-001-exemplar-source
| 22,660
|
arch/arm/crypto/aes-neonbs-core.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Bit sliced AES using NEON instructions
*
* Copyright (C) 2017 Linaro Ltd.
* Author: Ard Biesheuvel <ard.biesheuvel@linaro.org>
*/
/*
* The algorithm implemented here is described in detail by the paper
* 'Faster and Timing-Attack Resistant AES-GCM' by Emilia Kaesper and
* Peter Schwabe (https://eprint.iacr.org/2009/129.pdf)
*
* This implementation is based primarily on the OpenSSL implementation
* for 32-bit ARM written by Andy Polyakov <appro@openssl.org>
*/
#include <linux/linkage.h>
#include <asm/assembler.h>

	.text
	.fpu		neon

	rounds		.req	ip
	bskey		.req	r4

	// d-register views of the q registers, so macros can address either
	// half of a 128-bit register by name
	q0l		.req	d0
	q0h		.req	d1
	q1l		.req	d2
	q1h		.req	d3
	q2l		.req	d4
	q2h		.req	d5
	q3l		.req	d6
	q3h		.req	d7
	q4l		.req	d8
	q4h		.req	d9
	q5l		.req	d10
	q5h		.req	d11
	q6l		.req	d12
	q6h		.req	d13
	q7l		.req	d14
	q7h		.req	d15
	q8l		.req	d16
	q8h		.req	d17
	q9l		.req	d18
	q9h		.req	d19
	q10l		.req	d20
	q10h		.req	d21
	q11l		.req	d22
	q11h		.req	d23
	q12l		.req	d24
	q12h		.req	d25
	q13l		.req	d26
	q13h		.req	d27
	q14l		.req	d28
	q14h		.req	d29
	q15l		.req	d30
	q15h		.req	d31

	// 128-bit table lookup built from two vtbl.8 ops; \tmp is required
	// when \out aliases \tbl, since the first vtbl would otherwise
	// overwrite the table before the second lookup.
	.macro		__tbl, out, tbl, in, tmp
	.ifc		\out, \tbl
	.ifb		\tmp
	.error		__tbl needs temp register if out == tbl
	.endif
	vmov		\tmp, \out
	.endif
	vtbl.8		\out\()l, {\tbl}, \in\()l
	.ifc		\out, \tbl
	vtbl.8		\out\()h, {\tmp}, \in\()h
	.else
	vtbl.8		\out\()h, {\tbl}, \in\()h
	.endif
	.endm

	// Load a 128-bit literal \sym into q register \out via two vldr's
	.macro		__ldr, out, sym
	vldr		\out\()l, \sym
	vldr		\out\()h, \sym + 8
	.endm
	// Linear basis change applied to the eight bit-sliced planes b0-b7
	// on the way INTO the shared GF(2^8) inversion (used by the 'sbox'
	// macro before inv_gf256).
	.macro		in_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
	veor		\b2, \b2, \b1
	veor		\b5, \b5, \b6
	veor		\b3, \b3, \b0
	veor		\b6, \b6, \b2
	veor		\b5, \b5, \b0
	veor		\b6, \b6, \b3
	veor		\b3, \b3, \b7
	veor		\b7, \b7, \b5
	veor		\b3, \b3, \b4
	veor		\b4, \b4, \b5
	veor		\b2, \b2, \b7
	veor		\b3, \b3, \b1
	veor		\b1, \b1, \b5
	.endm

	// Linear basis change applied on the way OUT of the inversion
	// (second half of the forward S-box).
	.macro		out_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
	veor		\b0, \b0, \b6
	veor		\b1, \b1, \b4
	veor		\b4, \b4, \b6
	veor		\b2, \b2, \b0
	veor		\b6, \b6, \b1
	veor		\b1, \b1, \b5
	veor		\b5, \b5, \b3
	veor		\b3, \b3, \b7
	veor		\b7, \b7, \b5
	veor		\b2, \b2, \b5
	veor		\b4, \b4, \b7
	.endm

	// Input basis change for the INVERSE S-box (used by 'inv_sbox');
	// note the permuted parameter order relative to in_bs_ch.
	.macro		inv_in_bs_ch, b6, b1, b2, b4, b7, b0, b3, b5
	veor		\b1, \b1, \b7
	veor		\b4, \b4, \b7
	veor		\b7, \b7, \b5
	veor		\b1, \b1, \b3
	veor		\b2, \b2, \b5
	veor		\b3, \b3, \b7
	veor		\b6, \b6, \b1
	veor		\b2, \b2, \b0
	veor		\b5, \b5, \b3
	veor		\b4, \b4, \b6
	veor		\b0, \b0, \b6
	veor		\b1, \b1, \b4
	.endm

	// Output basis change for the INVERSE S-box.
	.macro		inv_out_bs_ch, b6, b5, b0, b3, b7, b1, b4, b2
	veor		\b1, \b1, \b5
	veor		\b2, \b2, \b7
	veor		\b3, \b3, \b1
	veor		\b4, \b4, \b5
	veor		\b7, \b7, \b5
	veor		\b3, \b3, \b4
	veor		\b5, \b5, \b0
	veor		\b3, \b3, \b7
	veor		\b6, \b6, \b2
	veor		\b2, \b2, \b1
	veor		\b6, \b6, \b3
	veor		\b3, \b3, \b0
	veor		\b5, \b5, \b6
	.endm
	// Bit-sliced GF(2^4) multiply: (x0,x1) *= (y0,y1), using t0-t1 as
	// scratch (see the Kasper/Schwabe paper referenced at the top of
	// this file for the tower-field construction).
	.macro		mul_gf4, x0, x1, y0, y1, t0, t1
	veor		\t0, \y0, \y1
	vand		\t0, \t0, \x0
	veor		\x0, \x0, \x1
	vand		\t1, \x1, \y0
	vand		\x0, \x0, \y1
	veor		\x1, \t1, \t0
	veor		\x0, \x0, \t1
	.endm

	// Two GF(2^4) multiplies sharing one operand pair: (x0,x1) and
	// (x2,x3) are both multiplied, interleaved for pipelining.
	.macro		mul_gf4_n_gf4, x0, x1, y0, y1, t0, x2, x3, y2, y3, t1
	veor		\t0, \y0, \y1
	veor		\t1, \y2, \y3
	vand		\t0, \t0, \x0
	vand		\t1, \t1, \x2
	veor		\x0, \x0, \x1
	veor		\x2, \x2, \x3
	vand		\x1, \x1, \y0
	vand		\x3, \x3, \y2
	vand		\x0, \x0, \y1
	vand		\x2, \x2, \y3
	veor		\x1, \x1, \x0
	veor		\x2, \x2, \x3
	veor		\x0, \x0, \t0
	veor		\x3, \x3, \t1
	.endm

	// GF(2^16) multiply of two values (x0..x7) * (y0..y3 twice), built
	// Karatsuba-style from the GF(2^4) multiplies above; clobbers y0-y1.
	.macro		mul_gf16_2, x0, x1, x2, x3, x4, x5, x6, x7, \
				    y0, y1, y2, y3, t0, t1, t2, t3
	veor		\t0, \x0, \x2
	veor		\t1, \x1, \x3
	mul_gf4  	\x0, \x1, \y0, \y1, \t2, \t3
	veor		\y0, \y0, \y2
	veor		\y1, \y1, \y3
	mul_gf4_n_gf4	\t0, \t1, \y0, \y1, \t3, \x2, \x3, \y2, \y3, \t2
	veor		\x0, \x0, \t0
	veor		\x2, \x2, \t0
	veor		\x1, \x1, \t1
	veor		\x3, \x3, \t1
	veor		\t0, \x4, \x6
	veor		\t1, \x5, \x7
	mul_gf4_n_gf4	\t0, \t1, \y0, \y1, \t3, \x6, \x7, \y2, \y3, \t2
	veor		\y0, \y0, \y2
	veor		\y1, \y1, \y3
	mul_gf4  	\x4, \x5, \y0, \y1, \t2, \t3
	veor		\x4, \x4, \t0
	veor		\x6, \x6, \t0
	veor		\x5, \x5, \t1
	veor		\x7, \x7, \t1
	.endm
	// Bit-sliced GF(2^8) inversion of the eight planes x0-x7, the
	// nonlinear core shared by both the forward and inverse S-box.
	// t0-t3 and s0-s3 are scratch.
	.macro		inv_gf256, x0, x1, x2, x3, x4, x5, x6, x7, \
				   t0, t1, t2, t3, s0, s1, s2, s3
	veor		\t3, \x4, \x6
	veor		\t0, \x5, \x7
	veor		\t1, \x1, \x3
	veor		\s1, \x7, \x6
	veor		\s0, \x0, \x2
	veor		\s3, \t3, \t0
	vorr		\t2, \t0, \t1
	vand		\s2, \t3, \s0
	vorr		\t3, \t3, \s0
	veor		\s0, \s0, \t1
	vand		\t0, \t0, \t1
	veor		\t1, \x3, \x2
	vand		\s3, \s3, \s0
	vand		\s1, \s1, \t1
	veor		\t1, \x4, \x5
	veor		\s0, \x1, \x0
	veor		\t3, \t3, \s1
	veor		\t2, \t2, \s1
	vand		\s1, \t1, \s0
	vorr		\t1, \t1, \s0
	veor		\t3, \t3, \s3
	veor		\t0, \t0, \s1
	veor		\t2, \t2, \s2
	veor		\t1, \t1, \s3
	veor		\t0, \t0, \s2
	vand		\s0, \x7, \x3
	veor		\t1, \t1, \s2
	vand		\s1, \x6, \x2
	vand		\s2, \x5, \x1
	vorr		\s3, \x4, \x0
	veor		\t3, \t3, \s0
	veor		\t1, \t1, \s2
	veor		\s0, \t0, \s3
	veor		\t2, \t2, \s1
	vand		\s2, \t3, \t1
	veor		\s1, \t2, \s2
	veor		\s3, \s0, \s2
	vbsl		\s1, \t1, \s0
	vmvn		\t0, \s0
	vbsl		\s0, \s1, \s3
	vbsl		\t0, \s1, \s3
	vbsl		\s3, \t3, \t2
	veor		\t3, \t3, \t2
	vand		\s2, \s0, \s3
	veor		\t1, \t1, \t0
	veor		\s2, \s2, \t3
	// Finish by multiplying the original input by the computed
	// GF(2^16)-inverse components.
	mul_gf16_2	\x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \
			\s3, \s2, \s1, \t1, \s0, \t0, \t2, \t3
	.endm

	// Forward AES S-box on the bit-sliced planes:
	// input basis change -> GF(2^8) inversion -> output basis change.
	.macro		sbox, b0, b1, b2, b3, b4, b5, b6, b7, \
			      t0, t1, t2, t3, s0, s1, s2, s3
	in_bs_ch	\b0, \b1, \b2, \b3, \b4, \b5, \b6, \b7
	inv_gf256	\b6, \b5, \b0, \b3, \b7, \b1, \b4, \b2, \
			\t0, \t1, \t2, \t3, \s0, \s1, \s2, \s3
	out_bs_ch	\b7, \b1, \b4, \b2, \b6, \b5, \b0, \b3
	.endm

	// Inverse AES S-box; same structure with the inverse basis changes.
	.macro		inv_sbox, b0, b1, b2, b3, b4, b5, b6, b7, \
				  t0, t1, t2, t3, s0, s1, s2, s3
	inv_in_bs_ch	\b0, \b1, \b2, \b3, \b4, \b5, \b6, \b7
	inv_gf256	\b5, \b1, \b2, \b6, \b3, \b7, \b0, \b4, \
			\t0, \t1, \t2, \t3, \s0, \s1, \s2, \s3
	inv_out_bs_ch	\b3, \b7, \b0, \b4, \b5, \b1, \b2, \b6
	.endm
	// AddRoundKey + ShiftRows: XOR in eight 128-bit round-key planes
	// streamed from [bskey] (advancing the pointer), then permute each
	// plane's bytes via \mask using __tbl.
	.macro		shift_rows, x0, x1, x2, x3, x4, x5, x6, x7, \
				    t0, t1, t2, t3, mask
	vld1.8		{\t0-\t1}, [bskey, :256]!
	veor		\t0, \t0, \x0
	vld1.8		{\t2-\t3}, [bskey, :256]!
	veor		\t1, \t1, \x1
	__tbl		\x0, \t0, \mask
	veor		\t2, \t2, \x2
	__tbl		\x1, \t1, \mask
	vld1.8		{\t0-\t1}, [bskey, :256]!
	veor		\t3, \t3, \x3
	__tbl		\x2, \t2, \mask
	__tbl		\x3, \t3, \mask
	vld1.8		{\t2-\t3}, [bskey, :256]!
	veor		\t0, \t0, \x4
	veor		\t1, \t1, \x5
	__tbl		\x4, \t0, \mask
	veor		\t2, \t2, \x6
	__tbl		\x5, \t1, \mask
	veor		\t3, \t3, \x7
	__tbl		\x6, \t2, \mask
	__tbl		\x7, \t3, \mask
	.endm

	// Inverse ShiftRows: in-place byte permutation of each plane; no
	// round-key XOR here (key handling differs on the decrypt path).
	.macro		inv_shift_rows, x0, x1, x2, x3, x4, x5, x6, x7, \
					t0, t1, t2, t3, mask
	__tbl		\x0, \x0, \mask, \t0
	__tbl		\x1, \x1, \mask, \t1
	__tbl		\x2, \x2, \mask, \t2
	__tbl		\x3, \x3, \mask, \t3
	__tbl		\x4, \x4, \mask, \t0
	__tbl		\x5, \x5, \mask, \t1
	__tbl		\x6, \x6, \mask, \t2
	__tbl		\x7, \x7, \mask, \t3
	.endm

	// MixColumns on the bit-sliced planes, using 32-bit rotations
	// expressed as vext.8 #12 / #8.  With \inv set, emits the variant
	// used as the tail of inv_mix_cols (different output register
	// assignment).
	.macro		mix_cols, x0, x1, x2, x3, x4, x5, x6, x7, \
				  t0, t1, t2, t3, t4, t5, t6, t7, inv
	vext.8		\t0, \x0, \x0, #12
	vext.8		\t1, \x1, \x1, #12
	veor		\x0, \x0, \t0
	vext.8		\t2, \x2, \x2, #12
	veor		\x1, \x1, \t1
	vext.8		\t3, \x3, \x3, #12
	veor		\x2, \x2, \t2
	vext.8		\t4, \x4, \x4, #12
	veor		\x3, \x3, \t3
	vext.8		\t5, \x5, \x5, #12
	veor		\x4, \x4, \t4
	vext.8		\t6, \x6, \x6, #12
	veor		\x5, \x5, \t5
	vext.8		\t7, \x7, \x7, #12
	veor		\x6, \x6, \t6
	veor		\t1, \t1, \x0
	// NOTE(review): '.8' datatype suffix is redundant on veor (bitwise
	// op); kept as-is.
	veor.8		\x7, \x7, \t7
	vext.8		\x0, \x0, \x0, #8
	veor		\t2, \t2, \x1
	veor		\t0, \t0, \x7
	veor		\t1, \t1, \x7
	vext.8		\x1, \x1, \x1, #8
	veor		\t5, \t5, \x4
	veor		\x0, \x0, \t0
	veor		\t6, \t6, \x5
	veor		\x1, \x1, \t1
	vext.8		\t0, \x4, \x4, #8
	veor		\t4, \t4, \x3
	vext.8		\t1, \x5, \x5, #8
	veor		\t7, \t7, \x6
	vext.8		\x4, \x3, \x3, #8
	veor		\t3, \t3, \x2
	vext.8		\x5, \x7, \x7, #8
	veor		\t4, \t4, \x7
	vext.8		\x3, \x6, \x6, #8
	veor		\t3, \t3, \x7
	vext.8		\x6, \x2, \x2, #8
	veor		\x7, \t1, \t5
	.ifb		\inv
	veor		\x2, \t0, \t4
	veor		\x4, \x4, \t3
	veor		\x5, \x5, \t7
	veor		\x3, \x3, \t6
	veor		\x6, \x6, \t2
	.else
	veor		\t3, \t3, \x4
	veor		\x5, \x5, \t7
	veor		\x2, \x3, \t6
	veor		\x3, \t0, \t4
	veor		\x4, \x6, \t2
	vmov		\x6, \t3
	.endif
	.endm
// inv_mix_cols: AES InvMixColumns on eight bit-sliced state planes
// \x0..\x7, using \t0..\t7 as scratch. First XORs in a 128-byte round
// key read from [bskey] (four 32-byte loads), rewinding bskey by 224 so
// successive rounds walk the expanded key schedule backwards. It then
// folds in the byte-rotated (vext #8) planes per the inverse transform
// and finishes by invoking mix_cols with its trailing "inv" arg set to 1.
.macro inv_mix_cols, x0, x1, x2, x3, x4, x5, x6, x7, \
t0, t1, t2, t3, t4, t5, t6, t7
// add round key: 8 q-registers loaded from [bskey], one per bit plane
vld1.8 {\t0-\t1}, [bskey, :256]!
veor \x0, \x0, \t0
vld1.8 {\t2-\t3}, [bskey, :256]!
veor \x1, \x1, \t1
vld1.8 {\t4-\t5}, [bskey, :256]!
veor \x2, \x2, \t2
vld1.8 {\t6-\t7}, [bskey, :256]
sub bskey, bskey, #224 // step back one round (128 + 96 bytes)
veor \x3, \x3, \t3
veor \x4, \x4, \t4
veor \x5, \x5, \t5
veor \x6, \x6, \t6
veor \x7, \x7, \t7
// t_i = x_i ^ rot64(x_i): vext #8 swaps the two 64-bit halves
vext.8 \t0, \x0, \x0, #8
vext.8 \t6, \x6, \x6, #8
vext.8 \t7, \x7, \x7, #8
veor \t0, \t0, \x0
vext.8 \t1, \x1, \x1, #8
veor \t6, \t6, \x6
vext.8 \t2, \x2, \x2, #8
veor \t7, \t7, \x7
vext.8 \t3, \x3, \x3, #8
veor \t1, \t1, \x1
vext.8 \t4, \x4, \x4, #8
veor \t2, \t2, \x2
vext.8 \t5, \x5, \x5, #8
veor \t3, \t3, \x3
veor \t4, \t4, \x4
veor \t5, \t5, \x5
// cross-plane mixing (fixed wiring of the inverse transform)
veor \x0, \x0, \t6
veor \x1, \x1, \t6
veor \x2, \x2, \t0
veor \x4, \x4, \t2
veor \x3, \x3, \t1
veor \x1, \x1, \t7
veor \x2, \x2, \t7
veor \x4, \x4, \t6
veor \x5, \x5, \t3
veor \x3, \x3, \t6
veor \x6, \x6, \t4
veor \x4, \x4, \t7
veor \x5, \x5, \t7
veor \x7, \x7, \t5
// forward MixColumns with inv=1 completes the inverse operation
mix_cols \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \
\t0, \t1, \t2, \t3, \t4, \t5, \t6, \t7, 1
.endm
// swapmove_2x: classic bit-matrix transpose step, applied to two
// register pairs at once. For each pair (a, b) it exchanges the bit
// groups selected by \mask between a and b, with b's bits taken \n
// positions higher:  t = (b >> n ^ a) & mask;  a ^= t;  b ^= t << n.
// \t0/\t1 are scratch.
.macro swapmove_2x, a0, b0, a1, b1, n, mask, t0, t1
vshr.u64 \t0, \b0, #\n
vshr.u64 \t1, \b1, #\n
veor \t0, \t0, \a0
veor \t1, \t1, \a1
vand \t0, \t0, \mask
vand \t1, \t1, \mask
veor \a0, \a0, \t0
vshl.s64 \t0, \t0, #\n // shift the delta back up for the b side
veor \a1, \a1, \t1
vshl.s64 \t1, \t1, #\n
veor \b0, \b0, \t0
veor \b1, \b1, \t1
.endm
// bitslice: transpose 8 q-registers between "8 blocks of bytes" layout
// and "8 bit planes" layout by three rounds of swapmove_2x with masks
// 0x55 (swap at distance 1), 0x33 (distance 2) and 0x0f (distance 4).
// The transform is an involution, so the same macro converts in both
// directions. \t0..\t3 are scratch.
.macro bitslice, x7, x6, x5, x4, x3, x2, x1, x0, t0, t1, t2, t3
vmov.i8 \t0, #0x55
vmov.i8 \t1, #0x33
swapmove_2x \x0, \x1, \x2, \x3, 1, \t0, \t2, \t3
swapmove_2x \x4, \x5, \x6, \x7, 1, \t0, \t2, \t3
vmov.i8 \t0, #0x0f
swapmove_2x \x0, \x2, \x1, \x3, 2, \t1, \t2, \t3
swapmove_2x \x4, \x6, \x5, \x7, 2, \t1, \t2, \t3
swapmove_2x \x0, \x4, \x1, \x5, 4, \t0, \t2, \t3
swapmove_2x \x2, \x6, \x3, \x7, 4, \t0, \t2, \t3
.endm
.align 4
// M0: byte-permutation (vtbl index) vector applied to each round key
M0: .quad 0x02060a0e03070b0f, 0x0004080c0105090d
/*
 * void aesbs_convert_key(u8 out[], u32 const rk[], int rounds)
 *
 * Convert a conventional AES key schedule (rk, r1) into the bit-sliced
 * representation used by aesbs_encrypt8/aesbs_decrypt8 and store it at
 * out (r0). Round 0 is copied verbatim; each subsequent round key is
 * byte-permuted via M0, split into eight one-bit planes with vtst
 * against the masks 0x01..0x80, and planes 0, 1, 5 and 6 are inverted.
 * The final round key is XORed with 0x63 before being stored.
 */
ENTRY(aesbs_convert_key)
vld1.32 {q7}, [r1]! // load round 0 key
vld1.32 {q15}, [r1]! // load round 1 key
vmov.i8 q8, #0x01 // bit masks
vmov.i8 q9, #0x02
vmov.i8 q10, #0x04
vmov.i8 q11, #0x08
vmov.i8 q12, #0x10
vmov.i8 q13, #0x20
__ldr q14, M0
sub r2, r2, #1
vst1.8 {q7}, [r0, :128]! // save round 0 key
.Lkey_loop:
__tbl q7, q15, q14 // permute round key bytes via M0
vmov.i8 q6, #0x40
vmov.i8 q15, #0x80
// extract bit plane i of each byte into q_i (all-ones where bit set)
vtst.8 q0, q7, q8
vtst.8 q1, q7, q9
vtst.8 q2, q7, q10
vtst.8 q3, q7, q11
vtst.8 q4, q7, q12
vtst.8 q5, q7, q13
vtst.8 q6, q7, q6
vtst.8 q7, q7, q15
vld1.32 {q15}, [r1]! // load next round key
vmvn q0, q0 // invert planes 0, 1, 5, 6
vmvn q1, q1
vmvn q5, q5
vmvn q6, q6
subs r2, r2, #1
vst1.8 {q0-q1}, [r0, :256]! // store 128 bytes per round
vst1.8 {q2-q3}, [r0, :256]!
vst1.8 {q4-q5}, [r0, :256]!
vst1.8 {q6-q7}, [r0, :256]!
bne .Lkey_loop
vmov.i8 q7, #0x63 // compose .L63
veor q15, q15, q7
vst1.8 {q15}, [r0, :128] // store final (last-round) key
bx lr
ENDPROC(aesbs_convert_key)
.align 4
// M0SR: combined M0 byte permutation + ShiftRows for the first round
M0SR: .quad 0x0a0e02060f03070b, 0x0004080c05090d01
// aesbs_encrypt8: encrypt 8 AES blocks held in q0-q7 in place.
// In:  bskey = bit-sliced key schedule (from aesbs_convert_key),
//      rounds = number of rounds, plaintext blocks in q0-q7.
// Out: ciphertext in q0-q7 (note the permuted output order q0, q1,
//      q4, q6, q3, q7, q2, q5 produced by the bitsliced rounds).
// Clobbers q8-q15, rounds; internal helper, not a public entry point.
aesbs_encrypt8:
vld1.8 {q9}, [bskey, :128]! // round 0 key
__ldr q8, M0SR
veor q10, q0, q9 // xor with round0 key
veor q11, q1, q9
__tbl q0, q10, q8 // then permute bytes via M0SR
veor q12, q2, q9
__tbl q1, q11, q8
veor q13, q3, q9
__tbl q2, q12, q8
veor q14, q4, q9
__tbl q3, q13, q8
veor q15, q5, q9
__tbl q4, q14, q8
veor q10, q6, q9
__tbl q5, q15, q8
veor q11, q7, q9
__tbl q6, q10, q8
__tbl q7, q11, q8
bitslice q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11
sub rounds, rounds, #1
b .Lenc_sbox // first round: SubBytes directly
.align 5
// SR:   ShiftRows permutation used for the middle rounds
// SRM0: ShiftRows combined with the inverse M0 shuffle, used so the
//       final round lands back in normal byte order
SR: .quad 0x0504070600030201, 0x0f0e0d0c0a09080b
SRM0: .quad 0x0304090e00050a0f, 0x01060b0c0207080d
.Lenc_last:
__ldr q12, SRM0
.Lenc_loop:
shift_rows q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12
.Lenc_sbox:
sbox q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12, \
q13, q14, q15
subs rounds, rounds, #1
bcc .Lenc_done // past the last round: finish up
mix_cols q0, q1, q4, q6, q3, q7, q2, q5, q8, q9, q10, q11, q12, \
q13, q14, q15
beq .Lenc_last // rounds==0: next iteration is the last
__ldr q12, SR
b .Lenc_loop
.Lenc_done:
vld1.8 {q12}, [bskey, :128] // last round key
bitslice q0, q1, q4, q6, q3, q7, q2, q5, q8, q9, q10, q11
veor q0, q0, q12 // final AddRoundKey on all 8 blocks
veor q1, q1, q12
veor q4, q4, q12
veor q6, q6, q12
veor q3, q3, q12
veor q7, q7, q12
veor q2, q2, q12
veor q5, q5, q12
bx lr
ENDPROC(aesbs_encrypt8)
.align 4
// M0ISR: M0 byte permutation combined with inverse ShiftRows
M0ISR: .quad 0x0a0e0206070b0f03, 0x0004080c0d010509
// aesbs_decrypt8: decrypt 8 AES blocks held in q0-q7 in place.
// Mirror image of aesbs_encrypt8: starts from the END of the key
// schedule (bskey += rounds * 128, then backs up) and walks it in
// reverse inside inv_mix_cols. Output order is q0, q1, q6, q4, q2,
// q7, q3, q5. Internal helper, not a public entry point.
aesbs_decrypt8:
add bskey, bskey, rounds, lsl #7 // seek to end: rounds * 128 bytes
sub bskey, bskey, #112
vld1.8 {q9}, [bskey, :128] // round 0 key
sub bskey, bskey, #128
__ldr q8, M0ISR
veor q10, q0, q9 // xor with round0 key
veor q11, q1, q9
__tbl q0, q10, q8 // then permute bytes via M0ISR
veor q12, q2, q9
__tbl q1, q11, q8
veor q13, q3, q9
__tbl q2, q12, q8
veor q14, q4, q9
__tbl q3, q13, q8
veor q15, q5, q9
__tbl q4, q14, q8
veor q10, q6, q9
__tbl q5, q15, q8
veor q11, q7, q9
__tbl q6, q10, q8
__tbl q7, q11, q8
bitslice q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11
sub rounds, rounds, #1
b .Ldec_sbox
.align 5
// ISR:   inverse ShiftRows for the middle rounds
// ISRM0: inverse ShiftRows combined with the inverse M0 shuffle for
//        the final round
ISR: .quad 0x0504070602010003, 0x0f0e0d0c080b0a09
ISRM0: .quad 0x01040b0e0205080f, 0x0306090c00070a0d
.Ldec_last:
__ldr q12, ISRM0
.Ldec_loop:
inv_shift_rows q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12
.Ldec_sbox:
inv_sbox q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12, \
q13, q14, q15
subs rounds, rounds, #1
bcc .Ldec_done
inv_mix_cols q0, q1, q6, q4, q2, q7, q3, q5, q8, q9, q10, q11, q12, \
q13, q14, q15
beq .Ldec_last
__ldr q12, ISR
b .Ldec_loop
.Ldec_done:
add bskey, bskey, #112
vld1.8 {q12}, [bskey, :128] // last round key
bitslice q0, q1, q6, q4, q2, q7, q3, q5, q8, q9, q10, q11
veor q0, q0, q12 // final AddRoundKey on all 8 blocks
veor q1, q1, q12
veor q6, q6, q12
veor q4, q4, q12
veor q2, q2, q12
veor q7, q7, q12
veor q3, q3, q12
veor q5, q5, q12
bx lr
ENDPROC(aesbs_decrypt8)
/*
 * aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
 *		     int blocks)
 * aesbs_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
 *		     int blocks)
 */
// __ecb_crypt: shared body for the two ECB entry points. Processes up
// to 8 blocks per iteration through \do8 (aesbs_encrypt8/decrypt8).
// The "computed goto" trick: when fewer than 8 blocks remain, ip is
// moved back (blocks_remaining * 4 bytes = one vld1/vst1 instruction
// each) so the jump lands partway into the load/store ladder and only
// that many registers are loaded/stored. \o0..\o7 name the q-registers
// in the (permuted) output order produced by \do8.
.macro __ecb_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7
push {r4-r6, lr}
ldr r5, [sp, #16] // number of blocks
99: adr ip, 0f
and lr, r5, #7
cmp r5, #8
sub ip, ip, lr, lsl #2
movlt pc, ip // computed goto if blocks < 8
vld1.8 {q0}, [r1]!
vld1.8 {q1}, [r1]!
vld1.8 {q2}, [r1]!
vld1.8 {q3}, [r1]!
vld1.8 {q4}, [r1]!
vld1.8 {q5}, [r1]!
vld1.8 {q6}, [r1]!
vld1.8 {q7}, [r1]!
0: mov bskey, r2
mov rounds, r3
bl \do8
adr ip, 1f
and lr, r5, #7
cmp r5, #8
sub ip, ip, lr, lsl #2
movlt pc, ip // computed goto if blocks < 8
vst1.8 {\o0}, [r0]!
vst1.8 {\o1}, [r0]!
vst1.8 {\o2}, [r0]!
vst1.8 {\o3}, [r0]!
vst1.8 {\o4}, [r0]!
vst1.8 {\o5}, [r0]!
vst1.8 {\o6}, [r0]!
vst1.8 {\o7}, [r0]!
1: subs r5, r5, #8
bgt 99b // more full/partial batches remain
pop {r4-r6, pc}
.endm
.align 4
// ECB encrypt: 8-block batches via aesbs_encrypt8; output register
// order matches the permutation that aesbs_encrypt8 produces.
ENTRY(aesbs_ecb_encrypt)
__ecb_crypt aesbs_encrypt8, q0, q1, q4, q6, q3, q7, q2, q5
ENDPROC(aesbs_ecb_encrypt)
.align 4
// ECB decrypt: 8-block batches via aesbs_decrypt8; output register
// order matches the permutation that aesbs_decrypt8 produces.
ENTRY(aesbs_ecb_decrypt)
__ecb_crypt aesbs_decrypt8, q0, q1, q6, q4, q2, q7, q3, q5
ENDPROC(aesbs_ecb_decrypt)
/*
 * aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
 *		     int rounds, int blocks, u8 iv[])
 *
 * CBC decryption, up to 8 blocks per batch. The input is read twice:
 * once (via lr, leaving r1 untouched) as ciphertext to decrypt, and
 * once more (via r1) because each ciphertext block is also the IV for
 * the following block's XOR. q8 holds the chaining IV; the last
 * ciphertext block of the batch becomes the next batch's IV and is
 * written back to iv (r6). Uses the same computed-goto partial-batch
 * trick as __ecb_crypt.
 */
.align 4
ENTRY(aesbs_cbc_decrypt)
mov ip, sp
push {r4-r6, lr}
ldm ip, {r5-r6} // load args 4-5
99: adr ip, 0f
and lr, r5, #7
cmp r5, #8
sub ip, ip, lr, lsl #2
mov lr, r1 // read via lr so r1 still points at the batch
movlt pc, ip // computed goto if blocks < 8
vld1.8 {q0}, [lr]!
vld1.8 {q1}, [lr]!
vld1.8 {q2}, [lr]!
vld1.8 {q3}, [lr]!
vld1.8 {q4}, [lr]!
vld1.8 {q5}, [lr]!
vld1.8 {q6}, [lr]!
vld1.8 {q7}, [lr]
0: mov bskey, r2
mov rounds, r3
bl aesbs_decrypt8
vld1.8 {q8}, [r6] // q8 = chaining IV
vmov q9, q8 // default all XOR values to the IV; partial
vmov q10, q8 // batches then skip some of the reloads below
vmov q11, q8
vmov q12, q8
vmov q13, q8
vmov q14, q8
vmov q15, q8
adr ip, 1f
and lr, r5, #7
cmp r5, #8
sub ip, ip, lr, lsl #2
movlt pc, ip // computed goto if blocks < 8
vld1.8 {q9}, [r1]! // reload ciphertext blocks 0..6 as the
vld1.8 {q10}, [r1]! // XOR inputs for blocks 1..7
vld1.8 {q11}, [r1]!
vld1.8 {q12}, [r1]!
vld1.8 {q13}, [r1]!
vld1.8 {q14}, [r1]!
vld1.8 {q15}, [r1]!
W(nop)
1: adr ip, 2f
sub ip, ip, lr, lsl #3 // 8 bytes (veor + vst1) per skipped block
movlt pc, ip // computed goto if blocks < 8
veor q0, q0, q8 // plaintext_i = D(ciphertext_i) ^ prev block
vst1.8 {q0}, [r0]!
veor q1, q1, q9
vst1.8 {q1}, [r0]!
veor q6, q6, q10
vst1.8 {q6}, [r0]!
veor q4, q4, q11
vst1.8 {q4}, [r0]!
veor q2, q2, q12
vst1.8 {q2}, [r0]!
veor q7, q7, q13
vst1.8 {q7}, [r0]!
veor q3, q3, q14
vst1.8 {q3}, [r0]!
veor q5, q5, q15
vld1.8 {q8}, [r1]! // load next round's iv
2: vst1.8 {q5}, [r0]!
subs r5, r5, #8
vst1.8 {q8}, [r6] // store next round's iv
bgt 99b
pop {r4-r6, pc}
ENDPROC(aesbs_cbc_decrypt)
// next_ctr: derive the next big-endian 128-bit counter block into \q.
// The running counter is kept LE in r7:r8:r9:r10 (r10 lowest); the
// macro stores the CURRENT value into \q, then increments the register
// quad with carry propagation, and byte-reverses \q to big-endian.
.macro next_ctr, q
vmov \q\()h, r9, r10
adds r10, r10, #1 // increment low word, propagate carries up
adcs r9, r9, #0
vmov \q\()l, r7, r8
adcs r8, r8, #0
adc r7, r7, #0
vrev32.8 \q, \q // convert to big-endian counter block
.endm
/*
 * aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
 *		     int rounds, int bytes, u8 ctr[])
 *
 * CTR mode keystream generation + XOR, up to 8 counter blocks (128
 * bytes) per batch. r5 counts BYTES, not blocks; the two computed
 * gotos skip unneeded next_ctr invocations (8 bytes each, hence the
 * lsl #1 / lsr #2 arithmetic) and unneeded load/XOR/store slots. A
 * trailing partial block is handled via .Lpermute_table: the keystream
 * block q5 is shifted with vtbl so that exactly the remaining bytes
 * line up, and (when fewer than 16 bytes remain in total) vbif merges
 * the result with the existing output bytes so nothing past the end
 * is clobbered.
 */
ENTRY(aesbs_ctr_encrypt)
mov ip, sp
push {r4-r10, lr}
ldm ip, {r5, r6} // load args 4-5
vld1.8 {q0}, [r6] // load counter
vrev32.8 q1, q0 // LE copy for arithmetic in core regs
vmov r9, r10, d3
vmov r7, r8, d2
adds r10, r10, #1 // pre-increment: q0 already holds block 0
adcs r9, r9, #0
adcs r8, r8, #0
adc r7, r7, #0
99: vmov q1, q0
sub lr, r5, #1
vmov q2, q0
adr ip, 0f
vmov q3, q0
and lr, lr, #112 // number of extra 16-byte blocks needed
vmov q4, q0
cmp r5, #112
vmov q5, q0
sub ip, ip, lr, lsl #1 // skip (7 - blocks) next_ctr expansions:
vmov q6, q0
add ip, ip, lr, lsr #2 // each expansion is 8 bytes of code
vmov q7, q0
movle pc, ip // computed goto if bytes < 112
next_ctr q1
next_ctr q2
next_ctr q3
next_ctr q4
next_ctr q5
next_ctr q6
next_ctr q7
0: mov bskey, r2
mov rounds, r3
bl aesbs_encrypt8 // q0..q7 -> keystream (permuted order)
adr ip, 1f
sub lr, r5, #1
cmp r5, #128
bic lr, lr, #15
ands r4, r5, #15 // preserves C flag
teqcs r5, r5 // set Z flag if not last iteration
sub ip, ip, lr, lsr #2
rsb r4, r4, #16 // r4 = bytes to back up for partial block
movcc pc, ip // computed goto if bytes < 128
vld1.8 {q8}, [r1]!
vld1.8 {q9}, [r1]!
vld1.8 {q10}, [r1]!
vld1.8 {q11}, [r1]!
vld1.8 {q12}, [r1]!
vld1.8 {q13}, [r1]!
vld1.8 {q14}, [r1]!
1: subne r1, r1, r4 // partial final block: rewind input ptr
vld1.8 {q15}, [r1]!
add ip, ip, #2f - 1b
veor q0, q0, q8 // XOR keystream into the data
veor q1, q1, q9
veor q4, q4, q10
veor q6, q6, q11
veor q3, q3, q12
veor q7, q7, q13
veor q2, q2, q14
bne 3f // partial block needs the permute fixup
veor q5, q5, q15
movcc pc, ip // computed goto if bytes < 128
vst1.8 {q0}, [r0]!
vst1.8 {q1}, [r0]!
vst1.8 {q4}, [r0]!
vst1.8 {q6}, [r0]!
vst1.8 {q3}, [r0]!
vst1.8 {q7}, [r0]!
vst1.8 {q2}, [r0]!
2: subne r0, r0, r4 // partial final block: rewind output ptr
vst1.8 {q5}, [r0]!
next_ctr q0
subs r5, r5, #128
bgt 99b
vst1.8 {q0}, [r6] // write back updated counter
pop {r4-r10, pc}
3: adr lr, .Lpermute_table + 16
cmp r5, #16 // Z flag remains cleared
sub lr, lr, r4
vld1.8 {q8-q9}, [lr]
vtbl.8 d16, {q5}, d16 // shift keystream down by r4 bytes
vtbl.8 d17, {q5}, d17
veor q5, q8, q15
bcc 4f // have to reload prev if R5 < 16
vtbx.8 d10, {q2}, d18 // splice in tail of the previous block
vtbx.8 d11, {q2}, d19
mov pc, ip // branch back to VST sequence
4: sub r0, r0, r4
vshr.s8 q9, q9, #7 // create mask for VBIF
vld1.8 {q8}, [r0] // reload
vbif q5, q8, q9 // keep existing bytes outside the mask
vst1.8 {q5}, [r0]
pop {r4-r10, pc}
ENDPROC(aesbs_ctr_encrypt)
.align 6
// .Lpermute_table: vtbl index window; loading at (+16 - r4) yields a
// shift-by-r4 byte permutation, with 0xff entries selecting zero.
.Lpermute_table:
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
.byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
// next_tweak: XTS tweak doubling in GF(2^128):
//   out = (in << 1) ^ (0x87 if the top bit of in was set else 0)
// \const holds the 0x87 feedback constant (in the low lane), \tmp is
// scratch; the vshr.s64 #63 arithmetic shift broadcasts each half's
// sign bit, and the vext #8 moves the high half's carry down to the
// low byte where the polynomial is folded in.
.macro next_tweak, out, in, const, tmp
vshr.s64 \tmp, \in, #63
vand \tmp, \tmp, \const
vadd.u64 \out, \in, \in // shift left by one (per 64-bit half)
vext.8 \tmp, \tmp, \tmp, #8
veor \out, \out, \tmp
.endm
/*
 * aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
 *		     int blocks, u8 iv[], int reorder_last_tweak)
 * aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
 *		     int blocks, u8 iv[], int reorder_last_tweak)
 */
// __xts_prepare8: load up to 8 input blocks (r1) into q0-q7, XOR each
// with its tweak, and spill the tweaks to the stack buffer at r4 (=sp)
// so __xts_crypt can XOR them back in after the cipher pass. Tweaks
// alternate between q14 and q12, advanced with next_tweak; the final
// tweak is written back to the iv buffer (r7). r6 = block count; the
// computed goto skips 32 bytes (vld1+next_tweak+veor+vst1) per absent
// block. r8 <= 0 requests swapping the last two tweaks — used for the
// ciphertext-stealing path, per the reorder_last_tweak argument.
.align 6
__xts_prepare8:
vld1.8 {q14}, [r7] // load iv
vmov.i32 d30, #0x87 // compose tweak mask vector
vmovl.u32 q15, d30
vshr.u64 d30, d31, #7
vmov q12, q14
adr ip, 0f
and r4, r6, #7
cmp r6, #8
sub ip, ip, r4, lsl #5 // 32 bytes of code per skipped block
mov r4, sp
movlt pc, ip // computed goto if blocks < 8
vld1.8 {q0}, [r1]!
next_tweak q12, q14, q15, q13
veor q0, q0, q14
vst1.8 {q14}, [r4, :128]! // spill tweak for the post-cipher XOR
vld1.8 {q1}, [r1]!
next_tweak q14, q12, q15, q13
veor q1, q1, q12
vst1.8 {q12}, [r4, :128]!
vld1.8 {q2}, [r1]!
next_tweak q12, q14, q15, q13
veor q2, q2, q14
vst1.8 {q14}, [r4, :128]!
vld1.8 {q3}, [r1]!
next_tweak q14, q12, q15, q13
veor q3, q3, q12
vst1.8 {q12}, [r4, :128]!
vld1.8 {q4}, [r1]!
next_tweak q12, q14, q15, q13
veor q4, q4, q14
vst1.8 {q14}, [r4, :128]!
vld1.8 {q5}, [r1]!
next_tweak q14, q12, q15, q13
veor q5, q5, q12
vst1.8 {q12}, [r4, :128]!
vld1.8 {q6}, [r1]!
next_tweak q12, q14, q15, q13
veor q6, q6, q14
vst1.8 {q14}, [r4, :128]!
vld1.8 {q7}, [r1]!
next_tweak q14, q12, q15, q13
THUMB( itt le )
W(cmple) r8, #0 // reorder the final two tweaks?
ble 1f
0: veor q7, q7, q12
vst1.8 {q12}, [r4, :128]
vst1.8 {q14}, [r7] // store next iv
bx lr
1: vswp q12, q14 // swap last tweak pair, then proceed
b 0b
ENDPROC(__xts_prepare8)
// __xts_crypt: shared body for the XTS entry points. Carves a 16-byte
// aligned 128-byte stack buffer for the 8 tweaks, lets __xts_prepare8
// load+pre-whiten the blocks, runs \do8 (aesbs_encrypt8/decrypt8),
// then XORs the saved tweaks back in and stores the results. r8 is
// set from ip (prepared by the entry point) as the reorder_last_tweak
// flag consumed by __xts_prepare8. Computed gotos handle partial
// batches as elsewhere in this file.
.macro __xts_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7
push {r4-r8, lr}
mov r5, sp // preserve sp
ldrd r6, r7, [sp, #24] // get blocks and iv args
rsb r8, ip, #1 // r8 = 1 - reorder_last_tweak
sub ip, sp, #128 // make room for 8x tweak
bic ip, ip, #0xf // align sp to 16 bytes
mov sp, ip
99: bl __xts_prepare8
mov bskey, r2
mov rounds, r3
bl \do8
adr ip, 0f
and lr, r6, #7
cmp r6, #8
sub ip, ip, lr, lsl #2
mov r4, sp
movlt pc, ip // computed goto if blocks < 8
vld1.8 {q8}, [r4, :128]! // reload the spilled tweaks
vld1.8 {q9}, [r4, :128]!
vld1.8 {q10}, [r4, :128]!
vld1.8 {q11}, [r4, :128]!
vld1.8 {q12}, [r4, :128]!
vld1.8 {q13}, [r4, :128]!
vld1.8 {q14}, [r4, :128]!
vld1.8 {q15}, [r4, :128]
0: adr ip, 1f
sub ip, ip, lr, lsl #3 // 8 bytes (veor + vst1) per skipped block
movlt pc, ip // computed goto if blocks < 8
veor \o0, \o0, q8 // post-whiten with the same tweak
vst1.8 {\o0}, [r0]!
veor \o1, \o1, q9
vst1.8 {\o1}, [r0]!
veor \o2, \o2, q10
vst1.8 {\o2}, [r0]!
veor \o3, \o3, q11
vst1.8 {\o3}, [r0]!
veor \o4, \o4, q12
vst1.8 {\o4}, [r0]!
veor \o5, \o5, q13
vst1.8 {\o5}, [r0]!
veor \o6, \o6, q14
vst1.8 {\o6}, [r0]!
veor \o7, \o7, q15
vst1.8 {\o7}, [r0]!
1: subs r6, r6, #8
bgt 99b
mov sp, r5 // restore original stack pointer
pop {r4-r8, pc}
.endm
// XTS encrypt: encryption never needs the last-tweak reorder, so the
// flag passed to __xts_crypt (via ip) is hard-wired to zero.
ENTRY(aesbs_xts_encrypt)
mov ip, #0 // never reorder final tweak
__xts_crypt aesbs_encrypt8, q0, q1, q4, q6, q3, q7, q2, q5
ENDPROC(aesbs_xts_encrypt)
// XTS decrypt: the reorder_last_tweak stack argument (7th C argument)
// selects whether the final two tweaks are swapped (ciphertext
// stealing support); it is forwarded to __xts_crypt via ip.
ENTRY(aesbs_xts_decrypt)
ldr ip, [sp, #8] // reorder final tweak?
__xts_crypt aesbs_decrypt8, q0, q1, q6, q4, q2, q7, q3, q5
ENDPROC(aesbs_xts_decrypt)
/*
 * ===== end of arch/arm/crypto/aes-neonbs-core.S =====
 * (source: repo aixcc-public/challenge-001-exemplar-source, 6854-byte
 *  next file follows)
 * ===== begin arch/arm/crypto/crc32-ce-core.S =====
 */
/*
* Accelerated CRC32(C) using ARM CRC, NEON and Crypto Extensions instructions
*
* Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 only,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see http://www.gnu.org/licenses
*
* Please visit http://www.xyratex.com/contact if you need additional
* information or have any questions.
*
* GPL HEADER END
*/
/*
* Copyright 2012 Xyratex Technology Limited
*
* Using hardware provided PCLMULQDQ instruction to accelerate the CRC32
* calculation.
* CRC32 polynomial:0x04c11db7(BE)/0xEDB88320(LE)
* PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found
* at:
* https://www.intel.com/products/processor/manuals/
* Intel(R) 64 and IA-32 Architectures Software Developer's Manual
* Volume 2B: Instruction Set Reference, N-Z
*
* Authors: Gregory Prestas <Gregory_Prestas@us.xyratex.com>
* Alexander Boyko <Alexander_Boyko@xyratex.com>
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
.align 6
.arch armv8-a
.arch_extension crc
.fpu crypto-neon-fp-armv8
// Folding/reduction constant tables for the PMULL-based CRC paths.
// Each table holds: R1/R2 (4x128-bit fold), R3/R4 (128-bit fold),
// R5 + 32-bit mask (64->32 fold), and the Barrett reduction pair.
.Lcrc32_constants:
/*
 * [x4*128+32 mod P(x) << 32)]'  << 1   = 0x154442bd4
 * #define CONSTANT_R1  0x154442bd4LL
 *
 * [(x4*128-32 mod P(x) << 32)]' << 1   = 0x1c6e41596
 * #define CONSTANT_R2  0x1c6e41596LL
 */
.quad 0x0000000154442bd4
.quad 0x00000001c6e41596
/*
 * [(x128+32 mod P(x) << 32)]'   << 1   = 0x1751997d0
 * #define CONSTANT_R3  0x1751997d0LL
 *
 * [(x128-32 mod P(x) << 32)]'   << 1   = 0x0ccaa009e
 * #define CONSTANT_R4  0x0ccaa009eLL
 */
.quad 0x00000001751997d0
.quad 0x00000000ccaa009e
/*
 * [(x64 mod P(x) << 32)]'       << 1   = 0x163cd6124
 * #define CONSTANT_R5  0x163cd6124LL
 */
.quad 0x0000000163cd6124
.quad 0x00000000FFFFFFFF
/*
 * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL
 *
 * Barrett Reduction constant (u64`) = u` = (x**64 / P(x))`
 *                                       = 0x1F7011641LL
 * #define CONSTANT_RU  0x1F7011641LL
 */
.quad 0x00000001DB710641
.quad 0x00000001F7011641
// Same table layout for CRC32C (Castagnoli); constants are presumably
// derived from polynomial 0x1EDC6F41 — not derivable from this file.
.Lcrc32c_constants:
.quad 0x00000000740eef02
.quad 0x000000009e4addf8
.quad 0x00000000f20c0dfe
.quad 0x000000014cd00bd6
.quad 0x00000000dd45aab8
.quad 0x00000000FFFFFFFF
.quad 0x0000000105ec76f0
.quad 0x00000000dea713f1
// Register aliases for the PMULL CRC routine below.
dCONSTANTl .req d0
dCONSTANTh .req d1
qCONSTANT .req q0
BUF .req r0
LEN .req r1
CRC .req r2
qzr .req q9
/**
 * Calculate crc32
 * BUF - buffer
 * LEN - sizeof buffer (multiple of 16 bytes), LEN should be > 63
 * CRC - initial crc32
 * return: crc32 in r0
 * uint crc32_pmull_le(unsigned char const *buffer,
 *                     size_t len, uint crc32)
 *
 * crc32c_pmull_le is the same code with the CRC32C constant table;
 * the two entry points share everything from label 0 onward.
 * Structure: fold 64 bytes/iteration with vmull.p64 (loop_64), then
 * collapse the four 128-bit lanes into one (less_64), fold any
 * remaining 16-byte chunks (loop_16), and finish with a 64->32-bit
 * fold plus Barrett reduction.
 */
ENTRY(crc32_pmull_le)
adr r3, .Lcrc32_constants
b 0f
ENTRY(crc32c_pmull_le)
adr r3, .Lcrc32c_constants
0: bic LEN, LEN, #15 // round length down to 16-byte multiple
vld1.8 {q1-q2}, [BUF, :128]!
vld1.8 {q3-q4}, [BUF, :128]!
vmov.i8 qzr, #0
vmov.i8 qCONSTANT, #0
vmov.32 dCONSTANTl[0], CRC
veor.8 d2, d2, dCONSTANTl // mix initial CRC into first word
sub LEN, LEN, #0x40
cmp LEN, #0x40
blt less_64
vld1.64 {qCONSTANT}, [r3] // R1/R2 fold constants
loop_64: /* 64 bytes Full cache line folding */
sub LEN, LEN, #0x40
vmull.p64 q5, d3, dCONSTANTh
vmull.p64 q6, d5, dCONSTANTh
vmull.p64 q7, d7, dCONSTANTh
vmull.p64 q8, d9, dCONSTANTh
vmull.p64 q1, d2, dCONSTANTl
vmull.p64 q2, d4, dCONSTANTl
vmull.p64 q3, d6, dCONSTANTl
vmull.p64 q4, d8, dCONSTANTl
veor.8 q1, q1, q5
vld1.8 {q5}, [BUF, :128]!
veor.8 q2, q2, q6
vld1.8 {q6}, [BUF, :128]!
veor.8 q3, q3, q7
vld1.8 {q7}, [BUF, :128]!
veor.8 q4, q4, q8
vld1.8 {q8}, [BUF, :128]!
veor.8 q1, q1, q5 // fold in the next 64 input bytes
veor.8 q2, q2, q6
veor.8 q3, q3, q7
veor.8 q4, q4, q8
cmp LEN, #0x40
bge loop_64
less_64: /* Folding cache line into 128bit */
vldr dCONSTANTl, [r3, #16] // switch to R3/R4 constants
vldr dCONSTANTh, [r3, #24]
vmull.p64 q5, d3, dCONSTANTh // q1 = fold(q1) ^ q2
vmull.p64 q1, d2, dCONSTANTl
veor.8 q1, q1, q5
veor.8 q1, q1, q2
vmull.p64 q5, d3, dCONSTANTh // q1 = fold(q1) ^ q3
vmull.p64 q1, d2, dCONSTANTl
veor.8 q1, q1, q5
veor.8 q1, q1, q3
vmull.p64 q5, d3, dCONSTANTh // q1 = fold(q1) ^ q4
vmull.p64 q1, d2, dCONSTANTl
veor.8 q1, q1, q5
veor.8 q1, q1, q4
teq LEN, #0
beq fold_64
loop_16: /* Folding rest buffer into 128bit */
subs LEN, LEN, #0x10
vld1.8 {q2}, [BUF, :128]!
vmull.p64 q5, d3, dCONSTANTh
vmull.p64 q1, d2, dCONSTANTl
veor.8 q1, q1, q5
veor.8 q1, q1, q2
bne loop_16
fold_64:
/* perform the last 64 bit fold, also adds 32 zeroes
 * to the input stream */
vmull.p64 q2, d2, dCONSTANTh
vext.8 q1, q1, qzr, #8
veor.8 q1, q1, q2
/* final 32-bit fold */
vldr dCONSTANTl, [r3, #32] // R5 constant + 32-bit mask
vldr d6, [r3, #40]
vmov.i8 d7, #0
vext.8 q2, q1, qzr, #4
vand.8 d2, d2, d6
vmull.p64 q1, d2, dCONSTANTl
veor.8 q1, q1, q2
/* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */
vldr dCONSTANTl, [r3, #48]
vldr dCONSTANTh, [r3, #56]
vand.8 q2, q1, q3
vext.8 q2, qzr, q2, #8
vmull.p64 q2, d5, dCONSTANTh
vand.8 q2, q2, q3
vmull.p64 q2, d4, dCONSTANTl
veor.8 q1, q1, q2
vmov r0, s5 // return the 32-bit CRC in r0
bx lr
ENDPROC(crc32_pmull_le)
ENDPROC(crc32c_pmull_le)
// __crc32: body shared by crc32_armv8_le / crc32c_armv8_le, using the
// ARMv8 CRC instructions. \c is empty (CRC32) or "c" (CRC32C) and is
// spliced into the crc32w/crc32h/crc32b mnemonics.
// In: r0 = initial crc, r1 = buffer, r2 = length. Out: r0 = crc.
// Strategy: 8 bytes at a time from a 4-byte-aligned pointer; the
// unaligned prologue consumes 1 then 2 bytes as needed; the tail
// handles the final 4/2/1 bytes. ARM_BE8 byte-reverses loads on
// big-endian kernels so the CRC instructions see LE data.
.macro __crc32, c
subs ip, r2, #8
bmi .Ltail\c // fewer than 8 bytes total
tst r1, #3
bne .Lunaligned\c
teq ip, #0
.Laligned8\c:
ldrd r2, r3, [r1], #8
ARM_BE8(rev r2, r2 )
ARM_BE8(rev r3, r3 )
crc32\c\()w r0, r0, r2
crc32\c\()w r0, r0, r3
bxeq lr // length was an exact multiple of 8
subs ip, ip, #8
bpl .Laligned8\c
.Ltail\c:
tst ip, #4
beq 2f
ldr r3, [r1], #4
ARM_BE8(rev r3, r3 )
crc32\c\()w r0, r0, r3
2: tst ip, #2
beq 1f
ldrh r3, [r1], #2
ARM_BE8(rev16 r3, r3 )
crc32\c\()h r0, r0, r3
1: tst ip, #1
bxeq lr
ldrb r3, [r1]
crc32\c\()b r0, r0, r3
bx lr
.Lunaligned\c:
tst r1, #1
beq 2f
ldrb r3, [r1], #1 // consume one byte to reach 2-alignment
subs r2, r2, #1
crc32\c\()b r0, r0, r3
tst r1, #2
beq 0f
2: ldrh r3, [r1], #2 // consume a halfword to reach 4-alignment
subs r2, r2, #2
ARM_BE8(rev16 r3, r3 )
crc32\c\()h r0, r0, r3
0: subs ip, r2, #8
bpl .Laligned8\c
b .Ltail\c
.endm
.align 5
// CRC32 (polynomial 0xEDB88320, LE) via the ARMv8 crc32* instructions.
ENTRY(crc32_armv8_le)
__crc32
ENDPROC(crc32_armv8_le)
.align 5
// CRC32C (Castagnoli) via the ARMv8 crc32c* instructions.
ENTRY(crc32c_armv8_le)
__crc32 c
ENDPROC(crc32c_armv8_le)
/*
 * ===== end of arch/arm/crypto/crc32-ce-core.S =====
 * (source: repo aixcc-public/challenge-001-exemplar-source, 2770-byte
 *  next file follows)
 * ===== begin arch/arm/crypto/sha2-ce-core.S =====
 */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* sha2-ce-core.S - SHA-224/256 secure hash using ARMv8 Crypto Extensions
*
* Copyright (C) 2015 Linaro Ltd.
* Author: Ard Biesheuvel <ard.biesheuvel@linaro.org>
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
.arch armv8-a
.fpu crypto-neon-fp-armv8
// Register aliases: k0/k1 = round-constant registers (double-buffered
// by round parity \ev), rk = constant table pointer, ta*/tb* = the
// "w + K" operands (note ta0/tb1 and ta1/tb0 alias the same q-regs so
// one round's producer feeds the next round's consumer), dga/dgb =
// persistent digest state, dg0/dg1/dg2 = working digest registers.
k0 .req q7
k1 .req q8
rk .req r3
ta0 .req q9
ta1 .req q10
tb0 .req q10
tb1 .req q9
dga .req q11
dgb .req q12
dg0 .req q13
dg1 .req q14
dg2 .req q15
// add_only: one 4-word SHA-256 step using the sha256h/sha256h2 crypto
// instructions. When \s0 is given, it also prefetches the next round
// constant and precomputes ta = w[\s0] + K for the following step;
// when \s0 is blank (final steps) only the hash update runs.
.macro add_only, ev, s0
vmov dg2, dg0
.ifnb \s0
vld1.32 {k\ev}, [rk, :128]!
.endif
sha256h.32 dg0, dg1, tb\ev
sha256h2.32 dg1, dg2, tb\ev
.ifnb \s0
vadd.u32 ta\ev, q\s0, k\ev
.endif
.endm
// add_update: add_only plus message-schedule expansion — sha256su0/
// sha256su1 compute the next w[\s0] block from w[\s1], w[\s2], w[\s3]
// around the hash-update step.
.macro add_update, ev, s0, s1, s2, s3
sha256su0.32 q\s0, q\s1
add_only \ev, \s1
sha256su1.32 q\s0, q\s2, q\s3
.endm
.align 6
// SHA-256 round constants K[0..63] (FIPS 180-4), 4 per .word row.
.Lsha256_rcon:
.word 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
.word 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
.word 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
.word 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
.word 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
.word 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
.word 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
.word 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
.word 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
.word 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
.word 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
.word 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
.word 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
.word 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
.word 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
.word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
/*
 * void sha2_ce_transform(struct sha256_state *sst, u8 const *src,
 *			  int blocks);
 *
 * Process `blocks` 64-byte message blocks from src through the SHA-256
 * compression function using the ARMv8 Crypto Extensions, updating the
 * 8-word digest at *sst in place. Input words are byte-swapped to
 * big-endian on LE kernels. The 64 rounds unroll as 12 add_update
 * steps (with schedule expansion) + 4 add_only steps, 4 rounds each.
 */
ENTRY(sha2_ce_transform)
/* load state */
vld1.32 {dga-dgb}, [r0]
/* load input */
0: vld1.32 {q0-q1}, [r1]!
vld1.32 {q2-q3}, [r1]!
subs r2, r2, #1
#ifndef CONFIG_CPU_BIG_ENDIAN
vrev32.8 q0, q0
vrev32.8 q1, q1
vrev32.8 q2, q2
vrev32.8 q3, q3
#endif
/* load first round constant */
adr rk, .Lsha256_rcon
vld1.32 {k0}, [rk, :128]!
vadd.u32 ta0, q0, k0 // prime ta0 = w0 + K0
vmov dg0, dga
vmov dg1, dgb
add_update 1, 0, 1, 2, 3
add_update 0, 1, 2, 3, 0
add_update 1, 2, 3, 0, 1
add_update 0, 3, 0, 1, 2
add_update 1, 0, 1, 2, 3
add_update 0, 1, 2, 3, 0
add_update 1, 2, 3, 0, 1
add_update 0, 3, 0, 1, 2
add_update 1, 0, 1, 2, 3
add_update 0, 1, 2, 3, 0
add_update 1, 2, 3, 0, 1
add_update 0, 3, 0, 1, 2
add_only 1, 1 // last 16 rounds need no expansion
add_only 0, 2
add_only 1, 3
add_only 0
/* update state */
vadd.u32 dga, dga, dg0
vadd.u32 dgb, dgb, dg1
bne 0b // more blocks remain
/* store new state */
vst1.32 {dga-dgb}, [r0]
bx lr
ENDPROC(sha2_ce_transform)
/*
 * ===== end of arch/arm/crypto/sha2-ce-core.S =====
 * (source: repo aixcc-public/challenge-001-exemplar-source, 15074-byte
 *  next file follows)
 * ===== begin arch/arm/crypto/chacha-neon-core.S =====
 */
/*
* ChaCha/XChaCha NEON helper functions
*
* Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Based on:
* ChaCha20 256-bit cipher algorithm, RFC7539, x64 SSE3 functions
*
* Copyright (C) 2015 Martin Willi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
/*
* NEON doesn't have a rotate instruction. The alternatives are, more or less:
*
* (a) vshl.u32 + vsri.u32 (needs temporary register)
* (b) vshl.u32 + vshr.u32 + vorr (needs temporary register)
* (c) vrev32.16 (16-bit rotations only)
* (d) vtbl.8 + vtbl.8 (multiple of 8 bits rotations only,
* needs index vector)
*
* ChaCha has 16, 12, 8, and 7-bit rotations. For the 12 and 7-bit rotations,
* the only choices are (a) and (b). We use (a) since it takes two-thirds the
* cycles of (b) on both Cortex-A7 and Cortex-A53.
*
* For the 16-bit rotation, we use vrev32.16 since it's consistently fastest
* and doesn't need a temporary register.
*
* For the 8-bit rotation, we use vtbl.8 + vtbl.8. On Cortex-A7, this sequence
* is twice as fast as (a), even when doing (a) on multiple registers
* simultaneously to eliminate the stall between vshl and vsri. Also, it
* parallelizes better when temporary registers are scarce.
*
* A disadvantage is that on Cortex-A53, the vtbl sequence is the same speed as
* (a), so the need to load the rotation table actually makes the vtbl method
* slightly slower overall on that CPU (~1.3% slower ChaCha20). Still, it
* seems to be a good compromise to get a more significant speed boost on some
* CPUs, e.g. ~4.8% faster ChaCha20 on Cortex-A7.
*/
#include <linux/linkage.h>
#include <asm/cache.h>
.text
.fpu neon
.align 5
/*
* chacha_permute - permute one block
*
* Permute one 64-byte block where the state matrix is stored in the four NEON
* registers q0-q3. It performs matrix operations on four words in parallel,
* but requires shuffling to rearrange the words after each round.
*
* The round count is given in r3.
*
* Clobbers: r3, ip, q4-q5
*/
// chacha_permute: run r3 ChaCha rounds (two per loop iteration) on the
// 4x4 state matrix held row-wise in q0-q3. d10 caches the rotl-8 vtbl
// index table. See the block comment above for the calling contract
// (clobbers r3, ip, q4-q5).
chacha_permute:
adr ip, .Lrol8_table
vld1.8 {d10}, [ip, :64]
.Ldoubleround:
// ---- column round ----
// x0 += x1, x3 = rotl32(x3 ^ x0, 16)
vadd.i32 q0, q0, q1
veor q3, q3, q0
vrev32.16 q3, q3
// x2 += x3, x1 = rotl32(x1 ^ x2, 12)
vadd.i32 q2, q2, q3
veor q4, q1, q2
vshl.u32 q1, q4, #12
vsri.u32 q1, q4, #20
// x0 += x1, x3 = rotl32(x3 ^ x0, 8)
vadd.i32 q0, q0, q1
veor q3, q3, q0
vtbl.8 d6, {d6}, d10
vtbl.8 d7, {d7}, d10
// x2 += x3, x1 = rotl32(x1 ^ x2, 7)
vadd.i32 q2, q2, q3
veor q4, q1, q2
vshl.u32 q1, q4, #7
vsri.u32 q1, q4, #25
// rotate rows so the diagonal round can reuse the column code
// x1 = shuffle32(x1, MASK(0, 3, 2, 1))
vext.8 q1, q1, q1, #4
// x2 = shuffle32(x2, MASK(1, 0, 3, 2))
vext.8 q2, q2, q2, #8
// x3 = shuffle32(x3, MASK(2, 1, 0, 3))
vext.8 q3, q3, q3, #12
// ---- diagonal round ----
// x0 += x1, x3 = rotl32(x3 ^ x0, 16)
vadd.i32 q0, q0, q1
veor q3, q3, q0
vrev32.16 q3, q3
// x2 += x3, x1 = rotl32(x1 ^ x2, 12)
vadd.i32 q2, q2, q3
veor q4, q1, q2
vshl.u32 q1, q4, #12
vsri.u32 q1, q4, #20
// x0 += x1, x3 = rotl32(x3 ^ x0, 8)
vadd.i32 q0, q0, q1
veor q3, q3, q0
vtbl.8 d6, {d6}, d10
vtbl.8 d7, {d7}, d10
// x2 += x3, x1 = rotl32(x1 ^ x2, 7)
vadd.i32 q2, q2, q3
veor q4, q1, q2
vshl.u32 q1, q4, #7
vsri.u32 q1, q4, #25
// undo the row rotations
// x1 = shuffle32(x1, MASK(2, 1, 0, 3))
vext.8 q1, q1, q1, #12
// x2 = shuffle32(x2, MASK(1, 0, 3, 2))
vext.8 q2, q2, q2, #8
// x3 = shuffle32(x3, MASK(0, 3, 2, 1))
vext.8 q3, q3, q3, #4
subs r3, r3, #2
bne .Ldoubleround
bx lr
ENDPROC(chacha_permute)
// chacha_block_xor_neon: encrypt exactly one 64-byte block —
// o = i ^ ChaCha(s). Saves the initial state in q8-q11, permutes
// q0-q3 via chacha_permute, then performs the feed-forward add and
// XORs with the input block.
ENTRY(chacha_block_xor_neon)
// r0: Input state matrix, s
// r1: 1 data block output, o
// r2: 1 data block input, i
// r3: nrounds
push {lr}
// x0..3 = s0..3
add ip, r0, #0x20
vld1.32 {q0-q1}, [r0]
vld1.32 {q2-q3}, [ip]
vmov q8, q0 // keep the original state for feed-forward
vmov q9, q1
vmov q10, q2
vmov q11, q3
bl chacha_permute
add ip, r2, #0x20
vld1.8 {q4-q5}, [r2]
vld1.8 {q6-q7}, [ip]
// o0 = i0 ^ (x0 + s0)
vadd.i32 q0, q0, q8
veor q0, q0, q4
// o1 = i1 ^ (x1 + s1)
vadd.i32 q1, q1, q9
veor q1, q1, q5
// o2 = i2 ^ (x2 + s2)
vadd.i32 q2, q2, q10
veor q2, q2, q6
// o3 = i3 ^ (x3 + s3)
vadd.i32 q3, q3, q11
veor q3, q3, q7
add ip, r1, #0x20
vst1.8 {q0-q1}, [r1]
vst1.8 {q2-q3}, [ip]
pop {pc}
ENDPROC(chacha_block_xor_neon)
// hchacha_block_neon: the HChaCha core (XChaCha subkey derivation) —
// permute the state and emit only the first and last rows (q0 and q3),
// with no feed-forward addition.
ENTRY(hchacha_block_neon)
// r0: Input state matrix, s
// r1: output (8 32-bit words)
// r2: nrounds
push {lr}
vld1.32 {q0-q1}, [r0]!
vld1.32 {q2-q3}, [r0]
mov r3, r2
bl chacha_permute
vst1.32 {q0}, [r1]! // words 0-3 of the permuted state
vst1.32 {q3}, [r1] // words 12-15
pop {pc}
ENDPROC(hchacha_block_neon)
.align 4
// .Lctrinc: per-lane counter offsets added to the x12 lanes in the
// 4-block routine ("x12 += counter values 0-3").
// .Lrol8_table: vtbl index vector that rotates each 32-bit word left
// by 8 bits.
.Lctrinc: .word 0, 1, 2, 3
.Lrol8_table: .byte 3, 0, 1, 2, 7, 4, 5, 6
.align 5
ENTRY(chacha_4block_xor_neon)
push {r4, lr}
mov r4, sp // preserve the stack pointer
sub ip, sp, #0x20 // allocate a 32 byte buffer
bic ip, ip, #0x1f // aligned to 32 bytes
mov sp, ip
// r0: Input state matrix, s
// r1: 4 data blocks output, o
// r2: 4 data blocks input, i
// r3: nrounds
//
// This function encrypts four consecutive ChaCha blocks by loading
// the state matrix in NEON registers four times. The algorithm performs
// each operation on the corresponding word of each state matrix, hence
// requires no word shuffling. The words are re-interleaved before the
// final addition of the original state and the XORing step.
//
// x0..15[0-3] = s0..15[0-3]
add ip, r0, #0x20
vld1.32 {q0-q1}, [r0]
vld1.32 {q2-q3}, [ip]
adr lr, .Lctrinc
vdup.32 q15, d7[1]
vdup.32 q14, d7[0]
vld1.32 {q4}, [lr, :128]
vdup.32 q13, d6[1]
vdup.32 q12, d6[0]
vdup.32 q11, d5[1]
vdup.32 q10, d5[0]
vadd.u32 q12, q12, q4 // x12 += counter values 0-3
vdup.32 q9, d4[1]
vdup.32 q8, d4[0]
vdup.32 q7, d3[1]
vdup.32 q6, d3[0]
vdup.32 q5, d2[1]
vdup.32 q4, d2[0]
vdup.32 q3, d1[1]
vdup.32 q2, d1[0]
vdup.32 q1, d0[1]
vdup.32 q0, d0[0]
adr ip, .Lrol8_table
b 1f
.Ldoubleround4:
vld1.32 {q8-q9}, [sp, :256]
1:
// x0 += x4, x12 = rotl32(x12 ^ x0, 16)
// x1 += x5, x13 = rotl32(x13 ^ x1, 16)
// x2 += x6, x14 = rotl32(x14 ^ x2, 16)
// x3 += x7, x15 = rotl32(x15 ^ x3, 16)
vadd.i32 q0, q0, q4
vadd.i32 q1, q1, q5
vadd.i32 q2, q2, q6
vadd.i32 q3, q3, q7
veor q12, q12, q0
veor q13, q13, q1
veor q14, q14, q2
veor q15, q15, q3
vrev32.16 q12, q12
vrev32.16 q13, q13
vrev32.16 q14, q14
vrev32.16 q15, q15
// x8 += x12, x4 = rotl32(x4 ^ x8, 12)
// x9 += x13, x5 = rotl32(x5 ^ x9, 12)
// x10 += x14, x6 = rotl32(x6 ^ x10, 12)
// x11 += x15, x7 = rotl32(x7 ^ x11, 12)
vadd.i32 q8, q8, q12
vadd.i32 q9, q9, q13
vadd.i32 q10, q10, q14
vadd.i32 q11, q11, q15
vst1.32 {q8-q9}, [sp, :256]
veor q8, q4, q8
veor q9, q5, q9
vshl.u32 q4, q8, #12
vshl.u32 q5, q9, #12
vsri.u32 q4, q8, #20
vsri.u32 q5, q9, #20
veor q8, q6, q10
veor q9, q7, q11
vshl.u32 q6, q8, #12
vshl.u32 q7, q9, #12
vsri.u32 q6, q8, #20
vsri.u32 q7, q9, #20
// x0 += x4, x12 = rotl32(x12 ^ x0, 8)
// x1 += x5, x13 = rotl32(x13 ^ x1, 8)
// x2 += x6, x14 = rotl32(x14 ^ x2, 8)
// x3 += x7, x15 = rotl32(x15 ^ x3, 8)
vld1.8 {d16}, [ip, :64]
vadd.i32 q0, q0, q4
vadd.i32 q1, q1, q5
vadd.i32 q2, q2, q6
vadd.i32 q3, q3, q7
veor q12, q12, q0
veor q13, q13, q1
veor q14, q14, q2
veor q15, q15, q3
vtbl.8 d24, {d24}, d16
vtbl.8 d25, {d25}, d16
vtbl.8 d26, {d26}, d16
vtbl.8 d27, {d27}, d16
vtbl.8 d28, {d28}, d16
vtbl.8 d29, {d29}, d16
vtbl.8 d30, {d30}, d16
vtbl.8 d31, {d31}, d16
vld1.32 {q8-q9}, [sp, :256]
// x8 += x12, x4 = rotl32(x4 ^ x8, 7)
// x9 += x13, x5 = rotl32(x5 ^ x9, 7)
// x10 += x14, x6 = rotl32(x6 ^ x10, 7)
// x11 += x15, x7 = rotl32(x7 ^ x11, 7)
vadd.i32 q8, q8, q12
vadd.i32 q9, q9, q13
vadd.i32 q10, q10, q14
vadd.i32 q11, q11, q15
vst1.32 {q8-q9}, [sp, :256]
veor q8, q4, q8
veor q9, q5, q9
vshl.u32 q4, q8, #7
vshl.u32 q5, q9, #7
vsri.u32 q4, q8, #25
vsri.u32 q5, q9, #25
veor q8, q6, q10
veor q9, q7, q11
vshl.u32 q6, q8, #7
vshl.u32 q7, q9, #7
vsri.u32 q6, q8, #25
vsri.u32 q7, q9, #25
vld1.32 {q8-q9}, [sp, :256]
// x0 += x5, x15 = rotl32(x15 ^ x0, 16)
// x1 += x6, x12 = rotl32(x12 ^ x1, 16)
// x2 += x7, x13 = rotl32(x13 ^ x2, 16)
// x3 += x4, x14 = rotl32(x14 ^ x3, 16)
vadd.i32 q0, q0, q5
vadd.i32 q1, q1, q6
vadd.i32 q2, q2, q7
vadd.i32 q3, q3, q4
veor q15, q15, q0
veor q12, q12, q1
veor q13, q13, q2
veor q14, q14, q3
vrev32.16 q15, q15
vrev32.16 q12, q12
vrev32.16 q13, q13
vrev32.16 q14, q14
// x10 += x15, x5 = rotl32(x5 ^ x10, 12)
// x11 += x12, x6 = rotl32(x6 ^ x11, 12)
// x8 += x13, x7 = rotl32(x7 ^ x8, 12)
// x9 += x14, x4 = rotl32(x4 ^ x9, 12)
vadd.i32 q10, q10, q15
vadd.i32 q11, q11, q12
vadd.i32 q8, q8, q13
vadd.i32 q9, q9, q14
vst1.32 {q8-q9}, [sp, :256]
veor q8, q7, q8
veor q9, q4, q9
vshl.u32 q7, q8, #12
vshl.u32 q4, q9, #12
vsri.u32 q7, q8, #20
vsri.u32 q4, q9, #20
veor q8, q5, q10
veor q9, q6, q11
vshl.u32 q5, q8, #12
vshl.u32 q6, q9, #12
vsri.u32 q5, q8, #20
vsri.u32 q6, q9, #20
// x0 += x5, x15 = rotl32(x15 ^ x0, 8)
// x1 += x6, x12 = rotl32(x12 ^ x1, 8)
// x2 += x7, x13 = rotl32(x13 ^ x2, 8)
// x3 += x4, x14 = rotl32(x14 ^ x3, 8)
vld1.8 {d16}, [ip, :64]
vadd.i32 q0, q0, q5
vadd.i32 q1, q1, q6
vadd.i32 q2, q2, q7
vadd.i32 q3, q3, q4
veor q15, q15, q0
veor q12, q12, q1
veor q13, q13, q2
veor q14, q14, q3
vtbl.8 d30, {d30}, d16
vtbl.8 d31, {d31}, d16
vtbl.8 d24, {d24}, d16
vtbl.8 d25, {d25}, d16
vtbl.8 d26, {d26}, d16
vtbl.8 d27, {d27}, d16
vtbl.8 d28, {d28}, d16
vtbl.8 d29, {d29}, d16
vld1.32 {q8-q9}, [sp, :256]
// x10 += x15, x5 = rotl32(x5 ^ x10, 7)
// x11 += x12, x6 = rotl32(x6 ^ x11, 7)
// x8 += x13, x7 = rotl32(x7 ^ x8, 7)
// x9 += x14, x4 = rotl32(x4 ^ x9, 7)
vadd.i32 q10, q10, q15
vadd.i32 q11, q11, q12
vadd.i32 q8, q8, q13
vadd.i32 q9, q9, q14
vst1.32 {q8-q9}, [sp, :256]
veor q8, q7, q8
veor q9, q4, q9
vshl.u32 q7, q8, #7
vshl.u32 q4, q9, #7
vsri.u32 q7, q8, #25
vsri.u32 q4, q9, #25
veor q8, q5, q10
veor q9, q6, q11
vshl.u32 q5, q8, #7
vshl.u32 q6, q9, #7
vsri.u32 q5, q8, #25
vsri.u32 q6, q9, #25
subs r3, r3, #2
bne .Ldoubleround4
// x0..7[0-3] are in q0-q7, x10..15[0-3] are in q10-q15.
// x8..9[0-3] are on the stack.
// Re-interleave the words in the first two rows of each block (x0..7).
// Also add the counter values 0-3 to x12[0-3].
vld1.32 {q8}, [lr, :128] // load counter values 0-3
vzip.32 q0, q1 // => (0 1 0 1) (0 1 0 1)
vzip.32 q2, q3 // => (2 3 2 3) (2 3 2 3)
vzip.32 q4, q5 // => (4 5 4 5) (4 5 4 5)
vzip.32 q6, q7 // => (6 7 6 7) (6 7 6 7)
vadd.u32 q12, q8 // x12 += counter values 0-3
vswp d1, d4
vswp d3, d6
vld1.32 {q8-q9}, [r0]! // load s0..7
vswp d9, d12
vswp d11, d14
// Swap q1 and q4 so that we'll free up consecutive registers (q0-q1)
// after XORing the first 32 bytes.
vswp q1, q4
// First two rows of each block are (q0 q1) (q2 q6) (q4 q5) (q3 q7)
// x0..3[0-3] += s0..3[0-3] (add orig state to 1st row of each block)
vadd.u32 q0, q0, q8
vadd.u32 q2, q2, q8
vadd.u32 q4, q4, q8
vadd.u32 q3, q3, q8
// x4..7[0-3] += s4..7[0-3] (add orig state to 2nd row of each block)
vadd.u32 q1, q1, q9
vadd.u32 q6, q6, q9
vadd.u32 q5, q5, q9
vadd.u32 q7, q7, q9
// XOR first 32 bytes using keystream from first two rows of first block
vld1.8 {q8-q9}, [r2]!
veor q8, q8, q0
veor q9, q9, q1
vst1.8 {q8-q9}, [r1]!
// Re-interleave the words in the last two rows of each block (x8..15).
vld1.32 {q8-q9}, [sp, :256]
mov sp, r4 // restore original stack pointer
ldr r4, [r4, #8] // load number of bytes
vzip.32 q12, q13 // => (12 13 12 13) (12 13 12 13)
vzip.32 q14, q15 // => (14 15 14 15) (14 15 14 15)
vzip.32 q8, q9 // => (8 9 8 9) (8 9 8 9)
vzip.32 q10, q11 // => (10 11 10 11) (10 11 10 11)
vld1.32 {q0-q1}, [r0] // load s8..15
vswp d25, d28
vswp d27, d30
vswp d17, d20
vswp d19, d22
// Last two rows of each block are (q8 q12) (q10 q14) (q9 q13) (q11 q15)
// x8..11[0-3] += s8..11[0-3] (add orig state to 3rd row of each block)
vadd.u32 q8, q8, q0
vadd.u32 q10, q10, q0
vadd.u32 q9, q9, q0
vadd.u32 q11, q11, q0
// x12..15[0-3] += s12..15[0-3] (add orig state to 4th row of each block)
vadd.u32 q12, q12, q1
vadd.u32 q14, q14, q1
vadd.u32 q13, q13, q1
vadd.u32 q15, q15, q1
// XOR the rest of the data with the keystream
vld1.8 {q0-q1}, [r2]!
subs r4, r4, #96
veor q0, q0, q8
veor q1, q1, q12
ble .Lle96
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
subs r4, r4, #32
veor q0, q0, q2
veor q1, q1, q6
ble .Lle128
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
subs r4, r4, #32
veor q0, q0, q10
veor q1, q1, q14
ble .Lle160
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
subs r4, r4, #32
veor q0, q0, q4
veor q1, q1, q5
ble .Lle192
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
subs r4, r4, #32
veor q0, q0, q9
veor q1, q1, q13
ble .Lle224
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
subs r4, r4, #32
veor q0, q0, q3
veor q1, q1, q7
blt .Llt256
.Lout:
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]
veor q0, q0, q11
veor q1, q1, q15
vst1.8 {q0-q1}, [r1]
pop {r4, pc}
.Lle192:
vmov q4, q9
vmov q5, q13
.Lle160:
// nothing to do
.Lfinalblock:
// Process the final block if processing less than 4 full blocks.
// Entered with 32 bytes of ChaCha cipher stream in q4-q5, and the
// previous 32 byte output block that still needs to be written at
// [r1] in q0-q1.
beq .Lfullblock
.Lpartialblock:
adr lr, .Lpermute + 32
add r2, r2, r4
add lr, lr, r4
add r4, r4, r1
vld1.8 {q2-q3}, [lr]
vld1.8 {q6-q7}, [r2]
add r4, r4, #32
vtbl.8 d4, {q4-q5}, d4
vtbl.8 d5, {q4-q5}, d5
vtbl.8 d6, {q4-q5}, d6
vtbl.8 d7, {q4-q5}, d7
veor q6, q6, q2
veor q7, q7, q3
vst1.8 {q6-q7}, [r4] // overlapping stores
vst1.8 {q0-q1}, [r1]
pop {r4, pc}
.Lfullblock:
vmov q11, q4
vmov q15, q5
b .Lout
.Lle96:
vmov q4, q2
vmov q5, q6
b .Lfinalblock
.Lle128:
vmov q4, q10
vmov q5, q14
b .Lfinalblock
.Lle224:
vmov q4, q3
vmov q5, q7
b .Lfinalblock
.Llt256:
vmov q4, q11
vmov q5, q15
b .Lpartialblock
ENDPROC(chacha_4block_xor_neon)
// Byte-index table used by the partial-block tail handling above: two
// copies of the identity permutation 0x00..0x1f over a 32-byte window.
// Loading 16+16 bytes starting at (.Lpermute + 32 + len) yields vtbl
// index vectors that shift the 32-byte keystream so only the final
// 'len' bytes are selected (out-of-range indices make vtbl write zero).
// NOTE(review): aligned to a cache line, presumably so the unaligned
// loads into this table never straddle one -- confirm.
.align L1_CACHE_SHIFT
.Lpermute:
.byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
.byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
.byte 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
.byte 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
.byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
.byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
.byte 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
.byte 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
|
aixcc-public/challenge-001-exemplar-source
| 2,337
|
arch/arm/crypto/nh-neon-core.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* NH - ε-almost-universal hash function, NEON accelerated version
*
* Copyright 2018 Google LLC
*
* Author: Eric Biggers <ebiggers@google.com>
*/
#include <linux/linkage.h>
.text
.fpu neon
// AAPCS argument registers of nh_neon() (see prototype further down).
KEY .req r0
MESSAGE .req r1
MESSAGE_LEN .req r2
HASH .req r3
// Accumulators: one q register per NH pass, each holding two u64 partial
// sums (the A/B d-register halves are folded together in .Ldone).
PASS0_SUMS .req q0
PASS0_SUM_A .req d0
PASS0_SUM_B .req d1
PASS1_SUMS .req q1
PASS1_SUM_A .req d2
PASS1_SUM_B .req d3
PASS2_SUMS .req q2
PASS2_SUM_A .req d4
PASS2_SUM_B .req d5
PASS3_SUMS .req q3
PASS3_SUM_A .req d6
PASS3_SUM_B .req d7
// Four in-flight key strides; _nh_stride rotates through them.
K0 .req q4
K1 .req q5
K2 .req q6
K3 .req q7
// Scratch registers; T3 doubles as the message-stride load target.
T0 .req q8
T0_L .req d16
T0_H .req d17
T1 .req q9
T1_L .req d18
T1_H .req d19
T2 .req q10
T2_L .req d20
T2_H .req d21
T3 .req q11
T3_L .req d22
T3_H .req d23
// Process one 16-byte message stride: add it to four key strides
// (\k0..\k3) and accumulate the 32x32->64 products into the four pass
// sums. Advances MESSAGE by 16 bytes and KEY by one stride, loading the
// next key stride into \k3 -- hence the caller's k0..k3 rotation.
.macro _nh_stride k0, k1, k2, k3
// Load next message stride
vld1.8 {T3}, [MESSAGE]!
// Load next key stride
vld1.32 {\k3}, [KEY]!
// Add message words to key words
vadd.u32 T0, T3, \k0
vadd.u32 T1, T3, \k1
vadd.u32 T2, T3, \k2
vadd.u32 T3, T3, \k3
// Multiply 32x32 => 64 and accumulate
vmlal.u32 PASS0_SUMS, T0_L, T0_H
vmlal.u32 PASS1_SUMS, T1_L, T1_H
vmlal.u32 PASS2_SUMS, T2_L, T2_H
vmlal.u32 PASS3_SUMS, T3_L, T3_H
.endm
/*
* void nh_neon(const u32 *key, const u8 *message, size_t message_len,
* u8 hash[NH_HASH_BYTES])
*
* It's guaranteed that message_len % 16 == 0.
*/
// nh_neon(key, message, message_len, hash) -- see C prototype above.
// Computes the four NH pass sums over the message and stores them as
// four u64 values at 'hash'. message_len is a multiple of 16.
ENTRY(nh_neon)
vld1.32 {K0,K1}, [KEY]! // preload first two key strides
vmov.u64 PASS0_SUMS, #0
vmov.u64 PASS1_SUMS, #0
vld1.32 {K2}, [KEY]! // third stride; K3 is loaded inside _nh_stride
vmov.u64 PASS2_SUMS, #0
vmov.u64 PASS3_SUMS, #0
subs MESSAGE_LEN, MESSAGE_LEN, #64
blt .Lloop4_done
.Lloop4:
// 4x-unrolled main loop; the key registers rotate so each stride sees
// the key at offsets 0/4/8/12 words from its own message position.
_nh_stride K0, K1, K2, K3
_nh_stride K1, K2, K3, K0
_nh_stride K2, K3, K0, K1
_nh_stride K3, K0, K1, K2
subs MESSAGE_LEN, MESSAGE_LEN, #64
bge .Lloop4
.Lloop4_done:
ands MESSAGE_LEN, MESSAGE_LEN, #63 // recover remaining bytes (0..48)
beq .Ldone
_nh_stride K0, K1, K2, K3
subs MESSAGE_LEN, MESSAGE_LEN, #16
beq .Ldone
_nh_stride K1, K2, K3, K0
subs MESSAGE_LEN, MESSAGE_LEN, #16
beq .Ldone
_nh_stride K2, K3, K0, K1
.Ldone:
// Sum the accumulators for each pass, then store the sums to 'hash'
vadd.u64 T0_L, PASS0_SUM_A, PASS0_SUM_B
vadd.u64 T0_H, PASS1_SUM_A, PASS1_SUM_B
vadd.u64 T1_L, PASS2_SUM_A, PASS2_SUM_B
vadd.u64 T1_H, PASS3_SUM_A, PASS3_SUM_B
vst1.8 {T0-T1}, [HASH]
bx lr
ENDPROC(nh_neon)
|
aixcc-public/challenge-001-exemplar-source
| 15,545
|
arch/arm/crypto/aes-ce-core.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aes-ce-core.S - AES in CBC/CTR/XTS mode using ARMv8 Crypto Extensions
*
* Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
.arch armv8-a
.fpu crypto-neon-fp-armv8
.align 3
@ One inner AES encryption round: aese = AddRoundKey + SubBytes +
@ ShiftRows, aesmc = MixColumns.
.macro enc_round, state, key
aese.8 \state, \key
aesmc.8 \state, \state
.endm
@ One inner AES decryption round (inverse operations).
.macro dec_round, state, key
aesd.8 \state, \key
aesimc.8 \state, \state
.endm
@ Two consecutive rounds on the single block in q0.
.macro enc_dround, key1, key2
enc_round q0, \key1
enc_round q0, \key2
.endm
.macro dec_dround, key1, key2
dec_round q0, \key1
dec_round q0, \key2
.endm
@ Final two rounds: the last aese/aesd has no MixColumns, and the last
@ round key (\key3) is XORed in directly.
.macro enc_fround, key1, key2, key3
enc_round q0, \key1
aese.8 q0, \key2
veor q0, q0, \key3
.endm
.macro dec_fround, key1, key2, key3
dec_round q0, \key1
aesd.8 q0, \key2
veor q0, q0, \key3
.endm
@ 4-way interleaved variants operating on q0-q3: keeping four
@ independent blocks in flight hides the aese/aesmc result latency.
.macro enc_dround_4x, key1, key2
enc_round q0, \key1
enc_round q1, \key1
enc_round q2, \key1
enc_round q3, \key1
enc_round q0, \key2
enc_round q1, \key2
enc_round q2, \key2
enc_round q3, \key2
.endm
.macro dec_dround_4x, key1, key2
dec_round q0, \key1
dec_round q1, \key1
dec_round q2, \key1
dec_round q3, \key1
dec_round q0, \key2
dec_round q1, \key2
dec_round q2, \key2
dec_round q3, \key2
.endm
.macro enc_fround_4x, key1, key2, key3
enc_round q0, \key1
enc_round q1, \key1
enc_round q2, \key1
enc_round q3, \key1
aese.8 q0, \key2
aese.8 q1, \key2
aese.8 q2, \key2
aese.8 q3, \key2
veor q0, q0, \key3
veor q1, q1, \key3
veor q2, q2, \key3
veor q3, q3, \key3
.endm
.macro dec_fround_4x, key1, key2, key3
dec_round q0, \key1
dec_round q1, \key1
dec_round q2, \key1
dec_round q3, \key1
aesd.8 q0, \key2
aesd.8 q1, \key2
aesd.8 q2, \key2
aesd.8 q3, \key2
veor q0, q0, \key3
veor q1, q1, \key3
veor q2, q2, \key3
veor q3, q3, \key3
.endm
@ Full AES transform on the block(s) in q0(-q3). Expects q8/q9 = first
@ two round keys, q14 = last round key, ip -> 3rd round key, and
@ r3 = number of rounds (10/12/14 for AES-128/192/256); the compare
@ against 12 plus the blo/beq below select how many extra double
@ rounds to run before the final-round macro.
.macro do_block, dround, fround
cmp r3, #12 @ which key size?
vld1.32 {q10-q11}, [ip]!
\dround q8, q9
vld1.32 {q12-q13}, [ip]!
\dround q10, q11
vld1.32 {q10-q11}, [ip]!
\dround q12, q13
vld1.32 {q12-q13}, [ip]!
\dround q10, q11
blo 0f @ AES-128: 10 rounds
vld1.32 {q10-q11}, [ip]!
\dround q12, q13
beq 1f @ AES-192: 12 rounds
vld1.32 {q12-q13}, [ip]
\dround q10, q11
0: \fround q12, q13, q14
bx lr
1: \fround q10, q11, q14
bx lr
.endm
/*
* Internal, non-AAPCS compliant functions that implement the core AES
* transforms. These should preserve all registers except q0 - q2 and ip
* Arguments:
* q0 : first in/output block
* q1 : second in/output block (_4x version only)
* q2 : third in/output block (_4x version only)
* q3 : fourth in/output block (_4x version only)
* q8 : first round key
* q9 : secound round key
* q14 : final round key
* r2 : address of round key array
* r3 : number of rounds
*/
@ Encrypt/decrypt the single block in q0 (see register contract above);
@ do_block ends in bx lr, so these have no explicit return.
.align 6
aes_encrypt:
add ip, r2, #32 @ 3rd round key
@ Secondary entry used by ce_aes_xts_init, which sets up ip itself to
@ point into the second (tweak) key schedule.
.Laes_encrypt_tweak:
do_block enc_dround, enc_fround
ENDPROC(aes_encrypt)
.align 6
aes_decrypt:
add ip, r2, #32 @ 3rd round key
do_block dec_dround, dec_fround
ENDPROC(aes_decrypt)
@ 4-way variants: transform the four blocks held in q0-q3.
.align 6
aes_encrypt_4x:
add ip, r2, #32 @ 3rd round key
do_block enc_dround_4x, enc_fround_4x
ENDPROC(aes_encrypt_4x)
.align 6
aes_decrypt_4x:
add ip, r2, #32 @ 3rd round key
do_block dec_dround_4x, dec_fround_4x
ENDPROC(aes_decrypt_4x)
@ Set up the key registers expected by do_block: first two round keys
@ in q8-q9, last round key (at \rk + \rounds * 16) in q14.
.macro prepare_key, rk, rounds
add ip, \rk, \rounds, lsl #4 @ ip = address of last round key
vld1.32 {q8-q9}, [\rk] @ load first 2 round keys
vld1.32 {q14}, [ip] @ load last round key
.endm
/*
* aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[], int rounds,
* int blocks)
* aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[], int rounds,
* int blocks)
*/
@ ECB encryption: r0 = out, r1 = in, r2 = round keys, r3 = rounds,
@ block count on the stack. Blocks are independent, so groups of four
@ go through the interleaved 4x path and the remainder one at a time.
ENTRY(ce_aes_ecb_encrypt)
push {r4, lr}
ldr r4, [sp, #8] @ r4 = number of blocks (5th arg, past the push)
prepare_key r2, r3
.Lecbencloop4x:
subs r4, r4, #4
bmi .Lecbenc1x @ fewer than 4 blocks left
vld1.8 {q0-q1}, [r1]!
vld1.8 {q2-q3}, [r1]!
bl aes_encrypt_4x
vst1.8 {q0-q1}, [r0]!
vst1.8 {q2-q3}, [r0]!
b .Lecbencloop4x
.Lecbenc1x:
adds r4, r4, #4 @ undo bias; r4 = remaining blocks (0..3)
beq .Lecbencout
.Lecbencloop:
vld1.8 {q0}, [r1]!
bl aes_encrypt
vst1.8 {q0}, [r0]!
subs r4, r4, #1
bne .Lecbencloop
.Lecbencout:
pop {r4, pc}
ENDPROC(ce_aes_ecb_encrypt)
@ ECB decryption: identical structure to ce_aes_ecb_encrypt, using the
@ inverse-cipher helpers.
ENTRY(ce_aes_ecb_decrypt)
push {r4, lr}
ldr r4, [sp, #8] @ r4 = number of blocks (5th arg, past the push)
prepare_key r2, r3
.Lecbdecloop4x:
subs r4, r4, #4
bmi .Lecbdec1x @ fewer than 4 blocks left
vld1.8 {q0-q1}, [r1]!
vld1.8 {q2-q3}, [r1]!
bl aes_decrypt_4x
vst1.8 {q0-q1}, [r0]!
vst1.8 {q2-q3}, [r0]!
b .Lecbdecloop4x
.Lecbdec1x:
adds r4, r4, #4 @ undo bias; r4 = remaining blocks (0..3)
beq .Lecbdecout
.Lecbdecloop:
vld1.8 {q0}, [r1]!
bl aes_decrypt
vst1.8 {q0}, [r0]!
subs r4, r4, #1
bne .Lecbdecloop
.Lecbdecout:
pop {r4, pc}
ENDPROC(ce_aes_ecb_decrypt)
/*
* aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[], int rounds,
* int blocks, u8 iv[])
* aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[], int rounds,
* int blocks, u8 iv[])
*/
@ CBC encryption. Each ciphertext block feeds the next XOR, so this is
@ inherently serial: one block per iteration, with the running
@ IV/ciphertext chain kept in q0 throughout.
ENTRY(ce_aes_cbc_encrypt)
push {r4-r6, lr}
ldrd r4, r5, [sp, #16] @ r4 = blocks, r5 = iv pointer (stack args)
vld1.8 {q0}, [r5]
prepare_key r2, r3
.Lcbcencloop:
vld1.8 {q1}, [r1]! @ get next pt block
veor q0, q0, q1 @ ..and xor with iv
bl aes_encrypt
vst1.8 {q0}, [r0]!
subs r4, r4, #1
bne .Lcbcencloop
vst1.8 {q0}, [r5] @ write back final ct block as next IV
pop {r4-r6, pc}
ENDPROC(ce_aes_cbc_encrypt)
@ CBC decryption. Unlike encryption this parallelises: each plaintext
@ block only needs the previous *ciphertext* block, so four blocks are
@ decrypted at once with their ciphertexts stashed in q4-q7 for the
@ chaining XORs afterwards.
ENTRY(ce_aes_cbc_decrypt)
push {r4-r6, lr}
ldrd r4, r5, [sp, #16] @ r4 = blocks, r5 = iv pointer (stack args)
vld1.8 {q15}, [r5] @ keep iv in q15
prepare_key r2, r3
.Lcbcdecloop4x:
subs r4, r4, #4
bmi .Lcbcdec1x
vld1.8 {q0-q1}, [r1]!
vld1.8 {q2-q3}, [r1]!
vmov q4, q0 @ preserve the 4 ct blocks for chaining
vmov q5, q1
vmov q6, q2
vmov q7, q3
bl aes_decrypt_4x
veor q0, q0, q15 @ xor each result with the preceding ct/iv
veor q1, q1, q4
veor q2, q2, q5
veor q3, q3, q6
vmov q15, q7 @ last ct becomes the next chain value
vst1.8 {q0-q1}, [r0]!
vst1.8 {q2-q3}, [r0]!
b .Lcbcdecloop4x
.Lcbcdec1x:
adds r4, r4, #4 @ undo bias; r4 = remaining blocks (0..3)
beq .Lcbcdecout
vmov q6, q14 @ preserve last round key
.Lcbcdecloop:
@ Fold the CBC chaining XOR into the cipher's final AddRoundKey by
@ presenting (last round key ^ previous ct) as the last round key.
vld1.8 {q0}, [r1]! @ get next ct block
veor q14, q15, q6 @ combine prev ct with last key
vmov q15, q0
bl aes_decrypt
vst1.8 {q0}, [r0]!
subs r4, r4, #1
bne .Lcbcdecloop
.Lcbcdecout:
vst1.8 {q15}, [r5] @ keep iv in q15
pop {r4-r6, pc}
ENDPROC(ce_aes_cbc_decrypt)
/*
* ce_aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
* int rounds, int bytes, u8 const iv[])
* ce_aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
* int rounds, int bytes, u8 const iv[])
*/
@ CBC with ciphertext stealing: handles the final (16, 32) bytes of a
@ message. r4 = bytes, r5 = iv. q5/q6 are vtbl index vectors built from
@ .Lcts_permute_table, offset by the size of the short final block, and
@ are used to swap/steal bytes between the last two blocks.
ENTRY(ce_aes_cbc_cts_encrypt)
push {r4-r6, lr}
ldrd r4, r5, [sp, #16]
movw ip, :lower16:.Lcts_permute_table
movt ip, :upper16:.Lcts_permute_table
sub r4, r4, #16 @ r4 = bytes in the short final block
add lr, ip, #32
add ip, ip, r4
sub lr, lr, r4
vld1.8 {q5}, [ip] @ permute vectors selected by final-block size
vld1.8 {q6}, [lr]
add ip, r1, r4
vld1.8 {q0}, [r1] @ overlapping loads
vld1.8 {q3}, [ip] @ q3 = final partial block (padded by overlap)
vld1.8 {q1}, [r5] @ get iv
prepare_key r2, r3
veor q0, q0, q1 @ xor with iv
bl aes_encrypt
@ Steal: q2 = permuted first ct block; q1 = permuted final pt block,
@ which is chained into the second encryption.
vtbl.8 d4, {d0-d1}, d10
vtbl.8 d5, {d0-d1}, d11
vtbl.8 d2, {d6-d7}, d12
vtbl.8 d3, {d6-d7}, d13
veor q0, q0, q1
bl aes_encrypt
add r4, r0, r4
vst1.8 {q2}, [r4] @ overlapping stores
vst1.8 {q0}, [r0]
pop {r4-r6, pc}
ENDPROC(ce_aes_cbc_cts_encrypt)
@ Inverse of ce_aes_cbc_cts_encrypt: decrypts the final (16, 32) bytes
@ of a CBC-CTS message. Same permute-table setup; vtbx (as opposed to
@ vtbl) merges stolen bytes into the existing register contents.
ENTRY(ce_aes_cbc_cts_decrypt)
push {r4-r6, lr}
ldrd r4, r5, [sp, #16]
movw ip, :lower16:.Lcts_permute_table
movt ip, :upper16:.Lcts_permute_table
sub r4, r4, #16 @ r4 = bytes in the short final block
add lr, ip, #32
add ip, ip, r4
sub lr, lr, r4
vld1.8 {q5}, [ip] @ permute vectors selected by final-block size
vld1.8 {q6}, [lr]
add ip, r1, r4
vld1.8 {q0}, [r1] @ overlapping loads
vld1.8 {q1}, [ip] @ q1 = final partial ct block
vld1.8 {q3}, [r5] @ get iv
prepare_key r2, r3
bl aes_decrypt
@ Un-steal: rebuild the full penultimate block in q0 from the decrypted
@ output and the final partial ct block, and recover the short block.
vtbl.8 d4, {d0-d1}, d10
vtbl.8 d5, {d0-d1}, d11
vtbx.8 d0, {d2-d3}, d12
vtbx.8 d1, {d2-d3}, d13
veor q1, q1, q2
bl aes_decrypt
veor q0, q0, q3 @ xor with iv
add r4, r0, r4
vst1.8 {q1}, [r4] @ overlapping stores
vst1.8 {q0}, [r0]
pop {r4-r6, pc}
ENDPROC(ce_aes_cbc_cts_decrypt)
/*
* aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[], int rounds,
* int blocks, u8 ctr[])
*/
@ CTR mode: r0 = out, r1 = in, r2 = round keys, r3 = rounds; block
@ count and ctr pointer on the stack. q7 holds the big-endian counter
@ block; s31 (its last 32-bit lane) is the low counter word.
ENTRY(ce_aes_ctr_encrypt)
push {r4-r6, lr}
ldrd r4, r5, [sp, #16] @ r4 = blocks, r5 = ctr pointer
vld1.8 {q7}, [r5] @ load ctr
prepare_key r2, r3
vmov r6, s31 @ keep swabbed ctr in r6
rev r6, r6
@ If the low 32-bit counter word would wrap during this request, take
@ the serial path, which handles carry into the upper words.
cmn r6, r4 @ 32 bit overflow?
bcs .Lctrloop
.Lctrloop4x:
subs r4, r4, #4
bmi .Lctr1x
/*
* NOTE: the sequence below has been carefully tweaked to avoid
* a silicon erratum that exists in Cortex-A57 (#1742098) and
* Cortex-A72 (#1655431) cores, where AESE/AESMC instruction pairs
* may produce an incorrect result if they take their input from a
* register of which a single 32-bit lane has been updated the last
* time it was modified. To work around this, the lanes of registers
* q0-q3 below are not manipulated individually, and the different
* counter values are prepared by successive manipulations of q7.
*/
add ip, r6, #1
vmov q0, q7
rev ip, ip
add lr, r6, #2
vmov s31, ip @ set lane 3 of q1 via q7
add ip, r6, #3
rev lr, lr
vmov q1, q7
vmov s31, lr @ set lane 3 of q2 via q7
rev ip, ip
vmov q2, q7
vmov s31, ip @ set lane 3 of q3 via q7
add r6, r6, #4
vmov q3, q7
vld1.8 {q4-q5}, [r1]!
vld1.8 {q6}, [r1]!
vld1.8 {q15}, [r1]!
bl aes_encrypt_4x
veor q0, q0, q4 @ xor keystream with plaintext
veor q1, q1, q5
veor q2, q2, q6
veor q3, q3, q15
rev ip, r6
vst1.8 {q0-q1}, [r0]!
vst1.8 {q2-q3}, [r0]!
vmov s31, ip @ commit incremented counter back to q7
b .Lctrloop4x
.Lctr1x:
adds r4, r4, #4 @ undo bias; r4 = remaining blocks (0..3)
beq .Lctrout
.Lctrloop:
vmov q0, q7
bl aes_encrypt
adds r6, r6, #1 @ increment BE ctr
rev ip, r6
vmov s31, ip
bcs .Lctrcarry @ low word wrapped: propagate the carry
.Lctrcarrydone:
subs r4, r4, #1
bmi .Lctrtailblock @ blocks < 0 means tail block
vld1.8 {q3}, [r1]!
veor q3, q0, q3
vst1.8 {q3}, [r0]!
bne .Lctrloop
.Lctrout:
vst1.8 {q7}, [r5] @ return next CTR value
pop {r4-r6, pc}
.Lctrtailblock:
vst1.8 {q0}, [r0, :64] @ return the key stream
b .Lctrout
.Lctrcarry:
@ Ripple the carry through the three upper big-endian counter words,
@ stopping at the first word that does not overflow.
.irp sreg, s30, s29, s28
vmov ip, \sreg @ load next word of ctr
rev ip, ip @ ... to handle the carry
adds ip, ip, #1
rev ip, ip
vmov \sreg, ip
bcc .Lctrcarrydone
.endr
b .Lctrcarrydone
ENDPROC(ce_aes_ctr_encrypt)
/*
* aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[], int rounds,
* int bytes, u8 iv[], u32 const rk2[], int first)
* aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[], int rounds,
* int bytes, u8 iv[], u32 const rk2[], int first)
*/
@ Compute the next XTS tweak: multiply \in by x in GF(2^128). The
@ doubling is two 64-bit left shifts (vadd \in,\in); the arithmetic
@ shift broadcasts each half's MSB, which after masking with \const
@ (the 0x87/carry mask from ce_aes_xts_init) and swapping halves
@ (vext #8) supplies both the cross-half carry and the reduction.
.macro next_tweak, out, in, const, tmp
vshr.s64 \tmp, \in, #63
vand \tmp, \tmp, \const
vadd.u64 \out, \in, \in
vext.8 \tmp, \tmp, \tmp, #8
veor \out, \out, \tmp
.endm
@ Shared XTS prologue: build the tweak mask in q15, load the stacked
@ args (r4 = bytes, r5 = iv, r6 = 'first' flag) and, at the start of a
@ block only, encrypt the IV with the second key to form the tweak.
ce_aes_xts_init:
vmov.i32 d30, #0x87 @ compose tweak mask vector
vmovl.u32 q15, d30
vshr.u64 d30, d31, #7
ldrd r4, r5, [sp, #16] @ load args
ldr r6, [sp, #28]
vld1.8 {q0}, [r5] @ load iv
teq r6, #1 @ start of a block?
bxne lr
@ Encrypt the IV in q0 with the second AES key. This should only
@ be done at the start of a block.
ldr r6, [sp, #24] @ load AES key 2
prepare_key r6, r3
add ip, r6, #32 @ 3rd round key of key 2
b .Laes_encrypt_tweak @ tail call
ENDPROC(ce_aes_xts_init)
@ XTS encryption. The running tweak lives in q4; each block is XORed
@ with its tweak before and after the cipher. r4 counts bytes, so a
@ trailing partial block (r4 not a multiple of 16) ends in the
@ ciphertext-stealing tail at .Lxtsenccts.
ENTRY(ce_aes_xts_encrypt)
push {r4-r6, lr}
bl ce_aes_xts_init @ run shared prologue
prepare_key r2, r3
vmov q4, q0 @ q4 = current tweak
teq r6, #0 @ start of a block?
bne .Lxtsenc4x @ mid-stream: reuse the loaded tweak as-is
.Lxtsencloop4x:
next_tweak q4, q4, q15, q10
.Lxtsenc4x:
subs r4, r4, #64
bmi .Lxtsenc1x
vld1.8 {q0-q1}, [r1]! @ get 4 pt blocks
vld1.8 {q2-q3}, [r1]!
next_tweak q5, q4, q15, q10 @ tweaks for blocks 2..4 in q5-q7
veor q0, q0, q4
next_tweak q6, q5, q15, q10
veor q1, q1, q5
next_tweak q7, q6, q15, q10
veor q2, q2, q6
veor q3, q3, q7
bl aes_encrypt_4x
veor q0, q0, q4 @ post-whitening with the same tweaks
veor q1, q1, q5
veor q2, q2, q6
veor q3, q3, q7
vst1.8 {q0-q1}, [r0]! @ write 4 ct blocks
vst1.8 {q2-q3}, [r0]!
vmov q4, q7 @ carry last tweak forward
teq r4, #0
beq .Lxtsencret
b .Lxtsencloop4x
.Lxtsenc1x:
adds r4, r4, #64 @ undo bias; r4 = remaining bytes
beq .Lxtsencout
subs r4, r4, #16
bmi .LxtsencctsNx @ < 16 bytes left: CTS with the 4x output
.Lxtsencloop:
vld1.8 {q0}, [r1]!
.Lxtsencctsout:
veor q0, q0, q4
bl aes_encrypt
veor q0, q0, q4
teq r4, #0
beq .Lxtsencout
subs r4, r4, #16
next_tweak q4, q4, q15, q6
bmi .Lxtsenccts
vst1.8 {q0}, [r0]!
b .Lxtsencloop
.Lxtsencout:
vst1.8 {q0}, [r0]
.Lxtsencret:
vst1.8 {q4}, [r5] @ return current tweak via the iv buffer
pop {r4-r6, pc}
.LxtsencctsNx:
vmov q0, q3 @ redo the last 4x block for stealing
sub r0, r0, #16
.Lxtsenccts:
@ Ciphertext stealing: permute-merge the final partial block with the
@ last full ct block (indices from .Lcts_permute_table), then loop
@ back to encrypt the merged block once more.
movw ip, :lower16:.Lcts_permute_table
movt ip, :upper16:.Lcts_permute_table
add r1, r1, r4 @ rewind input pointer
add r4, r4, #16 @ # bytes in final block
add lr, ip, #32
add ip, ip, r4
sub lr, lr, r4
add r4, r0, r4 @ output address of final block
vld1.8 {q1}, [r1] @ load final partial block
vld1.8 {q2}, [ip]
vld1.8 {q3}, [lr]
vtbl.8 d4, {d0-d1}, d4
vtbl.8 d5, {d0-d1}, d5
vtbx.8 d0, {d2-d3}, d6
vtbx.8 d1, {d2-d3}, d7
vst1.8 {q2}, [r4] @ overlapping stores
mov r4, #0
b .Lxtsencctsout
ENDPROC(ce_aes_xts_encrypt)
@ XTS decryption. Mirrors ce_aes_xts_encrypt, except that for CTS the
@ final full block must be decrypted with the *next* tweak (q5) while
@ the partial block uses the current one, so 16 bytes are carved off
@ the byte count up front when the length is not block-aligned.
ENTRY(ce_aes_xts_decrypt)
push {r4-r6, lr}
bl ce_aes_xts_init @ run shared prologue
prepare_key r2, r3
vmov q4, q0 @ q4 = current tweak
/* subtract 16 bytes if we are doing CTS */
tst r4, #0xf
subne r4, r4, #0x10
teq r6, #0 @ start of a block?
bne .Lxtsdec4x @ mid-stream: reuse the loaded tweak as-is
.Lxtsdecloop4x:
next_tweak q4, q4, q15, q10
.Lxtsdec4x:
subs r4, r4, #64
bmi .Lxtsdec1x
vld1.8 {q0-q1}, [r1]! @ get 4 ct blocks
vld1.8 {q2-q3}, [r1]!
next_tweak q5, q4, q15, q10 @ tweaks for blocks 2..4 in q5-q7
veor q0, q0, q4
next_tweak q6, q5, q15, q10
veor q1, q1, q5
next_tweak q7, q6, q15, q10
veor q2, q2, q6
veor q3, q3, q7
bl aes_decrypt_4x
veor q0, q0, q4 @ post-whitening with the same tweaks
veor q1, q1, q5
veor q2, q2, q6
veor q3, q3, q7
vst1.8 {q0-q1}, [r0]! @ write 4 pt blocks
vst1.8 {q2-q3}, [r0]!
vmov q4, q7 @ carry last tweak forward
teq r4, #0
beq .Lxtsdecout
b .Lxtsdecloop4x
.Lxtsdec1x:
adds r4, r4, #64 @ undo bias; r4 = remaining bytes
beq .Lxtsdecout
subs r4, r4, #16
.Lxtsdecloop:
vld1.8 {q0}, [r1]!
bmi .Lxtsdeccts @ negative: this load is the short tail
.Lxtsdecctsout:
veor q0, q0, q4
bl aes_decrypt
veor q0, q0, q4
vst1.8 {q0}, [r0]!
teq r4, #0
beq .Lxtsdecout
subs r4, r4, #16
next_tweak q4, q4, q15, q6
b .Lxtsdecloop
.Lxtsdecout:
vst1.8 {q4}, [r5] @ return current tweak via the iv buffer
pop {r4-r6, pc}
.Lxtsdeccts:
@ Ciphertext stealing: decrypt the last full block with the next
@ tweak (q5), permute-merge in the partial block, then loop back to
@ decrypt the merged block with the current tweak.
movw ip, :lower16:.Lcts_permute_table
movt ip, :upper16:.Lcts_permute_table
add r1, r1, r4 @ rewind input pointer
add r4, r4, #16 @ # bytes in final block
add lr, ip, #32
add ip, ip, r4
sub lr, lr, r4
add r4, r0, r4 @ output address of final block
next_tweak q5, q4, q15, q6
vld1.8 {q1}, [r1] @ load final partial block
vld1.8 {q2}, [ip]
vld1.8 {q3}, [lr]
veor q0, q0, q5
bl aes_decrypt
veor q0, q0, q5
vtbl.8 d4, {d0-d1}, d4
vtbl.8 d5, {d0-d1}, d5
vtbx.8 d0, {d2-d3}, d6
vtbx.8 d1, {d2-d3}, d7
vst1.8 {q2}, [r4] @ overlapping stores
mov r4, #0
b .Lxtsdecctsout
ENDPROC(ce_aes_xts_decrypt)
/*
* u32 ce_aes_sub(u32 input) - use the aese instruction to perform the
* AES sbox substitution on each byte in
* 'input'
*/
@ Apply the AES S-box to each byte of r0 and return the result in r0.
@ With an all-zero state, aese reduces to SubBytes(ShiftRows(key)); the
@ input word is broadcast to all four lanes, so ShiftRows is a no-op
@ and lane 0 of the result is the substituted word.
ENTRY(ce_aes_sub)
vdup.32 q0, r0 @ broadcast input word into the "round key"
veor q1, q1, q1 @ zero state => aese only sees the key
aese.8 q1, q0
vmov r0, s4 @ lane 0 of q1
bx lr
ENDPROC(ce_aes_sub)
/*
* void ce_aes_invert(u8 *dst, u8 *src) - perform the Inverse MixColumns
* operation on round key *src
*/
@ Apply the Inverse MixColumns transform to the round key at *r1 and
@ store the result at *r0 (used when deriving decryption round keys).
@ q1 is caller-saved under AAPCS, so no registers need preserving.
ENTRY(ce_aes_invert)
vld1.32 {q1}, [r1]
aesimc.8 q1, q1
vst1.32 {q1}, [r0]
bx lr
ENDPROC(ce_aes_invert)
.section ".rodata", "a"
.align 6
.Lcts_permute_table:
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
.byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
|
aixcc-public/challenge-001-exemplar-source
| 46,959
|
arch/arm/crypto/curve25519-core.S
|
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*
* Based on public domain code from Daniel J. Bernstein and Peter Schwabe. This
* began from SUPERCOP's curve25519/neon2/scalarmult.s, but has subsequently been
* manually reworked for use in kernel space.
*/
#include <linux/linkage.h>
.text
.arch armv7-a
.fpu neon
.align 4
ENTRY(curve25519_neon)
push {r4-r11, lr}
mov ip, sp
sub r3, sp, #704
and r3, r3, #0xfffffff0
mov sp, r3
movw r4, #0
movw r5, #254
vmov.i32 q0, #1
vshr.u64 q1, q0, #7
vshr.u64 q0, q0, #8
vmov.i32 d4, #19
vmov.i32 d5, #38
add r6, sp, #480
vst1.8 {d2-d3}, [r6, : 128]!
vst1.8 {d0-d1}, [r6, : 128]!
vst1.8 {d4-d5}, [r6, : 128]
add r6, r3, #0
vmov.i32 q2, #0
vst1.8 {d4-d5}, [r6, : 128]!
vst1.8 {d4-d5}, [r6, : 128]!
vst1.8 d4, [r6, : 64]
add r6, r3, #0
movw r7, #960
sub r7, r7, #2
neg r7, r7
sub r7, r7, r7, LSL #7
str r7, [r6]
add r6, sp, #672
vld1.8 {d4-d5}, [r1]!
vld1.8 {d6-d7}, [r1]
vst1.8 {d4-d5}, [r6, : 128]!
vst1.8 {d6-d7}, [r6, : 128]
sub r1, r6, #16
ldrb r6, [r1]
and r6, r6, #248
strb r6, [r1]
ldrb r6, [r1, #31]
and r6, r6, #127
orr r6, r6, #64
strb r6, [r1, #31]
vmov.i64 q2, #0xffffffff
vshr.u64 q3, q2, #7
vshr.u64 q2, q2, #6
vld1.8 {d8}, [r2]
vld1.8 {d10}, [r2]
add r2, r2, #6
vld1.8 {d12}, [r2]
vld1.8 {d14}, [r2]
add r2, r2, #6
vld1.8 {d16}, [r2]
add r2, r2, #4
vld1.8 {d18}, [r2]
vld1.8 {d20}, [r2]
add r2, r2, #6
vld1.8 {d22}, [r2]
add r2, r2, #2
vld1.8 {d24}, [r2]
vld1.8 {d26}, [r2]
vshr.u64 q5, q5, #26
vshr.u64 q6, q6, #3
vshr.u64 q7, q7, #29
vshr.u64 q8, q8, #6
vshr.u64 q10, q10, #25
vshr.u64 q11, q11, #3
vshr.u64 q12, q12, #12
vshr.u64 q13, q13, #38
vand q4, q4, q2
vand q6, q6, q2
vand q8, q8, q2
vand q10, q10, q2
vand q2, q12, q2
vand q5, q5, q3
vand q7, q7, q3
vand q9, q9, q3
vand q11, q11, q3
vand q3, q13, q3
add r2, r3, #48
vadd.i64 q12, q4, q1
vadd.i64 q13, q10, q1
vshr.s64 q12, q12, #26
vshr.s64 q13, q13, #26
vadd.i64 q5, q5, q12
vshl.i64 q12, q12, #26
vadd.i64 q14, q5, q0
vadd.i64 q11, q11, q13
vshl.i64 q13, q13, #26
vadd.i64 q15, q11, q0
vsub.i64 q4, q4, q12
vshr.s64 q12, q14, #25
vsub.i64 q10, q10, q13
vshr.s64 q13, q15, #25
vadd.i64 q6, q6, q12
vshl.i64 q12, q12, #25
vadd.i64 q14, q6, q1
vadd.i64 q2, q2, q13
vsub.i64 q5, q5, q12
vshr.s64 q12, q14, #26
vshl.i64 q13, q13, #25
vadd.i64 q14, q2, q1
vadd.i64 q7, q7, q12
vshl.i64 q12, q12, #26
vadd.i64 q15, q7, q0
vsub.i64 q11, q11, q13
vshr.s64 q13, q14, #26
vsub.i64 q6, q6, q12
vshr.s64 q12, q15, #25
vadd.i64 q3, q3, q13
vshl.i64 q13, q13, #26
vadd.i64 q14, q3, q0
vadd.i64 q8, q8, q12
vshl.i64 q12, q12, #25
vadd.i64 q15, q8, q1
add r2, r2, #8
vsub.i64 q2, q2, q13
vshr.s64 q13, q14, #25
vsub.i64 q7, q7, q12
vshr.s64 q12, q15, #26
vadd.i64 q14, q13, q13
vadd.i64 q9, q9, q12
vtrn.32 d12, d14
vshl.i64 q12, q12, #26
vtrn.32 d13, d15
vadd.i64 q0, q9, q0
vadd.i64 q4, q4, q14
vst1.8 d12, [r2, : 64]!
vshl.i64 q6, q13, #4
vsub.i64 q7, q8, q12
vshr.s64 q0, q0, #25
vadd.i64 q4, q4, q6
vadd.i64 q6, q10, q0
vshl.i64 q0, q0, #25
vadd.i64 q8, q6, q1
vadd.i64 q4, q4, q13
vshl.i64 q10, q13, #25
vadd.i64 q1, q4, q1
vsub.i64 q0, q9, q0
vshr.s64 q8, q8, #26
vsub.i64 q3, q3, q10
vtrn.32 d14, d0
vshr.s64 q1, q1, #26
vtrn.32 d15, d1
vadd.i64 q0, q11, q8
vst1.8 d14, [r2, : 64]
vshl.i64 q7, q8, #26
vadd.i64 q5, q5, q1
vtrn.32 d4, d6
vshl.i64 q1, q1, #26
vtrn.32 d5, d7
vsub.i64 q3, q6, q7
add r2, r2, #16
vsub.i64 q1, q4, q1
vst1.8 d4, [r2, : 64]
vtrn.32 d6, d0
vtrn.32 d7, d1
sub r2, r2, #8
vtrn.32 d2, d10
vtrn.32 d3, d11
vst1.8 d6, [r2, : 64]
sub r2, r2, #24
vst1.8 d2, [r2, : 64]
add r2, r3, #96
vmov.i32 q0, #0
vmov.i64 d2, #0xff
vmov.i64 d3, #0
vshr.u32 q1, q1, #7
vst1.8 {d2-d3}, [r2, : 128]!
vst1.8 {d0-d1}, [r2, : 128]!
vst1.8 d0, [r2, : 64]
add r2, r3, #144
vmov.i32 q0, #0
vst1.8 {d0-d1}, [r2, : 128]!
vst1.8 {d0-d1}, [r2, : 128]!
vst1.8 d0, [r2, : 64]
add r2, r3, #240
vmov.i32 q0, #0
vmov.i64 d2, #0xff
vmov.i64 d3, #0
vshr.u32 q1, q1, #7
vst1.8 {d2-d3}, [r2, : 128]!
vst1.8 {d0-d1}, [r2, : 128]!
vst1.8 d0, [r2, : 64]
add r2, r3, #48
add r6, r3, #192
vld1.8 {d0-d1}, [r2, : 128]!
vld1.8 {d2-d3}, [r2, : 128]!
vld1.8 {d4}, [r2, : 64]
vst1.8 {d0-d1}, [r6, : 128]!
vst1.8 {d2-d3}, [r6, : 128]!
vst1.8 d4, [r6, : 64]
.Lmainloop:
mov r2, r5, LSR #3
and r6, r5, #7
ldrb r2, [r1, r2]
mov r2, r2, LSR r6
and r2, r2, #1
str r5, [sp, #456]
eor r4, r4, r2
str r2, [sp, #460]
neg r2, r4
add r4, r3, #96
add r5, r3, #192
add r6, r3, #144
vld1.8 {d8-d9}, [r4, : 128]!
add r7, r3, #240
vld1.8 {d10-d11}, [r5, : 128]!
veor q6, q4, q5
vld1.8 {d14-d15}, [r6, : 128]!
vdup.i32 q8, r2
vld1.8 {d18-d19}, [r7, : 128]!
veor q10, q7, q9
vld1.8 {d22-d23}, [r4, : 128]!
vand q6, q6, q8
vld1.8 {d24-d25}, [r5, : 128]!
vand q10, q10, q8
vld1.8 {d26-d27}, [r6, : 128]!
veor q4, q4, q6
vld1.8 {d28-d29}, [r7, : 128]!
veor q5, q5, q6
vld1.8 {d0}, [r4, : 64]
veor q6, q7, q10
vld1.8 {d2}, [r5, : 64]
veor q7, q9, q10
vld1.8 {d4}, [r6, : 64]
veor q9, q11, q12
vld1.8 {d6}, [r7, : 64]
veor q10, q0, q1
sub r2, r4, #32
vand q9, q9, q8
sub r4, r5, #32
vand q10, q10, q8
sub r5, r6, #32
veor q11, q11, q9
sub r6, r7, #32
veor q0, q0, q10
veor q9, q12, q9
veor q1, q1, q10
veor q10, q13, q14
veor q12, q2, q3
vand q10, q10, q8
vand q8, q12, q8
veor q12, q13, q10
veor q2, q2, q8
veor q10, q14, q10
veor q3, q3, q8
vadd.i32 q8, q4, q6
vsub.i32 q4, q4, q6
vst1.8 {d16-d17}, [r2, : 128]!
vadd.i32 q6, q11, q12
vst1.8 {d8-d9}, [r5, : 128]!
vsub.i32 q4, q11, q12
vst1.8 {d12-d13}, [r2, : 128]!
vadd.i32 q6, q0, q2
vst1.8 {d8-d9}, [r5, : 128]!
vsub.i32 q0, q0, q2
vst1.8 d12, [r2, : 64]
vadd.i32 q2, q5, q7
vst1.8 d0, [r5, : 64]
vsub.i32 q0, q5, q7
vst1.8 {d4-d5}, [r4, : 128]!
vadd.i32 q2, q9, q10
vst1.8 {d0-d1}, [r6, : 128]!
vsub.i32 q0, q9, q10
vst1.8 {d4-d5}, [r4, : 128]!
vadd.i32 q2, q1, q3
vst1.8 {d0-d1}, [r6, : 128]!
vsub.i32 q0, q1, q3
vst1.8 d4, [r4, : 64]
vst1.8 d0, [r6, : 64]
add r2, sp, #512
add r4, r3, #96
add r5, r3, #144
vld1.8 {d0-d1}, [r2, : 128]
vld1.8 {d2-d3}, [r4, : 128]!
vld1.8 {d4-d5}, [r5, : 128]!
vzip.i32 q1, q2
vld1.8 {d6-d7}, [r4, : 128]!
vld1.8 {d8-d9}, [r5, : 128]!
vshl.i32 q5, q1, #1
vzip.i32 q3, q4
vshl.i32 q6, q2, #1
vld1.8 {d14}, [r4, : 64]
vshl.i32 q8, q3, #1
vld1.8 {d15}, [r5, : 64]
vshl.i32 q9, q4, #1
vmul.i32 d21, d7, d1
vtrn.32 d14, d15
vmul.i32 q11, q4, q0
vmul.i32 q0, q7, q0
vmull.s32 q12, d2, d2
vmlal.s32 q12, d11, d1
vmlal.s32 q12, d12, d0
vmlal.s32 q12, d13, d23
vmlal.s32 q12, d16, d22
vmlal.s32 q12, d7, d21
vmull.s32 q10, d2, d11
vmlal.s32 q10, d4, d1
vmlal.s32 q10, d13, d0
vmlal.s32 q10, d6, d23
vmlal.s32 q10, d17, d22
vmull.s32 q13, d10, d4
vmlal.s32 q13, d11, d3
vmlal.s32 q13, d13, d1
vmlal.s32 q13, d16, d0
vmlal.s32 q13, d17, d23
vmlal.s32 q13, d8, d22
vmull.s32 q1, d10, d5
vmlal.s32 q1, d11, d4
vmlal.s32 q1, d6, d1
vmlal.s32 q1, d17, d0
vmlal.s32 q1, d8, d23
vmull.s32 q14, d10, d6
vmlal.s32 q14, d11, d13
vmlal.s32 q14, d4, d4
vmlal.s32 q14, d17, d1
vmlal.s32 q14, d18, d0
vmlal.s32 q14, d9, d23
vmull.s32 q11, d10, d7
vmlal.s32 q11, d11, d6
vmlal.s32 q11, d12, d5
vmlal.s32 q11, d8, d1
vmlal.s32 q11, d19, d0
vmull.s32 q15, d10, d8
vmlal.s32 q15, d11, d17
vmlal.s32 q15, d12, d6
vmlal.s32 q15, d13, d5
vmlal.s32 q15, d19, d1
vmlal.s32 q15, d14, d0
vmull.s32 q2, d10, d9
vmlal.s32 q2, d11, d8
vmlal.s32 q2, d12, d7
vmlal.s32 q2, d13, d6
vmlal.s32 q2, d14, d1
vmull.s32 q0, d15, d1
vmlal.s32 q0, d10, d14
vmlal.s32 q0, d11, d19
vmlal.s32 q0, d12, d8
vmlal.s32 q0, d13, d17
vmlal.s32 q0, d6, d6
add r2, sp, #480
vld1.8 {d18-d19}, [r2, : 128]!
vmull.s32 q3, d16, d7
vmlal.s32 q3, d10, d15
vmlal.s32 q3, d11, d14
vmlal.s32 q3, d12, d9
vmlal.s32 q3, d13, d8
vld1.8 {d8-d9}, [r2, : 128]
vadd.i64 q5, q12, q9
vadd.i64 q6, q15, q9
vshr.s64 q5, q5, #26
vshr.s64 q6, q6, #26
vadd.i64 q7, q10, q5
vshl.i64 q5, q5, #26
vadd.i64 q8, q7, q4
vadd.i64 q2, q2, q6
vshl.i64 q6, q6, #26
vadd.i64 q10, q2, q4
vsub.i64 q5, q12, q5
vshr.s64 q8, q8, #25
vsub.i64 q6, q15, q6
vshr.s64 q10, q10, #25
vadd.i64 q12, q13, q8
vshl.i64 q8, q8, #25
vadd.i64 q13, q12, q9
vadd.i64 q0, q0, q10
vsub.i64 q7, q7, q8
vshr.s64 q8, q13, #26
vshl.i64 q10, q10, #25
vadd.i64 q13, q0, q9
vadd.i64 q1, q1, q8
vshl.i64 q8, q8, #26
vadd.i64 q15, q1, q4
vsub.i64 q2, q2, q10
vshr.s64 q10, q13, #26
vsub.i64 q8, q12, q8
vshr.s64 q12, q15, #25
vadd.i64 q3, q3, q10
vshl.i64 q10, q10, #26
vadd.i64 q13, q3, q4
vadd.i64 q14, q14, q12
add r2, r3, #288
vshl.i64 q12, q12, #25
add r4, r3, #336
vadd.i64 q15, q14, q9
add r2, r2, #8
vsub.i64 q0, q0, q10
add r4, r4, #8
vshr.s64 q10, q13, #25
vsub.i64 q1, q1, q12
vshr.s64 q12, q15, #26
vadd.i64 q13, q10, q10
vadd.i64 q11, q11, q12
vtrn.32 d16, d2
vshl.i64 q12, q12, #26
vtrn.32 d17, d3
vadd.i64 q1, q11, q4
vadd.i64 q4, q5, q13
vst1.8 d16, [r2, : 64]!
vshl.i64 q5, q10, #4
vst1.8 d17, [r4, : 64]!
vsub.i64 q8, q14, q12
vshr.s64 q1, q1, #25
vadd.i64 q4, q4, q5
vadd.i64 q5, q6, q1
vshl.i64 q1, q1, #25
vadd.i64 q6, q5, q9
vadd.i64 q4, q4, q10
vshl.i64 q10, q10, #25
vadd.i64 q9, q4, q9
vsub.i64 q1, q11, q1
vshr.s64 q6, q6, #26
vsub.i64 q3, q3, q10
vtrn.32 d16, d2
vshr.s64 q9, q9, #26
vtrn.32 d17, d3
vadd.i64 q1, q2, q6
vst1.8 d16, [r2, : 64]
vshl.i64 q2, q6, #26
vst1.8 d17, [r4, : 64]
vadd.i64 q6, q7, q9
vtrn.32 d0, d6
vshl.i64 q7, q9, #26
vtrn.32 d1, d7
vsub.i64 q2, q5, q2
add r2, r2, #16
vsub.i64 q3, q4, q7
vst1.8 d0, [r2, : 64]
add r4, r4, #16
vst1.8 d1, [r4, : 64]
vtrn.32 d4, d2
vtrn.32 d5, d3
sub r2, r2, #8
sub r4, r4, #8
vtrn.32 d6, d12
vtrn.32 d7, d13
vst1.8 d4, [r2, : 64]
vst1.8 d5, [r4, : 64]
sub r2, r2, #24
sub r4, r4, #24
vst1.8 d6, [r2, : 64]
vst1.8 d7, [r4, : 64]
add r2, r3, #240
add r4, r3, #96
vld1.8 {d0-d1}, [r4, : 128]!
vld1.8 {d2-d3}, [r4, : 128]!
vld1.8 {d4}, [r4, : 64]
add r4, r3, #144
vld1.8 {d6-d7}, [r4, : 128]!
vtrn.32 q0, q3
vld1.8 {d8-d9}, [r4, : 128]!
vshl.i32 q5, q0, #4
vtrn.32 q1, q4
vshl.i32 q6, q3, #4
vadd.i32 q5, q5, q0
vadd.i32 q6, q6, q3
vshl.i32 q7, q1, #4
vld1.8 {d5}, [r4, : 64]
vshl.i32 q8, q4, #4
vtrn.32 d4, d5
vadd.i32 q7, q7, q1
vadd.i32 q8, q8, q4
vld1.8 {d18-d19}, [r2, : 128]!
vshl.i32 q10, q2, #4
vld1.8 {d22-d23}, [r2, : 128]!
vadd.i32 q10, q10, q2
vld1.8 {d24}, [r2, : 64]
vadd.i32 q5, q5, q0
add r2, r3, #192
vld1.8 {d26-d27}, [r2, : 128]!
vadd.i32 q6, q6, q3
vld1.8 {d28-d29}, [r2, : 128]!
vadd.i32 q8, q8, q4
vld1.8 {d25}, [r2, : 64]
vadd.i32 q10, q10, q2
vtrn.32 q9, q13
vadd.i32 q7, q7, q1
vadd.i32 q5, q5, q0
vtrn.32 q11, q14
vadd.i32 q6, q6, q3
add r2, sp, #528
vadd.i32 q10, q10, q2
vtrn.32 d24, d25
vst1.8 {d12-d13}, [r2, : 128]!
vshl.i32 q6, q13, #1
vst1.8 {d20-d21}, [r2, : 128]!
vshl.i32 q10, q14, #1
vst1.8 {d12-d13}, [r2, : 128]!
vshl.i32 q15, q12, #1
vadd.i32 q8, q8, q4
vext.32 d10, d31, d30, #0
vadd.i32 q7, q7, q1
vst1.8 {d16-d17}, [r2, : 128]!
vmull.s32 q8, d18, d5
vmlal.s32 q8, d26, d4
vmlal.s32 q8, d19, d9
vmlal.s32 q8, d27, d3
vmlal.s32 q8, d22, d8
vmlal.s32 q8, d28, d2
vmlal.s32 q8, d23, d7
vmlal.s32 q8, d29, d1
vmlal.s32 q8, d24, d6
vmlal.s32 q8, d25, d0
vst1.8 {d14-d15}, [r2, : 128]!
vmull.s32 q2, d18, d4
vmlal.s32 q2, d12, d9
vmlal.s32 q2, d13, d8
vmlal.s32 q2, d19, d3
vmlal.s32 q2, d22, d2
vmlal.s32 q2, d23, d1
vmlal.s32 q2, d24, d0
vst1.8 {d20-d21}, [r2, : 128]!
vmull.s32 q7, d18, d9
vmlal.s32 q7, d26, d3
vmlal.s32 q7, d19, d8
vmlal.s32 q7, d27, d2
vmlal.s32 q7, d22, d7
vmlal.s32 q7, d28, d1
vmlal.s32 q7, d23, d6
vmlal.s32 q7, d29, d0
vst1.8 {d10-d11}, [r2, : 128]!
vmull.s32 q5, d18, d3
vmlal.s32 q5, d19, d2
vmlal.s32 q5, d22, d1
vmlal.s32 q5, d23, d0
vmlal.s32 q5, d12, d8
vst1.8 {d16-d17}, [r2, : 128]
vmull.s32 q4, d18, d8
vmlal.s32 q4, d26, d2
vmlal.s32 q4, d19, d7
vmlal.s32 q4, d27, d1
vmlal.s32 q4, d22, d6
vmlal.s32 q4, d28, d0
vmull.s32 q8, d18, d7
vmlal.s32 q8, d26, d1
vmlal.s32 q8, d19, d6
vmlal.s32 q8, d27, d0
add r2, sp, #544
vld1.8 {d20-d21}, [r2, : 128]
vmlal.s32 q7, d24, d21
vmlal.s32 q7, d25, d20
vmlal.s32 q4, d23, d21
vmlal.s32 q4, d29, d20
vmlal.s32 q8, d22, d21
vmlal.s32 q8, d28, d20
vmlal.s32 q5, d24, d20
vst1.8 {d14-d15}, [r2, : 128]
vmull.s32 q7, d18, d6
vmlal.s32 q7, d26, d0
add r2, sp, #624
vld1.8 {d30-d31}, [r2, : 128]
vmlal.s32 q2, d30, d21
vmlal.s32 q7, d19, d21
vmlal.s32 q7, d27, d20
add r2, sp, #592
vld1.8 {d26-d27}, [r2, : 128]
vmlal.s32 q4, d25, d27
vmlal.s32 q8, d29, d27
vmlal.s32 q8, d25, d26
vmlal.s32 q7, d28, d27
vmlal.s32 q7, d29, d26
add r2, sp, #576
vld1.8 {d28-d29}, [r2, : 128]
vmlal.s32 q4, d24, d29
vmlal.s32 q8, d23, d29
vmlal.s32 q8, d24, d28
vmlal.s32 q7, d22, d29
vmlal.s32 q7, d23, d28
vst1.8 {d8-d9}, [r2, : 128]
add r2, sp, #528
vld1.8 {d8-d9}, [r2, : 128]
vmlal.s32 q7, d24, d9
vmlal.s32 q7, d25, d31
vmull.s32 q1, d18, d2
vmlal.s32 q1, d19, d1
vmlal.s32 q1, d22, d0
vmlal.s32 q1, d24, d27
vmlal.s32 q1, d23, d20
vmlal.s32 q1, d12, d7
vmlal.s32 q1, d13, d6
vmull.s32 q6, d18, d1
vmlal.s32 q6, d19, d0
vmlal.s32 q6, d23, d27
vmlal.s32 q6, d22, d20
vmlal.s32 q6, d24, d26
vmull.s32 q0, d18, d0
vmlal.s32 q0, d22, d27
vmlal.s32 q0, d23, d26
vmlal.s32 q0, d24, d31
vmlal.s32 q0, d19, d20
add r2, sp, #608
vld1.8 {d18-d19}, [r2, : 128]
vmlal.s32 q2, d18, d7
vmlal.s32 q5, d18, d6
vmlal.s32 q1, d18, d21
vmlal.s32 q0, d18, d28
vmlal.s32 q6, d18, d29
vmlal.s32 q2, d19, d6
vmlal.s32 q5, d19, d21
vmlal.s32 q1, d19, d29
vmlal.s32 q0, d19, d9
vmlal.s32 q6, d19, d28
add r2, sp, #560
vld1.8 {d18-d19}, [r2, : 128]
add r2, sp, #480
vld1.8 {d22-d23}, [r2, : 128]
vmlal.s32 q5, d19, d7
vmlal.s32 q0, d18, d21
vmlal.s32 q0, d19, d29
vmlal.s32 q6, d18, d6
add r2, sp, #496
vld1.8 {d6-d7}, [r2, : 128]
vmlal.s32 q6, d19, d21
add r2, sp, #544
vld1.8 {d18-d19}, [r2, : 128]
vmlal.s32 q0, d30, d8
add r2, sp, #640
vld1.8 {d20-d21}, [r2, : 128]
vmlal.s32 q5, d30, d29
add r2, sp, #576
vld1.8 {d24-d25}, [r2, : 128]
vmlal.s32 q1, d30, d28
vadd.i64 q13, q0, q11
vadd.i64 q14, q5, q11
vmlal.s32 q6, d30, d9
vshr.s64 q4, q13, #26
vshr.s64 q13, q14, #26
vadd.i64 q7, q7, q4
vshl.i64 q4, q4, #26
vadd.i64 q14, q7, q3
vadd.i64 q9, q9, q13
vshl.i64 q13, q13, #26
vadd.i64 q15, q9, q3
vsub.i64 q0, q0, q4
vshr.s64 q4, q14, #25
vsub.i64 q5, q5, q13
vshr.s64 q13, q15, #25
vadd.i64 q6, q6, q4
vshl.i64 q4, q4, #25
vadd.i64 q14, q6, q11
vadd.i64 q2, q2, q13
vsub.i64 q4, q7, q4
vshr.s64 q7, q14, #26
vshl.i64 q13, q13, #25
vadd.i64 q14, q2, q11
vadd.i64 q8, q8, q7
vshl.i64 q7, q7, #26
vadd.i64 q15, q8, q3
vsub.i64 q9, q9, q13
vshr.s64 q13, q14, #26
vsub.i64 q6, q6, q7
vshr.s64 q7, q15, #25
vadd.i64 q10, q10, q13
vshl.i64 q13, q13, #26
vadd.i64 q14, q10, q3
vadd.i64 q1, q1, q7
add r2, r3, #144
vshl.i64 q7, q7, #25
add r4, r3, #96
vadd.i64 q15, q1, q11
add r2, r2, #8
vsub.i64 q2, q2, q13
add r4, r4, #8
vshr.s64 q13, q14, #25
vsub.i64 q7, q8, q7
vshr.s64 q8, q15, #26
vadd.i64 q14, q13, q13
vadd.i64 q12, q12, q8
vtrn.32 d12, d14
vshl.i64 q8, q8, #26
vtrn.32 d13, d15
vadd.i64 q3, q12, q3
vadd.i64 q0, q0, q14
vst1.8 d12, [r2, : 64]!
vshl.i64 q7, q13, #4
vst1.8 d13, [r4, : 64]!
vsub.i64 q1, q1, q8
vshr.s64 q3, q3, #25
vadd.i64 q0, q0, q7
vadd.i64 q5, q5, q3
vshl.i64 q3, q3, #25
vadd.i64 q6, q5, q11
vadd.i64 q0, q0, q13
vshl.i64 q7, q13, #25
vadd.i64 q8, q0, q11
vsub.i64 q3, q12, q3
vshr.s64 q6, q6, #26
vsub.i64 q7, q10, q7
vtrn.32 d2, d6
vshr.s64 q8, q8, #26
vtrn.32 d3, d7
vadd.i64 q3, q9, q6
vst1.8 d2, [r2, : 64]
vshl.i64 q6, q6, #26
vst1.8 d3, [r4, : 64]
vadd.i64 q1, q4, q8
vtrn.32 d4, d14
vshl.i64 q4, q8, #26
vtrn.32 d5, d15
vsub.i64 q5, q5, q6
add r2, r2, #16
vsub.i64 q0, q0, q4
vst1.8 d4, [r2, : 64]
add r4, r4, #16
vst1.8 d5, [r4, : 64]
vtrn.32 d10, d6
vtrn.32 d11, d7
sub r2, r2, #8
sub r4, r4, #8
vtrn.32 d0, d2
vtrn.32 d1, d3
vst1.8 d10, [r2, : 64]
vst1.8 d11, [r4, : 64]
sub r2, r2, #24
sub r4, r4, #24
vst1.8 d0, [r2, : 64]
vst1.8 d1, [r4, : 64]
add r2, r3, #288
add r4, r3, #336
vld1.8 {d0-d1}, [r2, : 128]!
vld1.8 {d2-d3}, [r4, : 128]!
vsub.i32 q0, q0, q1
vld1.8 {d2-d3}, [r2, : 128]!
vld1.8 {d4-d5}, [r4, : 128]!
vsub.i32 q1, q1, q2
add r5, r3, #240
vld1.8 {d4}, [r2, : 64]
vld1.8 {d6}, [r4, : 64]
vsub.i32 q2, q2, q3
vst1.8 {d0-d1}, [r5, : 128]!
vst1.8 {d2-d3}, [r5, : 128]!
vst1.8 d4, [r5, : 64]
add r2, r3, #144
add r4, r3, #96
add r5, r3, #144
add r6, r3, #192
vld1.8 {d0-d1}, [r2, : 128]!
vld1.8 {d2-d3}, [r4, : 128]!
vsub.i32 q2, q0, q1
vadd.i32 q0, q0, q1
vld1.8 {d2-d3}, [r2, : 128]!
vld1.8 {d6-d7}, [r4, : 128]!
vsub.i32 q4, q1, q3
vadd.i32 q1, q1, q3
vld1.8 {d6}, [r2, : 64]
vld1.8 {d10}, [r4, : 64]
vsub.i32 q6, q3, q5
vadd.i32 q3, q3, q5
vst1.8 {d4-d5}, [r5, : 128]!
vst1.8 {d0-d1}, [r6, : 128]!
vst1.8 {d8-d9}, [r5, : 128]!
vst1.8 {d2-d3}, [r6, : 128]!
vst1.8 d12, [r5, : 64]
vst1.8 d6, [r6, : 64]
add r2, r3, #0
add r4, r3, #240
vld1.8 {d0-d1}, [r4, : 128]!
vld1.8 {d2-d3}, [r4, : 128]!
vld1.8 {d4}, [r4, : 64]
add r4, r3, #336
vld1.8 {d6-d7}, [r4, : 128]!
vtrn.32 q0, q3
vld1.8 {d8-d9}, [r4, : 128]!
vshl.i32 q5, q0, #4
vtrn.32 q1, q4
vshl.i32 q6, q3, #4
vadd.i32 q5, q5, q0
vadd.i32 q6, q6, q3
vshl.i32 q7, q1, #4
vld1.8 {d5}, [r4, : 64]
vshl.i32 q8, q4, #4
vtrn.32 d4, d5
vadd.i32 q7, q7, q1
vadd.i32 q8, q8, q4
vld1.8 {d18-d19}, [r2, : 128]!
vshl.i32 q10, q2, #4
vld1.8 {d22-d23}, [r2, : 128]!
vadd.i32 q10, q10, q2
vld1.8 {d24}, [r2, : 64]
vadd.i32 q5, q5, q0
add r2, r3, #288
vld1.8 {d26-d27}, [r2, : 128]!
vadd.i32 q6, q6, q3
vld1.8 {d28-d29}, [r2, : 128]!
vadd.i32 q8, q8, q4
vld1.8 {d25}, [r2, : 64]
vadd.i32 q10, q10, q2
vtrn.32 q9, q13
vadd.i32 q7, q7, q1
vadd.i32 q5, q5, q0
vtrn.32 q11, q14
vadd.i32 q6, q6, q3
add r2, sp, #528
vadd.i32 q10, q10, q2
vtrn.32 d24, d25
vst1.8 {d12-d13}, [r2, : 128]!
vshl.i32 q6, q13, #1
vst1.8 {d20-d21}, [r2, : 128]!
vshl.i32 q10, q14, #1
vst1.8 {d12-d13}, [r2, : 128]!
vshl.i32 q15, q12, #1
vadd.i32 q8, q8, q4
vext.32 d10, d31, d30, #0
vadd.i32 q7, q7, q1
vst1.8 {d16-d17}, [r2, : 128]!
vmull.s32 q8, d18, d5
vmlal.s32 q8, d26, d4
vmlal.s32 q8, d19, d9
vmlal.s32 q8, d27, d3
vmlal.s32 q8, d22, d8
vmlal.s32 q8, d28, d2
vmlal.s32 q8, d23, d7
vmlal.s32 q8, d29, d1
vmlal.s32 q8, d24, d6
vmlal.s32 q8, d25, d0
vst1.8 {d14-d15}, [r2, : 128]!
vmull.s32 q2, d18, d4
vmlal.s32 q2, d12, d9
vmlal.s32 q2, d13, d8
vmlal.s32 q2, d19, d3
vmlal.s32 q2, d22, d2
vmlal.s32 q2, d23, d1
vmlal.s32 q2, d24, d0
vst1.8 {d20-d21}, [r2, : 128]!
vmull.s32 q7, d18, d9
vmlal.s32 q7, d26, d3
vmlal.s32 q7, d19, d8
vmlal.s32 q7, d27, d2
vmlal.s32 q7, d22, d7
vmlal.s32 q7, d28, d1
vmlal.s32 q7, d23, d6
vmlal.s32 q7, d29, d0
vst1.8 {d10-d11}, [r2, : 128]!
vmull.s32 q5, d18, d3
vmlal.s32 q5, d19, d2
vmlal.s32 q5, d22, d1
vmlal.s32 q5, d23, d0
vmlal.s32 q5, d12, d8
vst1.8 {d16-d17}, [r2, : 128]!
vmull.s32 q4, d18, d8
vmlal.s32 q4, d26, d2
vmlal.s32 q4, d19, d7
vmlal.s32 q4, d27, d1
vmlal.s32 q4, d22, d6
vmlal.s32 q4, d28, d0
vmull.s32 q8, d18, d7
vmlal.s32 q8, d26, d1
vmlal.s32 q8, d19, d6
vmlal.s32 q8, d27, d0
add r2, sp, #544
vld1.8 {d20-d21}, [r2, : 128]
vmlal.s32 q7, d24, d21
vmlal.s32 q7, d25, d20
vmlal.s32 q4, d23, d21
vmlal.s32 q4, d29, d20
vmlal.s32 q8, d22, d21
vmlal.s32 q8, d28, d20
vmlal.s32 q5, d24, d20
vst1.8 {d14-d15}, [r2, : 128]
vmull.s32 q7, d18, d6
vmlal.s32 q7, d26, d0
add r2, sp, #624
vld1.8 {d30-d31}, [r2, : 128]
vmlal.s32 q2, d30, d21
vmlal.s32 q7, d19, d21
vmlal.s32 q7, d27, d20
add r2, sp, #592
vld1.8 {d26-d27}, [r2, : 128]
vmlal.s32 q4, d25, d27
vmlal.s32 q8, d29, d27
vmlal.s32 q8, d25, d26
vmlal.s32 q7, d28, d27
vmlal.s32 q7, d29, d26
add r2, sp, #576
vld1.8 {d28-d29}, [r2, : 128]
vmlal.s32 q4, d24, d29
vmlal.s32 q8, d23, d29
vmlal.s32 q8, d24, d28
vmlal.s32 q7, d22, d29
vmlal.s32 q7, d23, d28
vst1.8 {d8-d9}, [r2, : 128]
add r2, sp, #528
vld1.8 {d8-d9}, [r2, : 128]
vmlal.s32 q7, d24, d9
vmlal.s32 q7, d25, d31
vmull.s32 q1, d18, d2
vmlal.s32 q1, d19, d1
vmlal.s32 q1, d22, d0
vmlal.s32 q1, d24, d27
vmlal.s32 q1, d23, d20
vmlal.s32 q1, d12, d7
vmlal.s32 q1, d13, d6
vmull.s32 q6, d18, d1
vmlal.s32 q6, d19, d0
vmlal.s32 q6, d23, d27
vmlal.s32 q6, d22, d20
vmlal.s32 q6, d24, d26
vmull.s32 q0, d18, d0
vmlal.s32 q0, d22, d27
vmlal.s32 q0, d23, d26
vmlal.s32 q0, d24, d31
vmlal.s32 q0, d19, d20
add r2, sp, #608
vld1.8 {d18-d19}, [r2, : 128]
vmlal.s32 q2, d18, d7
vmlal.s32 q5, d18, d6
vmlal.s32 q1, d18, d21
vmlal.s32 q0, d18, d28
vmlal.s32 q6, d18, d29
vmlal.s32 q2, d19, d6
vmlal.s32 q5, d19, d21
vmlal.s32 q1, d19, d29
vmlal.s32 q0, d19, d9
vmlal.s32 q6, d19, d28
add r2, sp, #560
vld1.8 {d18-d19}, [r2, : 128]
add r2, sp, #480
vld1.8 {d22-d23}, [r2, : 128]
vmlal.s32 q5, d19, d7
vmlal.s32 q0, d18, d21
vmlal.s32 q0, d19, d29
vmlal.s32 q6, d18, d6
add r2, sp, #496
vld1.8 {d6-d7}, [r2, : 128]
vmlal.s32 q6, d19, d21
add r2, sp, #544
vld1.8 {d18-d19}, [r2, : 128]
vmlal.s32 q0, d30, d8
add r2, sp, #640
vld1.8 {d20-d21}, [r2, : 128]
vmlal.s32 q5, d30, d29
add r2, sp, #576
vld1.8 {d24-d25}, [r2, : 128]
vmlal.s32 q1, d30, d28
vadd.i64 q13, q0, q11
vadd.i64 q14, q5, q11
vmlal.s32 q6, d30, d9
vshr.s64 q4, q13, #26
vshr.s64 q13, q14, #26
vadd.i64 q7, q7, q4
vshl.i64 q4, q4, #26
vadd.i64 q14, q7, q3
vadd.i64 q9, q9, q13
vshl.i64 q13, q13, #26
vadd.i64 q15, q9, q3
vsub.i64 q0, q0, q4
vshr.s64 q4, q14, #25
vsub.i64 q5, q5, q13
vshr.s64 q13, q15, #25
vadd.i64 q6, q6, q4
vshl.i64 q4, q4, #25
vadd.i64 q14, q6, q11
vadd.i64 q2, q2, q13
vsub.i64 q4, q7, q4
vshr.s64 q7, q14, #26
vshl.i64 q13, q13, #25
vadd.i64 q14, q2, q11
vadd.i64 q8, q8, q7
vshl.i64 q7, q7, #26
vadd.i64 q15, q8, q3
vsub.i64 q9, q9, q13
vshr.s64 q13, q14, #26
vsub.i64 q6, q6, q7
vshr.s64 q7, q15, #25
vadd.i64 q10, q10, q13
vshl.i64 q13, q13, #26
vadd.i64 q14, q10, q3
vadd.i64 q1, q1, q7
add r2, r3, #288
vshl.i64 q7, q7, #25
add r4, r3, #96
vadd.i64 q15, q1, q11
add r2, r2, #8
vsub.i64 q2, q2, q13
add r4, r4, #8
vshr.s64 q13, q14, #25
vsub.i64 q7, q8, q7
vshr.s64 q8, q15, #26
vadd.i64 q14, q13, q13
vadd.i64 q12, q12, q8
vtrn.32 d12, d14
vshl.i64 q8, q8, #26
vtrn.32 d13, d15
vadd.i64 q3, q12, q3
vadd.i64 q0, q0, q14
vst1.8 d12, [r2, : 64]!
vshl.i64 q7, q13, #4
vst1.8 d13, [r4, : 64]!
vsub.i64 q1, q1, q8
vshr.s64 q3, q3, #25
vadd.i64 q0, q0, q7
vadd.i64 q5, q5, q3
vshl.i64 q3, q3, #25
vadd.i64 q6, q5, q11
vadd.i64 q0, q0, q13
vshl.i64 q7, q13, #25
vadd.i64 q8, q0, q11
vsub.i64 q3, q12, q3
vshr.s64 q6, q6, #26
vsub.i64 q7, q10, q7
vtrn.32 d2, d6
vshr.s64 q8, q8, #26
vtrn.32 d3, d7
vadd.i64 q3, q9, q6
vst1.8 d2, [r2, : 64]
vshl.i64 q6, q6, #26
vst1.8 d3, [r4, : 64]
vadd.i64 q1, q4, q8
vtrn.32 d4, d14
vshl.i64 q4, q8, #26
vtrn.32 d5, d15
vsub.i64 q5, q5, q6
add r2, r2, #16
vsub.i64 q0, q0, q4
vst1.8 d4, [r2, : 64]
add r4, r4, #16
vst1.8 d5, [r4, : 64]
vtrn.32 d10, d6
vtrn.32 d11, d7
sub r2, r2, #8
sub r4, r4, #8
vtrn.32 d0, d2
vtrn.32 d1, d3
vst1.8 d10, [r2, : 64]
vst1.8 d11, [r4, : 64]
sub r2, r2, #24
sub r4, r4, #24
vst1.8 d0, [r2, : 64]
vst1.8 d1, [r4, : 64]
add r2, sp, #512
add r4, r3, #144
add r5, r3, #192
vld1.8 {d0-d1}, [r2, : 128]
vld1.8 {d2-d3}, [r4, : 128]!
vld1.8 {d4-d5}, [r5, : 128]!
vzip.i32 q1, q2
vld1.8 {d6-d7}, [r4, : 128]!
vld1.8 {d8-d9}, [r5, : 128]!
vshl.i32 q5, q1, #1
vzip.i32 q3, q4
vshl.i32 q6, q2, #1
vld1.8 {d14}, [r4, : 64]
vshl.i32 q8, q3, #1
vld1.8 {d15}, [r5, : 64]
vshl.i32 q9, q4, #1
vmul.i32 d21, d7, d1
vtrn.32 d14, d15
vmul.i32 q11, q4, q0
vmul.i32 q0, q7, q0
vmull.s32 q12, d2, d2
vmlal.s32 q12, d11, d1
vmlal.s32 q12, d12, d0
vmlal.s32 q12, d13, d23
vmlal.s32 q12, d16, d22
vmlal.s32 q12, d7, d21
vmull.s32 q10, d2, d11
vmlal.s32 q10, d4, d1
vmlal.s32 q10, d13, d0
vmlal.s32 q10, d6, d23
vmlal.s32 q10, d17, d22
vmull.s32 q13, d10, d4
vmlal.s32 q13, d11, d3
vmlal.s32 q13, d13, d1
vmlal.s32 q13, d16, d0
vmlal.s32 q13, d17, d23
vmlal.s32 q13, d8, d22
vmull.s32 q1, d10, d5
vmlal.s32 q1, d11, d4
vmlal.s32 q1, d6, d1
vmlal.s32 q1, d17, d0
vmlal.s32 q1, d8, d23
vmull.s32 q14, d10, d6
vmlal.s32 q14, d11, d13
vmlal.s32 q14, d4, d4
vmlal.s32 q14, d17, d1
vmlal.s32 q14, d18, d0
vmlal.s32 q14, d9, d23
vmull.s32 q11, d10, d7
vmlal.s32 q11, d11, d6
vmlal.s32 q11, d12, d5
vmlal.s32 q11, d8, d1
vmlal.s32 q11, d19, d0
vmull.s32 q15, d10, d8
vmlal.s32 q15, d11, d17
vmlal.s32 q15, d12, d6
vmlal.s32 q15, d13, d5
vmlal.s32 q15, d19, d1
vmlal.s32 q15, d14, d0
vmull.s32 q2, d10, d9
vmlal.s32 q2, d11, d8
vmlal.s32 q2, d12, d7
vmlal.s32 q2, d13, d6
vmlal.s32 q2, d14, d1
vmull.s32 q0, d15, d1
vmlal.s32 q0, d10, d14
vmlal.s32 q0, d11, d19
vmlal.s32 q0, d12, d8
vmlal.s32 q0, d13, d17
vmlal.s32 q0, d6, d6
add r2, sp, #480
vld1.8 {d18-d19}, [r2, : 128]!
vmull.s32 q3, d16, d7
vmlal.s32 q3, d10, d15
vmlal.s32 q3, d11, d14
vmlal.s32 q3, d12, d9
vmlal.s32 q3, d13, d8
vld1.8 {d8-d9}, [r2, : 128]
vadd.i64 q5, q12, q9
vadd.i64 q6, q15, q9
vshr.s64 q5, q5, #26
vshr.s64 q6, q6, #26
vadd.i64 q7, q10, q5
vshl.i64 q5, q5, #26
vadd.i64 q8, q7, q4
vadd.i64 q2, q2, q6
vshl.i64 q6, q6, #26
vadd.i64 q10, q2, q4
vsub.i64 q5, q12, q5
vshr.s64 q8, q8, #25
vsub.i64 q6, q15, q6
vshr.s64 q10, q10, #25
vadd.i64 q12, q13, q8
vshl.i64 q8, q8, #25
vadd.i64 q13, q12, q9
vadd.i64 q0, q0, q10
vsub.i64 q7, q7, q8
vshr.s64 q8, q13, #26
vshl.i64 q10, q10, #25
vadd.i64 q13, q0, q9
vadd.i64 q1, q1, q8
vshl.i64 q8, q8, #26
vadd.i64 q15, q1, q4
vsub.i64 q2, q2, q10
vshr.s64 q10, q13, #26
vsub.i64 q8, q12, q8
vshr.s64 q12, q15, #25
vadd.i64 q3, q3, q10
vshl.i64 q10, q10, #26
vadd.i64 q13, q3, q4
vadd.i64 q14, q14, q12
add r2, r3, #144
vshl.i64 q12, q12, #25
add r4, r3, #192
vadd.i64 q15, q14, q9
add r2, r2, #8
vsub.i64 q0, q0, q10
add r4, r4, #8
vshr.s64 q10, q13, #25
vsub.i64 q1, q1, q12
vshr.s64 q12, q15, #26
vadd.i64 q13, q10, q10
vadd.i64 q11, q11, q12
vtrn.32 d16, d2
vshl.i64 q12, q12, #26
vtrn.32 d17, d3
vadd.i64 q1, q11, q4
vadd.i64 q4, q5, q13
vst1.8 d16, [r2, : 64]!
vshl.i64 q5, q10, #4
vst1.8 d17, [r4, : 64]!
vsub.i64 q8, q14, q12
vshr.s64 q1, q1, #25
vadd.i64 q4, q4, q5
vadd.i64 q5, q6, q1
vshl.i64 q1, q1, #25
vadd.i64 q6, q5, q9
vadd.i64 q4, q4, q10
vshl.i64 q10, q10, #25
vadd.i64 q9, q4, q9
vsub.i64 q1, q11, q1
vshr.s64 q6, q6, #26
vsub.i64 q3, q3, q10
vtrn.32 d16, d2
vshr.s64 q9, q9, #26
vtrn.32 d17, d3
vadd.i64 q1, q2, q6
vst1.8 d16, [r2, : 64]
vshl.i64 q2, q6, #26
vst1.8 d17, [r4, : 64]
vadd.i64 q6, q7, q9
vtrn.32 d0, d6
vshl.i64 q7, q9, #26
vtrn.32 d1, d7
vsub.i64 q2, q5, q2
add r2, r2, #16
vsub.i64 q3, q4, q7
vst1.8 d0, [r2, : 64]
add r4, r4, #16
vst1.8 d1, [r4, : 64]
vtrn.32 d4, d2
vtrn.32 d5, d3
sub r2, r2, #8
sub r4, r4, #8
vtrn.32 d6, d12
vtrn.32 d7, d13
vst1.8 d4, [r2, : 64]
vst1.8 d5, [r4, : 64]
sub r2, r2, #24
sub r4, r4, #24
vst1.8 d6, [r2, : 64]
vst1.8 d7, [r4, : 64]
add r2, r3, #336
add r4, r3, #288
vld1.8 {d0-d1}, [r2, : 128]!
vld1.8 {d2-d3}, [r4, : 128]!
vadd.i32 q0, q0, q1
vld1.8 {d2-d3}, [r2, : 128]!
vld1.8 {d4-d5}, [r4, : 128]!
vadd.i32 q1, q1, q2
add r5, r3, #288
vld1.8 {d4}, [r2, : 64]
vld1.8 {d6}, [r4, : 64]
vadd.i32 q2, q2, q3
vst1.8 {d0-d1}, [r5, : 128]!
vst1.8 {d2-d3}, [r5, : 128]!
vst1.8 d4, [r5, : 64]
add r2, r3, #48
add r4, r3, #144
vld1.8 {d0-d1}, [r4, : 128]!
vld1.8 {d2-d3}, [r4, : 128]!
vld1.8 {d4}, [r4, : 64]
add r4, r3, #288
vld1.8 {d6-d7}, [r4, : 128]!
vtrn.32 q0, q3
vld1.8 {d8-d9}, [r4, : 128]!
vshl.i32 q5, q0, #4
vtrn.32 q1, q4
vshl.i32 q6, q3, #4
vadd.i32 q5, q5, q0
vadd.i32 q6, q6, q3
vshl.i32 q7, q1, #4
vld1.8 {d5}, [r4, : 64]
vshl.i32 q8, q4, #4
vtrn.32 d4, d5
vadd.i32 q7, q7, q1
vadd.i32 q8, q8, q4
vld1.8 {d18-d19}, [r2, : 128]!
vshl.i32 q10, q2, #4
vld1.8 {d22-d23}, [r2, : 128]!
vadd.i32 q10, q10, q2
vld1.8 {d24}, [r2, : 64]
vadd.i32 q5, q5, q0
add r2, r3, #240
vld1.8 {d26-d27}, [r2, : 128]!
vadd.i32 q6, q6, q3
vld1.8 {d28-d29}, [r2, : 128]!
vadd.i32 q8, q8, q4
vld1.8 {d25}, [r2, : 64]
vadd.i32 q10, q10, q2
vtrn.32 q9, q13
vadd.i32 q7, q7, q1
vadd.i32 q5, q5, q0
vtrn.32 q11, q14
vadd.i32 q6, q6, q3
add r2, sp, #528
vadd.i32 q10, q10, q2
vtrn.32 d24, d25
vst1.8 {d12-d13}, [r2, : 128]!
vshl.i32 q6, q13, #1
vst1.8 {d20-d21}, [r2, : 128]!
vshl.i32 q10, q14, #1
vst1.8 {d12-d13}, [r2, : 128]!
vshl.i32 q15, q12, #1
vadd.i32 q8, q8, q4
vext.32 d10, d31, d30, #0
vadd.i32 q7, q7, q1
vst1.8 {d16-d17}, [r2, : 128]!
vmull.s32 q8, d18, d5
vmlal.s32 q8, d26, d4
vmlal.s32 q8, d19, d9
vmlal.s32 q8, d27, d3
vmlal.s32 q8, d22, d8
vmlal.s32 q8, d28, d2
vmlal.s32 q8, d23, d7
vmlal.s32 q8, d29, d1
vmlal.s32 q8, d24, d6
vmlal.s32 q8, d25, d0
vst1.8 {d14-d15}, [r2, : 128]!
vmull.s32 q2, d18, d4
vmlal.s32 q2, d12, d9
vmlal.s32 q2, d13, d8
vmlal.s32 q2, d19, d3
vmlal.s32 q2, d22, d2
vmlal.s32 q2, d23, d1
vmlal.s32 q2, d24, d0
vst1.8 {d20-d21}, [r2, : 128]!
vmull.s32 q7, d18, d9
vmlal.s32 q7, d26, d3
vmlal.s32 q7, d19, d8
vmlal.s32 q7, d27, d2
vmlal.s32 q7, d22, d7
vmlal.s32 q7, d28, d1
vmlal.s32 q7, d23, d6
vmlal.s32 q7, d29, d0
vst1.8 {d10-d11}, [r2, : 128]!
vmull.s32 q5, d18, d3
vmlal.s32 q5, d19, d2
vmlal.s32 q5, d22, d1
vmlal.s32 q5, d23, d0
vmlal.s32 q5, d12, d8
vst1.8 {d16-d17}, [r2, : 128]!
vmull.s32 q4, d18, d8
vmlal.s32 q4, d26, d2
vmlal.s32 q4, d19, d7
vmlal.s32 q4, d27, d1
vmlal.s32 q4, d22, d6
vmlal.s32 q4, d28, d0
vmull.s32 q8, d18, d7
vmlal.s32 q8, d26, d1
vmlal.s32 q8, d19, d6
vmlal.s32 q8, d27, d0
add r2, sp, #544
vld1.8 {d20-d21}, [r2, : 128]
vmlal.s32 q7, d24, d21
vmlal.s32 q7, d25, d20
vmlal.s32 q4, d23, d21
vmlal.s32 q4, d29, d20
vmlal.s32 q8, d22, d21
vmlal.s32 q8, d28, d20
vmlal.s32 q5, d24, d20
vst1.8 {d14-d15}, [r2, : 128]
vmull.s32 q7, d18, d6
vmlal.s32 q7, d26, d0
add r2, sp, #624
vld1.8 {d30-d31}, [r2, : 128]
vmlal.s32 q2, d30, d21
vmlal.s32 q7, d19, d21
vmlal.s32 q7, d27, d20
add r2, sp, #592
vld1.8 {d26-d27}, [r2, : 128]
vmlal.s32 q4, d25, d27
vmlal.s32 q8, d29, d27
vmlal.s32 q8, d25, d26
vmlal.s32 q7, d28, d27
vmlal.s32 q7, d29, d26
add r2, sp, #576
vld1.8 {d28-d29}, [r2, : 128]
vmlal.s32 q4, d24, d29
vmlal.s32 q8, d23, d29
vmlal.s32 q8, d24, d28
vmlal.s32 q7, d22, d29
vmlal.s32 q7, d23, d28
vst1.8 {d8-d9}, [r2, : 128]
add r2, sp, #528
vld1.8 {d8-d9}, [r2, : 128]
vmlal.s32 q7, d24, d9
vmlal.s32 q7, d25, d31
vmull.s32 q1, d18, d2
vmlal.s32 q1, d19, d1
vmlal.s32 q1, d22, d0
vmlal.s32 q1, d24, d27
vmlal.s32 q1, d23, d20
vmlal.s32 q1, d12, d7
vmlal.s32 q1, d13, d6
vmull.s32 q6, d18, d1
vmlal.s32 q6, d19, d0
vmlal.s32 q6, d23, d27
vmlal.s32 q6, d22, d20
vmlal.s32 q6, d24, d26
vmull.s32 q0, d18, d0
vmlal.s32 q0, d22, d27
vmlal.s32 q0, d23, d26
vmlal.s32 q0, d24, d31
vmlal.s32 q0, d19, d20
add r2, sp, #608
vld1.8 {d18-d19}, [r2, : 128]
vmlal.s32 q2, d18, d7
vmlal.s32 q5, d18, d6
vmlal.s32 q1, d18, d21
vmlal.s32 q0, d18, d28
vmlal.s32 q6, d18, d29
vmlal.s32 q2, d19, d6
vmlal.s32 q5, d19, d21
vmlal.s32 q1, d19, d29
vmlal.s32 q0, d19, d9
vmlal.s32 q6, d19, d28
add r2, sp, #560
vld1.8 {d18-d19}, [r2, : 128]
add r2, sp, #480
vld1.8 {d22-d23}, [r2, : 128]
vmlal.s32 q5, d19, d7
vmlal.s32 q0, d18, d21
vmlal.s32 q0, d19, d29
vmlal.s32 q6, d18, d6
add r2, sp, #496
vld1.8 {d6-d7}, [r2, : 128]
vmlal.s32 q6, d19, d21
add r2, sp, #544
vld1.8 {d18-d19}, [r2, : 128]
vmlal.s32 q0, d30, d8
add r2, sp, #640
vld1.8 {d20-d21}, [r2, : 128]
vmlal.s32 q5, d30, d29
add r2, sp, #576
vld1.8 {d24-d25}, [r2, : 128]
vmlal.s32 q1, d30, d28
vadd.i64 q13, q0, q11
vadd.i64 q14, q5, q11
vmlal.s32 q6, d30, d9
vshr.s64 q4, q13, #26
vshr.s64 q13, q14, #26
vadd.i64 q7, q7, q4
vshl.i64 q4, q4, #26
vadd.i64 q14, q7, q3
vadd.i64 q9, q9, q13
vshl.i64 q13, q13, #26
vadd.i64 q15, q9, q3
vsub.i64 q0, q0, q4
vshr.s64 q4, q14, #25
vsub.i64 q5, q5, q13
vshr.s64 q13, q15, #25
vadd.i64 q6, q6, q4
vshl.i64 q4, q4, #25
vadd.i64 q14, q6, q11
vadd.i64 q2, q2, q13
vsub.i64 q4, q7, q4
vshr.s64 q7, q14, #26
vshl.i64 q13, q13, #25
vadd.i64 q14, q2, q11
vadd.i64 q8, q8, q7
vshl.i64 q7, q7, #26
vadd.i64 q15, q8, q3
vsub.i64 q9, q9, q13
vshr.s64 q13, q14, #26
vsub.i64 q6, q6, q7
vshr.s64 q7, q15, #25
vadd.i64 q10, q10, q13
vshl.i64 q13, q13, #26
vadd.i64 q14, q10, q3
vadd.i64 q1, q1, q7
add r2, r3, #240
vshl.i64 q7, q7, #25
add r4, r3, #144
vadd.i64 q15, q1, q11
add r2, r2, #8
vsub.i64 q2, q2, q13
add r4, r4, #8
vshr.s64 q13, q14, #25
vsub.i64 q7, q8, q7
vshr.s64 q8, q15, #26
vadd.i64 q14, q13, q13
vadd.i64 q12, q12, q8
vtrn.32 d12, d14
vshl.i64 q8, q8, #26
vtrn.32 d13, d15
vadd.i64 q3, q12, q3
vadd.i64 q0, q0, q14
vst1.8 d12, [r2, : 64]!
vshl.i64 q7, q13, #4
vst1.8 d13, [r4, : 64]!
vsub.i64 q1, q1, q8
vshr.s64 q3, q3, #25
vadd.i64 q0, q0, q7
vadd.i64 q5, q5, q3
vshl.i64 q3, q3, #25
vadd.i64 q6, q5, q11
vadd.i64 q0, q0, q13
vshl.i64 q7, q13, #25
vadd.i64 q8, q0, q11
vsub.i64 q3, q12, q3
vshr.s64 q6, q6, #26
vsub.i64 q7, q10, q7
vtrn.32 d2, d6
vshr.s64 q8, q8, #26
vtrn.32 d3, d7
vadd.i64 q3, q9, q6
vst1.8 d2, [r2, : 64]
vshl.i64 q6, q6, #26
vst1.8 d3, [r4, : 64]
vadd.i64 q1, q4, q8
vtrn.32 d4, d14
vshl.i64 q4, q8, #26
vtrn.32 d5, d15
vsub.i64 q5, q5, q6
add r2, r2, #16
vsub.i64 q0, q0, q4
vst1.8 d4, [r2, : 64]
add r4, r4, #16
vst1.8 d5, [r4, : 64]
vtrn.32 d10, d6
vtrn.32 d11, d7
sub r2, r2, #8
sub r4, r4, #8
vtrn.32 d0, d2
vtrn.32 d1, d3
vst1.8 d10, [r2, : 64]
vst1.8 d11, [r4, : 64]
sub r2, r2, #24
sub r4, r4, #24
vst1.8 d0, [r2, : 64]
vst1.8 d1, [r4, : 64]
ldr r2, [sp, #456]
ldr r4, [sp, #460]
subs r5, r2, #1
bge .Lmainloop
add r1, r3, #144
add r2, r3, #336
vld1.8 {d0-d1}, [r1, : 128]!
vld1.8 {d2-d3}, [r1, : 128]!
vld1.8 {d4}, [r1, : 64]
vst1.8 {d0-d1}, [r2, : 128]!
vst1.8 {d2-d3}, [r2, : 128]!
vst1.8 d4, [r2, : 64]
movw r1, #0
.Linvertloop:
add r2, r3, #144
movw r4, #0
movw r5, #2
cmp r1, #1
moveq r5, #1
addeq r2, r3, #336
addeq r4, r3, #48
cmp r1, #2
moveq r5, #1
addeq r2, r3, #48
cmp r1, #3
moveq r5, #5
addeq r4, r3, #336
cmp r1, #4
moveq r5, #10
cmp r1, #5
moveq r5, #20
cmp r1, #6
moveq r5, #10
addeq r2, r3, #336
addeq r4, r3, #336
cmp r1, #7
moveq r5, #50
cmp r1, #8
moveq r5, #100
cmp r1, #9
moveq r5, #50
addeq r2, r3, #336
cmp r1, #10
moveq r5, #5
addeq r2, r3, #48
cmp r1, #11
moveq r5, #0
addeq r2, r3, #96
add r6, r3, #144
add r7, r3, #288
vld1.8 {d0-d1}, [r6, : 128]!
vld1.8 {d2-d3}, [r6, : 128]!
vld1.8 {d4}, [r6, : 64]
vst1.8 {d0-d1}, [r7, : 128]!
vst1.8 {d2-d3}, [r7, : 128]!
vst1.8 d4, [r7, : 64]
cmp r5, #0
beq .Lskipsquaringloop
.Lsquaringloop:
add r6, r3, #288
add r7, r3, #288
add r8, r3, #288
vmov.i32 q0, #19
vmov.i32 q1, #0
vmov.i32 q2, #1
vzip.i32 q1, q2
vld1.8 {d4-d5}, [r7, : 128]!
vld1.8 {d6-d7}, [r7, : 128]!
vld1.8 {d9}, [r7, : 64]
vld1.8 {d10-d11}, [r6, : 128]!
add r7, sp, #384
vld1.8 {d12-d13}, [r6, : 128]!
vmul.i32 q7, q2, q0
vld1.8 {d8}, [r6, : 64]
vext.32 d17, d11, d10, #1
vmul.i32 q9, q3, q0
vext.32 d16, d10, d8, #1
vshl.u32 q10, q5, q1
vext.32 d22, d14, d4, #1
vext.32 d24, d18, d6, #1
vshl.u32 q13, q6, q1
vshl.u32 d28, d8, d2
vrev64.i32 d22, d22
vmul.i32 d1, d9, d1
vrev64.i32 d24, d24
vext.32 d29, d8, d13, #1
vext.32 d0, d1, d9, #1
vrev64.i32 d0, d0
vext.32 d2, d9, d1, #1
vext.32 d23, d15, d5, #1
vmull.s32 q4, d20, d4
vrev64.i32 d23, d23
vmlal.s32 q4, d21, d1
vrev64.i32 d2, d2
vmlal.s32 q4, d26, d19
vext.32 d3, d5, d15, #1
vmlal.s32 q4, d27, d18
vrev64.i32 d3, d3
vmlal.s32 q4, d28, d15
vext.32 d14, d12, d11, #1
vmull.s32 q5, d16, d23
vext.32 d15, d13, d12, #1
vmlal.s32 q5, d17, d4
vst1.8 d8, [r7, : 64]!
vmlal.s32 q5, d14, d1
vext.32 d12, d9, d8, #0
vmlal.s32 q5, d15, d19
vmov.i64 d13, #0
vmlal.s32 q5, d29, d18
vext.32 d25, d19, d7, #1
vmlal.s32 q6, d20, d5
vrev64.i32 d25, d25
vmlal.s32 q6, d21, d4
vst1.8 d11, [r7, : 64]!
vmlal.s32 q6, d26, d1
vext.32 d9, d10, d10, #0
vmlal.s32 q6, d27, d19
vmov.i64 d8, #0
vmlal.s32 q6, d28, d18
vmlal.s32 q4, d16, d24
vmlal.s32 q4, d17, d5
vmlal.s32 q4, d14, d4
vst1.8 d12, [r7, : 64]!
vmlal.s32 q4, d15, d1
vext.32 d10, d13, d12, #0
vmlal.s32 q4, d29, d19
vmov.i64 d11, #0
vmlal.s32 q5, d20, d6
vmlal.s32 q5, d21, d5
vmlal.s32 q5, d26, d4
vext.32 d13, d8, d8, #0
vmlal.s32 q5, d27, d1
vmov.i64 d12, #0
vmlal.s32 q5, d28, d19
vst1.8 d9, [r7, : 64]!
vmlal.s32 q6, d16, d25
vmlal.s32 q6, d17, d6
vst1.8 d10, [r7, : 64]
vmlal.s32 q6, d14, d5
vext.32 d8, d11, d10, #0
vmlal.s32 q6, d15, d4
vmov.i64 d9, #0
vmlal.s32 q6, d29, d1
vmlal.s32 q4, d20, d7
vmlal.s32 q4, d21, d6
vmlal.s32 q4, d26, d5
vext.32 d11, d12, d12, #0
vmlal.s32 q4, d27, d4
vmov.i64 d10, #0
vmlal.s32 q4, d28, d1
vmlal.s32 q5, d16, d0
sub r6, r7, #32
vmlal.s32 q5, d17, d7
vmlal.s32 q5, d14, d6
vext.32 d30, d9, d8, #0
vmlal.s32 q5, d15, d5
vld1.8 {d31}, [r6, : 64]!
vmlal.s32 q5, d29, d4
vmlal.s32 q15, d20, d0
vext.32 d0, d6, d18, #1
vmlal.s32 q15, d21, d25
vrev64.i32 d0, d0
vmlal.s32 q15, d26, d24
vext.32 d1, d7, d19, #1
vext.32 d7, d10, d10, #0
vmlal.s32 q15, d27, d23
vrev64.i32 d1, d1
vld1.8 {d6}, [r6, : 64]
vmlal.s32 q15, d28, d22
vmlal.s32 q3, d16, d4
add r6, r6, #24
vmlal.s32 q3, d17, d2
vext.32 d4, d31, d30, #0
vmov d17, d11
vmlal.s32 q3, d14, d1
vext.32 d11, d13, d13, #0
vext.32 d13, d30, d30, #0
vmlal.s32 q3, d15, d0
vext.32 d1, d8, d8, #0
vmlal.s32 q3, d29, d3
vld1.8 {d5}, [r6, : 64]
sub r6, r6, #16
vext.32 d10, d6, d6, #0
vmov.i32 q1, #0xffffffff
vshl.i64 q4, q1, #25
add r7, sp, #480
vld1.8 {d14-d15}, [r7, : 128]
vadd.i64 q9, q2, q7
vshl.i64 q1, q1, #26
vshr.s64 q10, q9, #26
vld1.8 {d0}, [r6, : 64]!
vadd.i64 q5, q5, q10
vand q9, q9, q1
vld1.8 {d16}, [r6, : 64]!
add r6, sp, #496
vld1.8 {d20-d21}, [r6, : 128]
vadd.i64 q11, q5, q10
vsub.i64 q2, q2, q9
vshr.s64 q9, q11, #25
vext.32 d12, d5, d4, #0
vand q11, q11, q4
vadd.i64 q0, q0, q9
vmov d19, d7
vadd.i64 q3, q0, q7
vsub.i64 q5, q5, q11
vshr.s64 q11, q3, #26
vext.32 d18, d11, d10, #0
vand q3, q3, q1
vadd.i64 q8, q8, q11
vadd.i64 q11, q8, q10
vsub.i64 q0, q0, q3
vshr.s64 q3, q11, #25
vand q11, q11, q4
vadd.i64 q3, q6, q3
vadd.i64 q6, q3, q7
vsub.i64 q8, q8, q11
vshr.s64 q11, q6, #26
vand q6, q6, q1
vadd.i64 q9, q9, q11
vadd.i64 d25, d19, d21
vsub.i64 q3, q3, q6
vshr.s64 d23, d25, #25
vand q4, q12, q4
vadd.i64 d21, d23, d23
vshl.i64 d25, d23, #4
vadd.i64 d21, d21, d23
vadd.i64 d25, d25, d21
vadd.i64 d4, d4, d25
vzip.i32 q0, q8
vadd.i64 d12, d4, d14
add r6, r8, #8
vst1.8 d0, [r6, : 64]
vsub.i64 d19, d19, d9
add r6, r6, #16
vst1.8 d16, [r6, : 64]
vshr.s64 d22, d12, #26
vand q0, q6, q1
vadd.i64 d10, d10, d22
vzip.i32 q3, q9
vsub.i64 d4, d4, d0
sub r6, r6, #8
vst1.8 d6, [r6, : 64]
add r6, r6, #16
vst1.8 d18, [r6, : 64]
vzip.i32 q2, q5
sub r6, r6, #32
vst1.8 d4, [r6, : 64]
subs r5, r5, #1
bhi .Lsquaringloop
.Lskipsquaringloop:
mov r2, r2
add r5, r3, #288
add r6, r3, #144
vmov.i32 q0, #19
vmov.i32 q1, #0
vmov.i32 q2, #1
vzip.i32 q1, q2
vld1.8 {d4-d5}, [r5, : 128]!
vld1.8 {d6-d7}, [r5, : 128]!
vld1.8 {d9}, [r5, : 64]
vld1.8 {d10-d11}, [r2, : 128]!
add r5, sp, #384
vld1.8 {d12-d13}, [r2, : 128]!
vmul.i32 q7, q2, q0
vld1.8 {d8}, [r2, : 64]
vext.32 d17, d11, d10, #1
vmul.i32 q9, q3, q0
vext.32 d16, d10, d8, #1
vshl.u32 q10, q5, q1
vext.32 d22, d14, d4, #1
vext.32 d24, d18, d6, #1
vshl.u32 q13, q6, q1
vshl.u32 d28, d8, d2
vrev64.i32 d22, d22
vmul.i32 d1, d9, d1
vrev64.i32 d24, d24
vext.32 d29, d8, d13, #1
vext.32 d0, d1, d9, #1
vrev64.i32 d0, d0
vext.32 d2, d9, d1, #1
vext.32 d23, d15, d5, #1
vmull.s32 q4, d20, d4
vrev64.i32 d23, d23
vmlal.s32 q4, d21, d1
vrev64.i32 d2, d2
vmlal.s32 q4, d26, d19
vext.32 d3, d5, d15, #1
vmlal.s32 q4, d27, d18
vrev64.i32 d3, d3
vmlal.s32 q4, d28, d15
vext.32 d14, d12, d11, #1
vmull.s32 q5, d16, d23
vext.32 d15, d13, d12, #1
vmlal.s32 q5, d17, d4
vst1.8 d8, [r5, : 64]!
vmlal.s32 q5, d14, d1
vext.32 d12, d9, d8, #0
vmlal.s32 q5, d15, d19
vmov.i64 d13, #0
vmlal.s32 q5, d29, d18
vext.32 d25, d19, d7, #1
vmlal.s32 q6, d20, d5
vrev64.i32 d25, d25
vmlal.s32 q6, d21, d4
vst1.8 d11, [r5, : 64]!
vmlal.s32 q6, d26, d1
vext.32 d9, d10, d10, #0
vmlal.s32 q6, d27, d19
vmov.i64 d8, #0
vmlal.s32 q6, d28, d18
vmlal.s32 q4, d16, d24
vmlal.s32 q4, d17, d5
vmlal.s32 q4, d14, d4
vst1.8 d12, [r5, : 64]!
vmlal.s32 q4, d15, d1
vext.32 d10, d13, d12, #0
vmlal.s32 q4, d29, d19
vmov.i64 d11, #0
vmlal.s32 q5, d20, d6
vmlal.s32 q5, d21, d5
vmlal.s32 q5, d26, d4
vext.32 d13, d8, d8, #0
vmlal.s32 q5, d27, d1
vmov.i64 d12, #0
vmlal.s32 q5, d28, d19
vst1.8 d9, [r5, : 64]!
vmlal.s32 q6, d16, d25
vmlal.s32 q6, d17, d6
vst1.8 d10, [r5, : 64]
vmlal.s32 q6, d14, d5
vext.32 d8, d11, d10, #0
vmlal.s32 q6, d15, d4
vmov.i64 d9, #0
vmlal.s32 q6, d29, d1
vmlal.s32 q4, d20, d7
vmlal.s32 q4, d21, d6
vmlal.s32 q4, d26, d5
vext.32 d11, d12, d12, #0
vmlal.s32 q4, d27, d4
vmov.i64 d10, #0
vmlal.s32 q4, d28, d1
vmlal.s32 q5, d16, d0
sub r2, r5, #32
vmlal.s32 q5, d17, d7
vmlal.s32 q5, d14, d6
vext.32 d30, d9, d8, #0
vmlal.s32 q5, d15, d5
vld1.8 {d31}, [r2, : 64]!
vmlal.s32 q5, d29, d4
vmlal.s32 q15, d20, d0
vext.32 d0, d6, d18, #1
vmlal.s32 q15, d21, d25
vrev64.i32 d0, d0
vmlal.s32 q15, d26, d24
vext.32 d1, d7, d19, #1
vext.32 d7, d10, d10, #0
vmlal.s32 q15, d27, d23
vrev64.i32 d1, d1
vld1.8 {d6}, [r2, : 64]
vmlal.s32 q15, d28, d22
vmlal.s32 q3, d16, d4
add r2, r2, #24
vmlal.s32 q3, d17, d2
vext.32 d4, d31, d30, #0
vmov d17, d11
vmlal.s32 q3, d14, d1
vext.32 d11, d13, d13, #0
vext.32 d13, d30, d30, #0
vmlal.s32 q3, d15, d0
vext.32 d1, d8, d8, #0
vmlal.s32 q3, d29, d3
vld1.8 {d5}, [r2, : 64]
sub r2, r2, #16
vext.32 d10, d6, d6, #0
vmov.i32 q1, #0xffffffff
vshl.i64 q4, q1, #25
add r5, sp, #480
vld1.8 {d14-d15}, [r5, : 128]
vadd.i64 q9, q2, q7
vshl.i64 q1, q1, #26
vshr.s64 q10, q9, #26
vld1.8 {d0}, [r2, : 64]!
vadd.i64 q5, q5, q10
vand q9, q9, q1
vld1.8 {d16}, [r2, : 64]!
add r2, sp, #496
vld1.8 {d20-d21}, [r2, : 128]
vadd.i64 q11, q5, q10
vsub.i64 q2, q2, q9
vshr.s64 q9, q11, #25
vext.32 d12, d5, d4, #0
vand q11, q11, q4
vadd.i64 q0, q0, q9
vmov d19, d7
vadd.i64 q3, q0, q7
vsub.i64 q5, q5, q11
vshr.s64 q11, q3, #26
vext.32 d18, d11, d10, #0
vand q3, q3, q1
vadd.i64 q8, q8, q11
vadd.i64 q11, q8, q10
vsub.i64 q0, q0, q3
vshr.s64 q3, q11, #25
vand q11, q11, q4
vadd.i64 q3, q6, q3
vadd.i64 q6, q3, q7
vsub.i64 q8, q8, q11
vshr.s64 q11, q6, #26
vand q6, q6, q1
vadd.i64 q9, q9, q11
vadd.i64 d25, d19, d21
vsub.i64 q3, q3, q6
vshr.s64 d23, d25, #25
vand q4, q12, q4
vadd.i64 d21, d23, d23
vshl.i64 d25, d23, #4
vadd.i64 d21, d21, d23
vadd.i64 d25, d25, d21
vadd.i64 d4, d4, d25
vzip.i32 q0, q8
vadd.i64 d12, d4, d14
add r2, r6, #8
vst1.8 d0, [r2, : 64]
vsub.i64 d19, d19, d9
add r2, r2, #16
vst1.8 d16, [r2, : 64]
vshr.s64 d22, d12, #26
vand q0, q6, q1
vadd.i64 d10, d10, d22
vzip.i32 q3, q9
vsub.i64 d4, d4, d0
sub r2, r2, #8
vst1.8 d6, [r2, : 64]
add r2, r2, #16
vst1.8 d18, [r2, : 64]
vzip.i32 q2, q5
sub r2, r2, #32
vst1.8 d4, [r2, : 64]
cmp r4, #0
beq .Lskippostcopy
add r2, r3, #144
mov r4, r4
vld1.8 {d0-d1}, [r2, : 128]!
vld1.8 {d2-d3}, [r2, : 128]!
vld1.8 {d4}, [r2, : 64]
vst1.8 {d0-d1}, [r4, : 128]!
vst1.8 {d2-d3}, [r4, : 128]!
vst1.8 d4, [r4, : 64]
.Lskippostcopy:
cmp r1, #1
bne .Lskipfinalcopy
add r2, r3, #288
add r4, r3, #144
vld1.8 {d0-d1}, [r2, : 128]!
vld1.8 {d2-d3}, [r2, : 128]!
vld1.8 {d4}, [r2, : 64]
vst1.8 {d0-d1}, [r4, : 128]!
vst1.8 {d2-d3}, [r4, : 128]!
vst1.8 d4, [r4, : 64]
.Lskipfinalcopy:
add r1, r1, #1
cmp r1, #12
blo .Linvertloop
add r1, r3, #144
ldr r2, [r1], #4
ldr r3, [r1], #4
ldr r4, [r1], #4
ldr r5, [r1], #4
ldr r6, [r1], #4
ldr r7, [r1], #4
ldr r8, [r1], #4
ldr r9, [r1], #4
ldr r10, [r1], #4
ldr r1, [r1]
add r11, r1, r1, LSL #4
add r11, r11, r1, LSL #1
add r11, r11, #16777216
mov r11, r11, ASR #25
add r11, r11, r2
mov r11, r11, ASR #26
add r11, r11, r3
mov r11, r11, ASR #25
add r11, r11, r4
mov r11, r11, ASR #26
add r11, r11, r5
mov r11, r11, ASR #25
add r11, r11, r6
mov r11, r11, ASR #26
add r11, r11, r7
mov r11, r11, ASR #25
add r11, r11, r8
mov r11, r11, ASR #26
add r11, r11, r9
mov r11, r11, ASR #25
add r11, r11, r10
mov r11, r11, ASR #26
add r11, r11, r1
mov r11, r11, ASR #25
add r2, r2, r11
add r2, r2, r11, LSL #1
add r2, r2, r11, LSL #4
mov r11, r2, ASR #26
add r3, r3, r11
sub r2, r2, r11, LSL #26
mov r11, r3, ASR #25
add r4, r4, r11
sub r3, r3, r11, LSL #25
mov r11, r4, ASR #26
add r5, r5, r11
sub r4, r4, r11, LSL #26
mov r11, r5, ASR #25
add r6, r6, r11
sub r5, r5, r11, LSL #25
mov r11, r6, ASR #26
add r7, r7, r11
sub r6, r6, r11, LSL #26
mov r11, r7, ASR #25
add r8, r8, r11
sub r7, r7, r11, LSL #25
mov r11, r8, ASR #26
add r9, r9, r11
sub r8, r8, r11, LSL #26
mov r11, r9, ASR #25
add r10, r10, r11
sub r9, r9, r11, LSL #25
mov r11, r10, ASR #26
add r1, r1, r11
sub r10, r10, r11, LSL #26
mov r11, r1, ASR #25
sub r1, r1, r11, LSL #25
add r2, r2, r3, LSL #26
mov r3, r3, LSR #6
add r3, r3, r4, LSL #19
mov r4, r4, LSR #13
add r4, r4, r5, LSL #13
mov r5, r5, LSR #19
add r5, r5, r6, LSL #6
add r6, r7, r8, LSL #25
mov r7, r8, LSR #7
add r7, r7, r9, LSL #19
mov r8, r9, LSR #13
add r8, r8, r10, LSL #12
mov r9, r10, LSR #20
add r1, r9, r1, LSL #6
str r2, [r0]
str r3, [r0, #4]
str r4, [r0, #8]
str r5, [r0, #12]
str r6, [r0, #16]
str r7, [r0, #20]
str r8, [r0, #24]
str r1, [r0, #28]
movw r0, #0
mov sp, ip
pop {r4-r11, pc}
ENDPROC(curve25519_neon)
|
aixcc-public/challenge-001-exemplar-source
| 12,047
|
arch/arm/crypto/crct10dif-ce-core.S
|
//
// Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions instructions
//
// Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
// Copyright (C) 2019 Google LLC <ebiggers@google.com>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation.
//
// Derived from the x86 version:
//
// Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
//
// Copyright (c) 2013, Intel Corporation
//
// Authors:
// Erdinc Ozturk <erdinc.ozturk@intel.com>
// Vinodh Gopal <vinodh.gopal@intel.com>
// James Guilford <james.guilford@intel.com>
// Tim Chen <tim.c.chen@linux.intel.com>
//
// This software is available to you under a choice of one of two
// licenses. You may choose to be licensed under the terms of the GNU
// General Public License (GPL) Version 2, available from the file
// COPYING in the main directory of this source tree, or the
// OpenIB.org BSD license below:
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// * Neither the name of the Intel Corporation nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
//
// THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Reference paper titled "Fast CRC Computation for Generic
// Polynomials Using PCLMULQDQ Instruction"
// URL: http://www.intel.com/content/dam/www/public/us/en/documents
// /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
//
#include <linux/linkage.h>
#include <asm/assembler.h>
#ifdef CONFIG_CPU_ENDIAN_BE8
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif
.text
.arch armv8-a
.fpu crypto-neon-fp-armv8
init_crc .req r0
buf .req r1
len .req r2
fold_consts_ptr .req ip
q0l .req d0
q0h .req d1
q1l .req d2
q1h .req d3
q2l .req d4
q2h .req d5
q3l .req d6
q3h .req d7
q4l .req d8
q4h .req d9
q5l .req d10
q5h .req d11
q6l .req d12
q6h .req d13
q7l .req d14
q7h .req d15
q8l .req d16
q8h .req d17
q9l .req d18
q9h .req d19
q10l .req d20
q10h .req d21
q11l .req d22
q11h .req d23
q12l .req d24
q12h .req d25
FOLD_CONSTS .req q10
FOLD_CONST_L .req q10l
FOLD_CONST_H .req q10h
// Fold reg1, reg2 into the next 32 data bytes, storing the result back
// into reg1, reg2.
// fold_32_bytes: loads the next 32 data bytes into q11/q12, multiplies
// \reg1 and \reg2 by the current fold constants (low and high 64-bit
// halves via separate carry-less multiplies), and XORs everything
// together so \reg1/\reg2 again hold a value congruent to the CRC state.
// Clobbers q8, q9, q11, q12 and advances 'buf' by 32.
.macro fold_32_bytes, reg1, reg2
	vld1.64		{q11-q12}, [buf]!
	vmull.p64	q8, \reg1\()h, FOLD_CONST_H
	vmull.p64	\reg1, \reg1\()l, FOLD_CONST_L
	vmull.p64	q9, \reg2\()h, FOLD_CONST_H
	vmull.p64	\reg2, \reg2\()l, FOLD_CONST_L
	// Byte-reverse the freshly loaded data so the bit order matches the
	// polynomial coefficient order (no-op when building big-endian).
CPU_LE(	vrev64.8	q11, q11	)
CPU_LE(	vrev64.8	q12, q12	)
	vswp		q11l, q11h
	vswp		q12l, q12h
	veor.8		\reg1, \reg1, q8
	veor.8		\reg2, \reg2, q9
	veor.8		\reg1, \reg1, q11
	veor.8		\reg2, \reg2, q12
.endm
// Fold src_reg into dst_reg, optionally loading the next fold constants
// fold_16_bytes: folds \src_reg into \dst_reg by multiplying the two
// 64-bit halves of \src_reg by the current fold constants and XORing both
// products into \dst_reg.  If \load_next_consts is non-blank, also
// post-loads the next pair of fold constants from [fold_consts_ptr].
// Clobbers q8 (and \src_reg).
.macro fold_16_bytes, src_reg, dst_reg, load_next_consts
	vmull.p64	q8, \src_reg\()l, FOLD_CONST_L
	vmull.p64	\src_reg, \src_reg\()h, FOLD_CONST_H
.ifnb \load_next_consts
	vld1.64		{FOLD_CONSTS}, [fold_consts_ptr, :128]!
.endif
	veor.8		\dst_reg, \dst_reg, q8
	veor.8		\dst_reg, \dst_reg, \src_reg
.endm

// __adrl: materialize the 32-bit address of \sym in \out with a
// movw/movt pair (no literal pool needed; valid in ARM and Thumb-2).
.macro __adrl, out, sym
	movw		\out, #:lower16:\sym
	movt		\out, #:upper16:\sym
.endm
//
// u16 crc_t10dif_pmull(u16 init_crc, const u8 *buf, size_t len);
//
// Assumes len >= 16.
//
//
// u16 crc_t10dif_pmull(u16 init_crc, const u8 *buf, size_t len)
//
// In:  r0 = init_crc, r1 = buf, r2 = len (callers must ensure len >= 16)
// Out: r0 = updated 16-bit CRC
// Strategy: fold 128 bytes at a time with vmull.p64 while possible, then
// narrow to 16-byte folds, then Barrett-reduce the final 128-bit value.
//
ENTRY(crc_t10dif_pmull)
	// For sizes less than 256 bytes, we can't fold 128 bytes at a time.
	cmp		len, #256
	blt		.Lless_than_256_bytes

	__adrl		fold_consts_ptr, .Lfold_across_128_bytes_consts

	// Load the first 128 data bytes.  Byte swapping is necessary to make
	// the bit order match the polynomial coefficient order.
	vld1.64		{q0-q1}, [buf]!
	vld1.64		{q2-q3}, [buf]!
	vld1.64		{q4-q5}, [buf]!
	vld1.64		{q6-q7}, [buf]!
CPU_LE(	vrev64.8	q0, q0	)
CPU_LE(	vrev64.8	q1, q1	)
CPU_LE(	vrev64.8	q2, q2	)
CPU_LE(	vrev64.8	q3, q3	)
CPU_LE(	vrev64.8	q4, q4	)
CPU_LE(	vrev64.8	q5, q5	)
CPU_LE(	vrev64.8	q6, q6	)
CPU_LE(	vrev64.8	q7, q7	)
	vswp		q0l, q0h
	vswp		q1l, q1h
	vswp		q2l, q2h
	vswp		q3l, q3h
	vswp		q4l, q4h
	vswp		q5l, q5h
	vswp		q6l, q6h
	vswp		q7l, q7h

	// XOR the first 16 data *bits* with the initial CRC value.
	vmov.i8		q8h, #0
	vmov.u16	q8h[3], init_crc
	veor		q0h, q0h, q8h

	// Load the constants for folding across 128 bytes.
	vld1.64		{FOLD_CONSTS}, [fold_consts_ptr, :128]!

	// Subtract 128 for the 128 data bytes just consumed.  Subtract another
	// 128 to simplify the termination condition of the following loop.
	sub		len, len, #256

	// While >= 128 data bytes remain (not counting q0-q7), fold the 128
	// bytes q0-q7 into them, storing the result back into q0-q7.
.Lfold_128_bytes_loop:
	fold_32_bytes	q0, q1
	fold_32_bytes	q2, q3
	fold_32_bytes	q4, q5
	fold_32_bytes	q6, q7
	subs		len, len, #128
	bge		.Lfold_128_bytes_loop

	// Now fold the 112 bytes in q0-q6 into the 16 bytes in q7.

	// Fold across 64 bytes.
	vld1.64		{FOLD_CONSTS}, [fold_consts_ptr, :128]!
	fold_16_bytes	q0, q4
	fold_16_bytes	q1, q5
	fold_16_bytes	q2, q6
	fold_16_bytes	q3, q7, 1
	// Fold across 32 bytes.
	fold_16_bytes	q4, q6
	fold_16_bytes	q5, q7, 1
	// Fold across 16 bytes.
	fold_16_bytes	q6, q7

	// Add 128 to get the correct number of data bytes remaining in 0...127
	// (not counting q7), following the previous extra subtraction by 128.
	// Then subtract 16 to simplify the termination condition of the
	// following loop.
	adds		len, len, #(128-16)

	// While >= 16 data bytes remain (not counting q7), fold the 16 bytes q7
	// into them, storing the result back into q7.
	blt		.Lfold_16_bytes_loop_done
.Lfold_16_bytes_loop:
	vmull.p64	q8, q7l, FOLD_CONST_L
	vmull.p64	q7, q7h, FOLD_CONST_H
	veor.8		q7, q7, q8
	vld1.64		{q0}, [buf]!
CPU_LE(	vrev64.8	q0, q0	)
	vswp		q0l, q0h
	veor.8		q7, q7, q0
	subs		len, len, #16
	bge		.Lfold_16_bytes_loop

.Lfold_16_bytes_loop_done:
	// Add 16 to get the correct number of data bytes remaining in 0...15
	// (not counting q7), following the previous extra subtraction by 16.
	adds		len, len, #16
	beq		.Lreduce_final_16_bytes

.Lhandle_partial_segment:
	// Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first
	// 16 bytes are in q7 and the rest are the remaining data in 'buf'.  To
	// do this without needing a fold constant for each possible 'len',
	// redivide the bytes into a first chunk of 'len' bytes and a second
	// chunk of 16 bytes, then fold the first chunk into the second.

	// q0 = last 16 original data bytes
	add		buf, buf, len
	sub		buf, buf, #16
	vld1.64		{q0}, [buf]
CPU_LE(	vrev64.8	q0, q0	)
	vswp		q0l, q0h

	// q1 = high order part of second chunk: q7 left-shifted by 'len' bytes.
	__adrl		r3, .Lbyteshift_table + 16
	sub		r3, r3, len
	vld1.8		{q2}, [r3]
	vtbl.8		q1l, {q7l-q7h}, q2l
	vtbl.8		q1h, {q7l-q7h}, q2h

	// q3 = first chunk: q7 right-shifted by '16-len' bytes.
	vmov.i8		q3, #0x80
	veor.8		q2, q2, q3
	vtbl.8		q3l, {q7l-q7h}, q2l
	vtbl.8		q3h, {q7l-q7h}, q2h

	// Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff bytes.
	vshr.s8		q2, q2, #7

	// q2 = second chunk: 'len' bytes from q0 (low-order bytes),
	// then '16-len' bytes from q1 (high-order bytes).
	vbsl.8		q2, q1, q0

	// Fold the first chunk into the second chunk, storing the result in q7.
	vmull.p64	q0, q3l, FOLD_CONST_L
	vmull.p64	q7, q3h, FOLD_CONST_H
	veor.8		q7, q7, q0
	veor.8		q7, q7, q2

.Lreduce_final_16_bytes:
	// Reduce the 128-bit value M(x), stored in q7, to the final 16-bit CRC.

	// Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'.
	vld1.64		{FOLD_CONSTS}, [fold_consts_ptr, :128]!

	// Fold the high 64 bits into the low 64 bits, while also multiplying by
	// x^64.  This produces a 128-bit value congruent to x^64 * M(x) and
	// whose low 48 bits are 0.
	vmull.p64	q0, q7h, FOLD_CONST_H	// high bits * x^48 * (x^80 mod G(x))
	veor.8		q0h, q0h, q7l		// + low bits * x^64

	// Fold the high 32 bits into the low 96 bits.  This produces a 96-bit
	// value congruent to x^64 * M(x) and whose low 48 bits are 0.
	vmov.i8		q1, #0
	vmov		s4, s3			// extract high 32 bits
	vmov		s3, s5			// zero high 32 bits
	vmull.p64	q1, q1l, FOLD_CONST_L	// high 32 bits * x^48 * (x^48 mod G(x))
	veor.8		q0, q0, q1		// + low bits

	// Load G(x) and floor(x^48 / G(x)).
	vld1.64		{FOLD_CONSTS}, [fold_consts_ptr, :128]

	// Use Barrett reduction to compute the final CRC value.
	vmull.p64	q1, q0h, FOLD_CONST_H	// high 32 bits * floor(x^48 / G(x))
	vshr.u64	q1l, q1l, #32		// /= x^32
	vmull.p64	q1, q1l, FOLD_CONST_L	// *= G(x)
	vshr.u64	q0l, q0l, #48
	veor.8		q0l, q0l, q1l		// + low 16 nonzero bits
	// Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of q0.

	vmov.u16	r0, q0l[0]
	bx		lr

.Lless_than_256_bytes:
	// Checksumming a buffer of length 16...255 bytes
	__adrl		fold_consts_ptr, .Lfold_across_16_bytes_consts

	// Load the first 16 data bytes.
	vld1.64		{q7}, [buf]!
CPU_LE(	vrev64.8	q7, q7	)
	vswp		q7l, q7h

	// XOR the first 16 data *bits* with the initial CRC value.
	vmov.i8		q0h, #0
	vmov.u16	q0h[3], init_crc
	veor.8		q7h, q7h, q0h

	// Load the fold-across-16-bytes constants.
	vld1.64		{FOLD_CONSTS}, [fold_consts_ptr, :128]!

	cmp		len, #16
	beq		.Lreduce_final_16_bytes	// len == 16
	subs		len, len, #32
	addlt		len, len, #16
	blt		.Lhandle_partial_segment	// 17 <= len <= 31
	b		.Lfold_16_bytes_loop	// 32 <= len <= 255
ENDPROC(crc_t10dif_pmull)
	.section	".rodata", "a"
	.align		4

// Fold constants precomputed from the polynomial 0x18bb7
// G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
// Each pair of quadwords below is consumed as one 128-bit FOLD_CONSTS
// load; the commented-out labels mark where the sequential pointer walk
// in crc_t10dif_pmull lands after each 16-byte post-increment.
.Lfold_across_128_bytes_consts:
	.quad		0x0000000000006123	// x^(8*128)	mod G(x)
	.quad		0x0000000000002295	// x^(8*128+64)	mod G(x)
// .Lfold_across_64_bytes_consts:
	.quad		0x0000000000001069	// x^(4*128)	mod G(x)
	.quad		0x000000000000dd31	// x^(4*128+64)	mod G(x)
// .Lfold_across_32_bytes_consts:
	.quad		0x000000000000857d	// x^(2*128)	mod G(x)
	.quad		0x0000000000007acc	// x^(2*128+64)	mod G(x)
.Lfold_across_16_bytes_consts:
	.quad		0x000000000000a010	// x^(1*128)	mod G(x)
	.quad		0x0000000000001faa	// x^(1*128+64)	mod G(x)
// .Lfinal_fold_consts:
	.quad		0x1368000000000000	// x^48 * (x^48 mod G(x))
	.quad		0x2d56000000000000	// x^48 * (x^80 mod G(x))
// .Lbarrett_reduction_consts:
	.quad		0x0000000000018bb7	// G(x)
	.quad		0x00000001f65a57f8	// floor(x^48 / G(x))

// For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 -
// len] is the index vector to shift left by 'len' bytes, and is also {0x80,
// ..., 0x80} XOR the index vector to shift right by '16 - len' bytes.
.Lbyteshift_table:
	.byte		 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87
	.byte		0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
	.byte		 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
	.byte		 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe , 0x0
|
aixcc-public/challenge-001-exemplar-source
| 4,440
|
arch/arm/crypto/aes-cipher-core.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Scalar AES core transform
*
* Copyright (C) 2017 Linaro Ltd.
* Author: Ard Biesheuvel <ard.biesheuvel@linaro.org>
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/cache.h>
.text
.align 5
rk .req r0
rounds .req r1
in .req r2
out .req r3
ttab .req ip
t0 .req lr
t1 .req r2
t2 .req r3
	// __select: extract byte \idx (0..3) of word \in into \out.
	// Pre-ARMv7 cores lack UBFX, so a masked AND is used instead; the
	// result is then still shifted left by 8*\idx, which __load
	// compensates for in its addressing mode.
	.macro		__select, out, in, idx
	.if		__LINUX_ARM_ARCH__ < 7
	and		\out, \in, #0xff << (8 * \idx)
	.else
	ubfx		\out, \in, #(8 * \idx), #8
	.endif
	.endm

	// __load: table lookup \out = ttab[\in], scaled by the entry size
	// 2^\sz, using load flavour 'ldr\op' (e.g. ldrb for the sbox pass).
	// On pre-v7 with \idx > 0 the index from __select is still
	// pre-shifted, so fold the correction into the shift amount.
	.macro		__load, out, in, idx, sz, op
	.if		__LINUX_ARM_ARCH__ < 7 && \idx > 0
	ldr\op		\out, [ttab, \in, lsr #(8 * \idx) - \sz]
	.else
	ldr\op		\out, [ttab, \in, lsl #\sz]
	.endif
	.endm
	// __hround: compute two output columns (\out0, \out1) of one AES
	// round from the four input columns \in0..\in3.  Each output word is
	// the XOR of four table lookups (one byte per input column, rotated
	// into place) plus the next round-key word.  \enc selects the
	// forward vs inverse byte-selection pattern; \t3/\t4 are caller-
	// provided scratch registers.  If \oldcpsr is non-blank this is the
	// final round and interrupts are re-enabled once the last
	// data-dependent lookup has been issued.  Clobbers t0-t2 and
	// advances rk by 8 bytes.
	.macro		__hround, out0, out1, in0, in1, in2, in3, t3, t4, enc, sz, op, oldcpsr
	__select	\out0, \in0, 0
	__select	t0, \in1, 1
	__load		\out0, \out0, 0, \sz, \op
	__load		t0, t0, 1, \sz, \op
	.if		\enc
	__select	\out1, \in1, 0
	__select	t1, \in2, 1
	.else
	__select	\out1, \in3, 0
	__select	t1, \in0, 1
	.endif
	__load		\out1, \out1, 0, \sz, \op
	__select	t2, \in2, 2
	__load		t1, t1, 1, \sz, \op
	__load		t2, t2, 2, \sz, \op
	eor		\out0, \out0, t0, ror #24
	__select	t0, \in3, 3
	.if		\enc
	__select	\t3, \in3, 2
	__select	\t4, \in0, 3
	.else
	__select	\t3, \in1, 2
	__select	\t4, \in2, 3
	.endif
	__load		\t3, \t3, 2, \sz, \op
	__load		t0, t0, 3, \sz, \op
	__load		\t4, \t4, 3, \sz, \op
	.ifnb		\oldcpsr
	/*
	 * This is the final round and we're done with all data-dependent table
	 * lookups, so we can safely re-enable interrupts.
	 */
	restore_irqs	\oldcpsr
	.endif
	eor		\out1, \out1, t1, ror #24
	eor		\out0, \out0, t2, ror #16
	ldm		rk!, {t1, t2}
	eor		\out1, \out1, \t3, ror #16
	eor		\out0, \out0, t0, ror #8
	eor		\out1, \out1, \t4, ror #8
	eor		\out0, \out0, t1
	eor		\out1, \out1, t2
	.endm
	// fround: one full forward (encryption) AES round producing all four
	// output columns via two __hround expansions.  \oldcpsr is only
	// passed to the second half so IRQs are restored after the last
	// lookup of the final round.
	.macro		fround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op, oldcpsr
	__hround	\out0, \out1, \in0, \in1, \in2, \in3, \out2, \out3, 1, \sz, \op
	__hround	\out2, \out3, \in2, \in3, \in0, \in1, \in1, \in2, 1, \sz, \op, \oldcpsr
	.endm

	// iround: one full inverse (decryption) AES round; same structure as
	// fround but with the inverse byte-selection pattern (enc=0).
	.macro		iround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op, oldcpsr
	__hround	\out0, \out1, \in0, \in3, \in2, \in1, \out2, \out3, 0, \sz, \op
	__hround	\out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0, \sz, \op, \oldcpsr
	.endm
	// do_crypt: shared body for the scalar AES encrypt/decrypt entry
	// points.  \round is fround or iround, \ttab the main lookup table,
	// \ltab the (optional) last-round table, \bsz the last-round entry
	// size shift.  C signature of the generated function:
	//   void f(const u32 rk[], int rounds, const u8 *in, u8 *out)
	// with rk=r0, rounds=r1, in=r2, out=r3 (AAPCS).
	.macro		do_crypt, round, ttab, ltab, bsz
	push		{r3-r11, lr}

	// Load keys first, to reduce latency in case they're not cached yet.
	ldm		rk!, {r8-r11}

	ldr		r4, [in]
	ldr		r5, [in, #4]
	ldr		r6, [in, #8]
	ldr		r7, [in, #12]

#ifdef CONFIG_CPU_BIG_ENDIAN
	rev_l		r4, t0
	rev_l		r5, t0
	rev_l		r6, t0
	rev_l		r7, t0
#endif

	// Initial AddRoundKey.
	eor		r4, r4, r8
	eor		r5, r5, r9
	eor		r6, r6, r10
	eor		r7, r7, r11

	mov_l		ttab, \ttab
	/*
	 * Disable interrupts and prefetch the 1024-byte 'ft' or 'it' table into
	 * L1 cache, assuming cacheline size >= 32.  This is a hardening measure
	 * intended to make cache-timing attacks more difficult.  They may not
	 * be fully prevented, however; see the paper
	 * https://cr.yp.to/antiforgery/cachetiming-20050414.pdf
	 * ("Cache-timing attacks on AES") for a discussion of the many
	 * difficulties involved in writing truly constant-time AES software.
	 */
	save_and_disable_irqs	t0
	.set		i, 0
	.rept		1024 / 128
	ldr		r8, [ttab, #i + 0]
	ldr		r9, [ttab, #i + 32]
	ldr		r10, [ttab, #i + 64]
	ldr		r11, [ttab, #i + 96]
	.set		i, i + 128
	.endr

	push		{t0}		// oldcpsr

	// Odd round counts (rounds & 2, counting in pairs) enter the loop at
	// its midpoint so the pairing below still works out.
	tst		rounds, #2
	bne		1f

0:	\round		r8, r9, r10, r11, r4, r5, r6, r7
	\round		r4, r5, r6, r7, r8, r9, r10, r11

1:	subs		rounds, rounds, #4
	\round		r8, r9, r10, r11, r4, r5, r6, r7
	bls		2f
	\round		r4, r5, r6, r7, r8, r9, r10, r11
	b		0b

2:	.ifb		\ltab
	add		ttab, ttab, #1	// advance to the byte-wide sbox view
	.else
	mov_l		ttab, \ltab
	// Prefetch inverse S-box for final round; see explanation above
	.set		i, 0
	.rept		256 / 64
	ldr		t0, [ttab, #i + 0]
	ldr		t1, [ttab, #i + 32]
	.set		i, i + 64
	.endr
	.endif

	pop		{rounds}	// oldcpsr
	\round		r4, r5, r6, r7, r8, r9, r10, r11, \bsz, b, rounds

#ifdef CONFIG_CPU_BIG_ENDIAN
	rev_l		r4, t0
	rev_l		r5, t0
	rev_l		r6, t0
	rev_l		r7, t0
#endif

	// 'out' (r3) was pushed in the prologue; t2 aliases r3.
	ldr		out, [sp]

	str		r4, [out]
	str		r5, [out, #4]
	str		r6, [out, #8]
	str		r7, [out, #12]

	pop		{r3-r11, pc}

	.align		3
	.ltorg
	.endm
// Scalar AES block encrypt: one 16-byte block, forward tables, no
// separate last-round table (the ft table is re-indexed with +1).
ENTRY(__aes_arm_encrypt)
	do_crypt	fround, crypto_ft_tab,, 2
ENDPROC(__aes_arm_encrypt)

	.align		5
// Scalar AES block decrypt: inverse tables plus a dedicated inverse
// S-box for the final round (byte-wide entries, bsz=0).
ENTRY(__aes_arm_decrypt)
	do_crypt	iround, crypto_it_tab, crypto_aes_inv_sbox, 0
ENDPROC(__aes_arm_decrypt)
|
aixcc-public/challenge-001-exemplar-source
| 8,639
|
arch/arm/vfp/vfphw.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/vfp/vfphw.S
*
* Copyright (C) 2004 ARM Limited.
* Written by Deep Blue Solutions Limited.
*
* This code is called from the kernel's undefined instruction trap.
* r9 holds the return address for successful handling.
* lr holds the return address for unrecognised instructions.
* r10 points at the start of the private FP workspace in the thread structure
* sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h)
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/vfpmacros.h>
#include <linux/kern_levels.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
	@ DBGSTR: printk a literal debug string.  Compiles to nothing unless
	@ DEBUG is defined; saves/restores all caller-clobbered registers so
	@ it can be sprinkled anywhere in the handlers below.
	.macro	DBGSTR, str
#ifdef DEBUG
	stmfd	sp!, {r0-r3, ip, lr}
	ldr	r0, =1f
	bl	_printk
	ldmfd	sp!, {r0-r3, ip, lr}

	.pushsection .rodata, "a"
1:	.ascii	KERN_DEBUG "VFP: \str\n"
	.byte	0
	.previous
#endif
	.endm

	@ DBGSTR1: like DBGSTR with one printk argument (\arg -> %-spec in \str).
	.macro  DBGSTR1, str, arg
#ifdef DEBUG
	stmfd	sp!, {r0-r3, ip, lr}
	mov	r1, \arg
	ldr	r0, =1f
	bl	_printk
	ldmfd	sp!, {r0-r3, ip, lr}

	.pushsection .rodata, "a"
1:	.ascii	KERN_DEBUG "VFP: \str\n"
	.byte	0
	.previous
#endif
	.endm

	@ DBGSTR3: like DBGSTR with three printk arguments.  Args are moved
	@ into r3..r1 in reverse so an \arg that lives in r1/r2 is not
	@ clobbered before it is read.
	.macro  DBGSTR3, str, arg1, arg2, arg3
#ifdef DEBUG
	stmfd	sp!, {r0-r3, ip, lr}
	mov	r3, \arg3
	mov	r2, \arg2
	mov	r1, \arg1
	ldr	r0, =1f
	bl	_printk
	ldmfd	sp!, {r0-r3, ip, lr}

	.pushsection .rodata, "a"
1:	.ascii	KERN_DEBUG "VFP: \str\n"
	.byte	0
	.previous
#endif
	.endm
@ VFP hardware support entry point.
@
@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
@ r2 = PC value to resume execution after successful emulation
@ r9 = normal "successful" return address
@ r10 = vfp_state union
@ r11 = CPU number
@ lr = unrecognised instruction return address
@ IRQs enabled.
@ vfp_support_entry: handle a VFP instruction that trapped as undefined.
@ Lazily switches the VFP hardware context to the current thread (UP
@ saves the previous owner's state here; SMP state was already saved at
@ context switch), then either retries the instruction or bounces to the
@ C support code for exception handling.  Register contract is described
@ in the block comment above.
ENTRY(vfp_support_entry)
	DBGSTR3	"instr %08x pc %08x state %p", r0, r2, r10

	.fpu	vfpv2
	VFPFMRX	r1, FPEXC		@ Is the VFP enabled?
	DBGSTR1	"fpexc %08x", r1
	tst	r1, #FPEXC_EN
	bne	look_for_VFP_exceptions	@ VFP is already enabled

	DBGSTR1 "enable %x", r10
	ldr	r3, vfp_current_hw_state_address
	orr	r1, r1, #FPEXC_EN	@ user FPEXC has the enable bit set
	ldr	r4, [r3, r11, lsl #2]	@ vfp_current_hw_state pointer
	bic	r5, r1, #FPEXC_EX	@ make sure exceptions are disabled
	cmp	r4, r10			@ this thread owns the hw context?
#ifndef CONFIG_SMP
	@ For UP, checking that this thread owns the hw context is
	@ sufficient to determine that the hardware state is valid.
	beq	vfp_hw_state_valid

	@ On UP, we lazily save the VFP context.  As a different
	@ thread wants ownership of the VFP hardware, save the old
	@ state if there was a previous (valid) owner.

	VFPFMXR	FPEXC, r5		@ enable VFP, disable any pending
					@ exceptions, so we can get at the
					@ rest of it

	DBGSTR1	"save old state %p", r4
	cmp	r4, #0			@ if the vfp_current_hw_state is NULL
	beq	vfp_reload_hw		@ then the hw state needs reloading
	VFPFSTMIA r4, r5		@ save the working registers
	VFPFMRX	r5, FPSCR		@ current status
#ifndef CONFIG_CPU_FEROCEON
	tst	r1, #FPEXC_EX		@ is there additional state to save?
	beq	1f
	VFPFMRX	r6, FPINST		@ FPINST (only if FPEXC.EX is set)
	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to read?
	beq	1f
	VFPFMRX	r8, FPINST2		@ FPINST2 if needed (and present)
1:
#endif
	stmia	r4, {r1, r5, r6, r8}	@ save FPEXC, FPSCR, FPINST, FPINST2
vfp_reload_hw:

#else
	@ For SMP, if this thread does not own the hw context, then we
	@ need to reload it.  No need to save the old state as on SMP,
	@ we always save the state when we switch away from a thread.
	bne	vfp_reload_hw

	@ This thread has ownership of the current hardware context.
	@ However, it may have been migrated to another CPU, in which
	@ case the saved state is newer than the hardware context.
	@ Check this by looking at the CPU number which the state was
	@ last loaded onto.
	ldr	ip, [r10, #VFP_CPU]
	teq	ip, r11
	beq	vfp_hw_state_valid

vfp_reload_hw:
	@ We're loading this threads state into the VFP hardware. Update
	@ the CPU number which contains the most up to date VFP context.
	str	r11, [r10, #VFP_CPU]

	VFPFMXR	FPEXC, r5		@ enable VFP, disable any pending
					@ exceptions, so we can get at the
					@ rest of it
#endif

	DBGSTR1	"load state %p", r10
	str	r10, [r3, r11, lsl #2]	@ update the vfp_current_hw_state pointer
					@ Load the saved state back into the VFP
	VFPFLDMIA r10, r5		@ reload the working registers while
					@ FPEXC is in a safe state
	ldmia	r10, {r1, r5, r6, r8}	@ load FPEXC, FPSCR, FPINST, FPINST2
#ifndef CONFIG_CPU_FEROCEON
	tst	r1, #FPEXC_EX		@ is there additional state to restore?
	beq	1f
	VFPFMXR	FPINST, r6		@ restore FPINST (only if FPEXC.EX is set)
	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to write?
	beq	1f
	VFPFMXR	FPINST2, r8		@ FPINST2 if needed (and present)
1:
#endif
	VFPFMXR	FPSCR, r5		@ restore status

@ The context stored in the VFP hardware is up to date with this thread
vfp_hw_state_valid:
	tst	r1, #FPEXC_EX
	bne	process_exception	@ might as well handle the pending
					@ exception before retrying branch
					@ out before setting an FPEXC that
					@ stops us reading stuff

	VFPFMXR	FPEXC, r1		@ Restore FPEXC last
	sub	r2, r2, #4		@ Retry current instruction - if Thumb
	str	r2, [sp, #S_PC]		@ mode it's two 16-bit instructions,
					@ else it's one 32-bit instruction, so
					@ always subtract 4 from the following
					@ instruction address.

	dec_preempt_count_ti r10, r4
	ret	r9			@ we think we have handled things

look_for_VFP_exceptions:
	@ Check for synchronous or asynchronous exception
	tst	r1, #FPEXC_EX | FPEXC_DEX
	bne	process_exception
	@ On some implementations of the VFP subarch 1, setting FPSCR.IXE
	@ causes all the CDP instructions to be bounced synchronously without
	@ setting the FPEXC.EX bit
	VFPFMRX	r5, FPSCR
	tst	r5, #FPSCR_IXE
	bne	process_exception

	tst	r5, #FPSCR_LENGTH_MASK
	beq	skip
	orr	r1, r1, #FPEXC_DEX
	b	process_exception

skip:
	@ Fall into hand on to next handler - appropriate coproc instr
	@ not recognised by VFP
	DBGSTR	"not VFP"
	dec_preempt_count_ti r10, r4
	ret	lr

process_exception:
	DBGSTR	"bounce"
	mov	r2, sp			@ nothing stacked - regdump is at TOS
	mov	lr, r9			@ setup for a return to the user code.

	@ Now call the C code to package up the bounce to the support code
	@   r0 holds the trigger instruction
	@   r1 holds the FPEXC value
	@   r2 pointer to register dump
	b	VFP_bounce		@ we have handled this - the support
					@ code will raise an exception if
					@ required. If not, the user code will
					@ retry the faulted instruction
ENDPROC(vfp_support_entry)
ENTRY(vfp_save_state)
	@ Save the current VFP state
	@ r0 - save location
	@ r1 - FPEXC
	@ Clobbers r2, r3, r12; the optional FPINST/FPINST2 words are only
	@ read when FPEXC.EX (and FPEXC.FP2V) indicate they are valid.
	DBGSTR1	"save VFP state %p", r0
	VFPFSTMIA r0, r2		@ save the working registers
	VFPFMRX	r2, FPSCR		@ current status
	tst	r1, #FPEXC_EX		@ is there additional state to save?
	beq	1f
	VFPFMRX	r3, FPINST		@ FPINST (only if FPEXC.EX is set)
	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to read?
	beq	1f
	VFPFMRX	r12, FPINST2		@ FPINST2 if needed (and present)
1:
	stmia	r0, {r1, r2, r3, r12}	@ save FPEXC, FPSCR, FPINST, FPINST2
	ret	lr
ENDPROC(vfp_save_state)
	.align
	@ Literal pool entry: address of the per-CPU vfp_current_hw_state
	@ pointer array, loaded PC-relative by vfp_support_entry.
vfp_current_hw_state_address:
	.word	vfp_current_hw_state

	@ tbl_branch: computed jump into a table of fixed-size (1 << \shift
	@ bytes) code slots indexed by \base.  Thumb-2 cannot add to pc
	@ directly, so it goes via \tmp; the ARM version relies on pc
	@ reading as '.+8', with a mov as the skipped filler slot.
	.macro	tbl_branch, base, tmp, shift
#ifdef CONFIG_THUMB2_KERNEL
	adr	\tmp, 1f
	add	\tmp, \tmp, \base, lsl \shift
	ret	\tmp
#else
	add	pc, pc, \base, lsl \shift
	mov	r0, r0
#endif
1:
	.endm
@ vfp_get_float: return (in r0) the value of single-precision register
@ s<r0>.  Dispatches through an 8-byte-per-slot jump table; .org enforces
@ the fixed slot size so the computed branch stays in step.
ENTRY(vfp_get_float)
	tbl_branch r0, r3, #3
	.fpu	vfpv2
	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1:	vmov	r0, s\dr
	ret	lr
	.org	1b + 8
	.endr
	.irp	dr,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
1:	vmov	r0, s\dr
	ret	lr
	.org	1b + 8
	.endr
ENDPROC(vfp_get_float)

@ vfp_put_float: store r0 into single-precision register s<r1>.
@ Same 8-byte slot table layout as vfp_get_float.
ENTRY(vfp_put_float)
	tbl_branch r1, r3, #3
	.fpu	vfpv2
	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1:	vmov	s\dr, r0
	ret	lr
	.org	1b + 8
	.endr
	.irp	dr,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
1:	vmov	s\dr, r0
	ret	lr
	.org	1b + 8
	.endr
ENDPROC(vfp_put_float)
@ vfp_get_double: return d<r0> in r0 (low word) and r1 (high word).
@ d16-d31 slots exist only with VFPv3; out-of-range indices fall through
@ to the zero return below.
ENTRY(vfp_get_double)
	tbl_branch r0, r3, #3
	.fpu	vfpv2
	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1:	vmov	r0, r1, d\dr
	ret	lr
	.org	1b + 8
	.endr
#ifdef CONFIG_VFPv3
	@ d16 - d31 registers
	.fpu	vfpv3
	.irp	dr,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
1:	vmov	r0, r1, d\dr
	ret	lr
	.org	1b + 8
	.endr
#endif

	@ virtual register 16 (or 32 if VFPv3) for compare with zero
	mov	r0, #0
	mov	r1, #0
	ret	lr
ENDPROC(vfp_get_double)

@ vfp_put_double: store r0 (low word) and r1 (high word) into d<r2>.
ENTRY(vfp_put_double)
	tbl_branch r2, r3, #3
	.fpu	vfpv2
	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1:	vmov	d\dr, r0, r1
	ret	lr
	.org	1b + 8
	.endr
#ifdef CONFIG_VFPv3
	.fpu	vfpv3
	@ d16 - d31 registers
	.irp	dr,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
1:	vmov	d\dr, r0, r1
	ret	lr
	.org	1b + 8
	.endr
#endif
ENDPROC(vfp_put_double)
|
aixcc-public/challenge-001-exemplar-source
| 4,783
|
arch/arm/mach-lpc32xx/suspend.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/mach-lpc32xx/suspend.S
*
* Original authors: Dmitry Chigirev, Vitaly Wool <source@mvista.com>
* Modified by Kevin Wells <kevin.wells@nxp.com>
*
* 2005 (c) MontaVista Software, Inc.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include "lpc32xx.h"
/* Using named register defines makes the code easier to follow */
#define WORK1_REG r0
#define WORK2_REG r1
#define SAVED_HCLK_DIV_REG r2
#define SAVED_HCLK_PLL_REG r3
#define SAVED_DRAM_CLKCTRL_REG r4
#define SAVED_PWR_CTRL_REG r5
#define CLKPWRBASE_REG r6
#define EMCBASE_REG r7
#define LPC32XX_EMC_STATUS_OFFS 0x04
#define LPC32XX_EMC_STATUS_BUSY 0x1
#define LPC32XX_EMC_STATUS_SELF_RFSH 0x4
#define LPC32XX_CLKPWR_PWR_CTRL_OFFS 0x44
#define LPC32XX_CLKPWR_HCLK_DIV_OFFS 0x40
#define LPC32XX_CLKPWR_HCLKPLL_CTRL_OFFS 0x58
#define CLKPWR_PCLK_DIV_MASK 0xFFFFFE7F
.text
@ lpc32xx_sys_suspend: put SDRAM into self-refresh, drop to direct-run,
@ stop the HCLK PLL and enter stop mode; on wakeup, restore the PLL,
@ clocks and DRAM state in reverse order.  Runs from IRAM with a tiny
@ private register stash (tmp_stack) since DRAM is unavailable.
@
@ Fix vs. original: the "wait for PLL lock" (label 4) and "wait for
@ self-refresh exit" (label 5) loops used plain 'and', which does not
@ update the condition flags in ARM state, so the following conditional
@ branches tested stale flags left over from the loop-3 'cmp' and the
@ waits never actually ran.  They now use 'ands'; loop 4 additionally
@ branches on 'beq' because HCLKPLL_CTRL's PLL_STS bit reads 1 once the
@ PLL has locked, so we must spin while it is still clear.
ENTRY(lpc32xx_sys_suspend)
	@ Save a copy of the used registers in IRAM, r0 is corrupted
	adr	r0, tmp_stack_end
	stmfd	r0!, {r3 - r7, sp, lr}

	@ Load a few common register addresses
	adr	WORK1_REG, reg_bases
	ldr	CLKPWRBASE_REG, [WORK1_REG, #0]
	ldr	EMCBASE_REG, [WORK1_REG, #4]

	ldr	SAVED_PWR_CTRL_REG, [CLKPWRBASE_REG,\
		#LPC32XX_CLKPWR_PWR_CTRL_OFFS]
	orr	WORK1_REG, SAVED_PWR_CTRL_REG, #LPC32XX_CLKPWR_SDRAM_SELF_RFSH

	@ Wait for SDRAM busy status to go busy and then idle
	@ This guarantees a small window where DRAM isn't busy
1:
	ldr	WORK2_REG, [EMCBASE_REG, #LPC32XX_EMC_STATUS_OFFS]
	and	WORK2_REG, WORK2_REG, #LPC32XX_EMC_STATUS_BUSY
	cmp	WORK2_REG, #LPC32XX_EMC_STATUS_BUSY
	bne	1b				@ Branch while idle
2:
	ldr	WORK2_REG, [EMCBASE_REG, #LPC32XX_EMC_STATUS_OFFS]
	and	WORK2_REG, WORK2_REG, #LPC32XX_EMC_STATUS_BUSY
	cmp	WORK2_REG, #LPC32XX_EMC_STATUS_BUSY
	beq	2b				@ Branch until idle

	@ Setup self-refresh with support for manual exit of
	@ self-refresh mode
	str	WORK1_REG, [CLKPWRBASE_REG, #LPC32XX_CLKPWR_PWR_CTRL_OFFS]
	orr	WORK2_REG, WORK1_REG, #LPC32XX_CLKPWR_UPD_SDRAM_SELF_RFSH
	str	WORK2_REG, [CLKPWRBASE_REG, #LPC32XX_CLKPWR_PWR_CTRL_OFFS]
	str	WORK1_REG, [CLKPWRBASE_REG, #LPC32XX_CLKPWR_PWR_CTRL_OFFS]

	@ Wait for self-refresh acknowledge, clocks to the DRAM device
	@ will automatically stop on start of self-refresh
3:
	ldr	WORK2_REG, [EMCBASE_REG, #LPC32XX_EMC_STATUS_OFFS]
	and	WORK2_REG, WORK2_REG, #LPC32XX_EMC_STATUS_SELF_RFSH
	cmp	WORK2_REG, #LPC32XX_EMC_STATUS_SELF_RFSH
	bne	3b				@ Branch until self-refresh mode starts

	@ Enter direct-run mode from run mode
	bic	WORK1_REG, WORK1_REG, #LPC32XX_CLKPWR_SELECT_RUN_MODE
	str	WORK1_REG, [CLKPWRBASE_REG, #LPC32XX_CLKPWR_PWR_CTRL_OFFS]

	@ Safe disable of DRAM clock in EMC block, prevents DDR sync
	@ issues on restart
	ldr	SAVED_HCLK_DIV_REG, [CLKPWRBASE_REG,\
		#LPC32XX_CLKPWR_HCLK_DIV_OFFS]
	and	WORK2_REG, SAVED_HCLK_DIV_REG, #CLKPWR_PCLK_DIV_MASK
	str	WORK2_REG, [CLKPWRBASE_REG, #LPC32XX_CLKPWR_HCLK_DIV_OFFS]

	@ Save HCLK PLL state and disable HCLK PLL
	ldr	SAVED_HCLK_PLL_REG, [CLKPWRBASE_REG,\
		#LPC32XX_CLKPWR_HCLKPLL_CTRL_OFFS]
	bic	WORK2_REG, SAVED_HCLK_PLL_REG, #LPC32XX_CLKPWR_HCLKPLL_POWER_UP
	str	WORK2_REG, [CLKPWRBASE_REG, #LPC32XX_CLKPWR_HCLKPLL_CTRL_OFFS]

	@ Enter stop mode until an enabled event occurs
	orr	WORK1_REG, WORK1_REG, #LPC32XX_CLKPWR_STOP_MODE_CTRL
	str	WORK1_REG, [CLKPWRBASE_REG, #LPC32XX_CLKPWR_PWR_CTRL_OFFS]
	.rept	9
	nop
	.endr

	@ Clear stop status
	bic	WORK1_REG, WORK1_REG, #LPC32XX_CLKPWR_STOP_MODE_CTRL

	@ Restore original HCLK PLL value and wait for PLL lock
	str	SAVED_HCLK_PLL_REG, [CLKPWRBASE_REG,\
		#LPC32XX_CLKPWR_HCLKPLL_CTRL_OFFS]
4:
	ldr	WORK2_REG, [CLKPWRBASE_REG, #LPC32XX_CLKPWR_HCLKPLL_CTRL_OFFS]
	ands	WORK2_REG, WORK2_REG, #LPC32XX_CLKPWR_HCLKPLL_PLL_STS
	beq	4b				@ Branch until PLL has locked
						@ (PLL_STS reads 1 when locked)

	@ Re-enter run mode with self-refresh flag cleared, but no DRAM
	@ update yet. DRAM is still in self-refresh
	str	SAVED_PWR_CTRL_REG, [CLKPWRBASE_REG,\
		#LPC32XX_CLKPWR_PWR_CTRL_OFFS]

	@ Restore original DRAM clock mode to restore DRAM clocks
	str	SAVED_HCLK_DIV_REG, [CLKPWRBASE_REG,\
		#LPC32XX_CLKPWR_HCLK_DIV_OFFS]

	@ Clear self-refresh mode
	orr	WORK1_REG, SAVED_PWR_CTRL_REG,\
		#LPC32XX_CLKPWR_UPD_SDRAM_SELF_RFSH
	str	WORK1_REG, [CLKPWRBASE_REG, #LPC32XX_CLKPWR_PWR_CTRL_OFFS]
	str	SAVED_PWR_CTRL_REG, [CLKPWRBASE_REG,\
		#LPC32XX_CLKPWR_PWR_CTRL_OFFS]

	@ Wait for EMC to clear self-refresh mode
5:
	ldr	WORK2_REG, [EMCBASE_REG, #LPC32XX_EMC_STATUS_OFFS]
	ands	WORK2_REG, WORK2_REG, #LPC32XX_EMC_STATUS_SELF_RFSH
	bne	5b				@ Branch until self-refresh has exited

	@ restore regs and return
	adr	r0, tmp_stack
	ldmfd	r0!, {r3 - r7, sp, pc}

reg_bases:
	.long	IO_ADDRESS(LPC32XX_CLK_PM_BASE)
	.long	IO_ADDRESS(LPC32XX_EMC_BASE)

tmp_stack:
	.long	0, 0, 0, 0, 0, 0, 0
tmp_stack_end:
@ Size in bytes of the suspend routine above, so the caller can copy it
@ (code plus its reg_bases/tmp_stack data) into IRAM before running it.
ENTRY(lpc32xx_sys_suspend_sz)
	.word	. - lpc32xx_sys_suspend
|
aixcc-public/challenge-001-exemplar-source
| 3,959
|
arch/arm/nwfpe/entry.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
NetWinder Floating Point Emulator
(c) Rebel.COM, 1998
(c) 1998, 1999 Philip Blundell
Direct questions, comments to Scott Bambrough <scottb@netwinder.org>
*/
#include <asm/assembler.h>
#include <asm/opcodes.h>
/* This is the kernel's entry point into the floating point emulator.
It is called from the kernel with code similar to this:
sub r4, r5, #4
ldrt r0, [r4] @ r0 = instruction
adrsvc al, r9, ret_from_exception @ r9 = normal FP return
adrsvc al, lr, fpundefinstr @ lr = undefined instr return
get_current_task r10
mov r8, #1
strb r8, [r10, #TSK_USED_MATH] @ set current->used_math
add r10, r10, #TSS_FPESAVE @ r10 = workspace
ldr r4, .LC2
ldr pc, [r4] @ Call FP emulator entry point
The kernel expects the emulator to return via one of two possible
points of return it passes to the emulator. The emulator, if
successful in its emulation, jumps to ret_from_exception (passed in
r9) and the kernel takes care of returning control from the trap to
the user code. If the emulator is unable to emulate the instruction,
it returns via _fpundefinstr (passed via lr) and the kernel halts the
user program with a core dump.
On entry to the emulator r10 points to an area of private FP workspace
reserved in the thread structure for this process. This is where the
emulator saves its registers across calls. The first word of this area
is used as a flag to detect the first time a process uses floating point,
so that the emulator startup cost can be avoided for tasks that don't
want it.
This routine does three things:
1) The kernel has created a struct pt_regs on the stack and saved the
user registers into it. See /usr/include/asm/proc/ptrace.h for details.
2) It calls EmulateAll to emulate a floating point instruction.
EmulateAll returns 1 if the emulation was successful, or 0 if not.
3) If an instruction has been emulated successfully, it looks ahead at
the next instruction. If it is a floating point instruction, it
executes the instruction, without returning to user space. In this
way it repeatedly looks ahead and executes floating point instructions
until it encounters a non floating point instruction, at which time it
returns via _fpreturn.
This is done to reduce the effect of the trap overhead on each
floating point instructions. GCC attempts to group floating point
instructions to allow the emulator to spread the cost of the trap over
several floating point instructions. */
#include <asm/asm-offsets.h>
	@ nwfpe_enter: kernel entry to the NetWinder FP emulator.  On entry
	@ r0 = trapped opcode, r9 = success return, lr = failure return,
	@ sp = struct pt_regs.  Emulates the instruction, then keeps
	@ executing subsequent FP instructions inline until a non-FP
	@ instruction is reached (see block comment above).
	.globl	nwfpe_enter
nwfpe_enter:
	mov	r4, lr			@ save the failure-return addresses
	mov	sl, sp			@ we access the registers via 'sl'

	ldr	r5, [sp, #S_PC]		@ get contents of PC;
	mov	r6, r0			@ save the opcode
emulate:
	ldr	r1, [sp, #S_PSR]	@ fetch the PSR
	bl	arm_check_condition	@ check the condition
	cmp	r0, #ARM_OPCODE_CONDTEST_PASS	@ condition passed?

	@ if condition code failed to match, next insn
	bne	next			@ get the next instruction;

	mov	r0, r6			@ prepare for EmulateAll()
	bl	EmulateAll		@ emulate the instruction
	cmp	r0, #0			@ was emulation successful
	reteq	r4			@ no, return failure

next:
	uaccess_enable r3
.Lx1:	ldrt	r6, [r5], #4		@ get the next instruction and
					@ increment PC
	uaccess_disable r3
	@ FP coprocessor opcodes occupy the 0xC-0xE top-nibble space.
	and	r2, r6, #0x0F000000	@ test for FP insns
	teq	r2, #0x0C000000
	teqne	r2, #0x0D000000
	teqne	r2, #0x0E000000
	retne	r9			@ return ok if not a fp insn

	str	r5, [sp, #S_PC]		@ update PC copy in regs
	mov	r0, r6			@ save a copy
	b	emulate			@ check condition and emulate

	@ We need to be prepared for the instructions at .Lx1 and .Lx2
	@ to fault.  Emit the appropriate exception gunk to fix things up.
	@ ??? For some reason, faults can happen at .Lx2 even with a
	@ plain LDR instruction.  Weird, but it seems harmless.
	.pushsection .text.fixup,"ax"
	.align	2
.Lfix:	ret	r9			@ let the user eat segfaults
	.popsection

	.pushsection __ex_table,"a"
	.align	3
	.long	.Lx1, .Lfix
	.popsection
|
aixcc-public/challenge-001-exemplar-source
| 8,777
|
arch/arm/mach-omap1/ams-delta-fiq-handler.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/mach-omap1/ams-delta-fiq-handler.S
*
* Based on linux/arch/arm/lib/floppydma.S
* Renamed and modified to work with 2.6 kernel by Matt Callow
* Copyright (C) 1995, 1996 Russell King
* Copyright (C) 2004 Pete Trapps
* Copyright (C) 2006 Matt Callow
* Copyright (C) 2010 Janusz Krzysztofik
*/
#include <linux/linkage.h>
#include <linux/platform_data/ams-delta-fiq.h>
#include <linux/platform_data/gpio-omap.h>
#include <linux/soc/ti/omap1-io.h>
#include <asm/assembler.h>
#include <asm/irq.h>
#include "hardware.h"
#include "ams-delta-fiq.h"
#include "board-ams-delta.h"
#include "iomap.h"
/*
* OMAP1510 GPIO related symbol copied from arch/arm/mach-omap1/gpio15xx.c.
* Unfortunately, it was not placed in a separate header file.
*/
#define OMAP1510_GPIO_BASE 0xFFFCE000
/* GPIO register bitmasks */
#define KEYBRD_DATA_MASK (0x1 << AMS_DELTA_GPIO_PIN_KEYBRD_DATA)
#define KEYBRD_CLK_MASK (0x1 << AMS_DELTA_GPIO_PIN_KEYBRD_CLK)
#define MODEM_IRQ_MASK (0x1 << AMS_DELTA_GPIO_PIN_MODEM_IRQ)
#define HOOK_SWITCH_MASK (0x1 << AMS_DELTA_GPIO_PIN_HOOK_SWITCH)
#define OTHERS_MASK (MODEM_IRQ_MASK | HOOK_SWITCH_MASK)
/* IRQ handler register bitmasks */
#define DEFERRED_FIQ_MASK OMAP_IRQ_BIT(INT_DEFERRED_FIQ)
#define GPIO_BANK1_MASK OMAP_IRQ_BIT(INT_GPIO_BANK1)
/* Driver buffer byte offsets */
#define BUF_MASK (FIQ_MASK * 4)
#define BUF_STATE (FIQ_STATE * 4)
#define BUF_KEYS_CNT (FIQ_KEYS_CNT * 4)
#define BUF_TAIL_OFFSET (FIQ_TAIL_OFFSET * 4)
#define BUF_HEAD_OFFSET (FIQ_HEAD_OFFSET * 4)
#define BUF_BUF_LEN (FIQ_BUF_LEN * 4)
#define BUF_KEY (FIQ_KEY * 4)
#define BUF_MISSED_KEYS (FIQ_MISSED_KEYS * 4)
#define BUF_BUFFER_START (FIQ_BUFFER_START * 4)
#define BUF_GPIO_INT_MASK (FIQ_GPIO_INT_MASK * 4)
#define BUF_KEYS_HICNT (FIQ_KEYS_HICNT * 4)
#define BUF_IRQ_PEND (FIQ_IRQ_PEND * 4)
#define BUF_SIR_CODE_L1 (FIQ_SIR_CODE_L1 * 4)
#define BUF_SIR_CODE_L2 (IRQ_SIR_CODE_L2 * 4)
#define BUF_CNT_INT_00 (FIQ_CNT_INT_00 * 4)
#define BUF_CNT_INT_KEY (FIQ_CNT_INT_KEY * 4)
#define BUF_CNT_INT_MDM (FIQ_CNT_INT_MDM * 4)
#define BUF_CNT_INT_03 (FIQ_CNT_INT_03 * 4)
#define BUF_CNT_INT_HSW (FIQ_CNT_INT_HSW * 4)
#define BUF_CNT_INT_05 (FIQ_CNT_INT_05 * 4)
#define BUF_CNT_INT_06 (FIQ_CNT_INT_06 * 4)
#define BUF_CNT_INT_07 (FIQ_CNT_INT_07 * 4)
#define BUF_CNT_INT_08 (FIQ_CNT_INT_08 * 4)
#define BUF_CNT_INT_09 (FIQ_CNT_INT_09 * 4)
#define BUF_CNT_INT_10 (FIQ_CNT_INT_10 * 4)
#define BUF_CNT_INT_11 (FIQ_CNT_INT_11 * 4)
#define BUF_CNT_INT_12 (FIQ_CNT_INT_12 * 4)
#define BUF_CNT_INT_13 (FIQ_CNT_INT_13 * 4)
#define BUF_CNT_INT_14 (FIQ_CNT_INT_14 * 4)
#define BUF_CNT_INT_15 (FIQ_CNT_INT_15 * 4)
#define BUF_CIRC_BUFF (FIQ_CIRC_BUFF * 4)
/*
* Register usage
* r8 - temporary
* r9 - the driver buffer
* r10 - temporary
* r11 - interrupts mask
* r12 - base pointers
* r13 - interrupts status
*/
.text
.global qwerty_fiqin_end
@ FIQ handler for the Amstrad Delta keyboard/modem/hook-switch GPIOs.
@ Copied to the FIQ vector area; r8-r13 are FIQ-banked scratch registers
@ and r9 is pre-loaded with the driver buffer address (see register-usage
@ comment above). Communicates results back via the BUF_* buffer slots and
@ a software-raised deferred_fiq IRQ.
ENTRY(qwerty_fiqin_start)
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ FIQ interrupt handler
ldr r12, omap_ih1_base @ set pointer to level1 handler
ldr r11, [r12, #IRQ_MIR_REG_OFFSET] @ fetch interrupts mask
ldr r13, [r12, #IRQ_ITR_REG_OFFSET] @ fetch interrupts status
bics r13, r13, r11 @ clear masked - any left?
beq exit @ none - spurious FIQ? exit
ldr r10, [r12, #IRQ_SIR_FIQ_REG_OFFSET] @ get requested interrupt number
mov r8, #2 @ reset FIQ agreement
str r8, [r12, #IRQ_CONTROL_REG_OFFSET]
cmp r10, #(INT_GPIO_BANK1 - NR_IRQS_LEGACY) @ is it GPIO interrupt?
beq gpio @ yes - process it
mov r8, #1
orr r8, r11, r8, lsl r10 @ mask spurious interrupt
str r8, [r12, #IRQ_MIR_REG_OFFSET]
exit:
subs pc, lr, #4 @ return from FIQ
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@
gpio: @ GPIO bank interrupt handler
ldr r12, omap1510_gpio_base @ set base pointer to GPIO bank
ldr r11, [r12, #OMAP1510_GPIO_INT_MASK] @ fetch GPIO interrupts mask
restart:
ldr r13, [r12, #OMAP1510_GPIO_INT_STATUS] @ fetch status bits
bics r13, r13, r11 @ clear masked - any left?
beq exit @ no - spurious interrupt? exit
orr r11, r11, r13 @ mask all requested interrupts
str r11, [r12, #OMAP1510_GPIO_INT_MASK]
str r13, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack all requested interrupts
ands r10, r13, #KEYBRD_CLK_MASK @ extract keyboard status - set?
beq hksw @ no - try next source
@@@@@@@@@@@@@@@@@@@@@@
@ Keyboard clock FIQ mode interrupt handler
@ r10 now contains KEYBRD_CLK_MASK, use it
bic r11, r11, r10 @ unmask it
str r11, [r12, #OMAP1510_GPIO_INT_MASK]
@ Process keyboard data
ldr r8, [r12, #OMAP1510_GPIO_DATA_INPUT] @ fetch GPIO input
ldr r10, [r9, #BUF_STATE] @ fetch kbd interface state
cmp r10, #0 @ are we expecting start bit?
bne data @ no - go to data processing
ands r8, r8, #KEYBRD_DATA_MASK @ check start bit - detected?
beq hksw @ no - try next source
@ Start bit seen: initialise keycode accumulation state
@ r8 contains KEYBRD_DATA_MASK, use it
str r8, [r9, #BUF_STATE] @ enter data processing state
@ r10 already contains 0, reuse it
str r10, [r9, #BUF_KEY] @ clear keycode
mov r10, #2 @ reset input bit mask
str r10, [r9, #BUF_MASK]
@ Mask other GPIO line interrupts till key done
str r11, [r9, #BUF_GPIO_INT_MASK] @ save mask for later restore
mvn r11, #KEYBRD_CLK_MASK @ prepare all except kbd mask
str r11, [r12, #OMAP1510_GPIO_INT_MASK] @ store into the mask register
b restart @ restart
@ Data bit: shift the current bit into the keycode being collected
data: ldr r10, [r9, #BUF_MASK] @ fetch current input bit mask
@ r8 still contains GPIO input bits
ands r8, r8, #KEYBRD_DATA_MASK @ is keyboard data line low?
ldreq r8, [r9, #BUF_KEY] @ yes - fetch collected so far,
orreq r8, r8, r10 @ set 1 at current mask position
streq r8, [r9, #BUF_KEY] @ and save back
mov r10, r10, lsl #1 @ shift mask left
bics r10, r10, #0x800 @ have we got all the bits?
strne r10, [r9, #BUF_MASK] @ not yet - store the mask
bne restart @ and restart
@ r10 already contains 0, reuse it
str r10, [r9, #BUF_STATE] @ reset state to start
@ Key done - restore interrupt mask
ldr r10, [r9, #BUF_GPIO_INT_MASK] @ fetch saved mask
and r11, r11, r10 @ unmask all saved as unmasked
str r11, [r12, #OMAP1510_GPIO_INT_MASK] @ restore into the mask register
@ Try appending the keycode to the circular buffer
ldr r10, [r9, #BUF_KEYS_CNT] @ get saved keystrokes count
ldr r8, [r9, #BUF_BUF_LEN] @ get buffer size
cmp r10, r8 @ is buffer full?
beq hksw @ yes - key lost, next source
add r10, r10, #1 @ increment keystrokes counter
str r10, [r9, #BUF_KEYS_CNT]
ldr r10, [r9, #BUF_TAIL_OFFSET] @ get buffer tail offset
@ r8 already contains buffer size
cmp r10, r8 @ end of buffer?
moveq r10, #0 @ yes - rewind to buffer start
ldr r12, [r9, #BUF_BUFFER_START] @ get buffer start address
add r12, r12, r10, LSL #2 @ calculate buffer tail address
ldr r8, [r9, #BUF_KEY] @ get last keycode
str r8, [r12] @ append it to the buffer tail
add r10, r10, #1 @ increment buffer tail offset
str r10, [r9, #BUF_TAIL_OFFSET]
ldr r10, [r9, #BUF_CNT_INT_KEY] @ increment interrupts counter
add r10, r10, #1
str r10, [r9, #BUF_CNT_INT_KEY]
@@@@@@@@@@@@@@@@@@@@@@@@
hksw: @ Is hook switch interrupt requested?
tst r13, #HOOK_SWITCH_MASK @ is hook switch status bit set?
beq mdm @ no - try next source
@@@@@@@@@@@@@@@@@@@@@@@@
@ Hook switch interrupt FIQ mode simple handler
@ Don't toggle active edge, the switch always bounces
@ Increment hook switch interrupt counter
ldr r10, [r9, #BUF_CNT_INT_HSW]
add r10, r10, #1
str r10, [r9, #BUF_CNT_INT_HSW]
@@@@@@@@@@@@@@@@@@@@@@@@
mdm: @ Is it a modem interrupt?
tst r13, #MODEM_IRQ_MASK @ is modem status bit set?
beq irq @ no - check for next interrupt
@@@@@@@@@@@@@@@@@@@@@@@@
@ Modem FIQ mode interrupt handler stub
@ Increment modem interrupt counter
ldr r10, [r9, #BUF_CNT_INT_MDM]
add r10, r10, #1
str r10, [r9, #BUF_CNT_INT_MDM]
@@@@@@@@@@@@@@@@@@@@@@@@
irq: @ Place deferred_fiq interrupt request
ldr r12, deferred_fiq_ih_base @ set pointer to IRQ handler
mov r10, #DEFERRED_FIQ_MASK @ set deferred_fiq bit
str r10, [r12, #IRQ_ISR_REG_OFFSET] @ place it in the ISR register
ldr r12, omap1510_gpio_base @ set pointer back to GPIO bank
b restart @ check for next GPIO interrupt
@@@@@@@@@@@@@@@@@@@@@@@@@@@
/*
 * Virtual addresses for IO
 */
omap_ih1_base:
.word OMAP1_IO_ADDRESS(OMAP_IH1_BASE)
deferred_fiq_ih_base:
.word OMAP1_IO_ADDRESS(DEFERRED_FIQ_IH_BASE)
omap1510_gpio_base:
.word OMAP1_IO_ADDRESS(OMAP1510_GPIO_BASE)
qwerty_fiqin_end:
/*
 * Check the size of the FIQ,
 * it cannot go beyond 0xffff0200, and is copied to 0xffff001c
 */
.if (qwerty_fiqin_end - qwerty_fiqin_start) > (0x200 - 0x1c)
.err
.endif
|
aixcc-public/challenge-001-exemplar-source
| 1,437
|
arch/arm/mach-omap1/sram.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/plat-omap/sram-fn.S
*
* Functions that need to be run in internal SRAM
*/
#include <linux/linkage.h>
#include <linux/soc/ti/omap1-io.h>
#include <asm/assembler.h>
#include "hardware.h"
#include "iomap.h"
.text
/*
* Reprograms ULPD and CKCTL.
*/
.align 3
@ omap1_sram_reprogram_clock - reprogram DPLL_CTL and ARM_CKCTL.
@ Runs from internal SRAM because SDRAM timings change under it.
@ In:  r0 = new DPLL_CTL value (bit 4 = lock-mode request)
@      r1 = new ARM_CKCTL value
@ Clobbers nothing for the caller: all registers restored from stack.
ENTRY(omap1_sram_reprogram_clock)
stmfd sp!, {r0 - r12, lr} @ save registers on stack
@ Build the DPLL_CTL virtual address byte-by-byte (immediate encoding)
mov r2, #OMAP1_IO_ADDRESS(DPLL_CTL) & 0xff000000
orr r2, r2, #OMAP1_IO_ADDRESS(DPLL_CTL) & 0x00ff0000
orr r2, r2, #OMAP1_IO_ADDRESS(DPLL_CTL) & 0x0000ff00
@ Same for ARM_CKCTL
mov r3, #OMAP1_IO_ADDRESS(ARM_CKCTL) & 0xff000000
orr r3, r3, #OMAP1_IO_ADDRESS(ARM_CKCTL) & 0x00ff0000
orr r3, r3, #OMAP1_IO_ADDRESS(ARM_CKCTL) & 0x0000ff00
tst r0, #1 << 4 @ want lock mode?
beq newck @ nope
bic r0, r0, #1 << 4 @ else clear lock bit
strh r0, [r2] @ set dpll into bypass mode
orr r0, r0, #1 << 4 @ set lock bit again
newck:
strh r1, [r3] @ write new ckctl value
strh r0, [r2] @ write new dpll value
@ Busy-wait 0x7ff iterations for the clocks to settle
mov r4, #0x0700 @ let the clocks settle
orr r4, r4, #0x00ff
delay: sub r4, r4, #1
cmp r4, #0
bne delay
@ If lock mode was requested, poll DPLL_CTL bit 0 until the PLL locks
lock: ldrh r4, [r2], #0 @ read back dpll value
tst r0, #1 << 4 @ want lock mode?
beq out @ nope
tst r4, #1 << 0 @ dpll rate locked?
beq lock @ try again
out:
ldmfd sp!, {r0 - r12, pc} @ restore regs and return
ENTRY(omap1_sram_reprogram_clock_sz)
.word . - omap1_sram_reprogram_clock
|
aixcc-public/challenge-001-exemplar-source
| 9,335
|
arch/arm/mach-omap1/sleep.S
|
/*
* linux/arch/arm/mach-omap1/sleep.S
*
* Low-level OMAP7XX/1510/1610 sleep/wakeUp support
*
* Initial SA1110 code:
* Copyright (c) 2001 Cliff Brake <cbrake@accelent.com>
*
* Adapted for PXA by Nicolas Pitre:
* Copyright (c) 2002 Monta Vista Software, Inc.
*
* Support for OMAP1510/1610 by Dirk Behme <dirk.behme@de.bosch.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include "hardware.h"
#include "iomap.h"
#include "pm.h"
.text
/*
* Forces OMAP into deep sleep state
*
* omapXXXX_cpu_suspend()
*
* The values of the registers ARM_IDLECT1 and ARM_IDLECT2 are passed
* as arg0 and arg1 from caller. arg0 is stored in register r0 and arg1
* in register r1.
*
* Note: This code get's copied to internal SRAM at boot. When the OMAP
* wakes up it continues execution at the point it went to sleep.
*
* Note: Because of errata work arounds we have processor specific functions
* here. They are mostly the same, but slightly different.
*
*/
#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
.align 3
@ omap7xx_cpu_suspend - put an OMAP730/850 into deep sleep.
@ In:  r0 = ARM_IDLECT1 restore value, r1 = ARM_IDLECT2 restore value
@      (per the header comment above; both must survive the sleep).
@ Execution resumes after the wait-for-interrupt on wakeup.
ENTRY(omap7xx_cpu_suspend)
@ save registers on stack
stmfd sp!, {r0 - r12, lr}
@ Drain write cache
mov r4, #0
@ NOTE(review): r4 is zeroed but r0 is the transferred register here;
@ presumably the value is ignored by the drain operation - confirm
mcr p15, 0, r0, c7, c10, 4
nop
@ load base address of Traffic Controller
mov r6, #TCMIF_ASM_BASE & 0xff000000
orr r6, r6, #TCMIF_ASM_BASE & 0x00ff0000
orr r6, r6, #TCMIF_ASM_BASE & 0x0000ff00
@ prepare to put SDRAM into self-refresh manually
@ r7 keeps the original EMIFF config for restore on wakeup
ldr r7, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
orr r9, r7, #SELF_REFRESH_MODE & 0xff000000
orr r9, r9, #SELF_REFRESH_MODE & 0x000000ff
str r9, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
@ prepare to put EMIFS to Sleep
@ r8 keeps the original EMIFS config for restore on wakeup
ldr r8, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
orr r9, r8, #IDLE_EMIFS_REQUEST & 0xff
str r9, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
@ load base address of ARM_IDLECT1 and ARM_IDLECT2
mov r4, #CLKGEN_REG_ASM_BASE & 0xff000000
orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x00ff0000
orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x0000ff00
@ turn off clock domains
@ do not disable PERCK (0x04)
mov r5, #OMAP7XX_IDLECT2_SLEEP_VAL & 0xff
orr r5, r5, #OMAP7XX_IDLECT2_SLEEP_VAL & 0xff00
strh r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
@ request ARM idle
mov r3, #OMAP7XX_IDLECT1_SLEEP_VAL & 0xff
orr r3, r3, #OMAP7XX_IDLECT1_SLEEP_VAL & 0xff00
strh r3, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
@ disable instruction cache (r9 keeps the original control register)
mrc p15, 0, r9, c1, c0, 0
bic r2, r9, #0x1000
mcr p15, 0, r2, c1, c0, 0
nop
/*
 * Let's wait for the next wake up event to wake us up. r0 can't be
 * used here because r0 holds ARM_IDLECT1
 */
mov r2, #0
mcr p15, 0, r2, c7, c0, 4 @ wait for interrupt
/*
 * omap7xx_cpu_suspend()'s resume point.
 *
 * It will just start executing here, so we'll restore stuff from the
 * stack.
 */
@ re-enable Icache
mcr p15, 0, r9, c1, c0, 0
@ reset the ARM_IDLECT1 and ARM_IDLECT2.
strh r1, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
strh r0, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
@ Restore EMIFF controls
str r7, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
str r8, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
@ restore regs and return
ldmfd sp!, {r0 - r12, pc}
ENTRY(omap7xx_cpu_suspend_sz)
.word . - omap7xx_cpu_suspend
#endif /* CONFIG_ARCH_OMAP730 || CONFIG_ARCH_OMAP850 */
#ifdef CONFIG_ARCH_OMAP15XX
.align 3
@ omap1510_cpu_suspend - put an OMAP1510 into deep sleep.
@ In:  r0 = ARM_IDLECT1 restore value, r1 = ARM_IDLECT2 restore value.
@ Includes the OMAP1510 PDE-bit errata workaround for TC shutdown.
ENTRY(omap1510_cpu_suspend)
@ save registers on stack
stmfd sp!, {r0 - r12, lr}
@ load base address of Traffic Controller
mov r4, #TCMIF_ASM_BASE & 0xff000000
orr r4, r4, #TCMIF_ASM_BASE & 0x00ff0000
orr r4, r4, #TCMIF_ASM_BASE & 0x0000ff00
@ work around errata of OMAP1510 PDE bit for TC shut down
@ clear PDE bit
ldr r5, [r4, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
bic r5, r5, #PDE_BIT & 0xff
str r5, [r4, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
@ set PWD_EN bit
@ NOTE(review): 'and' keeps only PWD_EN_BIT rather than or-ing it in,
@ which does not match the "set" wording - confirm against the errata
@ before changing; left as-is deliberately
and r5, r5, #PWD_EN_BIT & 0xff
str r5, [r4, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
@ prepare to put SDRAM into self-refresh manually
ldr r5, [r4, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
orr r5, r5, #SELF_REFRESH_MODE & 0xff000000
orr r5, r5, #SELF_REFRESH_MODE & 0x000000ff
str r5, [r4, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
@ prepare to put EMIFS to Sleep
ldr r5, [r4, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
orr r5, r5, #IDLE_EMIFS_REQUEST & 0xff
str r5, [r4, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
@ load base address of ARM_IDLECT1 and ARM_IDLECT2
mov r4, #CLKGEN_REG_ASM_BASE & 0xff000000
orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x00ff0000
orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x0000ff00
@ turn off clock domains
mov r5, #OMAP1510_IDLE_CLOCK_DOMAINS & 0xff
orr r5, r5, #OMAP1510_IDLE_CLOCK_DOMAINS & 0xff00
strh r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
@ request ARM idle
mov r3, #OMAP1510_DEEP_SLEEP_REQUEST & 0xff
orr r3, r3, #OMAP1510_DEEP_SLEEP_REQUEST & 0xff00
strh r3, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
@ busy-wait IDLE_WAIT_CYCLES before entering wait-for-interrupt
mov r5, #IDLE_WAIT_CYCLES & 0xff
orr r5, r5, #IDLE_WAIT_CYCLES & 0xff00
l_1510_2:
subs r5, r5, #1
bne l_1510_2
/*
 * Let's wait for the next wake up event to wake us up. r0 can't be
 * used here because r0 holds ARM_IDLECT1
 */
mov r2, #0
mcr p15, 0, r2, c7, c0, 4 @ wait for interrupt
/*
 * omap1510_cpu_suspend()'s resume point.
 *
 * It will just start executing here, so we'll restore stuff from the
 * stack, reset the ARM_IDLECT1 and ARM_IDLECT2.
 */
strh r1, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
strh r0, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
@ restore regs and return
ldmfd sp!, {r0 - r12, pc}
ENTRY(omap1510_cpu_suspend_sz)
.word . - omap1510_cpu_suspend
#endif /* CONFIG_ARCH_OMAP15XX */
#if defined(CONFIG_ARCH_OMAP16XX)
.align 3
@ omap1610_cpu_suspend - put an OMAP1610 into deep sleep.
@ In:  r0 = ARM_IDLECT1 restore value, r1 = ARM_IDLECT2 restore value.
@ Pads the wakeup path with 74 nops per errata HEL3SU467 (see below).
ENTRY(omap1610_cpu_suspend)
@ save registers on stack
stmfd sp!, {r0 - r12, lr}
@ Drain write cache
mov r4, #0
@ NOTE(review): r4 is zeroed but r0 is the transferred register here;
@ presumably the value is ignored by the drain operation - confirm
mcr p15, 0, r0, c7, c10, 4
nop
@ Load base address of Traffic Controller
mov r6, #TCMIF_ASM_BASE & 0xff000000
orr r6, r6, #TCMIF_ASM_BASE & 0x00ff0000
orr r6, r6, #TCMIF_ASM_BASE & 0x0000ff00
@ Prepare to put SDRAM into self-refresh manually
@ r7 keeps the original EMIFF config for restore on wakeup
ldr r7, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
orr r9, r7, #SELF_REFRESH_MODE & 0xff000000
orr r9, r9, #SELF_REFRESH_MODE & 0x000000ff
str r9, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
@ Prepare to put EMIFS to Sleep
@ r8 keeps the original EMIFS config for restore on wakeup
ldr r8, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
orr r9, r8, #IDLE_EMIFS_REQUEST & 0xff
str r9, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
@ Load base address of ARM_IDLECT1 and ARM_IDLECT2
mov r4, #CLKGEN_REG_ASM_BASE & 0xff000000
orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x00ff0000
orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x0000ff00
@ Turn off clock domains
@ Do not disable PERCK (0x04)
mov r5, #OMAP1610_IDLECT2_SLEEP_VAL & 0xff
orr r5, r5, #OMAP1610_IDLECT2_SLEEP_VAL & 0xff00
strh r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
@ Request ARM idle
mov r3, #OMAP1610_IDLECT1_SLEEP_VAL & 0xff
orr r3, r3, #OMAP1610_IDLECT1_SLEEP_VAL & 0xff00
strh r3, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
/*
 * Let's wait for the next wake up event to wake us up. r0 can't be
 * used here because r0 holds ARM_IDLECT1
 */
mov r2, #0
mcr p15, 0, r2, c7, c0, 4 @ wait for interrupt
@ Errata (HEL3SU467, section 1.4.4) specifies nop-instructions
@ according to this formula:
@ 2 + (4*DPLL_MULT)/DPLL_DIV/ARMDIV
@ Max DPLL_MULT = 18
@ DPLL_DIV = 1
@ ARMDIV = 1
@ => 74 nop-instructions
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop @10
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop @20
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop @30
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop @40
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop @50
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop @60
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop @70
nop
nop
nop
nop @74
/*
 * omap1610_cpu_suspend()'s resume point.
 *
 * It will just start executing here, so we'll restore stuff from the
 * stack.
 */
@ Restore the ARM_IDLECT1 and ARM_IDLECT2.
strh r1, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
strh r0, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
@ Restore EMIFF controls
str r7, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
str r8, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
@ Restore regs and return
ldmfd sp!, {r0 - r12, pc}
ENTRY(omap1610_cpu_suspend_sz)
.word . - omap1610_cpu_suspend
#endif /* CONFIG_ARCH_OMAP16XX */
|
aixcc-public/challenge-001-exemplar-source
| 3,086
|
arch/arm/mach-shmobile/headsmp.S
|
/* SPDX-License-Identifier: GPL-2.0
*
* SMP support for R-Mobile / SH-Mobile
*
* Copyright (C) 2010 Magnus Damm
* Copyright (C) 2010 Takashi Yoshii
*
* Based on vexpress, Copyright (c) 2003 ARM Limited, All Rights Reserved
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#define SCTLR_MMU 0x01
#define BOOTROM_ADDRESS 0xE6340000
#define RWTCSRA_ADDRESS 0xE6020004
#define RWTCSRA_WOVF 0x10
/*
* Reset vector for secondary CPUs.
* This will be mapped at address 0 by SBAR register.
* We need _long_ jump to the physical address.
*/
.arm
.align 12
@ shmobile_boot_vector - generic secondary-CPU reset vector.
@ Copied to address 0 (via SBAR); performs a long jump to the physical
@ address patched into shmobile_boot_fn by the kernel before boot.
ENTRY(shmobile_boot_vector)
ldr r1, 1f
bx r1
ENDPROC(shmobile_boot_vector)
.align 2
@ Patched at runtime with the physical boot function address (label 1)
.globl shmobile_boot_fn
shmobile_boot_fn:
1: .space 4
@ Byte size of the vector blob, for the copy to the SBAR page
.globl shmobile_boot_size
shmobile_boot_size:
.long . - shmobile_boot_vector
#ifdef CONFIG_ARCH_RCAR_GEN2
/*
 * Reset vector for R-Car Gen2 and RZ/G1 secondary CPUs.
 * This will be mapped at address 0 by SBAR register.
 */
@ shmobile_boot_vector_gen2 - Gen2 reset vector with watchdog handling:
@ if the boot CPU (MPIDR match) resets with the MMU still enabled and a
@ watchdog overflow (RWTCSRA.WOVF) pending, jump to the boot ROM instead
@ of continuing the normal SMP boot path.
ENTRY(shmobile_boot_vector_gen2)
mrc p15, 0, r0, c0, c0, 5 @ r0 = MPIDR
ldr r1, shmobile_boot_cpu_gen2
cmp r0, r1 @ not the designated boot CPU?
bne shmobile_smp_continue_gen2
mrc p15, 0, r1, c1, c0, 0 @ r1 = SCTLR
and r0, r1, #SCTLR_MMU
cmp r0, #SCTLR_MMU @ MMU disabled (cold boot)?
beq shmobile_smp_continue_gen2
ldr r0, rwtcsra
mov r1, #0
ldrb r1, [r0] @ read watchdog control/status
and r0, r1, #RWTCSRA_WOVF
cmp r0, #RWTCSRA_WOVF @ no watchdog overflow pending?
bne shmobile_smp_continue_gen2
ldr r0, bootrom @ watchdog reset: re-enter boot ROM
bx r0
shmobile_smp_continue_gen2:
ldr r1, shmobile_boot_fn_gen2
bx r1
ENDPROC(shmobile_boot_vector_gen2)
.align 4
@ Literal pool: register/ROM addresses and runtime-patched slots
rwtcsra:
.word RWTCSRA_ADDRESS
bootrom:
.word BOOTROM_ADDRESS
.globl shmobile_boot_cpu_gen2
shmobile_boot_cpu_gen2:
.word 0x00000000
.align 2
.globl shmobile_boot_fn_gen2
shmobile_boot_fn_gen2:
.space 4
.globl shmobile_boot_size_gen2
shmobile_boot_size_gen2:
.long . - shmobile_boot_vector_gen2
#endif /* CONFIG_ARCH_RCAR_GEN2 */
/*
 * Per-CPU SMP boot function/argument selection code based on MPIDR
 */
@ shmobile_smp_boot - look up this CPU's MPIDR in shmobile_smp_mpidr[],
@ then tail-jump to the matching shmobile_smp_fn[] entry with its
@ shmobile_smp_arg[] value in r0. Falls through to sleep when the CPU
@ is not listed or its function slot is still zero.
ENTRY(shmobile_smp_boot)
mrc p15, 0, r1, c0, c0, 5 @ r1 = MPIDR
and r0, r1, #0xffffff @ MPIDR_HWID_BITMASK
@ r0 = cpu_logical_map() value
mov r1, #0 @ r1 = CPU index
adr r2, 1f
ldmia r2, {r5, r6, r7}
add r5, r5, r2 @ array of per-cpu mpidr values
add r6, r6, r2 @ array of per-cpu functions
add r7, r7, r2 @ array of per-cpu arguments
shmobile_smp_boot_find_mpidr:
ldr r8, [r5, r1, lsl #2]
cmp r8, r0 @ does this slot's mpidr match ours?
bne shmobile_smp_boot_next
ldr r9, [r6, r1, lsl #2]
cmp r9, #0 @ function pointer populated yet?
bne shmobile_smp_boot_found
shmobile_smp_boot_next:
add r1, r1, #1
cmp r1, #NR_CPUS
blo shmobile_smp_boot_find_mpidr
b shmobile_smp_sleep @ not found - park the CPU
shmobile_smp_boot_found:
ldr r0, [r7, r1, lsl #2] @ r0 = per-cpu argument
ret r9 @ jump to per-cpu function
ENDPROC(shmobile_smp_boot)
@ shmobile_smp_sleep - park loop: wait for an event, then retry boot.
ENTRY(shmobile_smp_sleep)
wfi
b shmobile_smp_boot
ENDPROC(shmobile_smp_sleep)
.align 2
@ PC-relative offsets to the three per-cpu tables below
1: .long shmobile_smp_mpidr - .
.long shmobile_smp_fn - 1b
.long shmobile_smp_arg - 1b
.bss
.globl shmobile_smp_mpidr
shmobile_smp_mpidr:
.space NR_CPUS * 4
.globl shmobile_smp_fn
shmobile_smp_fn:
.space NR_CPUS * 4
.globl shmobile_smp_arg
shmobile_smp_arg:
.space NR_CPUS * 4
|
aixcc-public/challenge-001-exemplar-source
| 2,398
|
arch/arm/lib/copy_to_user.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/lib/copy_to_user.S
*
* Author: Nicolas Pitre
* Created: Sep 29, 2005
* Copyright: MontaVista Software, Inc.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
/*
* Prototype:
*
* size_t arm_copy_to_user(void *to, const void *from, size_t n)
*
* Purpose:
*
* copy a block to user memory from kernel memory
*
* Params:
*
* to = user memory
* from = kernel memory
* n = number of bytes to copy
*
* Return value:
*
* Number of bytes NOT copied.
*/
@ Kernel-side loads need no fault fixup, hence plain ldr/ldm here;
@ only the user-side stores below are wrapped for exception handling.
#define LDR1W_SHIFT 0
.macro ldr1w ptr reg abort
W(ldr) \reg, [\ptr], #4
.endm
.macro ldr4w ptr reg1 reg2 reg3 reg4 abort
ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4}
.endm
.macro ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8}
.endm
.macro ldr1b ptr reg cond=al abort
ldrb\cond \reg, [\ptr], #1
.endm
#ifdef CONFIG_CPU_USE_DOMAINS
#ifndef CONFIG_THUMB2_KERNEL
#define STR1W_SHIFT 0
#else
#define STR1W_SHIFT 1
#endif
@ Domains enabled: user stores go through strusr (strt-based), so
@ multi-word stores must be emitted one word at a time.
.macro str1w ptr reg abort
strusr \reg, \ptr, 4, abort=\abort
.endm
.macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
str1w \ptr, \reg1, \abort
str1w \ptr, \reg2, \abort
str1w \ptr, \reg3, \abort
str1w \ptr, \reg4, \abort
str1w \ptr, \reg5, \abort
str1w \ptr, \reg6, \abort
str1w \ptr, \reg7, \abort
str1w \ptr, \reg8, \abort
.endm
#else
#define STR1W_SHIFT 0
@ No domains: plain str/stmia with an __ex_table entry via USERL.
.macro str1w ptr reg abort
USERL(\abort, W(str) \reg, [\ptr], #4)
.endm
.macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
USERL(\abort, stmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8})
.endm
#endif /* CONFIG_CPU_USE_DOMAINS */
.macro str1b ptr reg cond=al abort
strusr \reg, \ptr, 1, \cond, abort=\abort
.endm
@ enter/exit: save r0 (dest) and r2 (count) plus a pad word so the
@ fixup path can compute the number of bytes NOT copied.
.macro enter regs:vararg
mov r3, #0
UNWIND( .save {r0, r2, r3, \regs} )
stmdb sp!, {r0, r2, r3, \regs}
.endm
.macro exit regs:vararg
add sp, sp, #8 @ drop saved r2, r3 (r0/r2 only needed on fault)
ldmfd sp!, {r0, \regs}
.endm
.text
@ size_t arm_copy_to_user(void *to, const void *from, size_t n)
@ In:  r0 = to (user), r1 = from (kernel), r2 = n
@ Out: r0 = number of bytes NOT copied (0 on success)
@ Body is instantiated from copy_template.S using the macros above.
ENTRY(__copy_to_user_std)
WEAK(arm_copy_to_user)
#ifdef CONFIG_CPU_SPECTRE
ldr r3, =TASK_SIZE
uaccess_mask_range_ptr r0, r2, r3, ip @ Spectre: clamp user range
#endif
#include "copy_template.S"
ENDPROC(arm_copy_to_user)
ENDPROC(__copy_to_user_std)
.pushsection .text.fixup,"ax"
.align 0
@ Fault path: r1 = original dest, r2 = original count (from 'enter');
@ remaining = n - (current dest - original dest)
copy_abort_preamble
ldmfd sp!, {r1, r2, r3}
sub r0, r0, r1
rsb r0, r0, r2
copy_abort_end
.popsection
|
aixcc-public/challenge-001-exemplar-source
| 1,294
|
arch/arm/lib/clear_user.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/lib/clear_user.S
*
* Copyright (C) 1995, 1996,1997,1998 Russell King
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
.text
/* Prototype: unsigned long arm_clear_user(void *addr, size_t sz)
* Purpose : clear some user memory
* Params : addr - user memory address to clear
* : sz - number of bytes to clear
* Returns : number of bytes NOT cleared
*/
@ unsigned long arm_clear_user(void *addr, size_t sz)
@ In:  r0 = user address, r1 = byte count
@ Out: r0 = number of bytes NOT cleared (0 on success)
@ On a user-access fault, the fixup at 9001 returns the saved count.
ENTRY(__clear_user_std)
WEAK(arm_clear_user)
UNWIND(.fnstart)
UNWIND(.save {r1, lr})
stmfd sp!, {r1, lr}
mov r2, #0 @ r2 = zero value stored throughout
cmp r1, #4
blt 2f @ fewer than 4 bytes: tail path only
@ Align r0 to a word boundary: store 1-3 bytes depending on r0 & 3
ands ip, r0, #3
beq 1f
cmp ip, #2
strusr r2, r0, 1
strusr r2, r0, 1, le
strusr r2, r0, 1, lt
rsb ip, ip, #4
sub r1, r1, ip @ 7 6 5 4 3 2 1
@ Main loop: two words (8 bytes) per iteration while r1 >= 8
1: subs r1, r1, #8 @ -1 -2 -3 -4 -5 -6 -7
strusr r2, r0, 4, pl, rept=2
bpl 1b
adds r1, r1, #4 @ 3 2 1 0 -1 -2 -3
strusr r2, r0, 4, pl
@ Tail: up to 3 remaining bytes
2: tst r1, #2 @ 1x 1x 0x 0x 1x 1x 0x
strusr r2, r0, 1, ne, rept=2
tst r1, #1 @ x1 x0 x1 x0 x1 x0 x1
it ne @ explicit IT needed for the label
USER( strbtne r2, [r0])
mov r0, #0 @ success: no bytes left
ldmfd sp!, {r1, pc}
UNWIND(.fnend)
ENDPROC(arm_clear_user)
ENDPROC(__clear_user_std)
.pushsection .text.fixup,"ax"
.align 0
@ Fault fixup: pop the saved original count into r0 and return it
9001: ldmfd sp!, {r0, pc}
.popsection
|
aixcc-public/challenge-001-exemplar-source
| 2,976
|
arch/arm/lib/csumpartial.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/lib/csumpartial.S
*
* Copyright (C) 1995-1998 Russell King
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
/*
* Function: __u32 csum_partial(const char *src, int len, __u32 sum)
* Params : r0 = buffer, r1 = len, r2 = checksum
* Returns : r0 = new checksum
*/
@ Register aliases (entry pushes {buf, lr}; .Ldone pops them)
buf .req r0
len .req r1
sum .req r2
td0 .req r3
td1 .req r4 @ save before use
td2 .req r5 @ save before use
td3 .req lr
@ Zero-length exit: discard saved buf word, return current sum
.Lzero: mov r0, sum
add sp, sp, #4
ldr pc, [sp], #4
/*
 * Handle 0 to 7 bytes, with any alignment of source and
 * destination pointers. Note that when we get here, C = 0
 */
.Lless8: teq len, #0 @ check for zero count
beq .Lzero
/* we must have at least one byte. */
tst buf, #1 @ odd address?
movne sum, sum, ror #8 @ compensate; undone in .Ldone
ldrbne td0, [buf], #1
subne len, len, #1
adcsne sum, sum, td0, put_byte_1
.Lless4: tst len, #6
beq .Lless8_byte
/* we are now half-word aligned */
.Lless8_wordlp:
#if __LINUX_ARM_ARCH__ >= 4
ldrh td0, [buf], #2
sub len, len, #2
#else
@ Pre-ARMv4: no ldrh, assemble the halfword from two byte loads
ldrb td0, [buf], #1
ldrb td3, [buf], #1
sub len, len, #2
#ifndef __ARMEB__
orr td0, td0, td3, lsl #8
#else
orr td0, td3, td0, lsl #8
#endif
#endif
adcs sum, sum, td0 @ fold halfword + carry into sum
tst len, #6
bne .Lless8_wordlp
.Lless8_byte: tst len, #1 @ odd number of bytes
ldrbne td0, [buf], #1 @ include last byte
adcsne sum, sum, td0, put_byte_0 @ update checksum
.Ldone: adc r0, sum, #0 @ collect up the last carry
ldr td0, [sp], #4
tst td0, #1 @ check buffer alignment
movne r0, r0, ror #8 @ rotate checksum by 8 bits
ldr pc, [sp], #4 @ return
@ Align buf to a 32-bit boundary, folding the skipped bytes into sum.
@ Called via blne from the entry path; returns with ret lr.
.Lnot_aligned: tst buf, #1 @ odd address
ldrbne td0, [buf], #1 @ make even
subne len, len, #1
adcsne sum, sum, td0, put_byte_1 @ update checksum
tst buf, #2 @ 32-bit aligned?
#if __LINUX_ARM_ARCH__ >= 4
ldrhne td0, [buf], #2 @ make 32-bit aligned
subne len, len, #2
#else
ldrbne td0, [buf], #1
ldrbne ip, [buf], #1
subne len, len, #2
#ifndef __ARMEB__
orrne td0, td0, ip, lsl #8
#else
orrne td0, ip, td0, lsl #8
#endif
#endif
adcsne sum, sum, td0 @ update checksum
ret lr
@ __u32 csum_partial(const char *src, int len, __u32 sum)
@ In:  r0 = buffer, r1 = len, r2 = initial checksum
@ Out: r0 = updated ones-complement partial checksum
ENTRY(csum_partial)
stmfd sp!, {buf, lr} @ saved buf alignment is re-read in .Ldone
cmp len, #8 @ Ensure that we have at least
blo .Lless8 @ 8 bytes to copy.
tst buf, #1
movne sum, sum, ror #8 @ odd start: pre-rotate, undone in .Ldone
adds sum, sum, #0 @ C = 0
tst buf, #3 @ Test destination alignment
blne .Lnot_aligned @ align destination, return here
@ Main loop: 32 bytes per iteration with carry folded via adcs
1: bics ip, len, #31
beq 3f
stmfd sp!, {r4 - r5}
2: ldmia buf!, {td0, td1, td2, td3}
adcs sum, sum, td0
adcs sum, sum, td1
adcs sum, sum, td2
adcs sum, sum, td3
ldmia buf!, {td0, td1, td2, td3}
adcs sum, sum, td0
adcs sum, sum, td1
adcs sum, sum, td2
adcs sum, sum, td3
sub ip, ip, #32
teq ip, #0 @ teq, not cmp: must not clobber carry
bne 2b
ldmfd sp!, {r4 - r5}
@ Then 4 bytes at a time for the 4..28 byte remainder
3: tst len, #0x1c @ should not change C
beq .Lless4
4: ldr td0, [buf], #4
sub len, len, #4
adcs sum, sum, td0
tst len, #0x1c
bne 4b
b .Lless4
ENDPROC(csum_partial)
|
aixcc-public/challenge-001-exemplar-source
| 7,226
|
arch/arm/lib/backtrace-clang.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/lib/backtrace-clang.S
*
* Copyright (C) 2019 Nathan Huckleberry
*
*/
#include <linux/kern_levels.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
/* fp is 0 or stack frame */
#define frame r4
#define sv_fp r5
#define sv_pc r6
#define mask r7
#define sv_lr r8
#define loglvl r9
ENTRY(c_backtrace)
#if !defined(CONFIG_FRAME_POINTER) || !defined(CONFIG_PRINTK)
ret lr
ENDPROC(c_backtrace)
#else
/*
* Clang does not store pc or sp in function prologues so we don't know exactly
* where the function starts.
*
* We can treat the current frame's lr as the saved pc and the preceding
* frame's lr as the current frame's lr, but we can't trace the most recent
* call. Inserting a false stack frame allows us to reference the function
* called last in the stacktrace.
*
* If the call instruction was a bl we can look at the callers branch
* instruction to calculate the saved pc. We can recover the pc in most cases,
* but in cases such as calling function pointers we cannot. In this case,
* default to using the lr. This will be some address in the function, but will
* not be the function start.
*
* Unfortunately due to the stack frame layout we can't dump r0 - r3, but these
* are less frequently saved.
*
* Stack frame layout:
* <larger addresses>
* saved lr
* frame=> saved fp
* optionally saved caller registers (r4 - r10)
* optionally saved arguments (r0 - r3)
* <top of stack frame>
* <smaller addresses>
*
* Functions start with the following code sequence:
* corrected pc => stmfd sp!, {..., fp, lr}
* add fp, sp, #x
* stmfd sp!, {r0 - r3} (optional)
*
*
*
*
*
*
* The diagram below shows an example stack setup for dump_stack.
*
* The frame for c_backtrace has pointers to the code of dump_stack. This is
* why the frame of c_backtrace is used to for the pc calculation of
* dump_stack. This is why we must move back a frame to print dump_stack.
*
* The stored locals for dump_stack are in dump_stack's frame. This means that
* to fully print dump_stack's frame we need both the frame for dump_stack (for
* locals) and the frame that was called by dump_stack (for pc).
*
* To print locals we must know where the function start is. If we read the
* function prologue opcodes we can determine which variables are stored in the
* stack frame.
*
* To find the function start of dump_stack we can look at the stored LR of
* show_stack. It points at the instruction directly after the bl dump_stack.
* We can then read the offset from the bl opcode to determine where the branch
* takes us. The address calculated must be the start of dump_stack.
*
* c_backtrace frame dump_stack:
* {[LR] } ============| ...
* {[FP] } =======| | bl c_backtrace
* | |=> ...
* {[R4-R10]} |
* {[R0-R3] } | show_stack:
* dump_stack frame | ...
* {[LR] } =============| bl dump_stack
* {[FP] } <=======| |=> ...
* {[R4-R10]}
* {[R0-R3] }
*/
stmfd sp!, {r4 - r9, fp, lr} @ Save an extra register
@ to ensure 8 byte alignment
movs frame, r0 @ if frame pointer is zero
beq no_frame @ we have no stack frames
mov loglvl, r2
tst r1, #0x10 @ 26 or 32-bit mode?
moveq mask, #0xfc000003
movne mask, #0 @ mask for 32-bit
/*
* Switches the current frame to be the frame for dump_stack.
*/
add frame, sp, #24 @ switch to false frame
for_each_frame: tst frame, mask @ Check for address exceptions
bne no_frame
/*
* sv_fp is the stack frame with the locals for the current considered
* function.
*
* sv_pc is the saved lr frame the frame above. This is a pointer to a code
* address within the current considered function, but it is not the function
* start. This value gets updated to be the function start later if it is
* possible.
*/
1001: ldr sv_pc, [frame, #4] @ get saved 'pc'
1002: ldr sv_fp, [frame, #0] @ get saved fp
teq sv_fp, mask @ make sure next frame exists
beq no_frame
/*
* sv_lr is the lr from the function that called the current function. This is
* a pointer to a code address in the current function's caller. sv_lr-4 is
* the instruction used to call the current function.
*
* This sv_lr can be used to calculate the function start if the function was
* called using a bl instruction. If the function start can be recovered sv_pc
* is overwritten with the function start.
*
* If the current function was called using a function pointer we cannot
* recover the function start and instead continue with sv_pc as an arbitrary
* value within the current function. If this is the case we cannot print
* registers for the current function, but the stacktrace is still printed
* properly.
*/
1003: ldr sv_lr, [sv_fp, #4] @ get saved lr from next frame
1004: ldr r0, [sv_lr, #-4] @ get call instruction
ldr r3, .Lopcode+4
and r2, r3, r0 @ is this a bl call
teq r2, r3
bne finished_setup @ give up if it's not
and r0, #0xffffff @ get call offset 24-bit int
lsl r0, r0, #8 @ sign extend offset
asr r0, r0, #8
ldr sv_pc, [sv_fp, #4] @ get lr address
add sv_pc, sv_pc, #-4 @ get call instruction address
add sv_pc, sv_pc, #8 @ take care of prefetch
add sv_pc, sv_pc, r0, lsl #2@ find function start
finished_setup:
bic sv_pc, sv_pc, mask @ mask PC/LR for the mode
/*
* Print the function (sv_pc) and where it was called from (sv_lr).
*/
mov r0, sv_pc
mov r1, sv_lr
mov r2, frame
bic r1, r1, mask @ mask PC/LR for the mode
mov r3, loglvl
bl dump_backtrace_entry
/*
* Test if the function start is a stmfd instruction to determine which
* registers were stored in the function prologue.
*
* If we could not recover the sv_pc because we were called through a function
* pointer the comparison will fail and no registers will print. Unwinding will
* continue as if there had been no registers stored in this frame.
*/
1005: ldr r1, [sv_pc, #0] @ if stmfd sp!, {..., fp, lr}
ldr r3, .Lopcode @ instruction exists,
teq r3, r1, lsr #11
ldr r0, [frame] @ locals are stored in
@ the preceding frame
subeq r0, r0, #4
mov r2, loglvl
bleq dump_backtrace_stm @ dump saved registers
/*
* If we are out of frames or if the next frame is invalid.
*/
teq sv_fp, #0 @ zero saved fp means
beq no_frame @ no further frames
cmp sv_fp, frame @ next frame must be
mov frame, sv_fp @ above the current frame
#ifdef CONFIG_IRQSTACKS
@
@ Kernel stacks may be discontiguous in memory. If the next
@ frame is below the previous frame, accept it as long as it
@ lives in kernel memory.
@
cmpls sv_fp, #PAGE_OFFSET
#endif
bhi for_each_frame
1006: adr r0, .Lbad
mov r1, loglvl
mov r2, frame
bl _printk
no_frame: ldmfd sp!, {r4 - r9, fp, pc}
ENDPROC(c_backtrace)
.pushsection __ex_table,"a"
.align 3
.long 1001b, 1006b
.long 1002b, 1006b
.long 1003b, 1006b
.long 1004b, finished_setup
.long 1005b, 1006b
.popsection
.Lbad: .asciz "%sBacktrace aborted due to bad frame pointer <%p>\n"
.align
.Lopcode: .word 0xe92d4800 >> 11 @ stmfd sp!, {... fp, lr}
.word 0x0b000000 @ bl if these bits are set
#endif
|
aixcc-public/challenge-001-exemplar-source
| 3,912
|
arch/arm/lib/div64.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/lib/div64.S
*
* Optimized computation of 64-bit dividend / 32-bit divisor
*
* Author: Nicolas Pitre
* Created: Oct 5, 2003
* Copyright: Monta Vista Software, Inc.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#ifdef __ARMEB__
#define xh r0
#define xl r1
#define yh r2
#define yl r3
#else
#define xl r0
#define xh r1
#define yl r2
#define yh r3
#endif
/*
* __do_div64: perform a division with 64-bit dividend and 32-bit divisor.
*
* Note: Calling convention is totally non standard for optimal code.
* This is meant to be used by do_div() from include/asm/div64.h only.
*
* Input parameters:
* xh-xl = dividend (clobbered)
* r4 = divisor (preserved)
*
* Output values:
* yh-yl = result
* xh = remainder
*
* Clobbered regs: xl, ip
*/
ENTRY(__do_div64)
UNWIND(.fnstart)
@ Contract (see header above): xh:xl = 64-bit dividend, r4 = 32-bit divisor.
@ Returns yh:yl = quotient, xh = remainder. r4 preserved; clobbers xl, ip.
@ Test for easy paths first.
subs ip, r4, #1
bls 9f @ divisor is 0 or 1
tst ip, r4
beq 8f @ divisor is power of 2
@ See if we need to handle upper 32-bit result.
cmp xh, r4
mov yh, #0
blo 3f
@ Align divisor with upper part of dividend.
@ The aligned divisor is stored in yl preserving the original.
@ The bit position is stored in ip.
#if __LINUX_ARM_ARCH__ >= 5
clz yl, r4
clz ip, xh
sub yl, yl, ip
mov ip, #1
mov ip, ip, lsl yl
mov yl, r4, lsl yl
#else
@ No clz before ARMv5: align by shifting left one bit at a time.
mov yl, r4
mov ip, #1
1: cmp yl, #0x80000000
cmpcc yl, xh
movcc yl, yl, lsl #1
movcc ip, ip, lsl #1
bcc 1b
#endif
@ The division loop for needed upper bit positions.
@ Break out early if dividend reaches 0.
2: cmp xh, yl
orrcs yh, yh, ip @ set this quotient bit when divisor fits
subscs xh, xh, yl
movsne ip, ip, lsr #1
mov yl, yl, lsr #1
bne 2b
@ See if we need to handle lower 32-bit result.
3: cmp xh, #0
mov yl, #0
cmpeq xl, r4
movlo xh, xl @ dividend < divisor: remainder = xl, quotient low = 0
retlo lr
@ The division loop for lower bit positions.
@ Here we shift remainder bits leftwards rather than moving the
@ divisor for comparisons, considering the carry-out bit as well.
mov ip, #0x80000000
4: movs xl, xl, lsl #1
adcs xh, xh, xh
beq 6f
cmpcc xh, r4
5: orrcs yl, yl, ip
subcs xh, xh, r4
movs ip, ip, lsr #1
bne 4b
ret lr
@ The top part of remainder became zero. If carry is set
@ (the 33rd bit) this is a false positive so resume the loop.
@ Otherwise, if lower part is also null then we are done.
6: bcs 5b
cmp xl, #0
reteq lr
@ We still have remainder bits in the low part. Bring them up.
#if __LINUX_ARM_ARCH__ >= 5
clz xh, xl @ we know xh is zero here so...
add xh, xh, #1
mov xl, xl, lsl xh
mov ip, ip, lsr xh
#else
7: movs xl, xl, lsl #1
mov ip, ip, lsr #1
bcc 7b
#endif
@ Current remainder is now 1. It is worthless to compare with
@ divisor at this point since divisor can not be smaller than 3 here.
@ If possible, branch for another shift in the division loop.
@ If no bit position left then we are done.
movs ip, ip, lsr #1
mov xh, #1
bne 4b
ret lr
8: @ Division by a power of 2: determine what that divisor order is
@ then simply shift values around
#if __LINUX_ARM_ARCH__ >= 5
clz ip, r4
rsb ip, ip, #31
#endif needs care -- see below
|
aixcc-public/challenge-001-exemplar-source
| 6,834
|
arch/arm/lib/csumpartialcopygeneric.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/lib/csumpartialcopygeneric.S
*
* Copyright (C) 1995-2001 Russell King
*/
#include <asm/assembler.h>
/*
* unsigned int
* csum_partial_copy_xxx(const char *src, char *dst, int len, int sum, )
* r0 = src, r1 = dst, r2 = len, r3 = sum
* Returns : r0 = checksum
*
* Note that 'tst' and 'teq' preserve the carry flag.
*/
@ Register aliases for readability; fixed by the C prototype in the header
@ comment above (r0 = src, r1 = dst, r2 = len, r3 = sum).
@ NOTE(review): load1b/load2b/load1l/load2l/load4l, save_regs/load_regs,
@ FN_ENTRY/FN_EXIT and the put_byte_*/get_byte_*/lspush/lspull operand
@ macros are supplied by the including file / asm/assembler.h -- confirm
@ against the includer.
src .req r0
dst .req r1
len .req r2
sum .req r3
@ Nothing (left) to copy: the accumulated checksum is the result.
.Lzero: mov r0, sum
load_regs
/*
* Align an unaligned destination pointer. We know that
* we have >= 8 bytes here, so we don't need to check
* the length. Note that the source pointer hasn't been
* aligned yet.
*/
.Ldst_unaligned:
tst dst, #1
beq .Ldst_16bit
load1b ip
sub len, len, #1
adcs sum, sum, ip, put_byte_1 @ update checksum
strb ip, [dst], #1
tst dst, #2
reteq lr @ dst is now 32bit aligned
.Ldst_16bit: load2b r8, ip
sub len, len, #2
adcs sum, sum, r8, put_byte_0
strb r8, [dst], #1
adcs sum, sum, ip, put_byte_1
strb ip, [dst], #1
ret lr @ dst is now 32bit aligned
/*
* Handle 0 to 7 bytes, with any alignment of source and
* destination pointers. Note that when we get here, C = 0
*/
.Lless8: teq len, #0 @ check for zero count
beq .Lzero
/* we must have at least one byte. */
tst dst, #1 @ dst 16-bit aligned
beq .Lless8_aligned
/* Align dst */
load1b ip
sub len, len, #1
adcs sum, sum, ip, put_byte_1 @ update checksum
strb ip, [dst], #1
tst len, #6
beq .Lless8_byteonly
@ Copy and checksum the remaining halfwords (2 to 6 bytes).
1: load2b r8, ip
sub len, len, #2
adcs sum, sum, r8, put_byte_0
strb r8, [dst], #1
adcs sum, sum, ip, put_byte_1
strb ip, [dst], #1
.Lless8_aligned:
tst len, #6
bne 1b
.Lless8_byteonly:
tst len, #1
beq .Ldone
load1b r8
adcs sum, sum, r8, put_byte_0 @ update checksum
strb r8, [dst], #1
b .Ldone
@ Actual entry point of the function (cold paths above are branch targets).
FN_ENTRY
save_regs
mov sum, #-1
cmp len, #8 @ Ensure that we have at least
blo .Lless8 @ 8 bytes to copy.
adds sum, sum, #0 @ C = 0
tst dst, #3 @ Test destination alignment
blne .Ldst_unaligned @ align destination, return here
/*
* Ok, the dst pointer is now 32bit aligned, and we know
* that we must have more than 4 bytes to copy. Note
* that C contains the carry from the dst alignment above.
*/
tst src, #3 @ Test source alignment
bne .Lsrc_not_aligned
/* Routine for src & dst aligned */
@ Main loop: 16 bytes per iteration while ip = len & ~15 remains.
bics ip, len, #15
beq 2f
1: load4l r4, r5, r6, r7
stmia dst!, {r4, r5, r6, r7}
adcs sum, sum, r4
adcs sum, sum, r5
adcs sum, sum, r6
adcs sum, sum, r7
sub ip, ip, #16
teq ip, #0
bne 1b
@ Tail: handle the remaining 4/8/12 whole words, then 0-3 bytes.
2: ands ip, len, #12
beq 4f
tst ip, #8
beq 3f
load2l r4, r5
stmia dst!, {r4, r5}
adcs sum, sum, r4
adcs sum, sum, r5
tst ip, #4
beq 4f
3: load1l r4
str r4, [dst], #4
adcs sum, sum, r4
4: ands len, len, #3
beq .Ldone
load1l r4
tst len, #2
mov r5, r4, get_byte_0
beq .Lexit
adcs sum, sum, r4, lspush #16
strb r5, [dst], #1
mov r5, r4, get_byte_1
strb r5, [dst], #1
mov r5, r4, get_byte_2
.Lexit: tst len, #1
strbne r5, [dst], #1
andne r5, r5, #255
adcsne sum, sum, r5, put_byte_0
/*
* If the dst pointer was not 16-bit aligned, we
* need to rotate the checksum here to get around
* the inefficient byte manipulations in the
* architecture independent code.
*/
.Ldone: adc r0, sum, #0
ldr sum, [sp, #0] @ dst
tst sum, #1
movne r0, r0, ror #8
load_regs
@ src is not word aligned: read whole words from the rounded-down src and
@ reassemble the misaligned stream with lspull/lspush shifts. Three copies
@ of the loop follow, one per possible byte offset (1, 2, 3).
.Lsrc_not_aligned:
adc sum, sum, #0 @ include C from dst alignment
and ip, src, #3
bic src, src, #3
load1l r5
cmp ip, #2
beq .Lsrc2_aligned
bhi .Lsrc3_aligned
@ src offset 1: carry 24 bits from each word into the next.
mov r4, r5, lspull #8 @ C = 0
bics ip, len, #15
beq 2f
1: load4l r5, r6, r7, r8
orr r4, r4, r5, lspush #24
mov r5, r5, lspull #8
orr r5, r5, r6, lspush #24
mov r6, r6, lspull #8
orr r6, r6, r7, lspush #24
mov r7, r7, lspull #8
orr r7, r7, r8, lspush #24
stmia dst!, {r4, r5, r6, r7}
adcs sum, sum, r4
adcs sum, sum, r5
adcs sum, sum, r6
adcs sum, sum, r7
mov r4, r8, lspull #8
sub ip, ip, #16
teq ip, #0
bne 1b
2: ands ip, len, #12
beq 4f
tst ip, #8
beq 3f
load2l r5, r6
orr r4, r4, r5, lspush #24
mov r5, r5, lspull #8
orr r5, r5, r6, lspush #24
stmia dst!, {r4, r5}
adcs sum, sum, r4
adcs sum, sum, r5
mov r4, r6, lspull #8
tst ip, #4
beq 4f
3: load1l r5
orr r4, r4, r5, lspush #24
str r4, [dst], #4
adcs sum, sum, r4
mov r4, r5, lspull #8
4: ands len, len, #3
beq .Ldone
mov r5, r4, get_byte_0
tst len, #2
beq .Lexit
adcs sum, sum, r4, lspush #16
strb r5, [dst], #1
mov r5, r4, get_byte_1
strb r5, [dst], #1
mov r5, r4, get_byte_2
b .Lexit
@ src offset 2: carry 16 bits from each word into the next.
.Lsrc2_aligned: mov r4, r5, lspull #16
adds sum, sum, #0
bics ip, len, #15
beq 2f
1: load4l r5, r6, r7, r8
orr r4, r4, r5, lspush #16
mov r5, r5, lspull #16
orr r5, r5, r6, lspush #16
mov r6, r6, lspull #16
orr r6, r6, r7, lspush #16
mov r7, r7, lspull #16
orr r7, r7, r8, lspush #16
stmia dst!, {r4, r5, r6, r7}
adcs sum, sum, r4
adcs sum, sum, r5
adcs sum, sum, r6
adcs sum, sum, r7
mov r4, r8, lspull #16
sub ip, ip, #16
teq ip, #0
bne 1b
2: ands ip, len, #12
beq 4f
tst ip, #8
beq 3f
load2l r5, r6
orr r4, r4, r5, lspush #16
mov r5, r5, lspull #16
orr r5, r5, r6, lspush #16
stmia dst!, {r4, r5}
adcs sum, sum, r4
adcs sum, sum, r5
mov r4, r6, lspull #16
tst ip, #4
beq 4f
3: load1l r5
orr r4, r4, r5, lspush #16
str r4, [dst], #4
adcs sum, sum, r4
mov r4, r5, lspull #16
4: ands len, len, #3
beq .Ldone
mov r5, r4, get_byte_0
tst len, #2
beq .Lexit
adcs sum, sum, r4
strb r5, [dst], #1
mov r5, r4, get_byte_1
strb r5, [dst], #1
tst len, #1
beq .Ldone
load1b r5
b .Lexit
@ src offset 3: carry 8 bits from each word into the next.
.Lsrc3_aligned: mov r4, r5, lspull #24
adds sum, sum, #0
bics ip, len, #15
beq 2f
1: load4l r5, r6, r7, r8
orr r4, r4, r5, lspush #8
mov r5, r5, lspull #24
orr r5, r5, r6, lspush #8
mov r6, r6, lspull #24
orr r6, r6, r7, lspush #8
mov r7, r7, lspull #24
orr r7, r7, r8, lspush #8
stmia dst!, {r4, r5, r6, r7}
adcs sum, sum, r4
adcs sum, sum, r5
adcs sum, sum, r6
adcs sum, sum, r7
mov r4, r8, lspull #24
sub ip, ip, #16
teq ip, #0
bne 1b
2: ands ip, len, #12
beq 4f
tst ip, #8
beq 3f
load2l r5, r6
orr r4, r4, r5, lspush #8
mov r5, r5, lspull #24
orr r5, r5, r6, lspush #8
stmia dst!, {r4, r5}
adcs sum, sum, r4
adcs sum, sum, r5
mov r4, r6, lspull #24
tst ip, #4
beq 4f
3: load1l r5
orr r4, r4, r5, lspush #8
str r4, [dst], #4
adcs sum, sum, r4
mov r4, r5, lspull #24
4: ands len, len, #3
beq .Ldone
mov r5, r4, get_byte_0
tst len, #2
beq .Lexit
strb r5, [dst], #1
adcs sum, sum, r4
load1l r4
mov r5, r4, get_byte_0
strb r5, [dst], #1
adcs sum, sum, r4, lspush #24
mov r5, r4, get_byte_1
b .Lexit
FN_EXIT
|
aixcc-public/challenge-001-exemplar-source
| 3,440
|
arch/arm/lib/backtrace.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/lib/backtrace.S
*
* Copyright (C) 1995, 1996 Russell King
*
* 27/03/03 Ian Molton Clean up CONFIG_CPU
*/
#include <linux/kern_levels.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
@ fp is 0 or stack frame
#define frame r4
#define sv_fp r5
#define sv_pc r6
#define mask r7
#define offset r8
#define loglvl r9
ENTRY(c_backtrace)
#if !defined(CONFIG_FRAME_POINTER) || !defined(CONFIG_PRINTK)
@ Without frame pointers or printk there is nothing useful to walk/print.
ret lr
ENDPROC(c_backtrace)
#else
@ Inputs: r0 = frame pointer (0 = none), r1 = PSR (bit 4 selects 26/32-bit
@ mode masking), r2 = printk log level passed through to the dump helpers.
stmfd sp!, {r4 - r9, lr} @ Save an extra register so we have a location...
movs frame, r0 @ if frame pointer is zero
beq no_frame @ we have no stack frames
mov loglvl, r2
tst r1, #0x10 @ 26 or 32-bit mode?
ARM( moveq mask, #0xfc000003 )
THUMB( moveq mask, #0xfc000000 )
THUMB( orreq mask, #0x03 )
movne mask, #0 @ mask for 32-bit
@ Measure at run time how far ahead of the stmfd instruction this CPU
@ stores the PC, so saved 'pc' values can be corrected below.
1: stmfd sp!, {pc} @ calculate offset of PC stored
ldr r0, [sp], #4 @ by stmfd for this CPU
adr r1, 1b
sub offset, r0, r1
/*
* Stack frame layout:
* optionally saved caller registers (r4 - r10)
* saved fp
* saved sp
* saved lr
* frame => saved pc
* optionally saved arguments (r0 - r3)
* saved sp => <next word>
*
* Functions start with the following code sequence:
* mov ip, sp
* stmfd sp!, {r0 - r3} (optional)
* corrected pc => stmfd sp!, {..., fp, ip, lr, pc}
*/
for_each_frame: tst frame, mask @ Check for address exceptions
bne no_frame
@ 1001-1004 are faulting loads covered by the __ex_table entries below;
@ a bad pointer diverts to 1006 (abort message) instead of oopsing.
1001: ldr sv_pc, [frame, #0] @ get saved pc
1002: ldr sv_fp, [frame, #-12] @ get saved fp
sub sv_pc, sv_pc, offset @ Correct PC for prefetching
bic sv_pc, sv_pc, mask @ mask PC/LR for the mode
1003: ldr r2, [sv_pc, #-4] @ if stmfd sp!, {args} exists,
ldr r3, .Ldsi+4 @ adjust saved 'pc' back one
teq r3, r2, lsr #11 @ instruction
subne r0, sv_pc, #4 @ allow for mov
subeq r0, sv_pc, #8 @ allow for mov + stmia
ldr r1, [frame, #-4] @ get saved lr
mov r2, frame
bic r1, r1, mask @ mask PC/LR for the mode
mov r3, loglvl
bl dump_backtrace_entry
@ If the function saved its incoming arguments, dump them too.
ldr r1, [sv_pc, #-4] @ if stmfd sp!, {args} exists,
ldr r3, .Ldsi+4
teq r3, r1, lsr #11
ldreq r0, [frame, #-8] @ get sp
subeq r0, r0, #4 @ point at the last arg
mov r2, loglvl
bleq dump_backtrace_stm @ dump saved registers
1004: ldr r1, [sv_pc, #0] @ if stmfd sp!, {..., fp, ip, lr, pc}
ldr r3, .Ldsi @ instruction exists,
teq r3, r1, lsr #11
subeq r0, frame, #16
mov r2, loglvl
bleq dump_backtrace_stm @ dump saved registers
teq sv_fp, #0 @ zero saved fp means
beq no_frame @ no further frames
cmp sv_fp, frame @ next frame must be
mov frame, sv_fp @ above the current frame
#ifdef CONFIG_IRQSTACKS
@
@ Kernel stacks may be discontiguous in memory. If the next
@ frame is below the previous frame, accept it as long as it
@ lives in kernel memory.
@
cmpls sv_fp, #PAGE_OFFSET
#endif
bhi for_each_frame
1006: adr r0, .Lbad
mov r1, loglvl
mov r2, frame
bl _printk
no_frame: ldmfd sp!, {r4 - r9, pc}
ENDPROC(c_backtrace)
@ Fixup table: any fault in the loads at 1001-1004 resumes at 1006.
.pushsection __ex_table,"a"
.align 3
.long 1001b, 1006b
.long 1002b, 1006b
.long 1003b, 1006b
.long 1004b, 1006b
.popsection
.Lbad: .asciz "%sBacktrace aborted due to bad frame pointer <%p>\n"
.align
@ Opcode templates, pre-shifted by 11 so the register list bits are ignored
@ when matched against instruction words above.
.Ldsi: .word 0xe92dd800 >> 11 @ stmfd sp!, {... fp, ip, lr, pc}
.word 0xe92d0000 >> 11 @ stmfd sp!, {}
#endif
|
aixcc-public/challenge-001-exemplar-source
| 6,377
|
arch/arm/lib/copy_template.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/lib/copy_template.s
*
* Code template for optimized memory copy functions
*
* Author: Nicolas Pitre
* Created: Sep 28, 2005
* Copyright: MontaVista Software, Inc.
*/
/*
* Theory of operation
* -------------------
*
* This file provides the core code for a forward memory copy used in
* the implementation of memcopy(), copy_to_user() and copy_from_user().
*
* The including file must define the following accessor macros
* according to the need of the given function:
*
* ldr1w ptr reg abort
*
* This loads one word from 'ptr', stores it in 'reg' and increments
* 'ptr' to the next word. The 'abort' argument is used for fixup tables.
*
* ldr4w ptr reg1 reg2 reg3 reg4 abort
* ldr8w ptr, reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
*
* This loads four or eight words starting from 'ptr', stores them
* in provided registers and increments 'ptr' past those words.
* The'abort' argument is used for fixup tables.
*
* ldr1b ptr reg cond abort
*
* Similar to ldr1w, but it loads a byte and increments 'ptr' one byte.
* It also must apply the condition code if provided, otherwise the
* "al" condition is assumed by default.
*
* str1w ptr reg abort
* str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
* str1b ptr reg cond abort
*
* Same as their ldr* counterparts, but data is stored to 'ptr' location
* rather than being loaded.
*
* enter reg1 reg2
*
* Preserve the provided registers on the stack plus any additional
* data as needed by the implementation including this code. Called
* upon code entry.
*
* usave reg1 reg2
*
* Unwind annotation macro is corresponding for 'enter' macro.
* It tell unwinder that preserved some provided registers on the stack
* and additional data by a prior 'enter' macro.
*
* exit reg1 reg2
*
* Restore registers with the values previously saved with the
* 'preserv' macro. Called upon code termination.
*
* LDR1W_SHIFT
* STR1W_SHIFT
*
* Correction to be applied to the "ip" register when branching into
* the ldr1w or str1w instructions (some of these macros may expand to
* than one 32bit instruction in Thumb-2)
*/
@ Template body: r0 = destination, r1 = source, r2 = byte count (see the
@ accessor-macro contract in the header comment above). Both pointers
@ advance as the copy proceeds; r2 counts down.
UNWIND( .fnstart )
enter r4, UNWIND(fpreg,) lr
UNWIND( .setfp fpreg, sp )
UNWIND( mov fpreg, sp )
subs r2, r2, #4
blt 8f @ fewer than 4 bytes: byte path
ands ip, r0, #3
PLD( pld [r1, #0] )
bne 9f @ align destination first
ands ip, r1, #3
bne 10f @ source misaligned: shifting copy
@ Both pointers word aligned: bulk 32-bytes-per-iteration loop.
1: subs r2, r2, #(28)
stmfd sp!, {r5, r6, r8, r9}
blt 5f
CALGN( ands ip, r0, #31 )
CALGN( rsb r3, ip, #32 )
CALGN( sbcsne r4, r3, r2 ) @ C is always set here
CALGN( bcs 2f )
CALGN( adr r4, 6f )
CALGN( subs r2, r2, r3 ) @ C gets set
CALGN( add pc, r4, ip )
PLD( pld [r1, #0] )
2: PLD( subs r2, r2, #96 )
PLD( pld [r1, #28] )
PLD( blt 4f )
PLD( pld [r1, #60] )
PLD( pld [r1, #92] )
3: PLD( pld [r1, #124] )
4: ldr8w r1, r3, r4, r5, r6, r8, r9, ip, lr, abort=20f
subs r2, r2, #32
str8w r0, r3, r4, r5, r6, r8, r9, ip, lr, abort=20f
bge 3b
PLD( cmn r2, #96 )
PLD( bge 4b )
@ Tail of the aligned path: jump into the ldr1w ladder below so exactly
@ (r2 & 28)/4 words are copied. ip is scaled when one ldr1w expands to
@ more than one instruction (Thumb-2).
5: ands ip, r2, #28
rsb ip, ip, #32
#if LDR1W_SHIFT > 0
lsl ip, ip, #LDR1W_SHIFT
#endif
addne pc, pc, ip @ C is always clear here
b 7f
6:
.rept (1 << LDR1W_SHIFT)
W(nop)
.endr
ldr1w r1, r3, abort=20f
ldr1w r1, r4, abort=20f
ldr1w r1, r5, abort=20f
ldr1w r1, r6, abort=20f
ldr1w r1, r8, abort=20f
ldr1w r1, r9, abort=20f
ldr1w r1, lr, abort=20f
#if LDR1W_SHIFT < STR1W_SHIFT
lsl ip, ip, #STR1W_SHIFT - LDR1W_SHIFT
#elif LDR1W_SHIFT > STR1W_SHIFT
lsr ip, ip, #LDR1W_SHIFT - STR1W_SHIFT
#endif
add pc, pc, ip @ matching jump into the str1w ladder
nop
.rept (1 << STR1W_SHIFT)
W(nop)
.endr
str1w r0, r3, abort=20f
str1w r0, r4, abort=20f
str1w r0, r5, abort=20f
str1w r0, r6, abort=20f
str1w r0, r8, abort=20f
str1w r0, r9, abort=20f
str1w r0, lr, abort=20f
CALGN( bcs 2b )
7: ldmfd sp!, {r5, r6, r8, r9}
@ Final 0-3 bytes: the two shifted-out bits of r2 select them (N = bit 1,
@ C = bit 0 after the lsl #31).
8: movs r2, r2, lsl #31
ldr1b r1, r3, ne, abort=21f
ldr1b r1, r4, cs, abort=21f
ldr1b r1, ip, cs, abort=21f
str1b r0, r3, ne, abort=21f
str1b r0, r4, cs, abort=21f
str1b r0, ip, cs, abort=21f
exit r4, UNWIND(fpreg,) pc
@ Copy 1-3 bytes to word-align the destination, then retry.
9: rsb ip, ip, #4
cmp ip, #2
ldr1b r1, r3, gt, abort=21f
ldr1b r1, r4, ge, abort=21f
ldr1b r1, lr, abort=21f
str1b r0, r3, gt, abort=21f
str1b r0, r4, ge, abort=21f
subs r2, r2, ip
str1b r0, lr, abort=21f
blt 8b
ands ip, r1, #3
beq 1b
@ Destination aligned, source is not: round src down to a word boundary
@ and dispatch on its byte offset to a shifting copy.
10: bic r1, r1, #3
cmp ip, #2
ldr1w r1, lr, abort=21f
beq 17f
bgt 18f
@ Shifting copy: each output word combines the low part of the previous
@ input word (lspull #\pull) with the high part of the next (lspush #\push).
.macro forward_copy_shift pull push
subs r2, r2, #28
blt 14f
CALGN( ands ip, r0, #31 )
CALGN( rsb ip, ip, #32 )
CALGN( sbcsne r4, ip, r2 ) @ C is always set here
CALGN( subcc r2, r2, ip )
CALGN( bcc 15f )
11: stmfd sp!, {r5, r6, r8 - r10}
PLD( pld [r1, #0] )
PLD( subs r2, r2, #96 )
PLD( pld [r1, #28] )
PLD( blt 13f )
PLD( pld [r1, #60] )
PLD( pld [r1, #92] )
12: PLD( pld [r1, #124] )
13: ldr4w r1, r4, r5, r6, r8, abort=19f
mov r3, lr, lspull #\pull
subs r2, r2, #32
ldr4w r1, r9, r10, ip, lr, abort=19f
orr r3, r3, r4, lspush #\push
mov r4, r4, lspull #\pull
orr r4, r4, r5, lspush #\push
mov r5, r5, lspull #\pull
orr r5, r5, r6, lspush #\push
mov r6, r6, lspull #\pull
orr r6, r6, r8, lspush #\push
mov r8, r8, lspull #\pull
orr r8, r8, r9, lspush #\push
mov r9, r9, lspull #\pull
orr r9, r9, r10, lspush #\push
mov r10, r10, lspull #\pull
orr r10, r10, ip, lspush #\push
mov ip, ip, lspull #\pull
orr ip, ip, lr, lspush #\push
str8w r0, r3, r4, r5, r6, r8, r9, r10, ip, abort=19f
bge 12b
PLD( cmn r2, #96 )
PLD( bge 13b )
ldmfd sp!, {r5, r6, r8 - r10}
14: ands ip, r2, #28
beq 16f
15: mov r3, lr, lspull #\pull
ldr1w r1, lr, abort=21f
subs ip, ip, #4
orr r3, r3, lr, lspush #\push
str1w r0, r3, abort=21f
bgt 15b
CALGN( cmp r2, #0 )
CALGN( bge 11b )
@ Undo the read-ahead so the byte tail re-reads from the right place.
16: sub r1, r1, #(\push / 8)
b 8b
.endm
forward_copy_shift pull=8 push=24
17: forward_copy_shift pull=16 push=16
18: forward_copy_shift pull=24 push=8
UNWIND( .fnend )
/*
* Abort preamble and completion macros.
* If a fixup handler is required then those macros must surround it.
* It is assumed that the fixup code will handle the private part of
* the exit macro.
*/
.macro copy_abort_preamble
19: ldmfd sp!, {r5, r6, r8 - r10} @ unwind the shifting-copy loop's spill
b 21f
20: ldmfd sp!, {r5, r6, r8, r9} @ unwind the aligned loop's spill
21:
.endm
.macro copy_abort_end
ldmfd sp!, {r4, UNWIND(fpreg,) pc}
.endm
|
aixcc-public/challenge-001-exemplar-source
| 1,298
|
arch/arm/lib/memcpy.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/lib/memcpy.S
*
* Author: Nicolas Pitre
* Created: Sep 28, 2005
* Copyright: MontaVista Software, Inc.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#define LDR1W_SHIFT 0
#define STR1W_SHIFT 0
@ Accessor macros required by copy_template.S (see its header for the
@ contract). For plain memcpy no fault fixup is needed, so the 'abort'
@ argument is accepted but unused in every body here.
.macro ldr1w ptr reg abort
W(ldr) \reg, [\ptr], #4
.endm
.macro ldr4w ptr reg1 reg2 reg3 reg4 abort
ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4}
.endm
.macro ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8}
.endm
.macro ldr1b ptr reg cond=al abort
ldrb\cond \reg, [\ptr], #1
.endm
.macro str1w ptr reg abort
W(str) \reg, [\ptr], #4
.endm
.macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
stmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8}
.endm
.macro str1b ptr reg cond=al abort
strb\cond \reg, [\ptr], #1
.endm
@ r0 is pushed here and popped by 'exit' so memcpy returns dest unchanged.
.macro enter regs:vararg
UNWIND( .save {r0, \regs} )
stmdb sp!, {r0, \regs}
.endm
.macro exit regs:vararg
ldmfd sp!, {r0, \regs}
.endm
.text
/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
ENTRY(__memcpy)
ENTRY(mmiocpy)
WEAK(memcpy)
#include "copy_template.S"
ENDPROC(memcpy)
ENDPROC(mmiocpy)
ENDPROC(__memcpy)
|
aixcc-public/challenge-001-exemplar-source
| 1,680
|
arch/arm/lib/ashrdi3.S
|
/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005
Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
#ifdef __ARMEB__
#define al r1
#define ah r0
#else
#define al r0
#define ah r1
#endif
@ 64-bit arithmetic shift right: ah:al >>= r2 (EABI helper __aeabi_lasr).
@ r3 = r2 - 32 (negative, i.e. N set, when the shift is < 32);
@ ip = 32 - r2 (bits carried from the high word into the low word).
ENTRY(__ashrdi3)
ENTRY(__aeabi_lasr)
subs r3, r2, #32
rsb ip, r2, #32
movmi al, al, lsr r2 @ shift < 32: al = al >> r2 ...
movpl al, ah, asr r3 @ shift >= 32: al = ah >> (r2 - 32)
ARM( orrmi al, al, ah, lsl ip ) @ ... | ah << (32 - r2)
THUMB( lslmi r3, ah, ip )
THUMB( orrmi al, al, r3 )
mov ah, ah, asr r2 @ reg-specified asr >= 32 sign-fills on ARM
ret lr
ENDPROC(__ashrdi3)
ENDPROC(__aeabi_lasr)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.