Dataset schema: repo_id (string, 5-115 chars) · size (int64, 590 to 5.01M) · file_path (string, 4-212 chars) · content (string, 590 to 5.01M chars)
aixcc-public/challenge-001-exemplar-source · arch/arm/lib/io-writesb.S · 1,539 bytes

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/io-writesb.S
 *
 *  Copyright (C) 1995-2000 Russell King
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

.macro	outword, rd
#ifndef __ARMEB__
	strb	\rd, [r0]
	mov	\rd, \rd, lsr #8
	strb	\rd, [r0]
	mov	\rd, \rd, lsr #8
	strb	\rd, [r0]
	mov	\rd, \rd, lsr #8
	strb	\rd, [r0]
#else
	mov	lr, \rd, lsr #24
	strb	lr, [r0]
	mov	lr, \rd, lsr #16
	strb	lr, [r0]
	mov	lr, \rd, lsr #8
	strb	lr, [r0]
	strb	\rd, [r0]
#endif
.endm

.Loutsb_align:	rsb	ip, ip, #4
		cmp	ip, r2
		movgt	ip, r2
		cmp	ip, #2
		ldrb	r3, [r1], #1
		strb	r3, [r0]
		ldrbge	r3, [r1], #1
		strbge	r3, [r0]
		ldrbgt	r3, [r1], #1
		strbgt	r3, [r0]
		subs	r2, r2, ip
		bne	.Loutsb_aligned

ENTRY(__raw_writesb)
		teq	r2, #0		@ do we have to check for the zero len?
		reteq	lr
		ands	ip, r1, #3
		bne	.Loutsb_align

.Loutsb_aligned:
		stmfd	sp!, {r4, r5, lr}
		subs	r2, r2, #16
		bmi	.Loutsb_no_16

.Loutsb_16_lp:	ldmia	r1!, {r3, r4, r5, ip}
		outword	r3
		outword	r4
		outword	r5
		outword	ip
		subs	r2, r2, #16
		bpl	.Loutsb_16_lp

		tst	r2, #15
		ldmfdeq	sp!, {r4, r5, pc}

.Loutsb_no_16:	tst	r2, #8
		beq	.Loutsb_no_8
		ldmia	r1!, {r3, r4}
		outword	r3
		outword	r4

.Loutsb_no_8:	tst	r2, #4
		beq	.Loutsb_no_4
		ldr	r3, [r1], #4
		outword	r3

.Loutsb_no_4:	ands	r2, r2, #3
		ldmfdeq	sp!, {r4, r5, pc}

		cmp	r2, #2
		ldrb	r3, [r1], #1
		strb	r3, [r0]
		ldrbge	r3, [r1], #1
		strbge	r3, [r0]
		ldrbgt	r3, [r1]
		strbgt	r3, [r0]

		ldmfd	sp!, {r4, r5, pc}
ENDPROC(__raw_writesb)
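The outword macro above streams one 32-bit word to a byte-wide device port, low byte first on little-endian (high byte first on big-endian). A minimal C sketch of the little-endian path; the port address is an illustrative assumption, not part of the source:

#include <stdint.h>

/* Hypothetical byte-wide MMIO data port. */
static volatile uint8_t *const PORT = (uint8_t *)0x40000000;

/* Emit the four bytes of w to the same port, LSB first,
 * mirroring the strb/lsr pairs in the !__ARMEB__ path. */
static void outword(uint32_t w)
{
    for (int i = 0; i < 4; i++) {
        *PORT = (uint8_t)w;   /* strb \rd, [r0] */
        w >>= 8;              /* mov \rd, \rd, lsr #8 */
    }
}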
aixcc-public/challenge-001-exemplar-source · arch/arm/lib/lshrdi3.S · 1,680 bytes

/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005
   Free Software Foundation, Inc.

This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine executable.)

This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */

#include <linux/linkage.h>
#include <asm/assembler.h>

#ifdef __ARMEB__
#define al r1
#define ah r0
#else
#define al r0
#define ah r1
#endif

ENTRY(__lshrdi3)
ENTRY(__aeabi_llsr)

	subs	r3, r2, #32
	rsb	ip, r2, #32
	movmi	al, al, lsr r2
	movpl	al, ah, lsr r3
 ARM(	orrmi	al, al, ah, lsl ip	)
 THUMB(	lslmi	r3, ah, ip		)
 THUMB(	orrmi	al, al, r3		)
	mov	ah, ah, lsr r2
	ret	lr

ENDPROC(__lshrdi3)
ENDPROC(__aeabi_llsr)
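__aeabi_llsr shifts a 64-bit value held in a 32-bit register pair, splitting on whether the shift count crosses the word boundary (the subs r3, r2, #32 test). A hedged C model of that case split; lo/hi are illustrative names, and n < 64 is assumed:

#include <stdint.h>

/* 64-bit logical shift right built from 32-bit halves,
 * mirroring the movmi/movpl paths above. Assumes n < 64. */
static uint64_t lshrdi3(uint32_t lo, uint32_t hi, unsigned n)
{
    uint32_t rlo;

    if (n < 32)                       /* "subs r3, r2, #32" went negative */
        rlo = (lo >> n) | (n ? hi << (32 - n) : 0);
    else                              /* whole low word comes from hi */
        rlo = hi >> (n - 32);
    hi = (n < 32) ? hi >> n : 0;      /* mov ah, ah, lsr r2 */
    return ((uint64_t)hi << 32) | rlo;
}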
aixcc-public/challenge-001-exemplar-source · arch/arm/lib/csumpartialcopyuser.S · 1,611 bytes

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/csumpartialcopyuser.S
 *
 *  Copyright (C) 1995-1998 Russell King
 *
 * 27/03/03 Ian Molton Clean up CONFIG_CPU
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>

		.text

#ifdef CONFIG_CPU_SW_DOMAIN_PAN
		.macro	save_regs
		mrc	p15, 0, ip, c3, c0, 0
		stmfd	sp!, {r1, r2, r4 - r8, ip, lr}
		uaccess_enable ip
		.endm

		.macro	load_regs
		ldmfd	sp!, {r1, r2, r4 - r8, ip, lr}
		mcr	p15, 0, ip, c3, c0, 0
		ret	lr
		.endm
#else
		.macro	save_regs
		stmfd	sp!, {r1, r2, r4 - r8, lr}
		.endm

		.macro	load_regs
		ldmfd	sp!, {r1, r2, r4 - r8, pc}
		.endm
#endif

		.macro	load1b, reg1
		ldrusr	\reg1, r0, 1
		.endm

		.macro	load2b, reg1, reg2
		ldrusr	\reg1, r0, 1
		ldrusr	\reg2, r0, 1
		.endm

		.macro	load1l, reg1
		ldrusr	\reg1, r0, 4
		.endm

		.macro	load2l, reg1, reg2
		ldrusr	\reg1, r0, 4
		ldrusr	\reg2, r0, 4
		.endm

		.macro	load4l, reg1, reg2, reg3, reg4
		ldrusr	\reg1, r0, 4
		ldrusr	\reg2, r0, 4
		ldrusr	\reg3, r0, 4
		ldrusr	\reg4, r0, 4
		.endm

/*
 * unsigned int
 * csum_partial_copy_from_user(const char *src, char *dst, int len)
 *  r0 = src, r1 = dst, r2 = len
 *  Returns : r0 = checksum or 0
 */

#define FN_ENTRY	ENTRY(csum_partial_copy_from_user)
#define FN_EXIT		ENDPROC(csum_partial_copy_from_user)

#include "csumpartialcopygeneric.S"

/*
 * We report fault by returning 0 csum - impossible in normal case, since
 * we start with 0xffffffff for initial sum.
 */
		.pushsection .text.fixup,"ax"
		.align	4
9001:		mov	r0, #0
		load_regs
		.popsection
aixcc-public/challenge-001-exemplar-source · arch/arm/lib/io-readsw-armv3.S · 1,661 bytes

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/io-readsw-armv3.S
 *
 *  Copyright (C) 1995-2000 Russell King
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

.Linsw_bad_alignment:
		adr	r0, .Linsw_bad_align_msg
		mov	r2, lr
		b	panic
.Linsw_bad_align_msg:
		.asciz	"insw: bad buffer alignment (0x%p, lr=0x%08lX)\n"
		.align

.Linsw_align:	tst	r1, #1
		bne	.Linsw_bad_alignment

		ldr	r3, [r0]
		strb	r3, [r1], #1
		mov	r3, r3, lsr #8
		strb	r3, [r1], #1

		subs	r2, r2, #1
		reteq	lr

ENTRY(__raw_readsw)
		teq	r2, #0		@ do we have to check for the zero len?
		reteq	lr
		tst	r1, #3
		bne	.Linsw_align

.Linsw_aligned:	mov	ip, #0xff
		orr	ip, ip, ip, lsl #8

		stmfd	sp!, {r4, r5, r6, lr}
		subs	r2, r2, #8
		bmi	.Lno_insw_8

.Linsw_8_lp:	ldr	r3, [r0]
		and	r3, r3, ip
		ldr	r4, [r0]
		orr	r3, r3, r4, lsl #16

		ldr	r4, [r0]
		and	r4, r4, ip
		ldr	r5, [r0]
		orr	r4, r4, r5, lsl #16

		ldr	r5, [r0]
		and	r5, r5, ip
		ldr	r6, [r0]
		orr	r5, r5, r6, lsl #16

		ldr	r6, [r0]
		and	r6, r6, ip
		ldr	lr, [r0]
		orr	r6, r6, lr, lsl #16

		stmia	r1!, {r3 - r6}

		subs	r2, r2, #8
		bpl	.Linsw_8_lp

		tst	r2, #7
		ldmfdeq	sp!, {r4, r5, r6, pc}

.Lno_insw_8:	tst	r2, #4
		beq	.Lno_insw_4

		ldr	r3, [r0]
		and	r3, r3, ip
		ldr	r4, [r0]
		orr	r3, r3, r4, lsl #16

		ldr	r4, [r0]
		and	r4, r4, ip
		ldr	r5, [r0]
		orr	r4, r4, r5, lsl #16

		stmia	r1!, {r3, r4}

.Lno_insw_4:	tst	r2, #2
		beq	.Lno_insw_2

		ldr	r3, [r0]
		and	r3, r3, ip
		ldr	r4, [r0]
		orr	r3, r3, r4, lsl #16

		str	r3, [r1], #4

.Lno_insw_2:	tst	r2, #1
		ldrne	r3, [r0]
		strbne	r3, [r1], #1
		movne	r3, r3, lsr #8
		strbne	r3, [r1]

		ldmfd	sp!, {r4, r5, r6, pc}
aixcc-public/challenge-001-exemplar-source · arch/arm/lib/memmove.S · 4,289 bytes

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/memmove.S
 *
 *  Author:	Nicolas Pitre
 *  Created:	Sep 28, 2005
 *  Copyright:	(C) MontaVista Software Inc.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>

		.text

/*
 * Prototype: void *memmove(void *dest, const void *src, size_t n);
 *
 * Note:
 *
 * If the memory regions don't overlap, we simply branch to memcpy which is
 * normally a bit faster. Otherwise the copy is done going downwards.  This
 * is a transposition of the code from copy_template.S but with the copy
 * occurring in the opposite direction.
 */

ENTRY(__memmove)
WEAK(memmove)
	UNWIND(	.fnstart			)

		subs	ip, r0, r1
		cmphi	r2, ip
		bls	__memcpy
	UNWIND(	.fnend				)

	UNWIND(	.fnstart			)
	UNWIND(	.save	{r0, r4, fpreg, lr}	)
		stmfd	sp!, {r0, r4, UNWIND(fpreg,) lr}
	UNWIND(	.setfp	fpreg, sp		)
	UNWIND(	mov	fpreg, sp		)
		add	r1, r1, r2
		add	r0, r0, r2
		subs	r2, r2, #4
		blt	8f
		ands	ip, r0, #3
	PLD(	pld	[r1, #-4]		)
		bne	9f
		ands	ip, r1, #3
		bne	10f

1:		subs	r2, r2, #(28)
		stmfd	sp!, {r5, r6, r8, r9}
		blt	5f

	CALGN(	ands	ip, r0, #31		)
	CALGN(	sbcsne	r4, ip, r2		)  @ C is always set here
	CALGN(	bcs	2f			)
	CALGN(	adr	r4, 6f			)
	CALGN(	subs	r2, r2, ip		)  @ C is set here
	CALGN(	rsb	ip, ip, #32		)
	CALGN(	add	pc, r4, ip		)

	PLD(	pld	[r1, #-4]		)
2:	PLD(	subs	r2, r2, #96		)
	PLD(	pld	[r1, #-32]		)
	PLD(	blt	4f			)
	PLD(	pld	[r1, #-64]		)
	PLD(	pld	[r1, #-96]		)

3:	PLD(	pld	[r1, #-128]		)
4:		ldmdb	r1!, {r3, r4, r5, r6, r8, r9, ip, lr}
		subs	r2, r2, #32
		stmdb	r0!, {r3, r4, r5, r6, r8, r9, ip, lr}
		bge	3b
	PLD(	cmn	r2, #96			)
	PLD(	bge	4b			)

5:		ands	ip, r2, #28
		rsb	ip, ip, #32
		addne	pc, pc, ip		@ C is always clear here
		b	7f
6:		W(nop)
		W(ldr)	r3, [r1, #-4]!
		W(ldr)	r4, [r1, #-4]!
		W(ldr)	r5, [r1, #-4]!
		W(ldr)	r6, [r1, #-4]!
		W(ldr)	r8, [r1, #-4]!
		W(ldr)	r9, [r1, #-4]!
		W(ldr)	lr, [r1, #-4]!

		add	pc, pc, ip
		nop
		W(nop)
		W(str)	r3, [r0, #-4]!
		W(str)	r4, [r0, #-4]!
		W(str)	r5, [r0, #-4]!
		W(str)	r6, [r0, #-4]!
		W(str)	r8, [r0, #-4]!
		W(str)	r9, [r0, #-4]!
		W(str)	lr, [r0, #-4]!

	CALGN(	bcs	2b			)

7:		ldmfd	sp!, {r5, r6, r8, r9}

8:		movs	r2, r2, lsl #31
		ldrbne	r3, [r1, #-1]!
		ldrbcs	r4, [r1, #-1]!
		ldrbcs	ip, [r1, #-1]
		strbne	r3, [r0, #-1]!
		strbcs	r4, [r0, #-1]!
		strbcs	ip, [r0, #-1]
		ldmfd	sp!, {r0, r4, UNWIND(fpreg,) pc}

9:		cmp	ip, #2
		ldrbgt	r3, [r1, #-1]!
		ldrbge	r4, [r1, #-1]!
		ldrb	lr, [r1, #-1]!
		strbgt	r3, [r0, #-1]!
		strbge	r4, [r0, #-1]!
		subs	r2, r2, ip
		strb	lr, [r0, #-1]!
		blt	8b
		ands	ip, r1, #3
		beq	1b

10:		bic	r1, r1, #3
		cmp	ip, #2
		ldr	r3, [r1, #0]
		beq	17f
		blt	18f

		.macro	backward_copy_shift push pull

		subs	r2, r2, #28
		blt	14f

	CALGN(	ands	ip, r0, #31		)
	CALGN(	sbcsne	r4, ip, r2		)  @ C is always set here
	CALGN(	subcc	r2, r2, ip		)
	CALGN(	bcc	15f			)

11:		stmfd	sp!, {r5, r6, r8 - r10}

	PLD(	pld	[r1, #-4]		)
	PLD(	subs	r2, r2, #96		)
	PLD(	pld	[r1, #-32]		)
	PLD(	blt	13f			)
	PLD(	pld	[r1, #-64]		)
	PLD(	pld	[r1, #-96]		)

12:	PLD(	pld	[r1, #-128]		)
13:		ldmdb	r1!, {r8, r9, r10, ip}
		mov	lr, r3, lspush #\push
		subs	r2, r2, #32
		ldmdb	r1!, {r3, r4, r5, r6}
		orr	lr, lr, ip, lspull #\pull
		mov	ip, ip, lspush #\push
		orr	ip, ip, r10, lspull #\pull
		mov	r10, r10, lspush #\push
		orr	r10, r10, r9, lspull #\pull
		mov	r9, r9, lspush #\push
		orr	r9, r9, r8, lspull #\pull
		mov	r8, r8, lspush #\push
		orr	r8, r8, r6, lspull #\pull
		mov	r6, r6, lspush #\push
		orr	r6, r6, r5, lspull #\pull
		mov	r5, r5, lspush #\push
		orr	r5, r5, r4, lspull #\pull
		mov	r4, r4, lspush #\push
		orr	r4, r4, r3, lspull #\pull
		stmdb	r0!, {r4 - r6, r8 - r10, ip, lr}
		bge	12b
	PLD(	cmn	r2, #96			)
	PLD(	bge	13b			)

		ldmfd	sp!, {r5, r6, r8 - r10}

14:		ands	ip, r2, #28
		beq	16f

15:		mov	lr, r3, lspush #\push
		ldr	r3, [r1, #-4]!
		subs	ip, ip, #4
		orr	lr, lr, r3, lspull #\pull
		str	lr, [r0, #-4]!
		bgt	15b
	CALGN(	cmp	r2, #0			)
	CALGN(	bge	11b			)

16:		add	r1, r1, #(\pull / 8)
		b	8b

		.endm


		backward_copy_shift	push=8	pull=24

17:		backward_copy_shift	push=16	pull=16

18:		backward_copy_shift	push=24	pull=8

	UNWIND(	.fnend				)
ENDPROC(memmove)
ENDPROC(__memmove)
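The entry sequence (subs ip, r0, r1; cmphi r2, ip; bls __memcpy) forwards to memcpy whenever dest - src, taken as an unsigned value, is at least n; only a harmfully overlapping pair falls through to the backward copy. A hedged C model of that dispatch:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Sketch of memmove's decision: if dest is not inside [src, src+n)
 * a forward memcpy is safe, otherwise copy from the end downwards. */
void *memmove_sketch(void *dest, const void *src, size_t n)
{
    uintptr_t diff = (uintptr_t)dest - (uintptr_t)src;

    if (diff >= n)                    /* no harmful overlap */
        return memcpy(dest, src, n);

    /* overlapping, dest above src: copy backwards */
    unsigned char *d = (unsigned char *)dest + n;
    const unsigned char *s = (const unsigned char *)src + n;
    while (n--)
        *--d = *--s;
    return dest;
}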
aixcc-public/challenge-001-exemplar-source · arch/arm/lib/io-writesw-armv3.S · 2,038 bytes

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/io-writesw-armv3.S
 *
 *  Copyright (C) 1995-2000 Russell King
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

.Loutsw_bad_alignment:
		adr	r0, .Loutsw_bad_align_msg
		mov	r2, lr
		b	panic
.Loutsw_bad_align_msg:
		.asciz	"outsw: bad buffer alignment (0x%p, lr=0x%08lX)\n"
		.align

.Loutsw_align:	tst	r1, #1
		bne	.Loutsw_bad_alignment

		add	r1, r1, #2

		ldr	r3, [r1, #-4]
		mov	r3, r3, lsr #16
		orr	r3, r3, r3, lsl #16
		str	r3, [r0]
		subs	r2, r2, #1
		reteq	lr

ENTRY(__raw_writesw)
		teq	r2, #0		@ do we have to check for the zero len?
		reteq	lr
		tst	r1, #3
		bne	.Loutsw_align

		stmfd	sp!, {r4, r5, r6, lr}

		subs	r2, r2, #8
		bmi	.Lno_outsw_8

.Loutsw_8_lp:	ldmia	r1!, {r3, r4, r5, r6}

		mov	ip, r3, lsl #16
		orr	ip, ip, ip, lsr #16
		str	ip, [r0]

		mov	ip, r3, lsr #16
		orr	ip, ip, ip, lsl #16
		str	ip, [r0]

		mov	ip, r4, lsl #16
		orr	ip, ip, ip, lsr #16
		str	ip, [r0]

		mov	ip, r4, lsr #16
		orr	ip, ip, ip, lsl #16
		str	ip, [r0]

		mov	ip, r5, lsl #16
		orr	ip, ip, ip, lsr #16
		str	ip, [r0]

		mov	ip, r5, lsr #16
		orr	ip, ip, ip, lsl #16
		str	ip, [r0]

		mov	ip, r6, lsl #16
		orr	ip, ip, ip, lsr #16
		str	ip, [r0]

		mov	ip, r6, lsr #16
		orr	ip, ip, ip, lsl #16
		str	ip, [r0]

		subs	r2, r2, #8
		bpl	.Loutsw_8_lp

		tst	r2, #7
		ldmfdeq	sp!, {r4, r5, r6, pc}

.Lno_outsw_8:	tst	r2, #4
		beq	.Lno_outsw_4

		ldmia	r1!, {r3, r4}

		mov	ip, r3, lsl #16
		orr	ip, ip, ip, lsr #16
		str	ip, [r0]

		mov	ip, r3, lsr #16
		orr	ip, ip, ip, lsl #16
		str	ip, [r0]

		mov	ip, r4, lsl #16
		orr	ip, ip, ip, lsr #16
		str	ip, [r0]

		mov	ip, r4, lsr #16
		orr	ip, ip, ip, lsl #16
		str	ip, [r0]

.Lno_outsw_4:	tst	r2, #2
		beq	.Lno_outsw_2

		ldr	r3, [r1], #4

		mov	ip, r3, lsl #16
		orr	ip, ip, ip, lsr #16
		str	ip, [r0]

		mov	ip, r3, lsr #16
		orr	ip, ip, ip, lsl #16
		str	ip, [r0]

.Lno_outsw_2:	tst	r2, #1

		ldrne	r3, [r1]

		movne	ip, r3, lsl #16
		orrne	ip, ip, ip, lsr #16
		strne	ip, [r0]

		ldmfd	sp!, {r4, r5, r6, pc}
aixcc-public/challenge-001-exemplar-source · arch/arm/lib/findbit.S · 4,799 bytes

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/findbit.S
 *
 *  Copyright (C) 1995-2000 Russell King
 *
 * 16th March 2001 - John Ripley <jripley@sonicblue.com>
 *   Fixed so that "size" is an exclusive not an inclusive quantity.
 *   All users of these functions expect exclusive sizes, and may
 *   also call with zero size.
 * Reworked by rmk.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
		.text

/*
 * Purpose  : Find a 'zero' bit
 * Prototype: int find_first_zero_bit(void *addr, unsigned int maxbit);
 */
ENTRY(_find_first_zero_bit_le)
		teq	r1, #0
		beq	3f
		mov	r2, #0
1:
 ARM(		ldrb	r3, [r0, r2, lsr #3]	)
 THUMB(		lsr	r3, r2, #3		)
 THUMB(		ldrb	r3, [r0, r3]		)
		eors	r3, r3, #0xff		@ invert bits
		bne	.L_found		@ any now set - found zero bit
		add	r2, r2, #8		@ next bit pointer
2:		cmp	r2, r1			@ any more?
		blo	1b
3:		mov	r0, r1			@ no free bits
		ret	lr
ENDPROC(_find_first_zero_bit_le)

/*
 * Purpose  : Find next 'zero' bit
 * Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset)
 */
ENTRY(_find_next_zero_bit_le)
		cmp	r2, r1
		bhs	3b
		ands	ip, r2, #7
		beq	1b			@ If new byte, goto old routine
 ARM(		ldrb	r3, [r0, r2, lsr #3]	)
 THUMB(		lsr	r3, r2, #3		)
 THUMB(		ldrb	r3, [r0, r3]		)
		eor	r3, r3, #0xff		@ now looking for a 1 bit
		movs	r3, r3, lsr ip		@ shift off unused bits
		bne	.L_found
		orr	r2, r2, #7		@ if zero, then no bits here
		add	r2, r2, #1		@ align bit pointer
		b	2b			@ loop for next bit
ENDPROC(_find_next_zero_bit_le)

/*
 * Purpose  : Find a 'one' bit
 * Prototype: int find_first_bit(const unsigned long *addr, unsigned int maxbit);
 */
ENTRY(_find_first_bit_le)
		teq	r1, #0
		beq	3f
		mov	r2, #0
1:
 ARM(		ldrb	r3, [r0, r2, lsr #3]	)
 THUMB(		lsr	r3, r2, #3		)
 THUMB(		ldrb	r3, [r0, r3]		)
		movs	r3, r3
		bne	.L_found		@ any now set - found a set bit
		add	r2, r2, #8		@ next bit pointer
2:		cmp	r2, r1			@ any more?
		blo	1b
3:		mov	r0, r1			@ no free bits
		ret	lr
ENDPROC(_find_first_bit_le)

/*
 * Purpose  : Find next 'one' bit
 * Prototype: int find_next_bit(void *addr, unsigned int maxbit, int offset)
 */
ENTRY(_find_next_bit_le)
		cmp	r2, r1
		bhs	3b
		ands	ip, r2, #7
		beq	1b			@ If new byte, goto old routine
 ARM(		ldrb	r3, [r0, r2, lsr #3]	)
 THUMB(		lsr	r3, r2, #3		)
 THUMB(		ldrb	r3, [r0, r3]		)
		movs	r3, r3, lsr ip		@ shift off unused bits
		bne	.L_found
		orr	r2, r2, #7		@ if zero, then no bits here
		add	r2, r2, #1		@ align bit pointer
		b	2b			@ loop for next bit
ENDPROC(_find_next_bit_le)

#ifdef __ARMEB__

ENTRY(_find_first_zero_bit_be)
		teq	r1, #0
		beq	3f
		mov	r2, #0
1:		eor	r3, r2, #0x18		@ big endian byte ordering
 ARM(		ldrb	r3, [r0, r3, lsr #3]	)
 THUMB(		lsr	r3, #3			)
 THUMB(		ldrb	r3, [r0, r3]		)
		eors	r3, r3, #0xff		@ invert bits
		bne	.L_found		@ any now set - found zero bit
		add	r2, r2, #8		@ next bit pointer
2:		cmp	r2, r1			@ any more?
		blo	1b
3:		mov	r0, r1			@ no free bits
		ret	lr
ENDPROC(_find_first_zero_bit_be)

ENTRY(_find_next_zero_bit_be)
		cmp	r2, r1
		bhs	3b
		ands	ip, r2, #7
		beq	1b			@ If new byte, goto old routine
		eor	r3, r2, #0x18		@ big endian byte ordering
 ARM(		ldrb	r3, [r0, r3, lsr #3]	)
 THUMB(		lsr	r3, #3			)
 THUMB(		ldrb	r3, [r0, r3]		)
		eor	r3, r3, #0xff		@ now looking for a 1 bit
		movs	r3, r3, lsr ip		@ shift off unused bits
		bne	.L_found
		orr	r2, r2, #7		@ if zero, then no bits here
		add	r2, r2, #1		@ align bit pointer
		b	2b			@ loop for next bit
ENDPROC(_find_next_zero_bit_be)

ENTRY(_find_first_bit_be)
		teq	r1, #0
		beq	3f
		mov	r2, #0
1:		eor	r3, r2, #0x18		@ big endian byte ordering
 ARM(		ldrb	r3, [r0, r3, lsr #3]	)
 THUMB(		lsr	r3, #3			)
 THUMB(		ldrb	r3, [r0, r3]		)
		movs	r3, r3
		bne	.L_found		@ any now set - found a set bit
		add	r2, r2, #8		@ next bit pointer
2:		cmp	r2, r1			@ any more?
		blo	1b
3:		mov	r0, r1			@ no free bits
		ret	lr
ENDPROC(_find_first_bit_be)

ENTRY(_find_next_bit_be)
		cmp	r2, r1
		bhs	3b
		ands	ip, r2, #7
		beq	1b			@ If new byte, goto old routine
		eor	r3, r2, #0x18		@ big endian byte ordering
 ARM(		ldrb	r3, [r0, r3, lsr #3]	)
 THUMB(		lsr	r3, #3			)
 THUMB(		ldrb	r3, [r0, r3]		)
		movs	r3, r3, lsr ip		@ shift off unused bits
		bne	.L_found
		orr	r2, r2, #7		@ if zero, then no bits here
		add	r2, r2, #1		@ align bit pointer
		b	2b			@ loop for next bit
ENDPROC(_find_next_bit_be)

#endif

/*
 * One or more bits in the LSB of r3 are assumed to be set.
 */
.L_found:
#if __LINUX_ARM_ARCH__ >= 5
		rsb	r0, r3, #0
		and	r3, r3, r0
		clz	r3, r3
		rsb	r3, r3, #31
		add	r0, r2, r3
#else
		tst	r3, #0x0f
		addeq	r2, r2, #4
		movne	r3, r3, lsl #4
		tst	r3, #0x30
		addeq	r2, r2, #2
		movne	r3, r3, lsl #2
		tst	r3, #0x40
		addeq	r2, r2, #1
		mov	r0, r2
#endif
		cmp	r1, r0			@ Clamp to maxbit
		movlo	r0, r1
		ret	lr
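The scan works a byte at a time, and .L_found isolates the lowest set bit with the negate-and-AND trick (rsb/and) before converting it to an index via clz. A hedged C sketch of the same scheme for the little-endian zero-bit case; __builtin_clz is the GCC/Clang counterpart of the clz instruction:

#include <stdint.h>

/* Sketch of the byte-at-a-time scan plus the .L_found trick:
 * isolate the lowest set bit with b & -b, then take its index. */
static unsigned find_first_zero_bit(const uint8_t *addr, unsigned maxbit)
{
    for (unsigned bit = 0; bit < maxbit; bit += 8) {
        uint32_t b = addr[bit >> 3] ^ 0xff;    /* invert: look for a 1 */
        if (b) {
            b &= -b;                           /* keep lowest set bit */
            unsigned pos = bit + (31 - __builtin_clz(b));
            return pos < maxbit ? pos : maxbit; /* clamp, as the asm does */
        }
    }
    return maxbit;                             /* no zero bit found */
}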
aixcc-public/challenge-001-exemplar-source · arch/arm/lib/copy_page.S · 1,141 bytes

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/copypage.S
 *
 *  Copyright (C) 1995-1999 Russell King
 *
 *  ASM optimised string functions
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>

#define COPY_COUNT (PAGE_SZ / (2 * L1_CACHE_BYTES) PLD( -1 ))

		.text
		.align	5
/*
 * StrongARM optimised copy_page routine
 * now 1.78bytes/cycle, was 1.60 bytes/cycle (50MHz bus -> 89MB/s)
 * Note that we probably achieve closer to the 100MB/s target with
 * the core clock switching.
 */
ENTRY(copy_page)
		stmfd	sp!, {r4, lr}			@	2
	PLD(	pld	[r1, #0]		)
	PLD(	pld	[r1, #L1_CACHE_BYTES]		)
		mov	r2, #COPY_COUNT			@	1
		ldmia	r1!, {r3, r4, ip, lr}		@	4+1
1:	PLD(	pld	[r1, #2 * L1_CACHE_BYTES])
	PLD(	pld	[r1, #3 * L1_CACHE_BYTES])
2:
	.rept	(2 * L1_CACHE_BYTES / 16 - 1)
		stmia	r0!, {r3, r4, ip, lr}		@	4
		ldmia	r1!, {r3, r4, ip, lr}		@	4
	.endr
		subs	r2, r2, #1			@	1
		stmia	r0!, {r3, r4, ip, lr}		@	4
		ldmiagt	r1!, {r3, r4, ip, lr}		@	4
		bgt	1b				@	1
	PLD(	ldmiaeq	r1!, {r3, r4, ip, lr}	)
	PLD(	beq	2b			)
		ldmfd	sp!, {r4, pc}			@	3
ENDPROC(copy_page)
aixcc-public/challenge-001-exemplar-source · arch/arm/lib/ashldi3.S · 1,680 bytes

/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005
   Free Software Foundation, Inc.

This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine executable.)

This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */

#include <linux/linkage.h>
#include <asm/assembler.h>

#ifdef __ARMEB__
#define al r1
#define ah r0
#else
#define al r0
#define ah r1
#endif

ENTRY(__ashldi3)
ENTRY(__aeabi_llsl)

	subs	r3, r2, #32
	rsb	ip, r2, #32
	movmi	ah, ah, lsl r2
	movpl	ah, al, lsl r3
 ARM(	orrmi	ah, ah, al, lsr ip	)
 THUMB(	lsrmi	r3, al, ip		)
 THUMB(	orrmi	ah, ah, r3		)
	mov	al, al, lsl r2
	ret	lr

ENDPROC(__ashldi3)
ENDPROC(__aeabi_llsl)
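__aeabi_llsl is the mirror image of the right-shift routine above it in this dump. A hedged C model, under the same n < 64 assumption and illustrative lo/hi names:

#include <stdint.h>

/* 64-bit shift left from 32-bit halves, mirroring the lshrdi3 sketch. */
static uint64_t ashldi3(uint32_t lo, uint32_t hi, unsigned n)
{
    uint32_t rhi;

    if (n < 32)
        rhi = (hi << n) | (n ? lo >> (32 - n) : 0);
    else
        rhi = lo << (n - 32);
    lo = (n < 32) ? lo << n : 0;
    return ((uint64_t)rhi << 32) | lo;
}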
aixcc-public/challenge-001-exemplar-source · arch/arm/lib/io-readsb.S · 2,358 bytes

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/io-readsb.S
 *
 *  Copyright (C) 1995-2000 Russell King
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

.Linsb_align:	rsb	ip, ip, #4
		cmp	ip, r2
		movgt	ip, r2
		cmp	ip, #2
		ldrb	r3, [r0]
		strb	r3, [r1], #1
		ldrbge	r3, [r0]
		strbge	r3, [r1], #1
		ldrbgt	r3, [r0]
		strbgt	r3, [r1], #1
		subs	r2, r2, ip
		bne	.Linsb_aligned

ENTRY(__raw_readsb)
		teq	r2, #0		@ do we have to check for the zero len?
		reteq	lr
		ands	ip, r1, #3
		bne	.Linsb_align

.Linsb_aligned:	stmfd	sp!, {r4 - r6, lr}

		subs	r2, r2, #16
		bmi	.Linsb_no_16

.Linsb_16_lp:	ldrb	r3, [r0]
		ldrb	r4, [r0]
		ldrb	r5, [r0]
		mov	r3, r3, put_byte_0
		ldrb	r6, [r0]
		orr	r3, r3, r4, put_byte_1
		ldrb	r4, [r0]
		orr	r3, r3, r5, put_byte_2
		ldrb	r5, [r0]
		orr	r3, r3, r6, put_byte_3
		ldrb	r6, [r0]
		mov	r4, r4, put_byte_0
		ldrb	ip, [r0]
		orr	r4, r4, r5, put_byte_1
		ldrb	r5, [r0]
		orr	r4, r4, r6, put_byte_2
		ldrb	r6, [r0]
		orr	r4, r4, ip, put_byte_3
		ldrb	ip, [r0]
		mov	r5, r5, put_byte_0
		ldrb	lr, [r0]
		orr	r5, r5, r6, put_byte_1
		ldrb	r6, [r0]
		orr	r5, r5, ip, put_byte_2
		ldrb	ip, [r0]
		orr	r5, r5, lr, put_byte_3
		ldrb	lr, [r0]
		mov	r6, r6, put_byte_0
		orr	r6, r6, ip, put_byte_1
		ldrb	ip, [r0]
		orr	r6, r6, lr, put_byte_2
		orr	r6, r6, ip, put_byte_3

		stmia	r1!, {r3 - r6}

		subs	r2, r2, #16
		bpl	.Linsb_16_lp

		tst	r2, #15
		ldmfdeq	sp!, {r4 - r6, pc}

.Linsb_no_16:	tst	r2, #8
		beq	.Linsb_no_8

		ldrb	r3, [r0]
		ldrb	r4, [r0]
		ldrb	r5, [r0]
		mov	r3, r3, put_byte_0
		ldrb	r6, [r0]
		orr	r3, r3, r4, put_byte_1
		ldrb	r4, [r0]
		orr	r3, r3, r5, put_byte_2
		ldrb	r5, [r0]
		orr	r3, r3, r6, put_byte_3
		ldrb	r6, [r0]
		mov	r4, r4, put_byte_0
		ldrb	ip, [r0]
		orr	r4, r4, r5, put_byte_1
		orr	r4, r4, r6, put_byte_2
		orr	r4, r4, ip, put_byte_3

		stmia	r1!, {r3, r4}

.Linsb_no_8:	tst	r2, #4
		beq	.Linsb_no_4

		ldrb	r3, [r0]
		ldrb	r4, [r0]
		ldrb	r5, [r0]
		ldrb	r6, [r0]
		mov	r3, r3, put_byte_0
		orr	r3, r3, r4, put_byte_1
		orr	r3, r3, r5, put_byte_2
		orr	r3, r3, r6, put_byte_3

		str	r3, [r1], #4

.Linsb_no_4:	ands	r2, r2, #3
		ldmfdeq	sp!, {r4 - r6, pc}

		cmp	r2, #2
		ldrb	r3, [r0]
		strb	r3, [r1], #1
		ldrbge	r3, [r0]
		strbge	r3, [r1], #1
		ldrbgt	r3, [r0]
		strbgt	r3, [r1]

		ldmfd	sp!, {r4 - r6, pc}
ENDPROC(__raw_readsb)
aixcc-public/challenge-001-exemplar-source · arch/arm/lib/io-writesw-armv4.S · 1,587 bytes

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/io-writesw-armv4.S
 *
 *  Copyright (C) 1995-2000 Russell King
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

		.macro	outword, rd
#ifndef __ARMEB__
		strh	\rd, [r0]
		mov	\rd, \rd, lsr #16
		strh	\rd, [r0]
#else
		mov	lr, \rd, lsr #16
		strh	lr, [r0]
		strh	\rd, [r0]
#endif
		.endm

.Loutsw_align:	movs	ip, r1, lsl #31
		bne	.Loutsw_noalign

		ldrh	r3, [r1], #2
		sub	r2, r2, #1
		strh	r3, [r0]

ENTRY(__raw_writesw)
		teq	r2, #0
		reteq	lr
		ands	r3, r1, #3
		bne	.Loutsw_align

		stmfd	sp!, {r4, r5, lr}

		subs	r2, r2, #8
		bmi	.Lno_outsw_8

.Loutsw_8_lp:	ldmia	r1!, {r3, r4, r5, ip}
		subs	r2, r2, #8
		outword	r3
		outword	r4
		outword	r5
		outword	ip
		bpl	.Loutsw_8_lp

.Lno_outsw_8:	tst	r2, #4
		beq	.Lno_outsw_4

		ldmia	r1!, {r3, ip}
		outword	r3
		outword	ip

.Lno_outsw_4:	movs	r2, r2, lsl #31
		bcc	.Lno_outsw_2

		ldr	r3, [r1], #4
		outword	r3

.Lno_outsw_2:	ldrhne	r3, [r1]
		strhne	r3, [r0]

		ldmfd	sp!, {r4, r5, pc}

#ifdef __ARMEB__
#define pull_hbyte0	lsl #8
#define push_hbyte1	lsr #24
#else
#define pull_hbyte0	lsr #24
#define push_hbyte1	lsl #8
#endif

.Loutsw_noalign:
 ARM(		ldr	r3, [r1, -r3]!	)
 THUMB(		rsb	r3, r3, #0	)
 THUMB(		ldr	r3, [r1, r3]	)
 THUMB(		sub	r1, r3		)
		subcs	r2, r2, #1
		bcs	2f
		subs	r2, r2, #2
		bmi	3f

1:		mov	ip, r3, lsr #8
		strh	ip, [r0]

2:		mov	ip, r3, pull_hbyte0
		ldr	r3, [r1, #4]!
		subs	r2, r2, #2
		orr	ip, ip, r3, push_hbyte1
		strh	ip, [r0]
		bpl	1b

		tst	r2, #1
3:		movne	ip, r3, lsr #8
		strhne	ip, [r0]
		ret	lr
ENDPROC(__raw_writesw)
aixcc-public/challenge-001-exemplar-source · arch/arm/lib/memset.S · 2,932 bytes

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/memset.S
 *
 *  Copyright (C) 1995-2000 Russell King
 *
 *  ASM optimised string functions
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>

	.text
	.align	5

ENTRY(__memset)
ENTRY(mmioset)
WEAK(memset)
UNWIND( .fnstart )
	ands	r3, r0, #3		@ 1 unaligned?
	mov	ip, r0			@ preserve r0 as return value
	bne	6f			@ 1
/*
 * we know that the pointer in ip is aligned to a word boundary.
 */
1:	orr	r1, r1, r1, lsl #8
	orr	r1, r1, r1, lsl #16
	mov	r3, r1
7:	cmp	r2, #16
	blt	4f
UNWIND( .fnend )

#if ! CALGN(1)+0

/*
 * We need 2 extra registers for this loop - use r8 and the LR
 */
UNWIND( .fnstart )
UNWIND( .save {r8, lr} )
	stmfd	sp!, {r8, lr}
	mov	r8, r1
	mov	lr, r3

2:	subs	r2, r2, #64
	stmiage	ip!, {r1, r3, r8, lr}	@ 64 bytes at a time.
	stmiage	ip!, {r1, r3, r8, lr}
	stmiage	ip!, {r1, r3, r8, lr}
	stmiage	ip!, {r1, r3, r8, lr}
	bgt	2b
	ldmfdeq	sp!, {r8, pc}		@ Now <64 bytes to go.
/*
 * No need to correct the count; we're only testing bits from now on
 */
	tst	r2, #32
	stmiane	ip!, {r1, r3, r8, lr}
	stmiane	ip!, {r1, r3, r8, lr}
	tst	r2, #16
	stmiane	ip!, {r1, r3, r8, lr}
	ldmfd	sp!, {r8, lr}
UNWIND( .fnend )

#else

/*
 * This version aligns the destination pointer in order to write
 * whole cache lines at once.
 */

UNWIND( .fnstart )
UNWIND( .save {r4-r8, lr} )
	stmfd	sp!, {r4-r8, lr}
	mov	r4, r1
	mov	r5, r3
	mov	r6, r1
	mov	r7, r3
	mov	r8, r1
	mov	lr, r3

	cmp	r2, #96
	tstgt	ip, #31
	ble	3f

	and	r8, ip, #31
	rsb	r8, r8, #32
	sub	r2, r2, r8
	movs	r8, r8, lsl #(32 - 4)
	stmiacs	ip!, {r4, r5, r6, r7}
	stmiami	ip!, {r4, r5}
	tst	r8, #(1 << 30)
	mov	r8, r1
	strne	r1, [ip], #4

3:	subs	r2, r2, #64
	stmiage	ip!, {r1, r3-r8, lr}
	stmiage	ip!, {r1, r3-r8, lr}
	bgt	3b
	ldmfdeq	sp!, {r4-r8, pc}

	tst	r2, #32
	stmiane	ip!, {r1, r3-r8, lr}
	tst	r2, #16
	stmiane	ip!, {r4-r7}
	ldmfd	sp!, {r4-r8, lr}
UNWIND( .fnend )

#endif

UNWIND( .fnstart )
4:	tst	r2, #8
	stmiane	ip!, {r1, r3}
	tst	r2, #4
	strne	r1, [ip], #4
/*
 * When we get here, we've got less than 4 bytes to set.  We
 * may have an unaligned pointer as well.
 */
5:	tst	r2, #2
	strbne	r1, [ip], #1
	strbne	r1, [ip], #1
	tst	r2, #1
	strbne	r1, [ip], #1
	ret	lr

6:	subs	r2, r2, #4		@ 1 do we have enough
	blt	5b			@ 1 bytes to align with?
	cmp	r3, #2			@ 1
	strblt	r1, [ip], #1		@ 1
	strble	r1, [ip], #1		@ 1
	strb	r1, [ip], #1		@ 1
	add	r2, r2, r3		@ 1 (r2 = r2 - (4 - r3))
	b	1b
UNWIND( .fnend )
ENDPROC(memset)
ENDPROC(mmioset)
ENDPROC(__memset)

ENTRY(__memset32)
UNWIND( .fnstart )
	mov	r3, r1			@ copy r1 to r3 and fall into memset64
UNWIND( .fnend )
ENDPROC(__memset32)
ENTRY(__memset64)
UNWIND( .fnstart )
	mov	ip, r0			@ preserve r0 as return value
	b	7b			@ jump into the middle of memset
UNWIND( .fnend )
ENDPROC(__memset64)
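The routine's shape is the classic head/bulk/tail split: replicate the fill byte across a word (the two orr ... lsl instructions), align the destination a byte at a time, then store whole words. A hedged C sketch of that structure, without the unrolling and cache-line alignment tricks:

#include <stddef.h>
#include <stdint.h>

/* Sketch of memset's shape: byte head to reach alignment, replicated
 * word fill for the bulk, byte tail for the remainder. */
static void *memset_sketch(void *s, int c, size_t n)
{
    uint8_t *p = (uint8_t *)s;
    uint32_t fill = (uint8_t)c;

    fill |= fill << 8;                 /* orr r1, r1, r1, lsl #8  */
    fill |= fill << 16;                /* orr r1, r1, r1, lsl #16 */

    while (n && ((uintptr_t)p & 3)) {  /* head: reach word alignment */
        *p++ = (uint8_t)c;
        n--;
    }
    for (; n >= 4; n -= 4, p += 4)     /* bulk: whole words */
        *(uint32_t *)p = fill;
    while (n--)                        /* tail: leftover bytes */
        *p++ = (uint8_t)c;
    return s;
}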
aixcc-public/challenge-001-exemplar-source · arch/arm/lib/io-writesl.S · 1,120 bytes

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/io-writesl.S
 *
 *  Copyright (C) 1995-2000 Russell King
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

ENTRY(__raw_writesl)
		teq	r2, #0		@ do we have to check for the zero len?
		reteq	lr
		ands	ip, r1, #3
		bne	3f
		subs	r2, r2, #4
		bmi	2f
		stmfd	sp!, {r4, lr}
1:		ldmia	r1!, {r3, r4, ip, lr}
		subs	r2, r2, #4
		str	r3, [r0, #0]
		str	r4, [r0, #0]
		str	ip, [r0, #0]
		str	lr, [r0, #0]
		bpl	1b
		ldmfd	sp!, {r4, lr}
2:		movs	r2, r2, lsl #31
		ldmiacs	r1!, {r3, ip}
		strcs	r3, [r0, #0]
		ldrne	r3, [r1, #0]
		strcs	ip, [r0, #0]
		strne	r3, [r0, #0]
		ret	lr

3:		bic	r1, r1, #3
		ldr	r3, [r1], #4
		cmp	ip, #2
		blt	5f
		bgt	6f

4:		mov	ip, r3, lspull #16
		ldr	r3, [r1], #4
		subs	r2, r2, #1
		orr	ip, ip, r3, lspush #16
		str	ip, [r0]
		bne	4b
		ret	lr

5:		mov	ip, r3, lspull #8
		ldr	r3, [r1], #4
		subs	r2, r2, #1
		orr	ip, ip, r3, lspush #24
		str	ip, [r0]
		bne	5b
		ret	lr

6:		mov	ip, r3, lspull #24
		ldr	r3, [r1], #4
		subs	r2, r2, #1
		orr	ip, ip, r3, lspush #8
		str	ip, [r0]
		bne	6b
		ret	lr
ENDPROC(__raw_writesl)
aixcc-public/challenge-001-exemplar-source · arch/arm/lib/delay-loop.S · 1,063 bytes

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/delay.S
 *
 *  Copyright (C) 1995, 1996 Russell King
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/delay.h>

		.text

.LC0:		.word	loops_per_jiffy
.LC1:		.word	UDELAY_MULT

/*
 * loops = r0 * HZ * loops_per_jiffy / 1000000
 *
 * r0  <= 2000
 * HZ  <= 1000
 */

ENTRY(__loop_udelay)
		ldr	r2, .LC1
		mul	r0, r2, r0		@ r0 = delay_us * UDELAY_MULT
ENTRY(__loop_const_udelay)			@ 0 <= r0 <= 0xfffffaf0
		ldr	r2, .LC0
		ldr	r2, [r2]
		umull	r1, r0, r2, r0		@ r0-r1 = r0 * loops_per_jiffy
		adds	r1, r1, #0xffffffff	@ rounding up ...
		adcs	r0, r0, r0		@ and right shift by 31
		reteq	lr

		.align 3

@ Delay routine
ENTRY(__loop_delay)
		subs	r0, r0, #1
#if 0
		retls	lr
		subs	r0, r0, #1
		retls	lr
		subs	r0, r0, #1
		retls	lr
		subs	r0, r0, #1
		retls	lr
		subs	r0, r0, #1
		retls	lr
		subs	r0, r0, #1
		retls	lr
		subs	r0, r0, #1
		retls	lr
		subs	r0, r0, #1
#endif
		bhi	__loop_delay
		ret	lr
ENDPROC(__loop_udelay)
ENDPROC(__loop_const_udelay)
ENDPROC(__loop_delay)
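The adds/adcs pair implements a rounded shift: the 64-bit umull product is divided by 2^31 by doubling the high word and adding one if any low bits were set. A hedged C model of that fixed-point step (the exact encoding of UDELAY_MULT is an assumption based on the "loops = r0 * HZ * loops_per_jiffy / 1000000" comment):

#include <stdint.h>

/* Model of __loop_udelay falling into __loop_const_udelay. */
static uint32_t loop_udelay_count(uint32_t us, uint32_t udelay_mult,
                                  uint32_t loops_per_jiffy)
{
    uint32_t scaled = us * udelay_mult;            /* mul r0, r2, r0 (mod 2^32) */
    uint64_t product = (uint64_t)scaled * loops_per_jiffy;  /* umull r1, r0 */
    uint32_t hi = (uint32_t)(product >> 32);
    uint32_t lo = (uint32_t)product;

    /* adds r1, r1, #0xffffffff sets carry iff lo != 0;
     * adcs r0, r0, r0 then yields 2*hi + carry. */
    return 2 * hi + (lo != 0);
}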
aixcc-public/challenge-001-exemplar-source · arch/arm/lib/io-readsl.S · 1,385 bytes

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/io-readsl.S
 *
 *  Copyright (C) 1995-2000 Russell King
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

ENTRY(__raw_readsl)
		teq	r2, #0		@ do we have to check for the zero len?
		reteq	lr
		ands	ip, r1, #3
		bne	3f

		subs	r2, r2, #4
		bmi	2f
		stmfd	sp!, {r4, lr}
1:		ldr	r3, [r0, #0]
		ldr	r4, [r0, #0]
		ldr	ip, [r0, #0]
		ldr	lr, [r0, #0]
		subs	r2, r2, #4
		stmia	r1!, {r3, r4, ip, lr}
		bpl	1b
		ldmfd	sp!, {r4, lr}
2:		movs	r2, r2, lsl #31
		ldrcs	r3, [r0, #0]
		ldrcs	ip, [r0, #0]
		stmiacs	r1!, {r3, ip}
		ldrne	r3, [r0, #0]
		strne	r3, [r1, #0]
		ret	lr

3:		ldr	r3, [r0]
		cmp	ip, #2
		mov	ip, r3, get_byte_0
		strb	ip, [r1], #1
		bgt	6f
		mov	ip, r3, get_byte_1
		strb	ip, [r1], #1
		beq	5f
		mov	ip, r3, get_byte_2
		strb	ip, [r1], #1

4:		subs	r2, r2, #1
		mov	ip, r3, lspull #24
		ldrne	r3, [r0]
		orrne	ip, ip, r3, lspush #8
		strne	ip, [r1], #4
		bne	4b
		b	8f

5:		subs	r2, r2, #1
		mov	ip, r3, lspull #16
		ldrne	r3, [r0]
		orrne	ip, ip, r3, lspush #16
		strne	ip, [r1], #4
		bne	5b
		b	7f

6:		subs	r2, r2, #1
		mov	ip, r3, lspull #8
		ldrne	r3, [r0]
		orrne	ip, ip, r3, lspush #24
		strne	ip, [r1], #4
		bne	6b

		mov	r3, ip, get_byte_2
		strb	r3, [r1, #2]
7:		mov	r3, ip, get_byte_1
		strb	r3, [r1, #1]
8:		mov	r3, ip, get_byte_0
		strb	r3, [r1, #0]
		ret	lr
ENDPROC(__raw_readsl)
aixcc-public/challenge-001-exemplar-source · arch/arm/lib/copy_from_user.S · 2,409 bytes

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/copy_from_user.S
 *
 *  Author:	Nicolas Pitre
 *  Created:	Sep 29, 2005
 *  Copyright:	MontaVista Software, Inc.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>

/*
 * Prototype:
 *
 *	size_t arm_copy_from_user(void *to, const void *from, size_t n)
 *
 * Purpose:
 *
 *	copy a block to kernel memory from user memory
 *
 * Params:
 *
 *	to = kernel memory
 *	from = user memory
 *	n = number of bytes to copy
 *
 * Return value:
 *
 *	Number of bytes NOT copied.
 */

#ifdef CONFIG_CPU_USE_DOMAINS

#ifndef CONFIG_THUMB2_KERNEL
#define LDR1W_SHIFT	0
#else
#define LDR1W_SHIFT	1
#endif

	.macro ldr1w ptr reg abort
	ldrusr	\reg, \ptr, 4, abort=\abort
	.endm

	.macro ldr4w ptr reg1 reg2 reg3 reg4 abort
	ldr1w \ptr, \reg1, \abort
	ldr1w \ptr, \reg2, \abort
	ldr1w \ptr, \reg3, \abort
	ldr1w \ptr, \reg4, \abort
	.endm

	.macro ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
	ldr4w \ptr, \reg1, \reg2, \reg3, \reg4, \abort
	ldr4w \ptr, \reg5, \reg6, \reg7, \reg8, \abort
	.endm

#else

#define LDR1W_SHIFT	0

	.macro ldr1w ptr reg abort
	USERL(\abort, W(ldr) \reg, [\ptr], #4)
	.endm

	.macro ldr4w ptr reg1 reg2 reg3 reg4 abort
	USERL(\abort, ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4})
	.endm

	.macro ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
	USERL(\abort, ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8})
	.endm

#endif /* CONFIG_CPU_USE_DOMAINS */

	.macro ldr1b ptr reg cond=al abort
	ldrusr	\reg, \ptr, 1, \cond, abort=\abort
	.endm

#define STR1W_SHIFT	0

	.macro str1w ptr reg abort
	W(str) \reg, [\ptr], #4
	.endm

	.macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
	stmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8}
	.endm

	.macro str1b ptr reg cond=al abort
	strb\cond \reg, [\ptr], #1
	.endm

	.macro enter regs:vararg
	mov	r3, #0
UNWIND( .save	{r0, r2, r3, \regs}	)
	stmdb	sp!, {r0, r2, r3, \regs}
	.endm

	.macro exit regs:vararg
	add	sp, sp, #8
	ldmfd	sp!, {r0, \regs}
	.endm

	.text

ENTRY(arm_copy_from_user)
#ifdef CONFIG_CPU_SPECTRE
	ldr	r3, =TASK_SIZE
	uaccess_mask_range_ptr r1, r2, r3, ip
#endif

#include "copy_template.S"

ENDPROC(arm_copy_from_user)

	.pushsection .text.fixup,"ax"
	.align 0
	copy_abort_preamble
	ldmfd	sp!, {r1, r2, r3}
	sub	r0, r0, r1
	rsb	r0, r0, r2
	copy_abort_end
	.popsection
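As the prototype comment says, the return value is the number of bytes NOT copied, so 0 means complete success. A hedged C usage sketch of that convention (the caller names are illustrative):

#include <stddef.h>

size_t arm_copy_from_user(void *to, const void *from, size_t n);

/* Typical caller pattern: any nonzero remainder is treated as a fault. */
static int fetch_from_user(void *kbuf, const void *ubuf, size_t len)
{
    size_t not_copied = arm_copy_from_user(kbuf, ubuf, len);
    return not_copied ? -14 /* -EFAULT */ : 0;
}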
aixcc-public/challenge-001-exemplar-source · arch/arm/lib/lib1funcs.S · 8,247 bytes

/*
 * linux/arch/arm/lib/lib1funcs.S: Optimized ARM division routines
 *
 * Author: Nicolas Pitre <nico@fluxnic.net>
 *   - contributed to gcc-3.4 on Sep 30, 2003
 *   - adapted for the Linux kernel on Oct 2, 2003
 */

/* Copyright 1995, 1996, 1998, 1999, 2000, 2003 Free Software Foundation, Inc.

This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine executable.)

This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330, Boston,
MA 02111-1307, USA.  */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>

.macro ARM_DIV_BODY dividend, divisor, result, curbit

#if __LINUX_ARM_ARCH__ >= 5

	clz	\curbit, \divisor
	clz	\result, \dividend
	sub	\result, \curbit, \result
	mov	\curbit, #1
	mov	\divisor, \divisor, lsl \result
	mov	\curbit, \curbit, lsl \result
	mov	\result, #0

#else

	@ Initially shift the divisor left 3 bits if possible,
	@ set curbit accordingly.  This allows for curbit to be located
	@ at the left end of each 4 bit nibbles in the division loop
	@ to save one loop in most cases.
	tst	\divisor, #0xe0000000
	moveq	\divisor, \divisor, lsl #3
	moveq	\curbit, #8
	movne	\curbit, #1

	@ Unless the divisor is very big, shift it up in multiples of
	@ four bits, since this is the amount of unwinding in the main
	@ division loop.  Continue shifting until the divisor is
	@ larger than the dividend.
1:	cmp	\divisor, #0x10000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #4
	movlo	\curbit, \curbit, lsl #4
	blo	1b

	@ For very big divisors, we must shift it a bit at a time, or
	@ we will be in danger of overflowing.
1:	cmp	\divisor, #0x80000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #1
	movlo	\curbit, \curbit, lsl #1
	blo	1b

	mov	\result, #0

#endif

	@ Division loop
1:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	orrhs	\result,   \result,   \curbit
	cmp	\dividend, \divisor,  lsr #1
	subhs	\dividend, \dividend, \divisor, lsr #1
	orrhs	\result,   \result,   \curbit,  lsr #1
	cmp	\dividend, \divisor,  lsr #2
	subhs	\dividend, \dividend, \divisor, lsr #2
	orrhs	\result,   \result,   \curbit,  lsr #2
	cmp	\dividend, \divisor,  lsr #3
	subhs	\dividend, \dividend, \divisor, lsr #3
	orrhs	\result,   \result,   \curbit,  lsr #3
	cmp	\dividend, #0			@ Early termination?
	movsne	\curbit,   \curbit,  lsr #4	@ No, any more bits to do?
	movne	\divisor,  \divisor, lsr #4
	bne	1b

.endm

.macro ARM_DIV2_ORDER divisor, order

#if __LINUX_ARM_ARCH__ >= 5

	clz	\order, \divisor
	rsb	\order, \order, #31

#else

	cmp	\divisor, #(1 << 16)
	movhs	\divisor, \divisor, lsr #16
	movhs	\order, #16
	movlo	\order, #0

	cmp	\divisor, #(1 << 8)
	movhs	\divisor, \divisor, lsr #8
	addhs	\order, \order, #8

	cmp	\divisor, #(1 << 4)
	movhs	\divisor, \divisor, lsr #4
	addhs	\order, \order, #4

	cmp	\divisor, #(1 << 2)
	addhi	\order, \order, #3
	addls	\order, \order, \divisor, lsr #1

#endif

.endm

.macro ARM_MOD_BODY dividend, divisor, order, spare

#if __LINUX_ARM_ARCH__ >= 5

	clz	\order, \divisor
	clz	\spare, \dividend
	sub	\order, \order, \spare
	mov	\divisor, \divisor, lsl \order

#else

	mov	\order, #0

	@ Unless the divisor is very big, shift it up in multiples of
	@ four bits, since this is the amount of unwinding in the main
	@ division loop.  Continue shifting until the divisor is
	@ larger than the dividend.
1:	cmp	\divisor, #0x10000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #4
	addlo	\order, \order, #4
	blo	1b

	@ For very big divisors, we must shift it a bit at a time, or
	@ we will be in danger of overflowing.
1:	cmp	\divisor, #0x80000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #1
	addlo	\order, \order, #1
	blo	1b

#endif

	@ Perform all needed subtractions to keep only the remainder.
	@ Do comparisons in batch of 4 first.
	subs	\order, \order, #3		@ yes, 3 is intended here
	blt	2f

1:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	cmp	\dividend, \divisor,  lsr #1
	subhs	\dividend, \dividend, \divisor, lsr #1
	cmp	\dividend, \divisor,  lsr #2
	subhs	\dividend, \dividend, \divisor, lsr #2
	cmp	\dividend, \divisor,  lsr #3
	subhs	\dividend, \dividend, \divisor, lsr #3
	cmp	\dividend, #1
	mov	\divisor, \divisor, lsr #4
	subsge	\order, \order, #4
	bge	1b

	tst	\order, #3
	teqne	\dividend, #0
	beq	5f

	@ Either 1, 2 or 3 comparison/subtractions are left.
2:	cmn	\order, #2
	blt	4f
	beq	3f
	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	mov	\divisor,  \divisor,  lsr #1
3:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	mov	\divisor,  \divisor,  lsr #1
4:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
5:
.endm

#ifdef CONFIG_ARM_PATCH_IDIV
	.align	3
#endif

ENTRY(__udivsi3)
ENTRY(__aeabi_uidiv)
UNWIND(.fnstart)

	subs	r2, r1, #1
	reteq	lr
	bcc	Ldiv0
	cmp	r0, r1
	bls	11f
	tst	r1, r2
	beq	12f

	ARM_DIV_BODY r0, r1, r2, r3

	mov	r0, r2
	ret	lr

11:	moveq	r0, #1
	movne	r0, #0
	ret	lr

12:	ARM_DIV2_ORDER r1, r2

	mov	r0, r0, lsr r2
	ret	lr

UNWIND(.fnend)
ENDPROC(__udivsi3)
ENDPROC(__aeabi_uidiv)

ENTRY(__umodsi3)
UNWIND(.fnstart)

	subs	r2, r1, #1			@ compare divisor with 1
	bcc	Ldiv0
	cmpne	r0, r1				@ compare dividend with divisor
	moveq	r0, #0
	tsthi	r1, r2				@ see if divisor is power of 2
	andeq	r0, r0, r2
	retls	lr

	ARM_MOD_BODY r0, r1, r2, r3

	ret	lr

UNWIND(.fnend)
ENDPROC(__umodsi3)

#ifdef CONFIG_ARM_PATCH_IDIV
	.align 3
#endif

ENTRY(__divsi3)
ENTRY(__aeabi_idiv)
UNWIND(.fnstart)

	cmp	r1, #0
	eor	ip, r0, r1			@ save the sign of the result.
	beq	Ldiv0
	rsbmi	r1, r1, #0			@ loops below use unsigned.
	subs	r2, r1, #1			@ division by 1 or -1 ?
	beq	10f
	movs	r3, r0
	rsbmi	r3, r0, #0			@ positive dividend value
	cmp	r3, r1
	bls	11f
	tst	r1, r2				@ divisor is power of 2 ?
	beq	12f

	ARM_DIV_BODY r3, r1, r0, r2

	cmp	ip, #0
	rsbmi	r0, r0, #0
	ret	lr

10:	teq	ip, r0				@ same sign ?
	rsbmi	r0, r0, #0
	ret	lr

11:	movlo	r0, #0
	moveq	r0, ip, asr #31
	orreq	r0, r0, #1
	ret	lr

12:	ARM_DIV2_ORDER r1, r2

	cmp	ip, #0
	mov	r0, r3, lsr r2
	rsbmi	r0, r0, #0
	ret	lr

UNWIND(.fnend)
ENDPROC(__divsi3)
ENDPROC(__aeabi_idiv)

ENTRY(__modsi3)
UNWIND(.fnstart)

	cmp	r1, #0
	beq	Ldiv0
	rsbmi	r1, r1, #0			@ loops below use unsigned.
	movs	ip, r0				@ preserve sign of dividend
	rsbmi	r0, r0, #0			@ if negative make positive
	subs	r2, r1, #1			@ compare divisor with 1
	cmpne	r0, r1				@ compare dividend with divisor
	moveq	r0, #0
	tsthi	r1, r2				@ see if divisor is power of 2
	andeq	r0, r0, r2
	bls	10f

	ARM_MOD_BODY r0, r1, r2, r3

10:	cmp	ip, #0
	rsbmi	r0, r0, #0
	ret	lr

UNWIND(.fnend)
ENDPROC(__modsi3)

#ifdef CONFIG_AEABI

ENTRY(__aeabi_uidivmod)
UNWIND(.fnstart)
UNWIND(.save {r0, r1, ip, lr}	)

	stmfd	sp!, {r0, r1, ip, lr}
	bl	__aeabi_uidiv
	ldmfd	sp!, {r1, r2, ip, lr}
	mul	r3, r0, r2
	sub	r1, r1, r3
	ret	lr

UNWIND(.fnend)
ENDPROC(__aeabi_uidivmod)

ENTRY(__aeabi_idivmod)
UNWIND(.fnstart)
UNWIND(.save {r0, r1, ip, lr}	)
	stmfd	sp!, {r0, r1, ip, lr}
	bl	__aeabi_idiv
	ldmfd	sp!, {r1, r2, ip, lr}
	mul	r3, r0, r2
	sub	r1, r1, r3
	ret	lr

UNWIND(.fnend)
ENDPROC(__aeabi_idivmod)

#endif

Ldiv0:
UNWIND(.fnstart)
UNWIND(.pad #4)
UNWIND(.save {lr})
	str	lr, [sp, #-8]!
	bl	__div0
	mov	r0, #0			@ About as wrong as it could be.
	ldr	pc, [sp], #8
UNWIND(.fnend)
ENDPROC(Ldiv0)
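ARM_DIV_BODY is shift-and-subtract division: align the divisor under the dividend's top bit, then subtract and set quotient bits while shifting back down (the assembly unrolls this four bits per loop). A hedged C sketch of the un-unrolled algorithm, assuming both operands are nonzero since __builtin_clz(0) is undefined:

#include <stdint.h>

/* One-bit-at-a-time version of ARM_DIV_BODY's scheme. */
static uint32_t udivsi3(uint32_t dividend, uint32_t divisor)
{
    uint32_t result = 0, curbit = 1;

    /* clz-based alignment, as in the __LINUX_ARM_ARCH__ >= 5 path */
    int shift = __builtin_clz(divisor) - __builtin_clz(dividend);
    if (shift > 0) {
        divisor <<= shift;
        curbit  <<= shift;
    }
    while (curbit) {
        if (dividend >= divisor) {      /* cmp / subhs / orrhs */
            dividend -= divisor;
            result   |= curbit;
        }
        divisor >>= 1;
        curbit  >>= 1;
    }
    return result;                      /* dividend now holds the remainder */
}

For example, udivsi3(100, 7) aligns the divisor to 112, then peels off quotient bits 8, 4 and 2 to return 14.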
aixcc-public/challenge-001-exemplar-source · arch/arm/lib/io-readsw-armv4.S · 2,232 bytes

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/io-readsw-armv4.S
 *
 *  Copyright (C) 1995-2000 Russell King
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

		.macro	pack, rd, hw1, hw2
#ifndef __ARMEB__
		orr	\rd, \hw1, \hw2, lsl #16
#else
		orr	\rd, \hw2, \hw1, lsl #16
#endif
		.endm

.Linsw_align:	movs	ip, r1, lsl #31
		bne	.Linsw_noalign

		ldrh	ip, [r0]
		sub	r2, r2, #1
		strh	ip, [r1], #2

ENTRY(__raw_readsw)
		teq	r2, #0
		reteq	lr
		tst	r1, #3
		bne	.Linsw_align

		stmfd	sp!, {r4, r5, lr}

		subs	r2, r2, #8
		bmi	.Lno_insw_8

.Linsw_8_lp:	ldrh	r3, [r0]
		ldrh	r4, [r0]
		pack	r3, r3, r4

		ldrh	r4, [r0]
		ldrh	r5, [r0]
		pack	r4, r4, r5

		ldrh	r5, [r0]
		ldrh	ip, [r0]
		pack	r5, r5, ip

		ldrh	ip, [r0]
		ldrh	lr, [r0]
		pack	ip, ip, lr

		subs	r2, r2, #8
		stmia	r1!, {r3 - r5, ip}
		bpl	.Linsw_8_lp

.Lno_insw_8:	tst	r2, #4
		beq	.Lno_insw_4

		ldrh	r3, [r0]
		ldrh	r4, [r0]
		pack	r3, r3, r4

		ldrh	r4, [r0]
		ldrh	ip, [r0]
		pack	r4, r4, ip

		stmia	r1!, {r3, r4}

.Lno_insw_4:	movs	r2, r2, lsl #31
		bcc	.Lno_insw_2

		ldrh	r3, [r0]
		ldrh	ip, [r0]
		pack	r3, r3, ip
		str	r3, [r1], #4

.Lno_insw_2:	ldrhne	r3, [r0]
		strhne	r3, [r1]

		ldmfd	sp!, {r4, r5, pc}

#ifdef __ARMEB__
#define _BE_ONLY_(code...)	code
#define _LE_ONLY_(code...)
#define push_hbyte0		lsr #8
#define pull_hbyte1		lsl #24
#else
#define _BE_ONLY_(code...)
#define _LE_ONLY_(code...)	code
#define push_hbyte0		lsl #24
#define pull_hbyte1		lsr #8
#endif

.Linsw_noalign:	stmfd	sp!, {r4, lr}
		ldrbcc	ip, [r1, #-1]!
		bcc	1f

		ldrh	ip, [r0]
		sub	r2, r2, #1
   _BE_ONLY_(	mov	ip, ip, ror #8		)
		strb	ip, [r1], #1
   _LE_ONLY_(	mov	ip, ip, lsr #8		)
   _BE_ONLY_(	mov	ip, ip, lsr #24		)

1:		subs	r2, r2, #2
		bmi	3f
   _BE_ONLY_(	mov	ip, ip, lsl #24		)

2:		ldrh	r3, [r0]
		ldrh	r4, [r0]
		subs	r2, r2, #2
		orr	ip, ip, r3, lsl #8
		orr	ip, ip, r4, push_hbyte0
		str	ip, [r1], #4
		mov	ip, r4, pull_hbyte1
		bpl	2b

   _BE_ONLY_(	mov	ip, ip, lsr #24		)

3:		tst	r2, #1
		strb	ip, [r1], #1
		ldrhne	ip, [r0]
   _BE_ONLY_(	movne	ip, ip, ror #8		)
		strbne	ip, [r1], #1
   _LE_ONLY_(	movne	ip, ip, lsr #8	)
   _BE_ONLY_(	movne	ip, ip, lsr #24	)
		strbne	ip, [r1]

		ldmfd	sp!, {r4, pc}
ENDPROC(__raw_readsw)
aixcc-public/challenge-001-exemplar-source · arch/arm/lib/getuser.S · 3,479 bytes

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/getuser.S
 *
 *  Copyright (C) 2001 Russell King
 *
 *  Idea from x86 version, (C) Copyright 1998 Linus Torvalds
 *
 * These functions have a non-standard call interface to make them more
 * efficient, especially as they return an error value in addition to
 * the "real" return value.
 *
 * __get_user_X
 *
 * Inputs:	r0 contains the address
 *		r1 contains the address limit, which must be preserved
 * Outputs:	r0 is the error code
 *		r2, r3 contains the zero-extended value
 *		lr corrupted
 *
 * No other registers must be altered.  (see <asm/uaccess.h>
 * for specific ASM register usage).
 *
 * Note that ADDR_LIMIT is either 0 or 0xc0000000.
 * Note also that it is intended that __get_user_bad is not global.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/domain.h>

ENTRY(__get_user_1)
	check_uaccess r0, 1, r1, r2, __get_user_bad
1:	TUSER(ldrb)	r2, [r0]
	mov	r0, #0
	ret	lr
ENDPROC(__get_user_1)
_ASM_NOKPROBE(__get_user_1)

ENTRY(__get_user_2)
	check_uaccess r0, 2, r1, r2, __get_user_bad
#if __LINUX_ARM_ARCH__ >= 6

2:	TUSER(ldrh)	r2, [r0]

#else

#ifdef CONFIG_CPU_USE_DOMAINS
rb	.req	ip
2:	ldrbt	r2, [r0], #1
3:	ldrbt	rb, [r0], #0
#else
rb	.req	r0
2:	ldrb	r2, [r0]
3:	ldrb	rb, [r0, #1]
#endif
#ifndef __ARMEB__
	orr	r2, r2, rb, lsl #8
#else
	orr	r2, rb, r2, lsl #8
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

	mov	r0, #0
	ret	lr
ENDPROC(__get_user_2)
_ASM_NOKPROBE(__get_user_2)

ENTRY(__get_user_4)
	check_uaccess r0, 4, r1, r2, __get_user_bad
4:	TUSER(ldr)	r2, [r0]
	mov	r0, #0
	ret	lr
ENDPROC(__get_user_4)
_ASM_NOKPROBE(__get_user_4)

ENTRY(__get_user_8)
	check_uaccess r0, 8, r1, r2, __get_user_bad8
#ifdef CONFIG_THUMB2_KERNEL
5:	TUSER(ldr)	r2, [r0]
6:	TUSER(ldr)	r3, [r0, #4]
#else
5:	TUSER(ldr)	r2, [r0], #4
6:	TUSER(ldr)	r3, [r0]
#endif
	mov	r0, #0
	ret	lr
ENDPROC(__get_user_8)
_ASM_NOKPROBE(__get_user_8)

#ifdef __ARMEB__
ENTRY(__get_user_32t_8)
	check_uaccess r0, 8, r1, r2, __get_user_bad
#ifdef CONFIG_CPU_USE_DOMAINS
	add	r0, r0, #4
7:	ldrt	r2, [r0]
#else
7:	ldr	r2, [r0, #4]
#endif
	mov	r0, #0
	ret	lr
ENDPROC(__get_user_32t_8)
_ASM_NOKPROBE(__get_user_32t_8)

ENTRY(__get_user_64t_1)
	check_uaccess r0, 1, r1, r2, __get_user_bad8
8:	TUSER(ldrb)	r3, [r0]
	mov	r0, #0
	ret	lr
ENDPROC(__get_user_64t_1)
_ASM_NOKPROBE(__get_user_64t_1)

ENTRY(__get_user_64t_2)
	check_uaccess r0, 2, r1, r2, __get_user_bad8
#ifdef CONFIG_CPU_USE_DOMAINS
rb	.req	ip
9:	ldrbt	r3, [r0], #1
10:	ldrbt	rb, [r0], #0
#else
rb	.req	r0
9:	ldrb	r3, [r0]
10:	ldrb	rb, [r0, #1]
#endif
	orr	r3, rb, r3, lsl #8
	mov	r0, #0
	ret	lr
ENDPROC(__get_user_64t_2)
_ASM_NOKPROBE(__get_user_64t_2)

ENTRY(__get_user_64t_4)
	check_uaccess r0, 4, r1, r2, __get_user_bad8
11:	TUSER(ldr)	r3, [r0]
	mov	r0, #0
	ret	lr
ENDPROC(__get_user_64t_4)
_ASM_NOKPROBE(__get_user_64t_4)
#endif

__get_user_bad8:
	mov	r3, #0
__get_user_bad:
	mov	r2, #0
	mov	r0, #-EFAULT
	ret	lr
ENDPROC(__get_user_bad)
ENDPROC(__get_user_bad8)
_ASM_NOKPROBE(__get_user_bad)
_ASM_NOKPROBE(__get_user_bad8)

.pushsection __ex_table, "a"
	.long	1b, __get_user_bad
	.long	2b, __get_user_bad
#if __LINUX_ARM_ARCH__ < 6
	.long	3b, __get_user_bad
#endif
	.long	4b, __get_user_bad
	.long	5b, __get_user_bad8
	.long	6b, __get_user_bad8
#ifdef __ARMEB__
	.long	7b, __get_user_bad
	.long	8b, __get_user_bad8
	.long	9b, __get_user_bad8
	.long	10b, __get_user_bad8
	.long	11b, __get_user_bad8
#endif
.popsection
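Because the call interface is non-standard (address in r0, error back in r0, value in r2/r3), these helpers are reached through the get_user() macro rather than called directly. A loose, hedged C-level model of the contract only; the access_ok flag stands in for both check_uaccess and the __ex_table fixup that redirects a faulting load to __get_user_bad:

#include <stdint.h>

#define EFAULT 14

/* Model: on any failure the value is zeroed and -EFAULT returned,
 * matching __get_user_bad; otherwise the value is fetched and 0
 * returned. This is an illustration, not the kernel API. */
static int get_user_u32_model(const uint32_t *uaddr, int access_ok,
                              uint32_t *value)
{
    if (!access_ok) {        /* check_uaccess failed or load faulted */
        *value = 0;          /* mov r2, #0 */
        return -EFAULT;      /* mov r0, #-EFAULT */
    }
    *value = *uaddr;         /* TUSER(ldr) r2, [r0] */
    return 0;                /* mov r0, #0 */
}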
aixcc-public/challenge-001-exemplar-source · arch/arm/lib/putuser.S · 2,052 bytes

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/putuser.S
 *
 *  Copyright (C) 2001 Russell King
 *
 *  Idea from x86 version, (C) Copyright 1998 Linus Torvalds
 *
 * These functions have a non-standard call interface to make
 * them more efficient, especially as they return an error
 * value in addition to the "real" return value.
 *
 * __put_user_X
 *
 * Inputs:	r0 contains the address
 *		r1 contains the address limit, which must be preserved
 *		r2, r3 contains the value
 * Outputs:	r0 is the error code
 *		lr corrupted
 *
 * No other registers must be altered.  (see <asm/uaccess.h>
 * for specific ASM register usage).
 *
 * Note that ADDR_LIMIT is either 0 or 0xc0000000
 * Note also that it is intended that __put_user_bad is not global.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/domain.h>

ENTRY(__put_user_1)
	check_uaccess r0, 1, r1, ip, __put_user_bad
1:	TUSER(strb)	r2, [r0]
	mov	r0, #0
	ret	lr
ENDPROC(__put_user_1)

ENTRY(__put_user_2)
	check_uaccess r0, 2, r1, ip, __put_user_bad
#if __LINUX_ARM_ARCH__ >= 6

2:	TUSER(strh)	r2, [r0]

#else

	mov	ip, r2, lsr #8
#ifndef __ARMEB__
2:	TUSER(strb)	r2, [r0], #1
3:	TUSER(strb)	ip, [r0]
#else
2:	TUSER(strb)	ip, [r0], #1
3:	TUSER(strb)	r2, [r0]
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

	mov	r0, #0
	ret	lr
ENDPROC(__put_user_2)

ENTRY(__put_user_4)
	check_uaccess r0, 4, r1, ip, __put_user_bad
4:	TUSER(str)	r2, [r0]
	mov	r0, #0
	ret	lr
ENDPROC(__put_user_4)

ENTRY(__put_user_8)
	check_uaccess r0, 8, r1, ip, __put_user_bad
#ifdef CONFIG_THUMB2_KERNEL
5:	TUSER(str)	r2, [r0]
6:	TUSER(str)	r3, [r0, #4]
#else
5:	TUSER(str)	r2, [r0], #4
6:	TUSER(str)	r3, [r0]
#endif
	mov	r0, #0
	ret	lr
ENDPROC(__put_user_8)

__put_user_bad:
	mov	r0, #-EFAULT
	ret	lr
ENDPROC(__put_user_bad)

.pushsection __ex_table, "a"
	.long	1b, __put_user_bad
	.long	2b, __put_user_bad
#if __LINUX_ARM_ARCH__ < 6
	.long	3b, __put_user_bad
#endif
	.long	4b, __put_user_bad
	.long	5b, __put_user_bad
	.long	6b, __put_user_bad
.popsection
aixcc-public/challenge-001-exemplar-source · arch/arm/lib/call_with_stack.S · 1,123 bytes

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/lib/call_with_stack.S
 *
 * Copyright (C) 2011 ARM Ltd.
 * Written by Will Deacon <will.deacon@arm.com>
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>

/*
 * void call_with_stack(void (*fn)(void *), void *arg, void *sp)
 *
 * Change the stack to that pointed at by sp, then invoke fn(arg) with
 * the new stack.
 *
 * The sequence below follows the APCS frame convention for frame pointer
 * unwinding, and implements the unwinder annotations needed by the EABI
 * unwinder.
 */
ENTRY(call_with_stack)
#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
	mov	ip, sp
	push	{fp, ip, lr, pc}
	sub	fp, ip, #4
#else
UNWIND( .fnstart	)
UNWIND( .save	{fpreg, lr}	)
	push	{fpreg, lr}
UNWIND( .setfp	fpreg, sp	)
	mov	fpreg, sp
#endif
	mov	sp, r2
	mov	r2, r0
	mov	r0, r1

	bl_r	r2

#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
	ldmdb	fp, {fp, sp, pc}
#else
	mov	sp, fpreg
	pop	{fpreg, pc}
UNWIND( .fnend		)
#endif
	.globl call_with_stack_end
call_with_stack_end:
ENDPROC(call_with_stack)
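call_with_stack() runs fn(arg) on a caller-supplied stack, then restores the original one. A hedged C usage sketch; the buffer and function names are illustrative, and sp must point at the high end of the buffer because ARM stacks grow downwards:

void call_with_stack(void (*fn)(void *), void *arg, void *sp);

/* AAPCS expects 8-byte stack alignment at call boundaries. */
static unsigned char scratch_stack[4096] __attribute__((aligned(8)));

static void worker(void *arg)
{
    (void)arg;   /* runs with sp inside scratch_stack */
}

static void run_on_scratch_stack(void)
{
    call_with_stack(worker, 0, scratch_stack + sizeof(scratch_stack));
}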
aixcc-public/challenge-001-exemplar-source · arch/arm/mach-exynos/sleep.S · 2,332 bytes

/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Exynos low-level resume code
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/hardware/cache-l2x0.h>
#include "smc.h"

#define CPU_MASK	0xff0ffff0
#define CPU_CORTEX_A9	0x410fc090

	.text
	.align

	/*
	 * sleep magic, to allow the bootloader to check for a valid
	 * image to resume to. Must be the first word before the
	 * exynos_cpu_resume entry.
	 */

	.word	0x2bedf00d

	/*
	 * exynos_cpu_resume
	 *
	 * resume code entry for bootloader to call
	 */

ENTRY(exynos_cpu_resume)
#ifdef CONFIG_CACHE_L2X0
	mrc	p15, 0, r0, c0, c0, 0
	ldr	r1, =CPU_MASK
	and	r0, r0, r1
	ldr	r1, =CPU_CORTEX_A9
	cmp	r0, r1
	bleq	l2c310_early_resume
#endif
	b	cpu_resume
ENDPROC(exynos_cpu_resume)

	.align
	.arch armv7-a
	.arch_extension sec
ENTRY(exynos_cpu_resume_ns)
	mrc	p15, 0, r0, c0, c0, 0
	ldr	r1, =CPU_MASK
	and	r0, r0, r1
	ldr	r1, =CPU_CORTEX_A9
	cmp	r0, r1
	bne	skip_cp15

	adr	r0, _cp15_save_power
	ldr	r1, [r0]
	ldr	r1, [r0, r1]
	adr	r0, _cp15_save_diag
	ldr	r2, [r0]
	ldr	r2, [r0, r2]
	mov	r0, #SMC_CMD_C15RESUME
	dsb
	smc	#0
#ifdef CONFIG_CACHE_L2X0
	adr	r0, 1f
	ldr	r2, [r0]
	add	r0, r2, r0

	/* Check that the address has been initialised. */
	ldr	r1, [r0, #L2X0_R_PHY_BASE]
	teq	r1, #0
	beq	skip_l2x0

	/* Check if controller has been enabled. */
	ldr	r2, [r1, #L2X0_CTRL]
	tst	r2, #0x1
	bne	skip_l2x0

	ldr	r1, [r0, #L2X0_R_TAG_LATENCY]
	ldr	r2, [r0, #L2X0_R_DATA_LATENCY]
	ldr	r3, [r0, #L2X0_R_PREFETCH_CTRL]
	mov	r0, #SMC_CMD_L2X0SETUP1
	smc	#0

	/* Reload saved regs pointer because smc corrupts registers. */
	adr	r0, 1f
	ldr	r2, [r0]
	add	r0, r2, r0

	ldr	r1, [r0, #L2X0_R_PWR_CTRL]
	ldr	r2, [r0, #L2X0_R_AUX_CTRL]
	mov	r0, #SMC_CMD_L2X0SETUP2
	smc	#0

	mov	r0, #SMC_CMD_L2X0INVALL
	smc	#0

	mov	r1, #1
	mov	r0, #SMC_CMD_L2X0CTRL
	smc	#0
skip_l2x0:
#endif /* CONFIG_CACHE_L2X0 */
skip_cp15:
	b	cpu_resume
ENDPROC(exynos_cpu_resume_ns)

	.align
_cp15_save_power:
	.long	cp15_save_power - .
_cp15_save_diag:
	.long	cp15_save_diag - .
#ifdef CONFIG_CACHE_L2X0
1:	.long	l2x0_saved_regs - .
#endif /* CONFIG_CACHE_L2X0 */

	.data
	.align	2
	.globl cp15_save_diag
cp15_save_diag:
	.long	0	@ cp15 diagnostic
	.globl cp15_save_power
cp15_save_power:
	.long	0	@ cp15 power control
aixcc-public/challenge-001-exemplar-source · arch/arm/mach-sa1100/sleep.S · 2,898 bytes

/*
 * SA11x0 Assembler Sleep/WakeUp Management Routines
 *
 * Copyright (c) 2001 Cliff Brake <cbrake@accelent.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License.
 *
 * History:
 *
 * 2001-02-06: Cliff Brake		Initial code
 *
 * 2001-08-29: Nicolas Pitre		Simplified.
 *
 * 2002-05-27: Nicolas Pitre		Revisited, more cleanup and simplification.
 *					Storage is on the stack now.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <mach/hardware.h>

		.text

/*
 * sa1100_finish_suspend()
 *
 * Causes sa11x0 to enter sleep state
 *
 * Must be aligned to a cacheline.
 */
	.balign 32
ENTRY(sa1100_finish_suspend)
	@ disable clock switching
	mcr	p15, 0, r1, c15, c2, 2

	ldr	r6, =MDREFR
	ldr	r4, [r6]
	orr	r4, r4, #MDREFR_K1DB2
	ldr	r5, =PPCR

	@ Pre-load __loop_udelay into the I-cache
	mov	r0, #1
	bl	__loop_udelay
	mov	r0, r0

	@ The following must all exist in a single cache line to
	@ avoid accessing memory until this sequence is complete,
	@ otherwise we occasionally hang.

	@ Adjust memory timing before lowering CPU clock
	str	r4, [r6]

	@ delay 90us and set CPU PLL to lowest speed
	@ fixes resume problem on high speed SA1110
	mov	r0, #90
	bl	__loop_udelay
	mov	r1, #0
	str	r1, [r5]
	mov	r0, #90
	bl	__loop_udelay

	/*
	 * SA1110 SDRAM controller workaround. register values:
	 *
	 * r0  = &MSC0
	 * r1  = &MSC1
	 * r2  = &MSC2
	 * r3  = MSC0 value
	 * r4  = MSC1 value
	 * r5  = MSC2 value
	 * r6  = &MDREFR
	 * r7  = first MDREFR value
	 * r8  = second MDREFR value
	 * r9  = &MDCNFG
	 * r10 = MDCNFG value
	 * r11 = third MDREFR value
	 * r12 = &PMCR
	 * r13 = PMCR value (1)
	 */

	ldr	r0, =MSC0
	ldr	r1, =MSC1
	ldr	r2, =MSC2

	ldr	r3, [r0]
	bic	r3, r3, #FMsk(MSC_RT)
	bic	r3, r3, #FMsk(MSC_RT)<<16

	ldr	r4, [r1]
	bic	r4, r4, #FMsk(MSC_RT)
	bic	r4, r4, #FMsk(MSC_RT)<<16

	ldr	r5, [r2]
	bic	r5, r5, #FMsk(MSC_RT)
	bic	r5, r5, #FMsk(MSC_RT)<<16

	ldr	r7, [r6]
	bic	r7, r7, #0x0000FF00
	bic	r7, r7, #0x000000F0
	orr	r8, r7, #MDREFR_SLFRSH

	ldr	r9, =MDCNFG
	ldr	r10, [r9]
	bic	r10, r10, #(MDCNFG_DE0+MDCNFG_DE1)
	bic	r10, r10, #(MDCNFG_DE2+MDCNFG_DE3)

	bic	r11, r8, #MDREFR_SLFRSH
	bic	r11, r11, #MDREFR_E1PIN

	ldr	r12, =PMCR

	mov	r13, #PMCR_SF

	b	sa1110_sdram_controller_fix

	.align 5
sa1110_sdram_controller_fix:

	@ Step 1 clear RT field of all MSCx registers
	str	r3, [r0]
	str	r4, [r1]
	str	r5, [r2]

	@ Step 2 clear DRI field in MDREFR
	str	r7, [r6]

	@ Step 3 set SLFRSH bit in MDREFR
	str	r8, [r6]

	@ Step 4 clear DE bits in MDCNFG
	str	r10, [r9]

	@ Step 5 clear DRAM refresh control register
	str	r11, [r6]

	@ Wow, now the hardware suspend request pins can be used, that makes them functional for
	@ about 7 ns out of the entire time that the CPU is running!

	@ Step 6 set force sleep bit in PMCR
	str	r13, [r12]

20:	b	20b			@ loop waiting for sleep
aixcc-public/challenge-001-exemplar-source · arch/arm/mach-imx/suspend-imx53.S · 3,266 bytes

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
 */

#include <linux/linkage.h>

#define M4IF_MCR0_OFFSET	(0x008C)
#define M4IF_MCR0_FDVFS		(0x1 << 11)
#define M4IF_MCR0_FDVACK	(0x1 << 27)

	.align 3

/*
 * ==================== low level suspend ====================
 *
 * On entry
 * r0: pm_info structure address;
 *
 * suspend ocram space layout:
 * ======================== high address ======================
 *                              .
 *                              .
 *                              .
 *                              ^
 *                              ^
 *                              ^
 *                      imx53_suspend code
 *              PM_INFO structure(imx5_cpu_suspend_info)
 * ======================== low address =======================
 */

/* Offsets of members of struct imx5_cpu_suspend_info */
#define SUSPEND_INFO_MX53_M4IF_V_OFFSET		0x0
#define SUSPEND_INFO_MX53_IOMUXC_V_OFFSET	0x4
#define SUSPEND_INFO_MX53_IO_COUNT_OFFSET	0x8
#define SUSPEND_INFO_MX53_IO_STATE_OFFSET	0xc

ENTRY(imx53_suspend)
	stmfd	sp!, {r4,r5,r6,r7}

	/* Save pad config */
	ldr	r1, [r0, #SUSPEND_INFO_MX53_IO_COUNT_OFFSET]
	cmp	r1, #0
	beq	skip_pad_conf_1

	add	r2, r0, #SUSPEND_INFO_MX53_IO_STATE_OFFSET
	ldr	r3, [r0, #SUSPEND_INFO_MX53_IOMUXC_V_OFFSET]

1:	ldr	r5, [r2], #12	/* IOMUXC register offset */
	ldr	r6, [r3, r5]	/* current value */
	str	r6, [r2], #4	/* save area */
	subs	r1, r1, #1
	bne	1b

skip_pad_conf_1:
	/* Set FDVFS bit of M4IF_MCR0 to request DDR to enter self-refresh */
	ldr	r1, [r0, #SUSPEND_INFO_MX53_M4IF_V_OFFSET]
	ldr	r2, [r1, #M4IF_MCR0_OFFSET]
	orr	r2, r2, #M4IF_MCR0_FDVFS
	str	r2, [r1, #M4IF_MCR0_OFFSET]

	/* Poll FDVACK bit of M4IF_MCR to wait for DDR to enter self-refresh */
wait_sr_ack:
	ldr	r2, [r1, #M4IF_MCR0_OFFSET]
	ands	r2, r2, #M4IF_MCR0_FDVACK
	beq	wait_sr_ack

	/* Set pad config */
	ldr	r1, [r0, #SUSPEND_INFO_MX53_IO_COUNT_OFFSET]
	cmp	r1, #0
	beq	skip_pad_conf_2

	add	r2, r0, #SUSPEND_INFO_MX53_IO_STATE_OFFSET
	ldr	r3, [r0, #SUSPEND_INFO_MX53_IOMUXC_V_OFFSET]

2:	ldr	r5, [r2], #4	/* IOMUXC register offset */
	ldr	r6, [r2], #4	/* clear */
	ldr	r7, [r3, r5]
	bic	r7, r7, r6
	ldr	r6, [r2], #8	/* set */
	orr	r7, r7, r6
	str	r7, [r3, r5]
	subs	r1, r1, #1
	bne	2b

skip_pad_conf_2:
	/* Zzz, enter stop mode */
	wfi
	nop
	nop
	nop
	nop

	/* Restore pad config */
	ldr	r1, [r0, #SUSPEND_INFO_MX53_IO_COUNT_OFFSET]
	cmp	r1, #0
	beq	skip_pad_conf_3

	add	r2, r0, #SUSPEND_INFO_MX53_IO_STATE_OFFSET
	ldr	r3, [r0, #SUSPEND_INFO_MX53_IOMUXC_V_OFFSET]

3:	ldr	r5, [r2], #12	/* IOMUXC register offset */
	ldr	r6, [r2], #4	/* saved value */
	str	r6, [r3, r5]
	subs	r1, r1, #1
	bne	3b

skip_pad_conf_3:
	/* Clear FDVFS bit of M4IF_MCR0 to request DDR to exit self-refresh */
	ldr	r1, [r0, #SUSPEND_INFO_MX53_M4IF_V_OFFSET]
	ldr	r2, [r1, #M4IF_MCR0_OFFSET]
	bic	r2, r2, #M4IF_MCR0_FDVFS
	str	r2, [r1, #M4IF_MCR0_OFFSET]

	/* Poll FDVACK bit of M4IF_MCR to wait for DDR to exit self-refresh */
wait_ar_ack:
	ldr	r2, [r1, #M4IF_MCR0_OFFSET]
	ands	r2, r2, #M4IF_MCR0_FDVACK
	bne	wait_ar_ack

	/* Restore registers */
	ldmfd	sp!, {r4,r5,r6,r7}
	mov	pc, lr
ENDPROC(imx53_suspend)

ENTRY(imx53_suspend_sz)
	.word	. - imx53_suspend
aixcc-public/challenge-001-exemplar-source
2,864
arch/arm/mach-imx/ssi-fiq.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Sascha Hauer <s.hauer@pengutronix.de>
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

/*
 * r8  = bit 0-15: tx offset, bit 16-31: tx buffer size
 * r9  = bit 0-15: rx offset, bit 16-31: rx buffer size
 */

#define SSI_STX0	0x00
#define SSI_SRX0	0x08
#define SSI_SISR	0x14
#define SSI_SIER	0x18
#define SSI_SACNT	0x38

#define SSI_SACNT_AC97EN	(1 << 0)

#define SSI_SIER_TFE0_EN	(1 << 0)
#define SSI_SISR_TFE0		(1 << 0)
#define SSI_SISR_RFF0		(1 << 2)
#define SSI_SIER_RFF0_EN	(1 << 2)

		.text
		.global	imx_ssi_fiq_start
		.global	imx_ssi_fiq_end
		.global imx_ssi_fiq_base
		.global imx_ssi_fiq_rx_buffer
		.global imx_ssi_fiq_tx_buffer

/*
 * imx_ssi_fiq_start is _intentionally_ not marked as a function symbol
 * using ENDPROC().  imx_ssi_fiq_start and imx_ssi_fiq_end are used to
 * mark the function body so that it can be copied to the FIQ vector in
 * the vectors page.  imx_ssi_fiq_start should only be called as the result
 * of an FIQ: calling it directly will not work.
 */
imx_ssi_fiq_start:
	ldr	r12, .L_imx_ssi_fiq_base

	/* TX */
	ldr	r13, .L_imx_ssi_fiq_tx_buffer

	/* shall we send? */
	ldr	r11, [r12, #SSI_SIER]
	tst	r11, #SSI_SIER_TFE0_EN
	beq	1f

	/* TX FIFO empty? */
	ldr	r11, [r12, #SSI_SISR]
	tst	r11, #SSI_SISR_TFE0
	beq	1f

	mov	r10, #0x10000
	sub	r10, #1
	and	r10, r10, r8	/* r10: current buffer offset */

	add	r13, r13, r10

	ldrh	r11, [r13]
	strh	r11, [r12, #SSI_STX0]

	ldrh	r11, [r13, #2]
	strh	r11, [r12, #SSI_STX0]

	ldrh	r11, [r13, #4]
	strh	r11, [r12, #SSI_STX0]

	ldrh	r11, [r13, #6]
	strh	r11, [r12, #SSI_STX0]

	add	r10, #8
	lsr	r11, r8, #16	/* r11: buffer size */
	cmp	r10, r11
	lslgt	r8, r11, #16
	addle	r8, #8
1:
	/* RX */

	/* shall we receive? */
	ldr	r11, [r12, #SSI_SIER]
	tst	r11, #SSI_SIER_RFF0_EN
	beq	1f

	/* RX FIFO full? */
	ldr	r11, [r12, #SSI_SISR]
	tst	r11, #SSI_SISR_RFF0
	beq	1f

	ldr	r13, .L_imx_ssi_fiq_rx_buffer

	mov	r10, #0x10000
	sub	r10, #1
	and	r10, r10, r9	/* r10: current buffer offset */

	add	r13, r13, r10

	ldr	r11, [r12, #SSI_SACNT]
	tst	r11, #SSI_SACNT_AC97EN

	ldr	r11, [r12, #SSI_SRX0]
	strh	r11, [r13]

	ldr	r11, [r12, #SSI_SRX0]
	strh	r11, [r13, #2]

	/* dummy read to skip slot 12 */
	ldrne	r11, [r12, #SSI_SRX0]

	ldr	r11, [r12, #SSI_SRX0]
	strh	r11, [r13, #4]

	ldr	r11, [r12, #SSI_SRX0]
	strh	r11, [r13, #6]

	/* dummy read to skip slot 12 */
	ldrne	r11, [r12, #SSI_SRX0]

	add	r10, #8
	lsr	r11, r9, #16	/* r11: buffer size */
	cmp	r10, r11
	lslgt	r9, r11, #16
	addle	r9, #8

1:
	@ return from FIQ
	subs	pc, lr, #4

		.align
.L_imx_ssi_fiq_base:
imx_ssi_fiq_base:
	.word 0x0
.L_imx_ssi_fiq_rx_buffer:
imx_ssi_fiq_rx_buffer:
	.word 0x0
.L_imx_ssi_fiq_tx_buffer:
imx_ssi_fiq_tx_buffer:
	.word 0x0
.L_imx_ssi_fiq_end:
imx_ssi_fiq_end:
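/*
 * Illustrative sketch (not part of the driver): how the setup code is
 * expected to build the packed words the FIQ handler reads from r8/r9.
 * Assuming a 16-bit buffer offset in r4 and a 16-bit buffer size in r5
 * (hypothetical registers):
 *
 *	orr	r8, r4, r5, lsl #16	@ low half: offset, high half: size
 */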
aixcc-public/challenge-001-exemplar-source
7,911
arch/arm/mach-imx/suspend-imx6.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2014 Freescale Semiconductor, Inc.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hardware/cache-l2x0.h>
#include "hardware.h"

/*
 * ==================== low level suspend ====================
 *
 * Better to follow below rules to use ARM registers:
 * r0: pm_info structure address;
 * r1 ~ r4: for saving pm_info members;
 * r5 ~ r10: free registers;
 * r11: io base address.
 *
 * suspend ocram space layout:
 * ======================== high address ======================
 *                              .
 *                              .
 *                              .
 *                              ^
 *                              ^
 *                              ^
 *                      imx6_suspend code
 *              PM_INFO structure(imx6_cpu_pm_info)
 * ======================== low address =======================
 */

/*
 * Below offsets are based on struct imx6_cpu_pm_info
 * which is defined in arch/arm/mach-imx/pm-imx6q.c; this
 * structure contains the necessary pm info for the low level
 * suspend related code.
 */
#define PM_INFO_PBASE_OFFSET			0x0
#define PM_INFO_RESUME_ADDR_OFFSET		0x4
#define PM_INFO_DDR_TYPE_OFFSET			0x8
#define PM_INFO_PM_INFO_SIZE_OFFSET		0xC
#define PM_INFO_MX6Q_MMDC_P_OFFSET		0x10
#define PM_INFO_MX6Q_MMDC_V_OFFSET		0x14
#define PM_INFO_MX6Q_SRC_P_OFFSET		0x18
#define PM_INFO_MX6Q_SRC_V_OFFSET		0x1C
#define PM_INFO_MX6Q_IOMUXC_P_OFFSET		0x20
#define PM_INFO_MX6Q_IOMUXC_V_OFFSET		0x24
#define PM_INFO_MX6Q_CCM_P_OFFSET		0x28
#define PM_INFO_MX6Q_CCM_V_OFFSET		0x2C
#define PM_INFO_MX6Q_GPC_P_OFFSET		0x30
#define PM_INFO_MX6Q_GPC_V_OFFSET		0x34
#define PM_INFO_MX6Q_L2_P_OFFSET		0x38
#define PM_INFO_MX6Q_L2_V_OFFSET		0x3C
#define PM_INFO_MMDC_IO_NUM_OFFSET		0x40
#define PM_INFO_MMDC_IO_VAL_OFFSET		0x44

#define MX6Q_SRC_GPR1	0x20
#define MX6Q_SRC_GPR2	0x24
#define MX6Q_MMDC_MAPSR	0x404
#define MX6Q_MMDC_MPDGCTRL0	0x83c
#define MX6Q_GPC_IMR1	0x08
#define MX6Q_GPC_IMR2	0x0c
#define MX6Q_GPC_IMR3	0x10
#define MX6Q_GPC_IMR4	0x14
#define MX6Q_CCM_CCR	0x0

	.align 3
	.arm

	.macro	sync_l2_cache

	/* sync L2 cache to drain L2's buffers to DRAM. */
#ifdef CONFIG_CACHE_L2X0
	ldr	r11, [r0, #PM_INFO_MX6Q_L2_V_OFFSET]
	teq	r11, #0
	beq	6f
	mov	r6, #0x0
	str	r6, [r11, #L2X0_CACHE_SYNC]
1:
	ldr	r6, [r11, #L2X0_CACHE_SYNC]
	ands	r6, r6, #0x1
	bne	1b
6:
#endif

	.endm

	.macro	resume_mmdc

	/* restore MMDC IO */
	cmp	r5, #0x0
	ldreq	r11, [r0, #PM_INFO_MX6Q_IOMUXC_V_OFFSET]
	ldrne	r11, [r0, #PM_INFO_MX6Q_IOMUXC_P_OFFSET]

	ldr	r6, [r0, #PM_INFO_MMDC_IO_NUM_OFFSET]
	ldr	r7, =PM_INFO_MMDC_IO_VAL_OFFSET
	add	r7, r7, r0
1:
	ldr	r8, [r7], #0x4
	ldr	r9, [r7], #0x4
	str	r9, [r11, r8]
	subs	r6, r6, #0x1
	bne	1b

	cmp	r5, #0x0
	ldreq	r11, [r0, #PM_INFO_MX6Q_MMDC_V_OFFSET]
	ldrne	r11, [r0, #PM_INFO_MX6Q_MMDC_P_OFFSET]
	cmp	r3, #IMX_DDR_TYPE_LPDDR2
	bne	4f

	/* reset read FIFO, RST_RD_FIFO */
	ldr	r7, =MX6Q_MMDC_MPDGCTRL0
	ldr	r6, [r11, r7]
	orr	r6, r6, #(1 << 31)
	str	r6, [r11, r7]
2:
	ldr	r6, [r11, r7]
	ands	r6, r6, #(1 << 31)
	bne	2b

	/* reset FIFO a second time */
	ldr	r6, [r11, r7]
	orr	r6, r6, #(1 << 31)
	str	r6, [r11, r7]
3:
	ldr	r6, [r11, r7]
	ands	r6, r6, #(1 << 31)
	bne	3b
4:
	/* let DDR out of self-refresh */
	ldr	r7, [r11, #MX6Q_MMDC_MAPSR]
	bic	r7, r7, #(1 << 21)
	str	r7, [r11, #MX6Q_MMDC_MAPSR]
5:
	ldr	r7, [r11, #MX6Q_MMDC_MAPSR]
	ands	r7, r7, #(1 << 25)
	bne	5b

	/* enable DDR auto power saving */
	ldr	r7, [r11, #MX6Q_MMDC_MAPSR]
	bic	r7, r7, #0x1
	str	r7, [r11, #MX6Q_MMDC_MAPSR]

	.endm

ENTRY(imx6_suspend)
	ldr	r1, [r0, #PM_INFO_PBASE_OFFSET]
	ldr	r2, [r0, #PM_INFO_RESUME_ADDR_OFFSET]
	ldr	r3, [r0, #PM_INFO_DDR_TYPE_OFFSET]
	ldr	r4, [r0, #PM_INFO_PM_INFO_SIZE_OFFSET]

	/*
	 * compute the resume address in iram
	 * to set it in the SRC register.
	 */
	ldr	r6, =imx6_suspend
	ldr	r7, =resume
	sub	r7, r7, r6
	add	r8, r1, r4
	add	r9, r8, r7

	/*
	 * make sure the TLB contains the addresses we want,
	 * as we will access them after MMDC IO is floated.
	 */
	ldr	r11, [r0, #PM_INFO_MX6Q_CCM_V_OFFSET]
	ldr	r6, [r11, #0x0]
	ldr	r11, [r0, #PM_INFO_MX6Q_GPC_V_OFFSET]
	ldr	r6, [r11, #0x0]
	ldr	r11, [r0, #PM_INFO_MX6Q_IOMUXC_V_OFFSET]
	ldr	r6, [r11, #0x0]

	/* use r11 to store the IO address */
	ldr	r11, [r0, #PM_INFO_MX6Q_SRC_V_OFFSET]
	/* store physical resume addr and pm_info address. */
	str	r9, [r11, #MX6Q_SRC_GPR1]
	str	r1, [r11, #MX6Q_SRC_GPR2]

	/* need to sync L2 cache before DSM. */
	sync_l2_cache

	ldr	r11, [r0, #PM_INFO_MX6Q_MMDC_V_OFFSET]
	/*
	 * put DDR explicitly into self-refresh and
	 * disable automatic power savings.
	 */
	ldr	r7, [r11, #MX6Q_MMDC_MAPSR]
	orr	r7, r7, #0x1
	str	r7, [r11, #MX6Q_MMDC_MAPSR]

	/* make the DDR explicitly enter self-refresh. */
	ldr	r7, [r11, #MX6Q_MMDC_MAPSR]
	orr	r7, r7, #(1 << 21)
	str	r7, [r11, #MX6Q_MMDC_MAPSR]

poll_dvfs_set:
	ldr	r7, [r11, #MX6Q_MMDC_MAPSR]
	ands	r7, r7, #(1 << 25)
	beq	poll_dvfs_set

	ldr	r11, [r0, #PM_INFO_MX6Q_IOMUXC_V_OFFSET]
	ldr	r6, =0x0
	ldr	r7, [r0, #PM_INFO_MMDC_IO_NUM_OFFSET]
	ldr	r8, =PM_INFO_MMDC_IO_VAL_OFFSET
	add	r8, r8, r0
	/* LPDDR2's last 3 IOs need special setting */
	cmp	r3, #IMX_DDR_TYPE_LPDDR2
	subeq	r7, r7, #0x3
set_mmdc_io_lpm:
	ldr	r9, [r8], #0x8
	str	r6, [r11, r9]
	subs	r7, r7, #0x1
	bne	set_mmdc_io_lpm

	cmp	r3, #IMX_DDR_TYPE_LPDDR2
	bne	set_mmdc_io_lpm_done
	ldr	r6, =0x1000
	ldr	r9, [r8], #0x8
	str	r6, [r11, r9]
	ldr	r9, [r8], #0x8
	str	r6, [r11, r9]
	ldr	r6, =0x80000
	ldr	r9, [r8]
	str	r6, [r11, r9]
set_mmdc_io_lpm_done:

	/*
	 * mask all GPC interrupts before
	 * enabling the RBC counters to
	 * avoid the counter starting too
	 * early if an interrupt is already
	 * pending.
	 */
	ldr	r11, [r0, #PM_INFO_MX6Q_GPC_V_OFFSET]
	ldr	r6, [r11, #MX6Q_GPC_IMR1]
	ldr	r7, [r11, #MX6Q_GPC_IMR2]
	ldr	r8, [r11, #MX6Q_GPC_IMR3]
	ldr	r9, [r11, #MX6Q_GPC_IMR4]

	ldr	r10, =0xffffffff
	str	r10, [r11, #MX6Q_GPC_IMR1]
	str	r10, [r11, #MX6Q_GPC_IMR2]
	str	r10, [r11, #MX6Q_GPC_IMR3]
	str	r10, [r11, #MX6Q_GPC_IMR4]

	/*
	 * enable the RBC bypass counter here
	 * to hold off the interrupts. RBC counter
	 * = 32 (1ms), Minimum RBC delay should be
	 * 400us for the analog LDOs to power down.
	 */
	ldr	r11, [r0, #PM_INFO_MX6Q_CCM_V_OFFSET]
	ldr	r10, [r11, #MX6Q_CCM_CCR]
	bic	r10, r10, #(0x3f << 21)
	orr	r10, r10, #(0x20 << 21)
	str	r10, [r11, #MX6Q_CCM_CCR]

	/* enable the counter. */
	ldr	r10, [r11, #MX6Q_CCM_CCR]
	orr	r10, r10, #(0x1 << 27)
	str	r10, [r11, #MX6Q_CCM_CCR]

	/* unmask all the GPC interrupts. */
	ldr	r11, [r0, #PM_INFO_MX6Q_GPC_V_OFFSET]
	str	r6, [r11, #MX6Q_GPC_IMR1]
	str	r7, [r11, #MX6Q_GPC_IMR2]
	str	r8, [r11, #MX6Q_GPC_IMR3]
	str	r9, [r11, #MX6Q_GPC_IMR4]

	/*
	 * now delay for a short while (3usec)
	 * ARM is at 1GHz at this point
	 * so a short loop should be enough.
	 * this delay is required to ensure that
	 * the RBC counter can start counting in
	 * case an interrupt is already pending
	 * or in case an interrupt arrives just
	 * as ARM is about to assert DSM_request.
	 */
	ldr	r6, =2000
rbc_loop:
	subs	r6, r6, #0x1
	bne	rbc_loop

	/* Zzz, enter stop mode */
	wfi
	nop
	nop
	nop
	nop

	/*
	 * reaching here means there is a pending
	 * wakeup source and the system should auto
	 * resume; we need to restore MMDC IO first
	 */
	mov	r5, #0x0
	resume_mmdc

	/* return to suspend finish */
	ret	lr

resume:
	/* invalidate L1 I-cache first */
	mov	r6, #0x0
	mcr	p15, 0, r6, c7, c5, 0
	mcr	p15, 0, r6, c7, c5, 6
	/* enable the Icache and branch prediction */
	mov	r6, #0x1800
	mcr	p15, 0, r6, c1, c0, 0
	isb

	/* get physical resume address from pm_info. */
	ldr	lr, [r0, #PM_INFO_RESUME_ADDR_OFFSET]
	/* clear core0's entry and parameter */
	ldr	r11, [r0, #PM_INFO_MX6Q_SRC_P_OFFSET]
	mov	r7, #0x0
	str	r7, [r11, #MX6Q_SRC_GPR1]
	str	r7, [r11, #MX6Q_SRC_GPR2]

	ldr	r3, [r0, #PM_INFO_DDR_TYPE_OFFSET]
	mov	r5, #0x1
	resume_mmdc

	ret	lr
ENDPROC(imx6_suspend)
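/*
 * Illustrative note (not from the file): the value stored in
 * MX6Q_SRC_GPR1 above is computed as
 *	r9 = ocram_paddr (r1) + pm_info_size (r4) + (resume - imx6_suspend)
 * i.e. the physical address of the "resume" label inside the copy of
 * this code placed in OCRAM, which the SRC uses as the low-power
 * re-entry point with the MMU off.
 */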
aixcc-public/challenge-001-exemplar-source
2,261
arch/arm/common/vlock.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * vlock.S - simple voting lock implementation for ARM
 *
 * Created by:	Dave Martin, 2012-08-16
 * Copyright:	(C) 2012-2013  Linaro Limited
 *
 * This algorithm is described in more detail in
 * Documentation/arm/vlocks.rst.
 */

#include <linux/linkage.h>
#include "vlock.h"

/* Select different code if voting flags can fit in a single word. */
#if VLOCK_VOTING_SIZE > 4
#define FEW(x...)
#define MANY(x...) x
#else
#define FEW(x...) x
#define MANY(x...)
#endif

@ voting lock for first-man coordination

.macro voting_begin rbase:req, rcpu:req, rscratch:req
	mov	\rscratch, #1
	strb	\rscratch, [\rbase, \rcpu]
	dmb
.endm

.macro voting_end rbase:req, rcpu:req, rscratch:req
	dmb
	mov	\rscratch, #0
	strb	\rscratch, [\rbase, \rcpu]
	dsb	st
	sev
.endm

/*
 * The vlock structure must reside in Strongly-Ordered or Device memory.
 * This implementation deliberately eliminates most of the barriers which
 * would be required for other memory types, and assumes that independent
 * writes to neighbouring locations within a cacheline do not interfere
 * with one another.
 */

@ r0: lock structure base
@ r1: CPU ID (0-based index within cluster)
ENTRY(vlock_trylock)
	add	r1, r1, #VLOCK_VOTING_OFFSET

	voting_begin	r0, r1, r2

	ldrb	r2, [r0, #VLOCK_OWNER_OFFSET]	@ check whether lock is held
	cmp	r2, #VLOCK_OWNER_NONE
	bne	trylock_fail			@ fail if so

	@ Control dependency implies strb not observable before previous ldrb.

	strb	r1, [r0, #VLOCK_OWNER_OFFSET]	@ submit my vote

	voting_end	r0, r1, r2		@ implies DMB

	@ Wait for the current round of voting to finish:

 MANY(	mov	r3, #VLOCK_VOTING_OFFSET	)
0:
 MANY(	ldr	r2, [r0, r3]			)
 FEW(	ldr	r2, [r0, #VLOCK_VOTING_OFFSET]	)
	cmp	r2, #0
	wfene
	bne	0b
 MANY(	add	r3, r3, #4			)
 MANY(	cmp	r3, #VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE	)
 MANY(	bne	0b				)

	@ Check who won:

	dmb
	ldrb	r2, [r0, #VLOCK_OWNER_OFFSET]
	eor	r0, r1, r2		@ zero if I won, else nonzero
	bx	lr

trylock_fail:
	voting_end	r0, r1, r2
	mov	r0, #1			@ nonzero indicates that I lost
	bx	lr
ENDPROC(vlock_trylock)

@ r0: lock structure base
ENTRY(vlock_unlock)
	dmb
	mov	r1, #VLOCK_OWNER_NONE
	strb	r1, [r0, #VLOCK_OWNER_OFFSET]
	dsb	st
	sev
	bx	lr
ENDPROC(vlock_unlock)
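@ Illustrative sketch (not part of the file): a hypothetical caller
@ electing a first man with these helpers; the r5/r9 register contents
@ and the "election_lost" label are assumptions.
@
@	mov	r0, r5			@ r5 = vlock structure base (assumed)
@	mov	r1, r9			@ r9 = CPU index in cluster (assumed)
@	bl	vlock_trylock		@ r0 == 0 iff this CPU won
@	cmp	r0, #0
@	bne	election_lost		@ hypothetical loser path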
aixcc-public/challenge-001-exemplar-source
5,240
arch/arm/common/mcpm_head.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/common/mcpm_head.S -- kernel entry point for multi-cluster PM
 *
 * Created by:	Nicolas Pitre, March 2012
 * Copyright:	(C) 2012-2013  Linaro Limited
 *
 * Refer to Documentation/arm/cluster-pm-race-avoidance.rst
 * for details of the synchronisation algorithms used here.
 */

#include <linux/linkage.h>
#include <asm/mcpm.h>
#include <asm/assembler.h>

#include "vlock.h"

.if MCPM_SYNC_CLUSTER_CPUS
.error "cpus must be the first member of struct mcpm_sync_struct"
.endif

.macro	pr_dbg	string
#if defined(CONFIG_DEBUG_LL) && defined(DEBUG)
	b	1901f
1902:	.asciz	"CPU"
1903:	.asciz	" cluster"
1904:	.asciz	": \string"
	.align
1901:	adr	r0, 1902b
	bl	printascii
	mov	r0, r9
	bl	printhex2
	adr	r0, 1903b
	bl	printascii
	mov	r0, r10
	bl	printhex2
	adr	r0, 1904b
	bl	printascii
#endif
.endm

	.arm
	.align

ENTRY(mcpm_entry_point)

 ARM_BE8(setend	be)
 THUMB(	badr	r12, 1f		)
 THUMB(	bx	r12		)
 THUMB(	.thumb			)
1:
	mrc	p15, 0, r0, c0, c0, 5		@ MPIDR
	ubfx	r9, r0, #0, #8			@ r9 = cpu
	ubfx	r10, r0, #8, #8			@ r10 = cluster
	mov	r3, #MAX_CPUS_PER_CLUSTER
	mla	r4, r3, r10, r9			@ r4 = canonical CPU index
	cmp	r4, #(MAX_CPUS_PER_CLUSTER * MAX_NR_CLUSTERS)
	blo	2f

	/* We didn't expect this CPU.  Try to cheaply make it quiet. */
1:	wfi
	wfe
	b	1b

2:	pr_dbg	"kernel mcpm_entry_point\n"

	/*
	 * MMU is off so we need to get to various variables in a
	 * position independent way.
	 */
	adr	r5, 3f
	ldmia	r5, {r0, r6, r7, r8, r11}
	add	r0, r5, r0			@ r0 = mcpm_entry_early_pokes
	add	r6, r5, r6			@ r6 = mcpm_entry_vectors
	ldr	r7, [r5, r7]			@ r7 = mcpm_power_up_setup_phys
	add	r8, r5, r8			@ r8 = mcpm_sync
	add	r11, r5, r11			@ r11 = first_man_locks

	@ Perform an early poke, if any
	add	r0, r0, r4, lsl #3
	ldmia	r0, {r0, r1}
	teq	r0, #0
	strne	r1, [r0]

	mov	r0, #MCPM_SYNC_CLUSTER_SIZE
	mla	r8, r0, r10, r8			@ r8 = sync cluster base

	@ Signal that this CPU is coming UP:
	mov	r0, #CPU_COMING_UP
	mov	r5, #MCPM_SYNC_CPU_SIZE
	mla	r5, r9, r5, r8			@ r5 = sync cpu address

	strb	r0, [r5]

	@ At this point, the cluster cannot unexpectedly enter the GOING_DOWN
	@ state, because there is at least one active CPU (this CPU).

	mov	r0, #VLOCK_SIZE
	mla	r11, r0, r10, r11		@ r11 = cluster first man lock
	mov	r0, r11
	mov	r1, r9				@ cpu
	bl	vlock_trylock			@ implies DMB

	cmp	r0, #0				@ failed to get the lock?
	bne	mcpm_setup_wait			@ wait for cluster setup if so

	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
	cmp	r0, #CLUSTER_UP			@ cluster already up?
	bne	mcpm_setup			@ if not, set up the cluster

	@ Otherwise, release the first man lock and skip setup:
	mov	r0, r11
	bl	vlock_unlock
	b	mcpm_setup_complete

mcpm_setup:
	@ Control dependency implies strb not observable before previous ldrb.

	@ Signal that the cluster is being brought up:
	mov	r0, #INBOUND_COMING_UP
	strb	r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
	dmb

	@ Any CPU trying to take the cluster into CLUSTER_GOING_DOWN from this
	@ point onwards will observe INBOUND_COMING_UP and abort.

	@ Wait for any previously-pending cluster teardown operations to abort
	@ or complete:
mcpm_teardown_wait:
	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
	cmp	r0, #CLUSTER_GOING_DOWN
	bne	first_man_setup
	wfe
	b	mcpm_teardown_wait

first_man_setup:
	dmb

	@ If the outbound gave up before teardown started, skip cluster setup:

	cmp	r0, #CLUSTER_UP
	beq	mcpm_setup_leave

	@ power_up_setup is now responsible for setting up the cluster:

	cmp	r7, #0
	mov	r0, #1		@ second (cluster) affinity level
	blxne	r7		@ Call power_up_setup if defined
	dmb

	mov	r0, #CLUSTER_UP
	strb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
	dmb

mcpm_setup_leave:
	@ Leave the cluster setup critical section:
	mov	r0, #INBOUND_NOT_COMING_UP
	strb	r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
	dsb	st
	sev

	mov	r0, r11
	bl	vlock_unlock	@ implies DMB
	b	mcpm_setup_complete

	@ In the contended case, non-first men wait here for cluster setup
	@ to complete:
mcpm_setup_wait:
	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
	cmp	r0, #CLUSTER_UP
	wfene
	bne	mcpm_setup_wait
	dmb

mcpm_setup_complete:
	@ If a platform-specific CPU setup hook is needed, it is
	@ called from here.

	cmp	r7, #0
	mov	r0, #0		@ first (CPU) affinity level
	blxne	r7		@ Call power_up_setup if defined
	dmb

	@ Mark the CPU as up:

	mov	r0, #CPU_UP
	strb	r0, [r5]

	@ Observability order of CPU_UP and opening of the gate does not matter.

mcpm_entry_gated:
	ldr	r5, [r6, r4, lsl #2]		@ r5 = CPU entry vector
	cmp	r5, #0
	wfeeq
	beq	mcpm_entry_gated
	dmb

	pr_dbg	"released\n"
	bx	r5

	.align	2

3:	.word	mcpm_entry_early_pokes - .
	.word	mcpm_entry_vectors - 3b
	.word	mcpm_power_up_setup_phys - 3b
	.word	mcpm_sync - 3b
	.word	first_man_locks - 3b

ENDPROC(mcpm_entry_point)

	.bss

	.align	CACHE_WRITEBACK_ORDER
	.type	first_man_locks, #object
first_man_locks:
	.space	VLOCK_SIZE * MAX_NR_CLUSTERS
	.align	CACHE_WRITEBACK_ORDER

	.type	mcpm_entry_vectors, #object
ENTRY(mcpm_entry_vectors)
	.space	4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER

	.type	mcpm_entry_early_pokes, #object
ENTRY(mcpm_entry_early_pokes)
	.space	8 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER

	.type	mcpm_power_up_setup_phys, #object
ENTRY(mcpm_power_up_setup_phys)
	.space	4		@ set by mcpm_sync_init()
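@ Illustrative sketch (hypothetical values, not from the file): one
@ 8-byte slot of mcpm_entry_early_pokes as consumed above by
@ "ldmia r0, {r0, r1}" followed by "strne r1, [r0]".
@
@	.word	0xd0020104	@ example poke address (assumption)
@	.word	0x00000001	@ example value written there (assumption)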
aixcc-public/challenge-001-exemplar-source
24,166
arch/arm/mach-at91/pm_suspend.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/mach-at91/pm_slow_clock.S
 *
 * Copyright (C) 2006 Savin Zlobec
 *
 * AT91SAM9 support:
 * Copyright (C) 2007 Anti Sullin <anti.sullin@artecdesign.ee>
 */
#include <linux/linkage.h>
#include <linux/clk/at91_pmc.h>
#include "pm.h"
#include "pm_data-offsets.h"

#define	SRAMC_SELF_FRESH_ACTIVE		0x01
#define	SRAMC_SELF_FRESH_EXIT		0x00

pmc	.req	r0
tmp1	.req	r4
tmp2	.req	r5
tmp3	.req	r6

/*
 * Wait until master clock is ready (after switching master clock source)
 *
 * @r_mckid:	register holding master clock identifier
 *
 * Side effects: overwrites r7, r8
 */
	.macro wait_mckrdy r_mckid
#ifdef CONFIG_SOC_SAMA7
	cmp	\r_mckid, #0
	beq	1f
	mov	r7, #AT91_PMC_MCKXRDY
	b	2f
#endif
1:	mov	r7, #AT91_PMC_MCKRDY
2:	ldr	r8, [pmc, #AT91_PMC_SR]
	and	r8, r7
	cmp	r8, r7
	bne	2b
	.endm

/*
 * Wait until master oscillator has stabilized.
 *
 * Side effects: overwrites r7
 */
	.macro wait_moscrdy
1:	ldr	r7, [pmc, #AT91_PMC_SR]
	tst	r7, #AT91_PMC_MOSCS
	beq	1b
	.endm

/*
 * Wait until main oscillator selection is done
 *
 * Side effects: overwrites r7
 */
	.macro wait_moscsels
1:	ldr	r7, [pmc, #AT91_PMC_SR]
	tst	r7, #AT91_PMC_MOSCSELS
	beq	1b
	.endm

/*
 * Put the processor into the idle state
 *
 * Side effects: overwrites r7
 */
	.macro at91_cpu_idle

#if defined(CONFIG_CPU_V7)
	mov	r7, #AT91_PMC_PCK
	str	r7, [pmc, #AT91_PMC_SCDR]

	dsb

	wfi		@ Wait For Interrupt
#else
	mcr	p15, 0, tmp1, c7, c0, 4
#endif

	.endm

/**
 * Set state for 2.5V low power regulator
 * @ena: 0 - disable regulator
 *	 1 - enable regulator
 *
 * Side effects: overwrites r7, r8, r9, r10
 */
	.macro at91_2_5V_reg_set_low_power ena
#ifdef CONFIG_SOC_SAMA7
	ldr	r7, .sfrbu
	mov	r8, #\ena
	ldr	r9, [r7, #AT91_SFRBU_25LDOCR]
	orr	r9, r9, #AT91_SFRBU_25LDOCR_LP
	cmp	r8, #1
	beq	lp_done_\ena
	bic	r9, r9, #AT91_SFRBU_25LDOCR_LP
lp_done_\ena:
	ldr	r10, =AT91_SFRBU_25LDOCR_LDOANAKEY
	orr	r9, r9, r10
	str	r9, [r7, #AT91_SFRBU_25LDOCR]
#endif
	.endm

	.macro at91_backup_set_lpm reg
#ifdef CONFIG_SOC_SAMA7
	orr	\reg, \reg, #0x200000
#endif
	.endm

	.text

	.arm

#ifdef CONFIG_SOC_SAMA7
/**
 * Enable self-refresh
 *
 * Side effects: overwrites r2, r3, tmp1, tmp2, tmp3, r7
 */
.macro at91_sramc_self_refresh_ena
	ldr	r2, .sramc_base
	ldr	r3, .sramc_phy_base
	ldr	r7, .pm_mode

	dsb

	/* Disable all AXI ports. */
	ldr	tmp1, [r2, #UDDRC_PCTRL_0]
	bic	tmp1, tmp1, #0x1
	str	tmp1, [r2, #UDDRC_PCTRL_0]

	ldr	tmp1, [r2, #UDDRC_PCTRL_1]
	bic	tmp1, tmp1, #0x1
	str	tmp1, [r2, #UDDRC_PCTRL_1]

	ldr	tmp1, [r2, #UDDRC_PCTRL_2]
	bic	tmp1, tmp1, #0x1
	str	tmp1, [r2, #UDDRC_PCTRL_2]

	ldr	tmp1, [r2, #UDDRC_PCTRL_3]
	bic	tmp1, tmp1, #0x1
	str	tmp1, [r2, #UDDRC_PCTRL_3]

	ldr	tmp1, [r2, #UDDRC_PCTRL_4]
	bic	tmp1, tmp1, #0x1
	str	tmp1, [r2, #UDDRC_PCTRL_4]

sr_ena_1:
	/* Wait for all ports to disable. */
	ldr	tmp1, [r2, #UDDRC_PSTAT]
	ldr	tmp2, =UDDRC_PSTAT_ALL_PORTS
	tst	tmp1, tmp2
	bne	sr_ena_1

	/* Switch to self-refresh. */
	ldr	tmp1, [r2, #UDDRC_PWRCTL]
	orr	tmp1, tmp1, #UDDRC_PWRCTL_SELFREF_SW
	str	tmp1, [r2, #UDDRC_PWRCTL]

sr_ena_2:
	/* Wait for self-refresh enter. */
	ldr	tmp1, [r2, #UDDRC_STAT]
	bic	tmp1, tmp1, #~UDDRC_STAT_SELFREF_TYPE_MSK
	cmp	tmp1, #UDDRC_STAT_SELFREF_TYPE_SW
	bne	sr_ena_2

	/* Disable DX DLLs for non-backup modes. */
	cmp	r7, #AT91_PM_BACKUP
	beq	sr_ena_3

	/* Do not soft reset the AC DLL. */
	ldr	tmp1, [r3, DDR3PHY_ACDLLCR]
	bic	tmp1, tmp1, DDR3PHY_ACDLLCR_DLLSRST
	str	tmp1, [r3, DDR3PHY_ACDLLCR]

	/* Disable DX DLLs. */
	ldr	tmp1, [r3, #DDR3PHY_DX0DLLCR]
	orr	tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS
	str	tmp1, [r3, #DDR3PHY_DX0DLLCR]

	ldr	tmp1, [r3, #DDR3PHY_DX1DLLCR]
	orr	tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS
	str	tmp1, [r3, #DDR3PHY_DX1DLLCR]

sr_ena_3:
	/* Power down DDR PHY data receivers. */
	ldr	tmp1, [r3, #DDR3PHY_DXCCR]
	orr	tmp1, tmp1, #DDR3PHY_DXCCR_DXPDR
	str	tmp1, [r3, #DDR3PHY_DXCCR]

	/* Power down ADDR/CMD IO. */
	ldr	tmp1, [r3, #DDR3PHY_ACIOCR]
	orr	tmp1, tmp1, #DDR3PHY_ACIORC_ACPDD
	orr	tmp1, tmp1, #DDR3PHY_ACIOCR_CKPDD_CK0
	orr	tmp1, tmp1, #DDR3PHY_ACIOCR_CSPDD_CS0
	str	tmp1, [r3, #DDR3PHY_ACIOCR]

	/* Power down ODT. */
	ldr	tmp1, [r3, #DDR3PHY_DSGCR]
	orr	tmp1, tmp1, #DDR3PHY_DSGCR_ODTPDD_ODT0
	str	tmp1, [r3, #DDR3PHY_DSGCR]
.endm

/**
 * Disable self-refresh
 *
 * Side effects: overwrites r2, r3, tmp1, tmp2, tmp3
 */
.macro at91_sramc_self_refresh_dis
	ldr	r2, .sramc_base
	ldr	r3, .sramc_phy_base

	/* Power up DDR PHY data receivers. */
	ldr	tmp1, [r3, #DDR3PHY_DXCCR]
	bic	tmp1, tmp1, #DDR3PHY_DXCCR_DXPDR
	str	tmp1, [r3, #DDR3PHY_DXCCR]

	/* Power up the output of CK and CS pins. */
	ldr	tmp1, [r3, #DDR3PHY_ACIOCR]
	bic	tmp1, tmp1, #DDR3PHY_ACIORC_ACPDD
	bic	tmp1, tmp1, #DDR3PHY_ACIOCR_CKPDD_CK0
	bic	tmp1, tmp1, #DDR3PHY_ACIOCR_CSPDD_CS0
	str	tmp1, [r3, #DDR3PHY_ACIOCR]

	/* Power up ODT. */
	ldr	tmp1, [r3, #DDR3PHY_DSGCR]
	bic	tmp1, tmp1, #DDR3PHY_DSGCR_ODTPDD_ODT0
	str	tmp1, [r3, #DDR3PHY_DSGCR]

	/* Enable DX DLLs. */
	ldr	tmp1, [r3, #DDR3PHY_DX0DLLCR]
	bic	tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS
	str	tmp1, [r3, #DDR3PHY_DX0DLLCR]

	ldr	tmp1, [r3, #DDR3PHY_DX1DLLCR]
	bic	tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS
	str	tmp1, [r3, #DDR3PHY_DX1DLLCR]

	/* Enable quasi-dynamic programming. */
	mov	tmp1, #0
	str	tmp1, [r2, #UDDRC_SWCTRL]

	/* De-assert SDRAM initialization. */
	ldr	tmp1, [r2, #UDDRC_DFIMISC]
	bic	tmp1, tmp1, #UDDRC_DFIMISC_DFI_INIT_COMPLETE_EN
	str	tmp1, [r2, #UDDRC_DFIMISC]

	/* Quasi-dynamic programming done. */
	mov	tmp1, #UDDRC_SWCTRL_SW_DONE
	str	tmp1, [r2, #UDDRC_SWCTRL]

sr_dis_1:
	ldr	tmp1, [r2, #UDDRC_SWSTAT]
	tst	tmp1, #UDDRC_SWSTAT_SW_DONE_ACK
	beq	sr_dis_1

	/* DLL soft-reset + DLL lock wait + ITM reset */
	mov	tmp1, #(DDR3PHY_PIR_INIT | DDR3PHY_PIR_DLLSRST | \
			DDR3PHY_PIR_DLLLOCK | DDR3PHY_PIR_ITMSRST)
	str	tmp1, [r3, #DDR3PHY_PIR]

sr_dis_4:
	/* Wait for it. */
	ldr	tmp1, [r3, #DDR3PHY_PGSR]
	tst	tmp1, #DDR3PHY_PGSR_IDONE
	beq	sr_dis_4

	/* Enable quasi-dynamic programming. */
	mov	tmp1, #0
	str	tmp1, [r2, #UDDRC_SWCTRL]

	/* Assert PHY init complete enable signal. */
	ldr	tmp1, [r2, #UDDRC_DFIMISC]
	orr	tmp1, tmp1, #UDDRC_DFIMISC_DFI_INIT_COMPLETE_EN
	str	tmp1, [r2, #UDDRC_DFIMISC]

	/* Programming is done. Set sw_done. */
	mov	tmp1, #UDDRC_SWCTRL_SW_DONE
	str	tmp1, [r2, #UDDRC_SWCTRL]

sr_dis_5:
	/* Wait for it. */
	ldr	tmp1, [r2, #UDDRC_SWSTAT]
	tst	tmp1, #UDDRC_SWSTAT_SW_DONE_ACK
	beq	sr_dis_5

	/* Trigger self-refresh exit. */
	ldr	tmp1, [r2, #UDDRC_PWRCTL]
	bic	tmp1, tmp1, #UDDRC_PWRCTL_SELFREF_SW
	str	tmp1, [r2, #UDDRC_PWRCTL]

sr_dis_6:
	/* Wait for self-refresh exit done. */
	ldr	tmp1, [r2, #UDDRC_STAT]
	bic	tmp1, tmp1, #~UDDRC_STAT_OPMODE_MSK
	cmp	tmp1, #UDDRC_STAT_OPMODE_NORMAL
	bne	sr_dis_6

	/* Enable all AXI ports. */
	ldr	tmp1, [r2, #UDDRC_PCTRL_0]
	orr	tmp1, tmp1, #0x1
	str	tmp1, [r2, #UDDRC_PCTRL_0]

	ldr	tmp1, [r2, #UDDRC_PCTRL_1]
	orr	tmp1, tmp1, #0x1
	str	tmp1, [r2, #UDDRC_PCTRL_1]

	ldr	tmp1, [r2, #UDDRC_PCTRL_2]
	orr	tmp1, tmp1, #0x1
	str	tmp1, [r2, #UDDRC_PCTRL_2]

	ldr	tmp1, [r2, #UDDRC_PCTRL_3]
	orr	tmp1, tmp1, #0x1
	str	tmp1, [r2, #UDDRC_PCTRL_3]

	ldr	tmp1, [r2, #UDDRC_PCTRL_4]
	orr	tmp1, tmp1, #0x1
	str	tmp1, [r2, #UDDRC_PCTRL_4]

	dsb
.endm
#else
/**
 * Enable self-refresh
 *
 * register usage:
 *	@r1: memory type
 *	@r2: base address of the sram controller
 *	@r3: temporary
 */
.macro at91_sramc_self_refresh_ena
	ldr	r1, .memtype
	ldr	r2, .sramc_base

	cmp	r1, #AT91_MEMCTRL_MC
	bne	sr_ena_ddrc_sf

	/* Activate SDRAM self-refresh mode */
	mov	r3, #1
	str	r3, [r2, #AT91_MC_SDRAMC_SRR]
	b	sr_ena_exit

sr_ena_ddrc_sf:
	cmp	r1, #AT91_MEMCTRL_DDRSDR
	bne	sr_ena_sdramc_sf

	/*
	 * DDR Memory controller
	 */

	/* LPDDR1 --> force DDR2 mode during self-refresh */
	ldr	r3, [r2, #AT91_DDRSDRC_MDR]
	str	r3, .saved_sam9_mdr
	bic	r3, r3, #~AT91_DDRSDRC_MD
	cmp	r3, #AT91_DDRSDRC_MD_LOW_POWER_DDR
	ldreq	r3, [r2, #AT91_DDRSDRC_MDR]
	biceq	r3, r3, #AT91_DDRSDRC_MD
	orreq	r3, r3, #AT91_DDRSDRC_MD_DDR2
	streq	r3, [r2, #AT91_DDRSDRC_MDR]

	/* Activate DDRC self-refresh mode */
	ldr	r3, [r2, #AT91_DDRSDRC_LPR]
	str	r3, .saved_sam9_lpr
	bic	r3, r3, #AT91_DDRSDRC_LPCB
	orr	r3, r3, #AT91_DDRSDRC_LPCB_SELF_REFRESH
	str	r3, [r2, #AT91_DDRSDRC_LPR]

	/* If using the 2nd ddr controller */
	ldr	r2, .sramc1_base
	cmp	r2, #0
	beq	sr_ena_no_2nd_ddrc

	ldr	r3, [r2, #AT91_DDRSDRC_MDR]
	str	r3, .saved_sam9_mdr1
	bic	r3, r3, #~AT91_DDRSDRC_MD
	cmp	r3, #AT91_DDRSDRC_MD_LOW_POWER_DDR
	ldreq	r3, [r2, #AT91_DDRSDRC_MDR]
	biceq	r3, r3, #AT91_DDRSDRC_MD
	orreq	r3, r3, #AT91_DDRSDRC_MD_DDR2
	streq	r3, [r2, #AT91_DDRSDRC_MDR]

	/* Activate DDRC self-refresh mode */
	ldr	r3, [r2, #AT91_DDRSDRC_LPR]
	str	r3, .saved_sam9_lpr1
	bic	r3, r3, #AT91_DDRSDRC_LPCB
	orr	r3, r3, #AT91_DDRSDRC_LPCB_SELF_REFRESH
	str	r3, [r2, #AT91_DDRSDRC_LPR]

sr_ena_no_2nd_ddrc:
	b	sr_ena_exit

	/*
	 * SDRAMC Memory controller
	 */
sr_ena_sdramc_sf:
	/* Activate SDRAMC self-refresh mode */
	ldr	r3, [r2, #AT91_SDRAMC_LPR]
	str	r3, .saved_sam9_lpr
	bic	r3, r3, #AT91_SDRAMC_LPCB
	orr	r3, r3, #AT91_SDRAMC_LPCB_SELF_REFRESH
	str	r3, [r2, #AT91_SDRAMC_LPR]

sr_ena_exit:
.endm

/**
 * Disable self-refresh
 *
 * register usage:
 *	@r1: memory type
 *	@r2: base address of the sram controller
 *	@r3: temporary
 */
.macro at91_sramc_self_refresh_dis
	ldr	r1, .memtype
	ldr	r2, .sramc_base

	cmp	r1, #AT91_MEMCTRL_MC
	bne	sr_dis_ddrc_exit_sf

	/*
	 * at91rm9200 Memory controller
	 */

	/*
	 * For exiting the self-refresh mode, do nothing:
	 * the controller leaves self-refresh automatically.
	 */
	b	sr_dis_exit

sr_dis_ddrc_exit_sf:
	cmp	r1, #AT91_MEMCTRL_DDRSDR
	bne	sdramc_exit_sf

	/* DDR Memory controller */

	/* Restore MDR in case of LPDDR1 */
	ldr	r3, .saved_sam9_mdr
	str	r3, [r2, #AT91_DDRSDRC_MDR]
	/* Restore LPR on AT91 with DDRAM */
	ldr	r3, .saved_sam9_lpr
	str	r3, [r2, #AT91_DDRSDRC_LPR]

	/* If using the 2nd ddr controller */
	ldr	r2, .sramc1_base
	cmp	r2, #0
	ldrne	r3, .saved_sam9_mdr1
	strne	r3, [r2, #AT91_DDRSDRC_MDR]
	ldrne	r3, .saved_sam9_lpr1
	strne	r3, [r2, #AT91_DDRSDRC_LPR]

	b	sr_dis_exit

sdramc_exit_sf:
	/* SDRAMC Memory controller */
	ldr	r3, .saved_sam9_lpr
	str	r3, [r2, #AT91_SDRAMC_LPR]

sr_dis_exit:
.endm
#endif

	.macro at91_pm_ulp0_mode
	ldr	pmc, .pmc_base
	ldr	tmp2, .pm_mode
	ldr	tmp3, .mckr_offset

	/* Check if ULP0 fast variant has been requested. */
	cmp	tmp2, #AT91_PM_ULP0_FAST
	bne	0f

	/* Set highest prescaler for power saving */
	ldr	tmp1, [pmc, tmp3]
	bic	tmp1, tmp1, #AT91_PMC_PRES
	orr	tmp1, tmp1, #AT91_PMC_PRES_64
	str	tmp1, [pmc, tmp3]

	mov	tmp3, #0
	wait_mckrdy tmp3
	b	1f

0:
	/* Turn off the crystal oscillator */
	ldr	tmp1, [pmc, #AT91_CKGR_MOR]
	bic	tmp1, tmp1, #AT91_PMC_MOSCEN
	orr	tmp1, tmp1, #AT91_PMC_KEY
	str	tmp1, [pmc, #AT91_CKGR_MOR]

	/* Save RC oscillator state */
	ldr	tmp1, [pmc, #AT91_PMC_SR]
	str	tmp1, .saved_osc_status
	tst	tmp1, #AT91_PMC_MOSCRCS
	bne	1f

	/* Turn off RC oscillator */
	ldr	tmp1, [pmc, #AT91_CKGR_MOR]
	bic	tmp1, tmp1, #AT91_PMC_MOSCRCEN
	bic	tmp1, tmp1, #AT91_PMC_KEY_MASK
	orr	tmp1, tmp1, #AT91_PMC_KEY
	str	tmp1, [pmc, #AT91_CKGR_MOR]

	/* Wait until main RC is disabled */
2:	ldr	tmp1, [pmc, #AT91_PMC_SR]
	tst	tmp1, #AT91_PMC_MOSCRCS
	bne	2b

	/* Wait for interrupt */
1:	at91_cpu_idle

	/* Check if ULP0 fast variant has been requested. */
	cmp	tmp2, #AT91_PM_ULP0_FAST
	bne	5f

	/* Set lowest prescaler for fast resume. */
	ldr	tmp3, .mckr_offset
	ldr	tmp1, [pmc, tmp3]
	bic	tmp1, tmp1, #AT91_PMC_PRES
	str	tmp1, [pmc, tmp3]

	mov	tmp3, #0
	wait_mckrdy tmp3
	b	6f

5:	/* Restore RC oscillator state */
	ldr	tmp1, .saved_osc_status
	tst	tmp1, #AT91_PMC_MOSCRCS
	beq	4f

	/* Turn on RC oscillator */
	ldr	tmp1, [pmc, #AT91_CKGR_MOR]
	orr	tmp1, tmp1, #AT91_PMC_MOSCRCEN
	bic	tmp1, tmp1, #AT91_PMC_KEY_MASK
	orr	tmp1, tmp1, #AT91_PMC_KEY
	str	tmp1, [pmc, #AT91_CKGR_MOR]

	/* Wait for main RC stabilization */
3:	ldr	tmp1, [pmc, #AT91_PMC_SR]
	tst	tmp1, #AT91_PMC_MOSCRCS
	beq	3b

	/* Turn on the crystal oscillator */
4:	ldr	tmp1, [pmc, #AT91_CKGR_MOR]
	orr	tmp1, tmp1, #AT91_PMC_MOSCEN
	orr	tmp1, tmp1, #AT91_PMC_KEY
	str	tmp1, [pmc, #AT91_CKGR_MOR]

	wait_moscrdy
6:
	.endm

/**
 * Note: This procedure only applies to platforms that use
 * the external crystal oscillator as the main clock source.
 */
	.macro at91_pm_ulp1_mode
	ldr	pmc, .pmc_base
	ldr	tmp2, .mckr_offset
	mov	tmp3, #0

	/* Save RC oscillator state and check if it is enabled. */
	ldr	tmp1, [pmc, #AT91_PMC_SR]
	str	tmp1, .saved_osc_status
	tst	tmp1, #AT91_PMC_MOSCRCS
	bne	2f

	/* Enable RC oscillator */
	ldr	tmp1, [pmc, #AT91_CKGR_MOR]
	orr	tmp1, tmp1, #AT91_PMC_MOSCRCEN
	bic	tmp1, tmp1, #AT91_PMC_KEY_MASK
	orr	tmp1, tmp1, #AT91_PMC_KEY
	str	tmp1, [pmc, #AT91_CKGR_MOR]

	/* Wait for main RC stabilization */
1:	ldr	tmp1, [pmc, #AT91_PMC_SR]
	tst	tmp1, #AT91_PMC_MOSCRCS
	beq	1b

	/* Switch the main clock source to 12-MHz RC oscillator */
2:	ldr	tmp1, [pmc, #AT91_CKGR_MOR]
	bic	tmp1, tmp1, #AT91_PMC_MOSCSEL
	bic	tmp1, tmp1, #AT91_PMC_KEY_MASK
	orr	tmp1, tmp1, #AT91_PMC_KEY
	str	tmp1, [pmc, #AT91_CKGR_MOR]

	wait_moscsels

	/* Disable the crystal oscillator */
	ldr	tmp1, [pmc, #AT91_CKGR_MOR]
	bic	tmp1, tmp1, #AT91_PMC_MOSCEN
	bic	tmp1, tmp1, #AT91_PMC_KEY_MASK
	orr	tmp1, tmp1, #AT91_PMC_KEY
	str	tmp1, [pmc, #AT91_CKGR_MOR]

	/* Switch the master clock source to main clock */
	ldr	tmp1, [pmc, tmp2]
	bic	tmp1, tmp1, #AT91_PMC_CSS
	orr	tmp1, tmp1, #AT91_PMC_CSS_MAIN
	str	tmp1, [pmc, tmp2]

	wait_mckrdy tmp3

	/* Enter the ULP1 mode by set WAITMODE bit in CKGR_MOR */
	ldr	tmp1, [pmc, #AT91_CKGR_MOR]
	orr	tmp1, tmp1, #AT91_PMC_WAITMODE
	bic	tmp1, tmp1, #AT91_PMC_KEY_MASK
	orr	tmp1, tmp1, #AT91_PMC_KEY
	str	tmp1, [pmc, #AT91_CKGR_MOR]

	/* Quirk for SAM9X60's PMC */
	nop
	nop

	wait_mckrdy tmp3

	/* Enable the crystal oscillator */
	ldr	tmp1, [pmc, #AT91_CKGR_MOR]
	orr	tmp1, tmp1, #AT91_PMC_MOSCEN
	bic	tmp1, tmp1, #AT91_PMC_KEY_MASK
	orr	tmp1, tmp1, #AT91_PMC_KEY
	str	tmp1, [pmc, #AT91_CKGR_MOR]

	wait_moscrdy

	/* Switch the master clock source to slow clock */
	ldr	tmp1, [pmc, tmp2]
	bic	tmp1, tmp1, #AT91_PMC_CSS
	str	tmp1, [pmc, tmp2]

	wait_mckrdy tmp3

	/* Switch main clock source to crystal oscillator */
	ldr	tmp1, [pmc, #AT91_CKGR_MOR]
	orr	tmp1, tmp1, #AT91_PMC_MOSCSEL
	bic	tmp1, tmp1, #AT91_PMC_KEY_MASK
	orr	tmp1, tmp1, #AT91_PMC_KEY
	str	tmp1, [pmc, #AT91_CKGR_MOR]

	wait_moscsels

	/* Switch the master clock source to main clock */
	ldr	tmp1, [pmc, tmp2]
	bic	tmp1, tmp1, #AT91_PMC_CSS
	orr	tmp1, tmp1, #AT91_PMC_CSS_MAIN
	str	tmp1, [pmc, tmp2]

	wait_mckrdy tmp3

	/* Restore RC oscillator state */
	ldr	tmp1, .saved_osc_status
	tst	tmp1, #AT91_PMC_MOSCRCS
	bne	3f

	/* Disable RC oscillator */
	ldr	tmp1, [pmc, #AT91_CKGR_MOR]
	bic	tmp1, tmp1, #AT91_PMC_MOSCRCEN
	bic	tmp1, tmp1, #AT91_PMC_KEY_MASK
	orr	tmp1, tmp1, #AT91_PMC_KEY
	str	tmp1, [pmc, #AT91_CKGR_MOR]

	/* Wait until RC oscillator is disabled */
4:	ldr	tmp1, [pmc, #AT91_PMC_SR]
	tst	tmp1, #AT91_PMC_MOSCRCS
	bne	4b
3:
	.endm

	.macro at91_plla_disable
	/* Save PLLA setting and disable it */
	ldr	tmp1, .pmc_version
	cmp	tmp1, #AT91_PMC_V1
	beq	1f

#ifdef CONFIG_HAVE_AT91_SAM9X60_PLL
	/* Save PLLA settings. */
	ldr	tmp2, [pmc, #AT91_PMC_PLL_UPDT]
	bic	tmp2, tmp2, #AT91_PMC_PLL_UPDT_ID
	str	tmp2, [pmc, #AT91_PMC_PLL_UPDT]

	/* save div. */
	mov	tmp1, #0
	ldr	tmp2, [pmc, #AT91_PMC_PLL_CTRL0]
	bic	tmp2, tmp2, #0xffffff00
	orr	tmp1, tmp1, tmp2

	/* save mul. */
	ldr	tmp2, [pmc, #AT91_PMC_PLL_CTRL1]
	bic	tmp2, tmp2, #0xffffff
	orr	tmp1, tmp1, tmp2
	str	tmp1, .saved_pllar

	/* step 2. */
	ldr	tmp1, [pmc, #AT91_PMC_PLL_UPDT]
	bic	tmp1, tmp1, #AT91_PMC_PLL_UPDT_UPDATE
	bic	tmp1, tmp1, #AT91_PMC_PLL_UPDT_ID
	str	tmp1, [pmc, #AT91_PMC_PLL_UPDT]

	/* step 3. */
	ldr	tmp1, [pmc, #AT91_PMC_PLL_CTRL0]
	bic	tmp1, tmp1, #AT91_PMC_PLL_CTRL0_ENPLLCK
	orr	tmp1, tmp1, #AT91_PMC_PLL_CTRL0_ENPLL
	str	tmp1, [pmc, #AT91_PMC_PLL_CTRL0]

	/* step 4. */
	ldr	tmp1, [pmc, #AT91_PMC_PLL_UPDT]
	orr	tmp1, tmp1, #AT91_PMC_PLL_UPDT_UPDATE
	bic	tmp1, tmp1, #AT91_PMC_PLL_UPDT_ID
	str	tmp1, [pmc, #AT91_PMC_PLL_UPDT]

	/* step 5. */
	ldr	tmp1, [pmc, #AT91_PMC_PLL_CTRL0]
	bic	tmp1, tmp1, #AT91_PMC_PLL_CTRL0_ENPLL
	str	tmp1, [pmc, #AT91_PMC_PLL_CTRL0]

	/* step 7. */
	ldr	tmp1, [pmc, #AT91_PMC_PLL_UPDT]
	orr	tmp1, tmp1, #AT91_PMC_PLL_UPDT_UPDATE
	bic	tmp1, tmp1, #AT91_PMC_PLL_UPDT_ID
	str	tmp1, [pmc, #AT91_PMC_PLL_UPDT]

	b	2f
#endif

1:	/* Save PLLA setting and disable it */
	ldr	tmp1, [pmc, #AT91_CKGR_PLLAR]
	str	tmp1, .saved_pllar

	/* Disable PLLA. */
	mov	tmp1, #AT91_PMC_PLLCOUNT
	orr	tmp1, tmp1, #(1 << 29)		/* bit 29 always set */
	str	tmp1, [pmc, #AT91_CKGR_PLLAR]
2:
	.endm

	.macro at91_plla_enable
	ldr	tmp2, .saved_pllar
	ldr	tmp3, .pmc_version
	cmp	tmp3, #AT91_PMC_V1
	beq	4f

#ifdef CONFIG_HAVE_AT91_SAM9X60_PLL
	/* step 1. */
	ldr	tmp1, [pmc, #AT91_PMC_PLL_UPDT]
	bic	tmp1, tmp1, #AT91_PMC_PLL_UPDT_ID
	bic	tmp1, tmp1, #AT91_PMC_PLL_UPDT_UPDATE
	str	tmp1, [pmc, #AT91_PMC_PLL_UPDT]

	/* step 2. */
	ldr	tmp1, =AT91_PMC_PLL_ACR_DEFAULT_PLLA
	str	tmp1, [pmc, #AT91_PMC_PLL_ACR]

	/* step 3. */
	ldr	tmp1, [pmc, #AT91_PMC_PLL_CTRL1]
	mov	tmp3, tmp2
	bic	tmp3, tmp3, #0xffffff
	orr	tmp1, tmp1, tmp3
	str	tmp1, [pmc, #AT91_PMC_PLL_CTRL1]

	/* step 8. */
	ldr	tmp1, [pmc, #AT91_PMC_PLL_UPDT]
	bic	tmp1, tmp1, #AT91_PMC_PLL_UPDT_ID
	orr	tmp1, tmp1, #AT91_PMC_PLL_UPDT_UPDATE
	str	tmp1, [pmc, #AT91_PMC_PLL_UPDT]

	/* step 9. */
	ldr	tmp1, [pmc, #AT91_PMC_PLL_CTRL0]
	orr	tmp1, tmp1, #AT91_PMC_PLL_CTRL0_ENLOCK
	orr	tmp1, tmp1, #AT91_PMC_PLL_CTRL0_ENPLL
	orr	tmp1, tmp1, #AT91_PMC_PLL_CTRL0_ENPLLCK
	bic	tmp1, tmp1, #0xff
	mov	tmp3, tmp2
	bic	tmp3, tmp3, #0xffffff00
	orr	tmp1, tmp1, tmp3
	str	tmp1, [pmc, #AT91_PMC_PLL_CTRL0]

	/* step 10. */
	ldr	tmp1, [pmc, #AT91_PMC_PLL_UPDT]
	orr	tmp1, tmp1, #AT91_PMC_PLL_UPDT_UPDATE
	bic	tmp1, tmp1, #AT91_PMC_PLL_UPDT_ID
	str	tmp1, [pmc, #AT91_PMC_PLL_UPDT]

	/* step 11. */
3:	ldr	tmp1, [pmc, #AT91_PMC_PLL_ISR0]
	tst	tmp1, #0x1
	beq	3b
	b	2f
#endif

	/* Restore PLLA setting */
4:	str	tmp2, [pmc, #AT91_CKGR_PLLAR]

	/* Enable PLLA. */
	tst	tmp2, #(AT91_PMC_MUL & 0xff0000)
	bne	1f
	tst	tmp2, #(AT91_PMC_MUL & ~0xff0000)
	beq	2f

1:	ldr	tmp1, [pmc, #AT91_PMC_SR]
	tst	tmp1, #AT91_PMC_LOCKA
	beq	1b
2:
	.endm

/**
 * at91_mckx_ps_enable: save MCK1..4 settings and switch it to main clock
 *
 * Side effects: overwrites tmp1, tmp2
 */
	.macro at91_mckx_ps_enable
#ifdef CONFIG_SOC_SAMA7
	ldr	pmc, .pmc_base

	/* There are 4 MCKs we need to handle: MCK1..4 */
	mov	tmp1, #1
e_loop:	cmp	tmp1, #5
	beq	e_done

	/* Write MCK ID to retrieve the settings. */
	str	tmp1, [pmc, #AT91_PMC_MCR_V2]
	ldr	tmp2, [pmc, #AT91_PMC_MCR_V2]

e_save_mck1:
	cmp	tmp1, #1
	bne	e_save_mck2
	str	tmp2, .saved_mck1
	b	e_ps

e_save_mck2:
	cmp	tmp1, #2
	bne	e_save_mck3
	str	tmp2, .saved_mck2
	b	e_ps

e_save_mck3:
	cmp	tmp1, #3
	bne	e_save_mck4
	str	tmp2, .saved_mck3
	b	e_ps

e_save_mck4:
	str	tmp2, .saved_mck4

e_ps:
	/* Use CSS=MAINCK and DIV=1. */
	bic	tmp2, tmp2, #AT91_PMC_MCR_V2_CSS
	bic	tmp2, tmp2, #AT91_PMC_MCR_V2_DIV
	orr	tmp2, tmp2, #AT91_PMC_MCR_V2_CSS_MAINCK
	orr	tmp2, tmp2, #AT91_PMC_MCR_V2_DIV1
	str	tmp2, [pmc, #AT91_PMC_MCR_V2]

	wait_mckrdy tmp1

	add	tmp1, tmp1, #1
	b	e_loop

e_done:
#endif
	.endm

/**
 * at91_mckx_ps_restore: restore MCK1..4 settings
 *
 * Side effects: overwrites tmp1, tmp2, tmp3
 */
	.macro at91_mckx_ps_restore
#ifdef CONFIG_SOC_SAMA7
	ldr	pmc, .pmc_base

	/* There are 4 MCKs we need to handle: MCK1..4 */
	mov	tmp1, #1
r_loop:	cmp	tmp1, #5
	beq	r_done

r_save_mck1:
	cmp	tmp1, #1
	bne	r_save_mck2
	ldr	tmp2, .saved_mck1
	b	r_ps

r_save_mck2:
	cmp	tmp1, #2
	bne	r_save_mck3
	ldr	tmp2, .saved_mck2
	b	r_ps

r_save_mck3:
	cmp	tmp1, #3
	bne	r_save_mck4
	ldr	tmp2, .saved_mck3
	b	r_ps

r_save_mck4:
	ldr	tmp2, .saved_mck4

r_ps:
	/* Write MCK ID to retrieve the settings. */
	str	tmp1, [pmc, #AT91_PMC_MCR_V2]
	ldr	tmp3, [pmc, #AT91_PMC_MCR_V2]

	/* We need to restore CSS and DIV. */
	bic	tmp3, tmp3, #AT91_PMC_MCR_V2_CSS
	bic	tmp3, tmp3, #AT91_PMC_MCR_V2_DIV
	orr	tmp3, tmp3, tmp2
	bic	tmp3, tmp3, #AT91_PMC_MCR_V2_ID_MSK
	orr	tmp3, tmp3, tmp1
	orr	tmp3, tmp3, #AT91_PMC_MCR_V2_CMD
	str	tmp3, [pmc, #AT91_PMC_MCR_V2]

	wait_mckrdy tmp1

	add	tmp1, tmp1, #1
	b	r_loop
r_done:
#endif
	.endm

	.macro at91_ulp_mode
	at91_mckx_ps_enable

	ldr	pmc, .pmc_base
	ldr	tmp2, .mckr_offset
	ldr	tmp3, .pm_mode

	/* Save Master clock setting */
	ldr	tmp1, [pmc, tmp2]
	str	tmp1, .saved_mckr

	/*
	 * Set master clock source to:
	 * - MAINCK if using ULP0 fast variant
	 * - slow clock, otherwise
	 */
	bic	tmp1, tmp1, #AT91_PMC_CSS
	cmp	tmp3, #AT91_PM_ULP0_FAST
	bne	save_mck
	orr	tmp1, tmp1, #AT91_PMC_CSS_MAIN
save_mck:
	str	tmp1, [pmc, tmp2]

	mov	tmp3, #0
	wait_mckrdy tmp3

	at91_plla_disable

	/* Enable low power mode for 2.5V regulator. */
	at91_2_5V_reg_set_low_power 1

	ldr	tmp3, .pm_mode
	cmp	tmp3, #AT91_PM_ULP1
	beq	ulp1_mode

	at91_pm_ulp0_mode
	b	ulp_exit

ulp1_mode:
	at91_pm_ulp1_mode
	b	ulp_exit

ulp_exit:
	/* Disable low power mode for 2.5V regulator. */
	at91_2_5V_reg_set_low_power 0

	ldr	pmc, .pmc_base

	at91_plla_enable

	/*
	 * Restore master clock setting
	 */
	ldr	tmp1, .mckr_offset
	ldr	tmp2, .saved_mckr
	str	tmp2, [pmc, tmp1]

	mov	tmp3, #0
	wait_mckrdy tmp3

	at91_mckx_ps_restore
	.endm

	.macro at91_backup_mode
	/* Switch the master clock source to slow clock. */
	ldr	pmc, .pmc_base
	ldr	tmp2, .mckr_offset
	ldr	tmp1, [pmc, tmp2]
	bic	tmp1, tmp1, #AT91_PMC_CSS
	str	tmp1, [pmc, tmp2]

	mov	tmp3, #0
	wait_mckrdy tmp3

	/* Set the backup unit enable (BUMEN) bit */
	ldr	r0, .sfrbu
	mov	tmp1, #0x1
	str	tmp1, [r0, #0x10]

	/* Wait for it. */
1:	ldr	tmp1, [r0, #0x10]
	tst	tmp1, #0x1
	beq	1b

	/* Shutdown */
	ldr	r0, .shdwc
	mov	tmp1, #0xA5000000
	add	tmp1, tmp1, #0x1
	at91_backup_set_lpm tmp1
	str	tmp1, [r0, #0]
	.endm

/*
 * void at91_suspend_sram_fn(struct at91_pm_data*)
 * @input param:
 *	@r0: base address of struct at91_pm_data
 */
/* at91_pm_suspend_in_sram must be 8-byte aligned per the requirements of fncpy() */
	.align 3
ENTRY(at91_pm_suspend_in_sram)
	/* Save registers on stack */
	stmfd	sp!, {r4 - r12, lr}

	/* Drain write buffer */
	mov	tmp1, #0
	mcr	p15, 0, tmp1, c7, c10, 4

	/* Flush tlb. */
	mov	r4, #0
	mcr	p15, 0, r4, c8, c7, 0

	ldr	tmp1, [r0, #PM_DATA_PMC_MCKR_OFFSET]
	str	tmp1, .mckr_offset
	ldr	tmp1, [r0, #PM_DATA_PMC_VERSION]
	str	tmp1, .pmc_version
	ldr	tmp1, [r0, #PM_DATA_MEMCTRL]
	str	tmp1, .memtype
	ldr	tmp1, [r0, #PM_DATA_MODE]
	str	tmp1, .pm_mode

	/*
	 * ldrne below are here to preload their address in the TLB as access
	 * to RAM may be limited while in self-refresh.
	 */
	ldr	tmp1, [r0, #PM_DATA_PMC]
	str	tmp1, .pmc_base
	cmp	tmp1, #0
	ldrne	tmp2, [tmp1, #0]

	ldr	tmp1, [r0, #PM_DATA_RAMC0]
	str	tmp1, .sramc_base
	cmp	tmp1, #0
	ldrne	tmp2, [tmp1, #0]

	ldr	tmp1, [r0, #PM_DATA_RAMC1]
	str	tmp1, .sramc1_base
	cmp	tmp1, #0
	ldrne	tmp2, [tmp1, #0]

#ifndef CONFIG_SOC_SAM_V4_V5
	/* ldrne below are here to preload their address in the TLB */
	ldr	tmp1, [r0, #PM_DATA_RAMC_PHY]
	str	tmp1, .sramc_phy_base
	cmp	tmp1, #0
	ldrne	tmp2, [tmp1, #0]

	ldr	tmp1, [r0, #PM_DATA_SHDWC]
	str	tmp1, .shdwc
	cmp	tmp1, #0
	ldrne	tmp2, [tmp1, #0]

	ldr	tmp1, [r0, #PM_DATA_SFRBU]
	str	tmp1, .sfrbu
	cmp	tmp1, #0
	ldrne	tmp2, [tmp1, #0x10]
#endif

	/* Activate the self-refresh mode */
	at91_sramc_self_refresh_ena

	ldr	r0, .pm_mode
	cmp	r0, #AT91_PM_STANDBY
	beq	standby
	cmp	r0, #AT91_PM_BACKUP
	beq	backup_mode

	at91_ulp_mode
	b	exit_suspend

standby:
	/* Wait for interrupt */
	ldr	pmc, .pmc_base
	at91_cpu_idle
	b	exit_suspend

backup_mode:
	at91_backup_mode

exit_suspend:
	/* Exit the self-refresh mode */
	at91_sramc_self_refresh_dis

	/* Restore registers, and return */
	ldmfd	sp!, {r4 - r12, pc}
ENDPROC(at91_pm_suspend_in_sram)

.pmc_base:
	.word 0
.sramc_base:
	.word 0
.sramc1_base:
	.word 0
.sramc_phy_base:
	.word 0
.shdwc:
	.word 0
.sfrbu:
	.word 0
.memtype:
	.word 0
.pm_mode:
	.word 0
.mckr_offset:
	.word 0
.pmc_version:
	.word 0
.saved_mckr:
	.word 0
.saved_pllar:
	.word 0
.saved_sam9_lpr:
	.word 0
.saved_sam9_lpr1:
	.word 0
.saved_sam9_mdr:
	.word 0
.saved_sam9_mdr1:
	.word 0
.saved_osc_status:
	.word 0
#ifdef CONFIG_SOC_SAMA7
.saved_mck1:
	.word 0
.saved_mck2:
	.word 0
.saved_mck3:
	.word 0
.saved_mck4:
	.word 0
#endif

ENTRY(at91_pm_suspend_in_sram_sz)
	.word .-at91_pm_suspend_in_sram
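/*
 * Illustrative sketch (not from the file): the recurring CKGR_MOR
 * read-modify-write idiom used throughout the ULP macros above. Every
 * write must carry AT91_PMC_KEY or the PMC ignores it, so any stale
 * key bits are masked before the key is re-armed:
 *
 *	ldr	tmp1, [pmc, #AT91_CKGR_MOR]
 *	bic	tmp1, tmp1, #AT91_PMC_KEY_MASK	@ drop stale key bits
 *	orr	tmp1, tmp1, #AT91_PMC_KEY	@ re-arm the write key
 *	str	tmp1, [pmc, #AT91_CKGR_MOR]
 */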
aixcc-public/challenge-001-exemplar-source
3,134
arch/arm/mach-socfpga/self-refresh.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014-2015 Altera Corporation. All rights reserved.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

#define MAX_LOOP_COUNT	1000

/* Register offset */
#define SDR_CTRLGRP_LOWPWREQ_ADDR	0x54
#define SDR_CTRLGRP_LOWPWRACK_ADDR	0x58

/* Bitfield positions */
#define SELFRSHREQ_POS		3
#define SELFRSHREQ_MASK		0x8
#define SELFRFSHACK_POS		1
#define SELFRFSHACK_MASK	0x2

/*
 * This code assumes that when the bootloader configured
 * the sdram controller for the DDR on the board it
 * configured the following fields depending on the DDR
 * vendor/configuration:
 *
 * sdr.ctrlcfg.lowpwreq.selfrfshmask
 * sdr.ctrlcfg.lowpwrtiming.clkdisablecycles
 * sdr.ctrlcfg.dramtiming4.selfrfshexit
 */

	.arch	armv7-a
	.text
	.align 3

/*
 * socfpga_sdram_self_refresh
 *
 *   r0 : sdr_ctl_base_addr
 *   r1 : temp storage of return value
 *   r2 : temp storage of register values
 *   r3 : loop counter
 *
 *   return value: lower 16 bits: loop count going into self refresh
 *                 upper 16 bits: loop count exiting self refresh
 */
ENTRY(socfpga_sdram_self_refresh)
	/* Enable dynamic clock gating in the Power Control Register. */
	mrc	p15, 0, r2, c15, c0, 0
	orr	r2, r2, #1
	mcr	p15, 0, r2, c15, c0, 0

	/* Enable self refresh: set sdr.ctrlgrp.lowpwreq.selfrshreq = 1 */
	ldr	r2, [r0, #SDR_CTRLGRP_LOWPWREQ_ADDR]
	orr	r2, r2, #SELFRSHREQ_MASK
	str	r2, [r0, #SDR_CTRLGRP_LOWPWREQ_ADDR]

	/* Poll until sdr.ctrlgrp.lowpwrack.selfrfshack == 1 or hit max loops */
	mov	r3, #0
while_ack_0:
	ldr	r2, [r0, #SDR_CTRLGRP_LOWPWRACK_ADDR]
	and	r2, r2, #SELFRFSHACK_MASK
	cmp	r2, #SELFRFSHACK_MASK
	beq	ack_1

	add	r3, #1
	cmp	r3, #MAX_LOOP_COUNT
	bne	while_ack_0
ack_1:
	mov	r1, r3

	/*
	 * Execute an ISB instruction to ensure that all of the
	 * CP15 register changes have been committed.
	 */
	isb

	/*
	 * Execute a barrier instruction to ensure that all cache,
	 * TLB and branch predictor maintenance operations issued
	 * by any CPU in the cluster have completed.
	 */
	dsb
	dmb

	wfi

	/* Disable self-refresh: set sdr.ctrlgrp.lowpwreq.selfrshreq = 0 */
	ldr	r2, [r0, #SDR_CTRLGRP_LOWPWREQ_ADDR]
	bic	r2, r2, #SELFRSHREQ_MASK
	str	r2, [r0, #SDR_CTRLGRP_LOWPWREQ_ADDR]

	/* Poll until sdr.ctrlgrp.lowpwrack.selfrfshack == 0 or hit max loops */
	mov	r3, #0
while_ack_1:
	ldr	r2, [r0, #SDR_CTRLGRP_LOWPWRACK_ADDR]
	and	r2, r2, #SELFRFSHACK_MASK
	cmp	r2, #SELFRFSHACK_MASK
	bne	ack_0

	add	r3, #1
	cmp	r3, #MAX_LOOP_COUNT
	bne	while_ack_1
ack_0:
	/*
	 * Prepare return value:
	 * Shift loop count for exiting self refresh into upper 16 bits.
	 * Leave loop count for requesting self refresh in lower 16 bits.
	 */
	mov	r3, r3, lsl #16
	add	r1, r1, r3

	/* Disable dynamic clock gating in the Power Control Register. */
	mrc	p15, 0, r2, c15, c0, 0
	bic	r2, r2, #1
	mcr	p15, 0, r2, c15, c0, 0

	mov	r0, r1			@ return value
	bx	lr			@ return
ENDPROC(socfpga_sdram_self_refresh)

ENTRY(socfpga_sdram_self_refresh_sz)
	.word	. - socfpga_sdram_self_refresh
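/*
 * Illustrative sketch (hypothetical caller, not part of the file):
 * unpacking the two loop counts from the packed return value.
 *
 *	bl	socfpga_sdram_self_refresh
 *	uxth	r2, r0			@ loops spent entering self-refresh
 *	lsr	r1, r0, #16		@ loops spent exiting self-refresh
 */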
aixcc-public/challenge-001-exemplar-source
4,119
arch/arm/mach-mvebu/coherency_ll.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Coherency fabric: low level functions
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 *
 * This file implements the assembly function to add a CPU to the
 * coherency fabric. This function is called by each of the secondary
 * CPUs during their early boot in an SMP kernel, which is why this
 * function has to be callable from assembly. It can also be called by
 * a primary CPU from C code during its boot.
 */

#include <linux/linkage.h>
#define ARMADA_XP_CFB_CTL_REG_OFFSET 0x0
#define ARMADA_XP_CFB_CFG_REG_OFFSET 0x4

#include <asm/assembler.h>
#include <asm/cp15.h>

	.text
/*
 * Returns the coherency base address in r1 (r0 is untouched), or 0 if
 * the coherency fabric is not enabled.
 */
ENTRY(ll_get_coherency_base)
	mrc	p15, 0, r1, c1, c0, 0
	tst	r1, #CR_M @ Check MMU bit enabled
	bne	1f

	/*
	 * MMU is disabled, use the physical address of the coherency
	 * base address, (or 0x0 if the coherency fabric is not mapped)
	 */
	adr	r1, 3f
	ldr	r3, [r1]
	ldr	r1, [r1, r3]
	b	2f
1:
	/*
	 * MMU is enabled, use the virtual address of the coherency
	 * base address.
	 */
	ldr	r1, =coherency_base
	ldr	r1, [r1]
2:
	ret	lr
ENDPROC(ll_get_coherency_base)

/*
 * Returns the coherency CPU mask in r3 (r0 is untouched). This
 * coherency CPU mask can be used with the coherency fabric
 * configuration and control registers. Note that the mask is already
 * endian-swapped as appropriate so that the calling functions do not
 * have to care about endianness issues while accessing the coherency
 * fabric registers
 */
ENTRY(ll_get_coherency_cpumask)
	mrc	p15, 0, r3, cr0, cr0, 5
	and	r3, r3, #15
	mov	r2, #(1 << 24)
	lsl	r3, r2, r3
ARM_BE8(rev	r3, r3)
	ret	lr
ENDPROC(ll_get_coherency_cpumask)

/*
 * ll_add_cpu_to_smp_group(), ll_enable_coherency() and
 * ll_disable_coherency() use the strex/ldrex instructions while the
 * MMU can be disabled. The Armada XP SoC has an exclusive monitor
 * that tracks transactions to Device and/or SO memory and thanks to
 * that, exclusive transactions are functional even when the MMU is
 * disabled.
 */

ENTRY(ll_add_cpu_to_smp_group)
	/*
	 * As r0 is not modified by ll_get_coherency_base() and
	 * ll_get_coherency_cpumask(), we use it to temporarily save lr
	 * and avoid it being modified by the branch and link
	 * calls. This function is used very early in the secondary
	 * CPU boot, and no stack is available at this point.
	 */
	mov	r0, lr
	bl	ll_get_coherency_base
	/* Bail out if the coherency is not enabled */
	cmp	r1, #0
	reteq	r0
	bl	ll_get_coherency_cpumask
	mov	lr, r0
	add	r0, r1, #ARMADA_XP_CFB_CFG_REG_OFFSET
1:
	ldrex	r2, [r0]
	orr	r2, r2, r3
	strex	r1, r2, [r0]
	cmp	r1, #0
	bne	1b
	ret	lr
ENDPROC(ll_add_cpu_to_smp_group)

ENTRY(ll_enable_coherency)
	/*
	 * As r0 is not modified by ll_get_coherency_base() and
	 * ll_get_coherency_cpumask(), we use it to temporarily save lr
	 * and avoid it being modified by the branch and link
	 * calls. This function is used very early in the secondary
	 * CPU boot, and no stack is available at this point.
	 */
	mov	r0, lr
	bl	ll_get_coherency_base
	/* Bail out if the coherency is not enabled */
	cmp	r1, #0
	reteq	r0
	bl	ll_get_coherency_cpumask
	mov	lr, r0
	add	r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
1:
	ldrex	r2, [r0]
	orr	r2, r2, r3
	strex	r1, r2, [r0]
	cmp	r1, #0
	bne	1b
	dsb
	mov	r0, #0
	ret	lr
ENDPROC(ll_enable_coherency)

ENTRY(ll_disable_coherency)
	/*
	 * As r0 is not modified by ll_get_coherency_base() and
	 * ll_get_coherency_cpumask(), we use it to temporarily save lr
	 * and avoid it being modified by the branch and link
	 * calls. This function is used very early in the secondary
	 * CPU boot, and no stack is available at this point.
	 */
	mov	r0, lr
	bl	ll_get_coherency_base
	/* Bail out if the coherency is not enabled */
	cmp	r1, #0
	reteq	r0
	bl	ll_get_coherency_cpumask
	mov	lr, r0
	add	r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
1:
	ldrex	r2, [r0]
	bic	r2, r2, r3
	strex	r1, r2, [r0]
	cmp	r1, #0
	bne	1b
	dsb
	ret	lr
ENDPROC(ll_disable_coherency)

	.align 2
3:
	.long	coherency_phys_base - .
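/*
 * Illustrative sketch (an assumption, not from the file): how an early
 * secondary-boot path is expected to chain these helpers while no stack
 * exists; "secondary_continue" is a hypothetical label.
 *
 *	bl	ll_add_cpu_to_smp_group
 *	bl	ll_enable_coherency
 *	b	secondary_continue
 */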
aixcc-public/challenge-001-exemplar-source
1,853
arch/arm/mach-mvebu/pmsu_ll.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014 Marvell
 *
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 * Gregory Clement <gregory.clement@free-electrons.com>
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

ENTRY(armada_38x_scu_power_up)
	mrc	p15, 4, r1, c15, c0	@ get SCU base address
	orr	r1, r1, #0x8		@ SCU CPU Power Status Register
	mrc	p15, 0, r0, cr0, cr0, 5 @ get the CPU ID
	and	r0, r0, #15
	add	r1, r1, r0
	mov	r0, #0x0
	strb	r0, [r1]		@ switch SCU power state to Normal mode
	ret	lr
ENDPROC(armada_38x_scu_power_up)

/*
 * This is the entry point through which CPUs exiting cpuidle deep
 * idle state are going.
 */
ENTRY(armada_370_xp_cpu_resume)
ARM_BE8(setend	be)			@ go BE8 if entered LE
	/*
	 * Disable the MMU that might have been enabled in BootROM if
	 * this code is used in the resume path of a suspend/resume
	 * cycle.
	 */
	mrc	p15, 0, r1, c1, c0, 0
	bic	r1, #1
	mcr	p15, 0, r1, c1, c0, 0
	bl	ll_add_cpu_to_smp_group
	bl	ll_enable_coherency
	b	cpu_resume
ENDPROC(armada_370_xp_cpu_resume)

ENTRY(armada_38x_cpu_resume)
	/* do we need it for Armada 38x */
ARM_BE8(setend	be)			@ go BE8 if entered LE
	bl	v7_invalidate_l1
	bl	armada_38x_scu_power_up
	b	cpu_resume
ENDPROC(armada_38x_cpu_resume)

.global mvebu_boot_wa_start
.global mvebu_boot_wa_end

/* The following code will be executed from SRAM */
ENTRY(mvebu_boot_wa_start)
ARM_BE8(setend	be)
	adr	r0, 1f
	ldr	r0, [r0]		@ load the address of the
					@ resume register
	ldr	r0, [r0]		@ load the value in the
					@ resume register
ARM_BE8(rev	r0, r0)			@ the value is stored LE
	mov	pc, r0			@ jump to this value
/*
 * the last word of this piece of code will be filled by the physical
 * address of the boot address register just after being copied in SRAM
 */
1:
	.long	.
mvebu_boot_wa_end:
ENDPROC(mvebu_boot_wa_end)
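/*
 * Illustrative note (an assumption, not from the file): before the
 * mvebu_boot_wa_start..mvebu_boot_wa_end sequence runs from SRAM, the
 * caller is expected to patch the trailing ".long ." word, i.e. the
 * last word of the copied region, with the physical address of the
 * boot-address (resume) register the code dereferences above.
 */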
aixcc-public/challenge-001-exemplar-source
18,661
arch/arm/mm/proc-xscale.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/mm/proc-xscale.S
 *
 * Author:	Nicolas Pitre
 * Created:	November 2000
 * Copyright:	(C) 2000, 2001 MontaVista Software Inc.
 *
 * MMU functions for the Intel XScale CPUs
 *
 * 2001 Aug 21:
 *	some contributions by Brett Gaines <brett.w.gaines@intel.com>
 *	Copyright 2001 by Intel Corp.
 *
 * 2001 Sep 08:
 *	Completely revisited, many important fixes
 *	Nicolas Pitre <nico@fluxnic.net>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the area
 * is larger than this, then we flush the whole cache
 */
#define MAX_AREA_SIZE	32768

/*
 * the cache line size of the I and D cache
 */
#define CACHELINESIZE	32

/*
 * the size of the data cache
 */
#define CACHESIZE	32768

/*
 * Virtual address used to allocate the cache when flushed
 *
 * This must be an address range which is _never_ used.  It should
 * apparently have a mapping in the corresponding page table for
 * compatibility with future CPUs that _could_ require it.  For instance we
 * don't care.
 *
 * This must be aligned on a 2*CACHESIZE boundary.  The code selects one of
 * the 2 areas alternately each time the clean_d_cache macro is used.
 * Without this the XScale core exhibits cache eviction problems and no one
 * knows why.
 *
 * Reminder: the vector table is located at 0xffff0000-0xffff0fff.
 */
#define CLEAN_ADDR	0xfffe0000

/*
 * This macro is used to wait for a CP15 write and is needed
 * when we have to ensure that the last operation to the co-pro
 * was completed before continuing with operation.
 */
	.macro	cpwait, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	mov	\rd, \rd			@ wait for completion
	sub	pc, pc, #4			@ flush instruction pipeline
	.endm

	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm

/*
 * This macro cleans the entire dcache using line allocate.
 * The main loop has been unrolled to reduce loop overhead.
 * rd and rs are two scratch registers.
 */
	.macro  clean_d_cache, rd, rs
	ldr	\rs, =clean_addr
	ldr	\rd, [\rs]
	eor	\rd, \rd, #CACHESIZE
	str	\rd, [\rs]
	add	\rs, \rd, #CACHESIZE
1:	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	teq	\rd, \rs
	bne	1b
	.endm

	.data
	.align	2
clean_addr:	.word	CLEAN_ADDR

	.text

/*
 * cpu_xscale_proc_init()
 *
 * Nothing too exciting at the moment
 */
ENTRY(cpu_xscale_proc_init)
	@ enable write buffer coalescing. Some bootloaders disable it
	mrc	p15, 0, r1, c1, c0, 1
	bic	r1, r1, #1
	mcr	p15, 0, r1, c1, c0, 1
	ret	lr

/*
 * cpu_xscale_proc_fin()
 */
ENTRY(cpu_xscale_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_xscale_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * Beware PXA270 erratum E7.
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_xscale_reset)
	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mcr	p15, 0, r1, c10, c4, 1		@ unlock I-TLB
	mcr	p15, 0, r1, c8, c5, 0		@ invalidate I-TLB
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0086			@ ........B....CA.
	bic	r1, r1, #0x3900			@ ..VIZ..S........
	sub	pc, pc, #4			@ flush pipeline
	@ *** cache line aligned ***
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches & BTB
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: MMU turned off from this point.  We count on the pipeline
	@ already containing those last two instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	ret	r0
ENDPROC(cpu_xscale_reset)
	.popsection

/*
 * cpu_xscale_do_idle()
 *
 * Cause the processor to idle
 *
 * For now we do nothing but go to idle mode for every case
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 */
	.align	5

ENTRY(cpu_xscale_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ Go to IDLE
	ret	lr

/* ================================= CACHE ================================ */

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(xscale_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(xscale_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(xscale_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(xscale_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	clean_d_cache r0, r1
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	ret	lr

/*
 *	flush_user_cache_range(start, end, vm_flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- vma	- vm_area_struct describing address space
 */
	.align	5
ENTRY(xscale_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #MAX_AREA_SIZE
	bhs	__flush_whole_cache

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mcr	p15, 0, r0, c7, c6, 1		@ Invalidate D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ Invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	ret	lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 *
 *	Note: single I-cache line invalidation isn't used here since
 *	it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xscale_coherent_kern_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	ret	lr

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(xscale_coherent_user_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ Invalidate I cache entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 6		@ Invalidate BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	ret	lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(xscale_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	ret	lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
xscale_dma_inv_range:
	tst	r0, #CACHELINESIZE - 1
	bic	r0, r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	ret	lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
xscale_dma_clean_range:
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(xscale_dma_flush_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	ret	lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(xscale_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	xscale_dma_clean_range
	bcs	xscale_dma_inv_range
	b	xscale_dma_flush_range
ENDPROC(xscale_dma_map_area)

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(xscale_80200_A0_A1_dma_map_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	beq	xscale_dma_clean_range
	b	xscale_dma_flush_range
ENDPROC(xscale_80200_A0_A1_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(xscale_dma_unmap_area)
	ret	lr
ENDPROC(xscale_dma_unmap_area)

	.globl	xscale_flush_kern_cache_louis
	.equ	xscale_flush_kern_cache_louis, xscale_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions xscale

/*
 * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
 * clear the dirty bits, which means that if we invalidate a dirty line,
 * the dirty data can still be written back to external memory later on.
 *
 * The recommended workaround is to always do a clean D-cache line before
 * doing an invalidate D-cache line, so on the affected processors,
 * dma_inv_range() is implemented as dma_flush_range().
 *
 * See erratum #25 of "Intel 80200 Processor Specification Update",
 * revision January 22, 2003, available at:
 *	http://www.intel.com/design/iio/specupdt/273415.htm
 */
.macro a0_alias basename
	.globl xscale_80200_A0_A1_\basename
	.type xscale_80200_A0_A1_\basename , %function
	.equ xscale_80200_A0_A1_\basename , xscale_\basename
.endm

/*
 * Most of the cache functions are unchanged for these processor revisions.
 * Export suitable alias symbols for the unchanged functions:
 */
	a0_alias flush_icache_all
	a0_alias flush_user_cache_all
	a0_alias flush_kern_cache_all
	a0_alias flush_kern_cache_louis
	a0_alias flush_user_cache_range
	a0_alias coherent_kern_range
	a0_alias coherent_user_range
	a0_alias flush_kern_dcache_area
	a0_alias dma_flush_range
	a0_alias dma_unmap_area

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions xscale_80200_A0_A1

ENTRY(cpu_xscale_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #CACHELINESIZE
	bhi	1b
	ret	lr

/* =============================== PageTable ============================== */

/*
 * cpu_xscale_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_xscale_switch_mm)
	clean_d_cache r1, r2
	mcr	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	cpwait_ret lr, ip

/*
 * cpu_xscale_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 *
 * Errata 40: must set memory to write-through for user read-only pages.
 */
cpu_xscale_mt_table:
	.long	0x00						@ L_PTE_MT_UNCACHED
	.long	PTE_BUFFERABLE					@ L_PTE_MT_BUFFERABLE
	.long	PTE_CACHEABLE					@ L_PTE_MT_WRITETHROUGH
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_WRITEBACK
	.long	PTE_EXT_TEX(1) | PTE_BUFFERABLE			@ L_PTE_MT_DEV_SHARED
	.long	0x00						@ unused
	.long	PTE_EXT_TEX(1) | PTE_CACHEABLE			@ L_PTE_MT_MINICACHE
	.long	PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE	@ L_PTE_MT_WRITEALLOC
	.long	0x00						@ unused
	.long	PTE_BUFFERABLE					@ L_PTE_MT_DEV_WC
	.long	0x00						@ unused
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_DEV_CACHED
	.long	0x00						@ L_PTE_MT_DEV_NONSHARED
	.long	0x00						@ unused
	.long	0x00						@ unused
	.long	0x00						@ unused

	.align	5
ENTRY(cpu_xscale_set_pte_ext)
	xscale_set_pte_ext_prologue

	@
	@ Erratum 40: must set memory to write-through for user read-only pages
	@
	and	ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_RDONLY) & ~(4 << 2)
	teq	ip, #L_PTE_MT_WRITEBACK | L_PTE_USER | L_PTE_RDONLY

	moveq	r1, #L_PTE_MT_WRITETHROUGH
	and	r1, r1, #L_PTE_MT_MASK
	adr	ip, cpu_xscale_mt_table
	ldr	ip, [ip, r1]
	bic	r2, r2, #0x0c
	orr	r2, r2, ip

	xscale_set_pte_ext_epilogue
	ret	lr

	.ltorg
	.align

.globl	cpu_xscale_suspend_size
.equ	cpu_xscale_suspend_size, 4 * 6
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_xscale_do_suspend)
	stmfd	sp!, {r4 - r9, lr}
	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
	mrc	p15, 0, r5, c15, c1, 0	@ CP access reg
	mrc	p15, 0, r6, c13, c0, 0	@ PID
	mrc	p15, 0, r7, c3, c0, 0	@ domain ID
	mrc	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mrc	p15, 0, r9, c1, c0, 0	@ control reg
	bic	r4, r4, #2		@ clear frequency change bit
	stmia	r0, {r4 - r9}		@ store cp regs
	ldmfd	sp!, {r4 - r9, pc}
ENDPROC(cpu_xscale_do_suspend)

ENTRY(cpu_xscale_do_resume)
	ldmia	r0, {r4 - r9}		@ load cp regs
	mov	ip, #0
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I & D TLBs
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I & D caches, BTB
	mcr	p14, 0, r4, c6, c0, 0	@ clock configuration, turbo mode.
	mcr	p15, 0, r5, c15, c1, 0	@ CP access reg
	mcr	p15, 0, r6, c13, c0, 0	@ PID
	mcr	p15, 0, r7, c3, c0, 0	@ domain ID
	mcr	p15, 0, r1, c2, c0, 0	@ translation table base addr
	mcr	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mov	r0, r9			@ control register
	b	cpu_resume_mmu
ENDPROC(cpu_xscale_do_resume)
#endif

	.type	__xscale_setup, #function
__xscale_setup:
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I, D caches & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I, D TLBs
	mov	r0, #1 << 6			@ cp6 for IOP3xx and Bulverde
	orr	r0, r0, #1 << 13		@ It's undefined whether this
	mcr	p15, 0, r0, c15, c1, 0		@ affects USR or SVC modes

	adr	r5, xscale_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0, 0		@ get control register
	bic	r0, r0, r5
	orr	r0, r0, r6
	ret	lr
	.size	__xscale_setup, . - __xscale_setup

	/*
	 * R
	 * .RVI ZFRS BLDP WCAM
	 * ..11 1.01 ....
.101 * */ .type xscale_crval, #object xscale_crval: crval clear=0x00003b07, mmuset=0x00003905, ucset=0x00001900 __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions xscale, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1 .section ".rodata" string cpu_arch_name, "armv5te" string cpu_elf_name, "v5" string cpu_80200_A0_A1_name, "XScale-80200 A0/A1" string cpu_80200_name, "XScale-80200" string cpu_80219_name, "XScale-80219" string cpu_8032x_name, "XScale-IOP8032x Family" string cpu_8033x_name, "XScale-IOP8033x Family" string cpu_pxa250_name, "XScale-PXA250" string cpu_pxa210_name, "XScale-PXA210" string cpu_ixp42x_name, "XScale-IXP42x Family" string cpu_ixp43x_name, "XScale-IXP43x Family" string cpu_ixp46x_name, "XScale-IXP46x Family" string cpu_ixp2400_name, "XScale-IXP2400" string cpu_ixp2800_name, "XScale-IXP2800" string cpu_pxa255_name, "XScale-PXA255" string cpu_pxa270_name, "XScale-PXA270" .align .section ".proc.info.init", "a" .macro xscale_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache .type __\name\()_proc_info,#object __\name\()_proc_info: .long \cpu_val .long \cpu_mask .long PMD_TYPE_SECT | \ PMD_SECT_BUFFERABLE | \ PMD_SECT_CACHEABLE | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __xscale_setup, __\name\()_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP .long \cpu_name .long xscale_processor_functions .long v4wbi_tlb_fns .long xscale_mc_user_fns .ifb \cache .long xscale_cache_fns .else .long \cache .endif .size __\name\()_proc_info, . - __\name\()_proc_info .endm xscale_proc_info 80200_A0_A1, 0x69052000, 0xfffffffe, cpu_80200_name, \ cache=xscale_80200_A0_A1_cache_fns xscale_proc_info 80200, 0x69052000, 0xfffffff0, cpu_80200_name xscale_proc_info 80219, 0x69052e20, 0xffffffe0, cpu_80219_name xscale_proc_info 8032x, 0x69052420, 0xfffff7e0, cpu_8032x_name xscale_proc_info 8033x, 0x69054010, 0xfffffd30, cpu_8033x_name xscale_proc_info pxa250, 0x69052100, 0xfffff7f0, cpu_pxa250_name xscale_proc_info pxa210, 0x69052120, 0xfffff3f0, cpu_pxa210_name xscale_proc_info ixp2400, 0x69054190, 0xfffffff0, cpu_ixp2400_name xscale_proc_info ixp2800, 0x690541a0, 0xfffffff0, cpu_ixp2800_name xscale_proc_info ixp42x, 0x690541c0, 0xffffffc0, cpu_ixp42x_name xscale_proc_info ixp43x, 0x69054040, 0xfffffff0, cpu_ixp43x_name xscale_proc_info ixp46x, 0x69054200, 0xffffff00, cpu_ixp46x_name xscale_proc_info pxa255, 0x69052d00, 0xfffffff0, cpu_pxa255_name xscale_proc_info pxa270, 0x69054110, 0xfffffff0, cpu_pxa270_name
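The cpu_xscale_set_pte_ext path above works in two steps: the erratum-40 check first downgrades user read-only write-back pages to write-through, then the low memory-type bits of the Linux PTE index the 16-entry cpu_xscale_mt_table to obtain the hardware C/B/TEX bits. A minimal user-space C sketch of that lookup follows; the bit positions and the table subset are illustrative stand-ins, not the kernel's real L_PTE_* encodings.

#include <stdint.h>
#include <stdio.h>

#define PTE_BUFFERABLE (1u << 2)   /* illustrative bit positions */
#define PTE_CACHEABLE  (1u << 3)

/* 4-bit Linux memory-type index -> hardware PTE bits (subset shown). */
static const uint32_t xscale_mt_table[16] = {
	[0] = 0,                              /* UNCACHED */
	[1] = PTE_BUFFERABLE,                 /* BUFFERABLE */
	[2] = PTE_CACHEABLE,                  /* WRITETHROUGH */
	[3] = PTE_CACHEABLE | PTE_BUFFERABLE, /* WRITEBACK */
	/* remaining entries left zero for brevity */
};

static uint32_t xscale_hw_bits(uint32_t mt, int user_rdonly)
{
	/* Erratum 40: user read-only write-back must become write-through. */
	if (user_rdonly && mt == 3)
		mt = 2;
	return xscale_mt_table[mt & 0xf];
}

int main(void)
{
	printf("user r/o writeback -> %#x\n", xscale_hw_bits(3, 1));
	return 0;
}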
aixcc-public/challenge-001-exemplar-source
2,522
arch/arm/mm/tlb-v6.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/arch/arm/mm/tlb-v6.S * * Copyright (C) 1997-2002 Russell King * * ARM architecture version 6 TLB handling functions. * These assume a split I/D TLB. */ #include <linux/init.h> #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/assembler.h> #include <asm/page.h> #include <asm/tlbflush.h> #include "proc-macros.S" #define HARVARD_TLB /* * v6wbi_flush_user_tlb_range(start, end, vma) * * Invalidate a range of TLB entries in the specified address space. * * - start - start address (may not be aligned) * - end - end address (exclusive, may not be aligned) * - vma - vm_area_struct describing address range * * It is assumed that: * - the "Invalidate single entry" instruction will invalidate * both the I and the D TLBs on Harvard-style TLBs */ ENTRY(v6wbi_flush_user_tlb_range) vma_vm_mm r3, r2 @ get vma->vm_mm mov ip, #0 mmid r3, r3 @ get vm_mm->context.id mcr p15, 0, ip, c7, c10, 4 @ drain write buffer mov r0, r0, lsr #PAGE_SHIFT @ align address mov r1, r1, lsr #PAGE_SHIFT asid r3, r3 @ mask ASID orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA mov r1, r1, lsl #PAGE_SHIFT vma_vm_flags r2, r2 @ get vma->vm_flags 1: #ifdef HARVARD_TLB mcr p15, 0, r0, c8, c6, 1 @ TLB invalidate D MVA (was 1) tst r2, #VM_EXEC @ Executable area ? mcrne p15, 0, r0, c8, c5, 1 @ TLB invalidate I MVA (was 1) #else mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate MVA (was 1) #endif add r0, r0, #PAGE_SZ cmp r0, r1 blo 1b mcr p15, 0, ip, c7, c10, 4 @ data synchronization barrier ret lr /* * v6wbi_flush_kern_tlb_range(start,end) * * Invalidate a range of kernel TLB entries * * - start - start address (may not be aligned) * - end - end address (exclusive, may not be aligned) */ ENTRY(v6wbi_flush_kern_tlb_range) mov r2, #0 mcr p15, 0, r2, c7, c10, 4 @ drain write buffer mov r0, r0, lsr #PAGE_SHIFT @ align address mov r1, r1, lsr #PAGE_SHIFT mov r0, r0, lsl #PAGE_SHIFT mov r1, r1, lsl #PAGE_SHIFT 1: #ifdef HARVARD_TLB mcr p15, 0, r0, c8, c6, 1 @ TLB invalidate D MVA mcr p15, 0, r0, c8, c5, 1 @ TLB invalidate I MVA #else mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate MVA #endif add r0, r0, #PAGE_SZ cmp r0, r1 blo 1b mcr p15, 0, r2, c7, c10, 4 @ data synchronization barrier mcr p15, 0, r2, c7, c5, 4 @ prefetch flush (isb) ret lr __INIT /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ define_tlb_functions v6wbi, v6wbi_tlb_flags
aixcc-public/challenge-001-exemplar-source
1,586
arch/arm/mm/tlb-v4wbi.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/mm/tlb-v4wbi.S
 *
 * Copyright (C) 1997-2002 Russell King
 *
 * ARM architecture version 4 and version 5 TLB handling functions.
 * These assume split I/D TLBs, with a write buffer.
 *
 * Processors: ARM920 ARM922 ARM925 ARM926 XScale
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"

/*
 * v4wbi_flush_user_tlb_range(start, end, vma)
 *
 * Invalidate a range of TLB entries in the specified address space.
 *
 * - start - range start address
 * - end - range end address
 * - vma - vm_area_struct describing address space
 */
	.align 5
ENTRY(v4wbi_flush_user_tlb_range)
	vma_vm_mm ip, r2
	act_mm r3			@ get current->active_mm
	eors r3, ip, r3			@ == mm ?
	retne lr			@ no, we don't do anything
	mov r3, #0
	mcr p15, 0, r3, c7, c10, 4	@ drain WB
	vma_vm_flags r2, r2
	bic r0, r0, #0x0ff
	bic r0, r0, #0xf00
1:	tst r2, #VM_EXEC
	mcrne p15, 0, r0, c8, c5, 1	@ invalidate I TLB entry
	mcr p15, 0, r0, c8, c6, 1	@ invalidate D TLB entry
	add r0, r0, #PAGE_SZ
	cmp r0, r1
	blo 1b
	ret lr

ENTRY(v4wbi_flush_kern_tlb_range)
	mov r3, #0
	mcr p15, 0, r3, c7, c10, 4	@ drain WB
	bic r0, r0, #0x0ff
	bic r0, r0, #0xf00
1:	mcr p15, 0, r0, c8, c5, 1	@ invalidate I TLB entry
	mcr p15, 0, r0, c8, c6, 1	@ invalidate D TLB entry
	add r0, r0, #PAGE_SZ
	cmp r0, r1
	blo 1b
	ret lr

	__INITDATA

	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
	define_tlb_functions v4wbi, v4wbi_tlb_flags
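v4wbi_flush_user_tlb_range begins with a guard the v6 version does not need: if the vma's mm is not the CPU's currently active mm, its TLB entries will be discarded at the next context switch anyway, so the routine returns without touching the hardware. A small C sketch of that guard, using hypothetical stand-in types for the kernel structures:

#include <stdint.h>

struct mm_struct { int dummy; };
struct vm_area   { struct mm_struct *vm_mm; };

static struct mm_struct *active_mm;     /* stand-in for current->active_mm */

static void tlb_inv_entry(uint32_t addr) { (void)addr; }  /* cp15 stub */

static void flush_user_tlb_range(uint32_t start, uint32_t end,
				 const struct vm_area *vma)
{
	if (vma->vm_mm != active_mm)    /* asm: eors r3, ip, r3; retne lr */
		return;

	for (start &= ~0xfffu; start < end; start += 0x1000)
		tlb_inv_entry(start);
}

int main(void)
{
	struct mm_struct mm;
	struct vm_area vma = { .vm_mm = &mm };

	active_mm = &mm;
	flush_user_tlb_range(0x8000, 0x9000, &vma);
	return 0;
}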
aixcc-public/challenge-001-exemplar-source
15,728
arch/arm/mm/proc-feroceon.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/arch/arm/mm/proc-feroceon.S: MMU functions for Feroceon * * Heavily based on proc-arm926.S * Maintainer: Assaf Hoffman <hoffman@marvell.com> */ #include <linux/linkage.h> #include <linux/init.h> #include <linux/pgtable.h> #include <asm/assembler.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/page.h> #include <asm/ptrace.h> #include "proc-macros.S" /* * This is the maximum size of an area which will be invalidated * using the single invalidate entry instructions. Anything larger * than this, and we go for the whole cache. * * This value should be chosen such that we choose the cheapest * alternative. */ #define CACHE_DLIMIT 16384 /* * the cache line size of the I and D cache */ #define CACHE_DLINESIZE 32 .bss .align 3 __cache_params_loc: .space 8 .text __cache_params: .word __cache_params_loc /* * cpu_feroceon_proc_init() */ ENTRY(cpu_feroceon_proc_init) mrc p15, 0, r0, c0, c0, 1 @ read cache type register ldr r1, __cache_params mov r2, #(16 << 5) tst r0, #(1 << 16) @ get way mov r0, r0, lsr #18 @ get cache size order movne r3, #((4 - 1) << 30) @ 4-way and r0, r0, #0xf moveq r3, #0 @ 1-way mov r2, r2, lsl r0 @ actual cache size movne r2, r2, lsr #2 @ turned into # of sets sub r2, r2, #(1 << 5) stmia r1, {r2, r3} ret lr /* * cpu_feroceon_proc_fin() */ ENTRY(cpu_feroceon_proc_fin) #if defined(CONFIG_CACHE_FEROCEON_L2) && \ !defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH) mov r0, #0 mcr p15, 1, r0, c15, c9, 0 @ clean L2 mcr p15, 0, r0, c7, c10, 4 @ drain WB #endif mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_feroceon_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * loc: location to jump to for soft reset */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_feroceon_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB #ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_feroceon_reset) .popsection /* * cpu_feroceon_do_idle() * * Called with IRQs disabled */ .align 5 ENTRY(cpu_feroceon_do_idle) mov r0, #0 mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt ret lr /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(feroceon_flush_icache_all) mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache ret lr ENDPROC(feroceon_flush_icache_all) /* * flush_user_cache_all() * * Clean and invalidate all cache entries in a particular * address space. */ .align 5 ENTRY(feroceon_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. 
*/ ENTRY(feroceon_flush_kern_cache_all) mov r2, #VM_EXEC __flush_whole_cache: ldr r1, __cache_params ldmia r1, {r1, r3} 1: orr ip, r1, r3 2: mcr p15, 0, ip, c7, c14, 2 @ clean + invalidate D set/way subs ip, ip, #(1 << 30) @ next way bcs 2b subs r1, r1, #(1 << 5) @ next set bcs 1b tst r2, #VM_EXEC mov ip, #0 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * flush_user_cache_range(start, end, flags) * * Clean and invalidate a range of cache entries in the * specified address range. * * - start - start address (inclusive) * - end - end address (exclusive) * - flags - vm_flags describing address space */ .align 5 ENTRY(feroceon_flush_user_cache_range) sub r3, r1, r0 @ calculate total size cmp r3, #CACHE_DLIMIT bgt __flush_whole_cache 1: tst r2, #VM_EXEC mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b tst r2, #VM_EXEC mov ip, #0 mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ .align 5 ENTRY(feroceon_coherent_kern_range) /* FALLTHROUGH */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(feroceon_coherent_user_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB mov r0, #0 ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size */ .align 5 ENTRY(feroceon_flush_kern_dcache_area) add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr .align 5 ENTRY(feroceon_range_flush_kern_dcache_area) mrs r2, cpsr add r1, r0, #PAGE_SZ - CACHE_DLINESIZE @ top addr is inclusive orr r3, r2, #PSR_I_BIT msr cpsr_c, r3 @ disable interrupts mcr p15, 5, r0, c15, c15, 0 @ D clean/inv range start mcr p15, 5, r1, c15, c15, 1 @ D clean/inv range top msr cpsr_c, r2 @ restore interrupts mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. 
* * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ .align 5 feroceon_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr .align 5 feroceon_range_dma_inv_range: mrs r2, cpsr tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry cmp r1, r0 subne r1, r1, #1 @ top address is inclusive orr r3, r2, #PSR_I_BIT msr cpsr_c, r3 @ disable interrupts mcr p15, 5, r0, c15, c14, 0 @ D inv range start mcr p15, 5, r1, c15, c14, 1 @ D inv range top msr cpsr_c, r2 @ restore interrupts ret lr /* * dma_clean_range(start, end) * * Clean the specified virtual address range. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ .align 5 feroceon_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr .align 5 feroceon_range_dma_clean_range: mrs r2, cpsr cmp r1, r0 subne r1, r1, #1 @ top address is inclusive orr r3, r2, #PSR_I_BIT msr cpsr_c, r3 @ disable interrupts mcr p15, 5, r0, c15, c13, 0 @ D clean range start mcr p15, 5, r1, c15, c13, 1 @ D clean range top msr cpsr_c, r2 @ restore interrupts mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. * * - start - virtual start address * - end - virtual end address */ .align 5 ENTRY(feroceon_dma_flush_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr .align 5 ENTRY(feroceon_range_dma_flush_range) mrs r2, cpsr cmp r1, r0 subne r1, r1, #1 @ top address is inclusive orr r3, r2, #PSR_I_BIT msr cpsr_c, r3 @ disable interrupts mcr p15, 5, r0, c15, c15, 0 @ D clean/inv range start mcr p15, 5, r1, c15, c15, 1 @ D clean/inv range top msr cpsr_c, r2 @ restore interrupts mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(feroceon_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq feroceon_dma_clean_range bcs feroceon_dma_inv_range b feroceon_dma_flush_range ENDPROC(feroceon_dma_map_area) /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(feroceon_range_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq feroceon_range_dma_clean_range bcs feroceon_range_dma_inv_range b feroceon_range_dma_flush_range ENDPROC(feroceon_range_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(feroceon_dma_unmap_area) ret lr ENDPROC(feroceon_dma_unmap_area) .globl feroceon_flush_kern_cache_louis .equ feroceon_flush_kern_cache_louis, feroceon_flush_kern_cache_all @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions feroceon .macro range_alias basename .globl feroceon_range_\basename .type feroceon_range_\basename , %function .equ 
feroceon_range_\basename , feroceon_\basename .endm /* * Most of the cache functions are unchanged for this case. * Export suitable alias symbols for the unchanged functions: */ range_alias flush_icache_all range_alias flush_user_cache_all range_alias flush_kern_cache_all range_alias flush_kern_cache_louis range_alias flush_user_cache_range range_alias coherent_kern_range range_alias coherent_user_range range_alias dma_unmap_area define_cache_functions feroceon_range .align 5 ENTRY(cpu_feroceon_dcache_clean_area) #if defined(CONFIG_CACHE_FEROCEON_L2) && \ !defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH) mov r2, r0 mov r3, r1 #endif 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE subs r1, r1, #CACHE_DLINESIZE bhi 1b #if defined(CONFIG_CACHE_FEROCEON_L2) && \ !defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH) 1: mcr p15, 1, r2, c15, c9, 1 @ clean L2 entry add r2, r2, #CACHE_DLINESIZE subs r3, r3, #CACHE_DLINESIZE bhi 1b #endif mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* =============================== PageTable ============================== */ /* * cpu_feroceon_switch_mm(pgd) * * Set the translation base pointer to be as described by pgd. * * pgd: new page tables */ .align 5 ENTRY(cpu_feroceon_switch_mm) #ifdef CONFIG_MMU /* * Note: we wish to call __flush_whole_cache but we need to preserve * lr to do so. The only way without touching main memory is to * use r2 which is normally used to test the VM_EXEC flag, and * compensate locally for the skipped ops if it is not set. */ mov r2, lr @ abuse r2 to preserve lr bl __flush_whole_cache @ if r2 contains the VM_EXEC bit then the next 2 ops are done already tst r2, #VM_EXEC mcreq p15, 0, ip, c7, c5, 0 @ invalidate I cache mcreq p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs ret r2 #else ret lr #endif /* * cpu_feroceon_set_pte_ext(ptep, pte, ext) * * Set a PTE and flush it out */ .align 5 ENTRY(cpu_feroceon_set_pte_ext) #ifdef CONFIG_MMU armv3_set_pte_ext wc_disable=0 mov r0, r0 mcr p15, 0, r0, c7, c10, 1 @ clean D entry #if defined(CONFIG_CACHE_FEROCEON_L2) && \ !defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH) mcr p15, 1, r0, c15, c9, 1 @ clean L2 entry #endif mcr p15, 0, r0, c7, c10, 4 @ drain WB #endif ret lr /* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */ .globl cpu_feroceon_suspend_size .equ cpu_feroceon_suspend_size, 4 * 3 #ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_feroceon_do_suspend) stmfd sp!, {r4 - r6, lr} mrc p15, 0, r4, c13, c0, 0 @ PID mrc p15, 0, r5, c3, c0, 0 @ Domain ID mrc p15, 0, r6, c1, c0, 0 @ Control register stmia r0, {r4 - r6} ldmfd sp!, {r4 - r6, pc} ENDPROC(cpu_feroceon_do_suspend) ENTRY(cpu_feroceon_do_resume) mov ip, #0 mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches ldmia r0, {r4 - r6} mcr p15, 0, r4, c13, c0, 0 @ PID mcr p15, 0, r5, c3, c0, 0 @ Domain ID mcr p15, 0, r1, c2, c0, 0 @ TTB address mov r0, r6 @ control register b cpu_resume_mmu ENDPROC(cpu_feroceon_do_resume) #endif .type __feroceon_setup, #function __feroceon_setup: mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 #endif adr r5, feroceon_crval ldmia r5, {r5, r6} mrc p15, 0, r0, c1, c0 @ get control register v4 bic r0, r0, r5 orr r0, r0, r6 ret lr .size __feroceon_setup, . 
- __feroceon_setup /* * B * R P * .RVI UFRS BLDP WCAM * .011 .001 ..11 0101 * */ .type feroceon_crval, #object feroceon_crval: crval clear=0x0000773f, mmuset=0x00003135, ucset=0x00001134 __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions feroceon, dabort=v5t_early_abort, pabort=legacy_pabort .section ".rodata" string cpu_arch_name, "armv5te" string cpu_elf_name, "v5" string cpu_feroceon_name, "Feroceon" string cpu_88fr531_name, "Feroceon 88FR531-vd" string cpu_88fr571_name, "Feroceon 88FR571-vd" string cpu_88fr131_name, "Feroceon 88FR131" .align .section ".proc.info.init", "a" .macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req .type __\name\()_proc_info,#object __\name\()_proc_info: .long \cpu_val .long \cpu_mask .long PMD_TYPE_SECT | \ PMD_SECT_BUFFERABLE | \ PMD_SECT_CACHEABLE | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __feroceon_setup, __\name\()_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP .long \cpu_name .long feroceon_processor_functions .long v4wbi_tlb_fns .long feroceon_user_fns .long \cache .size __\name\()_proc_info, . - __\name\()_proc_info .endm #ifdef CONFIG_CPU_FEROCEON_OLD_ID feroceon_proc_info feroceon_old_id, 0x41009260, 0xff00fff0, \ cpu_name=cpu_feroceon_name, cache=feroceon_cache_fns #endif feroceon_proc_info 88fr531, 0x56055310, 0xfffffff0, cpu_88fr531_name, \ cache=feroceon_cache_fns feroceon_proc_info 88fr571, 0x56155710, 0xfffffff0, cpu_88fr571_name, \ cache=feroceon_range_cache_fns feroceon_proc_info 88fr131, 0x56251310, 0xfffffff0, cpu_88fr131_name, \ cache=feroceon_range_cache_fns
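Both feroceon dma_map_area variants above dispatch on the DMA direction with a cmp/beq/bcs triple: TO_DEVICE needs only a clean (write-back), FROM_DEVICE only an invalidate, and BIDIRECTIONAL the full clean-plus-invalidate. A C sketch of that dispatch, with the enum values matching the ordering the bcs trick relies on (BIDIRECTIONAL < TO_DEVICE < FROM_DEVICE, as in the kernel's enum dma_data_direction) and stub cache operations:

#include <stddef.h>
#include <stdint.h>

enum dma_dir { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2 };

static void dma_clean_range(uintptr_t s, uintptr_t e) { (void)s; (void)e; }
static void dma_inv_range(uintptr_t s, uintptr_t e)   { (void)s; (void)e; }
static void dma_flush_range(uintptr_t s, uintptr_t e) { (void)s; (void)e; }

static void dma_map_area(uintptr_t start, size_t size, enum dma_dir dir)
{
	uintptr_t end = start + size;        /* asm: add r1, r1, r0 */

	if (dir == DMA_TO_DEVICE)            /* beq: write back dirty lines */
		dma_clean_range(start, end);
	else if (dir > DMA_TO_DEVICE)        /* bcs: discard soon-stale lines */
		dma_inv_range(start, end);
	else                                 /* bidirectional: both */
		dma_flush_range(start, end);
}

int main(void)
{
	dma_map_area(0x1000, 0x200, DMA_FROM_DEVICE);
	return 0;
}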
aixcc-public/challenge-001-exemplar-source
1,253
arch/arm/mm/abort-ev6.S
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/assembler.h>

#include "abort-macro.S"

/*
 * Function: v6_early_abort
 *
 * Params : r2 = pt_regs
 * : r4 = aborted context pc
 * : r5 = aborted context psr
 *
 * Returns : r4 - r11, r13 preserved
 *
 * Purpose : obtain information about current aborted instruction.
 * Note: we read user space. This means we might cause a data
 * abort here if the I-TLB and D-TLB aren't seeing the same
 * picture. Unfortunately, this does happen. We live with it.
 */
	.align 5
ENTRY(v6_early_abort)
	mrc p15, 0, r1, c5, c0, 0	@ get FSR
	mrc p15, 0, r0, c6, c0, 0	@ get FAR
/*
 * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR.
 */
#ifdef CONFIG_ARM_ERRATA_326103
	ldr ip, =0x4107b36
	mrc p15, 0, r3, c0, c0, 0	@ get processor id
	teq ip, r3, lsr #4		@ ARM1136?
	bne 1f
	tst r5, #PSR_J_BIT		@ Java?
	tsteq r5, #PSR_T_BIT		@ Thumb?
	bne 1f
	bic r1, r1, #1 << 11		@ clear bit 11 of FSR
	ldr r3, [r4]			@ read aborted ARM instruction
ARM_BE8(rev r3, r3)

	teq_ldrd tmp=ip, insn=r3	@ insn was LDRD?
	beq 1f				@ yes
	tst r3, #1 << 20		@ L = 0 -> write
	orreq r1, r1, #1 << 11		@ yes.
#endif
1:	uaccess_disable ip		@ disable userspace access
	b do_DataAbort
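The erratum-326103 block above recomputes DFSR bit 11 (the read/write bit) for a faulting SWP on ARM1136: it clears the bit, re-reads the aborted instruction, skips LDRD, and sets the bit again only when the instruction's L bit is 0 (a store). A C sketch of just that bit surgery, with is_ldrd standing in for the teq_ldrd macro (its decode is elided here):

#include <stdint.h>
#include <stdio.h>

#define FSR_WRITE (1u << 11)

static int is_ldrd(uint32_t insn)   /* stand-in for the teq_ldrd macro */
{
	(void)insn;
	return 0;                   /* decode elided for the sketch */
}

static uint32_t fixup_fsr_326103(uint32_t fsr, uint32_t insn)
{
	fsr &= ~FSR_WRITE;                   /* bic r1, r1, #1 << 11 */
	if (!is_ldrd(insn) && !(insn & (1u << 20)))
		fsr |= FSR_WRITE;            /* L == 0 means a write */
	return fsr;
}

int main(void)
{
	printf("%#x\n", fixup_fsr_326103(0x805, 0)); /* insn with L clear */
	return 0;
}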
aixcc-public/challenge-001-exemplar-source
11,154
arch/arm/mm/proc-arm920.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/arch/arm/mm/proc-arm920.S: MMU functions for ARM920 * * Copyright (C) 1999,2000 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. * hacked for non-paged-MM by Hyok S. Choi, 2003. * * These are the low level assembler for performing cache and TLB * functions on the arm920. * * CONFIG_CPU_ARM920_CPU_IDLE -> nohlt */ #include <linux/linkage.h> #include <linux/init.h> #include <linux/pgtable.h> #include <asm/assembler.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/page.h> #include <asm/ptrace.h> #include "proc-macros.S" /* * The size of one data cache line. */ #define CACHE_DLINESIZE 32 /* * The number of data cache segments. */ #define CACHE_DSEGMENTS 8 /* * The number of lines in a cache segment. */ #define CACHE_DENTRIES 64 /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual * cache line maintenance instructions. */ #define CACHE_DLIMIT 65536 .text /* * cpu_arm920_proc_init() */ ENTRY(cpu_arm920_proc_init) ret lr /* * cpu_arm920_proc_fin() */ ENTRY(cpu_arm920_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_arm920_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * loc: location to jump to for soft reset */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_arm920_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB #ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_arm920_reset) .popsection /* * cpu_arm920_do_idle() */ .align 5 ENTRY(cpu_arm920_do_idle) mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt ret lr #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(arm920_flush_icache_all) mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache ret lr ENDPROC(arm920_flush_icache_all) /* * flush_user_cache_all() * * Invalidate all cache entries in a particular address * space. */ ENTRY(arm920_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. */ ENTRY(arm920_flush_kern_cache_all) mov r2, #VM_EXEC mov ip, #0 __flush_whole_cache: mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 8 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries 2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 5 bcs 1b @ segments 7 to 0 tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * flush_user_cache_range(start, end, flags) * * Invalidate a range of cache entries in the specified * address space. 
* * - start - start address (inclusive) * - end - end address (exclusive) * - flags - vm_flags for address space */ ENTRY(arm920_flush_user_cache_range) mov ip, #0 sub r3, r1, r0 @ calculate total size cmp r3, #CACHE_DLIMIT bhs __flush_whole_cache 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry tst r2, #VM_EXEC mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm920_coherent_kern_range) /* FALLTHROUGH */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm920_coherent_user_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB mov r0, #0 ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size */ ENTRY(arm920_flush_kern_dcache_area) add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm920_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_clean_range(start, end) * * Clean the specified virtual address range. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm920_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. 
* * - start - virtual start address * - end - virtual end address */ ENTRY(arm920_dma_flush_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm920_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq arm920_dma_clean_range bcs arm920_dma_inv_range b arm920_dma_flush_range ENDPROC(arm920_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm920_dma_unmap_area) ret lr ENDPROC(arm920_dma_unmap_area) .globl arm920_flush_kern_cache_louis .equ arm920_flush_kern_cache_louis, arm920_flush_kern_cache_all @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions arm920 #endif ENTRY(cpu_arm920_dcache_clean_area) 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE subs r1, r1, #CACHE_DLINESIZE bhi 1b ret lr /* =============================== PageTable ============================== */ /* * cpu_arm920_switch_mm(pgd) * * Set the translation base pointer to be as described by pgd. * * pgd: new page tables */ .align 5 ENTRY(cpu_arm920_switch_mm) #ifdef CONFIG_MMU mov ip, #0 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache #else @ && 'Clean & Invalidate whole DCache' @ && Re-written to use Index Ops. @ && Uses registers r1, r3 and ip mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 8 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries 2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 5 bcs 1b @ segments 7 to 0 #endif mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif ret lr /* * cpu_arm920_set_pte(ptep, pte, ext) * * Set a PTE and flush it out */ .align 5 ENTRY(cpu_arm920_set_pte_ext) #ifdef CONFIG_MMU armv3_set_pte_ext mov r0, r0 mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c10, 4 @ drain WB #endif ret lr /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ .globl cpu_arm920_suspend_size .equ cpu_arm920_suspend_size, 4 * 3 #ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_arm920_do_suspend) stmfd sp!, {r4 - r6, lr} mrc p15, 0, r4, c13, c0, 0 @ PID mrc p15, 0, r5, c3, c0, 0 @ Domain ID mrc p15, 0, r6, c1, c0, 0 @ Control register stmia r0, {r4 - r6} ldmfd sp!, {r4 - r6, pc} ENDPROC(cpu_arm920_do_suspend) ENTRY(cpu_arm920_do_resume) mov ip, #0 mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches ldmia r0, {r4 - r6} mcr p15, 0, r4, c13, c0, 0 @ PID mcr p15, 0, r5, c3, c0, 0 @ Domain ID mcr p15, 0, r1, c2, c0, 0 @ TTB address mov r0, r6 @ control register b cpu_resume_mmu ENDPROC(cpu_arm920_do_resume) #endif .type __arm920_setup, #function __arm920_setup: mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 #endif adr r5, arm920_crval ldmia r5, {r5, r6} mrc p15, 0, r0, c1, c0 @ get control register v4 bic r0, r0, r5 orr r0, r0, r6 ret lr .size __arm920_setup, . 
- __arm920_setup /* * R * .RVI ZFRS BLDP WCAM * ..11 0001 ..11 0101 * */ .type arm920_crval, #object arm920_crval: crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130 __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions arm920, dabort=v4t_early_abort, pabort=legacy_pabort, suspend=1 .section ".rodata" string cpu_arch_name, "armv4t" string cpu_elf_name, "v4" string cpu_arm920_name, "ARM920T" .align .section ".proc.info.init", "a" .type __arm920_proc_info,#object __arm920_proc_info: .long 0x41009200 .long 0xff00fff0 .long PMD_TYPE_SECT | \ PMD_SECT_BUFFERABLE | \ PMD_SECT_CACHEABLE | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __arm920_setup, __arm920_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB .long cpu_arm920_name .long arm920_processor_functions .long v4wbi_tlb_fns .long v4wb_user_fns #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH .long arm920_cache_fns #else .long v4wt_cache_fns #endif .size __arm920_proc_info, . - __arm920_proc_info
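The __flush_whole_cache loop in proc-arm920.S above walks 8 segments of 64 entries each, packing the segment into bits [7:5] and the entry index into bits [31:26] of the cp15 set/way operand. The same traversal in C, with the cp15 clean+invalidate-by-index operation reduced to a stub:

#include <stdint.h>

#define CACHE_DSEGMENTS 8
#define CACHE_DENTRIES  64

static void clean_inv_dcache_index(uint32_t op) { (void)op; }  /* cp15 stub */

static void flush_kern_cache_all(void)
{
	/* asm: segments 7..0 in bits [7:5], entries 63..0 in bits [31:26] */
	for (int seg = CACHE_DSEGMENTS - 1; seg >= 0; seg--)
		for (int idx = CACHE_DENTRIES - 1; idx >= 0; idx--)
			clean_inv_dcache_index(((uint32_t)seg << 5) |
					       ((uint32_t)idx << 26));
}

int main(void)
{
	flush_kern_cache_all();
	return 0;
}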
aixcc-public/challenge-001-exemplar-source
1,457
arch/arm/mm/tlb-v4.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/mm/tlb-v4.S
 *
 * Copyright (C) 1997-2002 Russell King
 *
 * ARM architecture version 4 TLB handling functions.
 * These assume split I/D TLBs, and no write buffer.
 *
 * Processors: ARM720T
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"

	.align 5
/*
 * v4_flush_user_tlb_range(start, end, vma)
 *
 * Invalidate a range of TLB entries in the specified user address space.
 *
 * - start - range start address
 * - end - range end address
 * - vma - vm_area_struct describing address space
 */
	.align 5
ENTRY(v4_flush_user_tlb_range)
	vma_vm_mm ip, r2
	act_mm r3			@ get current->active_mm
	eors r3, ip, r3			@ == mm ?
	retne lr			@ no, we don't do anything
.v4_flush_kern_tlb_range:
	bic r0, r0, #0x0ff
	bic r0, r0, #0xf00
1:	mcr p15, 0, r0, c8, c7, 1	@ invalidate TLB entry
	add r0, r0, #PAGE_SZ
	cmp r0, r1
	blo 1b
	ret lr

/*
 * v4_flush_kern_tlb_range(start, end)
 *
 * Invalidate a range of TLB entries in the specified kernel
 * address range.
 *
 * - start - virtual address (may not be aligned)
 * - end - virtual address (may not be aligned)
 */
	.globl v4_flush_kern_tlb_range
	.equ v4_flush_kern_tlb_range, .v4_flush_kern_tlb_range

	__INITDATA

	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
	define_tlb_functions v4, v4_tlb_flags
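The pair of bic instructions in both routines above (#0x0ff, then #0xf00) exists only because an ARM data-processing immediate is an 8-bit value with a rotation, so clearing twelve bits takes two instructions; the intent is a single 4 KiB alignment mask. In C the whole loop collapses to:

#include <stdint.h>

#define PAGE_SIZE 0x1000u

static void tlb_inv_entry(uint32_t addr) { (void)addr; }  /* cp15 stand-in */

static void flush_kern_tlb_range(uint32_t start, uint32_t end)
{
	/* One mask replaces the asm's two bic immediates. */
	for (start &= ~(PAGE_SIZE - 1); start < end; start += PAGE_SIZE)
		tlb_inv_entry(start);
}

int main(void)
{
	flush_kern_tlb_range(0x8123, 0xa000);
	return 0;
}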
aixcc-public/challenge-001-exemplar-source
1,880
arch/arm/mm/pv-fixup-asm.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2015 Russell King * * This assembly is required to safely remap the physical address space * for Keystone 2 */ #include <linux/linkage.h> #include <linux/pgtable.h> #include <asm/asm-offsets.h> #include <asm/cp15.h> #include <asm/memory.h> .section ".idmap.text", "ax" #define L1_ORDER 3 #define L2_ORDER 3 ENTRY(lpae_pgtables_remap_asm) stmfd sp!, {r4-r8, lr} mrc p15, 0, r8, c1, c0, 0 @ read control reg bic ip, r8, #CR_M @ disable caches and MMU mcr p15, 0, ip, c1, c0, 0 dsb isb /* Update level 2 entries covering the kernel */ ldr r6, =(_end - 1) add r7, r2, #0x1000 add r6, r7, r6, lsr #SECTION_SHIFT - L2_ORDER add r7, r7, #KERNEL_OFFSET >> (SECTION_SHIFT - L2_ORDER) 1: ldrd r4, r5, [r7] adds r4, r4, r0 adc r5, r5, r1 strd r4, r5, [r7], #1 << L2_ORDER cmp r7, r6 bls 1b /* Update level 2 entries for the boot data */ add r7, r2, #0x1000 movw r3, #FDT_FIXED_BASE >> (SECTION_SHIFT - L2_ORDER) add r7, r7, r3 ldrd r4, r5, [r7] adds r4, r4, r0 adc r5, r5, r1 strd r4, r5, [r7], #1 << L2_ORDER ldrd r4, r5, [r7] adds r4, r4, r0 adc r5, r5, r1 strd r4, r5, [r7] /* Update level 1 entries */ mov r6, #4 mov r7, r2 2: ldrd r4, r5, [r7] adds r4, r4, r0 adc r5, r5, r1 strd r4, r5, [r7], #1 << L1_ORDER subs r6, r6, #1 bne 2b mrrc p15, 0, r4, r5, c2 @ read TTBR0 adds r4, r4, r0 @ update physical address adc r5, r5, r1 mcrr p15, 0, r4, r5, c2 @ write back TTBR0 mrrc p15, 1, r4, r5, c2 @ read TTBR1 adds r4, r4, r0 @ update physical address adc r5, r5, r1 mcrr p15, 1, r4, r5, c2 @ write back TTBR1 dsb mov ip, #0 mcr p15, 0, ip, c7, c5, 0 @ I+BTB cache invalidate mcr p15, 0, ip, c8, c7, 0 @ local_flush_tlb_all() dsb isb mcr p15, 0, r8, c1, c0, 0 @ re-enable MMU dsb isb ldmfd sp!, {r4-r8, pc} ENDPROC(lpae_pgtables_remap_asm)
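The core of lpae_pgtables_remap_asm above is a 64-bit add: each 8-byte LPAE descriptor gets the physical-address delta (carried in r1:r0) added via an adds/adc pair, for the kernel's level-2 entries, the FDT entries, and the four level-1 entries alike. Reduced to C over a plain descriptor array, with an illustrative delta value:

#include <stdint.h>
#include <stddef.h>

/* Add a physical-address delta to a run of 64-bit LPAE descriptors;
 * each table[i] += delta is the C form of the asm's adds/adc pair. */
static void remap_descriptors(uint64_t *table, size_t n, uint64_t delta)
{
	for (size_t i = 0; i < n; i++)
		table[i] += delta;
}

int main(void)
{
	uint64_t l1[4] = { 0x80000003, 0x80001003, 0x80002003, 0x80003003 };

	remap_descriptors(l1, 4, 0x700000000ull);   /* illustrative delta */
	return 0;
}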
aixcc-public/challenge-001-exemplar-source
1,499
arch/arm/mm/l2c-l2x0-resume.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * L2C-310 early resume code. This can be used by platforms to restore
 * the settings of their L2 cache controller before restoring the
 * processor state.
 *
 * This code can only be used if you are running in the secure world.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/hardware/cache-l2x0.h>

	.text

ENTRY(l2c310_early_resume)
	adr r0, 1f
	ldr r2, [r0]
	add r0, r2, r0
	ldmia r0, {r1, r2, r3, r4, r5, r6, r7, r8}
	@ r1 = phys address of L2C-310 controller
	@ r2 = aux_ctrl
	@ r3 = tag_latency
	@ r4 = data_latency
	@ r5 = filter_start
	@ r6 = filter_end
	@ r7 = prefetch_ctrl
	@ r8 = pwr_ctrl

	@ Check that the address has been initialised
	teq r1, #0
	reteq lr

	@ The prefetch and power control registers are revision dependent
	@ and can be written whether or not the L2 cache is enabled
	ldr r0, [r1, #L2X0_CACHE_ID]
	and r0, r0, #L2X0_CACHE_ID_RTL_MASK
	cmp r0, #L310_CACHE_ID_RTL_R2P0
	strcs r7, [r1, #L310_PREFETCH_CTRL]
	cmp r0, #L310_CACHE_ID_RTL_R3P0
	strcs r8, [r1, #L310_POWER_CTRL]

	@ Don't setup the L2 cache if it is already enabled
	ldr r0, [r1, #L2X0_CTRL]
	tst r0, #L2X0_CTRL_EN
	retne lr

	str r3, [r1, #L310_TAG_LATENCY_CTRL]
	str r4, [r1, #L310_DATA_LATENCY_CTRL]
	str r6, [r1, #L310_ADDR_FILTER_END]
	str r5, [r1, #L310_ADDR_FILTER_START]

	str r2, [r1, #L2X0_AUX_CTRL]
	mov r9, #L2X0_CTRL_EN
	str r9, [r1, #L2X0_CTRL]
	ret lr
ENDPROC(l2c310_early_resume)

	.align
1:	.long l2x0_saved_regs - .
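The restore order in l2c310_early_resume above matters: the revision-gated prefetch/power writes are allowed even while the cache is on, the routine bails out if the enable bit is already set, and the latencies, filters, and aux control are all programmed before the single final write that sets the enable bit. A C sketch of that ordering follows; the word offsets and register values are illustrative stand-ins, not the cache-l2x0.h constants:

#include <stdint.h>

/* Illustrative word offsets into the L2C-310 register block. */
enum { CTRL = 0x100 / 4, AUX = 0x104 / 4, TAG_LAT = 0x108 / 4,
       DATA_LAT = 0x10c / 4, FILT_START = 0xc00 / 4, FILT_END = 0xc04 / 4 };

struct l2x0_saved {
	volatile uint32_t *base;   /* NULL if never initialised */
	uint32_t aux, tag_lat, data_lat, filt_start, filt_end;
};

static void l2c310_early_resume_c(const struct l2x0_saved *r)
{
	volatile uint32_t *l2 = r->base;

	if (!l2 || (l2[CTRL] & 1u))     /* uninitialised, or already enabled */
		return;

	l2[TAG_LAT]    = r->tag_lat;    /* program everything first... */
	l2[DATA_LAT]   = r->data_lat;
	l2[FILT_END]   = r->filt_end;   /* end before start, as in the asm */
	l2[FILT_START] = r->filt_start;
	l2[AUX]        = r->aux;
	l2[CTRL]       = 1u;            /* ...then the one enable write */
}

int main(void)
{
	static uint32_t fake_l2[0x1000];
	struct l2x0_saved s = { fake_l2, 0x02020000, 0x111, 0x121, 0, 0 };

	l2c310_early_resume_c(&s);
	return 0;
}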
aixcc-public/challenge-001-exemplar-source
5,204
arch/arm/mm/proc-sa110.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/arch/arm/mm/proc-sa110.S * * Copyright (C) 1997-2002 Russell King * hacked for non-paged-MM by Hyok S. Choi, 2003. * * MMU functions for SA110 * * These are the low level assembler for performing cache and TLB * functions on the StrongARM-110. */ #include <linux/linkage.h> #include <linux/init.h> #include <linux/pgtable.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/hwcap.h> #include <mach/hardware.h> #include <asm/pgtable-hwdef.h> #include <asm/ptrace.h> #include "proc-macros.S" /* * the cache line size of the I and D cache */ #define DCACHELINESIZE 32 .text /* * cpu_sa110_proc_init() */ ENTRY(cpu_sa110_proc_init) mov r0, #0 mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching ret lr /* * cpu_sa110_proc_fin() */ ENTRY(cpu_sa110_proc_fin) mov r0, #0 mcr p15, 0, r0, c15, c2, 2 @ Disable clock switching mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_sa110_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * loc: location to jump to for soft reset */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_sa110_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB #ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_sa110_reset) .popsection /* * cpu_sa110_do_idle(type) * * Cause the processor to idle * * type: call type: * 0 = slow idle * 1 = fast idle * 2 = switch to slow processor clock * 3 = switch to fast processor clock */ .align 5 ENTRY(cpu_sa110_do_idle) mcr p15, 0, ip, c15, c2, 2 @ disable clock switching ldr r1, =UNCACHEABLE_ADDR @ load from uncacheable loc ldr r1, [r1, #0] @ force switch to MCLK mov r0, r0 @ safety mov r0, r0 @ safety mov r0, r0 @ safety mcr p15, 0, r0, c15, c8, 2 @ Wait for interrupt, cache aligned mov r0, r0 @ safety mov r0, r0 @ safety mov r0, r0 @ safety mcr p15, 0, r0, c15, c1, 2 @ enable clock switching ret lr /* ================================= CACHE ================================ */ /* * cpu_sa110_dcache_clean_area(addr,sz) * * Clean the specified entry of any caches such that the MMU * translation fetches will obtain correct data. * * addr: cache-unaligned virtual address */ .align 5 ENTRY(cpu_sa110_dcache_clean_area) 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #DCACHELINESIZE subs r1, r1, #DCACHELINESIZE bhi 1b ret lr /* =============================== PageTable ============================== */ /* * cpu_sa110_switch_mm(pgd) * * Set the translation base pointer to be as described by pgd. * * pgd: new page tables */ .align 5 ENTRY(cpu_sa110_switch_mm) #ifdef CONFIG_MMU str lr, [sp, #-4]! 
bl v4wb_flush_kern_cache_all @ clears IP mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs ldr pc, [sp], #4 #else ret lr #endif /* * cpu_sa110_set_pte_ext(ptep, pte, ext) * * Set a PTE and flush it out */ .align 5 ENTRY(cpu_sa110_set_pte_ext) #ifdef CONFIG_MMU armv3_set_pte_ext wc_disable=0 mov r0, r0 mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c10, 4 @ drain WB #endif ret lr .type __sa110_setup, #function __sa110_setup: mov r10, #0 mcr p15, 0, r10, c7, c7 @ invalidate I,D caches on v4 mcr p15, 0, r10, c7, c10, 4 @ drain write buffer on v4 #ifdef CONFIG_MMU mcr p15, 0, r10, c8, c7 @ invalidate I,D TLBs on v4 #endif adr r5, sa110_crval ldmia r5, {r5, r6} mrc p15, 0, r0, c1, c0 @ get control register v4 bic r0, r0, r5 orr r0, r0, r6 ret lr .size __sa110_setup, . - __sa110_setup /* * R * .RVI ZFRS BLDP WCAM * ..01 0001 ..11 1101 * */ .type sa110_crval, #object sa110_crval: crval clear=0x00003f3f, mmuset=0x0000113d, ucset=0x00001130 __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions sa110, dabort=v4_early_abort, pabort=legacy_pabort .section ".rodata" string cpu_arch_name, "armv4" string cpu_elf_name, "v4" string cpu_sa110_name, "StrongARM-110" .align .section ".proc.info.init", "a" .type __sa110_proc_info,#object __sa110_proc_info: .long 0x4401a100 .long 0xfffffff0 .long PMD_TYPE_SECT | \ PMD_SECT_BUFFERABLE | \ PMD_SECT_CACHEABLE | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __sa110_setup, __sa110_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT .long cpu_sa110_name .long sa110_processor_functions .long v4wb_tlb_fns .long v4wb_user_fns .long v4wb_cache_fns .size __sa110_proc_info, . - __sa110_proc_info
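cpu_sa110_dcache_clean_area above (like the arm920, feroceon, and xscale variants earlier in this dump) is a do/while loop: the subs/bhi pair cleans at least one line and keeps going while bytes remain, so a partial trailing line is still cleaned. In C, with a signed size making the bhi termination explicit and the cp15 clean reduced to a stub:

#include <stdint.h>
#include <stddef.h>

#define DCACHELINESIZE 32

static void clean_dcache_line(uintptr_t addr) { (void)addr; } /* cp15 stub */

static void dcache_clean_area(uintptr_t addr, ptrdiff_t size)
{
	do {
		clean_dcache_line(addr);
		addr += DCACHELINESIZE;     /* add  r0, r0, #32 */
		size -= DCACHELINESIZE;     /* subs r1, r1, #32 */
	} while (size > 0);                 /* bhi  1b */
}

int main(void)
{
	dcache_clean_area(0x8000, 100);     /* cleans 4 lines (rounds up) */
	return 0;
}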
aixcc-public/challenge-001-exemplar-source
25,318
arch/arm/mm/proc-v7.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/arch/arm/mm/proc-v7.S * * Copyright (C) 2001 Deep Blue Solutions Ltd. * * This is the "shell" of the ARMv7 processor support. */ #include <linux/arm-smccc.h> #include <linux/init.h> #include <linux/linkage.h> #include <linux/pgtable.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/memory.h> #include "proc-macros.S" #ifdef CONFIG_ARM_LPAE #include "proc-v7-3level.S" #else #include "proc-v7-2level.S" #endif ENTRY(cpu_v7_proc_init) ret lr ENDPROC(cpu_v7_proc_init) ENTRY(cpu_v7_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x0006 @ .............ca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr ENDPROC(cpu_v7_proc_fin) /* * cpu_v7_reset(loc, hyp) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * - loc - location to jump to for soft reset * - hyp - indicate if restart occurs in HYP mode * * This code must be executed using a flat identity mapping with * caches disabled. */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_v7_reset) mrc p15, 0, r2, c1, c0, 0 @ ctrl register bic r2, r2, #0x1 @ ...............m THUMB( bic r2, r2, #1 << 30 ) @ SCTLR.TE (Thumb exceptions) mcr p15, 0, r2, c1, c0, 0 @ disable MMU isb #ifdef CONFIG_ARM_VIRT_EXT teq r1, #0 bne __hyp_soft_restart #endif bx r0 ENDPROC(cpu_v7_reset) .popsection /* * cpu_v7_do_idle() * * Idle the processor (eg, wait for interrupt). * * IRQs are already disabled. */ ENTRY(cpu_v7_do_idle) dsb @ WFI may enter a low-power mode wfi ret lr ENDPROC(cpu_v7_do_idle) ENTRY(cpu_v7_dcache_clean_area) ALT_SMP(W(nop)) @ MP extensions imply L1 PTW ALT_UP_B(1f) ret lr 1: dcache_line_size r2, r3 2: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, r2 subs r1, r1, r2 bhi 2b dsb ishst ret lr ENDPROC(cpu_v7_dcache_clean_area) #ifdef CONFIG_ARM_PSCI .arch_extension sec ENTRY(cpu_v7_smc_switch_mm) stmfd sp!, {r0 - r3} movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1 movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1 smc #0 ldmfd sp!, {r0 - r3} b cpu_v7_switch_mm ENDPROC(cpu_v7_smc_switch_mm) .arch_extension virt ENTRY(cpu_v7_hvc_switch_mm) stmfd sp!, {r0 - r3} movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1 movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1 hvc #0 ldmfd sp!, {r0 - r3} b cpu_v7_switch_mm ENDPROC(cpu_v7_hvc_switch_mm) #endif ENTRY(cpu_v7_iciallu_switch_mm) mov r3, #0 mcr p15, 0, r3, c7, c5, 0 @ ICIALLU b cpu_v7_switch_mm ENDPROC(cpu_v7_iciallu_switch_mm) ENTRY(cpu_v7_bpiall_switch_mm) mov r3, #0 mcr p15, 0, r3, c7, c5, 6 @ flush BTAC/BTB b cpu_v7_switch_mm ENDPROC(cpu_v7_bpiall_switch_mm) string cpu_v7_name, "ARMv7 Processor" .align /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ .globl cpu_v7_suspend_size .equ cpu_v7_suspend_size, 4 * 9 #ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_v7_do_suspend) stmfd sp!, {r4 - r11, lr} mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID stmia r0!, {r4 - r5} #ifdef CONFIG_MMU mrc p15, 0, r6, c3, c0, 0 @ Domain ID #ifdef CONFIG_ARM_LPAE mrrc p15, 1, r5, r7, c2 @ TTB 1 #else mrc p15, 0, r7, c2, c0, 1 @ TTB 1 #endif mrc p15, 0, r11, c2, c0, 2 @ TTB control register #endif mrc p15, 0, r8, c1, c0, 0 @ Control register mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control stmia r0, {r5 - r11} ldmfd sp!, {r4 - 
r11, pc} ENDPROC(cpu_v7_do_suspend) ENTRY(cpu_v7_do_resume) mov ip, #0 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID ldmia r0!, {r4 - r5} mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID mcr p15, 0, r5, c13, c0, 3 @ User r/o thread ID ldmia r0, {r5 - r11} #ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs mcr p15, 0, r6, c3, c0, 0 @ Domain ID #ifdef CONFIG_ARM_LPAE mcrr p15, 0, r1, ip, c2 @ TTB 0 mcrr p15, 1, r5, r7, c2 @ TTB 1 #else ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP) ALT_UP(orr r1, r1, #TTB_FLAGS_UP) mcr p15, 0, r1, c2, c0, 0 @ TTB 0 mcr p15, 0, r7, c2, c0, 1 @ TTB 1 #endif mcr p15, 0, r11, c2, c0, 2 @ TTB control register ldr r4, =PRRR @ PRRR ldr r5, =NMRR @ NMRR mcr p15, 0, r4, c10, c2, 0 @ write PRRR mcr p15, 0, r5, c10, c2, 1 @ write NMRR #endif /* CONFIG_MMU */ mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register teq r4, r9 @ Is it already set? mcrne p15, 0, r9, c1, c0, 1 @ No, so write it mcr p15, 0, r10, c1, c0, 2 @ Co-processor access control isb dsb mov r0, r8 @ control register b cpu_resume_mmu ENDPROC(cpu_v7_do_resume) #endif .globl cpu_ca9mp_suspend_size .equ cpu_ca9mp_suspend_size, cpu_v7_suspend_size + 4 * 2 #ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_ca9mp_do_suspend) stmfd sp!, {r4 - r5} mrc p15, 0, r4, c15, c0, 1 @ Diagnostic register mrc p15, 0, r5, c15, c0, 0 @ Power register stmia r0!, {r4 - r5} ldmfd sp!, {r4 - r5} b cpu_v7_do_suspend ENDPROC(cpu_ca9mp_do_suspend) ENTRY(cpu_ca9mp_do_resume) ldmia r0!, {r4 - r5} mrc p15, 0, r10, c15, c0, 1 @ Read Diagnostic register teq r4, r10 @ Already restored? mcrne p15, 0, r4, c15, c0, 1 @ No, so restore it mrc p15, 0, r10, c15, c0, 0 @ Read Power register teq r5, r10 @ Already restored? mcrne p15, 0, r5, c15, c0, 0 @ No, so restore it b cpu_v7_do_resume ENDPROC(cpu_ca9mp_do_resume) #endif #ifdef CONFIG_CPU_PJ4B globl_equ cpu_pj4b_switch_mm, cpu_v7_switch_mm globl_equ cpu_pj4b_set_pte_ext, cpu_v7_set_pte_ext globl_equ cpu_pj4b_proc_init, cpu_v7_proc_init globl_equ cpu_pj4b_proc_fin, cpu_v7_proc_fin globl_equ cpu_pj4b_reset, cpu_v7_reset #ifdef CONFIG_PJ4B_ERRATA_4742 ENTRY(cpu_pj4b_do_idle) dsb @ WFI may enter a low-power mode wfi dsb @barrier ret lr ENDPROC(cpu_pj4b_do_idle) #else globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle #endif globl_equ cpu_pj4b_dcache_clean_area, cpu_v7_dcache_clean_area #ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_pj4b_do_suspend) stmfd sp!, {r6 - r10} mrc p15, 1, r6, c15, c1, 0 @ save CP15 - extra features mrc p15, 1, r7, c15, c2, 0 @ save CP15 - Aux Func Modes Ctrl 0 mrc p15, 1, r8, c15, c1, 2 @ save CP15 - Aux Debug Modes Ctrl 2 mrc p15, 1, r9, c15, c1, 1 @ save CP15 - Aux Debug Modes Ctrl 1 mrc p15, 0, r10, c9, c14, 0 @ save CP15 - PMC stmia r0!, {r6 - r10} ldmfd sp!, {r6 - r10} b cpu_v7_do_suspend ENDPROC(cpu_pj4b_do_suspend) ENTRY(cpu_pj4b_do_resume) ldmia r0!, {r6 - r10} mcr p15, 1, r6, c15, c1, 0 @ restore CP15 - extra features mcr p15, 1, r7, c15, c2, 0 @ restore CP15 - Aux Func Modes Ctrl 0 mcr p15, 1, r8, c15, c1, 2 @ restore CP15 - Aux Debug Modes Ctrl 2 mcr p15, 1, r9, c15, c1, 1 @ restore CP15 - Aux Debug Modes Ctrl 1 mcr p15, 0, r10, c9, c14, 0 @ restore CP15 - PMC b cpu_v7_do_resume ENDPROC(cpu_pj4b_do_resume) #endif .globl cpu_pj4b_suspend_size .equ cpu_pj4b_suspend_size, cpu_v7_suspend_size + 4 * 5 #endif @ @ Invoke the v7_invalidate_l1() function, which adheres to the AAPCS @ rules, and so it may corrupt registers that we need to preserve. 
@ .macro do_invalidate_l1 mov r6, r1 mov r7, r2 mov r10, lr bl v7_invalidate_l1 @ corrupts {r0-r3, ip, lr} mov r1, r6 mov r2, r7 mov lr, r10 .endm /* * __v7_setup * * Initialise TLB, Caches, and MMU state ready to switch the MMU * on. Return in r0 the new CP15 C1 control register setting. * * r1, r2, r4, r5, r9, r13 must be preserved - r13 is not a stack * r4: TTBR0 (low word) * r5: TTBR0 (high word if LPAE) * r8: TTBR1 * r9: Main ID register * * This should be able to cover all ARMv7 cores. * * It is assumed that: * - cache type register is implemented */ __v7_ca5mp_setup: __v7_ca9mp_setup: __v7_cr7mp_setup: __v7_cr8mp_setup: do_invalidate_l1 mov r10, #(1 << 0) @ Cache/TLB ops broadcasting b 1f __v7_ca7mp_setup: __v7_ca12mp_setup: __v7_ca15mp_setup: __v7_b15mp_setup: __v7_ca17mp_setup: do_invalidate_l1 mov r10, #0 1: #ifdef CONFIG_SMP orr r10, r10, #(1 << 6) @ Enable SMP/nAMP mode ALT_SMP(mrc p15, 0, r0, c1, c0, 1) ALT_UP(mov r0, r10) @ fake it for UP orr r10, r10, r0 @ Set required bits teq r10, r0 @ Were they already set? mcrne p15, 0, r10, c1, c0, 1 @ No, update register #endif b __v7_setup_cont /* * Errata: * r0, r10 available for use * r1, r2, r4, r5, r9, r13: must be preserved * r3: contains MIDR rX number in bits 23-20 * r6: contains MIDR rXpY as 8-bit XY number * r9: MIDR */ __ca8_errata: #if defined(CONFIG_ARM_ERRATA_430973) && !defined(CONFIG_ARCH_MULTIPLATFORM) teq r3, #0x00100000 @ only present in r1p* mrceq p15, 0, r0, c1, c0, 1 @ read aux control register orreq r0, r0, #(1 << 6) @ set IBE to 1 mcreq p15, 0, r0, c1, c0, 1 @ write aux control register #endif #ifdef CONFIG_ARM_ERRATA_458693 teq r6, #0x20 @ only present in r2p0 mrceq p15, 0, r0, c1, c0, 1 @ read aux control register orreq r0, r0, #(1 << 5) @ set L1NEON to 1 orreq r0, r0, #(1 << 9) @ set PLDNOP to 1 mcreq p15, 0, r0, c1, c0, 1 @ write aux control register #endif #ifdef CONFIG_ARM_ERRATA_460075 teq r6, #0x20 @ only present in r2p0 mrceq p15, 1, r0, c9, c0, 2 @ read L2 cache aux ctrl register tsteq r0, #1 << 22 orreq r0, r0, #(1 << 22) @ set the Write Allocate disable bit mcreq p15, 1, r0, c9, c0, 2 @ write the L2 cache aux ctrl register #endif b __errata_finish __ca9_errata: #ifdef CONFIG_ARM_ERRATA_742230 cmp r6, #0x22 @ only present up to r2p2 mrcle p15, 0, r0, c15, c0, 1 @ read diagnostic register orrle r0, r0, #1 << 4 @ set bit #4 mcrle p15, 0, r0, c15, c0, 1 @ write diagnostic register #endif #ifdef CONFIG_ARM_ERRATA_742231 teq r6, #0x20 @ present in r2p0 teqne r6, #0x21 @ present in r2p1 teqne r6, #0x22 @ present in r2p2 mrceq p15, 0, r0, c15, c0, 1 @ read diagnostic register orreq r0, r0, #1 << 12 @ set bit #12 orreq r0, r0, #1 << 22 @ set bit #22 mcreq p15, 0, r0, c15, c0, 1 @ write diagnostic register #endif #ifdef CONFIG_ARM_ERRATA_743622 teq r3, #0x00200000 @ only present in r2p* mrceq p15, 0, r0, c15, c0, 1 @ read diagnostic register orreq r0, r0, #1 << 6 @ set bit #6 mcreq p15, 0, r0, c15, c0, 1 @ write diagnostic register #endif #if defined(CONFIG_ARM_ERRATA_751472) && defined(CONFIG_SMP) ALT_SMP(cmp r6, #0x30) @ present prior to r3p0 ALT_UP_B(1f) mrclt p15, 0, r0, c15, c0, 1 @ read diagnostic register orrlt r0, r0, #1 << 11 @ set bit #11 mcrlt p15, 0, r0, c15, c0, 1 @ write diagnostic register 1: #endif b __errata_finish __ca15_errata: #ifdef CONFIG_ARM_ERRATA_773022 cmp r6, #0x4 @ only present up to r0p4 mrcle p15, 0, r0, c1, c0, 1 @ read aux control register orrle r0, r0, #1 << 1 @ disable loop buffer mcrle p15, 0, r0, c1, c0, 1 @ write aux control register #endif b __errata_finish __ca12_errata: 
#ifdef CONFIG_ARM_ERRATA_818325_852422 mrc p15, 0, r10, c15, c0, 1 @ read diagnostic register orr r10, r10, #1 << 12 @ set bit #12 mcr p15, 0, r10, c15, c0, 1 @ write diagnostic register #endif #ifdef CONFIG_ARM_ERRATA_821420 mrc p15, 0, r10, c15, c0, 2 @ read internal feature reg orr r10, r10, #1 << 1 @ set bit #1 mcr p15, 0, r10, c15, c0, 2 @ write internal feature reg #endif #ifdef CONFIG_ARM_ERRATA_825619 mrc p15, 0, r10, c15, c0, 1 @ read diagnostic register orr r10, r10, #1 << 24 @ set bit #24 mcr p15, 0, r10, c15, c0, 1 @ write diagnostic register #endif #ifdef CONFIG_ARM_ERRATA_857271 mrc p15, 0, r10, c15, c0, 1 @ read diagnostic register orr r10, r10, #3 << 10 @ set bits #10 and #11 mcr p15, 0, r10, c15, c0, 1 @ write diagnostic register #endif b __errata_finish __ca17_errata: #ifdef CONFIG_ARM_ERRATA_852421 cmp r6, #0x12 @ only present up to r1p2 mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register orrle r10, r10, #1 << 24 @ set bit #24 mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register #endif #ifdef CONFIG_ARM_ERRATA_852423 cmp r6, #0x12 @ only present up to r1p2 mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register orrle r10, r10, #1 << 12 @ set bit #12 mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register #endif #ifdef CONFIG_ARM_ERRATA_857272 mrc p15, 0, r10, c15, c0, 1 @ read diagnostic register orr r10, r10, #3 << 10 @ set bits #10 and #11 mcr p15, 0, r10, c15, c0, 1 @ write diagnostic register #endif b __errata_finish __v7_pj4b_setup: #ifdef CONFIG_CPU_PJ4B /* Auxiliary Debug Modes Control 1 Register */ #define PJ4B_STATIC_BP (1 << 2) /* Enable Static BP */ #define PJ4B_INTER_PARITY (1 << 8) /* Disable Internal Parity Handling */ #define PJ4B_CLEAN_LINE (1 << 16) /* Disable data transfer for clean line */ /* Auxiliary Debug Modes Control 2 Register */ #define PJ4B_FAST_LDR (1 << 23) /* Disable fast LDR */ #define PJ4B_SNOOP_DATA (1 << 25) /* Do not interleave write and snoop data */ #define PJ4B_CWF (1 << 27) /* Disable Critical Word First feature */ #define PJ4B_OUTSDNG_NC (1 << 29) /* Disable outstanding non cacheable rqst */ #define PJ4B_L1_REP_RR (1 << 30) /* L1 replacement - Strict round robin */ #define PJ4B_AUX_DBG_CTRL2 (PJ4B_SNOOP_DATA | PJ4B_CWF |\ PJ4B_OUTSDNG_NC | PJ4B_L1_REP_RR) /* Auxiliary Functional Modes Control Register 0 */ #define PJ4B_SMP_CFB (1 << 1) /* Set SMP mode. Join the coherency fabric */ #define PJ4B_L1_PAR_CHK (1 << 2) /* Support L1 parity checking */ #define PJ4B_BROADCAST_CACHE (1 << 8) /* Broadcast Cache and TLB maintenance */ /* Auxiliary Debug Modes Control 0 Register */ #define PJ4B_WFI_WFE (1 << 22) /* WFI/WFE - serve the DVM and back to idle */ /* Auxiliary Debug Modes Control 1 Register */ mrc p15, 1, r0, c15, c1, 1 orr r0, r0, #PJ4B_CLEAN_LINE orr r0, r0, #PJ4B_INTER_PARITY bic r0, r0, #PJ4B_STATIC_BP mcr p15, 1, r0, c15, c1, 1 /* Auxiliary Debug Modes Control 2 Register */ mrc p15, 1, r0, c15, c1, 2 bic r0, r0, #PJ4B_FAST_LDR orr r0, r0, #PJ4B_AUX_DBG_CTRL2 mcr p15, 1, r0, c15, c1, 2 /* Auxiliary Functional Modes Control Register 0 */ mrc p15, 1, r0, c15, c2, 0 #ifdef CONFIG_SMP orr r0, r0, #PJ4B_SMP_CFB #endif orr r0, r0, #PJ4B_L1_PAR_CHK orr r0, r0, #PJ4B_BROADCAST_CACHE mcr p15, 1, r0, c15, c2, 0 /* Auxiliary Debug Modes Control 0 Register */ mrc p15, 1, r0, c15, c1, 0 orr r0, r0, #PJ4B_WFI_WFE mcr p15, 1, r0, c15, c1, 0 #endif /* CONFIG_CPU_PJ4B */ __v7_setup: do_invalidate_l1 __v7_setup_cont: and r0, r9, #0xff000000 @ ARM? 
teq r0, #0x41000000 bne __errata_finish and r3, r9, #0x00f00000 @ variant and r6, r9, #0x0000000f @ revision orr r6, r6, r3, lsr #20-4 @ combine variant and revision ubfx r0, r9, #4, #12 @ primary part number /* Cortex-A8 Errata */ ldr r10, =0x00000c08 @ Cortex-A8 primary part number teq r0, r10 beq __ca8_errata /* Cortex-A9 Errata */ ldr r10, =0x00000c09 @ Cortex-A9 primary part number teq r0, r10 beq __ca9_errata /* Cortex-A12 Errata */ ldr r10, =0x00000c0d @ Cortex-A12 primary part number teq r0, r10 beq __ca12_errata /* Cortex-A17 Errata */ ldr r10, =0x00000c0e @ Cortex-A17 primary part number teq r0, r10 beq __ca17_errata /* Cortex-A15 Errata */ ldr r10, =0x00000c0f @ Cortex-A15 primary part number teq r0, r10 beq __ca15_errata __errata_finish: mov r10, #0 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate #ifdef CONFIG_MMU mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs v7_ttb_setup r10, r4, r5, r8, r3 @ TTBCR, TTBRx setup ldr r3, =PRRR @ PRRR ldr r6, =NMRR @ NMRR mcr p15, 0, r3, c10, c2, 0 @ write PRRR mcr p15, 0, r6, c10, c2, 1 @ write NMRR #endif dsb @ Complete invalidations #ifndef CONFIG_ARM_THUMBEE mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE and r0, r0, #(0xf << 12) @ ThumbEE enabled field teq r0, #(1 << 12) @ check if ThumbEE is present bne 1f mov r3, #0 mcr p14, 6, r3, c1, c0, 0 @ Initialize TEEHBR to 0 mrc p14, 6, r0, c0, c0, 0 @ load TEECR orr r0, r0, #1 @ set the 1st bit in order to mcr p14, 6, r0, c0, c0, 0 @ stop userspace TEEHBR access 1: #endif adr r3, v7_crval ldmia r3, {r3, r6} ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables #ifdef CONFIG_SWP_EMULATE orr r3, r3, #(1 << 10) @ set SW bit in "clear" bic r6, r6, #(1 << 10) @ clear it in "mmuset" #endif mrc p15, 0, r0, c1, c0, 0 @ read control register bic r0, r0, r3 @ clear bits orr r0, r0, r6 @ set bits THUMB( orr r0, r0, #1 << 30 ) @ Thumb exceptions ret lr @ return to head.S:__ret ENDPROC(__v7_setup) __INITDATA .weak cpu_v7_bugs_init @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR @ generic v7 bpiall on context switch globl_equ cpu_v7_bpiall_proc_init, cpu_v7_proc_init globl_equ cpu_v7_bpiall_proc_fin, cpu_v7_proc_fin globl_equ cpu_v7_bpiall_reset, cpu_v7_reset globl_equ cpu_v7_bpiall_do_idle, cpu_v7_do_idle globl_equ cpu_v7_bpiall_dcache_clean_area, cpu_v7_dcache_clean_area globl_equ cpu_v7_bpiall_set_pte_ext, cpu_v7_set_pte_ext globl_equ cpu_v7_bpiall_suspend_size, cpu_v7_suspend_size #ifdef CONFIG_ARM_CPU_SUSPEND globl_equ cpu_v7_bpiall_do_suspend, cpu_v7_do_suspend globl_equ cpu_v7_bpiall_do_resume, cpu_v7_do_resume #endif define_processor_functions v7_bpiall, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init #define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_bpiall_processor_functions #else #define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_processor_functions #endif #ifndef CONFIG_ARM_LPAE @ Cortex-A8 - always needs bpiall switch_mm implementation globl_equ cpu_ca8_proc_init, cpu_v7_proc_init globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin globl_equ cpu_ca8_reset, cpu_v7_reset globl_equ cpu_ca8_do_idle, cpu_v7_do_idle globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext globl_equ cpu_ca8_switch_mm, cpu_v7_bpiall_switch_mm globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size #ifdef CONFIG_ARM_CPU_SUSPEND globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend globl_equ 
cpu_ca8_do_resume, cpu_v7_do_resume #endif define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca8_ibe @ Cortex-A9 - needs more registers preserved across suspend/resume @ and bpiall switch_mm for hardening globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin globl_equ cpu_ca9mp_reset, cpu_v7_reset globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle globl_equ cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR globl_equ cpu_ca9mp_switch_mm, cpu_v7_bpiall_switch_mm #else globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm #endif globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init #endif @ Cortex-A15 - needs iciallu switch_mm for hardening globl_equ cpu_ca15_proc_init, cpu_v7_proc_init globl_equ cpu_ca15_proc_fin, cpu_v7_proc_fin globl_equ cpu_ca15_reset, cpu_v7_reset globl_equ cpu_ca15_do_idle, cpu_v7_do_idle globl_equ cpu_ca15_dcache_clean_area, cpu_v7_dcache_clean_area #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR globl_equ cpu_ca15_switch_mm, cpu_v7_iciallu_switch_mm #else globl_equ cpu_ca15_switch_mm, cpu_v7_switch_mm #endif globl_equ cpu_ca15_set_pte_ext, cpu_v7_set_pte_ext globl_equ cpu_ca15_suspend_size, cpu_v7_suspend_size globl_equ cpu_ca15_do_suspend, cpu_v7_do_suspend globl_equ cpu_ca15_do_resume, cpu_v7_do_resume define_processor_functions ca15, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca15_ibe #ifdef CONFIG_CPU_PJ4B define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 #endif .section ".rodata" string cpu_arch_name, "armv7" string cpu_elf_name, "v7" .align .section ".proc.info.init", "a" /* * Standard v7 proc info content */ .macro __v7_proc name, initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions, cache_fns = v7_cache_fns ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags) ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ PMD_SECT_AF | PMD_FLAGS_UP | \mm_mmuflags) .long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ | PMD_SECT_AF | \io_mmuflags initfn \initfunc, \name .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \ HWCAP_EDSP | HWCAP_TLS | \hwcaps .long cpu_v7_name .long \proc_fns .long v7wbi_tlb_fns .long v6_user_fns .long \cache_fns .endm #ifndef CONFIG_ARM_LPAE /* * ARM Ltd. Cortex A5 processor. */ .type __v7_ca5mp_proc_info, #object __v7_ca5mp_proc_info: .long 0x410fc050 .long 0xff0ffff0 __v7_proc __v7_ca5mp_proc_info, __v7_ca5mp_setup .size __v7_ca5mp_proc_info, . - __v7_ca5mp_proc_info /* * ARM Ltd. Cortex A9 processor. */ .type __v7_ca9mp_proc_info, #object __v7_ca9mp_proc_info: .long 0x410fc090 .long 0xff0ffff0 __v7_proc __v7_ca9mp_proc_info, __v7_ca9mp_setup, proc_fns = ca9mp_processor_functions .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info /* * ARM Ltd. Cortex A8 processor. */ .type __v7_ca8_proc_info, #object __v7_ca8_proc_info: .long 0x410fc080 .long 0xff0ffff0 __v7_proc __v7_ca8_proc_info, __v7_setup, proc_fns = ca8_processor_functions .size __v7_ca8_proc_info, . - __v7_ca8_proc_info #endif /* CONFIG_ARM_LPAE */ /* * Marvell PJ4B processor. 
*/ #ifdef CONFIG_CPU_PJ4B .type __v7_pj4b_proc_info, #object __v7_pj4b_proc_info: .long 0x560f5800 .long 0xff0fff00 __v7_proc __v7_pj4b_proc_info, __v7_pj4b_setup, proc_fns = pj4b_processor_functions .size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info #endif /* * ARM Ltd. Cortex R7 processor. */ .type __v7_cr7mp_proc_info, #object __v7_cr7mp_proc_info: .long 0x410fc170 .long 0xff0ffff0 __v7_proc __v7_cr7mp_proc_info, __v7_cr7mp_setup .size __v7_cr7mp_proc_info, . - __v7_cr7mp_proc_info /* * ARM Ltd. Cortex R8 processor. */ .type __v7_cr8mp_proc_info, #object __v7_cr8mp_proc_info: .long 0x410fc180 .long 0xff0ffff0 __v7_proc __v7_cr8mp_proc_info, __v7_cr8mp_setup .size __v7_cr8mp_proc_info, . - __v7_cr8mp_proc_info /* * ARM Ltd. Cortex A7 processor. */ .type __v7_ca7mp_proc_info, #object __v7_ca7mp_proc_info: .long 0x410fc070 .long 0xff0ffff0 __v7_proc __v7_ca7mp_proc_info, __v7_ca7mp_setup .size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info /* * ARM Ltd. Cortex A12 processor. */ .type __v7_ca12mp_proc_info, #object __v7_ca12mp_proc_info: .long 0x410fc0d0 .long 0xff0ffff0 __v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS .size __v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info /* * ARM Ltd. Cortex A15 processor. */ .type __v7_ca15mp_proc_info, #object __v7_ca15mp_proc_info: .long 0x410fc0f0 .long 0xff0ffff0 __v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup, proc_fns = ca15_processor_functions .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info /* * Broadcom Corporation Brahma-B15 processor. */ .type __v7_b15mp_proc_info, #object __v7_b15mp_proc_info: .long 0x420f00f0 .long 0xff0ffff0 __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup, proc_fns = ca15_processor_functions, cache_fns = b15_cache_fns .size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info /* * ARM Ltd. Cortex A17 processor. */ .type __v7_ca17mp_proc_info, #object __v7_ca17mp_proc_info: .long 0x410fc0e0 .long 0xff0ffff0 __v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS .size __v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info /* ARM Ltd. Cortex A73 processor */ .type __v7_ca73_proc_info, #object __v7_ca73_proc_info: .long 0x410fd090 .long 0xff0ffff0 __v7_proc __v7_ca73_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS .size __v7_ca73_proc_info, . - __v7_ca73_proc_info /* ARM Ltd. Cortex A75 processor */ .type __v7_ca75_proc_info, #object __v7_ca75_proc_info: .long 0x410fd0a0 .long 0xff0ffff0 __v7_proc __v7_ca75_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS .size __v7_ca75_proc_info, . - __v7_ca75_proc_info /* * Qualcomm Inc. Krait processors. */ .type __krait_proc_info, #object __krait_proc_info: .long 0x510f0400 @ Required ID value .long 0xff0ffc00 @ Mask for ID /* * Some Krait processors don't indicate support for SDIV and UDIV * instructions in the ARM instruction set, even though they actually * do support them. They also don't indicate support for fused multiply * instructions even though they actually do support them. */ __v7_proc __krait_proc_info, __v7_setup, hwcaps = HWCAP_IDIV | HWCAP_VFPv4 .size __krait_proc_info, . - __krait_proc_info /* * Match any ARMv7 processor core. */ .type __v7_proc_info, #object __v7_proc_info: .long 0x000f0000 @ Required ID value .long 0x000f0000 @ Mask for ID __v7_proc __v7_proc_info, __v7_setup .size __v7_proc_info, . - __v7_proc_info
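The errata dispatch in __v7_setup_cont keys off fields of the MIDR: the variant sits in bits 23:20, the revision in bits 3:0, and the two are folded into a single 8-bit rXpY value before the part-number comparisons. A minimal standalone sketch of that decode, assuming an AAPCS-callable routine in ARM state (the label and register choices are illustrative, not from the source):

	.text
	.globl	midr_rev_sketch
midr_rev_sketch:
	mrc	p15, 0, r0, c0, c0, 0	@ read MIDR
	and	r2, r0, #0x00f00000	@ variant field, bits 23:20
	and	r3, r0, #0x0000000f	@ revision field, bits 3:0
	orr	r0, r3, r2, lsr #20-4	@ combine into 8-bit rXpY, returned in r0
	bx	lr

With the value in this form, a check such as "only present up to r2p2" reduces to cmp against #0x22 followed by conditional fixups, which is exactly the shape of the __ca9_errata entries above.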
aixcc-public/challenge-001-exemplar-source
2,511
arch/arm/mm/tlb-v7.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/arch/arm/mm/tlb-v7.S * * Copyright (C) 1997-2002 Russell King * Modified for ARMv7 by Catalin Marinas * * ARM architecture version 7 TLB handling functions. * These assume a split I/D TLB. */ #include <linux/init.h> #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/page.h> #include <asm/tlbflush.h> #include "proc-macros.S" /* * v7wbi_flush_user_tlb_range(start, end, vma) * * Invalidate a range of TLB entries in the specified address space. * * - start - start address (may not be aligned) * - end - end address (exclusive, may not be aligned) * - vma - vm_area_struct describing address range * * It is assumed that: * - the "Invalidate single entry" instruction will invalidate * both the I and the D TLBs on Harvard-style TLBs */ ENTRY(v7wbi_flush_user_tlb_range) vma_vm_mm r3, r2 @ get vma->vm_mm mmid r3, r3 @ get vm_mm->context.id dsb ish mov r0, r0, lsr #PAGE_SHIFT @ align address mov r1, r1, lsr #PAGE_SHIFT asid r3, r3 @ mask ASID #ifdef CONFIG_ARM_ERRATA_720789 ALT_SMP(W(mov) r3, #0 ) ALT_UP(W(nop) ) #endif orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA mov r1, r1, lsl #PAGE_SHIFT 1: #ifdef CONFIG_ARM_ERRATA_720789 ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable) #else ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable) #endif ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA add r0, r0, #PAGE_SZ cmp r0, r1 blo 1b dsb ish ret lr ENDPROC(v7wbi_flush_user_tlb_range) /* * v7wbi_flush_kern_tlb_range(start,end) * * Invalidate a range of kernel TLB entries * * - start - start address (may not be aligned) * - end - end address (exclusive, may not be aligned) */ ENTRY(v7wbi_flush_kern_tlb_range) dsb ish mov r0, r0, lsr #PAGE_SHIFT @ align address mov r1, r1, lsr #PAGE_SHIFT mov r0, r0, lsl #PAGE_SHIFT mov r1, r1, lsl #PAGE_SHIFT 1: #ifdef CONFIG_ARM_ERRATA_720789 ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable) #else ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable) #endif ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA add r0, r0, #PAGE_SZ cmp r0, r1 blo 1b dsb ish isb ret lr ENDPROC(v7wbi_flush_kern_tlb_range) __INIT /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ define_tlb_functions v7wbi, v7wbi_tlb_flags_up, flags_smp=v7wbi_tlb_flags_smp
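v7wbi_flush_user_tlb_range builds its loop cursor by page-aligning the start address and folding the context's ASID into the low bits, so each by-MVA invalidate names both the page and the address space. A hedged sketch of just that composition, assuming PAGE_SHIFT is 12 and an 8-bit ASID (the label and register assignments are illustrative):

	.text
	.globl	tlbimva_operand_sketch
tlbimva_operand_sketch:
	mov	r0, r0, lsr #12		@ drop the page offset
	and	r1, r1, #0xff		@ mask ASID to 8 bits
	orr	r0, r1, r0, lsl #12	@ MVA[31:12] | ASID[7:0], returned in r0
	bx	lr

The kernel routine then advances this value by PAGE_SZ per iteration until it reaches the page-aligned end address, issuing one TLB invalidate per page.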
aixcc-public/challenge-001-exemplar-source
3,825
arch/arm/mm/proc-arm740.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/arch/arm/mm/arm740.S: utility functions for ARM740 * * Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com) */ #include <linux/linkage.h> #include <linux/init.h> #include <linux/pgtable.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/ptrace.h> #include "proc-macros.S" .text /* * cpu_arm740_proc_init() * cpu_arm740_do_idle() * cpu_arm740_dcache_clean_area() * cpu_arm740_switch_mm() * * These are not required. */ ENTRY(cpu_arm740_proc_init) ENTRY(cpu_arm740_do_idle) ENTRY(cpu_arm740_dcache_clean_area) ENTRY(cpu_arm740_switch_mm) ret lr /* * cpu_arm740_proc_fin() */ ENTRY(cpu_arm740_proc_fin) mrc p15, 0, r0, c1, c0, 0 bic r0, r0, #0x3f000000 @ bank/f/lock/s bic r0, r0, #0x0000000c @ w-buffer/cache mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_arm740_reset(loc) * Params : r0 = address to jump to * Notes : This sets up everything for a reset */ .pushsection .idmap.text, "ax" ENTRY(cpu_arm740_reset) mov ip, #0 mcr p15, 0, ip, c7, c0, 0 @ invalidate cache mrc p15, 0, ip, c1, c0, 0 @ get ctrl register bic ip, ip, #0x0000000c @ ............wc.. mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_arm740_reset) .popsection .type __arm740_setup, #function __arm740_setup: mov r0, #0 mcr p15, 0, r0, c7, c0, 0 @ invalidate caches mcr p15, 0, r0, c6, c3 @ disable area 3~7 mcr p15, 0, r0, c6, c4 mcr p15, 0, r0, c6, c5 mcr p15, 0, r0, c6, c6 mcr p15, 0, r0, c6, c7 mov r0, #0x0000003F @ base = 0, size = 4GB mcr p15, 0, r0, c6, c0 @ set area 0, default ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM ldr r3, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB) mov r4, #10 @ 11 is the minimum (4KB) 1: add r4, r4, #1 @ area size *= 2 movs r3, r3, lsr #1 bne 1b @ count not zero r-shift orr r0, r0, r4, lsl #1 @ the area register value orr r0, r0, #1 @ set enable bit mcr p15, 0, r0, c6, c1 @ set area 1, RAM ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH ldr r3, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB) cmp r3, #0 moveq r0, #0 beq 2f mov r4, #10 @ 11 is the minimum (4KB) 1: add r4, r4, #1 @ area size *= 2 movs r3, r3, lsr #1 bne 1b @ count not zero r-shift orr r0, r0, r4, lsl #1 @ the area register value orr r0, r0, #1 @ set enable bit 2: mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH mov r0, #0x06 mcr p15, 0, r0, c2, c0 @ Region 1&2 cacheable #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mov r0, #0x00 @ disable whole write buffer #else mov r0, #0x02 @ Region 1 write bufferred #endif mcr p15, 0, r0, c3, c0 mov r0, #0x10000 sub r0, r0, #1 @ r0 = 0xffff mcr p15, 0, r0, c5, c0 @ all read/write access mrc p15, 0, r0, c1, c0 @ get control register bic r0, r0, #0x3F000000 @ set to standard caching mode @ need some benchmark orr r0, r0, #0x0000000d @ MPU/Cache/WB ret lr .size __arm740_setup, . 
- __arm740_setup __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions arm740, dabort=v4t_late_abort, pabort=legacy_pabort, nommu=1 .section ".rodata" string cpu_arch_name, "armv4" string cpu_elf_name, "v4" string cpu_arm740_name, "ARM740T" .align .section ".proc.info.init", "a" .type __arm740_proc_info,#object __arm740_proc_info: .long 0x41807400 .long 0xfffffff0 .long 0 .long 0 initfn __arm740_setup, __arm740_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT .long cpu_arm740_name .long arm740_processor_functions .long 0 .long 0 .long v4_cache_fns @ cache model .size __arm740_proc_info, . - __arm740_proc_info
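__arm740_setup turns a byte size into the MPU area-register size field by counting doublings above 4KB: the encoding is log2(size) - 1, with 11 (4KB) as the documented minimum, placed in bits 5:1 next to the enable bit. A small sketch of just that computation, assuming a power-of-two size of at least 4KB arrives in r0 (names are illustrative):

	.text
	.globl	mpu_size_field_sketch
mpu_size_field_sketch:
	mov	r1, r0, lsr #12		@ size in 4KB units
	mov	r2, #10			@ 11 is the minimum encoding (4KB)
1:	add	r2, r2, #1		@ one more for each doubling
	movs	r1, r1, lsr #1
	bne	1b
	mov	r0, r2, lsl #1		@ field sits in bits 5:1
	mov	pc, lr

The caller would then OR in the region base address and the enable bit, as the setup code does for its RAM and FLASH areas.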
aixcc-public/challenge-001-exemplar-source
5,010
arch/arm/mm/proc-fa526.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/arch/arm/mm/proc-fa526.S: MMU functions for FA526 * * Written by : Luke Lee * Copyright (C) 2005 Faraday Corp. * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt> * * These are the low level assembler for performing cache and TLB * functions on the fa526. */ #include <linux/linkage.h> #include <linux/init.h> #include <linux/pgtable.h> #include <asm/assembler.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/page.h> #include <asm/ptrace.h> #include "proc-macros.S" #define CACHE_DLINESIZE 16 .text /* * cpu_fa526_proc_init() */ ENTRY(cpu_fa526_proc_init) ret lr /* * cpu_fa526_proc_fin() */ ENTRY(cpu_fa526_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches nop nop ret lr /* * cpu_fa526_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * loc: location to jump to for soft reset */ .align 4 .pushsection .idmap.text, "ax" ENTRY(cpu_fa526_reset) /* TODO: Use CP8 if possible... */ mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB #ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x1100 @ ...i...s........ bic ip, ip, #0x0800 @ BTB off mcr p15, 0, ip, c1, c0, 0 @ ctrl register nop nop ret r0 ENDPROC(cpu_fa526_reset) .popsection /* * cpu_fa526_do_idle() */ .align 4 ENTRY(cpu_fa526_do_idle) ret lr ENTRY(cpu_fa526_dcache_clean_area) 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE subs r1, r1, #CACHE_DLINESIZE bhi 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* =============================== PageTable ============================== */ /* * cpu_fa526_switch_mm(pgd) * * Set the translation base pointer to be as described by pgd. 
* * pgd: new page tables */ .align 4 ENTRY(cpu_fa526_switch_mm) #ifdef CONFIG_MMU mov ip, #0 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache #else mcr p15, 0, ip, c7, c14, 0 @ clean and invalidate whole D cache #endif mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache mcr p15, 0, ip, c7, c5, 6 @ invalidate BTB since mm changed mcr p15, 0, ip, c7, c10, 4 @ data write barrier mcr p15, 0, ip, c7, c5, 4 @ prefetch flush mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, ip, c8, c7, 0 @ invalidate UTLB #endif ret lr /* * cpu_fa526_set_pte_ext(ptep, pte, ext) * * Set a PTE and flush it out */ .align 4 ENTRY(cpu_fa526_set_pte_ext) #ifdef CONFIG_MMU armv3_set_pte_ext mov r0, r0 mcr p15, 0, r0, c7, c10, 1 @ clean D entry mov r0, #0 mcr p15, 0, r0, c7, c10, 4 @ drain WB #endif ret lr .type __fa526_setup, #function __fa526_setup: /* On return of this routine, r0 must carry correct flags for CFG register */ mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 #endif mcr p15, 0, r0, c7, c5, 5 @ invalidate IScratchpad RAM mov r0, #1 mcr p15, 0, r0, c1, c1, 0 @ turn-on ECR mov r0, #0 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB All mcr p15, 0, r0, c7, c10, 4 @ data write barrier mcr p15, 0, r0, c7, c5, 4 @ prefetch flush mov r0, #0x1f @ Domains 0, 1 = manager, 2 = client mcr p15, 0, r0, c3, c0 @ load domain access register mrc p15, 0, r0, c1, c0 @ get control register v4 ldr r5, fa526_cr1_clear bic r0, r0, r5 ldr r5, fa526_cr1_set orr r0, r0, r5 ret lr .size __fa526_setup, . - __fa526_setup /* * .RVI ZFRS BLDP WCAM * ..11 1001 .111 1101 * */ .type fa526_cr1_clear, #object .type fa526_cr1_set, #object fa526_cr1_clear: .word 0x3f3f fa526_cr1_set: .word 0x397D __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions fa526, dabort=v4_early_abort, pabort=legacy_pabort .section ".rodata" string cpu_arch_name, "armv4" string cpu_elf_name, "v4" string cpu_fa526_name, "FA526" .align .section ".proc.info.init", "a" .type __fa526_proc_info,#object __fa526_proc_info: .long 0x66015261 .long 0xff01fff1 .long PMD_TYPE_SECT | \ PMD_SECT_BUFFERABLE | \ PMD_SECT_CACHEABLE | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __fa526_setup, __fa526_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF .long cpu_fa526_name .long fa526_processor_functions .long fa_tlb_fns .long fa_user_fns .long fa_cache_fns .size __fa526_proc_info, . - __fa526_proc_info
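__fa526_setup updates the control register with the usual clear/set word pair: bits named in fa526_cr1_clear are forced to zero, bits named in fa526_cr1_set are forced to one, and everything else is preserved. A sketch of that read-modify step in isolation, with the mask values copied from this file (whether they suit any other core is not implied):

	.text
	.globl	cr1_rmw_sketch
cr1_rmw_sketch:
	mrc	p15, 0, r0, c1, c0, 0	@ read control register
	ldr	r2, =0x3f3f		@ "clear" word
	bic	r0, r0, r2
	ldr	r3, =0x397d		@ "set" word
	orr	r0, r0, r3
	mov	pc, lr			@ caller writes r0 back via MCR when ready
	.ltorg

Keeping the masks as data words, as the file does, lets the bit layout be documented once next to the constants instead of being scattered across immediates.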
aixcc-public/challenge-001-exemplar-source
11,291
arch/arm/mm/proc-arm1020e.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/arch/arm/mm/proc-arm1020e.S: MMU functions for ARM1020 * * Copyright (C) 2000 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. * hacked for non-paged-MM by Hyok S. Choi, 2003. * * These are the low level assembler for performing cache and TLB * functions on the arm1020e. */ #include <linux/linkage.h> #include <linux/init.h> #include <linux/pgtable.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/ptrace.h> #include "proc-macros.S" /* * This is the maximum size of an area which will be invalidated * using the single invalidate entry instructions. Anything larger * than this, and we go for the whole cache. * * This value should be chosen such that we choose the cheapest * alternative. */ #define MAX_AREA_SIZE 32768 /* * The size of one data cache line. */ #define CACHE_DLINESIZE 32 /* * The number of data cache segments. */ #define CACHE_DSEGMENTS 16 /* * The number of lines in a cache segment. */ #define CACHE_DENTRIES 64 /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual * cache line maintenance instructions. */ #define CACHE_DLIMIT 32768 .text /* * cpu_arm1020e_proc_init() */ ENTRY(cpu_arm1020e_proc_init) ret lr /* * cpu_arm1020e_proc_fin() */ ENTRY(cpu_arm1020e_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_arm1020e_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * loc: location to jump to for soft reset */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_arm1020e_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB #ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_arm1020e_reset) .popsection /* * cpu_arm1020e_do_idle() */ .align 5 ENTRY(cpu_arm1020e_do_idle) mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt ret lr /* ================================= CACHE ================================ */ .align 5 /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(arm1020e_flush_icache_all) #ifndef CONFIG_CPU_ICACHE_DISABLE mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache #endif ret lr ENDPROC(arm1020e_flush_icache_all) /* * flush_user_cache_all() * * Invalidate all cache entries in a particular address * space. */ ENTRY(arm1020e_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. 
*/ ENTRY(arm1020e_flush_kern_cache_all) mov r2, #VM_EXEC mov ip, #0 __flush_whole_cache: #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, ip, c7, c10, 4 @ drain WB mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries 2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 5 bcs 1b @ segments 15 to 0 #endif tst r2, #VM_EXEC #ifndef CONFIG_CPU_ICACHE_DISABLE mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache #endif mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * flush_user_cache_range(start, end, flags) * * Invalidate a range of cache entries in the specified * address space. * * - start - start address (inclusive) * - end - end address (exclusive) * - flags - vm_flags for this space */ ENTRY(arm1020e_flush_user_cache_range) mov ip, #0 sub r3, r1, r0 @ calculate total size cmp r3, #CACHE_DLIMIT bhs __flush_whole_cache #ifndef CONFIG_CPU_DCACHE_DISABLE 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif tst r2, #VM_EXEC #ifndef CONFIG_CPU_ICACHE_DISABLE mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache #endif mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm1020e_coherent_kern_range) /* FALLTHROUGH */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm1020e_coherent_user_range) mov ip, #0 bic r0, r0, #CACHE_DLINESIZE - 1 1: #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, r0, c7, c10, 1 @ clean D entry #endif #ifndef CONFIG_CPU_ICACHE_DISABLE mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry #endif add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, ip, c7, c10, 4 @ drain WB mov r0, #0 ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size */ ENTRY(arm1020e_flush_kern_dcache_area) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm1020e_dma_inv_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_clean_range(start, end) * * Clean the specified virtual address range. 
* * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm1020e_dma_clean_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm1020e_dma_flush_range) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm1020e_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq arm1020e_dma_clean_range bcs arm1020e_dma_inv_range b arm1020e_dma_flush_range ENDPROC(arm1020e_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm1020e_dma_unmap_area) ret lr ENDPROC(arm1020e_dma_unmap_area) .globl arm1020e_flush_kern_cache_louis .equ arm1020e_flush_kern_cache_louis, arm1020e_flush_kern_cache_all @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions arm1020e .align 5 ENTRY(cpu_arm1020e_dcache_clean_area) #ifndef CONFIG_CPU_DCACHE_DISABLE mov ip, #0 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE subs r1, r1, #CACHE_DLINESIZE bhi 1b #endif ret lr /* =============================== PageTable ============================== */ /* * cpu_arm1020e_switch_mm(pgd) * * Set the translation base pointer to be as described by pgd. * * pgd: new page tables */ .align 5 ENTRY(cpu_arm1020e_switch_mm) #ifdef CONFIG_MMU #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, r3, c7, c10, 4 mov r1, #0xF @ 16 segments 1: mov r3, #0x3F @ 64 entries 2: mov ip, r3, LSL #26 @ shift up entry orr ip, ip, r1, LSL #5 @ shift in/up index mcr p15, 0, ip, c7, c14, 2 @ Clean & Inval DCache entry mov ip, #0 subs r3, r3, #1 cmp r3, #0 bge 2b @ entries 3F to 0 subs r1, r1, #1 cmp r1, #0 bge 1b @ segments 15 to 0 #endif mov r1, #0 #ifndef CONFIG_CPU_ICACHE_DISABLE mcr p15, 0, r1, c7, c5, 0 @ invalidate I cache #endif mcr p15, 0, r1, c7, c10, 4 @ drain WB mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs #endif ret lr /* * cpu_arm1020e_set_pte(ptep, pte) * * Set a PTE and flush it out */ .align 5 ENTRY(cpu_arm1020e_set_pte_ext) #ifdef CONFIG_MMU armv3_set_pte_ext mov r0, r0 #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, r0, c7, c10, 1 @ clean D entry #endif #endif /* CONFIG_MMU */ ret lr .type __arm1020e_setup, #function __arm1020e_setup: mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 #endif adr r5, arm1020e_crval ldmia r5, {r5, r6} mrc p15, 0, r0, c1, c0 @ get control register v4 bic r0, r0, r5 orr r0, r0, r6 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN orr r0, r0, #0x4000 @ .R.. .... .... .... #endif ret lr .size __arm1020e_setup, . 
- __arm1020e_setup /* * R * .RVI ZFRS BLDP WCAM * .011 1001 ..11 0101 */ .type arm1020e_crval, #object arm1020e_crval: crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930 __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions arm1020e, dabort=v4t_early_abort, pabort=legacy_pabort .section ".rodata" string cpu_arch_name, "armv5te" string cpu_elf_name, "v5" string cpu_arm1020e_name, "ARM1020E" .align .section ".proc.info.init", "a" .type __arm1020e_proc_info,#object __arm1020e_proc_info: .long 0x4105a200 @ ARM 1020TE (Architecture v5TE) .long 0xff0ffff0 .long PMD_TYPE_SECT | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __arm1020e_setup, __arm1020e_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_EDSP .long cpu_arm1020e_name .long arm1020e_processor_functions .long v4wbi_tlb_fns .long v4wb_user_fns .long arm1020e_cache_fns .size __arm1020e_proc_info, . - __arm1020e_proc_info
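arm1020e_flush_kern_cache_all walks the D-cache by set/way: 16 segments of 64 entries, with the entry index packed into bits 31:26 of the MCR operand and the segment number in the bits from bit 5 upward. A standalone sketch of that two-level loop using this file's cache geometry (the label is illustrative):

	.text
	.globl	dcache_setway_sketch
dcache_setway_sketch:
	mov	r1, #(16 - 1) << 5		@ start at the last segment
1:	orr	r3, r1, #(64 - 1) << 26		@ last entry of this segment
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 down to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 15 down to 0
	bx	lr

cpu_arm1020e_switch_mm performs an equivalent traversal with explicit segment/entry counters and shifts; the packed-operand form above is the more compact of the two idioms.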
aixcc-public/challenge-001-exemplar-source
3,842
arch/arm/mm/proc-v7-3level.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * arch/arm/mm/proc-v7-3level.S * * Copyright (C) 2001 Deep Blue Solutions Ltd. * Copyright (C) 2011 ARM Ltd. * Author: Catalin Marinas <catalin.marinas@arm.com> * based on arch/arm/mm/proc-v7-2level.S */ #include <asm/assembler.h> #define TTB_IRGN_NC (0 << 8) #define TTB_IRGN_WBWA (1 << 8) #define TTB_IRGN_WT (2 << 8) #define TTB_IRGN_WB (3 << 8) #define TTB_RGN_NC (0 << 10) #define TTB_RGN_OC_WBWA (1 << 10) #define TTB_RGN_OC_WT (2 << 10) #define TTB_RGN_OC_WB (3 << 10) #define TTB_S (3 << 12) #define TTB_EAE (1 << 31) /* PTWs cacheable, inner WB not shareable, outer WB not shareable */ #define TTB_FLAGS_UP (TTB_IRGN_WB|TTB_RGN_OC_WB) #define PMD_FLAGS_UP (PMD_SECT_WB) /* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */ #define TTB_FLAGS_SMP (TTB_IRGN_WBWA|TTB_S|TTB_RGN_OC_WBWA) #define PMD_FLAGS_SMP (PMD_SECT_WBWA|PMD_SECT_S) #ifndef __ARMEB__ # define rpgdl r0 # define rpgdh r1 #else # define rpgdl r1 # define rpgdh r0 #endif /* * cpu_v7_switch_mm(pgd_phys, tsk) * * Set the translation table base pointer to be pgd_phys (physical address of * the new TTB). */ ENTRY(cpu_v7_switch_mm) #ifdef CONFIG_MMU mmid r2, r2 asid r2, r2 orr rpgdh, rpgdh, r2, lsl #(48 - 32) @ upper 32-bits of pgd mcrr p15, 0, rpgdl, rpgdh, c2 @ set TTB 0 isb #endif ret lr ENDPROC(cpu_v7_switch_mm) #ifdef __ARMEB__ #define rl r3 #define rh r2 #else #define rl r2 #define rh r3 #endif /* * cpu_v7_set_pte_ext(ptep, pte) * * Set a level 2 translation table entry. * - ptep - pointer to level 3 translation table entry * - pte - PTE value to store (64-bit in r2 and r3) */ ENTRY(cpu_v7_set_pte_ext) #ifdef CONFIG_MMU tst rl, #L_PTE_VALID beq 1f tst rh, #1 << (57 - 32) @ L_PTE_NONE bicne rl, #L_PTE_VALID bne 1f eor ip, rh, #1 << (55 - 32) @ toggle L_PTE_DIRTY in temp reg to @ test for !L_PTE_DIRTY || L_PTE_RDONLY tst ip, #1 << (55 - 32) | 1 << (58 - 32) orrne rl, #PTE_AP2 biceq rl, #PTE_AP2 1: strd r2, r3, [r0] ALT_SMP(W(nop)) ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte #endif ret lr ENDPROC(cpu_v7_set_pte_ext) /* * Memory region attributes for LPAE (defined in pgtable-3level.h): * * n = AttrIndx[2:0] * * n MAIR * UNCACHED 000 00000000 * BUFFERABLE 001 01000100 * DEV_WC 001 01000100 * WRITETHROUGH 010 10101010 * WRITEBACK 011 11101110 * DEV_CACHED 011 11101110 * DEV_SHARED 100 00000100 * DEV_NONSHARED 100 00000100 * unused 101 * unused 110 * WRITEALLOC 111 11111111 */ .equ PRRR, 0xeeaa4400 @ MAIR0 .equ NMRR, 0xff000004 @ MAIR1 /* * Macro for setting up the TTBRx and TTBCR registers. * - \ttbr1 updated. */ .macro v7_ttb_setup, zero, ttbr0l, ttbr0h, ttbr1, tmp ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address cmp \ttbr1, \tmp, lsr #12 @ PHYS_OFFSET > PAGE_OFFSET? mov \tmp, #TTB_EAE @ for TTB control register ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP) ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP) ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP << 16) ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP << 16) /* * Only use split TTBRs if PHYS_OFFSET <= PAGE_OFFSET (cmp above), * otherwise booting secondary CPUs would end up using TTBR1 for the * identity mapping set up in TTBR0. */ orrls \tmp, \tmp, #TTBR1_SIZE @ TTBCR.T1SZ mcr p15, 0, \tmp, c2, c0, 2 @ TTBCR mov \tmp, \ttbr1, lsr #20 mov \ttbr1, \ttbr1, lsl #12 addls \ttbr1, \ttbr1, #TTBR1_OFFSET mcrr p15, 1, \ttbr1, \tmp, c2 @ load TTBR1 .endm /* * AT * TFR EV X F IHD LR S * .EEE ..EE PUI. 
.TAT 4RVI ZWRS BLDP WCAM * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced * 11 0 110 0 0011 1100 .111 1101 < we want */ .align 2 .type v7_crval, #object v7_crval: crval clear=0x0122c302, mmuset=0x30c03c7d, ucset=0x00c01c7c
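Under LPAE the ASID travels in TTBR0[55:48] rather than in a separate context-ID register write, which is why the cpu_v7_switch_mm above folds it into the upper word before the 64-bit MCRR. A hedged sketch of just that step, assuming the little-endian register split used by the rpgdl/rpgdh convention in this file (the label and r2 assignment are illustrative):

	.text
	.globl	lpae_ttbr0_sketch
lpae_ttbr0_sketch:
	and	r2, r2, #0xff			@ 8-bit ASID assumed
	orr	r1, r1, r2, lsl #(48 - 32)	@ ASID into TTBR0[55:48]
	mcrr	p15, 0, r0, r1, c2		@ 64-bit write to TTBR0
	isb
	bx	lr

Here r0/r1 carry the low/high words of the pgd physical address and r2 the ASID; the ISB ensures the new translation table base is visible before any subsequent fetch depends on it.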
aixcc-public/challenge-001-exemplar-source
14,217
arch/arm/mm/proc-xsc3.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/arch/arm/mm/proc-xsc3.S * * Original Author: Matthew Gilbert * Current Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> * * Copyright 2004 (C) Intel Corp. * Copyright 2005 (C) MontaVista Software, Inc. * * MMU functions for the Intel XScale3 Core (XSC3). The XSC3 core is * an extension to Intel's original XScale core that adds the following * features: * * - ARMv6 Supersections * - Low Locality Reference pages (replaces mini-cache) * - 36-bit addressing * - L2 cache * - Cache coherency if chipset supports it * * Based on original XScale code by Nicolas Pitre. */ #include <linux/linkage.h> #include <linux/init.h> #include <linux/pgtable.h> #include <asm/assembler.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/page.h> #include <asm/ptrace.h> #include "proc-macros.S" /* * This is the maximum size of an area which will be flushed. If the * area is larger than this, then we flush the whole cache. */ #define MAX_AREA_SIZE 32768 /* * The cache line size of the L1 I, L1 D and unified L2 cache. */ #define CACHELINESIZE 32 /* * The size of the L1 D cache. */ #define CACHESIZE 32768 /* * This macro is used to wait for a CP15 write and is needed when we * have to ensure that the last operation to the coprocessor was * completed before continuing with operation. */ .macro cpwait_ret, lr, rd mrc p15, 0, \rd, c2, c0, 0 @ arbitrary read of cp15 sub pc, \lr, \rd, LSR #32 @ wait for completion and @ flush instruction pipeline .endm /* * This macro cleans and invalidates the entire L1 D cache. */ .macro clean_d_cache rd, rs mov \rd, #0x1f00 orr \rd, \rd, #0x00e0 1: mcr p15, 0, \rd, c7, c14, 2 @ clean/invalidate L1 D line adds \rd, \rd, #0x40000000 bcc 1b subs \rd, \rd, #0x20 bpl 1b .endm .text /* * cpu_xsc3_proc_init() * * Nothing too exciting at the moment */ ENTRY(cpu_xsc3_proc_init) ret lr /* * cpu_xsc3_proc_fin() */ ENTRY(cpu_xsc3_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1800 @ ...IZ........... bic r0, r0, #0x0006 @ .............CA. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_xsc3_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * loc: location to jump to for soft reset */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_xsc3_reset) mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE msr cpsr_c, r1 @ reset CPSR mrc p15, 0, r1, c1, c0, 0 @ ctrl register bic r1, r1, #0x3900 @ ..VIZ..S........ bic r1, r1, #0x0086 @ ........B....CA. mcr p15, 0, r1, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c7, c7, 0 @ invalidate L1 caches and BTB bic r1, r1, #0x0001 @ ...............M mcr p15, 0, r1, c1, c0, 0 @ ctrl register @ CAUTION: MMU turned off from this point. We count on the pipeline @ already containing those two last instructions to survive. mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs ret r0 ENDPROC(cpu_xsc3_reset) .popsection /* * cpu_xsc3_do_idle() * * Cause the processor to idle * * For now we do nothing but go to idle mode for every case * * XScale supports clock switching, but using idle mode support * allows external hardware to react to system state changes. */ .align 5 ENTRY(cpu_xsc3_do_idle) mov r0, #1 mcr p14, 0, r0, c7, c0, 0 @ go to idle ret lr /* ================================= CACHE ================================ */ /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. 
*/ ENTRY(xsc3_flush_icache_all) mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache ret lr ENDPROC(xsc3_flush_icache_all) /* * flush_user_cache_all() * * Invalidate all cache entries in a particular address * space. */ ENTRY(xsc3_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. */ ENTRY(xsc3_flush_kern_cache_all) mov r2, #VM_EXEC mov ip, #0 __flush_whole_cache: clean_d_cache r0, r1 tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c5, 0 @ invalidate L1 I cache and BTB mcrne p15, 0, ip, c7, c10, 4 @ data write barrier mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush ret lr /* * flush_user_cache_range(start, end, vm_flags) * * Invalidate a range of cache entries in the specified * address space. * * - start - start address (may not be aligned) * - end - end address (exclusive, may not be aligned) * - vma - vma_area_struct describing address space */ .align 5 ENTRY(xsc3_flush_user_cache_range) mov ip, #0 sub r3, r1, r0 @ calculate total size cmp r3, #MAX_AREA_SIZE bhs __flush_whole_cache 1: tst r2, #VM_EXEC mcrne p15, 0, r0, c7, c5, 1 @ invalidate L1 I line mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line add r0, r0, #CACHELINESIZE cmp r0, r1 blo 1b tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB mcrne p15, 0, ip, c7, c10, 4 @ data write barrier mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the I cache and the D cache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address * * Note: single I-cache line invalidation isn't used here since * it also trashes the mini I-cache used by JTAG debuggers. */ ENTRY(xsc3_coherent_kern_range) /* FALLTHROUGH */ ENTRY(xsc3_coherent_user_range) bic r0, r0, #CACHELINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line add r0, r0, #CACHELINESIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate L1 I cache and BTB mcr p15, 0, r0, c7, c10, 4 @ data write barrier mcr p15, 0, r0, c7, c5, 4 @ prefetch flush ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache. * * - addr - kernel address * - size - region size */ ENTRY(xsc3_flush_kern_dcache_area) add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line add r0, r0, #CACHELINESIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate L1 I cache and BTB mcr p15, 0, r0, c7, c10, 4 @ data write barrier mcr p15, 0, r0, c7, c5, 4 @ prefetch flush ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. * * - start - virtual start address * - end - virtual end address */ xsc3_dma_inv_range: tst r0, #CACHELINESIZE - 1 bic r0, r0, #CACHELINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean L1 D line tst r1, #CACHELINESIZE - 1 mcrne p15, 0, r1, c7, c10, 1 @ clean L1 D line 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate L1 D line add r0, r0, #CACHELINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ data write barrier ret lr /* * dma_clean_range(start, end) * * Clean the specified virtual address range. 
* * - start - virtual start address * - end - virtual end address */ xsc3_dma_clean_range: bic r0, r0, #CACHELINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line add r0, r0, #CACHELINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ data write barrier ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. * * - start - virtual start address * - end - virtual end address */ ENTRY(xsc3_dma_flush_range) bic r0, r0, #CACHELINESIZE - 1 1: mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line add r0, r0, #CACHELINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ data write barrier ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(xsc3_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq xsc3_dma_clean_range bcs xsc3_dma_inv_range b xsc3_dma_flush_range ENDPROC(xsc3_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(xsc3_dma_unmap_area) ret lr ENDPROC(xsc3_dma_unmap_area) .globl xsc3_flush_kern_cache_louis .equ xsc3_flush_kern_cache_louis, xsc3_flush_kern_cache_all @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions xsc3 ENTRY(cpu_xsc3_dcache_clean_area) 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line add r0, r0, #CACHELINESIZE subs r1, r1, #CACHELINESIZE bhi 1b ret lr /* =============================== PageTable ============================== */ /* * cpu_xsc3_switch_mm(pgd) * * Set the translation base pointer to be as described by pgd. * * pgd: new page tables */ .align 5 ENTRY(cpu_xsc3_switch_mm) clean_d_cache r1, r2 mcr p15, 0, ip, c7, c5, 0 @ invalidate L1 I cache and BTB mcr p15, 0, ip, c7, c10, 4 @ data write barrier mcr p15, 0, ip, c7, c5, 4 @ prefetch flush orr r0, r0, #0x18 @ cache the page table in L2 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs cpwait_ret lr, ip /* * cpu_xsc3_set_pte_ext(ptep, pte, ext) * * Set a PTE and flush it out */ cpu_xsc3_mt_table: .long 0x00 @ L_PTE_MT_UNCACHED .long PTE_EXT_TEX(1) @ L_PTE_MT_BUFFERABLE .long PTE_EXT_TEX(5) | PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK .long PTE_EXT_TEX(1) | PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED .long 0x00 @ unused .long 0x00 @ L_PTE_MT_MINICACHE (not present) .long PTE_EXT_TEX(5) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC (not present?) .long 0x00 @ unused .long PTE_EXT_TEX(1) @ L_PTE_MT_DEV_WC .long 0x00 @ unused .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_DEV_CACHED .long PTE_EXT_TEX(2) @ L_PTE_MT_DEV_NONSHARED .long 0x00 @ unused .long 0x00 @ unused .long 0x00 @ unused .align 5 ENTRY(cpu_xsc3_set_pte_ext) xscale_set_pte_ext_prologue tst r1, #L_PTE_SHARED @ shared? 
and r1, r1, #L_PTE_MT_MASK adr ip, cpu_xsc3_mt_table ldr ip, [ip, r1] orrne r2, r2, #PTE_EXT_COHERENT @ interlock: mask in coherent bit bic r2, r2, #0x0c @ clear old C,B bits orr r2, r2, ip xscale_set_pte_ext_epilogue ret lr .ltorg .align .globl cpu_xsc3_suspend_size .equ cpu_xsc3_suspend_size, 4 * 6 #ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_xsc3_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode mrc p15, 0, r5, c15, c1, 0 @ CP access reg mrc p15, 0, r6, c13, c0, 0 @ PID mrc p15, 0, r7, c3, c0, 0 @ domain ID mrc p15, 0, r8, c1, c0, 1 @ auxiliary control reg mrc p15, 0, r9, c1, c0, 0 @ control reg bic r4, r4, #2 @ clear frequency change bit stmia r0, {r4 - r9} @ store cp regs ldmia sp!, {r4 - r9, pc} ENDPROC(cpu_xsc3_do_suspend) ENTRY(cpu_xsc3_do_resume) ldmia r0, {r4 - r9} @ load cp regs mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB mcr p15, 0, ip, c7, c10, 4 @ drain write (&fill) buffer mcr p15, 0, ip, c7, c5, 4 @ flush prefetch buffer mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs mcr p14, 0, r4, c6, c0, 0 @ clock configuration, turbo mode. mcr p15, 0, r5, c15, c1, 0 @ CP access reg mcr p15, 0, r6, c13, c0, 0 @ PID mcr p15, 0, r7, c3, c0, 0 @ domain ID orr r1, r1, #0x18 @ cache the page table in L2 mcr p15, 0, r1, c2, c0, 0 @ translation table base addr mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg mov r0, r9 @ control register b cpu_resume_mmu ENDPROC(cpu_xsc3_do_resume) #endif .type __xsc3_setup, #function __xsc3_setup: mov r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE msr cpsr_c, r0 mcr p15, 0, ip, c7, c7, 0 @ invalidate L1 caches and BTB mcr p15, 0, ip, c7, c10, 4 @ data write barrier mcr p15, 0, ip, c7, c5, 4 @ prefetch flush mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs orr r4, r4, #0x18 @ cache the page table in L2 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer mov r0, #1 << 6 @ cp6 access for early sched_clock mcr p15, 0, r0, c15, c1, 0 @ write CP access register mrc p15, 0, r0, c1, c0, 1 @ get auxiliary control reg and r0, r0, #2 @ preserve bit P bit setting orr r0, r0, #(1 << 10) @ enable L2 for LLR cache mcr p15, 0, r0, c1, c0, 1 @ set auxiliary control reg adr r5, xsc3_crval ldmia r5, {r5, r6} #ifdef CONFIG_CACHE_XSC3L2 mrc p15, 1, r0, c0, c0, 1 @ get L2 present information ands r0, r0, #0xf8 orrne r6, r6, #(1 << 26) @ enable L2 if present #endif mrc p15, 0, r0, c1, c0, 0 @ get control register bic r0, r0, r5 @ ..V. ..R. .... ..A. orr r0, r0, r6 @ ..VI Z..S .... .C.M (mmu) @ ...I Z..S .... .... (uc) ret lr .size __xsc3_setup, . 
- __xsc3_setup .type xsc3_crval, #object xsc3_crval: crval clear=0x04002202, mmuset=0x00003905, ucset=0x00001900 __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions xsc3, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1 .section ".rodata" string cpu_arch_name, "armv5te" string cpu_elf_name, "v5" string cpu_xsc3_name, "XScale-V3 based processor" .align .section ".proc.info.init", "a" .macro xsc3_proc_info name:req, cpu_val:req, cpu_mask:req .type __\name\()_proc_info,#object __\name\()_proc_info: .long \cpu_val .long \cpu_mask .long PMD_TYPE_SECT | \ PMD_SECT_BUFFERABLE | \ PMD_SECT_CACHEABLE | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __xsc3_setup, __\name\()_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP .long cpu_xsc3_name .long xsc3_processor_functions .long v4wbi_tlb_fns .long xsc3_mc_user_fns .long xsc3_cache_fns .size __\name\()_proc_info, . - __\name\()_proc_info .endm xsc3_proc_info xsc3, 0x69056000, 0xffffe000 /* Note: PXA935 changed its implementor ID from Intel to Marvell */ xsc3_proc_info xsc3_pxa935, 0x56056000, 0xffffe000
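The cpwait_ret macro above relies on the classic XScale CPWAIT idiom: read back any CP15 register, consume the result to create a dependency, then force a pipeline flush so earlier CP15 writes are guaranteed to have taken effect. Written out as a plain sequence rather than folded into the function return, it looks roughly like this (label and scratch register are illustrative):

	.text
	.globl	cpwait_sketch
cpwait_sketch:
	mrc	p15, 0, r2, c2, c0, 0	@ arbitrary read of CP15
	mov	r2, r2			@ wait for the read to complete
	sub	pc, pc, #4		@ branch to next insn: flush prefetch
	bx	lr

cpwait_ret merges the flush with the return by computing pc from lr (the LSR #32 shift yields zero, so the subtraction is a pure branch), saving one instruction on every call site.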
aixcc-public/challenge-001-exemplar-source
3,046
arch/arm/mm/proc-arm7tdmi.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/arch/arm/mm/proc-arm7tdmi.S: utility functions for ARM7TDMI * * Copyright (C) 2003-2006 Hyok S. Choi <hyok.choi@samsung.com> */ #include <linux/linkage.h> #include <linux/init.h> #include <linux/pgtable.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/ptrace.h> #include "proc-macros.S" .text /* * cpu_arm7tdmi_proc_init() * cpu_arm7tdmi_do_idle() * cpu_arm7tdmi_dcache_clean_area() * cpu_arm7tdmi_switch_mm() * * These are not required. */ ENTRY(cpu_arm7tdmi_proc_init) ENTRY(cpu_arm7tdmi_do_idle) ENTRY(cpu_arm7tdmi_dcache_clean_area) ENTRY(cpu_arm7tdmi_switch_mm) ret lr /* * cpu_arm7tdmi_proc_fin() */ ENTRY(cpu_arm7tdmi_proc_fin) ret lr /* * Function: cpu_arm7tdmi_reset(loc) * Params : loc(r0) address to jump to * Purpose : Sets up everything for a reset and jump to the location for soft reset. */ .pushsection .idmap.text, "ax" ENTRY(cpu_arm7tdmi_reset) ret r0 ENDPROC(cpu_arm7tdmi_reset) .popsection .type __arm7tdmi_setup, #function __arm7tdmi_setup: ret lr .size __arm7tdmi_setup, . - __arm7tdmi_setup __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions arm7tdmi, dabort=v4t_late_abort, pabort=legacy_pabort, nommu=1 .section ".rodata" string cpu_arch_name, "armv4t" string cpu_elf_name, "v4" string cpu_arm7tdmi_name, "ARM7TDMI" string cpu_triscenda7_name, "Triscend-A7x" string cpu_at91_name, "Atmel-AT91M40xxx" string cpu_s3c3410_name, "Samsung-S3C3410" string cpu_s3c44b0x_name, "Samsung-S3C44B0x" string cpu_s3c4510b_name, "Samsung-S3C4510B" string cpu_s3c4530_name, "Samsung-S3C4530" string cpu_netarm_name, "NETARM" .align .section ".proc.info.init", "a" .macro arm7tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, \ extra_hwcaps=0 .type __\name\()_proc_info, #object __\name\()_proc_info: .long \cpu_val .long \cpu_mask .long 0 .long 0 initfn __arm7tdmi_setup, __\name\()_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_26BIT | ( \extra_hwcaps ) .long \cpu_name .long arm7tdmi_processor_functions .long 0 .long 0 .long v4_cache_fns .size __\name\()_proc_info, . - __\name\()_proc_info .endm arm7tdmi_proc_info arm7tdmi, 0x41007700, 0xfff8ff00, \ cpu_arm7tdmi_name arm7tdmi_proc_info triscenda7, 0x0001d2ff, 0x0001ffff, \ cpu_triscenda7_name, extra_hwcaps=HWCAP_THUMB arm7tdmi_proc_info at91, 0x14000040, 0xfff000e0, \ cpu_at91_name, extra_hwcaps=HWCAP_THUMB arm7tdmi_proc_info s3c4510b, 0x36365000, 0xfffff000, \ cpu_s3c4510b_name, extra_hwcaps=HWCAP_THUMB arm7tdmi_proc_info s3c4530, 0x4c000000, 0xfff000e0, \ cpu_s3c4530_name, extra_hwcaps=HWCAP_THUMB arm7tdmi_proc_info s3c3410, 0x34100000, 0xffff0000, \ cpu_s3c3410_name, extra_hwcaps=HWCAP_THUMB arm7tdmi_proc_info s3c44b0x, 0x44b00000, 0xffff0000, \ cpu_s3c44b0x_name, extra_hwcaps=HWCAP_THUMB
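Because every ARM7TDMI derivative shares the same trivial function set, this file stamps out its proc_info records with a gas macro rather than hand-writing seven nearly identical tables. A reduced sketch of the same pattern, with the field layout cut down to just the ID/mask pair (all names here are hypothetical):

	.macro	stub_proc_info name:req, cpu_val:req, cpu_mask:req
	.type	__\name\()_info, #object
__\name\()_info:
	.long	\cpu_val		@ required ID value
	.long	\cpu_mask		@ mask applied to the CPU ID before comparing
	.size	__\name\()_info, . - __\name\()_info
	.endm

	stub_proc_info demo, 0x41007700, 0xfff8ff00

Each invocation expands to one complete record, which keeps the list of supported derivatives at the end of the file to a single line apiece and makes adding a new ID a one-line change.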
aixcc-public/challenge-001-exemplar-source
12,134
arch/arm/mm/proc-arm926.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/arch/arm/mm/proc-arm926.S: MMU functions for ARM926EJ-S * * Copyright (C) 1999-2001 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. * hacked for non-paged-MM by Hyok S. Choi, 2003. * * These are the low level assembler for performing cache and TLB * functions on the arm926. * * CONFIG_CPU_ARM926_CPU_IDLE -> nohlt */ #include <linux/linkage.h> #include <linux/init.h> #include <linux/pgtable.h> #include <asm/assembler.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/page.h> #include <asm/ptrace.h> #include "proc-macros.S" /* * This is the maximum size of an area which will be invalidated * using the single invalidate entry instructions. Anything larger * than this, and we go for the whole cache. * * This value should be chosen such that we choose the cheapest * alternative. */ #define CACHE_DLIMIT 16384 /* * the cache line size of the I and D cache */ #define CACHE_DLINESIZE 32 .text /* * cpu_arm926_proc_init() */ ENTRY(cpu_arm926_proc_init) ret lr /* * cpu_arm926_proc_fin() */ ENTRY(cpu_arm926_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_arm926_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * loc: location to jump to for soft reset */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_arm926_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB #ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_arm926_reset) .popsection /* * cpu_arm926_do_idle() * * Called with IRQs disabled */ .align 10 ENTRY(cpu_arm926_do_idle) mov r0, #0 mrc p15, 0, r1, c1, c0, 0 @ Read control register mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer bic r2, r1, #1 << 12 mrs r3, cpsr @ Disable FIQs while Icache orr ip, r3, #PSR_F_BIT @ is disabled msr cpsr_c, ip mcr p15, 0, r2, c1, c0, 0 @ Disable I cache mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable msr cpsr_c, r3 @ Restore FIQ state ret lr /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(arm926_flush_icache_all) mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache ret lr ENDPROC(arm926_flush_icache_all) /* * flush_user_cache_all() * * Clean and invalidate all cache entries in a particular * address space. */ ENTRY(arm926_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. */ ENTRY(arm926_flush_kern_cache_all) mov r2, #VM_EXEC mov ip, #0 __flush_whole_cache: #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache #else 1: mrc p15, 0, APSR_nzcv, c7, c14, 3 @ test,clean,invalidate bne 1b #endif tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * flush_user_cache_range(start, end, flags) * * Clean and invalidate a range of cache entries in the * specified address range. 
* * - start - start address (inclusive) * - end - end address (exclusive) * - flags - vm_flags describing address space */ ENTRY(arm926_flush_user_cache_range) mov ip, #0 sub r3, r1, r0 @ calculate total size cmp r3, #CACHE_DLIMIT bgt __flush_whole_cache 1: tst r2, #VM_EXEC #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE #else mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE #endif cmp r0, r1 blo 1b tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm926_coherent_kern_range) /* FALLTHROUGH */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm926_coherent_user_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB mov r0, #0 ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size */ ENTRY(arm926_flush_kern_dcache_area) add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm926_dma_inv_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry #endif bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_clean_range(start, end) * * Clean the specified virtual address range. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm926_dma_clean_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. 
* * - start - virtual start address * - end - virtual end address */ ENTRY(arm926_dma_flush_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry #else mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry #endif add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm926_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq arm926_dma_clean_range bcs arm926_dma_inv_range b arm926_dma_flush_range ENDPROC(arm926_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm926_dma_unmap_area) ret lr ENDPROC(arm926_dma_unmap_area) .globl arm926_flush_kern_cache_louis .equ arm926_flush_kern_cache_louis, arm926_flush_kern_cache_all @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions arm926 ENTRY(cpu_arm926_dcache_clean_area) #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE subs r1, r1, #CACHE_DLINESIZE bhi 1b #endif mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* =============================== PageTable ============================== */ /* * cpu_arm926_switch_mm(pgd) * * Set the translation base pointer to be as described by pgd. * * pgd: new page tables */ .align 5 ENTRY(cpu_arm926_switch_mm) #ifdef CONFIG_MMU mov ip, #0 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache #else @ && 'Clean & Invalidate whole DCache' 1: mrc p15, 0, APSR_nzcv, c7, c14, 3 @ test,clean,invalidate bne 1b #endif mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif ret lr /* * cpu_arm926_set_pte_ext(ptep, pte, ext) * * Set a PTE and flush it out */ .align 5 ENTRY(cpu_arm926_set_pte_ext) #ifdef CONFIG_MMU armv3_set_pte_ext mov r0, r0 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, r0, c7, c10, 1 @ clean D entry #endif mcr p15, 0, r0, c7, c10, 4 @ drain WB #endif ret lr /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ .globl cpu_arm926_suspend_size .equ cpu_arm926_suspend_size, 4 * 3 #ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_arm926_do_suspend) stmfd sp!, {r4 - r6, lr} mrc p15, 0, r4, c13, c0, 0 @ PID mrc p15, 0, r5, c3, c0, 0 @ Domain ID mrc p15, 0, r6, c1, c0, 0 @ Control register stmia r0, {r4 - r6} ldmfd sp!, {r4 - r6, pc} ENDPROC(cpu_arm926_do_suspend) ENTRY(cpu_arm926_do_resume) mov ip, #0 mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches ldmia r0, {r4 - r6} mcr p15, 0, r4, c13, c0, 0 @ PID mcr p15, 0, r5, c3, c0, 0 @ Domain ID mcr p15, 0, r1, c2, c0, 0 @ TTB address mov r0, r6 @ control register b cpu_resume_mmu ENDPROC(cpu_arm926_do_resume) #endif .type __arm926_setup, #function __arm926_setup: mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 #endif #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mov r0, #4 @ disable write-back on caches explicitly mcr p15, 7, r0, c15, c0, 0 #endif adr r5, arm926_crval ldmia r5, {r5, r6} mrc p15, 0, r0, c1, c0 @ get control register v4 bic r0, r0, r5 orr r0, r0, r6 
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN orr r0, r0, #0x4000 @ .1.. .... .... .... #endif ret lr .size __arm926_setup, . - __arm926_setup /* * R * .RVI ZFRS BLDP WCAM * .011 0001 ..11 0101 * */ .type arm926_crval, #object arm926_crval: crval clear=0x00007f3f, mmuset=0x00003135, ucset=0x00001134 __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions arm926, dabort=v5tj_early_abort, pabort=legacy_pabort, suspend=1 .section ".rodata" string cpu_arch_name, "armv5tej" string cpu_elf_name, "v5" string cpu_arm926_name, "ARM926EJ-S" .align .section ".proc.info.init", "a" .type __arm926_proc_info,#object __arm926_proc_info: .long 0x41069260 @ ARM926EJ-S (v5TEJ) .long 0xff0ffff0 .long PMD_TYPE_SECT | \ PMD_SECT_BUFFERABLE | \ PMD_SECT_CACHEABLE | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __arm926_setup, __arm926_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA .long cpu_arm926_name .long arm926_processor_functions .long v4wbi_tlb_fns .long v4wb_user_fns .long arm926_cache_fns .size __arm926_proc_info, . - __arm926_proc_info
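The crval pair above encodes a clear mask and a set mask for the CP15 control register; __arm926_setup applies them with a bic/orr pair. The same read-modify-write expressed in C, using the MMU values from arm926_crval:

#include <stdint.h>

/* C equivalent of "bic r0, r0, r5; orr r0, r0, r6" in __arm926_setup. */
static uint32_t arm926_ctrl_update(uint32_t ctrl)
{
	const uint32_t clear  = 0x00007f3f;	/* crval clear=  */
	const uint32_t mmuset = 0x00003135;	/* crval mmuset= */

	return (ctrl & ~clear) | mmuset;
}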
aixcc-public/challenge-001-exemplar-source
9,328
arch/arm/mm/proc-macros.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * We need constants.h for: * VMA_VM_MM * VMA_VM_FLAGS * VM_EXEC */ #include <asm/asm-offsets.h> #include <asm/thread_info.h> #ifdef CONFIG_CPU_V7M #include <asm/v7m.h> #endif /* * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm) */ .macro vma_vm_mm, rd, rn ldr \rd, [\rn, #VMA_VM_MM] .endm /* * vma_vm_flags - get vma->vm_flags */ .macro vma_vm_flags, rd, rn ldr \rd, [\rn, #VMA_VM_FLAGS] .endm /* * act_mm - get current->active_mm */ .macro act_mm, rd get_current \rd .if (TSK_ACTIVE_MM > IMM12_MASK) add \rd, \rd, #TSK_ACTIVE_MM & ~IMM12_MASK .endif ldr \rd, [\rd, #TSK_ACTIVE_MM & IMM12_MASK] .endm /* * mmid - get context id from mm pointer (mm->context.id) * note, this field is 64bit, so in big-endian the two words are swapped too. */ .macro mmid, rd, rn #ifdef __ARMEB__ ldr \rd, [\rn, #MM_CONTEXT_ID + 4 ] #else ldr \rd, [\rn, #MM_CONTEXT_ID] #endif .endm /* * mask_asid - mask the ASID from the context ID */ .macro asid, rd, rn and \rd, \rn, #255 .endm .macro crval, clear, mmuset, ucset #ifdef CONFIG_MMU .word \clear .word \mmuset #else .word \clear .word \ucset #endif .endm /* * dcache_line_size - get the minimum D-cache line size from the CTR register * on ARMv7. */ .macro dcache_line_size, reg, tmp #ifdef CONFIG_CPU_V7M movw \tmp, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_CTR movt \tmp, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_CTR ldr \tmp, [\tmp] #else mrc p15, 0, \tmp, c0, c0, 1 @ read ctr #endif lsr \tmp, \tmp, #16 and \tmp, \tmp, #0xf @ cache line size encoding mov \reg, #4 @ bytes per word mov \reg, \reg, lsl \tmp @ actual cache line size .endm /* * icache_line_size - get the minimum I-cache line size from the CTR register * on ARMv7. */ .macro icache_line_size, reg, tmp #ifdef CONFIG_CPU_V7M movw \tmp, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_CTR movt \tmp, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_CTR ldr \tmp, [\tmp] #else mrc p15, 0, \tmp, c0, c0, 1 @ read ctr #endif and \tmp, \tmp, #0xf @ cache line size encoding mov \reg, #4 @ bytes per word mov \reg, \reg, lsl \tmp @ actual cache line size .endm /* * Sanity check the PTE configuration for the code below - which makes * certain assumptions about how these bits are laid out. */ #ifdef CONFIG_MMU #if L_PTE_SHARED != PTE_EXT_SHARED #error PTE shared bit mismatch #endif #if !defined (CONFIG_ARM_LPAE) && \ (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\ L_PTE_PRESENT) > L_PTE_SHARED #error Invalid Linux PTE bit settings #endif #endif /* CONFIG_MMU */ /* * The ARMv6 and ARMv7 set_pte_ext translation function. 
* * Permission translation: * YUWD APX AP1 AP0 SVC User * 0xxx 0 0 0 no acc no acc * 100x 1 0 1 r/o no acc * 10x0 1 0 1 r/o no acc * 1011 0 0 1 r/w no acc * 110x 1 1 1 r/o r/o * 11x0 1 1 1 r/o r/o * 1111 0 1 1 r/w r/w */ .macro armv6_mt_table pfx \pfx\()_mt_table: .long 0x00 @ L_PTE_MT_UNCACHED .long PTE_EXT_TEX(1) @ L_PTE_MT_BUFFERABLE .long PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK .long PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED .long 0x00 @ unused .long 0x00 @ L_PTE_MT_MINICACHE (not present) .long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC .long 0x00 @ unused .long PTE_EXT_TEX(1) @ L_PTE_MT_DEV_WC .long 0x00 @ unused .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_DEV_CACHED .long PTE_EXT_TEX(2) @ L_PTE_MT_DEV_NONSHARED .long 0x00 @ unused .long 0x00 @ unused .long PTE_CACHEABLE | PTE_BUFFERABLE | PTE_EXT_APX @ L_PTE_MT_VECTORS .endm .macro armv6_set_pte_ext pfx str r1, [r0], #2048 @ linux version bic r3, r1, #0x000003fc bic r3, r3, #PTE_TYPE_MASK orr r3, r3, r2 orr r3, r3, #PTE_EXT_AP0 | 2 adr ip, \pfx\()_mt_table and r2, r1, #L_PTE_MT_MASK ldr r2, [ip, r2] eor r1, r1, #L_PTE_DIRTY tst r1, #L_PTE_DIRTY|L_PTE_RDONLY orrne r3, r3, #PTE_EXT_APX tst r1, #L_PTE_USER orrne r3, r3, #PTE_EXT_AP1 tstne r3, #PTE_EXT_APX @ user read-only -> kernel read-only bicne r3, r3, #PTE_EXT_AP0 tst r1, #L_PTE_XN orrne r3, r3, #PTE_EXT_XN eor r3, r3, r2 tst r1, #L_PTE_YOUNG tstne r1, #L_PTE_PRESENT moveq r3, #0 tstne r1, #L_PTE_NONE movne r3, #0 str r3, [r0] mcr p15, 0, r0, c7, c10, 1 @ flush_pte .endm /* * The ARMv3, ARMv4 and ARMv5 set_pte_ext translation function, * covering most CPUs except Xscale and Xscale 3. * * Permission translation: * YUWD AP SVC User * 0xxx 0x00 no acc no acc * 100x 0x00 r/o no acc * 10x0 0x00 r/o no acc * 1011 0x55 r/w no acc * 110x 0xaa r/w r/o * 11x0 0xaa r/w r/o * 1111 0xff r/w r/w */ .macro armv3_set_pte_ext wc_disable=1 str r1, [r0], #2048 @ linux version eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY bic r2, r1, #PTE_SMALL_AP_MASK @ keep C, B bits bic r2, r2, #PTE_TYPE_MASK orr r2, r2, #PTE_TYPE_SMALL tst r3, #L_PTE_USER @ user? orrne r2, r2, #PTE_SMALL_AP_URO_SRW tst r3, #L_PTE_RDONLY | L_PTE_DIRTY @ write and dirty? orreq r2, r2, #PTE_SMALL_AP_UNO_SRW tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young? movne r2, #0 .if \wc_disable #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH tst r2, #PTE_CACHEABLE bicne r2, r2, #PTE_BUFFERABLE #endif .endif str r2, [r0] @ hardware version .endm /* * Xscale set_pte_ext translation, split into two halves to cope * with work-arounds. r3 must be preserved by code between these * two macros. * * Permission translation: * YUWD AP SVC User * 0xxx 00 no acc no acc * 100x 00 r/o no acc * 10x0 00 r/o no acc * 1011 01 r/w no acc * 110x 10 r/w r/o * 11x0 10 r/w r/o * 1111 11 r/w r/w */ .macro xscale_set_pte_ext_prologue str r1, [r0] @ linux version eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY bic r2, r1, #PTE_SMALL_AP_MASK @ keep C, B bits orr r2, r2, #PTE_TYPE_EXT @ extended page tst r3, #L_PTE_USER @ user? orrne r2, r2, #PTE_EXT_AP_URO_SRW @ yes -> user r/o, system r/w tst r3, #L_PTE_RDONLY | L_PTE_DIRTY @ write and dirty? orreq r2, r2, #PTE_EXT_AP_UNO_SRW @ yes -> user n/a, system r/w @ combined with user -> user r/w .endm .macro xscale_set_pte_ext_epilogue tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young? movne r2, #0 @ no -> fault str r2, [r0, #2048]! 
@ hardware version mov ip, #0 mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line mcr p15, 0, ip, c7, c10, 4 @ data write barrier .endm .macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0 /* * If we are building for big.Little with branch predictor hardening, * we need the processor function tables to remain available after boot. */ #if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) .section ".rodata" #endif .type \name\()_processor_functions, #object .align 2 ENTRY(\name\()_processor_functions) .word \dabort .word \pabort .word cpu_\name\()_proc_init .word \bugs .word cpu_\name\()_proc_fin .word cpu_\name\()_reset .word cpu_\name\()_do_idle .word cpu_\name\()_dcache_clean_area .word cpu_\name\()_switch_mm .if \nommu .word 0 .else .word cpu_\name\()_set_pte_ext .endif .if \suspend .word cpu_\name\()_suspend_size #ifdef CONFIG_ARM_CPU_SUSPEND .word cpu_\name\()_do_suspend .word cpu_\name\()_do_resume #else .word 0 .word 0 #endif .else .word 0 .word 0 .word 0 .endif .size \name\()_processor_functions, . - \name\()_processor_functions #if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) .previous #endif .endm .macro define_cache_functions name:req .align 2 .type \name\()_cache_fns, #object ENTRY(\name\()_cache_fns) .long \name\()_flush_icache_all .long \name\()_flush_kern_cache_all .long \name\()_flush_kern_cache_louis .long \name\()_flush_user_cache_all .long \name\()_flush_user_cache_range .long \name\()_coherent_kern_range .long \name\()_coherent_user_range .long \name\()_flush_kern_dcache_area .long \name\()_dma_map_area .long \name\()_dma_unmap_area .long \name\()_dma_flush_range .size \name\()_cache_fns, . - \name\()_cache_fns .endm .macro define_tlb_functions name:req, flags_up:req, flags_smp .type \name\()_tlb_fns, #object .align 2 ENTRY(\name\()_tlb_fns) .long \name\()_flush_user_tlb_range .long \name\()_flush_kern_tlb_range .ifnb \flags_smp ALT_SMP(.long \flags_smp ) ALT_UP(.long \flags_up ) .else .long \flags_up .endif .size \name\()_tlb_fns, . - \name\()_tlb_fns .endm .macro globl_equ x, y .globl \x .equ \x, \y .endm .macro initfn, func, base .long \func - \base .endm /* * Macro to calculate the log2 size for the protection region * registers. This calculates rd = log2(size) - 1. tmp must * not be the same register as rd. */ .macro pr_sz, rd, size, tmp mov \tmp, \size, lsr #12 mov \rd, #11 1: movs \tmp, \tmp, lsr #1 addne \rd, \rd, #1 bne 1b .endm /* * Macro to generate a protection region register value * given a pre-masked address, size, and enable bit. * Corrupts size. */ .macro pr_val, dest, addr, size, enable pr_sz \dest, \size, \size @ calculate log2(size) - 1 orr \dest, \addr, \dest, lsl #1 @ mask in the region size orr \dest, \dest, \enable .endm
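define_cache_functions above lays out eleven pointers in a fixed order that must match struct cpu_cache_fns in <asm/cacheflush.h>. A hedged C mirror of that order — only the slot sequence is taken from the macro; the parameter types are simplified guesses:

/* Slot order of \name\()_cache_fns as emitted by define_cache_functions. */
struct cpu_cache_fns_sketch {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_kern_louis)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long start, unsigned long end,
				 unsigned int flags);
	void (*coherent_kern_range)(unsigned long start, unsigned long end);
	int  (*coherent_user_range)(unsigned long start, unsigned long end);
	void (*flush_kern_dcache_area)(void *addr, unsigned int size);
	void (*dma_map_area)(const void *start, unsigned int size, int dir);
	void (*dma_unmap_area)(const void *start, unsigned int size, int dir);
	void (*dma_flush_range)(const void *start, const void *end);
};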
aixcc-public/challenge-001-exemplar-source
10,511
arch/arm/mm/proc-arm946.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/arch/arm/mm/arm946.S: utility functions for ARM946E-S * * Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com) * * (Many of cache codes are from proc-arm926.S) */ #include <linux/linkage.h> #include <linux/init.h> #include <linux/pgtable.h> #include <asm/assembler.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/ptrace.h> #include "proc-macros.S" /* * ARM946E-S is synthesizable to have 0KB to 1MB sized D-Cache, * comprising 256 lines of 32 bytes (8 words). */ #define CACHE_DSIZE (CONFIG_CPU_DCACHE_SIZE) /* typically 8KB. */ #define CACHE_DLINESIZE 32 /* fixed */ #define CACHE_DSEGMENTS 4 /* fixed */ #define CACHE_DENTRIES (CACHE_DSIZE / CACHE_DSEGMENTS / CACHE_DLINESIZE) #define CACHE_DLIMIT (CACHE_DSIZE * 4) /* benchmark needed */ .text /* * cpu_arm946_proc_init() * cpu_arm946_switch_mm() * * These are not required. */ ENTRY(cpu_arm946_proc_init) ENTRY(cpu_arm946_switch_mm) ret lr /* * cpu_arm946_proc_fin() */ ENTRY(cpu_arm946_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x00001000 @ i-cache bic r0, r0, #0x00000004 @ d-cache mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_arm946_reset(loc) * Params : r0 = address to jump to * Notes : This sets up everything for a reset */ .pushsection .idmap.text, "ax" ENTRY(cpu_arm946_reset) mov ip, #0 mcr p15, 0, ip, c7, c5, 0 @ flush I cache mcr p15, 0, ip, c7, c6, 0 @ flush D cache mcr p15, 0, ip, c7, c10, 4 @ drain WB mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x00000005 @ .............c.p bic ip, ip, #0x00001000 @ i-cache mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_arm946_reset) .popsection /* * cpu_arm946_do_idle() */ .align 5 ENTRY(cpu_arm946_do_idle) mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt ret lr /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(arm946_flush_icache_all) mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache ret lr ENDPROC(arm946_flush_icache_all) /* * flush_user_cache_all() */ ENTRY(arm946_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. */ ENTRY(arm946_flush_kern_cache_all) mov r2, #VM_EXEC mov ip, #0 __flush_whole_cache: #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, ip, c7, c6, 0 @ flush D cache #else mov r1, #(CACHE_DSEGMENTS - 1) << 29 @ 4 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 4 @ n entries 2: mcr p15, 0, r3, c7, c14, 2 @ clean/flush D index subs r3, r3, #1 << 4 bcs 2b @ entries n to 0 subs r1, r1, #1 << 29 bcs 1b @ segments 3 to 0 #endif tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c5, 0 @ flush I cache mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * flush_user_cache_range(start, end, flags) * * Clean and invalidate a range of cache entries in the * specified address range. 
* * - start - start address (inclusive) * - end - end address (exclusive) * - flags - vm_flags describing address space * (same as arm926) */ ENTRY(arm946_flush_user_cache_range) mov ip, #0 sub r3, r1, r0 @ calculate total size cmp r3, #CACHE_DLIMIT bhs __flush_whole_cache 1: tst r2, #VM_EXEC #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE #else mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE #endif cmp r0, r1 blo 1b tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm946_coherent_kern_range) /* FALLTHROUGH */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address * (same as arm926) */ ENTRY(arm946_coherent_user_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB mov r0, #0 ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size * (same as arm926) */ ENTRY(arm946_flush_kern_dcache_area) add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. * * - start - virtual start address * - end - virtual end address * (same as arm926) */ arm946_dma_inv_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry #endif bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_clean_range(start, end) * * Clean the specified virtual address range. * * - start - virtual start address * - end - virtual end address * * (same as arm926) */ arm946_dma_clean_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. 
* * - start - virtual start address * - end - virtual end address * * (same as arm926) */ ENTRY(arm946_dma_flush_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry #else mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry #endif add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm946_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq arm946_dma_clean_range bcs arm946_dma_inv_range b arm946_dma_flush_range ENDPROC(arm946_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm946_dma_unmap_area) ret lr ENDPROC(arm946_dma_unmap_area) .globl arm946_flush_kern_cache_louis .equ arm946_flush_kern_cache_louis, arm946_flush_kern_cache_all @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions arm946 ENTRY(cpu_arm946_dcache_clean_area) #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE subs r1, r1, #CACHE_DLINESIZE bhi 1b #endif mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr .type __arm946_setup, #function __arm946_setup: mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c6, 0 @ invalidate D cache mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c6, c3, 0 @ disable memory region 3~7 mcr p15, 0, r0, c6, c4, 0 mcr p15, 0, r0, c6, c5, 0 mcr p15, 0, r0, c6, c6, 0 mcr p15, 0, r0, c6, c7, 0 mov r0, #0x0000003F @ base = 0, size = 4GB mcr p15, 0, r0, c6, c0, 0 @ set region 0, default ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM ldr r7, =CONFIG_DRAM_SIZE @ size of RAM (must be >= 4KB) pr_val r3, r0, r7, #1 mcr p15, 0, r3, c6, c1, 0 ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH ldr r7, =CONFIG_FLASH_SIZE @ size of FLASH (must be >= 4KB) pr_val r3, r0, r7, #1 mcr p15, 0, r3, c6, c2, 0 mov r0, #0x06 mcr p15, 0, r0, c2, c0, 0 @ region 1,2 d-cacheable mcr p15, 0, r0, c2, c0, 1 @ region 1,2 i-cacheable #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mov r0, #0x00 @ disable whole write buffer #else mov r0, #0x02 @ region 1 write bufferred #endif mcr p15, 0, r0, c3, c0, 0 /* * Access Permission Settings for future permission control by PU. * * priv. user * region 0 (whole) rw -- : b0001 * region 1 (RAM) rw rw : b0011 * region 2 (FLASH) rw r- : b0010 * region 3~7 (none) -- -- : b0000 */ mov r0, #0x00000031 orr r0, r0, #0x00000200 mcr p15, 0, r0, c5, c0, 2 @ set data access permission mcr p15, 0, r0, c5, c0, 3 @ set inst. access permission mrc p15, 0, r0, c1, c0 @ get control register orr r0, r0, #0x00001000 @ I-cache orr r0, r0, #0x00000005 @ MPU/D-cache #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN orr r0, r0, #0x00004000 @ .1.. .... .... .... #endif ret lr .size __arm946_setup, . 
- __arm946_setup __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions arm946, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1 .section ".rodata" string cpu_arch_name, "armv5te" string cpu_elf_name, "v5t" string cpu_arm946_name, "ARM946E-S" .align .section ".proc.info.init", "a" .type __arm946_proc_info,#object __arm946_proc_info: .long 0x41009460 .long 0xff00fff0 .long 0 .long 0 initfn __arm946_setup, __arm946_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB .long cpu_arm946_name .long arm946_processor_functions .long 0 .long 0 .long arm946_cache_fns .size __arm946_proc_info, . - __arm946_proc_info
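__arm946_setup above builds its protection-region register values with pr_val/pr_sz from proc-macros.S; pr_sz computes log2(size) - 1, counting up from 4KB. The same computation in C, assuming size is a power of two of at least 4KB as the surrounding comments require:

/* C equivalent of pr_sz: returns log2(size) - 1 for size >= 4KB. */
static unsigned int pr_sz(unsigned long size)
{
	unsigned long tmp = size >> 12;	/* mov tmp, size, lsr #12 */
	unsigned int rd = 11;		/* mov rd, #11 (log2(4K) - 1) */

	while (tmp >>= 1)		/* movs tmp, tmp, lsr #1 */
		rd++;			/* addne rd, rd, #1 */
	return rd;
}

/* C equivalent of pr_val: pre-masked base | size encoding | enable bit. */
static unsigned long pr_val(unsigned long addr, unsigned long size,
			    unsigned int enable)
{
	return addr | (pr_sz(size) << 1) | enable;
}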
aixcc-public/challenge-001-exemplar-source
6,825
arch/arm/mm/proc-sa1100.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/arch/arm/mm/proc-sa1100.S * * Copyright (C) 1997-2002 Russell King * hacked for non-paged-MM by Hyok S. Choi, 2003. * * MMU functions for SA110 * * These are the low level assembler for performing cache and TLB * functions on the StrongARM-1100 and StrongARM-1110. * * Note that SA1100 and SA1110 share everything but their name and CPU ID. * * 12-jun-2000, Erik Mouw (J.A.K.Mouw@its.tudelft.nl): * Flush the read buffer at context switches */ #include <linux/linkage.h> #include <linux/init.h> #include <linux/pgtable.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/hwcap.h> #include <mach/hardware.h> #include <asm/pgtable-hwdef.h> #include "proc-macros.S" /* * the cache line size of the I and D cache */ #define DCACHELINESIZE 32 .section .text /* * cpu_sa1100_proc_init() */ ENTRY(cpu_sa1100_proc_init) mov r0, #0 mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland ret lr /* * cpu_sa1100_proc_fin() * * Prepare the CPU for reset: * - Disable interrupts * - Clean and turn off caches. */ ENTRY(cpu_sa1100_proc_fin) mcr p15, 0, ip, c15, c2, 2 @ Disable clock switching mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_sa1100_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * loc: location to jump to for soft reset */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_sa1100_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB #ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_sa1100_reset) .popsection /* * cpu_sa1100_do_idle(type) * * Cause the processor to idle * * type: call type: * 0 = slow idle * 1 = fast idle * 2 = switch to slow processor clock * 3 = switch to fast processor clock */ .align 5 ENTRY(cpu_sa1100_do_idle) mov r0, r0 @ 4 nop padding mov r0, r0 mov r0, r0 mov r0, r0 @ 4 nop padding mov r0, r0 mov r0, r0 mov r0, #0 ldr r1, =UNCACHEABLE_ADDR @ ptr to uncacheable address @ --- aligned to a cache line mcr p15, 0, r0, c15, c2, 2 @ disable clock switching ldr r1, [r1, #0] @ force switch to MCLK mcr p15, 0, r0, c15, c8, 2 @ wait for interrupt mov r0, r0 @ safety mcr p15, 0, r0, c15, c1, 2 @ enable clock switching ret lr /* ================================= CACHE ================================ */ /* * cpu_sa1100_dcache_clean_area(addr,sz) * * Clean the specified entry of any caches such that the MMU * translation fetches will obtain correct data. * * addr: cache-unaligned virtual address */ .align 5 ENTRY(cpu_sa1100_dcache_clean_area) 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #DCACHELINESIZE subs r1, r1, #DCACHELINESIZE bhi 1b ret lr /* =============================== PageTable ============================== */ /* * cpu_sa1100_switch_mm(pgd) * * Set the translation base pointer to be as described by pgd. * * pgd: new page tables */ .align 5 ENTRY(cpu_sa1100_switch_mm) #ifdef CONFIG_MMU str lr, [sp, #-4]! 
bl v4wb_flush_kern_cache_all @ clears IP mcr p15, 0, ip, c9, c0, 0 @ invalidate RB mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs ldr pc, [sp], #4 #else ret lr #endif /* * cpu_sa1100_set_pte_ext(ptep, pte, ext) * * Set a PTE and flush it out */ .align 5 ENTRY(cpu_sa1100_set_pte_ext) #ifdef CONFIG_MMU armv3_set_pte_ext wc_disable=0 mov r0, r0 mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c10, 4 @ drain WB #endif ret lr .globl cpu_sa1100_suspend_size .equ cpu_sa1100_suspend_size, 4 * 3 #ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_sa1100_do_suspend) stmfd sp!, {r4 - r6, lr} mrc p15, 0, r4, c3, c0, 0 @ domain ID mrc p15, 0, r5, c13, c0, 0 @ PID mrc p15, 0, r6, c1, c0, 0 @ control reg stmia r0, {r4 - r6} @ store cp regs ldmfd sp!, {r4 - r6, pc} ENDPROC(cpu_sa1100_do_suspend) ENTRY(cpu_sa1100_do_resume) ldmia r0, {r4 - r6} @ load cp regs mov ip, #0 mcr p15, 0, ip, c8, c7, 0 @ flush I+D TLBs mcr p15, 0, ip, c7, c7, 0 @ flush I&D cache mcr p15, 0, ip, c9, c0, 0 @ invalidate RB mcr p15, 0, ip, c9, c0, 5 @ allow user space to use RB mcr p15, 0, r4, c3, c0, 0 @ domain ID mcr p15, 0, r1, c2, c0, 0 @ translation table base addr mcr p15, 0, r5, c13, c0, 0 @ PID mov r0, r6 @ control register b cpu_resume_mmu ENDPROC(cpu_sa1100_do_resume) #endif .type __sa1100_setup, #function __sa1100_setup: mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 #endif adr r5, sa1100_crval ldmia r5, {r5, r6} mrc p15, 0, r0, c1, c0 @ get control register v4 bic r0, r0, r5 orr r0, r0, r6 ret lr .size __sa1100_setup, . - __sa1100_setup /* * R * .RVI ZFRS BLDP WCAM * ..11 0001 ..11 1101 * */ .type sa1100_crval, #object sa1100_crval: crval clear=0x00003f3f, mmuset=0x0000313d, ucset=0x00001130 __INITDATA /* * SA1100 and SA1110 share the same function calls */ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions sa1100, dabort=v4_early_abort, pabort=legacy_pabort, suspend=1 .section ".rodata" string cpu_arch_name, "armv4" string cpu_elf_name, "v4" string cpu_sa1100_name, "StrongARM-1100" string cpu_sa1110_name, "StrongARM-1110" .align .section ".proc.info.init", "a" .macro sa1100_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req .type __\name\()_proc_info,#object __\name\()_proc_info: .long \cpu_val .long \cpu_mask .long PMD_TYPE_SECT | \ PMD_SECT_BUFFERABLE | \ PMD_SECT_CACHEABLE | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __sa1100_setup, __\name\()_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT .long \cpu_name .long sa1100_processor_functions .long v4wb_tlb_fns .long v4_mc_user_fns .long v4wb_cache_fns .size __\name\()_proc_info, . - __\name\()_proc_info .endm sa1100_proc_info sa1100, 0x4401a110, 0xfffffff0, cpu_sa1100_name sa1100_proc_info sa1110, 0x6901b110, 0xfffffff0, cpu_sa1110_name
aixcc-public/challenge-001-exemplar-source
3,161
arch/arm/mm/cache-v4.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4.S
 *
 *  Copyright (C) 1997-2002 Russell king
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4_flush_icache_all)
	ret	lr
ENDPROC(v4_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 *
 *	- mm	- mm_struct describing address space
 */
ENTRY(v4_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4_flush_kern_cache_all)
#ifdef CONFIG_CPU_CP15
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
	ret	lr
#else
	/* FALLTHROUGH */
#endif

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start	- start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vma_area_struct flags describing address space
 */
ENTRY(v4_flush_user_cache_range)
#ifdef CONFIG_CPU_CP15
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ flush ID cache
	ret	lr
#else
	/* FALLTHROUGH */
#endif

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_coherent_user_range)
	mov	r0, #0
	ret	lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4_flush_kern_dcache_area)
	/* FALLTHROUGH */

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_dma_flush_range)
#ifdef CONFIG_CPU_CP15
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
#endif
	ret	lr

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4_dma_unmap_area)
	teq	r2, #DMA_TO_DEVICE
	bne	v4_dma_flush_range
	/* FALLTHROUGH */

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4_dma_map_area)
	ret	lr
ENDPROC(v4_dma_unmap_area)
ENDPROC(v4_dma_map_area)

	.globl	v4_flush_kern_cache_louis
	.equ	v4_flush_kern_cache_louis, v4_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v4
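v4_dma_unmap_area above flushes only when the device may have written memory: a DMA_TO_DEVICE transfer leaves the CPU's view intact, anything else forces the ID-cache flush. The branch logic in C, with a stub standing in for the assembly flush routine:

/* Direction values as in the kernel's enum dma_data_direction. */
enum dma_dir { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2 };

static void v4_dma_flush_range_stub(unsigned long start, unsigned long end)
{
	/* stub: the real routine is the v4_dma_flush_range entry above */
	(void)start; (void)end;
}

/* "teq r2, #DMA_TO_DEVICE; bne v4_dma_flush_range" — else fall through. */
static void v4_dma_unmap_area_sketch(unsigned long start, unsigned long size,
				     int dir)
{
	if (dir != DMA_TO_DEVICE)
		v4_dma_flush_range_stub(start, start + size);
}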
aixcc-public/challenge-001-exemplar-source
13,094
arch/arm/mm/proc-arm925.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/arch/arm/mm/arm925.S: MMU functions for ARM925 * * Copyright (C) 1999,2000 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. * Copyright (C) 2002 RidgeRun, Inc. * Copyright (C) 2002-2003 MontaVista Software, Inc. * * Update for Linux-2.6 and cache flush improvements * Copyright (C) 2004 Nokia Corporation by Tony Lindgren <tony@atomide.com> * * hacked for non-paged-MM by Hyok S. Choi, 2004. * * These are the low level assembler for performing cache and TLB * functions on the arm925. * * CONFIG_CPU_ARM925_CPU_IDLE -> nohlt * * Some additional notes based on deciphering the TI TRM on OMAP-5910: * * NOTE1: The TI925T Configuration Register bit "D-cache clean and flush * entry mode" must be 0 to flush the entries in both segments * at once. This is the default value. See TRM 2-20 and 2-24 for * more information. * * NOTE2: Default is the "D-cache clean and flush entry mode". It looks * like the "Transparent mode" must be on for partial cache flushes * to work in this mode. This mode only works with 16-bit external * memory. See TRM 2-24 for more information. * * NOTE3: Write-back cache flushing seems to be flakey with devices using * direct memory access, such as USB OHCI. The workaround is to use * write-through cache with CONFIG_CPU_DCACHE_WRITETHROUGH (this is * the default for OMAP-1510). */ #include <linux/linkage.h> #include <linux/init.h> #include <linux/pgtable.h> #include <asm/assembler.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/page.h> #include <asm/ptrace.h> #include "proc-macros.S" /* * The size of one data cache line. */ #define CACHE_DLINESIZE 16 /* * The number of data cache segments. */ #define CACHE_DSEGMENTS 2 /* * The number of lines in a cache segment. */ #define CACHE_DENTRIES 256 /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual * cache line maintenance instructions. */ #define CACHE_DLIMIT 8192 .text /* * cpu_arm925_proc_init() */ ENTRY(cpu_arm925_proc_init) ret lr /* * cpu_arm925_proc_fin() */ ENTRY(cpu_arm925_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_arm925_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * loc: location to jump to for soft reset */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_arm925_reset) /* Send software reset to MPU and DSP */ mov ip, #0xff000000 orr ip, ip, #0x00fe0000 orr ip, ip, #0x0000ce00 mov r4, #1 strh r4, [ip, #0x10] ENDPROC(cpu_arm925_reset) .popsection mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB #ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x1100 @ ...i...s........ 
mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 /* * cpu_arm925_do_idle() * * Called with IRQs disabled */ .align 10 ENTRY(cpu_arm925_do_idle) mov r0, #0 mrc p15, 0, r1, c1, c0, 0 @ Read control register mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer bic r2, r1, #1 << 12 mcr p15, 0, r2, c1, c0, 0 @ Disable I cache mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable ret lr /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(arm925_flush_icache_all) mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache ret lr ENDPROC(arm925_flush_icache_all) /* * flush_user_cache_all() * * Clean and invalidate all cache entries in a particular * address space. */ ENTRY(arm925_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. */ ENTRY(arm925_flush_kern_cache_all) mov r2, #VM_EXEC mov ip, #0 __flush_whole_cache: #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache #else /* Flush entries in both segments at once, see NOTE1 above */ mov r3, #(CACHE_DENTRIES - 1) << 4 @ 256 entries in segment 2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index subs r3, r3, #1 << 4 bcs 2b @ entries 255 to 0 #endif tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * flush_user_cache_range(start, end, flags) * * Clean and invalidate a range of cache entries in the * specified address range. * * - start - start address (inclusive) * - end - end address (exclusive) * - flags - vm_flags describing address space */ ENTRY(arm925_flush_user_cache_range) mov ip, #0 sub r3, r1, r0 @ calculate total size cmp r3, #CACHE_DLIMIT bgt __flush_whole_cache 1: tst r2, #VM_EXEC #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE #else mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE #endif cmp r0, r1 blo 1b tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm925_coherent_kern_range) /* FALLTHROUGH */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. 
* * - start - virtual start address * - end - virtual end address */ ENTRY(arm925_coherent_user_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB mov r0, #0 ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size */ ENTRY(arm925_flush_kern_dcache_area) add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm925_dma_inv_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry #endif bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_clean_range(start, end) * * Clean the specified virtual address range. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm925_dma_clean_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm925_dma_flush_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry #else mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry #endif add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm925_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq arm925_dma_clean_range bcs arm925_dma_inv_range b arm925_dma_flush_range ENDPROC(arm925_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm925_dma_unmap_area) ret lr ENDPROC(arm925_dma_unmap_area) .globl arm925_flush_kern_cache_louis .equ arm925_flush_kern_cache_louis, arm925_flush_kern_cache_all @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions arm925 ENTRY(cpu_arm925_dcache_clean_area) #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE subs r1, r1, #CACHE_DLINESIZE bhi 1b #endif mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* =============================== PageTable ============================== */ /* * cpu_arm925_switch_mm(pgd) * * Set the translation base pointer to be as described by pgd. 
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_arm925_switch_mm)
#ifdef CONFIG_MMU
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
	/* Flush entries in both segments at once, see NOTE1 above */
	mov	r3, #(CACHE_DENTRIES - 1) << 4	@ 256 entries in segment
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean & invalidate D index
	subs	r3, r3, #1 << 4
	bcs	2b				@ entries 255 to 0
#endif
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	ret	lr

/*
 * cpu_arm925_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_arm925_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif /* CONFIG_MMU */
	ret	lr

	.type	__arm925_setup, #function
__arm925_setup:
	mov	r0, #0

	/* Transparent on, D-cache clean & flush mode. See NOTE2 above */
	orr	r0,r0,#1 << 1			@ transparent mode on
	mcr	p15, 0, r0, c15, c1, 0		@ write TI config register

	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #4				@ disable write-back on caches explicitly
	mcr	p15, 7, r0, c15, c0, 0
#endif

	adr	r5, arm925_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
	orr	r0, r0, #0x4000			@ .1.. .... .... ....
#endif
	ret	lr
	.size	__arm925_setup, . - __arm925_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * .011 0001 ..11 1101
	 *
	 */
	.type	arm925_crval, #object
arm925_crval:
	crval	clear=0x00007f3f, mmuset=0x0000313d, ucset=0x00001130

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm925, dabort=v4t_early_abort, pabort=legacy_pabort

	.section ".rodata"

	string	cpu_arch_name, "armv4t"
	string	cpu_elf_name, "v4"
	string	cpu_arm925_name, "ARM925T"

	.align

	.section ".proc.info.init", "a"

.macro arm925_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
	.type	__\name\()_proc_info,#object
__\name\()_proc_info:
	.long	\cpu_val
	.long	\cpu_mask
	.long	PMD_TYPE_SECT | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__arm925_setup, __\name\()_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm925_name
	.long	arm925_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	arm925_cache_fns
	.size	__\name\()_proc_info, . - __\name\()_proc_info
.endm

	arm925_proc_info arm925, 0x54029250, 0xfffffff0, cpu_arm925_name
	arm925_proc_info arm915, 0x54029150, 0xfffffff0, cpu_arm925_name
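arm925_flush_user_cache_range above falls back to __flush_whole_cache once a range exceeds CACHE_DLIMIT (8KB here), since per-line maintenance costs one MCR per 16-byte line. The cutoff expressed in C:

#define CACHE_DLIMIT	8192	/* from proc-arm925.S above */

/* asm: "sub r3, r1, r0; cmp r3, #CACHE_DLIMIT; bgt __flush_whole_cache" */
static int flush_whole_cache_instead(unsigned long start, unsigned long end)
{
	return (end - start) > CACHE_DLIMIT;
}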
aixcc-public/challenge-001-exemplar-source
11,127
arch/arm/mm/proc-mohawk.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/arch/arm/mm/proc-mohawk.S: MMU functions for Marvell PJ1 core * * PJ1 (codename Mohawk) is a hybrid of the xscale3 and Marvell's own core. * * Heavily based on proc-arm926.S and proc-xsc3.S */ #include <linux/linkage.h> #include <linux/init.h> #include <linux/pgtable.h> #include <asm/assembler.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/page.h> #include <asm/ptrace.h> #include "proc-macros.S" /* * This is the maximum size of an area which will be flushed. If the * area is larger than this, then we flush the whole cache. */ #define CACHE_DLIMIT 32768 /* * The cache line size of the L1 D cache. */ #define CACHE_DLINESIZE 32 /* * cpu_mohawk_proc_init() */ ENTRY(cpu_mohawk_proc_init) ret lr /* * cpu_mohawk_proc_fin() */ ENTRY(cpu_mohawk_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1800 @ ...iz........... bic r0, r0, #0x0006 @ .............ca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_mohawk_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * loc: location to jump to for soft reset * * (same as arm926) */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_mohawk_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x0007 @ .............cam bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_mohawk_reset) .popsection /* * cpu_mohawk_do_idle() * * Called with IRQs disabled */ .align 5 ENTRY(cpu_mohawk_do_idle) mov r0, #0 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer mcr p15, 0, r0, c7, c0, 4 @ wait for interrupt ret lr /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(mohawk_flush_icache_all) mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache ret lr ENDPROC(mohawk_flush_icache_all) /* * flush_user_cache_all() * * Clean and invalidate all cache entries in a particular * address space. */ ENTRY(mohawk_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. */ ENTRY(mohawk_flush_kern_cache_all) mov r2, #VM_EXEC mov ip, #0 __flush_whole_cache: mcr p15, 0, ip, c7, c14, 0 @ clean & invalidate all D cache tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcrne p15, 0, ip, c7, c10, 0 @ drain write buffer ret lr /* * flush_user_cache_range(start, end, flags) * * Clean and invalidate a range of cache entries in the * specified address range. * * - start - start address (inclusive) * - end - end address (exclusive) * - flags - vm_flags describing address space * * (same as arm926) */ ENTRY(mohawk_flush_user_cache_range) mov ip, #0 sub r3, r1, r0 @ calculate total size cmp r3, #CACHE_DLIMIT bgt __flush_whole_cache 1: tst r2, #VM_EXEC mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. 
If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(mohawk_coherent_kern_range) /* FALLTHROUGH */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address * * (same as arm926) */ ENTRY(mohawk_coherent_user_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB mov r0, #0 ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size */ ENTRY(mohawk_flush_kern_dcache_area) add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ mohawk_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_clean_range(start, end) * * Clean the specified virtual address range. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ mohawk_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. 
* * - start - virtual start address * - end - virtual end address */ ENTRY(mohawk_dma_flush_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(mohawk_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq mohawk_dma_clean_range bcs mohawk_dma_inv_range b mohawk_dma_flush_range ENDPROC(mohawk_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(mohawk_dma_unmap_area) ret lr ENDPROC(mohawk_dma_unmap_area) .globl mohawk_flush_kern_cache_louis .equ mohawk_flush_kern_cache_louis, mohawk_flush_kern_cache_all @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions mohawk ENTRY(cpu_mohawk_dcache_clean_area) 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE subs r1, r1, #CACHE_DLINESIZE bhi 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * cpu_mohawk_switch_mm(pgd) * * Set the translation base pointer to be as described by pgd. * * pgd: new page tables */ .align 5 ENTRY(cpu_mohawk_switch_mm) mov ip, #0 mcr p15, 0, ip, c7, c14, 0 @ clean & invalidate all D cache mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache mcr p15, 0, ip, c7, c10, 4 @ drain WB orr r0, r0, #0x18 @ cache the page table in L2 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs ret lr /* * cpu_mohawk_set_pte_ext(ptep, pte, ext) * * Set a PTE and flush it out */ .align 5 ENTRY(cpu_mohawk_set_pte_ext) #ifdef CONFIG_MMU armv3_set_pte_ext mov r0, r0 mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr #endif .globl cpu_mohawk_suspend_size .equ cpu_mohawk_suspend_size, 4 * 6 #ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_mohawk_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode mrc p15, 0, r5, c15, c1, 0 @ CP access reg mrc p15, 0, r6, c13, c0, 0 @ PID mrc p15, 0, r7, c3, c0, 0 @ domain ID mrc p15, 0, r8, c1, c0, 1 @ auxiliary control reg mrc p15, 0, r9, c1, c0, 0 @ control reg bic r4, r4, #2 @ clear frequency change bit stmia r0, {r4 - r9} @ store cp regs ldmia sp!, {r4 - r9, pc} ENDPROC(cpu_mohawk_do_suspend) ENTRY(cpu_mohawk_do_resume) ldmia r0, {r4 - r9} @ load cp regs mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB mcr p15, 0, ip, c7, c10, 4 @ drain write (&fill) buffer mcr p15, 0, ip, c7, c5, 4 @ flush prefetch buffer mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs mcr p14, 0, r4, c6, c0, 0 @ clock configuration, turbo mode. 
mcr p15, 0, r5, c15, c1, 0 @ CP access reg mcr p15, 0, r6, c13, c0, 0 @ PID mcr p15, 0, r7, c3, c0, 0 @ domain ID orr r1, r1, #0x18 @ cache the page table in L2 mcr p15, 0, r1, c2, c0, 0 @ translation table base addr mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg mov r0, r9 @ control register b cpu_resume_mmu ENDPROC(cpu_mohawk_do_resume) #endif .type __mohawk_setup, #function __mohawk_setup: mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches mcr p15, 0, r0, c7, c10, 4 @ drain write buffer mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs orr r4, r4, #0x18 @ cache the page table in L2 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer mov r0, #0 @ don't allow CP access mcr p15, 0, r0, c15, c1, 0 @ write CP access register adr r5, mohawk_crval ldmia r5, {r5, r6} mrc p15, 0, r0, c1, c0 @ get control register bic r0, r0, r5 orr r0, r0, r6 ret lr .size __mohawk_setup, . - __mohawk_setup /* * R * .RVI ZFRS BLDP WCAM * .011 1001 ..00 0101 * */ .type mohawk_crval, #object mohawk_crval: crval clear=0x00007f3f, mmuset=0x00003905, ucset=0x00001134 __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions mohawk, dabort=v5t_early_abort, pabort=legacy_pabort .section ".rodata" string cpu_arch_name, "armv5te" string cpu_elf_name, "v5" string cpu_mohawk_name, "Marvell 88SV331x" .align .section ".proc.info.init", "a" .type __88sv331x_proc_info,#object __88sv331x_proc_info: .long 0x56158000 @ Marvell 88SV331x (MOHAWK) .long 0xfffff000 .long PMD_TYPE_SECT | \ PMD_SECT_BUFFERABLE | \ PMD_SECT_CACHEABLE | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __mohawk_setup, __88sv331x_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP .long cpu_mohawk_name .long mohawk_processor_functions .long v4wbi_tlb_fns .long v4wb_user_fns .long mohawk_cache_fns .size __88sv331x_proc_info, . - __88sv331x_proc_info
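A note on the DMA dispatch in mohawk_dma_map_area above: the cmp/beq/bcs sequence selects clean, invalidate, or clean+invalidate from the direction argument in r2. A minimal C sketch of the same decision tree, assuming the standard Linux dma_data_direction values; the helper names mirror the assembly labels and are hypothetical:

    enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2 };

    extern void dma_clean_range(unsigned long start, unsigned long end);
    extern void dma_inv_range(unsigned long start, unsigned long end);
    extern void dma_flush_range(unsigned long start, unsigned long end);

    static void dma_map_area_sketch(unsigned long start, unsigned long size,
                                    enum dma_data_direction dir)
    {
        unsigned long end = start + size;      /* "add r1, r1, r0" */

        if (dir == DMA_TO_DEVICE)              /* beq: CPU wrote, device reads */
            dma_clean_range(start, end);       /* write back only */
        else if (dir == DMA_FROM_DEVICE)       /* bcs: device writes, CPU reads */
            dma_inv_range(start, end);         /* discard stale lines */
        else                                   /* DMA_BIDIRECTIONAL */
            dma_flush_range(start, end);       /* clean + invalidate */
    }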
aixcc-public/challenge-001-exemplar-source
1,065
arch/arm/mm/abort-ev5tj.S
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include "abort-macro.S"
/*
 * Function: v5tj_early_abort
 *
 * Params  : r2 = pt_regs
 *	   : r4 = aborted context pc
 *	   : r5 = aborted context psr
 *
 * Returns : r4 - r11, r13 preserved
 *
 * Purpose : obtain information about current aborted instruction.
 * Note: we read user space.  This means we might cause a data
 * abort here if the I-TLB and D-TLB aren't seeing the same
 * picture.  Unfortunately, this does happen.  We live with it.
 */
	.align	5
ENTRY(v5tj_early_abort)
	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
	bic	r1, r1, #1 << 11 | 1 << 10	@ clear bits 11 and 10 of FSR
	tst	r5, #PSR_J_BIT			@ Java?
	bne	do_DataAbort
	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
	ldreq	r3, [r4]			@ read aborted ARM instruction
	uaccess_disable ip			@ disable userspace access
	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
	beq	do_DataAbort			@ yes
	tst	r3, #1 << 20			@ L = 0 -> write
	orreq	r1, r1, #1 << 11		@ yes.
	b	do_DataAbort
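A note on the L-bit test at the end of v5tj_early_abort above: bit 20 of an ARM load/store encoding is the L (load) bit, and the abort path reports a write by setting bit 11 of the FSR it passes to do_DataAbort. A minimal C sketch of that one step:

    #include <stdint.h>

    /* Sketch only: mirror of "tst r3, #1 << 20" / "orreq r1, r1, #1 << 11". */
    static uint32_t mark_write_in_fsr(uint32_t fsr, uint32_t insn)
    {
        if (!(insn & (1u << 20)))   /* L == 0 means a store, i.e. a write */
            fsr |= 1u << 11;        /* record the write in FSR bit 11 */
        return fsr;
    }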
aixcc-public/challenge-001-exemplar-source
1,707
arch/arm/mm/tlb-fa.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/tlb-fa.S
 *
 *  Copyright (C) 2005 Faraday Corp.
 *  Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
 *
 *  Based on tlb-v4wbi.S:
 *  Copyright (C) 1997-2002 Russell King
 *
 *  ARM architecture version 4, Faraday variation.
 *  This assumes unified TLBs, with a write buffer and a branch target buffer (BTB).
 *
 *  Processors: FA520 FA526 FA626
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"

/*
 * flush_user_tlb_range(start, end, mm)
 *
 * Invalidate a range of TLB entries in the specified address space.
 *
 * - start - range start address
 * - end   - range end address
 * - mm    - mm_struct describing address space
 */
	.align	4
ENTRY(fa_flush_user_tlb_range)
	vma_vm_mm ip, r2
	act_mm	r3				@ get current->active_mm
	eors	r3, ip, r3			@ == mm ?
	retne	lr				@ no, we don't do anything
	mov	r3, #0
	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
	bic	r0, r0, #0x0ff
	bic	r0, r0, #0xf00
1:	mcr	p15, 0, r0, c8, c7, 1		@ invalidate UTLB entry
	add	r0, r0, #PAGE_SZ
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r3, c7, c10, 4		@ data write barrier
	ret	lr

ENTRY(fa_flush_kern_tlb_range)
	mov	r3, #0
	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
	bic	r0, r0, #0x0ff
	bic	r0, r0, #0xf00
1:	mcr	p15, 0, r0, c8, c7, 1		@ invalidate UTLB entry
	add	r0, r0, #PAGE_SZ
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r3, c7, c10, 4		@ data write barrier
	mcr	p15, 0, r3, c7, c5, 4		@ prefetch flush (isb)
	ret	lr

	__INITDATA

	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
	define_tlb_functions fa, fa_tlb_flags
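Both routines above share the same per-page loop. A C sketch, assuming the 4 KB page size implied by PAGE_SZ and a hypothetical invalidate_utlb_entry() wrapper for the "mcr p15, 0, r0, c8, c7, 1" instruction:

    #define PAGE_SZ 4096UL

    extern void invalidate_utlb_entry(unsigned long addr);

    /* Sketch only: the two "bic" instructions align start down to a page
     * boundary (clear the low 12 bits), then one UTLB entry is invalidated
     * per page until end is reached. */
    static void flush_tlb_range_sketch(unsigned long start, unsigned long end)
    {
        for (unsigned long addr = start & ~(PAGE_SZ - 1); addr < end; addr += PAGE_SZ)
            invalidate_utlb_entry(addr);
    }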
aixcc-public/challenge-001-exemplar-source
4,618
arch/arm/mm/cache-v4wt.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4wt.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 *  ARMv4 write through cache operations support.
 *
 *  We assume that the write buffer is not enabled.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 * *** This needs benchmarking
 */
#define CACHE_DLIMIT	16384

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4wt_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(v4wt_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(v4wt_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(v4wt_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
	ret	lr

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (inclusive, page aligned)
 * - end   - end address (exclusive, page aligned)
 * - flags - vm_area_struct flags describing address space
 */
ENTRY(v4wt_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	ret	lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v4wt_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v4wt_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	ret	lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr - kernel address
 * - size - region size
 */
ENTRY(v4wt_flush_kern_dcache_area)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
	add	r1, r0, r1
	/* fallthrough */

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
v4wt_dma_inv_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	ret	lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
	.globl	v4wt_dma_flush_range
	.equ	v4wt_dma_flush_range, v4wt_dma_inv_range

/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(v4wt_dma_unmap_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v4wt_dma_inv_range
	/* FALLTHROUGH */

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(v4wt_dma_map_area)
	ret	lr
ENDPROC(v4wt_dma_unmap_area)
ENDPROC(v4wt_dma_map_area)

	.globl	v4wt_flush_kern_cache_louis
	.equ	v4wt_flush_kern_cache_louis, v4wt_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v4wt
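The CACHE_DLIMIT cutoff used by v4wt_flush_user_cache_range above trades per-line work against a whole-cache flush. A C sketch of the policy, with hypothetical helper names standing in for the mcr instructions:

    #define CACHE_DLIMIT    16384
    #define CACHE_DLINESIZE 32

    extern void flush_whole_dcache(void);
    extern void invalidate_dcache_line(unsigned long addr);

    /* Sketch only: "cmp r3, #CACHE_DLIMIT; bhs __flush_whole_cache" followed
     * by the line-by-line loop. */
    static void flush_range_sketch(unsigned long start, unsigned long end)
    {
        if (end - start >= CACHE_DLIMIT) {
            flush_whole_dcache();
            return;
        }
        for (unsigned long addr = start; addr < end; addr += CACHE_DLINESIZE)
            invalidate_dcache_line(addr);
    }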
aixcc-public/challenge-001-exemplar-source
12,203
arch/arm/mm/proc-arm1020.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/arch/arm/mm/proc-arm1020.S: MMU functions for ARM1020 * * Copyright (C) 2000 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. * hacked for non-paged-MM by Hyok S. Choi, 2003. * * These are the low level assembler for performing cache and TLB * functions on the arm1020. */ #include <linux/linkage.h> #include <linux/init.h> #include <linux/pgtable.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/ptrace.h> #include "proc-macros.S" /* * This is the maximum size of an area which will be invalidated * using the single invalidate entry instructions. Anything larger * than this, and we go for the whole cache. * * This value should be chosen such that we choose the cheapest * alternative. */ #define MAX_AREA_SIZE 32768 /* * The size of one data cache line. */ #define CACHE_DLINESIZE 32 /* * The number of data cache segments. */ #define CACHE_DSEGMENTS 16 /* * The number of lines in a cache segment. */ #define CACHE_DENTRIES 64 /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual * cache line maintenance instructions. */ #define CACHE_DLIMIT 32768 .text /* * cpu_arm1020_proc_init() */ ENTRY(cpu_arm1020_proc_init) ret lr /* * cpu_arm1020_proc_fin() */ ENTRY(cpu_arm1020_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_arm1020_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * loc: location to jump to for soft reset */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_arm1020_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB #ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_arm1020_reset) .popsection /* * cpu_arm1020_do_idle() */ .align 5 ENTRY(cpu_arm1020_do_idle) mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt ret lr /* ================================= CACHE ================================ */ .align 5 /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(arm1020_flush_icache_all) #ifndef CONFIG_CPU_ICACHE_DISABLE mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache #endif ret lr ENDPROC(arm1020_flush_icache_all) /* * flush_user_cache_all() * * Invalidate all cache entries in a particular address * space. */ ENTRY(arm1020_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. 
*/ ENTRY(arm1020_flush_kern_cache_all) mov r2, #VM_EXEC mov ip, #0 __flush_whole_cache: #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, ip, c7, c10, 4 @ drain WB mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries 2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index mcr p15, 0, ip, c7, c10, 4 @ drain WB subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 5 bcs 1b @ segments 15 to 0 #endif tst r2, #VM_EXEC #ifndef CONFIG_CPU_ICACHE_DISABLE mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache #endif mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * flush_user_cache_range(start, end, flags) * * Invalidate a range of cache entries in the specified * address space. * * - start - start address (inclusive) * - end - end address (exclusive) * - flags - vm_flags for this space */ ENTRY(arm1020_flush_user_cache_range) mov ip, #0 sub r3, r1, r0 @ calculate total size cmp r3, #CACHE_DLIMIT bhs __flush_whole_cache #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, ip, c7, c10, 4 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry mcr p15, 0, ip, c7, c10, 4 @ drain WB add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif tst r2, #VM_EXEC #ifndef CONFIG_CPU_ICACHE_DISABLE mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache #endif mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm1020_coherent_kern_range) /* FALLTRHOUGH */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm1020_coherent_user_range) mov ip, #0 bic r0, r0, #CACHE_DLINESIZE - 1 mcr p15, 0, ip, c7, c10, 4 1: #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, ip, c7, c10, 4 @ drain WB #endif #ifndef CONFIG_CPU_ICACHE_DISABLE mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry #endif add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, ip, c7, c10, 4 @ drain WB mov r0, #0 ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size */ ENTRY(arm1020_flush_kern_dcache_area) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry mcr p15, 0, ip, c7, c10, 4 @ drain WB add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. 
* * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm1020_dma_inv_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, ip, c7, c10, 4 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry mcrne p15, 0, ip, c7, c10, 4 @ drain WB tst r1, #CACHE_DLINESIZE - 1 mcrne p15, 0, ip, c7, c10, 4 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry mcrne p15, 0, ip, c7, c10, 4 @ drain WB 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_clean_range(start, end) * * Clean the specified virtual address range. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm1020_dma_clean_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, ip, c7, c10, 4 @ drain WB add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm1020_dma_flush_range) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 mcr p15, 0, ip, c7, c10, 4 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry mcr p15, 0, ip, c7, c10, 4 @ drain WB add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm1020_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq arm1020_dma_clean_range bcs arm1020_dma_inv_range b arm1020_dma_flush_range ENDPROC(arm1020_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm1020_dma_unmap_area) ret lr ENDPROC(arm1020_dma_unmap_area) .globl arm1020_flush_kern_cache_louis .equ arm1020_flush_kern_cache_louis, arm1020_flush_kern_cache_all @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions arm1020 .align 5 ENTRY(cpu_arm1020_dcache_clean_area) #ifndef CONFIG_CPU_DCACHE_DISABLE mov ip, #0 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, ip, c7, c10, 4 @ drain WB add r0, r0, #CACHE_DLINESIZE subs r1, r1, #CACHE_DLINESIZE bhi 1b #endif ret lr /* =============================== PageTable ============================== */ /* * cpu_arm1020_switch_mm(pgd) * * Set the translation base pointer to be as described by pgd. 
* * pgd: new page tables */ .align 5 ENTRY(cpu_arm1020_switch_mm) #ifdef CONFIG_MMU #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, r3, c7, c10, 4 mov r1, #0xF @ 16 segments 1: mov r3, #0x3F @ 64 entries 2: mov ip, r3, LSL #26 @ shift up entry orr ip, ip, r1, LSL #5 @ shift in/up index mcr p15, 0, ip, c7, c14, 2 @ Clean & Inval DCache entry mov ip, #0 mcr p15, 0, ip, c7, c10, 4 subs r3, r3, #1 cmp r3, #0 bge 2b @ entries 3F to 0 subs r1, r1, #1 cmp r1, #0 bge 1b @ segments 15 to 0 #endif mov r1, #0 #ifndef CONFIG_CPU_ICACHE_DISABLE mcr p15, 0, r1, c7, c5, 0 @ invalidate I cache #endif mcr p15, 0, r1, c7, c10, 4 @ drain WB mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs #endif /* CONFIG_MMU */ ret lr /* * cpu_arm1020_set_pte(ptep, pte) * * Set a PTE and flush it out */ .align 5 ENTRY(cpu_arm1020_set_pte_ext) #ifdef CONFIG_MMU armv3_set_pte_ext mov r0, r0 #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, r0, c7, c10, 4 mcr p15, 0, r0, c7, c10, 1 @ clean D entry #endif mcr p15, 0, r0, c7, c10, 4 @ drain WB #endif /* CONFIG_MMU */ ret lr .type __arm1020_setup, #function __arm1020_setup: mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 #endif adr r5, arm1020_crval ldmia r5, {r5, r6} mrc p15, 0, r0, c1, c0 @ get control register v4 bic r0, r0, r5 orr r0, r0, r6 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN orr r0, r0, #0x4000 @ .R.. .... .... .... #endif ret lr .size __arm1020_setup, . - __arm1020_setup /* * R * .RVI ZFRS BLDP WCAM * .011 1001 ..11 0101 */ .type arm1020_crval, #object arm1020_crval: crval clear=0x0000593f, mmuset=0x00003935, ucset=0x00001930 __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions arm1020, dabort=v4t_early_abort, pabort=legacy_pabort .section ".rodata" string cpu_arch_name, "armv5t" string cpu_elf_name, "v5" .type cpu_arm1020_name, #object cpu_arm1020_name: .ascii "ARM1020" #ifndef CONFIG_CPU_ICACHE_DISABLE .ascii "i" #endif #ifndef CONFIG_CPU_DCACHE_DISABLE .ascii "d" #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH .ascii "(wt)" #else .ascii "(wb)" #endif #endif #ifndef CONFIG_CPU_BPREDICT_DISABLE .ascii "B" #endif #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN .ascii "RR" #endif .ascii "\0" .size cpu_arm1020_name, . - cpu_arm1020_name .align .section ".proc.info.init", "a" .type __arm1020_proc_info,#object __arm1020_proc_info: .long 0x4104a200 @ ARM 1020T (Architecture v5T) .long 0xff0ffff0 .long PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __arm1020_setup, __arm1020_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB .long cpu_arm1020_name .long arm1020_processor_functions .long v4wbi_tlb_fns .long v4wb_user_fns .long arm1020_cache_fns .size __arm1020_proc_info, . - __arm1020_proc_info
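The clean+invalidate-by-index loop in cpu_arm1020_switch_mm above walks 16 segments of 64 lines each; the index word packs the line (entry) number into bits [31:26] and the segment number in at bit 5. A C sketch with a hypothetical wrapper for "mcr p15, 0, ip, c7, c14, 2":

    #include <stdint.h>

    extern void clean_inval_dcache_index(uint32_t idx);

    /* Sketch only: counts both indices down to zero, as the assembly does. */
    static void clean_dcache_by_index_sketch(void)
    {
        for (int seg = 15; seg >= 0; seg--)
            for (int entry = 63; entry >= 0; entry--)
                clean_inval_dcache_index(((uint32_t)entry << 26) |
                                         ((uint32_t)seg << 5));
    }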
aixcc-public/challenge-001-exemplar-source
12,863
arch/arm/mm/cache-v7.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/arch/arm/mm/cache-v7.S * * Copyright (C) 2001 Deep Blue Solutions Ltd. * Copyright (C) 2005 ARM Ltd. * * This is the "shell" of the ARMv7 processor support. */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/errno.h> #include <asm/unwind.h> #include <asm/hardware/cache-b15-rac.h> #include "proc-macros.S" #ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND .globl icache_size .data .align 2 icache_size: .long 64 .text #endif /* * The secondary kernel init calls v7_flush_dcache_all before it enables * the L1; however, the L1 comes out of reset in an undefined state, so * the clean + invalidate performed by v7_flush_dcache_all causes a bunch * of cache lines with uninitialized data and uninitialized tags to get * written out to memory, which does really unpleasant things to the main * processor. We fix this by performing an invalidate, rather than a * clean + invalidate, before jumping into the kernel. * * This function needs to be called for both secondary cores startup and * primary core resume procedures. */ ENTRY(v7_invalidate_l1) mov r0, #0 mcr p15, 2, r0, c0, c0, 0 @ select L1 data cache in CSSELR isb mrc p15, 1, r0, c0, c0, 0 @ read cache geometry from CCSIDR movw r3, #0x3ff and r3, r3, r0, lsr #3 @ 'Associativity' in CCSIDR[12:3] clz r1, r3 @ WayShift mov r2, #1 mov r3, r3, lsl r1 @ NumWays-1 shifted into bits [31:...] movs r1, r2, lsl r1 @ #1 shifted left by same amount moveq r1, #1 @ r1 needs value > 0 even if only 1 way and r2, r0, #0x7 add r2, r2, #4 @ SetShift 1: movw ip, #0x7fff and r0, ip, r0, lsr #13 @ 'NumSets' in CCSIDR[27:13] 2: mov ip, r0, lsl r2 @ NumSet << SetShift orr ip, ip, r3 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift) mcr p15, 0, ip, c7, c6, 2 subs r0, r0, #1 @ Set-- bpl 2b subs r3, r3, r1 @ Way-- bcc 3f mrc p15, 1, r0, c0, c0, 0 @ re-read cache geometry from CCSIDR b 1b 3: dsb st isb ret lr ENDPROC(v7_invalidate_l1) /* * v7_flush_icache_all() * * Flush the whole I-cache. * * Registers: * r0 - set to 0 */ ENTRY(v7_flush_icache_all) mov r0, #0 ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate ret lr ENDPROC(v7_flush_icache_all) /* * v7_flush_dcache_louis() * * Flush the D-cache up to the Level of Unification Inner Shareable * * Corrupted registers: r0-r6, r9-r10 */ ENTRY(v7_flush_dcache_louis) dmb @ ensure ordering with previous memory accesses mrc p15, 1, r0, c0, c0, 1 @ read clidr, r0 = clidr ALT_SMP(mov r3, r0, lsr #20) @ move LoUIS into position ALT_UP( mov r3, r0, lsr #26) @ move LoUU into position ands r3, r3, #7 << 1 @ extract LoU*2 field from clidr bne start_flush_levels @ LoU != 0, start flushing #ifdef CONFIG_ARM_ERRATA_643719 ALT_SMP(mrc p15, 0, r2, c0, c0, 0) @ read main ID register ALT_UP( ret lr) @ LoUU is zero, so nothing to do movw r1, #:lower16:(0x410fc090 >> 4) @ ID of ARM Cortex A9 r0p? movt r1, #:upper16:(0x410fc090 >> 4) teq r1, r2, lsr #4 @ test for errata affected core and if so... moveq r3, #1 << 1 @ fix LoUIS value beq start_flush_levels @ start flushing cache levels #endif ret lr ENDPROC(v7_flush_dcache_louis) /* * v7_flush_dcache_all() * * Flush the whole D-cache. 
* * Corrupted registers: r0-r6, r9-r10 * * - mm - mm_struct describing address space */ ENTRY(v7_flush_dcache_all) dmb @ ensure ordering with previous memory accesses mrc p15, 1, r0, c0, c0, 1 @ read clidr mov r3, r0, lsr #23 @ move LoC into position ands r3, r3, #7 << 1 @ extract LoC*2 from clidr beq finished @ if loc is 0, then no need to clean start_flush_levels: mov r10, #0 @ start clean at cache level 0 flush_levels: add r2, r10, r10, lsr #1 @ work out 3x current cache level mov r1, r0, lsr r2 @ extract cache type bits from clidr and r1, r1, #7 @ mask of the bits for current cache only cmp r1, #2 @ see what cache we have at this level blt skip @ skip if no cache, or just i-cache #ifdef CONFIG_PREEMPTION save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic #endif mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr isb @ isb to sych the new cssr&csidr mrc p15, 1, r1, c0, c0, 0 @ read the new csidr #ifdef CONFIG_PREEMPTION restore_irqs_notrace r9 #endif and r2, r1, #7 @ extract the length of the cache lines add r2, r2, #4 @ add 4 (line length offset) movw r4, #0x3ff ands r4, r4, r1, lsr #3 @ find maximum number on the way size clz r5, r4 @ find bit position of way size increment movw r6, #0x7fff and r1, r6, r1, lsr #13 @ extract max number of the index size mov r6, #1 movne r4, r4, lsl r5 @ # of ways shifted into bits [31:...] movne r6, r6, lsl r5 @ 1 shifted left by same amount loop1: mov r9, r1 @ create working copy of max index loop2: mov r5, r9, lsl r2 @ factor set number into r5 orr r5, r5, r4 @ factor way number into r5 orr r5, r5, r10 @ factor cache level into r5 mcr p15, 0, r5, c7, c14, 2 @ clean & invalidate by set/way subs r9, r9, #1 @ decrement the index bge loop2 subs r4, r4, r6 @ decrement the way bcs loop1 skip: add r10, r10, #2 @ increment cache number cmp r3, r10 #ifdef CONFIG_ARM_ERRATA_814220 dsb #endif bgt flush_levels finished: mov r10, #0 @ switch back to cache level 0 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr dsb st isb ret lr ENDPROC(v7_flush_dcache_all) /* * v7_flush_cache_all() * * Flush the entire cache system. * The data cache flush is now achieved using atomic clean / invalidates * working outwards from L1 cache. This is done using Set/Way based cache * maintenance instructions. * The instruction cache can still be invalidated back to the point of * unification in a single instruction. * */ ENTRY(v7_flush_kern_cache_all) stmfd sp!, {r4-r6, r9-r10, lr} bl v7_flush_dcache_all mov r0, #0 ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate ldmfd sp!, {r4-r6, r9-r10, lr} ret lr ENDPROC(v7_flush_kern_cache_all) /* * v7_flush_kern_cache_louis(void) * * Flush the data cache up to Level of Unification Inner Shareable. * Invalidate the I-cache to the point of unification. */ ENTRY(v7_flush_kern_cache_louis) stmfd sp!, {r4-r6, r9-r10, lr} bl v7_flush_dcache_louis mov r0, #0 ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate ldmfd sp!, {r4-r6, r9-r10, lr} ret lr ENDPROC(v7_flush_kern_cache_louis) /* * v7_flush_cache_all() * * Flush all TLB entries in a particular address space * * - mm - mm_struct describing address space */ ENTRY(v7_flush_user_cache_all) /*FALLTHROUGH*/ /* * v7_flush_cache_range(start, end, flags) * * Flush a range of TLB entries in the specified address space. 
* * - start - start address (may not be aligned) * - end - end address (exclusive, may not be aligned) * - flags - vm_area_struct flags describing address space * * It is assumed that: * - we have a VIPT cache. */ ENTRY(v7_flush_user_cache_range) ret lr ENDPROC(v7_flush_user_cache_all) ENDPROC(v7_flush_user_cache_range) /* * v7_coherent_kern_range(start,end) * * Ensure that the I and D caches are coherent within specified * region. This is typically used when code has been written to * a memory region, and will be executed. * * - start - virtual start address of region * - end - virtual end address of region * * It is assumed that: * - the Icache does not read data from the write buffer */ ENTRY(v7_coherent_kern_range) /* FALLTHROUGH */ /* * v7_coherent_user_range(start,end) * * Ensure that the I and D caches are coherent within specified * region. This is typically used when code has been written to * a memory region, and will be executed. * * - start - virtual start address of region * - end - virtual end address of region * * It is assumed that: * - the Icache does not read data from the write buffer */ ENTRY(v7_coherent_user_range) UNWIND(.fnstart ) dcache_line_size r2, r3 sub r3, r2, #1 bic r12, r0, r3 #ifdef CONFIG_ARM_ERRATA_764369 ALT_SMP(W(dsb)) ALT_UP(W(nop)) #endif 1: USER( mcr p15, 0, r12, c7, c11, 1 ) @ clean D line to the point of unification add r12, r12, r2 cmp r12, r1 blo 1b dsb ishst #ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND ldr r3, =icache_size ldr r2, [r3, #0] #else icache_line_size r2, r3 #endif sub r3, r2, #1 bic r12, r0, r3 2: USER( mcr p15, 0, r12, c7, c5, 1 ) @ invalidate I line add r12, r12, r2 cmp r12, r1 blo 2b mov r0, #0 ALT_SMP(mcr p15, 0, r0, c7, c1, 6) @ invalidate BTB Inner Shareable ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB dsb ishst isb ret lr /* * Fault handling for the cache operation above. If the virtual address in r0 * isn't mapped, fail with -EFAULT. */ 9001: #ifdef CONFIG_ARM_ERRATA_775420 dsb #endif mov r0, #-EFAULT ret lr UNWIND(.fnend ) ENDPROC(v7_coherent_kern_range) ENDPROC(v7_coherent_user_range) /* * v7_flush_kern_dcache_area(void *addr, size_t size) * * Ensure that the data held in the page kaddr is written back * to the page in question. * * - addr - kernel address * - size - region size */ ENTRY(v7_flush_kern_dcache_area) dcache_line_size r2, r3 add r1, r0, r1 sub r3, r2, #1 bic r0, r0, r3 #ifdef CONFIG_ARM_ERRATA_764369 ALT_SMP(W(dsb)) ALT_UP(W(nop)) #endif 1: mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line add r0, r0, r2 cmp r0, r1 blo 1b dsb st ret lr ENDPROC(v7_flush_kern_dcache_area) /* * v7_dma_inv_range(start,end) * * Invalidate the data cache within the specified region; we will * be performing a DMA operation in this region and we want to * purge old data in the cache. 
* * - start - virtual start address of region * - end - virtual end address of region */ v7_dma_inv_range: dcache_line_size r2, r3 sub r3, r2, #1 tst r0, r3 bic r0, r0, r3 #ifdef CONFIG_ARM_ERRATA_764369 ALT_SMP(W(dsb)) ALT_UP(W(nop)) #endif mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line addne r0, r0, r2 tst r1, r3 bic r1, r1, r3 mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D / U line cmp r0, r1 1: mcrlo p15, 0, r0, c7, c6, 1 @ invalidate D / U line addlo r0, r0, r2 cmplo r0, r1 blo 1b dsb st ret lr ENDPROC(v7_dma_inv_range) /* * v7_dma_clean_range(start,end) * - start - virtual start address of region * - end - virtual end address of region */ v7_dma_clean_range: dcache_line_size r2, r3 sub r3, r2, #1 bic r0, r0, r3 #ifdef CONFIG_ARM_ERRATA_764369 ALT_SMP(W(dsb)) ALT_UP(W(nop)) #endif 1: mcr p15, 0, r0, c7, c10, 1 @ clean D / U line add r0, r0, r2 cmp r0, r1 blo 1b dsb st ret lr ENDPROC(v7_dma_clean_range) /* * v7_dma_flush_range(start,end) * - start - virtual start address of region * - end - virtual end address of region */ ENTRY(v7_dma_flush_range) dcache_line_size r2, r3 sub r3, r2, #1 bic r0, r0, r3 #ifdef CONFIG_ARM_ERRATA_764369 ALT_SMP(W(dsb)) ALT_UP(W(nop)) #endif 1: mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line add r0, r0, r2 cmp r0, r1 blo 1b dsb st ret lr ENDPROC(v7_dma_flush_range) /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(v7_dma_map_area) add r1, r1, r0 teq r2, #DMA_FROM_DEVICE beq v7_dma_inv_range b v7_dma_clean_range ENDPROC(v7_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(v7_dma_unmap_area) add r1, r1, r0 teq r2, #DMA_TO_DEVICE bne v7_dma_inv_range ret lr ENDPROC(v7_dma_unmap_area) __INITDATA @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions v7 /* The Broadcom Brahma-B15 read-ahead cache requires some modifications * to the v7_cache_fns, we only override the ones we need */ #ifndef CONFIG_CACHE_B15_RAC globl_equ b15_flush_kern_cache_all, v7_flush_kern_cache_all #endif globl_equ b15_flush_icache_all, v7_flush_icache_all globl_equ b15_flush_kern_cache_louis, v7_flush_kern_cache_louis globl_equ b15_flush_user_cache_all, v7_flush_user_cache_all globl_equ b15_flush_user_cache_range, v7_flush_user_cache_range globl_equ b15_coherent_kern_range, v7_coherent_kern_range globl_equ b15_coherent_user_range, v7_coherent_user_range globl_equ b15_flush_kern_dcache_area, v7_flush_kern_dcache_area globl_equ b15_dma_map_area, v7_dma_map_area globl_equ b15_dma_unmap_area, v7_dma_unmap_area globl_equ b15_dma_flush_range, v7_dma_flush_range define_cache_functions b15
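v7_flush_dcache_all above derives its loop bounds from CCSIDR. A C sketch of the set/way operand construction, following the ARMv7 CCSIDR layout the code assumes: associativity minus one in bits [12:3], sets minus one in bits [27:13], and (ccsidr & 7) + 4 equal to log2 of the line length in bytes. It assumes at least two ways, since the assembly special-cases a single-way cache:

    #include <stdint.h>

    /* Sketch only: builds the value written with "mcr p15, 0, r5, c7, c14, 2". */
    static uint32_t set_way_operand(uint32_t ccsidr, uint32_t way,
                                    uint32_t set, uint32_t level)
    {
        uint32_t max_way  = (ccsidr >> 3) & 0x3ff;     /* ways - 1 */
        uint32_t wayshift = __builtin_clz(max_way);    /* "clz r1, r3" */
        uint32_t setshift = (ccsidr & 0x7) + 4;        /* line length offset */

        return (way << wayshift) | (set << setshift) | (level << 1);
    }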
aixcc-public/challenge-001-exemplar-source
10,917
arch/arm/mm/proc-arm1026.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/arch/arm/mm/proc-arm1026.S: MMU functions for ARM1026EJ-S * * Copyright (C) 2000 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. * hacked for non-paged-MM by Hyok S. Choi, 2003. * * These are the low level assembler for performing cache and TLB * functions on the ARM1026EJ-S. */ #include <linux/linkage.h> #include <linux/init.h> #include <linux/pgtable.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/ptrace.h> #include "proc-macros.S" /* * This is the maximum size of an area which will be invalidated * using the single invalidate entry instructions. Anything larger * than this, and we go for the whole cache. * * This value should be chosen such that we choose the cheapest * alternative. */ #define MAX_AREA_SIZE 32768 /* * The size of one data cache line. */ #define CACHE_DLINESIZE 32 /* * The number of data cache segments. */ #define CACHE_DSEGMENTS 16 /* * The number of lines in a cache segment. */ #define CACHE_DENTRIES 64 /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual * cache line maintenance instructions. */ #define CACHE_DLIMIT 32768 .text /* * cpu_arm1026_proc_init() */ ENTRY(cpu_arm1026_proc_init) ret lr /* * cpu_arm1026_proc_fin() */ ENTRY(cpu_arm1026_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_arm1026_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * loc: location to jump to for soft reset */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_arm1026_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB #ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_arm1026_reset) .popsection /* * cpu_arm1026_do_idle() */ .align 5 ENTRY(cpu_arm1026_do_idle) mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt ret lr /* ================================= CACHE ================================ */ .align 5 /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(arm1026_flush_icache_all) #ifndef CONFIG_CPU_ICACHE_DISABLE mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache #endif ret lr ENDPROC(arm1026_flush_icache_all) /* * flush_user_cache_all() * * Invalidate all cache entries in a particular address * space. */ ENTRY(arm1026_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. */ ENTRY(arm1026_flush_kern_cache_all) mov r2, #VM_EXEC mov ip, #0 __flush_whole_cache: #ifndef CONFIG_CPU_DCACHE_DISABLE 1: mrc p15, 0, APSR_nzcv, c7, c14, 3 @ test, clean, invalidate bne 1b #endif tst r2, #VM_EXEC #ifndef CONFIG_CPU_ICACHE_DISABLE mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache #endif mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * flush_user_cache_range(start, end, flags) * * Invalidate a range of cache entries in the specified * address space. 
* * - start - start address (inclusive) * - end - end address (exclusive) * - flags - vm_flags for this space */ ENTRY(arm1026_flush_user_cache_range) mov ip, #0 sub r3, r1, r0 @ calculate total size cmp r3, #CACHE_DLIMIT bhs __flush_whole_cache #ifndef CONFIG_CPU_DCACHE_DISABLE 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif tst r2, #VM_EXEC #ifndef CONFIG_CPU_ICACHE_DISABLE mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache #endif mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm1026_coherent_kern_range) /* FALLTHROUGH */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm1026_coherent_user_range) mov ip, #0 bic r0, r0, #CACHE_DLINESIZE - 1 1: #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, r0, c7, c10, 1 @ clean D entry #endif #ifndef CONFIG_CPU_ICACHE_DISABLE mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry #endif add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, ip, c7, c10, 4 @ drain WB mov r0, #0 ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size */ ENTRY(arm1026_flush_kern_dcache_area) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm1026_dma_inv_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_clean_range(start, end) * * Clean the specified virtual address range. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm1026_dma_clean_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. 
* * - start - virtual start address * - end - virtual end address */ ENTRY(arm1026_dma_flush_range) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm1026_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq arm1026_dma_clean_range bcs arm1026_dma_inv_range b arm1026_dma_flush_range ENDPROC(arm1026_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm1026_dma_unmap_area) ret lr ENDPROC(arm1026_dma_unmap_area) .globl arm1026_flush_kern_cache_louis .equ arm1026_flush_kern_cache_louis, arm1026_flush_kern_cache_all @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions arm1026 .align 5 ENTRY(cpu_arm1026_dcache_clean_area) #ifndef CONFIG_CPU_DCACHE_DISABLE mov ip, #0 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE subs r1, r1, #CACHE_DLINESIZE bhi 1b #endif ret lr /* =============================== PageTable ============================== */ /* * cpu_arm1026_switch_mm(pgd) * * Set the translation base pointer to be as described by pgd. * * pgd: new page tables */ .align 5 ENTRY(cpu_arm1026_switch_mm) #ifdef CONFIG_MMU mov r1, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE 1: mrc p15, 0, APSR_nzcv, c7, c14, 3 @ test, clean, invalidate bne 1b #endif #ifndef CONFIG_CPU_ICACHE_DISABLE mcr p15, 0, r1, c7, c5, 0 @ invalidate I cache #endif mcr p15, 0, r1, c7, c10, 4 @ drain WB mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs #endif ret lr /* * cpu_arm1026_set_pte_ext(ptep, pte, ext) * * Set a PTE and flush it out */ .align 5 ENTRY(cpu_arm1026_set_pte_ext) #ifdef CONFIG_MMU armv3_set_pte_ext mov r0, r0 #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, r0, c7, c10, 1 @ clean D entry #endif #endif /* CONFIG_MMU */ ret lr .type __arm1026_setup, #function __arm1026_setup: mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 mcr p15, 0, r4, c2, c0 @ load page table pointer #endif #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mov r0, #4 @ explicitly disable writeback mcr p15, 7, r0, c15, c0, 0 #endif adr r5, arm1026_crval ldmia r5, {r5, r6} mrc p15, 0, r0, c1, c0 @ get control register v4 bic r0, r0, r5 orr r0, r0, r6 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN orr r0, r0, #0x4000 @ .R.. .... .... .... #endif ret lr .size __arm1026_setup, . 
- __arm1026_setup /* * R * .RVI ZFRS BLDP WCAM * .011 1001 ..11 0101 * */ .type arm1026_crval, #object arm1026_crval: crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001934 __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions arm1026, dabort=v5t_early_abort, pabort=legacy_pabort .section .rodata string cpu_arch_name, "armv5tej" string cpu_elf_name, "v5" .align string cpu_arm1026_name, "ARM1026EJ-S" .align .section ".proc.info.init", "a" .type __arm1026_proc_info,#object __arm1026_proc_info: .long 0x4106a260 @ ARM 1026EJ-S (v5TEJ) .long 0xff0ffff0 .long PMD_TYPE_SECT | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __arm1026_setup, __arm1026_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA .long cpu_arm1026_name .long arm1026_processor_functions .long v4wbi_tlb_fns .long v4wb_user_fns .long arm1026_cache_fns .size __arm1026_proc_info, . - __arm1026_proc_info
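The "test, clean, invalidate" idiom used by arm1026_flush_kern_cache_all and cpu_arm1026_switch_mm above pushes the cache walk into hardware: each "mrc p15, 0, APSR_nzcv, c7, c14, 3" cleans and invalidates one dirty line and sets the condition flags, so software simply retries until the D-cache reports clean. A C sketch with a hypothetical wrapper:

    extern int dcache_test_clean_invalidate(void);  /* nonzero once clean */

    /* Sketch only: mirrors "1: mrc ...; bne 1b". */
    static void flush_dcache_sketch(void)
    {
        while (!dcache_test_clean_invalidate())
            ;   /* keep issuing the op until no dirty lines remain */
    }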
aixcc-public/challenge-001-exemplar-source
1,188
arch/arm/mm/abort-macro.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * The ARM LDRD and Thumb LDRSB instructions use bit 20/11 (ARM/Thumb)
 * differently than every other instruction, so it is set to 0 (write)
 * even though the instructions are read instructions. This means that
 * during an abort the instructions will be treated as a write and the
 * handler will raise a signal from unwriteable locations if they
 * fault. We have to specifically check for these instructions
 * from the abort handlers to treat them properly.
 */

	.macro	do_thumb_abort, fsr, pc, psr, tmp
	tst	\psr, #PSR_T_BIT
	beq	not_thumb
	ldrh	\tmp, [\pc]			@ Read aborted Thumb instruction
	uaccess_disable ip			@ disable userspace access
	and	\tmp, \tmp, #0xfe00		@ Mask opcode field
	cmp	\tmp, #0x5600			@ Is it ldrsb?
	orreq	\tmp, \tmp, #1 << 11		@ Set L-bit if yes
	tst	\tmp, #1 << 11			@ L = 0 -> write
	orreq	\fsr, \fsr, #1 << 11		@ yes.
	b	do_DataAbort
not_thumb:
	.endm

/*
 * We check for the following instruction encoding for LDRD.
 *
 * [27:25] == 000
 *   [7:4] == 1101
 *    [20] == 0
 */
	.macro	teq_ldrd, tmp, insn
	mov	\tmp, #0x0e100000
	orr	\tmp, #0x000000f0
	and	\tmp, \insn, \tmp
	teq	\tmp, #0x000000d0
	.endm
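The teq_ldrd macro above builds the mask 0x0e1000f0 in two steps because an ARM data-processing immediate is limited to a rotated 8-bit value, then compares against 0x000000d0. The equivalent predicate in C:

    #include <stdbool.h>
    #include <stdint.h>

    /* Matches [27:25] == 000, [7:4] == 1101, [20] == 0, per the comment
     * block above. */
    static bool insn_is_ldrd(uint32_t insn)
    {
        return (insn & 0x0e1000f0u) == 0x000000d0u;
    }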
aixcc-public/challenge-001-exemplar-source
6,163
arch/arm/mm/cache-v4wb.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/arch/arm/mm/cache-v4wb.S * * Copyright (C) 1997-2002 Russell king */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/memory.h> #include <asm/page.h> #include "proc-macros.S" /* * The size of one data cache line. */ #define CACHE_DLINESIZE 32 /* * The total size of the data cache. */ #if defined(CONFIG_CPU_SA110) # define CACHE_DSIZE 16384 #elif defined(CONFIG_CPU_SA1100) # define CACHE_DSIZE 8192 #else # error Unknown cache size #endif /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual * cache line maintenance instructions. * * Size Clean (ticks) Dirty (ticks) * 4096 21 20 21 53 55 54 * 8192 40 41 40 106 100 102 * 16384 77 77 76 140 140 138 * 32768 150 149 150 214 216 212 <--- * 65536 296 297 296 351 358 361 * 131072 591 591 591 656 657 651 * Whole 132 136 132 221 217 207 <--- */ #define CACHE_DLIMIT (CACHE_DSIZE * 4) .data .align 2 flush_base: .long FLUSH_BASE .text /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(v4wb_flush_icache_all) mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache ret lr ENDPROC(v4wb_flush_icache_all) /* * flush_user_cache_all() * * Clean and invalidate all cache entries in a particular address * space. */ ENTRY(v4wb_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. */ ENTRY(v4wb_flush_kern_cache_all) mov ip, #0 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache __flush_whole_cache: ldr r3, =flush_base ldr r1, [r3, #0] eor r1, r1, #CACHE_DSIZE str r1, [r3, #0] add r2, r1, #CACHE_DSIZE 1: ldr r3, [r1], #32 cmp r1, r2 blo 1b #ifdef FLUSH_BASE_MINICACHE add r2, r2, #FLUSH_BASE_MINICACHE - FLUSH_BASE sub r1, r2, #512 @ only 512 bytes 1: ldr r3, [r1], #32 cmp r1, r2 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain write buffer ret lr /* * flush_user_cache_range(start, end, flags) * * Invalidate a range of cache entries in the specified * address space. * * - start - start address (inclusive, page aligned) * - end - end address (exclusive, page aligned) * - flags - vma_area_struct flags describing address space */ ENTRY(v4wb_flush_user_cache_range) mov ip, #0 sub r3, r1, r0 @ calculate total size tst r2, #VM_EXEC @ executable region? mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache cmp r3, #CACHE_DLIMIT @ total size >= limit? bhs __flush_whole_cache @ flush whole D cache 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size */ ENTRY(v4wb_flush_kern_dcache_area) add r1, r0, r1 /* fall through */ /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(v4wb_coherent_kern_range) /* fall through */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. 
* * - start - virtual start address * - end - virtual end address */ ENTRY(v4wb_coherent_user_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. * * - start - virtual start address * - end - virtual end address */ v4wb_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain write buffer ret lr /* * dma_clean_range(start, end) * * Clean (write back) the specified virtual address range. * * - start - virtual start address * - end - virtual end address */ v4wb_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain write buffer ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. * * - start - virtual start address * - end - virtual end address * * This is actually the same as v4wb_coherent_kern_range() */ .globl v4wb_dma_flush_range .set v4wb_dma_flush_range, v4wb_coherent_kern_range /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(v4wb_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq v4wb_dma_clean_range bcs v4wb_dma_inv_range b v4wb_dma_flush_range ENDPROC(v4wb_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(v4wb_dma_unmap_area) ret lr ENDPROC(v4wb_dma_unmap_area) .globl v4wb_flush_kern_cache_louis .equ v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all __INITDATA @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions v4wb
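The whole-cache flush in v4wb_flush_kern_cache_all above never issues a clean-by-address op; it reads CACHE_DSIZE bytes from a reserved flush window, forcing every dirty line to be evicted and written back. XOR-ing flush_base with CACHE_DSIZE ping-pongs between two halves of the window so back-to-back flushes do not hit already-resident lines. A C sketch under those assumptions:

    #define CACHE_DSIZE 8192UL   /* SA1100 value above; SA110 uses 16384 */

    static unsigned long flush_base;   /* points into a reserved, mapped window */

    /* Sketch only: one load per 32-byte line across the whole cache size. */
    static void flush_whole_dcache_sketch(void)
    {
        flush_base ^= CACHE_DSIZE;     /* "eor r1, r1, #CACHE_DSIZE" */
        for (volatile unsigned long *p = (unsigned long *)flush_base;
             p < (unsigned long *)(flush_base + CACHE_DSIZE);
             p += 32 / sizeof(*p))
            (void)*p;                  /* load evicts (and writes back) a line */
    }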
aixcc-public/challenge-001-exemplar-source
9,105
arch/arm/mm/proc-arm940.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/arch/arm/mm/arm940.S: utility functions for ARM940T * * Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com) */ #include <linux/linkage.h> #include <linux/init.h> #include <linux/pgtable.h> #include <asm/assembler.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/ptrace.h> #include "proc-macros.S" /* ARM940T has a 4KB DCache comprising 256 lines of 4 words */ #define CACHE_DLINESIZE 16 #define CACHE_DSEGMENTS 4 #define CACHE_DENTRIES 64 .text /* * cpu_arm940_proc_init() * cpu_arm940_switch_mm() * * These are not required. */ ENTRY(cpu_arm940_proc_init) ENTRY(cpu_arm940_switch_mm) ret lr /* * cpu_arm940_proc_fin() */ ENTRY(cpu_arm940_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x00001000 @ i-cache bic r0, r0, #0x00000004 @ d-cache mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_arm940_reset(loc) * Params : r0 = address to jump to * Notes : This sets up everything for a reset */ .pushsection .idmap.text, "ax" ENTRY(cpu_arm940_reset) mov ip, #0 mcr p15, 0, ip, c7, c5, 0 @ flush I cache mcr p15, 0, ip, c7, c6, 0 @ flush D cache mcr p15, 0, ip, c7, c10, 4 @ drain WB mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x00000005 @ .............c.p bic ip, ip, #0x00001000 @ i-cache mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_arm940_reset) .popsection /* * cpu_arm940_do_idle() */ .align 5 ENTRY(cpu_arm940_do_idle) mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt ret lr /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(arm940_flush_icache_all) mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache ret lr ENDPROC(arm940_flush_icache_all) /* * flush_user_cache_all() */ ENTRY(arm940_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. */ ENTRY(arm940_flush_kern_cache_all) mov r2, #VM_EXEC /* FALLTHROUGH */ /* * flush_user_cache_range(start, end, flags) * * There is no efficient way to flush a range of cache entries * in the specified address range. Thus, flushes all. * * - start - start address (inclusive) * - end - end address (exclusive) * - flags - vm_flags describing address space */ ENTRY(arm940_flush_user_cache_range) mov ip, #0 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, ip, c7, c6, 0 @ flush D cache #else mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries 2: mcr p15, 0, r3, c7, c14, 2 @ clean/flush D index subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 4 bcs 1b @ segments 3 to 0 #endif tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm940_coherent_kern_range) /* FALLTHROUGH */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. 
* * - start - virtual start address * - end - virtual end address */ ENTRY(arm940_coherent_user_range) /* FALLTHROUGH */ /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size */ ENTRY(arm940_flush_kern_dcache_area) mov r0, #0 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries 2: mcr p15, 0, r3, c7, c14, 2 @ clean/flush D index subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 4 bcs 1b @ segments 3 to 0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_inv_range(start, end) * * There is no efficient way to invalidate a specified virtual * address range. Thus, invalidates all. * * - start - virtual start address * - end - virtual end address */ arm940_dma_inv_range: mov ip, #0 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries 2: mcr p15, 0, r3, c7, c6, 2 @ flush D entry subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 4 bcs 1b @ segments 3 to 0 mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_clean_range(start, end) * * There is no efficient way to clean a specified virtual * address range. Thus, cleans all. * * - start - virtual start address * - end - virtual end address */ arm940_dma_clean_range: ENTRY(cpu_arm940_dcache_clean_area) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries 2: mcr p15, 0, r3, c7, c10, 2 @ clean D entry subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 4 bcs 1b @ segments 3 to 0 #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_flush_range(start, end) * * There is no efficient way to clean and invalidate a specified * virtual address range. 
* * - start - virtual start address * - end - virtual end address */ ENTRY(arm940_dma_flush_range) mov ip, #0 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries 2: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, r3, c7, c14, 2 @ clean/flush D entry #else mcr p15, 0, r3, c7, c6, 2 @ invalidate D entry #endif subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 4 bcs 1b @ segments 3 to 0 mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm940_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq arm940_dma_clean_range bcs arm940_dma_inv_range b arm940_dma_flush_range ENDPROC(arm940_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm940_dma_unmap_area) ret lr ENDPROC(arm940_dma_unmap_area) .globl arm940_flush_kern_cache_louis .equ arm940_flush_kern_cache_louis, arm940_flush_kern_cache_all @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions arm940 .type __arm940_setup, #function __arm940_setup: mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c6, 0 @ invalidate D cache mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c6, c3, 0 @ disable data area 3~7 mcr p15, 0, r0, c6, c4, 0 mcr p15, 0, r0, c6, c5, 0 mcr p15, 0, r0, c6, c6, 0 mcr p15, 0, r0, c6, c7, 0 mcr p15, 0, r0, c6, c3, 1 @ disable instruction area 3~7 mcr p15, 0, r0, c6, c4, 1 mcr p15, 0, r0, c6, c5, 1 mcr p15, 0, r0, c6, c6, 1 mcr p15, 0, r0, c6, c7, 1 mov r0, #0x0000003F @ base = 0, size = 4GB mcr p15, 0, r0, c6, c0, 0 @ set area 0, default mcr p15, 0, r0, c6, c0, 1 ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM ldr r7, =CONFIG_DRAM_SIZE >> 12 @ size of RAM (must be >= 4KB) pr_val r3, r0, r7, #1 mcr p15, 0, r3, c6, c1, 0 @ set area 1, RAM mcr p15, 0, r3, c6, c1, 1 ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH ldr r7, =CONFIG_FLASH_SIZE @ size of FLASH (must be >= 4KB) pr_val r3, r0, r7, #1 mcr p15, 0, r3, c6, c2, 0 @ set area 2, ROM/FLASH mcr p15, 0, r3, c6, c2, 1 mov r0, #0x06 mcr p15, 0, r0, c2, c0, 0 @ Region 1&2 cacheable mcr p15, 0, r0, c2, c0, 1 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mov r0, #0x00 @ disable whole write buffer #else mov r0, #0x02 @ Region 1 write buffered #endif mcr p15, 0, r0, c3, c0, 0 mov r0, #0x10000 sub r0, r0, #1 @ r0 = 0xffff mcr p15, 0, r0, c5, c0, 0 @ all read/write access mcr p15, 0, r0, c5, c0, 1 mrc p15, 0, r0, c1, c0 @ get control register orr r0, r0, #0x00001000 @ I-cache orr r0, r0, #0x00000005 @ MPU/D-cache ret lr .size __arm940_setup, . - __arm940_setup __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions arm940, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1 .section ".rodata" string cpu_arch_name, "armv4t" string cpu_elf_name, "v4" string cpu_arm940_name, "ARM940T" .align .section ".proc.info.init", "a" .type __arm940_proc_info,#object __arm940_proc_info: .long 0x41009400 .long 0xff00fff0 .long 0 initfn __arm940_setup, __arm940_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB .long cpu_arm940_name .long arm940_processor_functions .long 0 .long 0 .long arm940_cache_fns .size __arm940_proc_info, . - __arm940_proc_info
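/*
 * A sketch of the index-op operand layout the arm940 loops above rely
 * on (assumed from the ARM940T encoding of "mcr p15, 0, rX, c7, cY, 2"):
 * the cache line index sits in bits [31:26] and the segment in bits
 * [5:4], so the nested loops visit all 4 * 64 = 256 lines:
 *
 *	mov	r1, #(CACHE_DSEGMENTS - 1) << 4		@ segment 3..0
 * 1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ entry 63..0
 * 2:	mcr	p15, 0, r3, c7, c14, 2	@ clean+invalidate one line
 *	subs	r3, r3, #1 << 26	@ next entry, C clears on wrap
 *	bcs	2b
 *	subs	r1, r1, #1 << 4		@ next segment
 *	bcs	1b
 */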
aixcc-public/challenge-001-exemplar-source
1,786
arch/arm/mm/tlb-v4wb.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/arch/arm/mm/tlb-v4wb.S * * Copyright (C) 1997-2002 Russell King * * ARM architecture version 4 TLB handling functions. * These assume split I/D TLBs, without a single-entry I TLB * invalidate operation, and with a write buffer. * * Processors: SA110 SA1100 SA1110 */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/tlbflush.h> #include "proc-macros.S" .align 5 /* * v4wb_flush_user_tlb_range(start, end, mm) * * Invalidate a range of TLB entries in the specified address space. * * - start - range start address * - end - range end address * - mm - mm_struct describing address space */ .align 5 ENTRY(v4wb_flush_user_tlb_range) vma_vm_mm ip, r2 act_mm r3 @ get current->active_mm eors r3, ip, r3 @ == mm ? retne lr @ no, we don't do anything vma_vm_flags r2, r2 mcr p15, 0, r3, c7, c10, 4 @ drain WB tst r2, #VM_EXEC mcrne p15, 0, r3, c8, c5, 0 @ invalidate I TLB bic r0, r0, #0x0ff bic r0, r0, #0xf00 1: mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry add r0, r0, #PAGE_SZ cmp r0, r1 blo 1b ret lr /* * v4wb_flush_kern_tlb_range(start, end) * * Invalidate a range of TLB entries in the specified kernel * address range. * * - start - virtual address (may not be aligned) * - end - virtual address (may not be aligned) */ ENTRY(v4wb_flush_kern_tlb_range) mov r3, #0 mcr p15, 0, r3, c7, c10, 4 @ drain WB bic r0, r0, #0x0ff bic r0, r0, #0xf00 mcr p15, 0, r3, c8, c5, 0 @ invalidate I TLB 1: mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry add r0, r0, #PAGE_SZ cmp r0, r1 blo 1b ret lr __INITDATA /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ define_tlb_functions v4wb, v4wb_tlb_flags
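/*
 * Why the loops above page-align with two BICs (assuming 4KB pages):
 * an ARM data-processing immediate is an 8-bit value rotated right by
 * an even amount, so #0xfff is not encodable in a single instruction
 * and clearing the low 12 bits takes two steps:
 *
 *	bic	r0, r0, #0x0ff		@ clear bits [7:0]
 *	bic	r0, r0, #0xf00		@ clear bits [11:8]
 *
 * leaving r0 page-aligned before the invalidate-by-MVA loop.
 */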
aixcc-public/challenge-001-exemplar-source
11,098
arch/arm/mm/proc-arm1022.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/arch/arm/mm/proc-arm1022.S: MMU functions for ARM1022E * * Copyright (C) 2000 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. * hacked for non-paged-MM by Hyok S. Choi, 2003. * * These are the low level assembler for performing cache and TLB * functions on the ARM1022E. */ #include <linux/linkage.h> #include <linux/init.h> #include <linux/pgtable.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/ptrace.h> #include "proc-macros.S" /* * This is the maximum size of an area which will be invalidated * using the single invalidate entry instructions. Anything larger * than this, and we go for the whole cache. * * This value should be chosen such that we choose the cheapest * alternative. */ #define MAX_AREA_SIZE 32768 /* * The size of one data cache line. */ #define CACHE_DLINESIZE 32 /* * The number of data cache segments. */ #define CACHE_DSEGMENTS 16 /* * The number of lines in a cache segment. */ #define CACHE_DENTRIES 64 /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual * cache line maintenance instructions. */ #define CACHE_DLIMIT 32768 .text /* * cpu_arm1022_proc_init() */ ENTRY(cpu_arm1022_proc_init) ret lr /* * cpu_arm1022_proc_fin() */ ENTRY(cpu_arm1022_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_arm1022_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * loc: location to jump to for soft reset */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_arm1022_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB #ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_arm1022_reset) .popsection /* * cpu_arm1022_do_idle() */ .align 5 ENTRY(cpu_arm1022_do_idle) mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt ret lr /* ================================= CACHE ================================ */ .align 5 /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(arm1022_flush_icache_all) #ifndef CONFIG_CPU_ICACHE_DISABLE mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache #endif ret lr ENDPROC(arm1022_flush_icache_all) /* * flush_user_cache_all() * * Invalidate all cache entries in a particular address * space. */ ENTRY(arm1022_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. 
*/ ENTRY(arm1022_flush_kern_cache_all) mov r2, #VM_EXEC mov ip, #0 __flush_whole_cache: #ifndef CONFIG_CPU_DCACHE_DISABLE mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries 2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 5 bcs 1b @ segments 15 to 0 #endif tst r2, #VM_EXEC #ifndef CONFIG_CPU_ICACHE_DISABLE mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache #endif mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * flush_user_cache_range(start, end, flags) * * Invalidate a range of cache entries in the specified * address space. * * - start - start address (inclusive) * - end - end address (exclusive) * - flags - vm_flags for this space */ ENTRY(arm1022_flush_user_cache_range) mov ip, #0 sub r3, r1, r0 @ calculate total size cmp r3, #CACHE_DLIMIT bhs __flush_whole_cache #ifndef CONFIG_CPU_DCACHE_DISABLE 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif tst r2, #VM_EXEC #ifndef CONFIG_CPU_ICACHE_DISABLE mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache #endif mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm1022_coherent_kern_range) /* FALLTHROUGH */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm1022_coherent_user_range) mov ip, #0 bic r0, r0, #CACHE_DLINESIZE - 1 1: #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, r0, c7, c10, 1 @ clean D entry #endif #ifndef CONFIG_CPU_ICACHE_DISABLE mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry #endif add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, ip, c7, c10, 4 @ drain WB mov r0, #0 ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size */ ENTRY(arm1022_flush_kern_dcache_area) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm1022_dma_inv_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_clean_range(start, end) * * Clean the specified virtual address range. 
* * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm1022_dma_clean_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm1022_dma_flush_range) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm1022_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq arm1022_dma_clean_range bcs arm1022_dma_inv_range b arm1022_dma_flush_range ENDPROC(arm1022_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm1022_dma_unmap_area) ret lr ENDPROC(arm1022_dma_unmap_area) .globl arm1022_flush_kern_cache_louis .equ arm1022_flush_kern_cache_louis, arm1022_flush_kern_cache_all @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions arm1022 .align 5 ENTRY(cpu_arm1022_dcache_clean_area) #ifndef CONFIG_CPU_DCACHE_DISABLE mov ip, #0 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE subs r1, r1, #CACHE_DLINESIZE bhi 1b #endif ret lr /* =============================== PageTable ============================== */ /* * cpu_arm1022_switch_mm(pgd) * * Set the translation base pointer to be as described by pgd. * * pgd: new page tables */ .align 5 ENTRY(cpu_arm1022_switch_mm) #ifdef CONFIG_MMU #ifndef CONFIG_CPU_DCACHE_DISABLE mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries 2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 5 bcs 1b @ segments 15 to 0 #endif mov r1, #0 #ifndef CONFIG_CPU_ICACHE_DISABLE mcr p15, 0, r1, c7, c5, 0 @ invalidate I cache #endif mcr p15, 0, r1, c7, c10, 4 @ drain WB mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs #endif ret lr /* * cpu_arm1022_set_pte_ext(ptep, pte, ext) * * Set a PTE and flush it out */ .align 5 ENTRY(cpu_arm1022_set_pte_ext) #ifdef CONFIG_MMU armv3_set_pte_ext mov r0, r0 #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, r0, c7, c10, 1 @ clean D entry #endif #endif /* CONFIG_MMU */ ret lr .type __arm1022_setup, #function __arm1022_setup: mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 #endif adr r5, arm1022_crval ldmia r5, {r5, r6} mrc p15, 0, r0, c1, c0 @ get control register v4 bic r0, r0, r5 orr r0, r0, r6 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN orr r0, r0, #0x4000 @ .R.............. #endif ret lr .size __arm1022_setup, . 
- __arm1022_setup /* * R * .RVI ZFRS BLDP WCAM * .011 1001 ..11 0101 * */ .type arm1022_crval, #object arm1022_crval: crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930 __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions arm1022, dabort=v4t_early_abort, pabort=legacy_pabort .section ".rodata" string cpu_arch_name, "armv5te" string cpu_elf_name, "v5" string cpu_arm1022_name, "ARM1022" .align .section ".proc.info.init", "a" .type __arm1022_proc_info,#object __arm1022_proc_info: .long 0x4105a220 @ ARM 1022E (v5TE) .long 0xff0ffff0 .long PMD_TYPE_SECT | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __arm1022_setup, __arm1022_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_EDSP .long cpu_arm1022_name .long arm1022_processor_functions .long v4wbi_tlb_fns .long v4wb_user_fns .long arm1022_cache_fns .size __arm1022_proc_info, . - __arm1022_proc_info
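/*
 * How the crval pair above is consumed (a sketch; the write-back of r0
 * is performed by the generic boot code after __arm1022_setup returns):
 *
 *	adr	r5, arm1022_crval
 *	ldmia	r5, {r5, r6}		@ r5 = clear mask, r6 = mmuset
 *	mrc	p15, 0, r0, c1, c0	@ read control register
 *	bic	r0, r0, r5		@ drop every bit named in 'clear'
 *	orr	r0, r0, r6		@ force every bit named in 'mmuset'
 *	ret	lr			@ r0 = value head.S will install
 */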
aixcc-public/challenge-001-exemplar-source
10,396
arch/arm/mm/cache-v7m.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/arch/arm/mm/cache-v7m.S * * Based on linux/arch/arm/mm/cache-v7.S * * Copyright (C) 2001 Deep Blue Solutions Ltd. * Copyright (C) 2005 ARM Ltd. * * This is the "shell" of the ARMv7M processor support. */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/errno.h> #include <asm/unwind.h> #include <asm/v7m.h> #include "proc-macros.S" /* Generic V7M read/write macros for memory mapped cache operations */ .macro v7m_cache_read, rt, reg movw \rt, #:lower16:BASEADDR_V7M_SCB + \reg movt \rt, #:upper16:BASEADDR_V7M_SCB + \reg ldr \rt, [\rt] .endm .macro v7m_cacheop, rt, tmp, op, c = al movw\c \tmp, #:lower16:BASEADDR_V7M_SCB + \op movt\c \tmp, #:upper16:BASEADDR_V7M_SCB + \op str\c \rt, [\tmp] .endm .macro read_ccsidr, rt v7m_cache_read \rt, V7M_SCB_CCSIDR .endm .macro read_clidr, rt v7m_cache_read \rt, V7M_SCB_CLIDR .endm .macro write_csselr, rt, tmp v7m_cacheop \rt, \tmp, V7M_SCB_CSSELR .endm /* * dcisw: Invalidate data cache by set/way */ .macro dcisw, rt, tmp v7m_cacheop \rt, \tmp, V7M_SCB_DCISW .endm /* * dccisw: Clean and invalidate data cache by set/way */ .macro dccisw, rt, tmp v7m_cacheop \rt, \tmp, V7M_SCB_DCCISW .endm /* * dccimvac: Clean and invalidate data cache line by MVA to PoC. */ .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo .macro dccimvac\c, rt, tmp v7m_cacheop \rt, \tmp, V7M_SCB_DCCIMVAC, \c .endm .endr /* * dcimvac: Invalidate data cache line by MVA to PoC */ .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo .macro dcimvac\c, rt, tmp v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c .endm .endr /* * dccmvau: Clean data cache line by MVA to PoU */ .macro dccmvau, rt, tmp v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAU .endm /* * dccmvac: Clean data cache line by MVA to PoC */ .macro dccmvac, rt, tmp v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAC .endm /* * icimvau: Invalidate instruction caches by MVA to PoU */ .macro icimvau, rt, tmp v7m_cacheop \rt, \tmp, V7M_SCB_ICIMVAU .endm /* * Invalidate the icache, inner shareable if SMP, invalidate BTB for UP. * rt data ignored by ICIALLU(IS), so can be used for the address */ .macro invalidate_icache, rt v7m_cacheop \rt, \rt, V7M_SCB_ICIALLU mov \rt, #0 .endm /* * Invalidate the BTB, inner shareable if SMP. * rt data ignored by BPIALL, so it can be used for the address */ .macro invalidate_bp, rt v7m_cacheop \rt, \rt, V7M_SCB_BPIALL mov \rt, #0 .endm ENTRY(v7m_invalidate_l1) mov r0, #0 write_csselr r0, r1 read_ccsidr r0 movw r1, #0x7fff and r2, r1, r0, lsr #13 movw r1, #0x3ff and r3, r1, r0, lsr #3 @ NumWays - 1 add r2, r2, #1 @ NumSets and r0, r0, #0x7 add r0, r0, #4 @ SetShift clz r1, r3 @ WayShift add r4, r3, #1 @ NumWays 1: sub r2, r2, #1 @ NumSets-- mov r3, r4 @ Temp = NumWays 2: subs r3, r3, #1 @ Temp-- mov r5, r3, lsl r1 mov r6, r2, lsl r0 orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift) dcisw r5, r6 bgt 2b cmp r2, #0 bgt 1b dsb st isb ret lr ENDPROC(v7m_invalidate_l1) /* * v7m_flush_icache_all() * * Flush the whole I-cache. * * Registers: * r0 - set to 0 */ ENTRY(v7m_flush_icache_all) invalidate_icache r0 ret lr ENDPROC(v7m_flush_icache_all) /* * v7m_flush_dcache_all() * * Flush the whole D-cache. 
* * Corrupted registers: r0-r7, r9-r11 */ ENTRY(v7m_flush_dcache_all) dmb @ ensure ordering with previous memory accesses read_clidr r0 mov r3, r0, lsr #23 @ move LoC into position ands r3, r3, #7 << 1 @ extract LoC*2 from clidr beq finished @ if loc is 0, then no need to clean start_flush_levels: mov r10, #0 @ start clean at cache level 0 flush_levels: add r2, r10, r10, lsr #1 @ work out 3x current cache level mov r1, r0, lsr r2 @ extract cache type bits from clidr and r1, r1, #7 @ mask of the bits for current cache only cmp r1, #2 @ see what cache we have at this level blt skip @ skip if no cache, or just i-cache #ifdef CONFIG_PREEMPTION save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic #endif write_csselr r10, r1 @ set current cache level isb @ isb to sync the new cssr&csidr read_ccsidr r1 @ read the new csidr #ifdef CONFIG_PREEMPTION restore_irqs_notrace r9 #endif and r2, r1, #7 @ extract the length of the cache lines add r2, r2, #4 @ add 4 (line length offset) movw r4, #0x3ff ands r4, r4, r1, lsr #3 @ find maximum way number (NumWays - 1) clz r5, r4 @ find bit position of way size increment movw r7, #0x7fff ands r7, r7, r1, lsr #13 @ extract maximum set index (NumSets - 1) loop1: mov r9, r7 @ create working copy of max index loop2: lsl r6, r4, r5 orr r11, r10, r6 @ factor way and cache number into r11 lsl r6, r9, r2 orr r11, r11, r6 @ factor index number into r11 dccisw r11, r6 @ clean/invalidate by set/way subs r9, r9, #1 @ decrement the index bge loop2 subs r4, r4, #1 @ decrement the way bge loop1 skip: add r10, r10, #2 @ increment cache number cmp r3, r10 bgt flush_levels finished: mov r10, #0 @ switch back to cache level 0 write_csselr r10, r3 @ select current cache level in cssr dsb st isb ret lr ENDPROC(v7m_flush_dcache_all) /* * v7m_flush_kern_cache_all() * * Flush the entire cache system. * The data cache flush is now achieved using atomic clean / invalidates * working outwards from L1 cache. This is done using Set/Way based cache * maintenance instructions. * The instruction cache can still be invalidated back to the point of * unification in a single instruction. * */ ENTRY(v7m_flush_kern_cache_all) stmfd sp!, {r4-r7, r9-r11, lr} bl v7m_flush_dcache_all invalidate_icache r0 ldmfd sp!, {r4-r7, r9-r11, lr} ret lr ENDPROC(v7m_flush_kern_cache_all) /* * v7m_flush_user_cache_all() * * Flush all cache entries in a particular address space * * - mm - mm_struct describing address space */ ENTRY(v7m_flush_user_cache_all) /*FALLTHROUGH*/ /* * v7m_flush_user_cache_range(start, end, flags) * * Flush a range of cache entries in the specified address space. * * - start - start address (may not be aligned) * - end - end address (exclusive, may not be aligned) * - flags - vm_area_struct flags describing address space * * It is assumed that: * - we have a VIPT cache. */ ENTRY(v7m_flush_user_cache_range) ret lr ENDPROC(v7m_flush_user_cache_all) ENDPROC(v7m_flush_user_cache_range) /* * v7m_coherent_kern_range(start,end) * * Ensure that the I and D caches are coherent within specified * region. This is typically used when code has been written to * a memory region, and will be executed. * * - start - virtual start address of region * - end - virtual end address of region * * It is assumed that: * - the Icache does not read data from the write buffer */ ENTRY(v7m_coherent_kern_range) /* FALLTHROUGH */ /* * v7m_coherent_user_range(start,end) * * Ensure that the I and D caches are coherent within specified * region. This is typically used when code has been written to * a memory region, and will be executed. 
* * - start - virtual start address of region * - end - virtual end address of region * * It is assumed that: * - the Icache does not read data from the write buffer */ ENTRY(v7m_coherent_user_range) UNWIND(.fnstart ) dcache_line_size r2, r3 sub r3, r2, #1 bic r12, r0, r3 1: /* * We use open coded version of dccmvau otherwise USER() would * point at movw instruction. */ dccmvau r12, r3 add r12, r12, r2 cmp r12, r1 blo 1b dsb ishst icache_line_size r2, r3 sub r3, r2, #1 bic r12, r0, r3 2: icimvau r12, r3 add r12, r12, r2 cmp r12, r1 blo 2b invalidate_bp r0 dsb ishst isb ret lr UNWIND(.fnend ) ENDPROC(v7m_coherent_kern_range) ENDPROC(v7m_coherent_user_range) /* * v7m_flush_kern_dcache_area(void *addr, size_t size) * * Ensure that the data held in the page kaddr is written back * to the page in question. * * - addr - kernel address * - size - region size */ ENTRY(v7m_flush_kern_dcache_area) dcache_line_size r2, r3 add r1, r0, r1 sub r3, r2, #1 bic r0, r0, r3 1: dccimvac r0, r3 @ clean & invalidate D line / unified line add r0, r0, r2 cmp r0, r1 blo 1b dsb st ret lr ENDPROC(v7m_flush_kern_dcache_area) /* * v7m_dma_inv_range(start,end) * * Invalidate the data cache within the specified region; we will * be performing a DMA operation in this region and we want to * purge old data in the cache. * * - start - virtual start address of region * - end - virtual end address of region */ v7m_dma_inv_range: dcache_line_size r2, r3 sub r3, r2, #1 tst r0, r3 bic r0, r0, r3 dccimvacne r0, r3 addne r0, r0, r2 subne r3, r2, #1 @ restore r3, corrupted by v7m's dccimvac tst r1, r3 bic r1, r1, r3 dccimvacne r1, r3 cmp r0, r1 1: dcimvaclo r0, r3 addlo r0, r0, r2 cmplo r0, r1 blo 1b dsb st ret lr ENDPROC(v7m_dma_inv_range) /* * v7m_dma_clean_range(start,end) * - start - virtual start address of region * - end - virtual end address of region */ v7m_dma_clean_range: dcache_line_size r2, r3 sub r3, r2, #1 bic r0, r0, r3 1: dccmvac r0, r3 @ clean D / U line add r0, r0, r2 cmp r0, r1 blo 1b dsb st ret lr ENDPROC(v7m_dma_clean_range) /* * v7m_dma_flush_range(start,end) * - start - virtual start address of region * - end - virtual end address of region */ ENTRY(v7m_dma_flush_range) dcache_line_size r2, r3 sub r3, r2, #1 bic r0, r0, r3 1: dccimvac r0, r3 @ clean & invalidate D / U line add r0, r0, r2 cmp r0, r1 blo 1b dsb st ret lr ENDPROC(v7m_dma_flush_range) /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(v7m_dma_map_area) add r1, r1, r0 teq r2, #DMA_FROM_DEVICE beq v7m_dma_inv_range b v7m_dma_clean_range ENDPROC(v7m_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(v7m_dma_unmap_area) add r1, r1, r0 teq r2, #DMA_TO_DEVICE bne v7m_dma_inv_range ret lr ENDPROC(v7m_dma_unmap_area) .globl v7m_flush_kern_cache_louis .equ v7m_flush_kern_cache_louis, v7m_flush_kern_cache_all __INITDATA @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions v7m
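/*
 * Worked example for the set/way operand built in flush_levels above,
 * assuming a level-0 cache with 32-byte lines and 4 ways: CCSIDR gives
 * LineSize = 1, so the set shift is r2 = 1 + 4 = 5, and NumWays-1 = 3
 * gives a way shift of r5 = clz(3) = 30.  Cleaning way 2, set 10 then
 * issues the operand:
 *
 *	(2 << 30) | (10 << 5) | (0 << 1) = 0x80000140
 *
 * which is exactly the value r11 holds when dccisw fires.
 */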
aixcc-public/challenge-001-exemplar-source
8,276
arch/arm/mm/proc-v6.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/arch/arm/mm/proc-v6.S * * Copyright (C) 2001 Deep Blue Solutions Ltd. * Modified by Catalin Marinas for noMMU support * * This is the "shell" of the ARMv6 processor support. */ #include <linux/init.h> #include <linux/linkage.h> #include <linux/pgtable.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include "proc-macros.S" #define D_CACHE_LINE_SIZE 32 #define TTB_C (1 << 0) #define TTB_S (1 << 1) #define TTB_IMP (1 << 2) #define TTB_RGN_NC (0 << 3) #define TTB_RGN_WBWA (1 << 3) #define TTB_RGN_WT (2 << 3) #define TTB_RGN_WB (3 << 3) #define TTB_FLAGS_UP TTB_RGN_WBWA #define PMD_FLAGS_UP PMD_SECT_WB #define TTB_FLAGS_SMP TTB_RGN_WBWA|TTB_S #define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S ENTRY(cpu_v6_proc_init) ret lr ENTRY(cpu_v6_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x0006 @ .............ca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_v6_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * - loc - location to jump to for soft reset */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_v6_reset) mrc p15, 0, r1, c1, c0, 0 @ ctrl register bic r1, r1, #0x1 @ ...............m mcr p15, 0, r1, c1, c0, 0 @ disable MMU mov r1, #0 mcr p15, 0, r1, c7, c5, 4 @ ISB ret r0 ENDPROC(cpu_v6_reset) .popsection /* * cpu_v6_do_idle() * * Idle the processor (eg, wait for interrupt). * * IRQs are already disabled. */ ENTRY(cpu_v6_do_idle) mov r1, #0 mcr p15, 0, r1, c7, c10, 4 @ DWB - WFI may enter a low-power mode mcr p15, 0, r1, c7, c0, 4 @ wait for interrupt ret lr ENTRY(cpu_v6_dcache_clean_area) 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #D_CACHE_LINE_SIZE subs r1, r1, #D_CACHE_LINE_SIZE bhi 1b ret lr /* * cpu_v6_switch_mm(pgd_phys, tsk) * * Set the translation table base pointer to be pgd_phys * * - pgd_phys - physical address of new TTB * * It is assumed that: * - we are not using split page tables */ ENTRY(cpu_v6_switch_mm) #ifdef CONFIG_MMU mov r2, #0 mmid r1, r1 @ get mm->context.id ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) ALT_UP(orr r0, r0, #TTB_FLAGS_UP) mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB mcr p15, 0, r2, c7, c10, 4 @ drain write buffer mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 #ifdef CONFIG_PID_IN_CONTEXTIDR mrc p15, 0, r2, c13, c0, 1 @ read current context ID bic r2, r2, #0xff @ extract the PID and r1, r1, #0xff orr r1, r1, r2 @ insert into new context ID #endif mcr p15, 0, r1, c13, c0, 1 @ set context ID #endif ret lr /* * cpu_v6_set_pte_ext(ptep, pte, ext) * * Set a level 2 translation table entry. 
* * - ptep - pointer to level 2 translation table entry * (hardware version is stored at -1024 bytes) * - pte - PTE value to store * - ext - value for extended PTE bits */ armv6_mt_table cpu_v6 ENTRY(cpu_v6_set_pte_ext) #ifdef CONFIG_MMU armv6_set_pte_ext cpu_v6 #endif ret lr /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ .globl cpu_v6_suspend_size .equ cpu_v6_suspend_size, 4 * 6 #ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_v6_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID #ifdef CONFIG_MMU mrc p15, 0, r5, c3, c0, 0 @ Domain ID mrc p15, 0, r6, c2, c0, 1 @ Translation table base 1 #endif mrc p15, 0, r7, c1, c0, 1 @ auxiliary control register mrc p15, 0, r8, c1, c0, 2 @ co-processor access control mrc p15, 0, r9, c1, c0, 0 @ control register stmia r0, {r4 - r9} ldmfd sp!, {r4- r9, pc} ENDPROC(cpu_v6_do_suspend) ENTRY(cpu_v6_do_resume) mov ip, #0 mcr p15, 0, ip, c7, c14, 0 @ clean+invalidate D cache mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache mcr p15, 0, ip, c7, c15, 0 @ clean+invalidate cache mcr p15, 0, ip, c7, c10, 4 @ drain write buffer mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID ldmia r0, {r4 - r9} mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID #ifdef CONFIG_MMU mcr p15, 0, r5, c3, c0, 0 @ Domain ID ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP) ALT_UP(orr r1, r1, #TTB_FLAGS_UP) mcr p15, 0, r1, c2, c0, 0 @ Translation table base 0 mcr p15, 0, r6, c2, c0, 1 @ Translation table base 1 mcr p15, 0, ip, c2, c0, 2 @ TTB control register #endif mcr p15, 0, r7, c1, c0, 1 @ auxiliary control register mcr p15, 0, r8, c1, c0, 2 @ co-processor access control mcr p15, 0, ip, c7, c5, 4 @ ISB mov r0, r9 @ control register b cpu_resume_mmu ENDPROC(cpu_v6_do_resume) #endif string cpu_v6_name, "ARMv6-compatible processor" .align /* * __v6_setup * * Initialise TLB, Caches, and MMU state ready to switch the MMU * on. Return in r0 the new CP15 C1 control register setting. * * We automatically detect if we have a Harvard cache, and use the * Harvard cache control instructions instead of the unified cache * control instructions. * * This should be able to cover all ARMv6 cores. * * It is assumed that: * - cache type register is implemented */ __v6_setup: #ifdef CONFIG_SMP ALT_SMP(mrc p15, 0, r0, c1, c0, 1) @ Enable SMP/nAMP mode ALT_UP(nop) orr r0, r0, #0x20 ALT_SMP(mcr p15, 0, r0, c1, c0, 1) ALT_UP(nop) #endif mov r0, #0 mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs mcr p15, 0, r0, c2, c0, 2 @ TTB control register ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP) ALT_UP(orr r4, r4, #TTB_FLAGS_UP) ALT_SMP(orr r8, r8, #TTB_FLAGS_SMP) ALT_UP(orr r8, r8, #TTB_FLAGS_UP) mcr p15, 0, r8, c2, c0, 1 @ load TTB1 #endif /* CONFIG_MMU */ mcr p15, 0, r0, c7, c10, 4 @ drain write buffer and @ complete invalidations adr r5, v6_crval ldmia r5, {r5, r6} ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables mrc p15, 0, r0, c1, c0, 0 @ read control register bic r0, r0, r5 @ clear bits orr r0, r0, r6 @ set them #ifdef CONFIG_ARM_ERRATA_364296 /* * Workaround for the 364296 ARM1136 r0p2 erratum (possible cache data * corruption with hit-under-miss enabled). The conditional code below * (setting the undocumented bit 31 in the auxiliary control register * and the FI bit in the control register) disables hit-under-miss * without putting the processor into full low interrupt latency mode. 
*/ ldr r6, =0x4107b362 @ id for ARM1136 r0p2 mrc p15, 0, r5, c0, c0, 0 @ get processor id teq r5, r6 @ check for the faulty core mrceq p15, 0, r5, c1, c0, 1 @ load aux control reg orreq r5, r5, #(1 << 31) @ set the undocumented bit 31 mcreq p15, 0, r5, c1, c0, 1 @ write aux control reg orreq r0, r0, #(1 << 21) @ low interrupt latency configuration #endif ret lr @ return to head.S:__ret /* * V X F I D LR * .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced * 0 110 0011 1.00 .111 1101 < we want */ .type v6_crval, #object v6_crval: crval clear=0x01e0fb7f, mmuset=0x00c0387d, ucset=0x00c0187c __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions v6, dabort=v6_early_abort, pabort=v6_pabort, suspend=1 .section ".rodata" string cpu_arch_name, "armv6" string cpu_elf_name, "v6" .align .section ".proc.info.init", "a" /* * Match any ARMv6 processor core. */ .type __v6_proc_info, #object __v6_proc_info: .long 0x0007b000 .long 0x0007f000 ALT_SMP(.long \ PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ | \ PMD_FLAGS_SMP) ALT_UP(.long \ PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ | \ PMD_FLAGS_UP) .long PMD_TYPE_SECT | \ PMD_SECT_XN | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __v6_setup, __v6_proc_info .long cpu_arch_name .long cpu_elf_name /* See also feat_v6_fixup() for HWCAP_TLS */ .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA|HWCAP_TLS .long cpu_v6_name .long v6_processor_functions .long v6wbi_tlb_fns .long v6_user_fns .long v6_cache_fns .size __v6_proc_info, . - __v6_proc_info
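/*
 * The erratum 364296 hunk in __v6_setup above avoids a branch by
 * predication: after "teq r5, r6" every instruction carries the 'eq'
 * suffix, so on any core other than ARM1136 r0p2 the whole block
 * executes as NOPs.  Minimal form of the pattern (MIDR_R0P2 is a
 * placeholder name, not a real kernel symbol):
 *
 *	ldr	r6, =MIDR_R0P2		@ id of the affected part
 *	mrc	p15, 0, r5, c0, c0, 0	@ read main ID register
 *	teq	r5, r6			@ Z set only on the faulty core
 *	mrceq	p15, 0, r5, c1, c0, 1	@ conditionally patch aux ctrl
 *	orreq	r5, r5, #1 << 31
 *	mcreq	p15, 0, r5, c1, c0, 1
 */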
aixcc-public/challenge-001-exemplar-source
10,529
arch/arm/mm/proc-arm922.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/arch/arm/mm/proc-arm922.S: MMU functions for ARM922 * * Copyright (C) 1999,2000 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. * Copyright (C) 2001 Altera Corporation * hacked for non-paged-MM by Hyok S. Choi, 2003. * * These are the low level assembler for performing cache and TLB * functions on the arm922. * * CONFIG_CPU_ARM922_CPU_IDLE -> nohlt */ #include <linux/linkage.h> #include <linux/init.h> #include <linux/pgtable.h> #include <asm/assembler.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/page.h> #include <asm/ptrace.h> #include "proc-macros.S" /* * The size of one data cache line. */ #define CACHE_DLINESIZE 32 /* * The number of data cache segments. */ #define CACHE_DSEGMENTS 4 /* * The number of lines in a cache segment. */ #define CACHE_DENTRIES 64 /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual * cache line maintenance instructions. (I think this should * be 32768). */ #define CACHE_DLIMIT 8192 .text /* * cpu_arm922_proc_init() */ ENTRY(cpu_arm922_proc_init) ret lr /* * cpu_arm922_proc_fin() */ ENTRY(cpu_arm922_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_arm922_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * loc: location to jump to for soft reset */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_arm922_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB #ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_arm922_reset) .popsection /* * cpu_arm922_do_idle() */ .align 5 ENTRY(cpu_arm922_do_idle) mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt ret lr #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(arm922_flush_icache_all) mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache ret lr ENDPROC(arm922_flush_icache_all) /* * flush_user_cache_all() * * Clean and invalidate all cache entries in a particular * address space. */ ENTRY(arm922_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. */ ENTRY(arm922_flush_kern_cache_all) mov r2, #VM_EXEC mov ip, #0 __flush_whole_cache: mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 4 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries 2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 5 bcs 1b @ segments 3 to 0 tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * flush_user_cache_range(start, end, flags) * * Clean and invalidate a range of cache entries in the * specified address range. 
* * - start - start address (inclusive) * - end - end address (exclusive) * - flags - vm_flags describing address space */ ENTRY(arm922_flush_user_cache_range) mov ip, #0 sub r3, r1, r0 @ calculate total size cmp r3, #CACHE_DLIMIT bhs __flush_whole_cache 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry tst r2, #VM_EXEC mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm922_coherent_kern_range) /* FALLTHROUGH */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm922_coherent_user_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB mov r0, #0 ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size */ ENTRY(arm922_flush_kern_dcache_area) add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm922_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_clean_range(start, end) * * Clean the specified virtual address range. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm922_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. 
* * - start - virtual start address * - end - virtual end address */ ENTRY(arm922_dma_flush_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm922_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq arm922_dma_clean_range bcs arm922_dma_inv_range b arm922_dma_flush_range ENDPROC(arm922_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm922_dma_unmap_area) ret lr ENDPROC(arm922_dma_unmap_area) .globl arm922_flush_kern_cache_louis .equ arm922_flush_kern_cache_louis, arm922_flush_kern_cache_all @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions arm922 #endif ENTRY(cpu_arm922_dcache_clean_area) #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE subs r1, r1, #CACHE_DLINESIZE bhi 1b #endif ret lr /* =============================== PageTable ============================== */ /* * cpu_arm922_switch_mm(pgd) * * Set the translation base pointer to be as described by pgd. * * pgd: new page tables */ .align 5 ENTRY(cpu_arm922_switch_mm) #ifdef CONFIG_MMU mov ip, #0 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache #else @ && 'Clean & Invalidate whole DCache' @ && Re-written to use Index Ops. @ && Uses registers r1, r3 and ip mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 4 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries 2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 5 bcs 1b @ segments 3 to 0 #endif mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif ret lr /* * cpu_arm922_set_pte_ext(ptep, pte, ext) * * Set a PTE and flush it out */ .align 5 ENTRY(cpu_arm922_set_pte_ext) #ifdef CONFIG_MMU armv3_set_pte_ext mov r0, r0 mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c10, 4 @ drain WB #endif /* CONFIG_MMU */ ret lr .type __arm922_setup, #function __arm922_setup: mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 #endif adr r5, arm922_crval ldmia r5, {r5, r6} mrc p15, 0, r0, c1, c0 @ get control register v4 bic r0, r0, r5 orr r0, r0, r6 ret lr .size __arm922_setup, . 
- __arm922_setup /* * R * .RVI ZFRS BLDP WCAM * ..11 0001 ..11 0101 * */ .type arm922_crval, #object arm922_crval: crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130 __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions arm922, dabort=v4t_early_abort, pabort=legacy_pabort .section ".rodata" string cpu_arch_name, "armv4t" string cpu_elf_name, "v4" string cpu_arm922_name, "ARM922T" .align .section ".proc.info.init", "a" .type __arm922_proc_info,#object __arm922_proc_info: .long 0x41009220 .long 0xff00fff0 .long PMD_TYPE_SECT | \ PMD_SECT_BUFFERABLE | \ PMD_SECT_CACHEABLE | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __arm922_setup, __arm922_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB .long cpu_arm922_name .long arm922_processor_functions .long v4wbi_tlb_fns .long v4wb_user_fns #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH .long arm922_cache_fns #else .long v4wt_cache_fns #endif .size __arm922_proc_info, . - __arm922_proc_info
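/*
 * Note on cpu_arm922_switch_mm above: the ARM922T caches are virtually
 * indexed and virtually tagged, so before translations change every
 * dirty line of the old address space must be written back and the
 * I-cache and TLBs invalidated.  Stripped of the index loop, the
 * switch reduces to:
 *
 *	(clean+invalidate whole D-cache by index)
 *	mcr	p15, 0, ip, c7, c5, 0	@ invalidate I cache
 *	mcr	p15, 0, ip, c7, c10, 4	@ drain write buffer
 *	mcr	p15, 0, r0, c2, c0, 0	@ install new page table pointer
 *	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I & D TLBs
 */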
aixcc-public/challenge-001-exemplar-source
4,321
arch/arm/mm/proc-v7-2level.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * arch/arm/mm/proc-v7-2level.S * * Copyright (C) 2001 Deep Blue Solutions Ltd. */ #define TTB_S (1 << 1) #define TTB_RGN_NC (0 << 3) #define TTB_RGN_OC_WBWA (1 << 3) #define TTB_RGN_OC_WT (2 << 3) #define TTB_RGN_OC_WB (3 << 3) #define TTB_NOS (1 << 5) #define TTB_IRGN_NC ((0 << 0) | (0 << 6)) #define TTB_IRGN_WBWA ((0 << 0) | (1 << 6)) #define TTB_IRGN_WT ((1 << 0) | (0 << 6)) #define TTB_IRGN_WB ((1 << 0) | (1 << 6)) /* PTWs cacheable, inner WB not shareable, outer WB not shareable */ #define TTB_FLAGS_UP TTB_IRGN_WB|TTB_RGN_OC_WB #define PMD_FLAGS_UP PMD_SECT_WB /* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */ #define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA #define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S /* * cpu_v7_switch_mm(pgd_phys, tsk) * * Set the translation table base pointer to be pgd_phys * * - pgd_phys - physical address of new TTB * * It is assumed that: * - we are not using split page tables * * Note that we always need to flush BTAC/BTB if IBE is set * even on Cortex-A8 revisions not affected by 430973. * If IBE is not set, the flush BTAC/BTB won't do anything. */ ENTRY(cpu_v7_switch_mm) #ifdef CONFIG_MMU mmid r1, r1 @ get mm->context.id ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) ALT_UP(orr r0, r0, #TTB_FLAGS_UP) #ifdef CONFIG_PID_IN_CONTEXTIDR mrc p15, 0, r2, c13, c0, 1 @ read current context ID lsr r2, r2, #8 @ extract the PID bfi r1, r2, #8, #24 @ insert into new context ID #endif #ifdef CONFIG_ARM_ERRATA_754322 dsb #endif mcr p15, 0, r1, c13, c0, 1 @ set context ID isb mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 isb #endif bx lr ENDPROC(cpu_v7_switch_mm) /* * cpu_v7_set_pte_ext(ptep, pte) * * Set a level 2 translation table entry. * * - ptep - pointer to level 2 translation table entry * (hardware version is stored at +2048 bytes) * - pte - PTE value to store * - ext - value for extended PTE bits */ ENTRY(cpu_v7_set_pte_ext) #ifdef CONFIG_MMU str r1, [r0] @ linux version bic r3, r1, #0x000003f0 bic r3, r3, #PTE_TYPE_MASK orr r3, r3, r2 orr r3, r3, #PTE_EXT_AP0 | 2 tst r1, #1 << 4 orrne r3, r3, #PTE_EXT_TEX(1) eor r1, r1, #L_PTE_DIRTY tst r1, #L_PTE_RDONLY | L_PTE_DIRTY orrne r3, r3, #PTE_EXT_APX tst r1, #L_PTE_USER orrne r3, r3, #PTE_EXT_AP1 tst r1, #L_PTE_XN orrne r3, r3, #PTE_EXT_XN tst r1, #L_PTE_YOUNG tstne r1, #L_PTE_VALID eorne r1, r1, #L_PTE_NONE tstne r1, #L_PTE_NONE moveq r3, #0 ARM( str r3, [r0, #2048]! ) THUMB( add r0, r0, #2048 ) THUMB( str r3, [r0] ) ALT_SMP(W(nop)) ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte #endif bx lr ENDPROC(cpu_v7_set_pte_ext) /* * Memory region attributes with SCTLR.TRE=1 * * n = TEX[0],C,B * TR = PRRR[2n+1:2n] - memory type * IR = NMRR[2n+1:2n] - inner cacheable property * OR = NMRR[2n+17:2n+16] - outer cacheable property * * n TR IR OR * UNCACHED 000 00 * BUFFERABLE 001 10 00 00 * WRITETHROUGH 010 10 10 10 * WRITEBACK 011 10 11 11 * reserved 110 * WRITEALLOC 111 10 01 01 * DEV_SHARED 100 01 * DEV_NONSHARED 100 01 * DEV_WC 001 10 * DEV_CACHED 011 10 * * Other attributes: * * DS0 = PRRR[16] = 0 - device shareable property * DS1 = PRRR[17] = 1 - device shareable property * NS0 = PRRR[18] = 0 - normal shareable property * NS1 = PRRR[19] = 1 - normal shareable property * NOS = PRRR[24+n] = 1 - not outer shareable */ .equ PRRR, 0xff0a81a8 .equ NMRR, 0x40e040e0 /* * Macro for setting up the TTBRx and TTBCR registers. * - \ttb0 and \ttb1 updated with the corresponding flags. 
*/ .macro v7_ttb_setup, zero, ttbr0l, ttbr0h, ttbr1, tmp mcr p15, 0, \zero, c2, c0, 2 @ TTB control register ALT_SMP(orr \ttbr0l, \ttbr0l, #TTB_FLAGS_SMP) ALT_UP(orr \ttbr0l, \ttbr0l, #TTB_FLAGS_UP) ALT_SMP(orr \ttbr1, \ttbr1, #TTB_FLAGS_SMP) ALT_UP(orr \ttbr1, \ttbr1, #TTB_FLAGS_UP) mcr p15, 0, \ttbr1, c2, c0, 1 @ load TTB1 .endm /* AT * TFR EV X F I D LR S * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced * 01 0 110 0011 1100 .111 1101 < we want */ .align 2 .type v7_crval, #object v7_crval: crval clear=0x2120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
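/*
 * Worked decode of one row of the attribute table above against the
 * PRRR/NMRR constants: for WRITEBACK, n = 0b011, so TR = PRRR[7:6],
 * IR = NMRR[7:6] and OR = NMRR[23:22].  PRRR = 0xff0a81a8 has
 * bits [7:6] = 0b10 (normal memory); NMRR = 0x40e040e0 has
 * bits [7:6] = 0b11 and bits [23:22] = 0b11 (inner/outer write-back),
 * matching the "WRITEBACK 011 10 11 11" row.
 */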
aixcc-public/challenge-001-exemplar-source
2,194
arch/arm/mm/proc-arm9tdmi.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/arch/arm/mm/proc-arm9tdmi.S: utility functions for ARM9TDMI * * Copyright (C) 2003-2006 Hyok S. Choi <hyok.choi@samsung.com> */ #include <linux/linkage.h> #include <linux/init.h> #include <linux/pgtable.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/ptrace.h> #include "proc-macros.S" .text /* * cpu_arm9tdmi_proc_init() * cpu_arm9tdmi_do_idle() * cpu_arm9tdmi_dcache_clean_area() * cpu_arm9tdmi_switch_mm() * * These are not required. */ ENTRY(cpu_arm9tdmi_proc_init) ENTRY(cpu_arm9tdmi_do_idle) ENTRY(cpu_arm9tdmi_dcache_clean_area) ENTRY(cpu_arm9tdmi_switch_mm) ret lr /* * cpu_arm9tdmi_proc_fin() */ ENTRY(cpu_arm9tdmi_proc_fin) ret lr /* * Function: cpu_arm9tdmi_reset(loc) * Params : loc(r0) address to jump to * Purpose : Sets up everything for a reset and jump to the location for soft reset. */ .pushsection .idmap.text, "ax" ENTRY(cpu_arm9tdmi_reset) ret r0 ENDPROC(cpu_arm9tdmi_reset) .popsection .type __arm9tdmi_setup, #function __arm9tdmi_setup: ret lr .size __arm9tdmi_setup, . - __arm9tdmi_setup __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions arm9tdmi, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1 .section ".rodata" string cpu_arch_name, "armv4t" string cpu_elf_name, "v4" string cpu_arm9tdmi_name, "ARM9TDMI" string cpu_p2001_name, "P2001" .align .section ".proc.info.init", "a" .macro arm9tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req .type __\name\()_proc_info, #object __\name\()_proc_info: .long \cpu_val .long \cpu_mask .long 0 .long 0 initfn __arm9tdmi_setup, __\name\()_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT .long \cpu_name .long arm9tdmi_processor_functions .long 0 .long 0 .long v4_cache_fns .size __\name\()_proc_info, . - __\name\()_proc_info .endm arm9tdmi_proc_info arm9tdmi, 0x41009900, 0xfff8ff00, cpu_arm9tdmi_name arm9tdmi_proc_info p2001, 0x41029000, 0xffffffff, cpu_p2001_name
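/*
 * How the entries generated by arm9tdmi_proc_info are matched at boot
 * (a sketch of the generic lookup rule assumed to be used by the head
 * code): a record hits when (MIDR & cpu_mask) == cpu_val.  With mask
 * 0xfff8ff00 the revision byte [7:0] and bits [18:16] are don't-care,
 * so e.g. MIDR 0x41009901 (r0p1) still selects the ARM9TDMI record:
 * 0x41009901 & 0xfff8ff00 == 0x41009900.
 */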
aixcc-public/challenge-001-exemplar-source
1,170
arch/arm/mm/cache-nop.S
/* SPDX-License-Identifier: GPL-2.0-only */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include "proc-macros.S" ENTRY(nop_flush_icache_all) ret lr ENDPROC(nop_flush_icache_all) .globl nop_flush_kern_cache_all .equ nop_flush_kern_cache_all, nop_flush_icache_all .globl nop_flush_kern_cache_louis .equ nop_flush_kern_cache_louis, nop_flush_icache_all .globl nop_flush_user_cache_all .equ nop_flush_user_cache_all, nop_flush_icache_all .globl nop_flush_user_cache_range .equ nop_flush_user_cache_range, nop_flush_icache_all .globl nop_coherent_kern_range .equ nop_coherent_kern_range, nop_flush_icache_all ENTRY(nop_coherent_user_range) mov r0, #0 ret lr ENDPROC(nop_coherent_user_range) .globl nop_flush_kern_dcache_area .equ nop_flush_kern_dcache_area, nop_flush_icache_all .globl nop_dma_flush_range .equ nop_dma_flush_range, nop_flush_icache_all .globl nop_dma_map_area .equ nop_dma_map_area, nop_flush_icache_all .globl nop_dma_unmap_area .equ nop_dma_unmap_area, nop_flush_icache_all __INITDATA @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions nop
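/*
 * The .globl/.equ pairs above are an assembler-level alias: for
 * example
 *	.globl	nop_dma_map_area
 *	.equ	nop_dma_map_area, nop_flush_icache_all
 * binds both symbols to the same address, so every no-op cache
 * operation in this file shares the single "ret lr" of
 * nop_flush_icache_all.
 */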
aixcc-public/challenge-001-exemplar-source
6,896
arch/arm/mm/abort-lv4t.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> #include <asm/assembler.h> /* * Function: v4t_late_abort * * Params : r2 = pt_regs * : r4 = aborted context pc * : r5 = aborted context psr * * Returns : r4-r5, r9-r11, r13 preserved * * Purpose : obtain information about current aborted instruction. * Note: we read user space. This means we might cause a data * abort here if the I-TLB and D-TLB aren't seeing the same * picture. Unfortunately, this does happen. We live with it. */ ENTRY(v4t_late_abort) tst r5, #PSR_T_BIT @ check for thumb mode #ifdef CONFIG_CPU_CP15_MMU mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR #else mov r0, #0 @ clear r0, r1 (no FSR/FAR) mov r1, #0 #endif bne .data_thumb_abort ldr r8, [r4] @ read arm instruction uaccess_disable ip @ disable userspace access tst r8, #1 << 20 @ L = 1 -> write? orreq r1, r1, #1 << 11 @ yes. and r7, r8, #15 << 24 add pc, pc, r7, lsr #22 @ Now branch to the relevant processing routine nop /* 0 */ b .data_arm_lateldrhpost @ ldrh rd, [rn], #m/rm /* 1 */ b .data_arm_lateldrhpre @ ldrh rd, [rn, #m/rm] /* 2 */ b .data_unknown /* 3 */ b .data_unknown /* 4 */ b .data_arm_lateldrpostconst @ ldr rd, [rn], #m /* 5 */ b .data_arm_lateldrpreconst @ ldr rd, [rn, #m] /* 6 */ b .data_arm_lateldrpostreg @ ldr rd, [rn], rm /* 7 */ b .data_arm_lateldrprereg @ ldr rd, [rn, rm] /* 8 */ b .data_arm_ldmstm @ ldm*a rn, <rlist> /* 9 */ b .data_arm_ldmstm @ ldm*b rn, <rlist> /* a */ b .data_unknown /* b */ b .data_unknown /* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m /* d */ b do_DataAbort @ ldc rd, [rn, #m] /* e */ b .data_unknown /* f */ b .data_unknown .data_unknown_r9: ldr r9, [sp], #4 .data_unknown: @ Part of jumptable mov r0, r4 mov r1, r8 b baddataabort .data_arm_ldmstm: tst r8, #1 << 21 @ check writeback bit beq do_DataAbort @ no writeback -> no fixup str r9, [sp, #-4]! mov r7, #0x11 orr r7, r7, #0x1100 and r6, r8, r7 and r9, r8, r7, lsl #1 add r6, r6, r9, lsr #1 and r9, r8, r7, lsl #2 add r6, r6, r9, lsr #2 and r9, r8, r7, lsl #3 add r6, r6, r9, lsr #3 add r6, r6, r6, lsr #8 add r6, r6, r6, lsr #4 and r6, r6, #15 @ r6 = no. of registers to transfer. and r9, r8, #15 << 16 @ Extract 'n' from instruction ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6, lsl #2 @ Undo increment addeq r7, r7, r6, lsl #2 @ Undo decrement str r7, [r2, r9, lsr #14] @ Put register 'Rn' ldr r9, [sp], #4 b do_DataAbort .data_arm_lateldrhpre: tst r8, #1 << 21 @ Check writeback bit beq do_DataAbort @ No writeback -> no fixup .data_arm_lateldrhpost: str r9, [sp, #-4]! and r9, r8, #0x00f @ get Rm / low nibble of immediate value tst r8, #1 << 22 @ if (immediate offset) andne r6, r8, #0xf00 @ { immediate high nibble orrne r6, r9, r6, lsr #4 @ combine nibbles } else ldreq r6, [r2, r9, lsl #2] @ { load Rm value } .data_arm_apply_r6_and_rn: and r9, r8, #15 << 16 @ Extract 'n' from instruction ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6 @ Undo increment addeq r7, r7, r6 @ Undo decrement str r7, [r2, r9, lsr #14] @ Put register 'Rn' ldr r9, [sp], #4 b do_DataAbort .data_arm_lateldrpreconst: tst r8, #1 << 21 @ check writeback bit beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostconst: movs r6, r8, lsl #20 @ Get offset beq do_DataAbort @ zero -> no fixup str r9, [sp, #-4]! 
and r9, r8, #15 << 16 @ Extract 'n' from instruction ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6, lsr #20 @ Undo increment addeq r7, r7, r6, lsr #20 @ Undo decrement str r7, [r2, r9, lsr #14] @ Put register 'Rn' ldr r9, [sp], #4 b do_DataAbort .data_arm_lateldrprereg: tst r8, #1 << 21 @ check writeback bit beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostreg: and r7, r8, #15 @ Extract 'm' from instruction ldr r6, [r2, r7, lsl #2] @ Get register 'Rm' str r9, [sp, #-4]! mov r9, r8, lsr #7 @ get shift count ands r9, r9, #31 and r7, r8, #0x70 @ get shift type orreq r7, r7, #8 @ shift count = 0 add pc, pc, r7 nop mov r6, r6, lsl r9 @ 0: LSL #!0 b .data_arm_apply_r6_and_rn b .data_arm_apply_r6_and_rn @ 1: LSL #0 nop b .data_unknown_r9 @ 2: MUL? nop b .data_unknown_r9 @ 3: MUL? nop mov r6, r6, lsr r9 @ 4: LSR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, lsr #32 @ 5: LSR #32 b .data_arm_apply_r6_and_rn b .data_unknown_r9 @ 6: MUL? nop b .data_unknown_r9 @ 7: MUL? nop mov r6, r6, asr r9 @ 8: ASR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, asr #32 @ 9: ASR #32 b .data_arm_apply_r6_and_rn b .data_unknown_r9 @ A: MUL? nop b .data_unknown_r9 @ B: MUL? nop mov r6, r6, ror r9 @ C: ROR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, rrx @ D: RRX b .data_arm_apply_r6_and_rn b .data_unknown_r9 @ E: MUL? nop b .data_unknown_r9 @ F: MUL? .data_thumb_abort: ldrh r8, [r4] @ read instruction uaccess_disable ip @ disable userspace access tst r8, #1 << 11 @ L = 1 -> write? orreq r1, r1, #1 << 8 @ yes and r7, r8, #15 << 12 add pc, pc, r7, lsr #10 @ lookup in table nop /* 0 */ b .data_unknown /* 1 */ b .data_unknown /* 2 */ b .data_unknown /* 3 */ b .data_unknown /* 4 */ b .data_unknown /* 5 */ b .data_thumb_reg /* 6 */ b do_DataAbort /* 7 */ b do_DataAbort /* 8 */ b do_DataAbort /* 9 */ b do_DataAbort /* A */ b .data_unknown /* B */ b .data_thumb_pushpop /* C */ b .data_thumb_ldmstm /* D */ b .data_unknown /* E */ b .data_unknown /* F */ b .data_unknown .data_thumb_reg: tst r8, #1 << 9 beq do_DataAbort tst r8, #1 << 10 @ If 'S' (signed) bit is set movne r1, #0 @ it must be a load instr b do_DataAbort .data_thumb_pushpop: tst r8, #1 << 10 beq .data_unknown str r9, [sp, #-4]! and r6, r8, #0x55 @ hweight8(r8) + R bit and r9, r8, #0xaa add r6, r6, r9, lsr #1 and r9, r6, #0xcc and r6, r6, #0x33 add r6, r6, r9, lsr #2 movs r7, r8, lsr #9 @ C = r8 bit 8 (R bit) adc r6, r6, r6, lsr #4 @ high + low nibble + R bit and r6, r6, #15 @ number of regs to transfer ldr r7, [r2, #13 << 2] tst r8, #1 << 11 addeq r7, r7, r6, lsl #2 @ increment SP if PUSH subne r7, r7, r6, lsl #2 @ decrement SP if POP str r7, [r2, #13 << 2] ldr r9, [sp], #4 b do_DataAbort .data_thumb_ldmstm: str r9, [sp, #-4]! and r6, r8, #0x55 @ hweight8(r8) and r9, r8, #0xaa add r6, r6, r9, lsr #1 and r9, r6, #0xcc and r6, r6, #0x33 add r6, r6, r9, lsr #2 add r6, r6, r6, lsr #4 and r9, r8, #7 << 8 ldr r7, [r2, r9, lsr #6] and r6, r6, #15 @ number of regs to transfer sub r7, r7, r6, lsl #2 @ always decrement str r7, [r2, r9, lsr #6] ldr r9, [sp], #4 b do_DataAbort
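Editorial note: .data_arm_ldmstm above counts the registers in an ldm/stm
register list with a mask-and-add population count rather than a loop. A
standalone sketch of the same technique (hweight16_rlist is a hypothetical
name, not a symbol in the tree; input r0 = 16-bit register list, output
r0 = number of set bits, folded modulo 16 exactly as the abort fixup does):

ENTRY(hweight16_rlist)
	mov	r3, #0x11
	orr	r3, r3, #0x1100		@ r3 = 0x1111, one probe bit per nibble
	and	r1, r0, r3		@ sum bit 0 of every nibble
	and	r2, r0, r3, lsl #1
	add	r1, r1, r2, lsr #1	@ ... plus bit 1
	and	r2, r0, r3, lsl #2
	add	r1, r1, r2, lsr #2	@ ... plus bit 2
	and	r2, r0, r3, lsl #3
	add	r1, r1, r2, lsr #3	@ each nibble now holds its own popcount
	add	r1, r1, r1, lsr #8	@ fold high byte into low byte
	add	r1, r1, r1, lsr #4	@ fold high nibble into low nibble
	and	r0, r1, #15		@ final count (16 registers wraps to 0)
	ret	lr
ENDPROC(hweight16_rlist)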
aixcc-public/challenge-001-exemplar-source
7,859
arch/arm/mm/cache-v6.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v6.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 *  This is the "shell" of the ARMv6 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>

#include "proc-macros.S"

#define HARVARD_CACHE
#define CACHE_LINE_SIZE		32
#define D_CACHE_LINE_SIZE	32
#define BTB_FLUSH_SIZE		8

/*
 *	v6_flush_icache_all()
 *
 *	Flush the whole I-cache.
 *
 *	ARM1136 erratum 411920 - Invalidate Instruction Cache operation can fail.
 *	This erratum is present in 1136, 1156 and 1176. It does not affect the
 *	MPCore.
 *
 *	Registers:
 *	r0 - set to 0
 *	r1 - corrupted
 */
ENTRY(v6_flush_icache_all)
	mov	r0, #0
#ifdef CONFIG_ARM_ERRATA_411920
	mrs	r1, cpsr
	cpsid	ifa				@ disable interrupts
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	msr	cpsr_cx, r1			@ restore interrupts
	.rept	11				@ ARM Ltd recommends at least
	nop					@ 11 NOPs
	.endr
#else
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I-cache
#endif
	ret	lr
ENDPROC(v6_flush_icache_all)

/*
 *	v6_flush_kern_cache_all()
 *
 *	Flush the entire cache.
 */
ENTRY(v6_flush_kern_cache_all)
	mov	r0, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 0		@ D cache clean+invalidate
#ifndef CONFIG_ARM_ERRATA_411920
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
#else
	b	v6_flush_icache_all
#endif
#else
	mcr	p15, 0, r0, c7, c15, 0		@ Cache clean+invalidate
#endif
	ret	lr

/*
 *	v6_flush_user_cache_all()
 *
 *	Flush all cache entries in a particular address space
 *
 *	- mm    - mm_struct describing address space
 */
ENTRY(v6_flush_user_cache_all)
	/*FALLTHROUGH*/

/*
 *	v6_flush_user_cache_range(start, end, flags)
 *
 *	Flush a range of cache entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- flags	- vm_area_struct flags describing address space
 *
 *	It is assumed that:
 *	- we have a VIPT cache.
 */
ENTRY(v6_flush_user_cache_range)
	ret	lr

/*
 *	v6_coherent_kern_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	v6_coherent_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_user_range)
 UNWIND(.fnstart		)
#ifdef HARVARD_CACHE
	bic	r0, r0, #CACHE_LINE_SIZE - 1
1:
 USER(	mcr	p15, 0, r0, c7, c10, 1	)	@ clean D line
	add	r0, r0, #CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
#endif
	mov	r0, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#ifndef CONFIG_ARM_ERRATA_411920
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
#else
	b	v6_flush_icache_all
#endif
#else
	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
#endif
	ret	lr

/*
 * Fault handling for the cache operation above. If the virtual address in r0
 * isn't mapped, fail with -EFAULT.
 */
9001:
	mov	r0, #-EFAULT
	ret	lr
 UNWIND(.fnend		)
ENDPROC(v6_coherent_user_range)
ENDPROC(v6_coherent_kern_range)

/*
 *	v6_flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure that the data held in the page kaddr is written back
 *	to the page in question.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v6_flush_kern_dcache_area)
	add	r1, r0, r1
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
#ifdef HARVARD_CACHE
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4
#endif
	ret	lr

/*
 *	v6_dma_inv_range(start,end)
 *
 *	Invalidate the data cache within the specified region; we will
 *	be performing a DMA operation in this region and we want to
 *	purge old data in the cache.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
v6_dma_inv_range:
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrb	r2, [r0]			@ read for ownership
	strb	r2, [r0]			@ write for ownership
#endif
	tst	r0, #D_CACHE_LINE_SIZE - 1
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D line
#else
	mcrne	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	tst	r1, #D_CACHE_LINE_SIZE - 1
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrbne	r2, [r1, #-1]			@ read for ownership
	strbne	r2, [r1, #-1]			@ write for ownership
#endif
	bic	r1, r1, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D line
#else
	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line
#endif
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D line
#else
	mcr	p15, 0, r0, c7, c7, 1		@ invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrlo	r2, [r0]			@ read for ownership
	strlo	r2, [r0]			@ write for ownership
#endif
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	v6_dma_clean_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
v6_dma_clean_range:
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef CONFIG_DMA_CACHE_RWFO
	ldr	r2, [r0]			@ read for ownership
#endif
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
#else
	mcr	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	v6_dma_flush_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(v6_dma_flush_range)
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrb	r2, [r0]			@ read for ownership
	strb	r2, [r0]			@ write for ownership
#endif
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrblo	r2, [r0]			@ read for ownership
	strblo	r2, [r0]			@ write for ownership
#endif
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v6_dma_map_area)
	add	r1, r1, r0
	teq	r2, #DMA_FROM_DEVICE
	beq	v6_dma_inv_range
#ifndef CONFIG_DMA_CACHE_RWFO
	b	v6_dma_clean_range
#else
	teq	r2, #DMA_TO_DEVICE
	beq	v6_dma_clean_range
	b	v6_dma_flush_range
#endif
ENDPROC(v6_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v6_dma_unmap_area)
#ifndef CONFIG_DMA_CACHE_RWFO
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v6_dma_inv_range
#endif
	ret	lr
ENDPROC(v6_dma_unmap_area)

	.globl	v6_flush_kern_cache_louis
	.equ	v6_flush_kern_cache_louis, v6_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v6
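Editorial note: v6_coherent_user_range and v6_flush_kern_dcache_area above
both instantiate the same "operate by MVA over [r0, r1)" loop. A minimal
sketch of that canonical shape, assuming a 32-byte line size as in this
file (illustrative only, not an additional kernel routine):

	bic	r0, r0, #CACHE_LINE_SIZE - 1	@ align start down to a line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D line holding [r0]
	add	r0, r0, #CACHE_LINE_SIZE	@ next line
	cmp	r0, r1
	blo	1b				@ until end (exclusive)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer

Aligning the start address down means the loop always covers the partial
first line, and the exclusive end bound makes the blo test cover a partial
last line as well.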
aixcc-public/challenge-001-exemplar-source
6,120
arch/arm/mm/cache-fa.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-fa.S
 *
 *  Copyright (C) 2005 Faraday Corp.
 *  Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
 *
 *  Based on cache-v4wb.S:
 *	Copyright (C) 1997-2002 Russell king
 *
 *  Processors: FA520 FA526 FA626
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/page.h>

#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	16

/*
 * The total size of the data cache.
 */
#ifdef CONFIG_ARCH_GEMINI
#define CACHE_DSIZE	8192
#else
#define CACHE_DSIZE	16384
#endif

/* FIXME: put optimal value here. Current one is just estimation */
#define CACHE_DLIMIT	(CACHE_DSIZE * 2)

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(fa_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(fa_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(fa_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(fa_flush_kern_cache_all)
	mov	ip, #0
	mov	r2, #VM_EXEC
__flush_whole_cache:
	mcr	p15, 0, ip, c7, c14, 0		@ clean/invalidate D cache
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c5, 6		@ invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ drain write buffer
	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
	ret	lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(fa_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT		@ total size >= limit?
	bhs	__flush_whole_cache		@ flush whole D cache
1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I line
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
	ret	lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(fa_coherent_kern_range)
	/* fall through */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(fa_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush
	ret	lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure that the data held in the page kaddr is written back
 *	to the page in question.
 *
 *	- addr	- kernel address
 *	- size	- size of region
 */
ENTRY(fa_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
fa_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c14, 1		@ clean & invalidate D entry
	tst	r1, #CACHE_DLINESIZE - 1
	bic	r1, r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean (write back) the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
fa_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	dma_flush_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(fa_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(fa_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	fa_dma_clean_range
	bcs	fa_dma_inv_range
	b	fa_dma_flush_range
ENDPROC(fa_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(fa_dma_unmap_area)
	ret	lr
ENDPROC(fa_dma_unmap_area)

	.globl	fa_flush_kern_cache_louis
	.equ	fa_flush_kern_cache_louis, fa_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions fa
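Editorial note: fa_dma_inv_range above must not discard bytes that live in
the same cache line as the DMA buffer but outside it, which is why partial
lines at either end are cleaned-and-invalidated first. A sketch of just
that boundary handling, assuming the 16-byte line size of this file
(illustrative, not an extra kernel routine):

	tst	r0, #15			@ does the range start mid-line?
	bic	r0, r0, #15		@ align start down to a line
	mcrne	p15, 0, r0, c7, c14, 1	@ if so, write the line back before
					@ it is invalidated, so neighbouring
					@ data outside the DMA window survives

The same test-then-conditional-clean is repeated for the end address; only
fully-contained lines may be invalidated without a prior write-back.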
aixcc-public/challenge-001-exemplar-source
5,372
arch/arm/mm/proc-arm720.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  linux/arch/arm/mm/proc-arm720.S: MMU functions for ARM720
 *
 *  Copyright (C) 2000 Steve Hill (sjhill@cotw.com)
 *                     Rob Scott (rscott@mtrob.fdns.net)
 *  Copyright (C) 2000 ARM Limited, Deep Blue Solutions Ltd.
 *  hacked for non-paged-MM by Hyok S. Choi, 2004.
 *
 *  These are the low level assembler for performing cache and TLB
 *  functions on the ARM720T.  The ARM720T has a writethrough IDC
 *  cache, so we don't need to clean it.
 *
 *  Changelog:
 *   05-09-2000 SJH	Created by moving 720 specific functions
 *			out of 'proc-arm6,7.S' per RMK discussion
 *   07-25-2000 SJH	Added idle function.
 *   08-25-2000	DBS	Updated for integration of ARM Ltd version.
 *   04-20-2004	HSC	modified for non-paged memory management mode.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>

#include "proc-macros.S"

/*
 * Function: arm720_proc_init (void)
 *	   : arm720_proc_fin (void)
 *
 * Notes   : This processor does not require these
 */
ENTRY(cpu_arm720_dcache_clean_area)
ENTRY(cpu_arm720_proc_init)
	ret	lr

ENTRY(cpu_arm720_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * Function: arm720_proc_do_idle(void)
 * Params  : r0 = unused
 * Purpose : put the processor in proper idle mode
 */
ENTRY(cpu_arm720_do_idle)
	ret	lr

/*
 * Function: arm720_switch_mm(unsigned long pgd_phys)
 * Params  : pgd_phys	Physical address of page table
 * Purpose : Perform a task switch, saving the old process' state and restoring
 *	     the new.
 */
ENTRY(cpu_arm720_switch_mm)
#ifdef CONFIG_MMU
	mov	r1, #0
	mcr	p15, 0, r1, c7, c7, 0		@ invalidate cache
	mcr	p15, 0, r0, c2, c0, 0		@ update page table ptr
	mcr	p15, 0, r1, c8, c7, 0		@ flush TLB (v4)
#endif
	ret	lr

/*
 * Function: arm720_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext)
 * Params  : r0 = Address to set
 *	   : r1 = value to set
 * Purpose : Set a PTE and flush it out of any WB cache
 */
	.align	5
ENTRY(cpu_arm720_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext wc_disable=0
#endif
	ret	lr

/*
 * Function: arm720_reset
 * Params  : r0 = address to jump to
 * Notes   : This sets up everything for a reset
 */
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_arm720_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate cache
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ flush TLB (v4)
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ get ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x2100			@ ..v....s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
ENDPROC(cpu_arm720_reset)
	.popsection

	.type	__arm710_setup, #function
__arm710_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0		@ invalidate caches
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7, 0		@ flush TLB (v4)
#endif
	mrc	p15, 0, r0, c1, c0		@ get control register
	ldr	r5, arm710_cr1_clear
	bic	r0, r0, r5
	ldr	r5, arm710_cr1_set
	orr	r0, r0, r5
	ret	lr				@ __ret (head.S)
	.size	__arm710_setup, . - __arm710_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * .... 0001 ..11 1101
	 *
	 */
	.type	arm710_cr1_clear, #object
	.type	arm710_cr1_set, #object
arm710_cr1_clear:
	.word	0x0f3f
arm710_cr1_set:
	.word	0x013d

	.type	__arm720_setup, #function
__arm720_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0		@ invalidate caches
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7, 0		@ flush TLB (v4)
#endif
	adr	r5, arm720_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register
	bic	r0, r0, r5
	orr	r0, r0, r6
	ret	lr				@ __ret (head.S)
	.size	__arm720_setup, . - __arm720_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * ..1. 1001 ..11 1101
	 *
	 */
	.type	arm720_crval, #object
arm720_crval:
	crval	clear=0x00002f3f, mmuset=0x0000213d, ucset=0x00000130

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm720, dabort=v4t_late_abort, pabort=legacy_pabort

	.section ".rodata"

	string	cpu_arch_name, "armv4t"
	string	cpu_elf_name, "v4"
	string	cpu_arm710_name, "ARM710T"
	string	cpu_arm720_name, "ARM720T"

	.align

/*
 * See <asm/procinfo.h> for a definition of this structure.
 */
	.section ".proc.info.init", "a"

.macro arm720_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cpu_flush:req
	.type	__\name\()_proc_info,#object
__\name\()_proc_info:
	.long	\cpu_val
	.long	\cpu_mask
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	\cpu_flush, __\name\()_proc_info	@ cpu_flush
	.long	cpu_arch_name				@ arch_name
	.long	cpu_elf_name				@ elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB	@ elf_hwcap
	.long	\cpu_name
	.long	arm720_processor_functions
	.long	v4_tlb_fns
	.long	v4wt_user_fns
	.long	v4_cache_fns
	.size	__\name\()_proc_info, . - __\name\()_proc_info
.endm

	arm720_proc_info arm710, 0x41807100, 0xffffff00, cpu_arm710_name, __arm710_setup
	arm720_proc_info arm720, 0x41807200, 0xffffff00, cpu_arm720_name, __arm720_setup
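Editorial note: __arm720_setup applies its control-register update through
a clear-mask/set-mask pair emitted by the crval macro from proc-macros.S.
A sketch of that pattern in isolation (example_crval is a hypothetical
label; the crval arguments are copied from arm720_crval above):

	adr	r5, example_crval
	ldmia	r5, {r5, r6}		@ r5 = clear mask, r6 = set mask
	mrc	p15, 0, r0, c1, c0	@ read the control register
	bic	r0, r0, r5		@ drop every bit named in the clear mask
	orr	r0, r0, r6		@ force every bit named in the set mask

example_crval:
	crval	clear=0x00002f3f, mmuset=0x0000213d, ucset=0x00000130

Keeping both masks as data next to the code lets one setup routine serve
MMU and no-MMU builds: crval emits the mmuset or ucset word as appropriate.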
aixcc-public/challenge-001-exemplar-source
6,825
arch/arm/mm/proc-v7m.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/proc-v7m.S
 *
 *  Copyright (C) 2008 ARM Ltd.
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 *  This is the "shell" of the ARMv7-M processor support.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/v7m.h>
#include "proc-macros.S"

ENTRY(cpu_v7m_proc_init)
	ret	lr
ENDPROC(cpu_v7m_proc_init)

ENTRY(cpu_v7m_proc_fin)
	ret	lr
ENDPROC(cpu_v7m_proc_fin)

/*
 *	cpu_v7m_reset(loc)
 *
 *	Perform a soft reset of the system.  Put the CPU into the
 *	same state as it would be if it had been reset, and branch
 *	to what would be the reset vector.
 *
 *	- loc   - location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_v7m_reset)
	ret	r0
ENDPROC(cpu_v7m_reset)

/*
 *	cpu_v7m_do_idle()
 *
 *	Idle the processor (eg, wait for interrupt).
 *
 *	IRQs are already disabled.
 */
ENTRY(cpu_v7m_do_idle)
	wfi
	ret	lr
ENDPROC(cpu_v7m_do_idle)

ENTRY(cpu_v7m_dcache_clean_area)
	ret	lr
ENDPROC(cpu_v7m_dcache_clean_area)

/*
 * There is no MMU, so here is nothing to do.
 */
ENTRY(cpu_v7m_switch_mm)
	ret	lr
ENDPROC(cpu_v7m_switch_mm)

.globl	cpu_v7m_suspend_size
.equ	cpu_v7m_suspend_size, 0

#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_v7m_do_suspend)
	ret	lr
ENDPROC(cpu_v7m_do_suspend)

ENTRY(cpu_v7m_do_resume)
	ret	lr
ENDPROC(cpu_v7m_do_resume)
#endif

ENTRY(cpu_cm7_dcache_clean_area)
	dcache_line_size r2, r3
	movw	r3, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_DCCMVAC
	movt	r3, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_DCCMVAC

1:	str	r0, [r3]		@ clean D entry
	add	r0, r0, r2
	subs	r1, r1, r2
	bhi	1b
	dsb
	ret	lr
ENDPROC(cpu_cm7_dcache_clean_area)

ENTRY(cpu_cm7_proc_fin)
	movw	r2, #:lower16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
	movt	r2, #:upper16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
	ldr	r0, [r2]
	bic	r0, r0, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC)
	str	r0, [r2]
	ret	lr
ENDPROC(cpu_cm7_proc_fin)

	.section ".init.text", "ax"

__v7m_cm7_setup:
	mov	r8, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC | V7M_SCB_CCR_BP)
	b	__v7m_setup_cont
/*
 *	__v7m_setup
 *
 *	This should be able to cover all ARMv7-M cores.
 */
__v7m_setup:
	mov	r8, 0

__v7m_setup_cont:
	@ Configure the vector table base address
	ldr	r0, =BASEADDR_V7M_SCB
	ldr	r12, =vector_table
	str	r12, [r0, V7M_SCB_VTOR]

	@ enable UsageFault, BusFault and MemManage fault.
	ldr	r5, [r0, #V7M_SCB_SHCSR]
	orr	r5, #(V7M_SCB_SHCSR_USGFAULTENA | V7M_SCB_SHCSR_BUSFAULTENA | V7M_SCB_SHCSR_MEMFAULTENA)
	str	r5, [r0, #V7M_SCB_SHCSR]

	@ Lower the priority of the SVC and PendSV exceptions
	mov	r5, #0x80000000
	str	r5, [r0, V7M_SCB_SHPR2]	@ set SVC priority
	mov	r5, #0x00800000
	str	r5, [r0, V7M_SCB_SHPR3]	@ set PendSV priority

	@ SVC to switch to handler mode. Notice that this requires sp to
	@ point to writeable memory because the processor saves
	@ some registers to the stack.
	badr	r1, 1f
	ldr	r5, [r12, #11 * 4]	@ read the SVC vector entry
	str	r1, [r12, #11 * 4]	@ write the temporary SVC vector entry
	dsb
	mov	r6, lr			@ save LR
	ldr	sp, =init_thread_union + THREAD_START_SP
	cpsie	i
	svc	#0
1:	cpsid	i
	/* Calculate exc_ret */
	orr	r10, lr, #EXC_RET_THREADMODE_PROCESSSTACK
	ldmia	sp, {r0-r3, r12}
	str	r5, [r12, #11 * 4]	@ restore the original SVC vector entry
	mov	lr, r6			@ restore LR

	@ Special-purpose control register
	mov	r1, #1
	msr	control, r1		@ Thread mode has unprivileged access

	@ Configure caches (if implemented)
	teq     r8, #0
	stmiane	sp, {r0-r6, lr}		@ v7m_invalidate_l1 touches r0-r6
	blne	v7m_invalidate_l1
	teq     r8, #0			@ re-evaluate condition
	ldmiane	sp, {r0-r6, lr}

	@ Configure the System Control Register to ensure 8-byte stack alignment
	@ Note the STKALIGN bit is either RW or RAO.
	ldr	r0, [r0, V7M_SCB_CCR]	@ system control register
	orr	r0, #V7M_SCB_CCR_STKALIGN
	orr	r0, r0, r8
	ret	lr
ENDPROC(__v7m_setup)

/*
 * Cortex-M7 processor functions
 */
	globl_equ	cpu_cm7_proc_init,	cpu_v7m_proc_init
	globl_equ	cpu_cm7_reset,		cpu_v7m_reset
	globl_equ	cpu_cm7_do_idle,	cpu_v7m_do_idle
	globl_equ	cpu_cm7_switch_mm,	cpu_v7m_switch_mm

	define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
	define_processor_functions cm7, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1

	.section ".rodata"
	string	cpu_arch_name, "armv7m"
	string	cpu_elf_name, "v7m"
	string	cpu_v7m_name, "ARMv7-M"

	.section ".proc.info.init", "a"

.macro __v7m_proc name, initfunc, cache_fns = nop_cache_fns, hwcaps = 0,  proc_fns = v7m_processor_functions
	.long	0			/* proc_info_list.__cpu_mm_mmu_flags */
	.long	0			/* proc_info_list.__cpu_io_mmu_flags */
	initfn	\initfunc, \name
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \hwcaps
	.long	cpu_v7m_name
	.long	\proc_fns
	.long	0			/* proc_info_list.tlb */
	.long	0			/* proc_info_list.user */
	.long	\cache_fns
.endm

	/*
	 * Match ARM Cortex-M55 processor.
	 */
	.type	__v7m_cm55_proc_info, #object
__v7m_cm55_proc_info:
	.long	0x410fd220		/* ARM Cortex-M55 0xD22 */
	.long	0xff0ffff0		/* Mask off revision, patch release */
	__v7m_proc __v7m_cm55_proc_info, __v7m_cm7_setup, hwcaps = HWCAP_EDSP, cache_fns = v7m_cache_fns, proc_fns = cm7_processor_functions
	.size	__v7m_cm55_proc_info, . - __v7m_cm55_proc_info

	/*
	 * Match ARM Cortex-M33 processor.
	 */
	.type	__v7m_cm33_proc_info, #object
__v7m_cm33_proc_info:
	.long	0x410fd210		/* ARM Cortex-M33 0xD21 */
	.long	0xff0ffff0		/* Mask off revision, patch release */
	__v7m_proc __v7m_cm33_proc_info, __v7m_setup, hwcaps = HWCAP_EDSP
	.size	__v7m_cm33_proc_info, . - __v7m_cm33_proc_info

	/*
	 * Match ARM Cortex-M7 processor.
	 */
	.type	__v7m_cm7_proc_info, #object
__v7m_cm7_proc_info:
	.long	0x410fc270		/* ARM Cortex-M7 0xC27 */
	.long	0xff0ffff0		/* Mask off revision, patch release */
	__v7m_proc __v7m_cm7_proc_info, __v7m_cm7_setup, hwcaps = HWCAP_EDSP, cache_fns = v7m_cache_fns, proc_fns = cm7_processor_functions
	.size	__v7m_cm7_proc_info, . - __v7m_cm7_proc_info

	/*
	 * Match ARM Cortex-M4 processor.
	 */
	.type	__v7m_cm4_proc_info, #object
__v7m_cm4_proc_info:
	.long	0x410fc240		/* ARM Cortex-M4 0xC24 */
	.long	0xff0ffff0		/* Mask off revision, patch release */
	__v7m_proc __v7m_cm4_proc_info, __v7m_setup, hwcaps = HWCAP_EDSP
	.size	__v7m_cm4_proc_info, . - __v7m_cm4_proc_info

	/*
	 * Match ARM Cortex-M3 processor.
	 */
	.type	__v7m_cm3_proc_info, #object
__v7m_cm3_proc_info:
	.long	0x410fc230		/* ARM Cortex-M3 0xC23 */
	.long	0xff0ffff0		/* Mask off revision, patch release */
	__v7m_proc __v7m_cm3_proc_info, __v7m_setup
	.size	__v7m_cm3_proc_info, . - __v7m_cm3_proc_info

	/*
	 * Match any ARMv7-M processor core.
	 */
	.type	__v7m_proc_info, #object
__v7m_proc_info:
	.long	0x000f0000		@ Required ID value
	.long	0x000f0000		@ Mask for ID
	__v7m_proc __v7m_proc_info, __v7m_setup
	.size	__v7m_proc_info, . - __v7m_proc_info
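Editorial note: cpu_cm7_dcache_clean_area above materializes the absolute
address of the SCB cache-maintenance register with a movw/movt pair rather
than a literal-pool load, so the code works before any data accesses are
safe. A sketch of the idiom, assuming 0xE000EF68 for the v7-M DCCMVAC
register (the numeric address is an assumption for illustration; the real
code uses the BASEADDR_V7M_SCB/V7M_SCB_DCCMVAC symbols):

	movw	r3, #:lower16:0xE000EF68	@ low half of the MMIO address
	movt	r3, #:upper16:0xE000EF68	@ high half
	str	r0, [r3]			@ clean D-cache line by MVA in r0

Unlike ldr r3, =const, this never touches memory, which matters in early
init and cache-maintenance paths where the literal pool itself might not
be cleanly cacheable yet.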
aixcc-public/challenge-001-exemplar-source
7,106
arch/arm/mach-tegra/reset-handler.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2012, NVIDIA Corporation. All rights reserved.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <soc/tegra/flowctrl.h>
#include <soc/tegra/fuse.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>

#include "iomap.h"
#include "reset.h"
#include "sleep.h"

#define PMC_SCRATCH41	0x140

#ifdef CONFIG_PM_SLEEP
/*
 *	tegra_resume
 *
 *	  CPU boot vector when restarting a CPU following
 *	  an LP2 transition. Also branched to by LP0 and LP1 resume after
 *	  re-enabling sdram.
 *
 *	r6: SoC ID
 *	r8: CPU part number
 */
ENTRY(tegra_resume)
	check_cpu_part_num 0xc09, r8, r9
	bleq	v7_invalidate_l1

	cpu_id	r0
	cmp	r0, #0				@ CPU0?
 THUMB(	it	ne )
	bne	cpu_resume			@ no

	tegra_get_soc_id TEGRA_APB_MISC_BASE, r6
	/* Are we on Tegra20? */
	cmp	r6, #TEGRA20
	beq	1f				@ Yes
	/* Clear the flow controller flags for this CPU. */
	cpu_to_csr_reg	r3, r0
	mov32	r2, TEGRA_FLOW_CTRL_BASE
	ldr	r1, [r2, r3]
	/* Clear event & intr flag */
	orr	r1, r1, \
		#FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
	movw	r0, #0x3FFD	@ enable, cluster_switch, immed, bitmaps
				@ & ext flags for CPU power management
	bic	r1, r1, r0
	str	r1, [r2, r3]

1:
	mov32	r9, 0xc09
	cmp	r8, r9
	bne	end_ca9_scu_l2_resume
#ifdef CONFIG_HAVE_ARM_SCU
	/* enable SCU */
	mov32	r0, TEGRA_ARM_PERIF_BASE
	ldr	r1, [r0]
	orr	r1, r1, #1
	str	r1, [r0]
#endif
	bl	tegra_resume_trusted_foundations

#ifdef CONFIG_CACHE_L2X0
	/* L2 cache resume & re-enable */
	bl	l2c310_early_resume
#endif
end_ca9_scu_l2_resume:
	mov32	r9, 0xc0f
	cmp	r8, r9
	bleq	tegra_init_l2_for_a15

	b	cpu_resume
ENDPROC(tegra_resume)

/*
 *	tegra_resume_trusted_foundations
 *
 *	  Trusted Foundations firmware initialization.
 *
 *	Does not return if the firmware is present.
 *	Corrupted registers: r1, r2
 */
ENTRY(tegra_resume_trusted_foundations)
	/* Check whether the Trusted Foundations firmware is present. */
	mov32	r2, TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET
	ldr	r1, =__tegra_cpu_reset_handler_data_offset + \
						RESET_DATA(TF_PRESENT)
	ldr	r1, [r2, r1]
	cmp	r1, #0
	reteq	lr

 .arch_extension sec
	/*
	 * First call after suspend wakes firmware. No arguments required
	 * for some firmware versions. Downstream kernel of ASUS TF300T uses
	 * r0=3 for the wake-up notification.
	 */
	mov	r0, #3
	smc	#0

	b	cpu_resume
ENDPROC(tegra_resume_trusted_foundations)
#endif

	.align L1_CACHE_SHIFT
ENTRY(__tegra_cpu_reset_handler_start)

/*
 * __tegra_cpu_reset_handler:
 *
 * Common handler for all CPU reset events.
 *
 * Register usage within the reset handler:
 *
 *      Others: scratch
 *      R6  = SoC ID
 *      R7  = CPU present (to the OS) mask
 *      R8  = CPU in LP1 state mask
 *      R9  = CPU in LP2 state mask
 *      R10 = CPU number
 *      R11 = CPU mask
 *      R12 = pointer to reset handler data
 *
 * NOTE: This code is copied to IRAM. All code and data accesses
 *       must be position-independent.
 */

	.arm
	.align L1_CACHE_SHIFT
ENTRY(__tegra_cpu_reset_handler)

	cpsid	aif, 0x13			@ SVC mode, interrupts disabled

	tegra_get_soc_id TEGRA_APB_MISC_BASE, r6

	adr	r12, __tegra_cpu_reset_handler_data
	ldr	r5, [r12, #RESET_DATA(TF_PRESENT)]
	cmp	r5, #0
	bne	after_errata

#ifdef CONFIG_ARCH_TEGRA_2x_SOC
t20_check:
	cmp	r6, #TEGRA20
	bne	after_t20_check
t20_errata:
	# Tegra20 is a Cortex-A9 r1p1
	mrc	p15, 0, r0, c1, c0, 0	@ read system control register
	orr	r0, r0, #1 << 14	@ erratum 716044
	mcr	p15, 0, r0, c1, c0, 0	@ write system control register
	mrc	p15, 0, r0, c15, c0, 1	@ read diagnostic register
	orr	r0, r0, #1 << 4		@ erratum 742230
	orr	r0, r0, #1 << 11	@ erratum 751472
	mcr	p15, 0, r0, c15, c0, 1	@ write diagnostic register
	b	after_errata
after_t20_check:
#endif
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
t30_check:
	cmp	r6, #TEGRA30
	bne	after_t30_check
t30_errata:
	# Tegra30 is a Cortex-A9 r2p9
	mrc	p15, 0, r0, c15, c0, 1	@ read diagnostic register
	orr	r0, r0, #1 << 6		@ erratum 743622
	orr	r0, r0, #1 << 11	@ erratum 751472
	mcr	p15, 0, r0, c15, c0, 1	@ write diagnostic register
	b	after_errata
after_t30_check:
#endif
after_errata:
	mrc	p15, 0, r10, c0, c0, 5		@ MPIDR
	and	r10, r10, #0x3			@ R10 = CPU number
	mov	r11, #1
	mov	r11, r11, lsl r10		@ R11 = CPU mask

#ifdef CONFIG_SMP
	/* Does the OS know about this CPU? */
	ldr	r7, [r12, #RESET_DATA(MASK_PRESENT)]
	tst	r7, r11				@ if !present
	bleq	__die				@ CPU not present (to OS)
#endif

	/* Waking up from LP1? */
	ldr	r8, [r12, #RESET_DATA(MASK_LP1)]
	tst	r8, r11				@ if in_lp1
	beq	__is_not_lp1
	cmp	r10, #0
	bne	__die				@ only CPU0 can be here
	ldr	lr, [r12, #RESET_DATA(STARTUP_LP1)]
	cmp	lr, #0
	bleq	__die				@ no LP1 startup handler
 THUMB(	add	lr, lr, #1 )			@ switch to Thumb mode
	bx	lr
__is_not_lp1:

	/* Waking up from LP2? */
	ldr	r9, [r12, #RESET_DATA(MASK_LP2)]
	tst	r9, r11				@ if in_lp2
	beq	__is_not_lp2
	ldr	lr, [r12, #RESET_DATA(STARTUP_LP2)]
	cmp	lr, #0
	bleq	__die				@ no LP2 startup handler
	bx	lr

__is_not_lp2:

#ifdef CONFIG_SMP
	/*
	 * Can only be secondary boot (initial or hotplug)
	 * CPU0 can't be here for Tegra20/30
	 */
	cmp	r6, #TEGRA114
	beq	__no_cpu0_chk
	cmp	r10, #0
	bleq	__die				@ CPU0 cannot be here
__no_cpu0_chk:
	ldr	lr, [r12, #RESET_DATA(STARTUP_SECONDARY)]
	cmp	lr, #0
	bleq	__die				@ no secondary startup handler
	bx	lr
#endif

/*
 * We don't know why the CPU reset. Just kill it.
 * The LR register will contain the address we died at + 4.
 */

__die:
	sub	lr, lr, #4
	mov32	r7, TEGRA_PMC_BASE
	str	lr, [r7, #PMC_SCRATCH41]

	mov32	r7, TEGRA_CLK_RESET_BASE

	/* Are we on Tegra20? */
	cmp	r6, #TEGRA20
	bne	1f

#ifdef CONFIG_ARCH_TEGRA_2x_SOC
	mov32	r0, 0x1111
	mov	r1, r0, lsl r10
	str	r1, [r7, #0x340]		@ CLK_RST_CPU_CMPLX_SET
#endif
1:
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
	mov32	r6, TEGRA_FLOW_CTRL_BASE

	cmp	r10, #0
	moveq	r1, #FLOW_CTRL_HALT_CPU0_EVENTS
	moveq	r2, #FLOW_CTRL_CPU0_CSR
	movne	r1, r10, lsl #3
	addne	r2, r1, #(FLOW_CTRL_CPU1_CSR-8)
	addne	r1, r1, #(FLOW_CTRL_HALT_CPU1_EVENTS-8)

	/* Clear CPU "event" and "interrupt" flags and power gate
	   it when halting but not before it is in the "WFI" state. */
	ldr	r0, [r6, +r2]
	orr	r0, r0, #FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
	orr	r0, r0, #FLOW_CTRL_CSR_ENABLE
	str	r0, [r6, +r2]

	/* Unconditionally halt this CPU */
	mov	r0, #FLOW_CTRL_WAITEVENT
	str	r0, [r6, +r1]
	ldr	r0, [r6, +r1]			@ memory barrier

	dsb
	isb
	wfi					@ CPU should be power gated here

	/* If the CPU didn't power gate above just kill its clock. */

	mov	r0, r11, lsl #8
	str	r0, [r7, #0x348]		@ CLK_CPU_CMPLX_SET
#endif

	/* If the CPU still isn't dead, just spin here. */
	b	.
ENDPROC(__tegra_cpu_reset_handler)

	.align L1_CACHE_SHIFT
	.type	__tegra_cpu_reset_handler_data, %object
	.globl	__tegra_cpu_reset_handler_data
	.globl	__tegra_cpu_reset_handler_data_offset
	.equ	__tegra_cpu_reset_handler_data_offset, \
					. - __tegra_cpu_reset_handler_start
__tegra_cpu_reset_handler_data:
	.rept	TEGRA_RESET_DATA_SIZE
	.long	0
	.endr
	.align L1_CACHE_SHIFT

ENTRY(__tegra_cpu_reset_handler_end)
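Editorial note: the reset handler above keys every decision off a per-CPU
bit computed from MPIDR. A sketch of that sequence on its own, as used to
test this CPU against the present/LP1/LP2 bitmaps (illustrative; register
roles mirror the handler, the mask in r2 stands for any of those bitmaps):

	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
	and	r0, r0, #0x3		@ CPU number within the cluster (0..3)
	mov	r1, #1
	mov	r1, r1, lsl r0		@ r1 = 1 << cpu, this CPU's mask bit
	tst	r2, r1			@ Z clear iff this CPU's bit is set

Because the shift amount comes from a register, the same two instructions
serve any core without a per-CPU branch, and the resulting mask is reused
for every subsequent bitmap test.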
aixcc-public/challenge-001-exemplar-source
23,741
arch/arm/mach-tegra/sleep-tegra30.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2012, NVIDIA Corporation. All rights reserved.
 */

#include <linux/linkage.h>

#include <soc/tegra/flowctrl.h>
#include <soc/tegra/fuse.h>

#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/cache.h>

#include "irammap.h"
#include "sleep.h"

#define EMC_CFG				0xc
#define EMC_ADR_CFG			0x10
#define EMC_TIMING_CONTROL		0x28
#define EMC_NOP				0xdc
#define EMC_SELF_REF			0xe0
#define EMC_MRW				0xe8
#define EMC_FBIO_CFG5			0x104
#define EMC_AUTO_CAL_CONFIG		0x2a4
#define EMC_AUTO_CAL_INTERVAL		0x2a8
#define EMC_AUTO_CAL_STATUS		0x2ac
#define EMC_REQ_CTRL			0x2b0
#define EMC_CFG_DIG_DLL			0x2bc
#define EMC_EMC_STATUS			0x2b4
#define EMC_ZCAL_INTERVAL		0x2e0
#define EMC_ZQ_CAL			0x2ec
#define EMC_XM2VTTGENPADCTRL		0x310
#define EMC_XM2VTTGENPADCTRL2		0x314

#define PMC_CTRL			0x0
#define PMC_CTRL_SIDE_EFFECT_LP0 (1 << 14) /* enter LP0 when CPU pwr gated */

#define PMC_PLLP_WB0_OVERRIDE		0xf8
#define PMC_IO_DPD_REQ			0x1b8
#define PMC_IO_DPD_STATUS		0x1bc

#define CLK_RESET_CCLK_BURST		0x20
#define CLK_RESET_CCLK_DIVIDER		0x24
#define CLK_RESET_SCLK_BURST		0x28
#define CLK_RESET_SCLK_DIVIDER		0x2c

#define CLK_RESET_PLLC_BASE		0x80
#define CLK_RESET_PLLC_MISC		0x8c
#define CLK_RESET_PLLM_BASE		0x90
#define CLK_RESET_PLLM_MISC		0x9c
#define CLK_RESET_PLLP_BASE		0xa0
#define CLK_RESET_PLLP_MISC		0xac
#define CLK_RESET_PLLA_BASE		0xb0
#define CLK_RESET_PLLA_MISC		0xbc
#define CLK_RESET_PLLX_BASE		0xe0
#define CLK_RESET_PLLX_MISC		0xe4
#define CLK_RESET_PLLX_MISC3		0x518
#define CLK_RESET_PLLX_MISC3_IDDQ	3
#define CLK_RESET_PLLM_MISC_IDDQ	5
#define CLK_RESET_PLLC_MISC_IDDQ	26

#define CLK_RESET_PLLP_RESHIFT		0x528
#define CLK_RESET_PLLP_RESHIFT_DEFAULT	0x3b
#define CLK_RESET_PLLP_RESHIFT_ENABLE	0x3

#define CLK_RESET_CLK_SOURCE_MSELECT	0x3b4

#define MSELECT_CLKM			(0x3 << 30)

#define LOCK_DELAY 50 /* safety delay after lock is detected */

#define TEGRA30_POWER_HOTPLUG_SHUTDOWN	(1 << 27) /* Hotplug shutdown */

#define PLLA_STORE_MASK		(1 << 0)
#define PLLC_STORE_MASK		(1 << 1)
#define PLLM_STORE_MASK		(1 << 2)
#define PLLP_STORE_MASK		(1 << 3)
#define PLLX_STORE_MASK		(1 << 4)
#define PLLM_PMC_STORE_MASK	(1 << 5)

.macro emc_device_mask, rd, base
	ldr	\rd, [\base, #EMC_ADR_CFG]
	tst	\rd, #0x1
	moveq	\rd, #(0x1 << 8)		@ just 1 device
	movne	\rd, #(0x3 << 8)		@ 2 devices
.endm

.macro emc_timing_update, rd, base
	mov	\rd, #1
	str	\rd, [\base, #EMC_TIMING_CONTROL]
1001:
	ldr	\rd, [\base, #EMC_EMC_STATUS]
	tst	\rd, #(0x1<<23)	@ wait EMC_STATUS_TIMING_UPDATE_STALLED is clear
	bne	1001b
.endm

.macro test_pll_state, rd, test_mask
	ldr	\rd, tegra_pll_state
	tst	\rd, #\test_mask
.endm

.macro store_pll_state, rd, tmp, r_car_base, pll_base, pll_mask
	ldr	\rd, [\r_car_base, #\pll_base]
	tst	\rd, #(1 << 30)
	ldr	\rd, tegra_pll_state
	biceq	\rd, \rd, #\pll_mask
	orrne	\rd, \rd, #\pll_mask
	adr	\tmp, tegra_pll_state
	str	\rd, [\tmp]
.endm

.macro store_pllm_pmc_state, rd, tmp, pmc_base
	ldr	\rd, [\pmc_base, #PMC_PLLP_WB0_OVERRIDE]
	tst	\rd, #(1 << 12)
	ldr	\rd, tegra_pll_state
	biceq	\rd, \rd, #PLLM_PMC_STORE_MASK
	orrne	\rd, \rd, #PLLM_PMC_STORE_MASK
	adr	\tmp, tegra_pll_state
	str	\rd, [\tmp]
.endm

.macro pllm_pmc_enable, rd, pmc_base
	test_pll_state \rd, PLLM_PMC_STORE_MASK
	ldrne	\rd, [\pmc_base, #PMC_PLLP_WB0_OVERRIDE]
	orrne	\rd, \rd, #(1 << 12)
	strne	\rd, [\pmc_base, #PMC_PLLP_WB0_OVERRIDE]
.endm

.macro pll_enable, rd, r_car_base, pll_base, pll_misc, test_mask
	test_pll_state \rd, \test_mask
	beq	1f

	ldr	\rd, [\r_car_base, #\pll_base]
	tst	\rd, #(1 << 30)
	orreq	\rd, \rd, #(1 << 30)
	streq	\rd, [\r_car_base, #\pll_base]
	/* Enable lock detector */
	.if \pll_misc
	ldr	\rd, [\r_car_base, #\pll_misc]
	bic	\rd, \rd, #(1 << 18)
	str	\rd, [\r_car_base, #\pll_misc]
	ldr	\rd, [\r_car_base, #\pll_misc]
	ldr	\rd, [\r_car_base, #\pll_misc]
	orr	\rd, \rd, #(1 << 18)
	str	\rd, [\r_car_base, #\pll_misc]
	.endif
1:
.endm

.macro pll_locked, rd, r_car_base, pll_base, test_mask
	test_pll_state \rd, \test_mask
	beq	2f
1:
	ldr	\rd, [\r_car_base, #\pll_base]
	tst	\rd, #(1 << 27)
	beq	1b
2:
.endm

.macro pll_iddq_exit, rd, car, iddq, iddq_bit
	ldr	\rd, [\car, #\iddq]
	bic	\rd, \rd, #(1<<\iddq_bit)
	str	\rd, [\car, #\iddq]
.endm

.macro pll_iddq_entry, rd, car, iddq, iddq_bit
	ldr	\rd, [\car, #\iddq]
	orr	\rd, \rd, #(1<<\iddq_bit)
	str	\rd, [\car, #\iddq]
.endm

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PM_SLEEP)
/*
 * tegra30_hotplug_shutdown(void)
 *
 * Powergates the current CPU.
 * Should never return.
 */
ENTRY(tegra30_hotplug_shutdown)
	/* Powergate this CPU */
	mov	r0, #TEGRA30_POWER_HOTPLUG_SHUTDOWN
	bl	tegra30_cpu_shutdown
	ret	lr			@ should never get here
ENDPROC(tegra30_hotplug_shutdown)

/*
 * tegra30_cpu_shutdown(unsigned long flags)
 *
 * Puts the current CPU in wait-for-event mode on the flow controller
 * and powergates it -- flags (in R0) indicate the request type.
 *
 * r10 = SoC ID
 * corrupts r0-r4, r10-r12
 */
ENTRY(tegra30_cpu_shutdown)
	cpu_id	r3
	tegra_get_soc_id TEGRA_APB_MISC_VIRT, r10
	cmp	r10, #TEGRA30
	bne	_no_cpu0_chk	@ It's not Tegra30

	cmp	r3, #0
	reteq	lr		@ Must never be called for CPU 0
_no_cpu0_chk:

	ldr	r12, =TEGRA_FLOW_CTRL_VIRT
	cpu_to_csr_reg r1, r3
	add	r1, r1, r12	@ virtual CSR address for this CPU
	cpu_to_halt_reg r2, r3
	add	r2, r2, r12	@ virtual HALT_EVENTS address for this CPU

	/*
	 * Clear this CPU's "event" and "interrupt" flags and power gate
	 * it when halting but not before it is in the "WFE" state.
	 */
	movw	r12, \
		FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG | \
		FLOW_CTRL_CSR_ENABLE
	cmp	r10, #TEGRA30
	moveq	r4, #(1 << 4)			@ wfe bitmap
	movne	r4, #(1 << 8)			@ wfi bitmap
 ARM(	orr	r12, r12, r4, lsl r3	)
 THUMB(	lsl	r4, r4, r3		)
 THUMB(	orr	r12, r12, r4		)
	str	r12, [r1]

	/* Halt this CPU. */
	mov	r3, #0x400
delay_1:
	subs	r3, r3, #1			@ delay as a part of wfe workaround
	bge	delay_1
	cpsid	a				@ disable imprecise aborts.
	ldr	r3, [r1]			@ read CSR
	str	r3, [r1]			@ clear CSR

	tst	r0, #TEGRA30_POWER_HOTPLUG_SHUTDOWN
	beq	flow_ctrl_setting_for_lp2

	/* flow controller set up for hotplug */
	mov	r3, #FLOW_CTRL_WAITEVENT		@ For hotplug
	b	flow_ctrl_done
flow_ctrl_setting_for_lp2:
	/* flow controller set up for LP2 */
	cmp	r10, #TEGRA30
	moveq	r3, #FLOW_CTRL_WAIT_FOR_INTERRUPT	@ For LP2
	movne	r3, #FLOW_CTRL_WAITEVENT
	orrne	r3, r3, #FLOW_CTRL_HALT_GIC_IRQ
	orrne	r3, r3, #FLOW_CTRL_HALT_GIC_FIQ
flow_ctrl_done:
	cmp	r10, #TEGRA30
	str	r3, [r2]
	ldr	r0, [r2]
	b	wfe_war

__cpu_reset_again:
	dsb
	.align 5
	wfeeq					@ CPU should be power gated here
	wfine
wfe_war:
	b	__cpu_reset_again

	/*
	 * 38 nop's, which fills rest of wfe cache line and
	 * 4 more cachelines with nop
	 */
	.rept 38
	nop
	.endr
	b	.				@ should never get here

ENDPROC(tegra30_cpu_shutdown)
#endif

#ifdef CONFIG_PM_SLEEP
/*
 * tegra30_sleep_core_finish(unsigned long v2p)
 *
 * Enters suspend in LP0 or LP1 by turning off the MMU and jumping to
 * tegra30_tear_down_core in IRAM
 */
ENTRY(tegra30_sleep_core_finish)
	mov	r4, r0
	/* Flush, disable the L1 data cache and exit SMP */
	mov	r0, #TEGRA_FLUSH_CACHE_ALL
	bl	tegra_disable_clean_inv_dcache
	mov	r0, r4

	/*
	 * Preload all the address literals that are needed for the
	 * CPU power-gating process, to avoid loads from SDRAM, which
	 * are not supported once SDRAM is put into self-refresh.
	 * LP0 / LP1 use physical address, since the MMU needs to be
	 * disabled before putting SDRAM into self-refresh to avoid
	 * memory access due to page table walks.
	 */
	mov32	r4, TEGRA_PMC_BASE
	mov32	r5, TEGRA_CLK_RESET_BASE
	mov32	r6, TEGRA_FLOW_CTRL_BASE
	mov32	r7, TEGRA_TMRUS_BASE

	mov32	r3, tegra_shut_off_mmu
	add	r3, r3, r0

	mov32	r0, tegra30_tear_down_core
	mov32	r1, tegra30_iram_start
	sub	r0, r0, r1
	mov32	r1, TEGRA_IRAM_LPx_RESUME_AREA
	add	r0, r0, r1

	ret	r3
ENDPROC(tegra30_sleep_core_finish)

/*
 * tegra30_pm_secondary_cpu_suspend(unsigned long unused_arg)
 *
 * Enters LP2 on secondary CPU by exiting coherency and powergating the CPU.
 */
ENTRY(tegra30_pm_secondary_cpu_suspend)
	mov	r7, lr

	/* Flush and disable the L1 data cache */
	mov	r0, #TEGRA_FLUSH_CACHE_LOUIS
	bl	tegra_disable_clean_inv_dcache

	/* Powergate this CPU. */
	mov	r0, #0				@ power mode flags (!hotplug)
	bl	tegra30_cpu_shutdown
	mov	r0, #1				@ never return here
	ret	r7
ENDPROC(tegra30_pm_secondary_cpu_suspend)

/*
 * tegra30_tear_down_cpu
 *
 * Switches the CPU to enter sleep.
 */
ENTRY(tegra30_tear_down_cpu)
	mov32	r6, TEGRA_FLOW_CTRL_BASE

	b	tegra30_enter_sleep
ENDPROC(tegra30_tear_down_cpu)

/* START OF ROUTINES COPIED TO IRAM */
	.align L1_CACHE_SHIFT
	.globl tegra30_iram_start
tegra30_iram_start:

/*
 * tegra30_lp1_reset
 *
 * reset vector for LP1 restore; copied into IRAM during suspend.
 * Brings the system back up to a safe starting point (SDRAM out of
 * self-refresh, PLLC, PLLM and PLLP reenabled, CPU running on PLLX,
 * system clock running on the same PLL that it suspended at), and
 * jumps to tegra_resume to restore virtual addressing.
 * The physical address of tegra_resume is expected to be stored in
 * PMC_SCRATCH41.
 *
 * NOTE: THIS *MUST* BE RELOCATED TO TEGRA_IRAM_LPx_RESUME_AREA.
 */
ENTRY(tegra30_lp1_reset)
	/*
	 * The CPU and system bus are running at 32KHz and executing from
	 * IRAM when this code is executed; immediately switch to CLKM and
	 * enable PLLP, PLLM, PLLC, PLLA and PLLX.
	 */
	mov32	r0, TEGRA_CLK_RESET_BASE

	mov	r1, #(1 << 28)
	str	r1, [r0, #CLK_RESET_SCLK_BURST]
	str	r1, [r0, #CLK_RESET_CCLK_BURST]
	mov	r1, #0
	str	r1, [r0, #CLK_RESET_CCLK_DIVIDER]
	str	r1, [r0, #CLK_RESET_SCLK_DIVIDER]

	tegra_get_soc_id TEGRA_APB_MISC_BASE, r10
	cmp	r10, #TEGRA30
	beq	_no_pll_iddq_exit

	pll_iddq_exit r1, r0, CLK_RESET_PLLM_MISC, CLK_RESET_PLLM_MISC_IDDQ
	pll_iddq_exit r1, r0, CLK_RESET_PLLC_MISC, CLK_RESET_PLLC_MISC_IDDQ
	pll_iddq_exit r1, r0, CLK_RESET_PLLX_MISC3, CLK_RESET_PLLX_MISC3_IDDQ

	mov32	r7, TEGRA_TMRUS_BASE
	ldr	r1, [r7]
	add	r1, r1, #2
	wait_until r1, r7, r3

	/* enable PLLM via PMC */
	mov32	r2, TEGRA_PMC_BASE
	pllm_pmc_enable r1, r2

	pll_enable r1, r0, CLK_RESET_PLLM_BASE, 0, PLLM_STORE_MASK
	pll_enable r1, r0, CLK_RESET_PLLC_BASE, 0, PLLC_STORE_MASK
	pll_enable r1, r0, CLK_RESET_PLLX_BASE, 0, PLLX_STORE_MASK

	b	_pll_m_c_x_done

_no_pll_iddq_exit:
	/* enable PLLM via PMC */
	mov32	r2, TEGRA_PMC_BASE
	pllm_pmc_enable r1, r2

	pll_enable r1, r0, CLK_RESET_PLLM_BASE, CLK_RESET_PLLM_MISC, PLLM_STORE_MASK
	pll_enable r1, r0, CLK_RESET_PLLC_BASE, CLK_RESET_PLLC_MISC, PLLC_STORE_MASK

_pll_m_c_x_done:
	pll_enable r1, r0, CLK_RESET_PLLP_BASE, CLK_RESET_PLLP_MISC, PLLP_STORE_MASK
	pll_enable r1, r0, CLK_RESET_PLLA_BASE, CLK_RESET_PLLA_MISC, PLLA_STORE_MASK

	pll_locked r1, r0, CLK_RESET_PLLM_BASE, PLLM_STORE_MASK
	pll_locked r1, r0, CLK_RESET_PLLP_BASE, PLLP_STORE_MASK
	pll_locked r1, r0, CLK_RESET_PLLA_BASE, PLLA_STORE_MASK
	pll_locked r1, r0, CLK_RESET_PLLC_BASE, PLLC_STORE_MASK

	/*
	 * The CPUFreq driver could select another PLL for the CPU. PLLX will
	 * be enabled by the Tegra30 CLK driver on an as-needed basis, see
	 * tegra30_cpu_clock_resume().
	 */
	tegra_get_soc_id TEGRA_APB_MISC_BASE, r1
	cmp	r1, #TEGRA30
	beq	1f

	pll_locked r1, r0, CLK_RESET_PLLX_BASE, PLLX_STORE_MASK

	ldr	r1, [r0, #CLK_RESET_PLLP_BASE]
	bic	r1, r1, #(1<<31)	@ disable PllP bypass
	str	r1, [r0, #CLK_RESET_PLLP_BASE]

	mov	r1, #CLK_RESET_PLLP_RESHIFT_DEFAULT
	str	r1, [r0, #CLK_RESET_PLLP_RESHIFT]
1:

	mov32	r7, TEGRA_TMRUS_BASE
	ldr	r1, [r7]
	add	r1, r1, #LOCK_DELAY
	wait_until r1, r7, r3

	adr	r5, tegra_sdram_pad_save

	ldr	r4, [r5, #0x18]		@ restore CLK_SOURCE_MSELECT
	str	r4, [r0, #CLK_RESET_CLK_SOURCE_MSELECT]

	ldr	r4, [r5, #0x1C]		@ restore SCLK_BURST
	str	r4, [r0, #CLK_RESET_SCLK_BURST]

	movw	r4, #:lower16:((1 << 28) | (0x4))	@ burst policy is PLLP
	movt	r4, #:upper16:((1 << 28) | (0x4))
	str	r4, [r0, #CLK_RESET_CCLK_BURST]

	/* Restore pad power state to normal */
	ldr	r1, [r5, #0x14]		@ PMC_IO_DPD_STATUS
	mvn	r1, r1
	bic	r1, r1, #(1 << 31)
	orr	r1, r1, #(1 << 30)
	str	r1, [r2, #PMC_IO_DPD_REQ]	@ DPD_OFF

	cmp	r10, #TEGRA30
	movweq	r0, #:lower16:TEGRA_EMC_BASE	@ r0 reserved for emc base
	movteq	r0, #:upper16:TEGRA_EMC_BASE
	cmp	r10, #TEGRA114
	movweq	r0, #:lower16:TEGRA_EMC0_BASE
	movteq	r0, #:upper16:TEGRA_EMC0_BASE
	cmp	r10, #TEGRA124
	movweq	r0, #:lower16:TEGRA124_EMC_BASE
	movteq	r0, #:upper16:TEGRA124_EMC_BASE

exit_self_refresh:
	ldr	r1, [r5, #0xC]		@ restore EMC_XM2VTTGENPADCTRL
	str	r1, [r0, #EMC_XM2VTTGENPADCTRL]
	ldr	r1, [r5, #0x10]		@ restore EMC_XM2VTTGENPADCTRL2
	str	r1, [r0, #EMC_XM2VTTGENPADCTRL2]
	ldr	r1, [r5, #0x8]		@ restore EMC_AUTO_CAL_INTERVAL
	str	r1, [r0, #EMC_AUTO_CAL_INTERVAL]

	/* Relock DLL */
	ldr	r1, [r0, #EMC_CFG_DIG_DLL]
	orr	r1, r1, #(1 << 30)	@ set DLL_RESET
	str	r1, [r0, #EMC_CFG_DIG_DLL]

	emc_timing_update r1, r0

	cmp	r10, #TEGRA114
	movweq	r1, #:lower16:TEGRA_EMC1_BASE
	movteq	r1, #:upper16:TEGRA_EMC1_BASE
	cmpeq	r0, r1

	ldr	r1, [r0, #EMC_AUTO_CAL_CONFIG]
	orr	r1, r1, #(1 << 31)	@ set AUTO_CAL_ACTIVE
	orreq	r1, r1, #(1 << 27)	@ set slave mode for channel 1
	str	r1, [r0, #EMC_AUTO_CAL_CONFIG]

emc_wait_auto_cal_onetime:
	ldr	r1, [r0, #EMC_AUTO_CAL_STATUS]
	tst	r1, #(1 << 31)		@ wait until AUTO_CAL_ACTIVE is cleared
	bne	emc_wait_auto_cal_onetime

	ldr	r1, [r0, #EMC_CFG]
	bic	r1, r1, #(1 << 31)	@ disable DRAM_CLK_STOP_PD
	str	r1, [r0, #EMC_CFG]

	mov	r1, #0
	str	r1, [r0, #EMC_SELF_REF]	@ take DRAM out of self refresh
	mov	r1, #1
	cmp	r10, #TEGRA30
	streq	r1, [r0, #EMC_NOP]
	streq	r1, [r0, #EMC_NOP]

	emc_device_mask r1, r0

exit_selfrefresh_loop:
	ldr	r2, [r0, #EMC_EMC_STATUS]
	ands	r2, r2, r1
	bne	exit_selfrefresh_loop

	lsr	r1, r1, #8		@ devSel, bit0:dev0, bit1:dev1

	mov32	r7, TEGRA_TMRUS_BASE
	ldr	r2, [r0, #EMC_FBIO_CFG5]

	and	r2, r2, #3		@ check DRAM_TYPE
	cmp	r2, #2
	beq	emc_lpddr2

	/* Issue a ZQ_CAL for dev0 - DDR3 */
	mov32	r2, 0x80000011		@ DEV_SELECTION=2, LENGTH=LONG, CMD=1
	str	r2, [r0, #EMC_ZQ_CAL]
	ldr	r2, [r7]
	add	r2, r2, #10
	wait_until r2, r7, r3

	tst	r1, #2
	beq	zcal_done

	/* Issue a ZQ_CAL for dev1 - DDR3 */
	mov32	r2, 0x40000011		@ DEV_SELECTION=1, LENGTH=LONG, CMD=1
	str	r2, [r0, #EMC_ZQ_CAL]
	ldr	r2, [r7]
	add	r2, r2, #10
	wait_until r2, r7, r3
	b	zcal_done

emc_lpddr2:
	/* Issue a ZQ_CAL for dev0 - LPDDR2 */
	mov32	r2, 0x800A00AB		@ DEV_SELECTION=2, MA=10, OP=0xAB
	str	r2, [r0, #EMC_MRW]
	ldr	r2, [r7]
	add	r2, r2, #1
	wait_until r2, r7, r3

	tst	r1, #2
	beq	zcal_done

	/* Issue a ZQ_CAL for dev1 - LPDDR2 */
	mov32	r2, 0x400A00AB		@ DEV_SELECTION=1, MA=10, OP=0xAB
	str	r2, [r0, #EMC_MRW]
	ldr	r2, [r7]
	add	r2, r2, #1
	wait_until r2, r7, r3

zcal_done:
	mov	r1, #0			@ unstall all transactions
	str	r1, [r0, #EMC_REQ_CTRL]
	ldr	r1, [r5, #0x4]		@ restore EMC_ZCAL_INTERVAL
	str	r1, [r0, #EMC_ZCAL_INTERVAL]
	ldr	r1, [r5, #0x0]		@ restore EMC_CFG
	str	r1, [r0, #EMC_CFG]

	emc_timing_update r1, r0

	/* Tegra114 has a dual EMC channel, now config the other one */
	cmp	r10, #TEGRA114
	bne	__no_dual_emc_chanl
	mov32	r1, TEGRA_EMC1_BASE
	cmp	r0, r1
	movne	r0, r1
	addne	r5, r5, #0x20
	bne	exit_self_refresh
__no_dual_emc_chanl:

	mov32	r0, TEGRA_PMC_BASE
	ldr	r0, [r0, #PMC_SCRATCH41]
	ret	r0			@ jump to tegra_resume
ENDPROC(tegra30_lp1_reset)

	.align	L1_CACHE_SHIFT
tegra30_sdram_pad_address:
	.word	TEGRA_EMC_BASE + EMC_CFG				@0x0
	.word	TEGRA_EMC_BASE + EMC_ZCAL_INTERVAL			@0x4
	.word	TEGRA_EMC_BASE + EMC_AUTO_CAL_INTERVAL			@0x8
	.word	TEGRA_EMC_BASE + EMC_XM2VTTGENPADCTRL			@0xc
	.word	TEGRA_EMC_BASE + EMC_XM2VTTGENPADCTRL2			@0x10
	.word	TEGRA_PMC_BASE + PMC_IO_DPD_STATUS			@0x14
	.word	TEGRA_CLK_RESET_BASE + CLK_RESET_CLK_SOURCE_MSELECT	@0x18
	.word	TEGRA_CLK_RESET_BASE + CLK_RESET_SCLK_BURST		@0x1c
tegra30_sdram_pad_address_end:

tegra114_sdram_pad_address:
	.word	TEGRA_EMC0_BASE + EMC_CFG				@0x0
	.word	TEGRA_EMC0_BASE + EMC_ZCAL_INTERVAL			@0x4
	.word	TEGRA_EMC0_BASE + EMC_AUTO_CAL_INTERVAL			@0x8
	.word	TEGRA_EMC0_BASE + EMC_XM2VTTGENPADCTRL			@0xc
	.word	TEGRA_EMC0_BASE + EMC_XM2VTTGENPADCTRL2			@0x10
	.word	TEGRA_PMC_BASE + PMC_IO_DPD_STATUS			@0x14
	.word	TEGRA_CLK_RESET_BASE + CLK_RESET_CLK_SOURCE_MSELECT	@0x18
	.word	TEGRA_CLK_RESET_BASE + CLK_RESET_SCLK_BURST		@0x1c
	.word	TEGRA_EMC1_BASE + EMC_CFG				@0x20
	.word	TEGRA_EMC1_BASE + EMC_ZCAL_INTERVAL			@0x24
	.word	TEGRA_EMC1_BASE + EMC_AUTO_CAL_INTERVAL			@0x28
	.word	TEGRA_EMC1_BASE + EMC_XM2VTTGENPADCTRL			@0x2c
	.word	TEGRA_EMC1_BASE + EMC_XM2VTTGENPADCTRL2			@0x30
tegra114_sdram_pad_adress_end:

tegra124_sdram_pad_address:
	.word	TEGRA124_EMC_BASE + EMC_CFG				@0x0
	.word	TEGRA124_EMC_BASE + EMC_ZCAL_INTERVAL			@0x4
	.word	TEGRA124_EMC_BASE + EMC_AUTO_CAL_INTERVAL		@0x8
	.word	TEGRA124_EMC_BASE + EMC_XM2VTTGENPADCTRL		@0xc
	.word	TEGRA124_EMC_BASE + EMC_XM2VTTGENPADCTRL2		@0x10
	.word	TEGRA_PMC_BASE + PMC_IO_DPD_STATUS			@0x14
	.word	TEGRA_CLK_RESET_BASE + CLK_RESET_CLK_SOURCE_MSELECT	@0x18
	.word	TEGRA_CLK_RESET_BASE + CLK_RESET_SCLK_BURST		@0x1c
tegra124_sdram_pad_address_end:

tegra30_sdram_pad_size:
	.word	tegra30_sdram_pad_address_end - tegra30_sdram_pad_address
tegra114_sdram_pad_size:
	.word	tegra114_sdram_pad_adress_end - tegra114_sdram_pad_address

	.type	tegra_sdram_pad_save, %object
tegra_sdram_pad_save:
	.rept (tegra114_sdram_pad_adress_end - tegra114_sdram_pad_address) / 4
	.long	0
	.endr

tegra_pll_state:
	.word	0x0

/*
 * tegra30_tear_down_core
 *
 * copied into and executed from IRAM
 * puts memory in self-refresh for LP0 and LP1
 */
tegra30_tear_down_core:
	bl	tegra30_sdram_self_refresh
	bl	tegra30_switch_cpu_to_clk32k
	b	tegra30_enter_sleep

/*
 * tegra30_switch_cpu_to_clk32k
 *
 * In LP0 and LP1 all PLLs will be turned off. Switching the CPU and System CLK
 * to the 32KHz clock.
 * r4 = TEGRA_PMC_BASE
 * r5 = TEGRA_CLK_RESET_BASE
 * r6 = TEGRA_FLOW_CTRL_BASE
 * r7 = TEGRA_TMRUS_BASE
 * r10= SoC ID
 */
tegra30_switch_cpu_to_clk32k:
	/*
	 * start by jumping to CLKM to safely disable PLLs, then jump to
	 * CLKS.
	 */
	mov	r0, #(1 << 28)
	str	r0, [r5, #CLK_RESET_SCLK_BURST]
	/* 2uS delay between changing SCLK and CCLK */
	ldr	r1, [r7]
	add	r1, r1, #2
	wait_until r1, r7, r9
	str	r0, [r5, #CLK_RESET_CCLK_BURST]
	mov	r0, #0
	str	r0, [r5, #CLK_RESET_CCLK_DIVIDER]
	str	r0, [r5, #CLK_RESET_SCLK_DIVIDER]

	/* switch the clock source of mselect to be CLK_M */
	ldr	r0, [r5, #CLK_RESET_CLK_SOURCE_MSELECT]
	orr	r0, r0, #MSELECT_CLKM
	str	r0, [r5, #CLK_RESET_CLK_SOURCE_MSELECT]

	/* 2uS delay between changing SCLK and disabling PLLs */
	ldr	r1, [r7]
	add	r1, r1, #2
	wait_until r1, r7, r9

	/* store enable-state of PLLs */
	store_pll_state r0, r1, r5, CLK_RESET_PLLA_BASE, PLLA_STORE_MASK
	store_pll_state r0, r1, r5, CLK_RESET_PLLC_BASE, PLLC_STORE_MASK
	store_pll_state r0, r1, r5, CLK_RESET_PLLM_BASE, PLLM_STORE_MASK
	store_pll_state r0, r1, r5, CLK_RESET_PLLP_BASE, PLLP_STORE_MASK
	store_pll_state r0, r1, r5, CLK_RESET_PLLX_BASE, PLLX_STORE_MASK
	store_pllm_pmc_state r0, r1, r4

	/* disable PLLM via PMC in LP1 */
	ldr	r0, [r4, #PMC_PLLP_WB0_OVERRIDE]
	bic	r0, r0, #(1 << 12)
	str	r0, [r4, #PMC_PLLP_WB0_OVERRIDE]

	/* disable PLLP, PLLA, PLLC and PLLX */
	tegra_get_soc_id TEGRA_APB_MISC_BASE, r1
	cmp	r1, #TEGRA30
	ldr	r0, [r5, #CLK_RESET_PLLP_BASE]
	orrne	r0, r0, #(1 << 31)	@ enable PllP bypass on fast cluster
	bic	r0, r0, #(1 << 30)
	str	r0, [r5, #CLK_RESET_PLLP_BASE]
	beq	1f
	mov	r0, #CLK_RESET_PLLP_RESHIFT_ENABLE
	str	r0, [r5, #CLK_RESET_PLLP_RESHIFT]
1:
	ldr	r0, [r5, #CLK_RESET_PLLA_BASE]
	bic	r0, r0, #(1 << 30)
	str	r0, [r5, #CLK_RESET_PLLA_BASE]
	ldr	r0, [r5, #CLK_RESET_PLLC_BASE]
	bic	r0, r0, #(1 << 30)
	str	r0, [r5, #CLK_RESET_PLLC_BASE]
	ldr	r0, [r5, #CLK_RESET_PLLX_BASE]
	bic	r0, r0, #(1 << 30)
	str	r0, [r5, #CLK_RESET_PLLX_BASE]

	cmp	r10, #TEGRA30
	beq	_no_pll_in_iddq
	pll_iddq_entry r1, r5, CLK_RESET_PLLX_MISC3, CLK_RESET_PLLX_MISC3_IDDQ
_no_pll_in_iddq:

	/*
	 * Switch to clk_s (32KHz); bits 28:31=0
	 * Enable burst on CPU IRQ; bit 24=1
	 * Set IRQ burst clock source to clk_m; bits 10:8=0
	 */
	mov	r0, #(1 << 24)
	str	r0, [r5, #CLK_RESET_SCLK_BURST]

	ret	lr

/*
 * tegra30_enter_sleep
 *
 * uses flow controller to enter sleep state
 * executes from IRAM with SDRAM in selfrefresh when target state is LP0 or LP1
 * executes from SDRAM with target state is LP2
 * r6 = TEGRA_FLOW_CTRL_BASE
 */
tegra30_enter_sleep:
	cpu_id	r1

	cpu_to_csr_reg	r2, r1
	ldr	r0, [r6, r2]
	orr	r0, r0, #FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
	orr	r0, r0, #FLOW_CTRL_CSR_ENABLE
	str	r0, [r6, r2]

	tegra_get_soc_id TEGRA_APB_MISC_BASE, r10
	cmp	r10, #TEGRA30
	mov	r0, #FLOW_CTRL_WAIT_FOR_INTERRUPT
	orreq	r0, r0, #FLOW_CTRL_HALT_CPU_IRQ | FLOW_CTRL_HALT_CPU_FIQ
	orrne	r0, r0, #FLOW_CTRL_HALT_LIC_IRQ | FLOW_CTRL_HALT_LIC_FIQ
	cpu_to_halt_reg	r2, r1
	str	r0, [r6, r2]
	dsb
	ldr	r0, [r6, r2] /* memory barrier */

	cmp	r10, #TEGRA30
halted:
	isb
	dsb
	wfine	/* CPU should be power gated here */
	wfeeq

	/* !!!FIXME!!! Implement halt failure handler */
	b	halted

/*
 * tegra30_sdram_self_refresh
 *
 * called with MMU off and caches disabled
 * must be executed from IRAM
 * r4 = TEGRA_PMC_BASE
 * r5 = TEGRA_CLK_RESET_BASE
 * r6 = TEGRA_FLOW_CTRL_BASE
 * r7 = TEGRA_TMRUS_BASE
 * r10= SoC ID
 */
tegra30_sdram_self_refresh:

	adr	r8, tegra_sdram_pad_save
	tegra_get_soc_id TEGRA_APB_MISC_BASE, r10
	cmp	r10, #TEGRA30
	adreq	r2, tegra30_sdram_pad_address
	ldreq	r3, tegra30_sdram_pad_size
	cmp	r10, #TEGRA114
	adreq	r2, tegra114_sdram_pad_address
	ldreq	r3, tegra114_sdram_pad_size
	cmp	r10, #TEGRA124
	adreq	r2, tegra124_sdram_pad_address
	ldreq	r3, tegra30_sdram_pad_size

	mov	r9, #0

padsave:
	ldr	r0, [r2, r9]		@ r0 is the addr in the pad_address

	ldr	r1, [r0]
	str	r1, [r8, r9]		@ save the content of the addr

	add	r9, r9, #4
	cmp	r3, r9
	bne	padsave
padsave_done:

	dsb

	cmp	r10, #TEGRA30
	ldreq	r0, =TEGRA_EMC_BASE	@ r0 reserved for emc base addr
	cmp	r10, #TEGRA114
	ldreq	r0, =TEGRA_EMC0_BASE
	cmp	r10, #TEGRA124
	ldreq	r0, =TEGRA124_EMC_BASE

enter_self_refresh:
	cmp	r10, #TEGRA30
	mov	r1, #0
	str	r1, [r0, #EMC_ZCAL_INTERVAL]
	str	r1, [r0, #EMC_AUTO_CAL_INTERVAL]
	ldr	r1, [r0, #EMC_CFG]
	bic	r1, r1, #(1 << 28)
	bicne	r1, r1, #(1 << 29)
	str	r1, [r0, #EMC_CFG]	@ disable DYN_SELF_REF

	emc_timing_update r1, r0

	ldr	r1, [r7]
	add	r1, r1, #5
	wait_until r1, r7, r2

emc_wait_auto_cal:
	ldr	r1, [r0, #EMC_AUTO_CAL_STATUS]
	tst	r1, #(1 << 31)		@ wait until AUTO_CAL_ACTIVE is cleared
	bne	emc_wait_auto_cal

	mov	r1, #3
	str	r1, [r0, #EMC_REQ_CTRL]	@ stall incoming DRAM requests

emcidle:
	ldr	r1, [r0, #EMC_EMC_STATUS]
	tst	r1, #4
	beq	emcidle

	mov	r1, #1
	str	r1, [r0, #EMC_SELF_REF]

	emc_device_mask r1, r0

emcself:
	ldr	r2, [r0, #EMC_EMC_STATUS]
	and	r2, r2, r1
	cmp	r2, r1
	bne	emcself			@ loop until DDR in self-refresh

	/* Put VTTGEN in the lowest power mode */
	ldr	r1, [r0, #EMC_XM2VTTGENPADCTRL]
	mov32	r2, 0xF8F8FFFF		@ clear XM2VTTGEN_DRVUP and XM2VTTGEN_DRVDN
	and	r1, r1, r2
	str	r1, [r0, #EMC_XM2VTTGENPADCTRL]
	ldr	r1, [r0, #EMC_XM2VTTGENPADCTRL2]
	cmp	r10, #TEGRA30
	orreq	r1, r1, #7		@ set E_NO_VTTGEN
	orrne	r1, r1, #0x3f
	str	r1, [r0, #EMC_XM2VTTGENPADCTRL2]

	emc_timing_update r1, r0

	/* Tegra114 has a dual EMC channel, now config the other one */
	cmp	r10, #TEGRA114
	bne	no_dual_emc_chanl
	mov32	r1, TEGRA_EMC1_BASE
	cmp	r0, r1
	movne	r0, r1
	bne	enter_self_refresh
no_dual_emc_chanl:

	ldr	r1, [r4, #PMC_CTRL]
	tst	r1, #PMC_CTRL_SIDE_EFFECT_LP0
	bne	pmc_io_dpd_skip
	/*
	 * Put DDR_DATA, DISC_ADDR_CMD, DDR_ADDR_CMD, POP_ADDR_CMD, POP_CLK
	 * and COMP in the lowest power mode when LP1.
	 */
	mov32	r1, 0x8EC00000
	str	r1, [r4, #PMC_IO_DPD_REQ]
pmc_io_dpd_skip:

	dsb

	ret	lr

	.ltorg
/* dummy symbol for end of IRAM */
	.align L1_CACHE_SHIFT
	.global tegra30_iram_end
tegra30_iram_end:
	b	.
#endif
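Editorial note: the LP1 code above never uses a calibrated busy loop for
its delays; it polls the free-running TMRUS microsecond counter via the
wait_until macro from sleep.h. A sketch of the underlying pattern,
assuming only that TMRUS is a readable 32-bit microsecond counter (the
explicit 1f loop stands in for what wait_until expands to):

	mov32	r7, TEGRA_TMRUS_BASE
	ldr	r1, [r7]		@ now, in microseconds
	add	r1, r1, #2		@ target = now + 2us
1:	ldr	r3, [r7]		@ re-read the counter
	cmp	r3, r1
	blo	1b			@ spin until the target time passes

This stays correct while the CPU clock is being switched between PLLs and
the 32KHz oscillator, exactly the situation in which an instruction-count
delay loop would drift by orders of magnitude.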
aixcc-public/challenge-001-exemplar-source
10,268
arch/arm/mach-tegra/sleep-tegra20.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2010-2012, NVIDIA Corporation. All rights reserved. * Copyright (c) 2011, Google, Inc. * * Author: Colin Cross <ccross@android.com> * Gary King <gking@nvidia.com> */ #include <linux/linkage.h> #include <soc/tegra/flowctrl.h> #include <asm/assembler.h> #include <asm/proc-fns.h> #include <asm/cp15.h> #include <asm/cache.h> #include "irammap.h" #include "reset.h" #include "sleep.h" #define EMC_CFG 0xc #define EMC_ADR_CFG 0x10 #define EMC_NOP 0xdc #define EMC_SELF_REF 0xe0 #define EMC_REQ_CTRL 0x2b0 #define EMC_EMC_STATUS 0x2b4 #define CLK_RESET_CCLK_BURST 0x20 #define CLK_RESET_CCLK_DIVIDER 0x24 #define CLK_RESET_SCLK_BURST 0x28 #define CLK_RESET_SCLK_DIVIDER 0x2c #define CLK_RESET_PLLC_BASE 0x80 #define CLK_RESET_PLLM_BASE 0x90 #define CLK_RESET_PLLP_BASE 0xa0 #define APB_MISC_XM2CFGCPADCTRL 0x8c8 #define APB_MISC_XM2CFGDPADCTRL 0x8cc #define APB_MISC_XM2CLKCFGPADCTRL 0x8d0 #define APB_MISC_XM2COMPPADCTRL 0x8d4 #define APB_MISC_XM2VTTGENPADCTRL 0x8d8 #define APB_MISC_XM2CFGCPADCTRL2 0x8e4 #define APB_MISC_XM2CFGDPADCTRL2 0x8e8 #define PLLC_STORE_MASK (1 << 0) #define PLLM_STORE_MASK (1 << 1) #define PLLP_STORE_MASK (1 << 2) .macro test_pll_state, rd, test_mask ldr \rd, tegra_pll_state tst \rd, #\test_mask .endm .macro store_pll_state, rd, tmp, r_car_base, pll_base, pll_mask ldr \rd, [\r_car_base, #\pll_base] tst \rd, #(1 << 30) ldr \rd, tegra_pll_state biceq \rd, \rd, #\pll_mask orrne \rd, \rd, #\pll_mask adr \tmp, tegra_pll_state str \rd, [\tmp] .endm .macro pll_enable, rd, r_car_base, pll_base, test_mask test_pll_state \rd, \test_mask beq 1f ldr \rd, [\r_car_base, #\pll_base] tst \rd, #(1 << 30) orreq \rd, \rd, #(1 << 30) streq \rd, [\r_car_base, #\pll_base] 1: .endm .macro emc_device_mask, rd, base ldr \rd, [\base, #EMC_ADR_CFG] tst \rd, #(0x3 << 24) moveq \rd, #(0x1 << 8) @ just 1 device movne \rd, #(0x3 << 8) @ 2 devices .endm #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PM_SLEEP) /* * tegra20_hotplug_shutdown(void) * * puts the current cpu in reset * should never return */ ENTRY(tegra20_hotplug_shutdown) /* Put this CPU down */ cpu_id r0 bl tegra20_cpu_shutdown ret lr @ should never get here ENDPROC(tegra20_hotplug_shutdown) /* * tegra20_cpu_shutdown(int cpu) * * r0 is cpu to reset * * puts the specified CPU in wait-for-event mode on the flow controller * and puts the CPU in reset * can be called on the current cpu or another cpu * if called on the current cpu, does not return * MUST NOT BE CALLED FOR CPU 0. * * corrupts r0-r3, r12 */ ENTRY(tegra20_cpu_shutdown) cmp r0, #0 reteq lr @ must not be called for CPU 0 cpu_to_halt_reg r1, r0 ldr r3, =TEGRA_FLOW_CTRL_VIRT mov r2, #FLOW_CTRL_WAITEVENT | FLOW_CTRL_JTAG_RESUME str r2, [r3, r1] @ put flow controller in wait event mode ldr r2, [r3, r1] isb dsb movw r1, 0x1011 mov r1, r1, lsl r0 ldr r3, =TEGRA_CLK_RESET_VIRT str r1, [r3, #0x340] @ put slave CPU in reset isb dsb cpu_id r3 cmp r3, r0 beq . 
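@ if r0 named another CPU, we fall through here and return to the caller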
ret lr ENDPROC(tegra20_cpu_shutdown) #endif #ifdef CONFIG_PM_SLEEP /* * tegra20_sleep_core_finish(unsigned long v2p) * * Enters suspend in LP0 or LP1 by turning off the mmu and jumping to * tegra20_tear_down_core in IRAM */ ENTRY(tegra20_sleep_core_finish) mov r4, r0 /* Flush, disable the L1 data cache and exit SMP */ mov r0, #TEGRA_FLUSH_CACHE_ALL bl tegra_disable_clean_inv_dcache mov r0, r4 mov32 r3, tegra_shut_off_mmu add r3, r3, r0 mov32 r0, tegra20_tear_down_core mov32 r1, tegra20_iram_start sub r0, r0, r1 mov32 r1, TEGRA_IRAM_LPx_RESUME_AREA add r0, r0, r1 ret r3 ENDPROC(tegra20_sleep_core_finish) /* * tegra20_tear_down_cpu * * Switches the CPU cluster to PLL-P and enters sleep. */ ENTRY(tegra20_tear_down_cpu) bl tegra_switch_cpu_to_pllp b tegra20_enter_sleep ENDPROC(tegra20_tear_down_cpu) /* START OF ROUTINES COPIED TO IRAM */ .align L1_CACHE_SHIFT .globl tegra20_iram_start tegra20_iram_start: /* * tegra20_lp1_reset * * reset vector for LP1 restore; copied into IRAM during suspend. * Brings the system back up to a safe starting point (SDRAM out of * self-refresh, PLLC, PLLM and PLLP re-enabled, CPU running on PLLP, * system clock running on the same PLL that it suspended at), and * jumps to tegra_resume to restore virtual addressing and PLLX. * The physical address of tegra_resume is expected to be stored in * PMC_SCRATCH41. * * NOTE: THIS *MUST* BE RELOCATED TO TEGRA_IRAM_LPx_RESUME_AREA. */ ENTRY(tegra20_lp1_reset) /* * The CPU and system bus are running at 32KHz and executing from * IRAM when this code is executed; immediately switch to CLKM and * enable PLLM, PLLP, PLLC. */ mov32 r0, TEGRA_CLK_RESET_BASE mov r1, #(1 << 28) str r1, [r0, #CLK_RESET_SCLK_BURST] str r1, [r0, #CLK_RESET_CCLK_BURST] mov r1, #0 str r1, [r0, #CLK_RESET_CCLK_DIVIDER] str r1, [r0, #CLK_RESET_SCLK_DIVIDER] pll_enable r1, r0, CLK_RESET_PLLM_BASE, PLLM_STORE_MASK pll_enable r1, r0, CLK_RESET_PLLP_BASE, PLLP_STORE_MASK pll_enable r1, r0, CLK_RESET_PLLC_BASE, PLLC_STORE_MASK adr r2, tegra20_sdram_pad_address adr r4, tegra20_sdram_pad_save mov r5, #0 ldr r6, tegra20_sdram_pad_size padload: ldr r7, [r2, r5] @ r7 is the addr in the pad_address ldr r1, [r4, r5] str r1, [r7] @ restore the value in pad_save add r5, r5, #4 cmp r6, r5 bne padload padload_done: /* 255uS delay for PLL stabilization */ mov32 r7, TEGRA_TMRUS_BASE ldr r1, [r7] add r1, r1, #0xff wait_until r1, r7, r9 adr r4, tegra20_sclk_save ldr r4, [r4] str r4, [r0, #CLK_RESET_SCLK_BURST] mov32 r4, ((1 << 28) | (4)) @ burst policy is PLLP str r4, [r0, #CLK_RESET_CCLK_BURST] mov32 r0, TEGRA_EMC_BASE ldr r1, [r0, #EMC_CFG] bic r1, r1, #(1 << 31) @ disable DRAM_CLK_STOP str r1, [r0, #EMC_CFG] mov r1, #0 str r1, [r0, #EMC_SELF_REF] @ take DRAM out of self refresh mov r1, #1 str r1, [r0, #EMC_NOP] str r1, [r0, #EMC_NOP] emc_device_mask r1, r0 exit_selfrefresh_loop: ldr r2, [r0, #EMC_EMC_STATUS] ands r2, r2, r1 bne exit_selfrefresh_loop mov r1, #0 @ unstall all transactions str r1, [r0, #EMC_REQ_CTRL] mov32 r0, TEGRA_PMC_BASE ldr r0, [r0, #PMC_SCRATCH41] ret r0 @ jump to tegra_resume ENDPROC(tegra20_lp1_reset) /* * tegra20_tear_down_core * * copied into and executed from IRAM * puts memory in self-refresh for LP0 and LP1 */ tegra20_tear_down_core: bl tegra20_sdram_self_refresh bl tegra20_switch_cpu_to_clk32k b tegra20_enter_sleep /* * tegra20_switch_cpu_to_clk32k * * In LP0 and LP1 all PLLs will be turned off. Switch the CPU and system clock * to the 32KHz clock.
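* A zero burst-policy value selects clk_s (the 32KHz source); the CPU is * parked on CLKM first so the PLLs can be switched off safely.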
*/ tegra20_switch_cpu_to_clk32k: /* * start by switching to CLKM to safely disable PLLs, then switch to * CLKS. */ mov r0, #(1 << 28) str r0, [r5, #CLK_RESET_SCLK_BURST] str r0, [r5, #CLK_RESET_CCLK_BURST] mov r0, #0 str r0, [r5, #CLK_RESET_CCLK_DIVIDER] str r0, [r5, #CLK_RESET_SCLK_DIVIDER] /* 2uS delay between changing SCLK and disabling PLLs */ mov32 r7, TEGRA_TMRUS_BASE ldr r1, [r7] add r1, r1, #2 wait_until r1, r7, r9 store_pll_state r0, r1, r5, CLK_RESET_PLLC_BASE, PLLC_STORE_MASK store_pll_state r0, r1, r5, CLK_RESET_PLLM_BASE, PLLM_STORE_MASK store_pll_state r0, r1, r5, CLK_RESET_PLLP_BASE, PLLP_STORE_MASK /* disable PLLM, PLLP and PLLC */ ldr r0, [r5, #CLK_RESET_PLLM_BASE] bic r0, r0, #(1 << 30) str r0, [r5, #CLK_RESET_PLLM_BASE] ldr r0, [r5, #CLK_RESET_PLLP_BASE] bic r0, r0, #(1 << 30) str r0, [r5, #CLK_RESET_PLLP_BASE] ldr r0, [r5, #CLK_RESET_PLLC_BASE] bic r0, r0, #(1 << 30) str r0, [r5, #CLK_RESET_PLLC_BASE] /* switch to CLKS */ mov r0, #0 /* burst policy = 32KHz */ str r0, [r5, #CLK_RESET_SCLK_BURST] ret lr /* * tegra20_enter_sleep * * uses flow controller to enter sleep state * executes from IRAM with SDRAM in self-refresh when target state is LP0 or LP1 * executes from SDRAM when target state is LP2 */ tegra20_enter_sleep: mov32 r6, TEGRA_FLOW_CTRL_BASE mov r0, #FLOW_CTRL_WAIT_FOR_INTERRUPT orr r0, r0, #FLOW_CTRL_HALT_CPU_IRQ | FLOW_CTRL_HALT_CPU_FIQ cpu_id r1 cpu_to_halt_reg r1, r1 str r0, [r6, r1] dsb ldr r0, [r6, r1] /* memory barrier */ halted: dsb wfe /* CPU should be power gated here */ isb b halted /* * tegra20_sdram_self_refresh * * called with MMU off and caches disabled * puts SDRAM in self-refresh * must be executed from IRAM */ tegra20_sdram_self_refresh: mov32 r1, TEGRA_EMC_BASE @ r1 reserved for emc base addr mov r2, #3 str r2, [r1, #EMC_REQ_CTRL] @ stall incoming DRAM requests emcidle: ldr r2, [r1, #EMC_EMC_STATUS] tst r2, #4 beq emcidle mov r2, #1 str r2, [r1, #EMC_SELF_REF] emc_device_mask r2, r1 emcself: ldr r3, [r1, #EMC_EMC_STATUS] and r3, r3, r2 cmp r3, r2 bne emcself @ loop until DDR in self-refresh adr r2, tegra20_sdram_pad_address adr r3, tegra20_sdram_pad_safe adr r4, tegra20_sdram_pad_save mov r5, #0 ldr r6, tegra20_sdram_pad_size padsave: ldr r0, [r2, r5] @ r0 is the addr in the pad_address ldr r1, [r0] str r1, [r4, r5] @ save the content of the addr ldr r1, [r3, r5] str r1, [r0] @ set the save val to the addr add r5, r5, #4 cmp r6, r5 bne padsave padsave_done: mov32 r5, TEGRA_CLK_RESET_BASE ldr r0, [r5, #CLK_RESET_SCLK_BURST] adr r2, tegra20_sclk_save str r0, [r2] dsb ret lr tegra20_sdram_pad_address: .word TEGRA_APB_MISC_BASE + APB_MISC_XM2CFGCPADCTRL .word TEGRA_APB_MISC_BASE + APB_MISC_XM2CFGDPADCTRL .word TEGRA_APB_MISC_BASE + APB_MISC_XM2CLKCFGPADCTRL .word TEGRA_APB_MISC_BASE + APB_MISC_XM2COMPPADCTRL .word TEGRA_APB_MISC_BASE + APB_MISC_XM2VTTGENPADCTRL .word TEGRA_APB_MISC_BASE + APB_MISC_XM2CFGCPADCTRL2 .word TEGRA_APB_MISC_BASE + APB_MISC_XM2CFGDPADCTRL2 tegra20_sdram_pad_size: .word tegra20_sdram_pad_size - tegra20_sdram_pad_address tegra20_sdram_pad_safe: .word 0x8 .word 0x8 .word 0x0 .word 0x8 .word 0x5500 .word 0x08080040 .word 0x0 tegra20_sclk_save: .word 0x0 tegra20_sdram_pad_save: .rept (tegra20_sdram_pad_size - tegra20_sdram_pad_address) / 4 .long 0 .endr tegra_pll_state: .word 0x0 .ltorg /* dummy symbol for end of IRAM */ .align L1_CACHE_SHIFT .globl tegra20_iram_end tegra20_iram_end: b . #endif
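/* Note: the pad tables above work as a pair - tegra20_sdram_self_refresh fills tegra20_sdram_pad_save from tegra20_sdram_pad_address before writing the safe values, and the padload loop in tegra20_lp1_reset plays the saved words back in the same order. */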
aixcc-public/challenge-001-exemplar-source
3,266
arch/arm/mach-tegra/sleep.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * arch/arm/mach-tegra/sleep.S * * Copyright (c) 2010-2011, NVIDIA Corporation. * Copyright (c) 2011, Google, Inc. * * Author: Colin Cross <ccross@android.com> * Gary King <gking@nvidia.com> */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/cache.h> #include <asm/cp15.h> #include <asm/hardware/cache-l2x0.h> #include "iomap.h" #include "sleep.h" #define CLK_RESET_CCLK_BURST 0x20 #define CLK_RESET_CCLK_DIVIDER 0x24 #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PM_SLEEP) /* * tegra_disable_clean_inv_dcache * * disable, clean & invalidate the D-cache * * Corrupted registers: r1-r3, r6, r8, r9-r11 */ ENTRY(tegra_disable_clean_inv_dcache) stmfd sp!, {r0, r4-r5, r7, r9-r11, lr} dmb @ ensure ordering /* Disable the D-cache */ mrc p15, 0, r2, c1, c0, 0 tst r2, #CR_C @ see tegra_sleep_cpu() bic r2, r2, #CR_C mcrne p15, 0, r2, c1, c0, 0 isb /* Flush the D-cache */ cmp r0, #TEGRA_FLUSH_CACHE_ALL blne v7_flush_dcache_louis bleq v7_flush_dcache_all /* Turn off coherency */ exit_smp r4, r5 ldmfd sp!, {r0, r4-r5, r7, r9-r11, pc} ENDPROC(tegra_disable_clean_inv_dcache) #endif #ifdef CONFIG_PM_SLEEP /* * tegra_init_l2_for_a15 * * set up the correct L2 cache data RAM latency */ ENTRY(tegra_init_l2_for_a15) mrc p15, 0, r0, c0, c0, 5 ubfx r0, r0, #8, #4 tst r0, #1 @ only needed for cluster 0 bne _exit_init_l2_a15 mrc p15, 0x1, r0, c9, c0, 2 and r0, r0, #7 cmp r0, #2 bicne r0, r0, #7 orrne r0, r0, #2 mcrne p15, 0x1, r0, c9, c0, 2 _exit_init_l2_a15: ret lr ENDPROC(tegra_init_l2_for_a15) /* * tegra_sleep_cpu_finish(unsigned long v2p) * * enters suspend in LP2 by turning off the mmu and jumping to * tegra?_tear_down_cpu */ ENTRY(tegra_sleep_cpu_finish) mov r4, r0 /* Flush and disable the L1 data cache */ mov r0, #TEGRA_FLUSH_CACHE_ALL bl tegra_disable_clean_inv_dcache mov r0, r4 mov32 r6, tegra_tear_down_cpu ldr r1, [r6] add r1, r1, r0 mov32 r3, tegra_shut_off_mmu add r3, r3, r0 mov r0, r1 ret r3 ENDPROC(tegra_sleep_cpu_finish) /* * tegra_shut_off_mmu * * r0 = physical address to jump to with mmu off * * called with VA=PA mapping * turns off MMU, icache, dcache and branch prediction */ .align L1_CACHE_SHIFT .pushsection .idmap.text, "ax" ENTRY(tegra_shut_off_mmu) mrc p15, 0, r3, c1, c0, 0 movw r2, #CR_I | CR_Z | CR_C | CR_M bic r3, r3, r2 dsb mcr p15, 0, r3, c1, c0, 0 isb #ifdef CONFIG_CACHE_L2X0 /* Disable L2 cache */ check_cpu_part_num 0xc09, r9, r10 retne r0 mov32 r2, TEGRA_ARM_PERIF_BASE + 0x3000 ldr r3, [r2, #L2X0_CTRL] tst r3, #L2X0_CTRL_EN @ see tegra_sleep_cpu() mov r3, #0 strne r3, [r2, #L2X0_CTRL] #endif ret r0 ENDPROC(tegra_shut_off_mmu) .popsection /* * tegra_switch_cpu_to_pllp * * In LP2 the normal cpu clock pllx will be turned off. Switch the CPU to pllp */ ENTRY(tegra_switch_cpu_to_pllp) /* in LP2 idle (SDRAM active), set the CPU burst policy to PLLP */ mov32 r5, TEGRA_CLK_RESET_BASE mov r0, #(2 << 28) @ burst policy = run mode orr r0, r0, #(4 << 4) @ use PLLP in run mode burst str r0, [r5, #CLK_RESET_CCLK_BURST] mov r0, #0 str r0, [r5, #CLK_RESET_CCLK_DIVIDER] ret lr ENDPROC(tegra_switch_cpu_to_pllp) #endif
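/* For reference, the CCLK burst value above decodes roughly as: bits 31:28 select the policy state (2 = run mode) and the 4-bit source field for that state picks the clock (4 = PLLP), hence (2 << 28) | (4 << 4). */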
aixcc-public/challenge-001-exemplar-source
1,385
arch/arm/mach-rockchip/sleep.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd * Author: Tony Xie <tony.xie@rock-chips.com> */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/memory.h> .data /* * this code will be copied from * ddr to sram for system resuming, * so it is in the ".data" section. */ .align 2 ENTRY(rockchip_slp_cpu_resume) setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set svc, irqs off mrc p15, 0, r1, c0, c0, 5 and r1, r1, #0xf cmp r1, #0 /* only cpu0 can continue to run; the others halt here */ beq cpu0run secondary_loop: wfe b secondary_loop cpu0run: ldr r3, rkpm_bootdata_l2ctlr_f cmp r3, #0 beq sp_set ldr r3, rkpm_bootdata_l2ctlr mcr p15, 1, r3, c9, c0, 2 sp_set: ldr sp, rkpm_bootdata_cpusp ldr r1, rkpm_bootdata_cpu_code bx r1 ENDPROC(rockchip_slp_cpu_resume) /* Parameters filled in by the kernel */ /* Flag for whether to restore L2CTLR on resume */ .global rkpm_bootdata_l2ctlr_f rkpm_bootdata_l2ctlr_f: .long 0 /* Saved L2CTLR to restore on resume */ .global rkpm_bootdata_l2ctlr rkpm_bootdata_l2ctlr: .long 0 /* CPU resume SP addr */ .globl rkpm_bootdata_cpusp rkpm_bootdata_cpusp: .long 0 /* CPU resume function (physical address) */ .globl rkpm_bootdata_cpu_code rkpm_bootdata_cpu_code: .long 0 ENTRY(rk3288_bootram_sz) .word . - rockchip_slp_cpu_resume
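/* Usage sketch (assumption): before suspend the kernel fills in the four rkpm_bootdata_* words and copies rk3288_bootram_sz bytes starting at rockchip_slp_cpu_resume into SRAM; on wake, execution resumes at that SRAM copy and jumps to rkpm_bootdata_cpu_code. */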
aixcc-public/challenge-001-exemplar-source
39,017
arch/arm/boot/compressed/head.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/arch/arm/boot/compressed/head.S * * Copyright (C) 1996-2002 Russell King * Copyright (C) 2004 Hyok S. Choi (MPU support) */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/v7m.h> #include "efi-header.S" #ifdef __ARMEB__ #define OF_DT_MAGIC 0xd00dfeed #else #define OF_DT_MAGIC 0xedfe0dd0 #endif AR_CLASS( .arch armv7-a ) M_CLASS( .arch armv7-m ) /* * Debugging stuff * * Note that these macros must not contain any code which is not * 100% relocatable. Any attempt to do so will result in a crash. * Please select one of the following when turning on debugging. */ #ifdef DEBUG #if defined(CONFIG_DEBUG_ICEDCC) #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7) .macro loadsp, rb, tmp1, tmp2 .endm .macro writeb, ch, rb, tmp mcr p14, 0, \ch, c0, c5, 0 .endm #elif defined(CONFIG_CPU_XSCALE) .macro loadsp, rb, tmp1, tmp2 .endm .macro writeb, ch, rb, tmp mcr p14, 0, \ch, c8, c0, 0 .endm #else .macro loadsp, rb, tmp1, tmp2 .endm .macro writeb, ch, rb, tmp mcr p14, 0, \ch, c1, c0, 0 .endm #endif #else #include CONFIG_DEBUG_LL_INCLUDE .macro writeb, ch, rb, tmp #ifdef CONFIG_DEBUG_UART_FLOW_CONTROL waituartcts \tmp, \rb #endif waituarttxrdy \tmp, \rb senduart \ch, \rb busyuart \tmp, \rb .endm #if defined(CONFIG_ARCH_SA1100) .macro loadsp, rb, tmp1, tmp2 mov \rb, #0x80000000 @ physical base address add \rb, \rb, #0x00010000 @ Ser1 .endm #else .macro loadsp, rb, tmp1, tmp2 addruart \rb, \tmp1, \tmp2 .endm #endif #endif #endif .macro kputc,val mov r0, \val bl putc .endm .macro kphex,val,len mov r0, \val mov r1, #\len bl phex .endm /* * Debug kernel copy by printing the memory addresses involved */ .macro dbgkc, begin, end, cbegin, cend #ifdef DEBUG kputc #'C' kputc #':' kputc #'0' kputc #'x' kphex \begin, 8 /* Start of compressed kernel */ kputc #'-' kputc #'0' kputc #'x' kphex \end, 8 /* End of compressed kernel */ kputc #'-' kputc #'>' kputc #'0' kputc #'x' kphex \cbegin, 8 /* Start of kernel copy */ kputc #'-' kputc #'0' kputc #'x' kphex \cend, 8 /* End of kernel copy */ kputc #'\n' #endif .endm /* * Debug print of the final appended DTB location */ .macro dbgadtb, begin, size #ifdef DEBUG kputc #'D' kputc #'T' kputc #'B' kputc #':' kputc #'0' kputc #'x' kphex \begin, 8 /* Start of appended DTB */ kputc #' ' kputc #'(' kputc #'0' kputc #'x' kphex \size, 8 /* Size of appended DTB */ kputc #')' kputc #'\n' #endif .endm .macro enable_cp15_barriers, reg mrc p15, 0, \reg, c1, c0, 0 @ read SCTLR tst \reg, #(1 << 5) @ CP15BEN bit set? bne .L_\@ orr \reg, \reg, #(1 << 5) @ CP15 barrier instructions mcr p15, 0, \reg, c1, c0, 0 @ write SCTLR ARM( .inst 0xf57ff06f @ v7+ isb ) THUMB( isb ) .L_\@: .endm /* * The kernel build system appends the size of the * decompressed kernel at the end of the compressed data * in little-endian form. */ .macro get_inflated_image_size, res:req, tmp1:req, tmp2:req adr \res, .Linflated_image_size_offset ldr \tmp1, [\res] add \tmp1, \tmp1, \res @ address of inflated image size ldrb \res, [\tmp1] @ get_unaligned_le32 ldrb \tmp2, [\tmp1, #1] orr \res, \res, \tmp2, lsl #8 ldrb \tmp2, [\tmp1, #2] ldrb \tmp1, [\tmp1, #3] orr \res, \res, \tmp2, lsl #16 orr \res, \res, \tmp1, lsl #24 .endm .macro be32tocpu, val, tmp #ifndef __ARMEB__ /* convert to little endian */ rev_l \val, \tmp #endif .endm .section ".start", "ax" /* * sort out different calling conventions */ .align /* * Always enter in ARM state for CPUs that support the ARM ISA. 
* As of today (2014) that's exactly the members of the A and R * classes. */ AR_CLASS( .arm ) start: .type start,#function /* * These 7 nops along with the 1 nop immediately below for * !THUMB2 form 8 nops that make the compressed kernel bootable * on legacy ARM systems that assumed the kernel was in a.out * binary format. The boot loaders on these systems would * jump 32 bytes into the image to skip the a.out header. * With these 8 nops filling exactly 32 bytes, things still * work as expected on these legacy systems. Thumb2 mode keeps * 7 of the nops as it turns out that some boot loaders * were patching the initial instructions of the kernel, i.e. * had started to exploit this "patch area". */ __initial_nops .rept 5 __nop .endr #ifndef CONFIG_THUMB2_KERNEL __nop #else AR_CLASS( sub pc, pc, #3 ) @ A/R: switch to Thumb2 mode M_CLASS( nop.w ) @ M: already in Thumb2 mode .thumb #endif W(b) 1f .word _magic_sig @ Magic numbers to help the loader .word _magic_start @ absolute load/run zImage address .word _magic_end @ zImage end address .word 0x04030201 @ endianness flag .word 0x45454545 @ another magic number to indicate .word _magic_table @ additional data table __EFI_HEADER 1: ARM_BE8( setend be ) @ go BE8 if compiled for BE8 AR_CLASS( mrs r9, cpsr ) #ifdef CONFIG_ARM_VIRT_EXT bl __hyp_stub_install @ get into SVC mode, reversibly #endif mov r7, r1 @ save architecture ID mov r8, r2 @ save atags pointer #ifndef CONFIG_CPU_V7M /* * Booting from Angel - need to enter SVC mode and disable * FIQs/IRQs (numeric definitions from angel arm.h source). * We only do this if we were in user mode on entry. */ mrs r2, cpsr @ get current mode tst r2, #3 @ not user? bne not_angel mov r0, #0x17 @ angel_SWIreason_EnterSVC ARM( swi 0x123456 ) @ angel_SWI_ARM THUMB( svc 0xab ) @ angel_SWI_THUMB not_angel: safe_svcmode_maskall r0 msr spsr_cxsf, r9 @ Save the CPU boot mode in @ SPSR #endif /* * Note that some cache flushing and other stuff may * be needed here - is there an Angel SWI call for this? */ /* * some architecture specific code can be inserted * by the linker here, but it should preserve r7, r8, and r9. */ .text #ifdef CONFIG_AUTO_ZRELADDR /* * Find the start of physical memory. As we are executing * without the MMU on, we are in the physical address space. * We just need to get rid of any offset by aligning the * address. * * This alignment is a balance between the requirements of * different platforms - we have chosen 128MB to allow * platforms which align the start of their physical memory * to 128MB to use this feature, while allowing the zImage * to be placed within the first 128MB of memory on other * platforms. Increasing the alignment means we place * stricter alignment requirements on the start of physical * memory, but relaxing it means that we break people who * are already placing their zImage in (e.g.) the top 64MB * of this range. */ mov r0, pc and r0, r0, #0xf8000000 #ifdef CONFIG_USE_OF adr r1, LC1 #ifdef CONFIG_ARM_APPENDED_DTB /* * Look for an appended DTB. If found, we cannot use it to * validate the calculated start of physical memory, as its * memory nodes may need to be augmented by ATAGS stored at * an offset from the same start of physical memory. */ ldr r2, [r1, #4] @ get &_edata add r2, r2, r1 @ relocate it ldr r2, [r2] @ get DTB signature ldr r3, =OF_DT_MAGIC cmp r2, r3 @ do we have a DTB there? beq 1f @ if yes, skip validation #endif /* CONFIG_ARM_APPENDED_DTB */ /* * Make sure we have some stack before calling C code.
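* (fdt_check_mem_start below is C code, so sp must be valid before the call.)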
* No GOT fixup has occurred yet, but none of the code we're * about to call uses any global variables. */ ldr sp, [r1] @ get stack location add sp, sp, r1 @ apply relocation /* Validate calculated start against passed DTB */ mov r1, r8 bl fdt_check_mem_start 1: #endif /* CONFIG_USE_OF */ /* Determine final kernel image address. */ add r4, r0, #TEXT_OFFSET #else ldr r4, =zreladdr #endif /* * Set up a page table only if it won't overwrite ourself. * That means r4 < pc || r4 - 16k page directory > &_end. * Given that r4 > &_end is most infrequent, we add a rough * additional 1MB of room for a possible appended DTB. */ mov r0, pc cmp r0, r4 ldrcc r0, .Lheadroom addcc r0, r0, pc cmpcc r4, r0 orrcc r4, r4, #1 @ remember we skipped cache_on blcs cache_on restart: adr r0, LC1 ldr sp, [r0] ldr r6, [r0, #4] add sp, sp, r0 add r6, r6, r0 get_inflated_image_size r9, r10, lr #ifndef CONFIG_ZBOOT_ROM /* malloc space is above the relocated stack (64k max) */ add r10, sp, #MALLOC_SIZE #else /* * With ZBOOT_ROM the bss/stack is non relocatable, * but someone could still run this code from RAM, * in which case our reference is _edata. */ mov r10, r6 #endif mov r5, #0 @ init dtb size to 0 #ifdef CONFIG_ARM_APPENDED_DTB /* * r4 = final kernel address (possibly with LSB set) * r5 = appended dtb size (still unknown) * r6 = _edata * r7 = architecture ID * r8 = atags/device tree pointer * r9 = size of decompressed image * r10 = end of this image, including bss/stack/malloc space if non XIP * sp = stack pointer * * if there are device trees (dtb) appended to zImage, advance r10 so that the * dtb data will get relocated along with the kernel if necessary. */ ldr lr, [r6, #0] ldr r1, =OF_DT_MAGIC cmp lr, r1 bne dtb_check_done @ not found #ifdef CONFIG_ARM_ATAG_DTB_COMPAT /* * OK... Let's do some funky business here. * If we do have a DTB appended to zImage, and we do have * an ATAG list around, we want the latter to be translated * and folded into the former here. No GOT fixup has occurred * yet, but none of the code we're about to call uses any * global variable. */ /* Get the initial DTB size */ ldr r5, [r6, #4] be32tocpu r5, r1 dbgadtb r6, r5 /* 50% DTB growth should be good enough */ add r5, r5, r5, lsr #1 /* preserve 64-bit alignment */ add r5, r5, #7 bic r5, r5, #7 /* clamp to 32KB min and 1MB max */ cmp r5, #(1 << 15) movlo r5, #(1 << 15) cmp r5, #(1 << 20) movhi r5, #(1 << 20) /* temporarily relocate the stack past the DTB work space */ add sp, sp, r5 mov r0, r8 mov r1, r6 mov r2, r5 bl atags_to_fdt /* * If the returned value is 1, there is no ATAG at the location * pointed by r8. Try the typical 0x100 offset from start * of RAM and hope for the best. */ cmp r0, #1 sub r0, r4, #TEXT_OFFSET bic r0, r0, #1 add r0, r0, #0x100 mov r1, r6 mov r2, r5 bleq atags_to_fdt sub sp, sp, r5 #endif mov r8, r6 @ use the appended device tree /* * Make sure that the DTB doesn't end up in the final * kernel's .bss area. To do so, we adjust the decompressed * kernel size to compensate if that .bss size is larger * than the relocated code. */ ldr r5, =_kernel_bss_size adr r1, wont_overwrite sub r1, r6, r1 subs r1, r5, r1 addhi r9, r9, r1 /* Get the current DTB size */ ldr r5, [r6, #4] be32tocpu r5, r1 /* preserve 64-bit alignment */ add r5, r5, #7 bic r5, r5, #7 /* relocate some pointers past the appended dtb */ add r6, r6, r5 add r10, r10, r5 add sp, sp, r5 dtb_check_done: #endif /* * Check to see if we will overwrite ourselves.
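* (i.e. whether the decompressed kernel would land on top of this running * copy of the zImage and its workspace.)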
* r4 = final kernel address (possibly with LSB set) * r9 = size of decompressed image * r10 = end of this image, including bss/stack/malloc space if non XIP * We basically want: * r4 - 16k page directory >= r10 -> OK * r4 + image length <= address of wont_overwrite -> OK * Note: the possible LSB in r4 is harmless here. */ add r10, r10, #16384 cmp r4, r10 bhs wont_overwrite add r10, r4, r9 adr r9, wont_overwrite cmp r10, r9 bls wont_overwrite /* * Relocate ourselves past the end of the decompressed kernel. * r6 = _edata * r10 = end of the decompressed kernel * Because we always copy ahead, we need to do it from the end and go * backward in case the source and destination overlap. */ /* * Bump to the next 256-byte boundary with the size of * the relocation code added. This avoids overwriting * ourself when the offset is small. */ add r10, r10, #((reloc_code_end - restart + 256) & ~255) bic r10, r10, #255 /* Get start of code we want to copy and align it down. */ adr r5, restart bic r5, r5, #31 /* Relocate the hyp vector base if necessary */ #ifdef CONFIG_ARM_VIRT_EXT mrs r0, spsr and r0, r0, #MODE_MASK cmp r0, #HYP_MODE bne 1f /* * Compute the address of the hyp vectors after relocation. * Call __hyp_set_vectors with the new address so that we * can HVC again after the copy. */ adr_l r0, __hyp_stub_vectors sub r0, r0, r5 add r0, r0, r10 bl __hyp_set_vectors 1: #endif sub r9, r6, r5 @ size to copy add r9, r9, #31 @ rounded up to a multiple bic r9, r9, #31 @ ... of 32 bytes add r6, r9, r5 add r9, r9, r10 #ifdef DEBUG sub r10, r6, r5 sub r10, r9, r10 /* * We are about to copy the kernel to a new memory area. * The boundaries of the new memory area can be found in * r10 and r9, whilst r5 and r6 contain the boundaries * of the memory we are going to copy. * Calling dbgkc will help with the printing of this * information. */ dbgkc r5, r6, r10, r9 #endif 1: ldmdb r6!, {r0 - r3, r10 - r12, lr} cmp r6, r5 stmdb r9!, {r0 - r3, r10 - r12, lr} bhi 1b /* Preserve offset to relocated code. */ sub r6, r9, r6 mov r0, r9 @ start of relocated zImage add r1, sp, r6 @ end of relocated zImage bl cache_clean_flush badr r0, restart add r0, r0, r6 mov pc, r0 wont_overwrite: adr r0, LC0 ldmia r0, {r1, r2, r3, r11, r12} sub r0, r0, r1 @ calculate the delta offset /* * If delta is zero, we are running at the address we were linked at. * r0 = delta * r2 = BSS start * r3 = BSS end * r4 = kernel execution address (possibly with LSB set) * r5 = appended dtb size (0 if not present) * r7 = architecture ID * r8 = atags pointer * r11 = GOT start * r12 = GOT end * sp = stack pointer */ orrs r1, r0, r5 beq not_relocated add r11, r11, r0 add r12, r12, r0 #ifndef CONFIG_ZBOOT_ROM /* * If we're running fully PIC === CONFIG_ZBOOT_ROM = n, * we need to fix up pointers into the BSS region. * Note that the stack pointer has already been fixed up. */ add r2, r2, r0 add r3, r3, r0 /* * Relocate all entries in the GOT table. * Bump bss entries to _edata + dtb size */ 1: ldr r1, [r11, #0] @ relocate entries in the GOT add r1, r1, r0 @ This fixes up C references cmp r1, r2 @ if entry >= bss_start && cmphs r3, r1 @ bss_end > entry addhi r1, r1, r5 @ entry += dtb size str r1, [r11], #4 @ next entry cmp r11, r12 blo 1b /* bump our bss pointers too */ add r2, r2, r5 add r3, r3, r5 #else /* * Relocate entries in the GOT table. We only relocate * the entries that are outside the (relocated) BSS region. */ 1: ldr r1, [r11, #0] @ relocate entries in the GOT cmp r1, r2 @ entry < bss_start || cmphs r3, r1 @ _end < entry addlo r1, r1, r0 @ table. 
This fixes up the str r1, [r11], #4 @ C references. cmp r11, r12 blo 1b #endif not_relocated: mov r0, #0 1: str r0, [r2], #4 @ clear bss str r0, [r2], #4 str r0, [r2], #4 str r0, [r2], #4 cmp r2, r3 blo 1b /* * Did we skip the cache setup earlier? * That is indicated by the LSB in r4. * Do it now if so. */ tst r4, #1 bic r4, r4, #1 blne cache_on /* * The C runtime environment should now be setup sufficiently. * Set up some pointers, and start decompressing. * r4 = kernel execution address * r7 = architecture ID * r8 = atags pointer */ mov r0, r4 mov r1, sp @ malloc space above stack add r2, sp, #MALLOC_SIZE @ 64k max mov r3, r7 bl decompress_kernel get_inflated_image_size r1, r2, r3 mov r0, r4 @ start of inflated image add r1, r1, r0 @ end of inflated image bl cache_clean_flush bl cache_off #ifdef CONFIG_ARM_VIRT_EXT mrs r0, spsr @ Get saved CPU boot mode and r0, r0, #MODE_MASK cmp r0, #HYP_MODE @ if not booted in HYP mode... bne __enter_kernel @ boot kernel directly adr_l r0, __hyp_reentry_vectors bl __hyp_set_vectors __HVC(0) @ otherwise bounce to hyp mode b . @ should never be reached #else b __enter_kernel #endif .align 2 .type LC0, #object LC0: .word LC0 @ r1 .word __bss_start @ r2 .word _end @ r3 .word _got_start @ r11 .word _got_end @ ip .size LC0, . - LC0 .type LC1, #object LC1: .word .L_user_stack_end - LC1 @ sp .word _edata - LC1 @ r6 .size LC1, . - LC1 .Lheadroom: .word _end - restart + 16384 + 1024*1024 .Linflated_image_size_offset: .long (input_data_end - 4) - . #ifdef CONFIG_ARCH_RPC .globl params params: ldr r0, =0x10000100 @ params_phys for RPC mov pc, lr .ltorg .align #endif /* * dcache_line_size - get the minimum D-cache line size from the CTR register * on ARMv7. */ .macro dcache_line_size, reg, tmp #ifdef CONFIG_CPU_V7M movw \tmp, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_CTR movt \tmp, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_CTR ldr \tmp, [\tmp] #else mrc p15, 0, \tmp, c0, c0, 1 @ read ctr #endif lsr \tmp, \tmp, #16 and \tmp, \tmp, #0xf @ cache line size encoding mov \reg, #4 @ bytes per word mov \reg, \reg, lsl \tmp @ actual cache line size .endm /* * Turn on the cache. We need to setup some page tables so that we * can have both the I and D caches on. * * We place the page tables 16k down from the kernel execution address, * and we hope that nothing else is using it. If we're using it, we * will go pop! * * On entry, * r4 = kernel execution address * r7 = architecture number * r8 = atags pointer * On exit, * r0, r1, r2, r3, r9, r10, r12 corrupted * This routine must preserve: * r4, r7, r8 */ .align 5 cache_on: mov r3, #8 @ cache_on function b call_cache_fn /* * Initialize the highest priority protection region, PR7 * to cover all 32bit address and cacheable and bufferable. */ __armv4_mpu_cache_on: mov r0, #0x3f @ 4G, the whole mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting mcr p15, 0, r0, c6, c7, 1 mov r0, #0x80 @ PR7 mcr p15, 0, r0, c2, c0, 0 @ D-cache on mcr p15, 0, r0, c2, c0, 1 @ I-cache on mcr p15, 0, r0, c3, c0, 0 @ write-buffer on mov r0, #0xc000 mcr p15, 0, r0, c5, c0, 1 @ I-access permission mcr p15, 0, r0, c5, c0, 0 @ D-access permission mov r0, #0 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache mrc p15, 0, r0, c1, c0, 0 @ read control reg @ ...I .... ..D. WC.M orr r0, r0, #0x002d @ .... .... ..1. 11.1 orr r0, r0, #0x1000 @ ...1 .... .... .... 
mcr p15, 0, r0, c1, c0, 0 @ write control reg mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache mov pc, lr __armv3_mpu_cache_on: mov r0, #0x3f @ 4G, the whole mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting mov r0, #0x80 @ PR7 mcr p15, 0, r0, c2, c0, 0 @ cache on mcr p15, 0, r0, c3, c0, 0 @ write-buffer on mov r0, #0xc000 mcr p15, 0, r0, c5, c0, 0 @ access permission mov r0, #0 mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 /* * ?? ARMv3 MMU does not allow reading the control register, * does this really work on ARMv3 MPU? */ mrc p15, 0, r0, c1, c0, 0 @ read control reg @ .... .... .... WC.M orr r0, r0, #0x000d @ .... .... .... 11.1 /* ?? this overwrites the value constructed above? */ mov r0, #0 mcr p15, 0, r0, c1, c0, 0 @ write control reg /* ?? invalidate for the second time? */ mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 mov pc, lr #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH #define CB_BITS 0x08 #else #define CB_BITS 0x0c #endif __setup_mmu: sub r3, r4, #16384 @ Page directory size bic r3, r3, #0xff @ Align the pointer bic r3, r3, #0x3f00 /* * Initialise the page tables, turning on the cacheable and bufferable * bits for the RAM area only. */ mov r0, r3 mov r9, r0, lsr #18 mov r9, r9, lsl #18 @ start of RAM add r10, r9, #0x10000000 @ a reasonable RAM size mov r1, #0x12 @ XN|U + section mapping orr r1, r1, #3 << 10 @ AP=11 add r2, r3, #16384 1: cmp r1, r9 @ if virt > start of RAM cmphs r10, r1 @ && end of RAM > virt bic r1, r1, #0x1c @ clear XN|U + C + B orrlo r1, r1, #0x10 @ Set XN|U for non-RAM orrhs r1, r1, r6 @ set RAM section settings str r1, [r0], #4 @ 1:1 mapping add r1, r1, #1048576 teq r0, r2 bne 1b /* * If ever we are running from Flash, then we surely want the cache * to be enabled also for our execution instance... We map 2MB of it * so there is no map overlap problem for up to 1 MB compressed kernel. * If the execution is in RAM then we would only be duplicating the above. 
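* For example, assuming we execute from flash with pc = 0x04001000, the two * section entries below map 0x04000000-0x041fffff cacheable on top of the * 1:1 table built above.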
*/ orr r1, r6, #0x04 @ ensure B is set for this orr r1, r1, #3 << 10 mov r2, pc mov r2, r2, lsr #20 orr r1, r1, r2, lsl #20 add r0, r3, r2, lsl #2 str r1, [r0], #4 add r1, r1, #1048576 str r1, [r0] mov pc, lr ENDPROC(__setup_mmu) @ Enable unaligned access on v6, to allow better code generation @ for the decompressor C code: __armv6_mmu_cache_on: mrc p15, 0, r0, c1, c0, 0 @ read SCTLR bic r0, r0, #2 @ A (no unaligned access fault) orr r0, r0, #1 << 22 @ U (v6 unaligned access model) mcr p15, 0, r0, c1, c0, 0 @ write SCTLR b __armv4_mmu_cache_on __arm926ejs_mmu_cache_on: #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mov r0, #4 @ put dcache in WT mode mcr p15, 7, r0, c15, c0, 0 #endif __armv4_mmu_cache_on: mov r12, lr #ifdef CONFIG_MMU mov r6, #CB_BITS | 0x12 @ U bl __setup_mmu mov r0, #0 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs mrc p15, 0, r0, c1, c0, 0 @ read control reg orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement orr r0, r0, #0x0030 ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables bl __common_mmu_cache_on mov r0, #0 mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs #endif mov pc, r12 __armv7_mmu_cache_on: enable_cp15_barriers r11 mov r12, lr #ifdef CONFIG_MMU mrc p15, 0, r11, c0, c1, 4 @ read ID_MMFR0 tst r11, #0xf @ VMSA movne r6, #CB_BITS | 0x02 @ !XN blne __setup_mmu mov r0, #0 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer tst r11, #0xf @ VMSA mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs #endif mrc p15, 0, r0, c1, c0, 0 @ read control reg bic r0, r0, #1 << 28 @ clear SCTLR.TRE orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement orr r0, r0, #0x003c @ write buffer bic r0, r0, #2 @ A (no unaligned access fault) orr r0, r0, #1 << 22 @ U (v6 unaligned access model) @ (needed for ARM1176) #ifdef CONFIG_MMU ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables mrcne p15, 0, r6, c2, c0, 2 @ read ttb control reg orrne r0, r0, #1 @ MMU enabled movne r1, #0xfffffffd @ domain 0 = client bic r6, r6, #1 << 31 @ 32-bit translation system bic r6, r6, #(7 << 0) | (1 << 4) @ use only ttbr0 mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer mcrne p15, 0, r1, c3, c0, 0 @ load domain access control mcrne p15, 0, r6, c2, c0, 2 @ load ttb control #endif mcr p15, 0, r0, c7, c5, 4 @ ISB mcr p15, 0, r0, c1, c0, 0 @ load control register mrc p15, 0, r0, c1, c0, 0 @ and read it back mov r0, #0 mcr p15, 0, r0, c7, c5, 4 @ ISB mov pc, r12 __fa526_cache_on: mov r12, lr mov r6, #CB_BITS | 0x12 @ U bl __setup_mmu mov r0, #0 mcr p15, 0, r0, c7, c7, 0 @ Invalidate whole cache mcr p15, 0, r0, c7, c10, 4 @ drain write buffer mcr p15, 0, r0, c8, c7, 0 @ flush UTLB mrc p15, 0, r0, c1, c0, 0 @ read control reg orr r0, r0, #0x1000 @ I-cache enable bl __common_mmu_cache_on mov r0, #0 mcr p15, 0, r0, c8, c7, 0 @ flush UTLB mov pc, r12 __common_mmu_cache_on: #ifndef CONFIG_THUMB2_KERNEL #ifndef DEBUG orr r0, r0, #0x000d @ Write buffer, mmu #endif mov r1, #-1 mcr p15, 0, r3, c2, c0, 0 @ load page table pointer mcr p15, 0, r1, c3, c0, 0 @ load domain access control b 1f .align 5 @ cache line aligned 1: mcr p15, 0, r0, c1, c0, 0 @ load control register mrc p15, 0, r0, c1, c0, 0 @ and read it back to sub pc, lr, r0, lsr #32 @ properly flush pipeline #endif #define PROC_ENTRY_SIZE (4*5) /* * Here follow the relocatable cache support functions for the * various processors. This is a generic hook for locating an * entry and jumping to an instruction at the specified offset * from the start of the block. Please note this is all position * independent code. 
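* Callers pass the byte offset of the method slot in r3: cache_on uses 8, * cache_off 12 and cache_clean_flush 16.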
* * r1 = corrupted * r2 = corrupted * r3 = block offset * r9 = corrupted * r12 = corrupted */ call_cache_fn: adr r12, proc_types #ifdef CONFIG_CPU_CP15 mrc p15, 0, r9, c0, c0 @ get processor ID #elif defined(CONFIG_CPU_V7M) /* * On v7-M the processor id is located in the V7M_SCB_CPUID * register, but as cache handling is IMPLEMENTATION DEFINED on * v7-M (if existent at all) we just return early here. * If V7M_SCB_CPUID were used the cpu ID functions (i.e. * __armv7_mmu_cache_{on,off,flush}) would be selected which * use cp15 registers that are not implemented on v7-M. */ bx lr #else ldr r9, =CONFIG_PROCESSOR_ID #endif 1: ldr r1, [r12, #0] @ get value ldr r2, [r12, #4] @ get mask eor r1, r1, r9 @ (real ^ match) tst r1, r2 @ & mask ARM( addeq pc, r12, r3 ) @ call cache function THUMB( addeq r12, r3 ) THUMB( moveq pc, r12 ) @ call cache function add r12, r12, #PROC_ENTRY_SIZE b 1b /* * Table for cache operations. This is basically: * - CPU ID match * - CPU ID mask * - 'cache on' method instruction * - 'cache off' method instruction * - 'cache flush' method instruction * * We match an entry using: ((real_id ^ match) & mask) == 0 * * Writethrough caches generally only need 'on' and 'off' * methods. Writeback caches _must_ have the flush method * defined. */ .align 2 .type proc_types,#object proc_types: .word 0x41000000 @ old ARM ID .word 0xff00f000 mov pc, lr THUMB( nop ) mov pc, lr THUMB( nop ) mov pc, lr THUMB( nop ) .word 0x41007000 @ ARM7/710 .word 0xfff8fe00 mov pc, lr THUMB( nop ) mov pc, lr THUMB( nop ) mov pc, lr THUMB( nop ) .word 0x41807200 @ ARM720T (writethrough) .word 0xffffff00 W(b) __armv4_mmu_cache_on W(b) __armv4_mmu_cache_off mov pc, lr THUMB( nop ) .word 0x41007400 @ ARM74x .word 0xff00ff00 W(b) __armv3_mpu_cache_on W(b) __armv3_mpu_cache_off W(b) __armv3_mpu_cache_flush .word 0x41009400 @ ARM94x .word 0xff00ff00 W(b) __armv4_mpu_cache_on W(b) __armv4_mpu_cache_off W(b) __armv4_mpu_cache_flush .word 0x41069260 @ ARM926EJ-S (v5TEJ) .word 0xff0ffff0 W(b) __arm926ejs_mmu_cache_on W(b) __armv4_mmu_cache_off W(b) __armv5tej_mmu_cache_flush .word 0x00007000 @ ARM7 IDs .word 0x0000f000 mov pc, lr THUMB( nop ) mov pc, lr THUMB( nop ) mov pc, lr THUMB( nop ) @ Everything from here on will be the new ID system.
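@ e.g. the sa110/sa1100 entry below matches any processor ID for which @ (id ^ 0x4401a100) & 0xffffffe0 == 0.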
.word 0x4401a100 @ sa110 / sa1100 .word 0xffffffe0 W(b) __armv4_mmu_cache_on W(b) __armv4_mmu_cache_off W(b) __armv4_mmu_cache_flush .word 0x6901b110 @ sa1110 .word 0xfffffff0 W(b) __armv4_mmu_cache_on W(b) __armv4_mmu_cache_off W(b) __armv4_mmu_cache_flush .word 0x56056900 .word 0xffffff00 @ PXA9xx W(b) __armv4_mmu_cache_on W(b) __armv4_mmu_cache_off W(b) __armv4_mmu_cache_flush .word 0x56158000 @ PXA168 .word 0xfffff000 W(b) __armv4_mmu_cache_on W(b) __armv4_mmu_cache_off W(b) __armv5tej_mmu_cache_flush .word 0x56050000 @ Feroceon .word 0xff0f0000 W(b) __armv4_mmu_cache_on W(b) __armv4_mmu_cache_off W(b) __armv5tej_mmu_cache_flush #ifdef CONFIG_CPU_FEROCEON_OLD_ID /* this conflicts with the standard ARMv5TE entry */ .long 0x41009260 @ Old Feroceon .long 0xff00fff0 b __armv4_mmu_cache_on b __armv4_mmu_cache_off b __armv5tej_mmu_cache_flush #endif .word 0x66015261 @ FA526 .word 0xff01fff1 W(b) __fa526_cache_on W(b) __armv4_mmu_cache_off W(b) __fa526_cache_flush @ These match on the architecture ID .word 0x00020000 @ ARMv4T .word 0x000f0000 W(b) __armv4_mmu_cache_on W(b) __armv4_mmu_cache_off W(b) __armv4_mmu_cache_flush .word 0x00050000 @ ARMv5TE .word 0x000f0000 W(b) __armv4_mmu_cache_on W(b) __armv4_mmu_cache_off W(b) __armv4_mmu_cache_flush .word 0x00060000 @ ARMv5TEJ .word 0x000f0000 W(b) __armv4_mmu_cache_on W(b) __armv4_mmu_cache_off W(b) __armv5tej_mmu_cache_flush .word 0x0007b000 @ ARMv6 .word 0x000ff000 W(b) __armv6_mmu_cache_on W(b) __armv4_mmu_cache_off W(b) __armv6_mmu_cache_flush .word 0x000f0000 @ new CPU Id .word 0x000f0000 W(b) __armv7_mmu_cache_on W(b) __armv7_mmu_cache_off W(b) __armv7_mmu_cache_flush .word 0 @ unrecognised type .word 0 mov pc, lr THUMB( nop ) mov pc, lr THUMB( nop ) mov pc, lr THUMB( nop ) .size proc_types, . - proc_types /* * If you get a "non-constant expression in ".if" statement" * error from the assembler on this line, check that you have * not accidentally written a "b" instruction where you should * have written W(b). */ .if (. - proc_types) % PROC_ENTRY_SIZE != 0 .error "The size of one or more proc_types entries is wrong." .endif /* * Turn off the Cache and MMU. ARMv3 does not support * reading the control register, but ARMv4 does. * * On exit, * r0, r1, r2, r3, r9, r12 corrupted * This routine must preserve: * r4, r7, r8 */ .align 5 cache_off: mov r3, #12 @ cache_off function b call_cache_fn __armv4_mpu_cache_off: mrc p15, 0, r0, c1, c0 bic r0, r0, #0x000d mcr p15, 0, r0, c1, c0 @ turn MPU and cache off mov r0, #0 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer mcr p15, 0, r0, c7, c6, 0 @ flush D-Cache mcr p15, 0, r0, c7, c5, 0 @ flush I-Cache mov pc, lr __armv3_mpu_cache_off: mrc p15, 0, r0, c1, c0 bic r0, r0, #0x000d mcr p15, 0, r0, c1, c0, 0 @ turn MPU and cache off mov r0, #0 mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 mov pc, lr __armv4_mmu_cache_off: #ifdef CONFIG_MMU mrc p15, 0, r0, c1, c0 bic r0, r0, #0x000d mcr p15, 0, r0, c1, c0 @ turn MMU and cache off mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate whole cache v4 mcr p15, 0, r0, c8, c7 @ invalidate whole TLB v4 #endif mov pc, lr __armv7_mmu_cache_off: mrc p15, 0, r0, c1, c0 #ifdef CONFIG_MMU bic r0, r0, #0x0005 #else bic r0, r0, #0x0004 #endif mcr p15, 0, r0, c1, c0 @ turn MMU and cache off mov r0, #0 #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7, 0 @ invalidate whole TLB #endif mcr p15, 0, r0, c7, c5, 6 @ invalidate BTC mcr p15, 0, r0, c7, c10, 4 @ DSB mcr p15, 0, r0, c7, c5, 4 @ ISB mov pc, lr /* * Clean and flush the cache to maintain consistency. 
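* The per-CPU method is selected via call_cache_fn with r3 = 16, after the * end address has been stashed in r11.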
* * On entry, * r0 = start address * r1 = end address (exclusive) * On exit, * r1, r2, r3, r9, r10, r11, r12 corrupted * This routine must preserve: * r4, r6, r7, r8 */ .align 5 cache_clean_flush: mov r3, #16 mov r11, r1 b call_cache_fn __armv4_mpu_cache_flush: tst r4, #1 movne pc, lr mov r2, #1 mov r3, #0 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache mov r1, #7 << 5 @ 8 segments 1: orr r3, r1, #63 << 26 @ 64 entries 2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 5 bcs 1b @ segments 7 to 0 teq r2, #0 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcr p15, 0, ip, c7, c10, 4 @ drain WB mov pc, lr __fa526_cache_flush: tst r4, #1 movne pc, lr mov r1, #0 mcr p15, 0, r1, c7, c14, 0 @ clean and invalidate D cache mcr p15, 0, r1, c7, c5, 0 @ flush I cache mcr p15, 0, r1, c7, c10, 4 @ drain WB mov pc, lr __armv6_mmu_cache_flush: mov r1, #0 tst r4, #1 mcreq p15, 0, r1, c7, c14, 0 @ clean+invalidate D mcr p15, 0, r1, c7, c5, 0 @ invalidate I+BTB mcreq p15, 0, r1, c7, c15, 0 @ clean+invalidate unified mcr p15, 0, r1, c7, c10, 4 @ drain WB mov pc, lr __armv7_mmu_cache_flush: enable_cp15_barriers r10 tst r4, #1 bne iflush mrc p15, 0, r10, c0, c1, 5 @ read ID_MMFR1 tst r10, #0xf << 16 @ hierarchical cache (ARMv7) mov r10, #0 beq hierarchical mcr p15, 0, r10, c7, c14, 0 @ clean+invalidate D b iflush hierarchical: dcache_line_size r1, r2 @ r1 := dcache min line size sub r2, r1, #1 @ r2 := line size mask bic r0, r0, r2 @ round down start to line size sub r11, r11, #1 @ end address is exclusive bic r11, r11, r2 @ round down end to line size 0: cmp r0, r11 @ finished? bgt iflush mcr p15, 0, r0, c7, c14, 1 @ Dcache clean/invalidate by VA add r0, r0, r1 b 0b iflush: mcr p15, 0, r10, c7, c10, 4 @ DSB mcr p15, 0, r10, c7, c5, 0 @ invalidate I+BTB mcr p15, 0, r10, c7, c10, 4 @ DSB mcr p15, 0, r10, c7, c5, 4 @ ISB mov pc, lr __armv5tej_mmu_cache_flush: tst r4, #1 movne pc, lr 1: mrc p15, 0, APSR_nzcv, c7, c14, 3 @ test,clean,invalidate D cache bne 1b mcr p15, 0, r0, c7, c5, 0 @ flush I cache mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr __armv4_mmu_cache_flush: tst r4, #1 movne pc, lr mov r2, #64*1024 @ default: 32K dcache size (*2) mov r11, #32 @ default: 32 byte line size mrc p15, 0, r3, c0, c0, 1 @ read cache type teq r3, r9 @ cache ID register present? beq no_cache_id mov r1, r3, lsr #18 and r1, r1, #7 mov r2, #1024 mov r2, r2, lsl r1 @ base dcache size *2 tst r3, #1 << 14 @ test M bit addne r2, r2, r2, lsr #1 @ +1/2 size if M == 1 mov r3, r3, lsr #12 and r3, r3, #3 mov r11, #8 mov r11, r11, lsl r3 @ cache line size in bytes no_cache_id: mov r1, pc bic r1, r1, #63 @ align to longest cache line add r2, r1, r2 1: ARM( ldr r3, [r1], r11 ) @ s/w flush D cache THUMB( ldr r3, [r1] ) @ s/w flush D cache THUMB( add r1, r1, r11 ) teq r1, r2 bne 1b mcr p15, 0, r1, c7, c5, 0 @ flush I cache mcr p15, 0, r1, c7, c6, 0 @ flush D cache mcr p15, 0, r1, c7, c10, 4 @ drain WB mov pc, lr __armv3_mmu_cache_flush: __armv3_mpu_cache_flush: tst r4, #1 movne pc, lr mov r1, #0 mcr p15, 0, r1, c7, c0, 0 @ invalidate whole cache v3 mov pc, lr /* * Various debugging routines for printing hex characters and * memory, which again must be relocatable. */ #ifdef DEBUG .align 2 .type phexbuf,#object phexbuf: .space 12 .size phexbuf, . 
- phexbuf @ phex corrupts {r0, r1, r2, r3} phex: adr r3, phexbuf mov r2, #0 strb r2, [r3, r1] 1: subs r1, r1, #1 movmi r0, r3 bmi puts and r2, r0, #15 mov r0, r0, lsr #4 cmp r2, #10 addge r2, r2, #7 add r2, r2, #'0' strb r2, [r3, r1] b 1b @ puts corrupts {r0, r1, r2, r3} puts: loadsp r3, r2, r1 1: ldrb r2, [r0], #1 teq r2, #0 moveq pc, lr 2: writeb r2, r3, r1 mov r1, #0x00020000 3: subs r1, r1, #1 bne 3b teq r2, #'\n' moveq r2, #'\r' beq 2b teq r0, #0 bne 1b mov pc, lr @ putc corrupts {r0, r1, r2, r3} putc: mov r2, r0 loadsp r3, r1, r0 mov r0, #0 b 2b @ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr} memdump: mov r12, r0 mov r10, lr mov r11, #0 2: mov r0, r11, lsl #2 add r0, r0, r12 mov r1, #8 bl phex mov r0, #':' bl putc 1: mov r0, #' ' bl putc ldr r0, [r12, r11, lsl #2] mov r1, #8 bl phex and r0, r11, #7 teq r0, #3 moveq r0, #' ' bleq putc and r0, r11, #7 add r11, r11, #1 teq r0, #7 bne 1b mov r0, #'\n' bl putc cmp r11, #64 blt 2b mov pc, r10 #endif .ltorg #ifdef CONFIG_ARM_VIRT_EXT .align 5 __hyp_reentry_vectors: W(b) . @ reset W(b) . @ undef #ifdef CONFIG_EFI_STUB W(b) __enter_kernel_from_hyp @ hvc from HYP #else W(b) . @ svc #endif W(b) . @ pabort W(b) . @ dabort W(b) __enter_kernel @ hyp W(b) . @ irq W(b) . @ fiq #endif /* CONFIG_ARM_VIRT_EXT */ __enter_kernel: mov r0, #0 @ must be 0 mov r1, r7 @ restore architecture number mov r2, r8 @ restore atags pointer ARM( mov pc, r4 ) @ call kernel M_CLASS( add r4, r4, #1 ) @ enter in Thumb mode for M class THUMB( bx r4 ) @ entry point is always ARM for A/R classes reloc_code_end: #ifdef CONFIG_EFI_STUB __enter_kernel_from_hyp: mrc p15, 4, r0, c1, c0, 0 @ read HSCTLR bic r0, r0, #0x5 @ disable MMU and caches mcr p15, 4, r0, c1, c0, 0 @ write HSCTLR isb b __enter_kernel ENTRY(efi_enter_kernel) mov r4, r0 @ preserve image base mov r8, r1 @ preserve DT pointer adr_l r0, call_cache_fn adr r1, 0f @ clean the region of code we bl cache_clean_flush @ may run with the MMU off #ifdef CONFIG_ARM_VIRT_EXT @ @ The EFI spec does not support booting on ARM in HYP mode, @ since it mandates that the MMU and caches are on, with all @ 32-bit addressable DRAM mapped 1:1 using short descriptors. @ @ While the EDK2 reference implementation adheres to this, @ U-Boot might decide to enter the EFI stub in HYP mode @ anyway, with the MMU and caches either on or off. @ mrs r0, cpsr @ get the current mode msr spsr_cxsf, r0 @ record boot mode and r0, r0, #MODE_MASK @ are we running in HYP mode? cmp r0, #HYP_MODE bne .Lefi_svc mrc p15, 4, r1, c1, c0, 0 @ read HSCTLR tst r1, #0x1 @ MMU enabled at HYP? beq 1f @ @ When running in HYP mode with the caches on, we're better @ off just carrying on using the cached 1:1 mapping that the @ firmware provided. Set up the HYP vectors so HVC instructions @ issued from HYP mode take us to the correct handler code. We @ will disable the MMU before jumping to the kernel proper. @ ARM( bic r1, r1, #(1 << 30) ) @ clear HSCTLR.TE THUMB( orr r1, r1, #(1 << 30) ) @ set HSCTLR.TE mcr p15, 4, r1, c1, c0, 0 adr r0, __hyp_reentry_vectors mcr p15, 4, r0, c12, c0, 0 @ set HYP vector base (HVBAR) isb b .Lefi_hyp @ @ When running in HYP mode with the caches off, we need to drop @ into SVC mode now, and let the decompressor set up its cached @ 1:1 mapping as usual. 
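@ (Setting the LSB of r4 below records that cache_on has not run yet.)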
@ 1: mov r9, r4 @ preserve image base bl __hyp_stub_install @ install HYP stub vectors safe_svcmode_maskall r1 @ drop to SVC mode msr spsr_cxsf, r0 @ record boot mode orr r4, r9, #1 @ restore image base and set LSB b .Lefi_hyp .Lefi_svc: #endif mrc p15, 0, r0, c1, c0, 0 @ read SCTLR tst r0, #0x1 @ MMU enabled? orreq r4, r4, #1 @ set LSB if not .Lefi_hyp: mov r0, r8 @ DT start add r1, r8, r2 @ DT end bl cache_clean_flush adr r0, 0f @ switch to our stack ldr sp, [r0] add sp, sp, r0 mov r5, #0 @ appended DTB size mov r7, #0xFFFFFFFF @ machine ID b wont_overwrite ENDPROC(efi_enter_kernel) 0: .long .L_user_stack_end - . #endif .align .section ".stack", "aw", %nobits .L_user_stack: .space 4096 .L_user_stack_end:
aixcc-public/challenge-001-exemplar-source
4,204
arch/arm/boot/compressed/efi-header.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013-2017 Linaro Ltd * Authors: Roy Franz <roy.franz@linaro.org> * Ard Biesheuvel <ard.biesheuvel@linaro.org> */ #include <linux/pe.h> #include <linux/sizes.h> .macro __nop AR_CLASS( mov r0, r0 ) M_CLASS( nop.w ) .endm .macro __initial_nops #ifdef CONFIG_EFI_STUB @ This is a two-instruction NOP, which happens to bear the @ PE/COFF signature "MZ" in the first two bytes, so the kernel @ is accepted as an EFI binary. Booting via the UEFI stub @ will not execute those instructions, but the ARM/Linux @ boot protocol does, so we need some NOPs here. .inst MZ_MAGIC | (0xe225 << 16) @ eor r5, r5, 0x4d000 eor r5, r5, 0x4d000 @ undo previous insn #else __nop __nop #endif .endm .macro __EFI_HEADER #ifdef CONFIG_EFI_STUB .set start_offset, __efi_start - start .org start + 0x3c @ @ The PE header can be anywhere in the file, but for @ simplicity we keep it together with the MSDOS header @ The offset to the PE/COFF header needs to be at offset @ 0x3C in the MSDOS header. @ The only 2 fields of the MSDOS header that are used are this @ PE/COFF offset, and the "MZ" bytes at offset 0x0. @ .long pe_header - start @ Offset to the PE header. pe_header: .long PE_MAGIC coff_header: .short IMAGE_FILE_MACHINE_THUMB @ Machine .short section_count @ NumberOfSections .long 0 @ TimeDateStamp .long 0 @ PointerToSymbolTable .long 0 @ NumberOfSymbols .short section_table - optional_header @ SizeOfOptionalHeader .short IMAGE_FILE_32BIT_MACHINE | \ IMAGE_FILE_DEBUG_STRIPPED | \ IMAGE_FILE_EXECUTABLE_IMAGE | \ IMAGE_FILE_LINE_NUMS_STRIPPED @ Characteristics #define __pecoff_code_size (__pecoff_data_start - __efi_start) optional_header: .short PE_OPT_MAGIC_PE32 @ PE32 format .byte 0x02 @ MajorLinkerVersion .byte 0x14 @ MinorLinkerVersion .long __pecoff_code_size @ SizeOfCode .long __pecoff_data_size @ SizeOfInitializedData .long 0 @ SizeOfUninitializedData .long efi_pe_entry - start @ AddressOfEntryPoint .long start_offset @ BaseOfCode .long __pecoff_data_start - start @ BaseOfData extra_header_fields: .long 0 @ ImageBase .long SZ_4K @ SectionAlignment .long SZ_512 @ FileAlignment .short 0 @ MajorOsVersion .short 0 @ MinorOsVersion .short LINUX_EFISTUB_MAJOR_VERSION @ MajorImageVersion .short LINUX_EFISTUB_MINOR_VERSION @ MinorImageVersion .short 0 @ MajorSubsystemVersion .short 0 @ MinorSubsystemVersion .long 0 @ Win32VersionValue .long __pecoff_end - start @ SizeOfImage .long start_offset @ SizeOfHeaders .long 0 @ CheckSum .short IMAGE_SUBSYSTEM_EFI_APPLICATION @ Subsystem .short 0 @ DllCharacteristics .long 0 @ SizeOfStackReserve .long 0 @ SizeOfStackCommit .long 0 @ SizeOfHeapReserve .long 0 @ SizeOfHeapCommit .long 0 @ LoaderFlags .long (section_table - .) 
/ 8 @ NumberOfRvaAndSizes .quad 0 @ ExportTable .quad 0 @ ImportTable .quad 0 @ ResourceTable .quad 0 @ ExceptionTable .quad 0 @ CertificationTable .quad 0 @ BaseRelocationTable section_table: .ascii ".text\0\0\0" .long __pecoff_code_size @ VirtualSize .long __efi_start @ VirtualAddress .long __pecoff_code_size @ SizeOfRawData .long __efi_start @ PointerToRawData .long 0 @ PointerToRelocations .long 0 @ PointerToLineNumbers .short 0 @ NumberOfRelocations .short 0 @ NumberOfLineNumbers .long IMAGE_SCN_CNT_CODE | \ IMAGE_SCN_MEM_READ | \ IMAGE_SCN_MEM_EXECUTE @ Characteristics .ascii ".data\0\0\0" .long __pecoff_data_size @ VirtualSize .long __pecoff_data_start - start @ VirtualAddress .long __pecoff_data_rawsize @ SizeOfRawData .long __pecoff_data_start - start @ PointerToRawData .long 0 @ PointerToRelocations .long 0 @ PointerToLineNumbers .short 0 @ NumberOfRelocations .short 0 @ NumberOfLineNumbers .long IMAGE_SCN_CNT_INITIALIZED_DATA | \ IMAGE_SCN_MEM_READ | \ IMAGE_SCN_MEM_WRITE @ Characteristics .set section_count, (. - section_table) / 40 .align 12 __efi_start: #endif .endm
aixcc-public/challenge-001-exemplar-source
2,722
arch/arm/boot/compressed/ll_char_wr.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/arch/arm/lib/ll_char_wr.S * * Copyright (C) 1995, 1996 Russell King. * * Speedups & 1bpp code (C) 1996 Philip Blundell & Russell King. * * 10-04-96 RMK Various cleanups & reduced register usage. * 08-04-98 RMK Shifts re-ordered */ @ Regs: [] = corruptible @ {} = used @ () = do not use #include <linux/linkage.h> #include <asm/assembler.h> .text LC0: .word LC0 .word bytes_per_char_h .word video_size_row .word acorndata_8x8 .word con_charconvtable /* * r0 = ptr * r1 = char * r2 = white */ ENTRY(ll_write_char) stmfd sp!, {r4 - r7, lr} @ @ Smashable regs: {r0 - r3}, [r4 - r7], (r8 - fp), [ip], (sp), [lr], (pc) @ /* * calculate offset into character table */ mov r1, r1, lsl #3 /* * calculate offset required for each row. */ adr ip, LC0 ldmia ip, {r3, r4, r5, r6, lr} sub ip, ip, r3 add r6, r6, ip add lr, lr, ip ldr r4, [r4, ip] ldr r5, [r5, ip] /* * Go to resolution-dependent routine... */ cmp r4, #4 blt Lrow1bpp add r0, r0, r5, lsl #3 @ Move to bottom of character orr r1, r1, #7 ldrb r7, [r6, r1] teq r4, #8 beq Lrow8bpplp @ @ Smashable regs: {r0 - r3}, [r4], {r5 - r7}, (r8 - fp), [ip], (sp), {lr}, (pc) @ Lrow4bpplp: ldr r7, [lr, r7, lsl #2] mul r7, r2, r7 sub r1, r1, #1 @ avoid using r7 directly after str r7, [r0, -r5]! ldrb r7, [r6, r1] ldr r7, [lr, r7, lsl #2] mul r7, r2, r7 tst r1, #7 @ avoid using r7 directly after str r7, [r0, -r5]! subne r1, r1, #1 ldrbne r7, [r6, r1] bne Lrow4bpplp ldmfd sp!, {r4 - r7, pc} @ @ Smashable regs: {r0 - r3}, [r4], {r5 - r7}, (r8 - fp), [ip], (sp), {lr}, (pc) @ Lrow8bpplp: mov ip, r7, lsr #4 ldr ip, [lr, ip, lsl #2] mul r4, r2, ip and ip, r7, #15 @ avoid r4 ldr ip, [lr, ip, lsl #2] @ avoid r4 mul ip, r2, ip @ avoid r4 sub r1, r1, #1 @ avoid ip sub r0, r0, r5 @ avoid ip stmia r0, {r4, ip} ldrb r7, [r6, r1] mov ip, r7, lsr #4 ldr ip, [lr, ip, lsl #2] mul r4, r2, ip and ip, r7, #15 @ avoid r4 ldr ip, [lr, ip, lsl #2] @ avoid r4 mul ip, r2, ip @ avoid r4 tst r1, #7 @ avoid ip sub r0, r0, r5 @ avoid ip stmia r0, {r4, ip} subne r1, r1, #1 ldrbne r7, [r6, r1] bne Lrow8bpplp ldmfd sp!, {r4 - r7, pc} @ @ Smashable regs: {r0 - r3}, [r4], {r5, r6}, [r7], (r8 - fp), [ip], (sp), [lr], (pc) @ Lrow1bpp: add r6, r6, r1 ldmia r6, {r4, r7} strb r4, [r0], r5 mov r4, r4, lsr #8 strb r4, [r0], r5 mov r4, r4, lsr #8 strb r4, [r0], r5 mov r4, r4, lsr #8 strb r4, [r0], r5 strb r7, [r0], r5 mov r7, r7, lsr #8 strb r7, [r0], r5 mov r7, r7, lsr #8 strb r7, [r0], r5 mov r7, r7, lsr #8 strb r7, [r0], r5 ldmfd sp!, {r4 - r7, pc} .bss ENTRY(con_charconvtable) .space 1024
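@ con_charconvtable: 256 word-sized entries; the 4bpp/8bpp loops above use it @ to expand font bitmap bytes (or nibbles) into per-pixel masks that are then @ multiplied by the colour in r2.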
aixcc-public/challenge-001-exemplar-source
1,291
arch/arm/boot/compressed/head-sa1100.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/arch/arm/boot/compressed/head-sa1100.S
 *
 * Copyright (C) 1999 Nicolas Pitre <nico@fluxnic.net>
 *
 * SA1100 specific tweaks.  This is merged into head.S by the linker.
 *
 */

#include <linux/linkage.h>
#include <asm/mach-types.h>

		.section	".start", "ax"
		.arch	armv4

__SA1100_start:

		@ Preserve r8/r7 i.e. kernel entry values
#ifdef CONFIG_SA1100_COLLIE
		mov	r7, #MACH_TYPE_COLLIE
#endif
#ifdef CONFIG_SA1100_SIMPAD	@ UNTIL we've something like an open bootldr
		mov	r7, #MACH_TYPE_SIMPAD	@ should be 87
#endif

		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		ands	r0, r0, #0x0d
		beq	99f

		@ Data cache might be active.
		@ Be sure to flush kernel binary out of the cache,
		@ whatever state it is, before it is turned off.
		@ This is done by fetching through currently executed
		@ memory to be sure we hit the same cache.
		bic	r2, pc, #0x1f
		add	r3, r2, #0x4000		@ 16 kb is quite enough...
1:		ldr	r0, [r2], #32
		teq	r2, r3
		bne	1b
		mcr	p15, 0, r0, c7, c10, 4	@ drain WB
		mcr	p15, 0, r0, c7, c7, 0	@ flush I & D caches

		@ disabling MMU and caches
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		bic	r0, r0, #0x0d		@ clear WB, DC, MMU
		bic	r0, r0, #0x1000		@ clear Icache
		mcr	p15, 0, r0, c1, c0, 0
99:
aixcc-public/challenge-001-exemplar-source
3,403
arch/arm/boot/compressed/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2000 Russell King
 */
#include <asm/vmlinux.lds.h>

#ifdef CONFIG_CPU_ENDIAN_BE8
#define ZIMAGE_MAGIC(x) ( (((x) >> 24) & 0x000000ff) | \
			  (((x) >>  8) & 0x0000ff00) | \
			  (((x) <<  8) & 0x00ff0000) | \
			  (((x) << 24) & 0xff000000) )
#else
#define ZIMAGE_MAGIC(x) (x)
#endif

OUTPUT_ARCH(arm)
ENTRY(_start)
SECTIONS
{
  /DISCARD/ : {
    COMMON_DISCARDS
    *(.ARM.exidx*)
    *(.ARM.extab*)
    *(.note.*)
    *(.rel.*)
    *(.printk_index)
    /*
     * Discard any r/w data - this produces a link error if we have any,
     * which is required for PIC decompression.  Local data generates
     * GOTOFF relocations, which prevents it being relocated independently
     * of the text/got segments.
     */
    *(.data)
  }

  . = TEXT_START;
  _text = .;

  .text : {
    _start = .;
    *(.start)
    *(.text)
    *(.text.*)
    ARM_STUBS_TEXT
  }
  .table : ALIGN(4) {
    _table_start = .;
    LONG(ZIMAGE_MAGIC(6))
    LONG(ZIMAGE_MAGIC(0x5a534c4b))
    LONG(ZIMAGE_MAGIC(__piggy_size_addr - _start))
    LONG(ZIMAGE_MAGIC(_kernel_bss_size))
    LONG(ZIMAGE_MAGIC(TEXT_OFFSET))
    LONG(ZIMAGE_MAGIC(MALLOC_SIZE))
    LONG(0)
    _table_end = .;
  }
  .rodata : {
    *(.rodata)
    *(.rodata.*)
    *(.data.rel.ro)
    *(.data.rel.ro.*)
  }
  .piggydata : {
    *(.piggydata)
    __piggy_size_addr = . - 4;
  }

  . = ALIGN(4);
  _etext = .;

  .got.plt : { *(.got.plt) }
#ifndef CONFIG_EFI_STUB
  _got_start = .;
  .got : { *(.got) }
  _got_end = .;
#endif

  /* ensure the zImage file size is always a multiple of 64 bits */
  /* (without a dummy byte, ld just ignores the empty section) */
  .pad : { BYTE(0); . = ALIGN(8); }

#ifdef CONFIG_EFI_STUB
  .data : ALIGN(4096) {
    __pecoff_data_start = .;
    _got_start = .;
    *(.got)
    _got_end = .;
    /*
     * The EFI stub always executes from RAM, and runs strictly before the
     * decompressor, so we can make an exception for its r/w data, and keep it
     */
    *(.data.efistub .bss.efistub)
    __pecoff_data_end = .;

    /*
     * PE/COFF mandates a file size which is a multiple of 512 bytes if the
     * section size equals or exceeds 4 KB
     */
    . = ALIGN(512);
  }
  __pecoff_data_rawsize = . - ADDR(.data);
#endif

  _edata = .;

  /*
   * The image_end section appears after any additional loadable sections
   * that the linker may decide to insert in the binary image.  Having
   * this symbol allows further debug in the near future.
   */
  .image_end (NOLOAD) : {
    /*
     * EFI requires that the image is aligned to 512 bytes, and appended
     * DTB requires that we know where the end of the image is.  Ensure
     * that both are satisfied by ensuring that there are no additional
     * sections emitted into the decompressor image.
     */
    _edata_real = .;
  }

  _magic_sig = ZIMAGE_MAGIC(0x016f2818);
  _magic_start = ZIMAGE_MAGIC(_start);
  _magic_end = ZIMAGE_MAGIC(_edata);
  _magic_table = ZIMAGE_MAGIC(_table_start - _start);

  . = BSS_START;
  __bss_start = .;
  .bss : { *(.bss) }
  _end = .;

  . = ALIGN(8);		/* the stack must be 64-bit aligned */
  .stack : { *(.stack) }

  PROVIDE(__pecoff_data_size = ALIGN(512) - ADDR(.data));
  PROVIDE(__pecoff_end = ALIGN(512));

  STABS_DEBUG
  DWARF_DEBUG
  ARM_DETAILS

  ARM_ASSERTS
}
ASSERT(_edata_real == _edata, "error: zImage file size is incorrect");
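/*
 * Note (editorial addition, not part of the upstream script): _magic_sig
 * above is the well-known zImage signature 0x016f2818 that boot loaders
 * look for near the start of the image; _magic_start and _magic_end give
 * the linked start and end addresses, and _magic_table records the offset
 * of the .table tag list emitted above. The words themselves are placed
 * into the image header by arch/arm/boot/compressed/head.S.
 */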
aixcc-public/challenge-001-exemplar-source
3,603
arch/arm/boot/compressed/head-sharpsl.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/arch/arm/boot/compressed/head-sharpsl.S
 *
 * Copyright (C) 2004-2005 Richard Purdie <rpurdie@rpsys.net>
 *
 * Sharp's bootloader doesn't pass any kind of machine ID
 * so we have to figure out the machine for ourselves...
 *
 * Support for Poodle, Corgi (SL-C700), Shepherd (SL-C750)
 * Husky (SL-C760), Tosa (SL-C6000), Spitz (SL-C3000),
 * Akita (SL-C1000) and Borzoi (SL-C3100).
 *
 */

#include <linux/linkage.h>
#include <asm/mach-types.h>

#ifndef CONFIG_PXA_SHARPSL
#error What am I doing here...
#endif

		.section	".start", "ax"

__SharpSL_start:

/* Check for TC6393 - if found we have a Tosa */
		ldr	r7, .TOSAID
		mov	r1, #0x10000000		@ Base address of TC6393 chip
		mov	r6, #0x03
		ldrh	r3, [r1, #8]		@ Load TC6393XB Revision: This is 0x0003
		cmp	r6, r3
		beq	.SHARPEND		@ Success -> tosa

/* Check for pxa270 - if found, branch */
		mrc	p15, 0, r4, c0, c0	@ Get Processor ID
		and	r4, r4, #0xffffff00
		ldr	r3, .PXA270ID
		cmp	r4, r3
		beq	.PXA270

/* Check for w100 - if not found we have a Poodle */
		ldr	r1, .W100ADDR		@ Base address of w100 chip + regs offset

		mov	r6, #0x31		@ Load Magic Init value
		str	r6, [r1, #0x280]	@ to SCRATCH_UMSK
		mov	r5, #0x3000
.W100LOOP:
		subs	r5, r5, #1
		bne	.W100LOOP
		mov	r6, #0x30		@ Load 2nd Magic Init value
		str	r6, [r1, #0x280]	@ to SCRATCH_UMSK

		ldr	r6, [r1, #0]		@ Load Chip ID
		ldr	r3, .W100ID
		ldr	r7, .POODLEID
		cmp	r6, r3
		bne	.SHARPEND		@ We have no w100 - Poodle

/* Check for pxa250 - if found we have a Corgi */
		ldr	r7, .CORGIID
		ldr	r3, .PXA255ID
		cmp	r4, r3
		blo	.SHARPEND		@ We have a PXA250 - Corgi

/* Check for 64MiB flash - if found we have a Shepherd */
		bl	get_flash_ids
		ldr	r7, .SHEPHERDID
		cmp	r3, #0x76		@ 64MiB flash
		beq	.SHARPEND		@ We have Shepherd

/* Must be a Husky */
		ldr	r7, .HUSKYID		@ Must be Husky
		b	.SHARPEND

.PXA270:
/* Check for 16MiB flash - if found we have Spitz */
		bl	get_flash_ids
		ldr	r7, .SPITZID
		cmp	r3, #0x73		@ 16MiB flash
		beq	.SHARPEND		@ We have Spitz

/* Check for a second SCOOP chip - if found we have Borzoi */
		ldr	r1, .SCOOP2ADDR
		ldr	r7, .BORZOIID
		mov	r6, #0x0140
		strh	r6, [r1]
		ldrh	r6, [r1]
		cmp	r6, #0x0140
		beq	.SHARPEND		@ We have Borzoi

/* Must be Akita */
		ldr	r7, .AKITAID
		b	.SHARPEND		@ We have Akita

.PXA255ID:
		.word	0x69052d00		@ PXA255 Processor ID
.PXA270ID:
		.word	0x69054100		@ PXA270 Processor ID
.W100ID:
		.word	0x57411002		@ w100 Chip ID
.W100ADDR:
		.word	0x08010000		@ w100 Chip ID Reg Address
.SCOOP2ADDR:
		.word	0x08800040
.POODLEID:
		.word	MACH_TYPE_POODLE
.CORGIID:
		.word	MACH_TYPE_CORGI
.SHEPHERDID:
		.word	MACH_TYPE_SHEPHERD
.HUSKYID:
		.word	MACH_TYPE_HUSKY
.TOSAID:
		.word	MACH_TYPE_TOSA
.SPITZID:
		.word	MACH_TYPE_SPITZ
.AKITAID:
		.word	MACH_TYPE_AKITA
.BORZOIID:
		.word	MACH_TYPE_BORZOI

/*
 * Return:   r2 - NAND Manufacturer ID
 *           r3 - NAND Chip ID
 * Corrupts: r1
 */
get_flash_ids:
		mov	r1, #0x0c000000		@ Base address of NAND chip
		ldrb	r3, [r1, #24]		@ Load FLASHCTL
		bic	r3, r3, #0x11		@ SET NCE
		orr	r3, r3, #0x0a		@ SET CLR + FLWP
		strb	r3, [r1, #24]		@ Save to FLASHCTL
		mov	r2, #0x90		@ Command "readid"
		strb	r2, [r1, #20]		@ Save to FLASHIO
		bic	r3, r3, #2		@ CLR CLE
		orr	r3, r3, #4		@ SET ALE
		strb	r3, [r1, #24]		@ Save to FLASHCTL
		mov	r2, #0			@ Address 0x00
		strb	r2, [r1, #20]		@ Save to FLASHIO
		bic	r3, r3, #4		@ CLR ALE
		strb	r3, [r1, #24]		@ Save to FLASHCTL
.fids1:
		ldrb	r3, [r1, #24]		@ Load FLASHCTL
		tst	r3, #32			@ Is chip ready?
		beq	.fids1
		ldrb	r2, [r1, #20]		@ NAND Manufacturer ID
		ldrb	r3, [r1, #20]		@ NAND Chip ID
		mov	pc, lr

.SHARPEND:
aixcc-public/challenge-001-exemplar-source
2,477
arch/arm/boot/bootp/init.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/boot/bootp/init.S
 *
 * Copyright (C) 2000-2003 Russell King.
 *
 * "Header" file for splitting kernel + initrd.  Note that we pass
 * r0 through to r3 straight through.
 *
 * This demonstrates how to append code to the start of the kernel
 * zImage, and boot the kernel without copying it around.  This
 * example would be simpler: if we didn't have an object of unknown
 * size immediately following the kernel, we could build this into
 * a binary blob, and concatenate the zImage using the cat command.
 */
		.section	.start, "ax"
		.type	_start, #function
		.globl	_start

_start:		add	lr, pc, #-0x8		@ lr = current load addr
		adr	r13, data
		ldmia	r13!, {r4-r6}		@ r5 = dest, r6 = length
		add	r4, r4, lr		@ r4 = initrd_start + load addr
		bl	move			@ move the initrd

/*
 * Setup the initrd parameters to pass to the kernel.  This can only be
 * passed in via the tagged list.
 */
		ldmia	r13, {r5-r9}		@ get size and addr of initrd
						@ r5 = ATAG_CORE
						@ r6 = ATAG_INITRD2
						@ r7 = initrd start
						@ r8 = initrd end
						@ r9 = param_struct address

		ldr	r10, [r9, #4]		@ get first tag
		teq	r10, r5			@ is it ATAG_CORE?
/*
 * If we didn't find a valid tag list, create a dummy ATAG_CORE entry.
 */
		movne	r10, #0			@ terminator
		movne	r4, #2			@ Size of this entry (2 words)
		stmiane	r9, {r4, r5, r10}	@ Size, ATAG_CORE, terminator

/*
 * find the end of the tag list, and then add an INITRD tag on the end.
 * If there is already an INITRD tag, then we ignore it; the last INITRD
 * tag takes precedence.
 */
taglist:	ldr	r10, [r9, #0]		@ tag length
		teq	r10, #0			@ last tag (zero length)?
		addne	r9, r9, r10, lsl #2
		bne	taglist

		mov	r5, #4			@ Size of initrd tag (4 words)
		stmia	r9, {r5, r6, r7, r8, r10}
		b	kernel_start		@ call kernel

/*
 * Move the block of memory length r6 from address r4 to address r5
 */
move:		ldmia	r4!, {r7 - r10}		@ move 32 bytes at a time
		stmia	r5!, {r7 - r10}
		ldmia	r4!, {r7 - r10}
		stmia	r5!, {r7 - r10}
		subs	r6, r6, #8 * 4
		bcs	move
		mov	pc, lr

		.size	_start, . - _start

		.align
		.type	data, #object
data:		.word	initrd_start		@ source initrd address
		.word	initrd_phys		@ destination initrd address
		.word	initrd_size		@ initrd size

		.word	0x54410001		@ r5 = ATAG_CORE
		.word	0x54420005		@ r6 = ATAG_INITRD2
		.word	initrd_phys		@ r7
		.word	initrd_size		@ r8
		.word	params_phys		@ r9
		.size	data, . - data
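@
@ Note (editorial addition, not in the original file): initrd_start,
@ initrd_size, initrd_phys and params_phys are deliberately left
@ undefined here.  They are expected to be supplied at link time - in
@ the upstream tree the bootp Makefile and linker script provide them
@ (e.g. via ld --defsym and an .incbin of the initrd image) - so the
@ concrete values are configuration-specific and not shown in this file.
@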
aixcc-public/challenge-001-exemplar-source
1,074
arch/arm/include/debug/renesas-scif.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Renesas SCIF(A) debugging macro include header
 *
 * Based on r8a7790.S
 *
 * Copyright (C) 2012-2013 Renesas Electronics Corporation
 * Copyright (C) 1994-1999 Russell King
 */

#define SCIF_PHYS	CONFIG_DEBUG_UART_PHYS
#define SCIF_VIRT	((SCIF_PHYS & 0x00ffffff) | 0xfd000000)

#if defined(CONFIG_DEBUG_R7S9210_SCIF2) || defined(CONFIG_DEBUG_R7S9210_SCIF4)
/* RZ/A2 SCIFA */
#define FTDR		0x06
#define FSR		0x08
#elif CONFIG_DEBUG_UART_PHYS < 0xe6e00000
/* SCIFA */
#define FTDR		0x20
#define FSR		0x14
#else
/* SCIF */
#define FTDR		0x0c
#define FSR		0x10
#endif

#define TDFE		(1 << 5)
#define TEND		(1 << 6)

	.macro	addruart, rp, rv, tmp
	ldr	\rp, =SCIF_PHYS
	ldr	\rv, =SCIF_VIRT
	.endm

	.macro	waituartcts, rd, rx
	.endm

	.macro	waituarttxrdy, rd, rx
1001:	ldrh	\rd, [\rx, #FSR]
	tst	\rd, #TDFE
	beq	1001b
	.endm

	.macro	senduart, rd, rx
	strb	\rd, [\rx, #FTDR]
	ldrh	\rd, [\rx, #FSR]
	bic	\rd, \rd, #TEND
	strh	\rd, [\rx, #FSR]
	.endm

	.macro	busyuart, rd, rx
1001:	ldrh	\rd, [\rx, #FSR]
	tst	\rd, #TEND
	beq	1001b
	.endm
aixcc-public/challenge-001-exemplar-source
1,037
arch/arm/include/debug/msm.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 * Author: Brian Swetland <swetland@google.com>
 */

	.macro	addruart, rp, rv, tmp
	ldr	\rp, =CONFIG_DEBUG_UART_PHYS
	ldr	\rv, =CONFIG_DEBUG_UART_VIRT
	.endm

	.macro	senduart, rd, rx
ARM_BE8(rev	\rd, \rd )
	@ Write the 1 character to UARTDM_TF
	str	\rd, [\rx, #0x70]
	.endm

	.macro	waituartcts, rd, rx
	.endm

	.macro	waituarttxrdy, rd, rx
	@ check for TX_EMT in UARTDM_SR
	ldr	\rd, [\rx, #0x08]
ARM_BE8(rev	\rd, \rd )
	tst	\rd, #0x08
	bne	1002f
	@ wait for TXREADY in UARTDM_ISR
1001:	ldr	\rd, [\rx, #0x14]
ARM_BE8(rev	\rd, \rd )
	tst	\rd, #0x80
	beq	1001b
1002:
	@ Clear TX_READY by writing to the UARTDM_CR register
	mov	\rd, #0x300
ARM_BE8(rev	\rd, \rd )
	str	\rd, [\rx, #0x10]
	@ Write 0x1 to NCF register
	mov	\rd, #0x1
ARM_BE8(rev	\rd, \rd )
	str	\rd, [\rx, #0x40]
	@ UARTDM reg. Read to induce delay
	ldr	\rd, [\rx, #0x08]
	.endm

	.macro	busyuart, rd, rx
	.endm
aixcc-public/challenge-001-exemplar-source
1,681
arch/arm/include/debug/sa1100.S
/* SPDX-License-Identifier: GPL-2.0-only */
/* arch/arm/include/debug/sa1100.S
 *
 * Debugging macro include header
 *
 * Copyright (C) 1994-1999 Russell King
 * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
 */

#define UTCR3		0x0c
#define UTDR		0x14
#define UTSR1		0x20
#define UTCR3_TXE	0x00000002	/* Transmit Enable */
#define UTSR1_TBY	0x00000001	/* Transmitter BusY (read) */
#define UTSR1_TNF	0x00000004	/* Transmit FIFO Not Full (read) */

		.macro	addruart, rp, rv, tmp
		mrc	p15, 0, \rp, c1, c0
		tst	\rp, #1			@ MMU enabled?
		moveq	\rp, #0x80000000	@ physical base address
		movne	\rp, #0xf8000000	@ virtual address

		@ We probe for the active serial port here, coherently with
		@ the comment in arch/arm/mach-sa1100/include/mach/uncompress.h.
		@ We assume r1 can be clobbered.

		@ see if Ser3 is active
		add	\rp, \rp, #0x00050000
		ldr	\rv, [\rp, #UTCR3]
		tst	\rv, #UTCR3_TXE

		@ if Ser3 is inactive, then try Ser1
		addeq	\rp, \rp, #(0x00010000 - 0x00050000)
		ldreq	\rv, [\rp, #UTCR3]
		tsteq	\rv, #UTCR3_TXE

		@ if Ser1 is inactive, then try Ser2
		addeq	\rp, \rp, #(0x00030000 - 0x00010000)
		ldreq	\rv, [\rp, #UTCR3]
		tsteq	\rv, #UTCR3_TXE

		@ clear top bits, and generate both phys and virt addresses
		lsl	\rp, \rp, #8
		lsr	\rp, \rp, #8
		orr	\rv, \rp, #0xf8000000	@ virtual
		orr	\rp, \rp, #0x80000000	@ physical
		.endm

		.macro	senduart, rd, rx
		str	\rd, [\rx, #UTDR]
		.endm

		.macro	waituartcts, rd, rx
		.endm

		.macro	waituarttxrdy, rd, rx
1001:		ldr	\rd, [\rx, #UTSR1]
		tst	\rd, #UTSR1_TNF
		beq	1001b
		.endm

		.macro	busyuart, rd, rx
1001:		ldr	\rd, [\rx, #UTSR1]
		tst	\rd, #UTSR1_TBY
		bne	1001b
		.endm
aixcc-public/challenge-001-exemplar-source
1,484
arch/arm/include/debug/icedcc.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/debug/icedcc.S
 *
 * Copyright (C) 1994-1999 Russell King
 */

		@@ debug using ARM EmbeddedICE DCC channel

		.macro	addruart, rp, rv, tmp
		.endm

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)

		.macro	senduart, rd, rx
		mcr	p14, 0, \rd, c0, c5, 0
		.endm

		.macro	busyuart, rd, rx
1001:		mrc	p14, 0, \rx, c0, c1, 0
		tst	\rx, #0x20000000
		beq	1001b
		.endm

		.macro	waituartcts, rd, rx
		.endm

		.macro	waituarttxrdy, rd, rx
		mov	\rd, #0x2000000
1001:		subs	\rd, \rd, #1
		bmi	1002f
		mrc	p14, 0, \rx, c0, c1, 0
		tst	\rx, #0x20000000
		bne	1001b
1002:
		.endm

#elif defined(CONFIG_CPU_XSCALE)

		.macro	senduart, rd, rx
		mcr	p14, 0, \rd, c8, c0, 0
		.endm

		.macro	busyuart, rd, rx
1001:		mrc	p14, 0, \rx, c14, c0, 0
		tst	\rx, #0x10000000
		beq	1001b
		.endm

		.macro	waituartcts, rd, rx
		.endm

		.macro	waituarttxrdy, rd, rx
		mov	\rd, #0x10000000
1001:		subs	\rd, \rd, #1
		bmi	1002f
		mrc	p14, 0, \rx, c14, c0, 0
		tst	\rx, #0x10000000
		bne	1001b
1002:
		.endm

#else

		.macro	senduart, rd, rx
		mcr	p14, 0, \rd, c1, c0, 0
		.endm

		.macro	busyuart, rd, rx
1001:		mrc	p14, 0, \rx, c0, c0, 0
		tst	\rx, #2
		beq	1001b
		.endm

		.macro	waituartcts, rd, rx
		.endm

		.macro	waituarttxrdy, rd, rx
		mov	\rd, #0x2000000
1001:		subs	\rd, \rd, #1
		bmi	1002f
		mrc	p14, 0, \rx, c0, c0, 0
		tst	\rx, #2
		bne	1001b
1002:
		.endm

#endif	/* CONFIG_CPU_V6 */
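@
@ Note (editorial addition, not in the original file): unlike the UART
@ back ends, the waituarttxrdy implementations above are bounded spins -
@ the counter loaded into \rd is decremented until either the DCC
@ "TX register full" flag clears or the count goes negative (bmi takes
@ the exit), so a boot with no attached debugger draining the channel
@ cannot wedge in this macro.
@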
aixcc-public/challenge-001-exemplar-source
1,070
arch/arm/include/debug/s3c24xx.S
/* SPDX-License-Identifier: GPL-2.0-only */
/* arch/arm/mach-s3c2410/include/mach/debug-macro.S
 *
 * Debugging macro include header
 *
 * Copyright (C) 1994-1999 Russell King
 * Copyright (C) 2005 Simtec Electronics
 *
 * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
 */

#include <linux/serial_s3c.h>

#define S3C2410_UART1_OFF	(0x4000)

	.macro	addruart, rp, rv, tmp
	ldr	\rp, =CONFIG_DEBUG_UART_PHYS
	ldr	\rv, =CONFIG_DEBUG_UART_VIRT
	.endm

	.macro	fifo_full_s3c2410 rd, rx
	ldr	\rd, [\rx, #S3C2410_UFSTAT]
	tst	\rd, #S3C2410_UFSTAT_TXFULL
	.endm

	.macro	fifo_level_s3c2410 rd, rx
	ldr	\rd, [\rx, #S3C2410_UFSTAT]
	and	\rd, \rd, #S3C2410_UFSTAT_TXMASK
	.endm

/* Select the correct implementation depending on the configuration.  The
 * S3C2440 will get selected by default, as they are the most widely
 * used variants.
 */

#if defined(CONFIG_DEBUG_S3C2410_UART)
#define fifo_full	fifo_full_s3c2410
#define fifo_level	fifo_level_s3c2410
#endif

/* include the rest of the code which will do the work */

#include <debug/samsung.S>
aixcc-public/challenge-001-exemplar-source
1,277
arch/arm/include/debug/zynq.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Debugging macro include header
 *
 * Copyright (C) 2011 Xilinx
 */

#define UART_CR_OFFSET		0x00	/* Control Register [8:0] */
#define UART_SR_OFFSET		0x2C	/* Channel Status [11:0] */
#define UART_FIFO_OFFSET	0x30	/* FIFO [15:0] or [7:0] */

#define UART_SR_TXFULL		0x00000010	/* TX FIFO full */
#define UART_SR_TXEMPTY		0x00000008	/* TX FIFO empty */

#define UART0_PHYS		0xE0000000
#define UART0_VIRT		0xF0800000
#define UART1_PHYS		0xE0001000
#define UART1_VIRT		0xF0801000

#if IS_ENABLED(CONFIG_DEBUG_ZYNQ_UART1)
# define LL_UART_PADDR		UART1_PHYS
# define LL_UART_VADDR		UART1_VIRT
#else
# define LL_UART_PADDR		UART0_PHYS
# define LL_UART_VADDR		UART0_VIRT
#endif

		.macro	addruart, rp, rv, tmp
		ldr	\rp, =LL_UART_PADDR	@ physical
		ldr	\rv, =LL_UART_VADDR	@ virtual
		.endm

		.macro	senduart, rd, rx
		strb	\rd, [\rx, #UART_FIFO_OFFSET]	@ TXDATA
		.endm

		.macro	waituartcts, rd, rx
		.endm

		.macro	waituarttxrdy, rd, rx
1001:		ldr	\rd, [\rx, #UART_SR_OFFSET]
ARM_BE8(	rev	\rd, \rd )
		tst	\rd, #UART_SR_TXEMPTY
		beq	1001b
		.endm

		.macro	busyuart, rd, rx
1002:		ldr	\rd, [\rx, #UART_SR_OFFSET]	@ get status register
ARM_BE8(	rev	\rd, \rd )
		tst	\rd, #UART_SR_TXFULL		@
		bne	1002b				@ wait if FIFO is full
		.endm
aixcc-public/challenge-001-exemplar-source
1,903
arch/arm/include/debug/samsung.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2005, 2007 Simtec Electronics
 *	http://armlinux.simtec.co.uk/
 *	Ben Dooks <ben@simtec.co.uk>
 */

#include <linux/serial_s3c.h>

/* The S5PV210/S5PC110 implementations are as below. */

	.macro	fifo_level_s5pv210 rd, rx
	ldr	\rd, [\rx, #S3C2410_UFSTAT]
ARM_BE8(rev	\rd, \rd)
	and	\rd, \rd, #S5PV210_UFSTAT_TXMASK
	.endm

	.macro	fifo_full_s5pv210 rd, rx
	ldr	\rd, [\rx, #S3C2410_UFSTAT]
ARM_BE8(rev	\rd, \rd)
	tst	\rd, #S5PV210_UFSTAT_TXFULL
	.endm

/* The S3C2440 implementations are used by default as they are the
 * most widely re-used */

	.macro	fifo_level_s3c2440 rd, rx
	ldr	\rd, [\rx, #S3C2410_UFSTAT]
ARM_BE8(rev	\rd, \rd)
	and	\rd, \rd, #S3C2440_UFSTAT_TXMASK
	.endm

#ifndef fifo_level
#define fifo_level fifo_level_s3c2440
#endif

	.macro	fifo_full_s3c2440 rd, rx
	ldr	\rd, [\rx, #S3C2410_UFSTAT]
ARM_BE8(rev	\rd, \rd)
	tst	\rd, #S3C2440_UFSTAT_TXFULL
	.endm

#ifndef fifo_full
#define fifo_full fifo_full_s3c2440
#endif

	.macro	senduart, rd, rx
	strb	\rd, [\rx, #S3C2410_UTXH]
	.endm

	.macro	busyuart, rd, rx
	ldr	\rd, [\rx, #S3C2410_UFCON]
ARM_BE8(rev	\rd, \rd)
	tst	\rd, #S3C2410_UFCON_FIFOMODE	@ fifo enabled?
	beq	1001f
	@
	@ FIFO enabled...
1003:	fifo_full \rd, \rx
	bne	1003b
	b	1002f

1001:	@ busy waiting for non fifo
	ldr	\rd, [\rx, #S3C2410_UTRSTAT]
ARM_BE8(rev	\rd, \rd)
	tst	\rd, #S3C2410_UTRSTAT_TXFE
	beq	1001b

1002:	@ exit busyuart
	.endm

	.macro	waituartcts, rd, rx
	.endm

	.macro	waituarttxrdy, rd, rx
	ldr	\rd, [\rx, #S3C2410_UFCON]
ARM_BE8(rev	\rd, \rd)
	tst	\rd, #S3C2410_UFCON_FIFOMODE	@ fifo enabled?
	beq	1001f
	@
	@ FIFO enabled...
1003:	fifo_level \rd, \rx
	teq	\rd, #0
	bne	1003b
	b	1002f

1001:	@ idle waiting for non fifo
	ldr	\rd, [\rx, #S3C2410_UTRSTAT]
ARM_BE8(rev	\rd, \rd)
	tst	\rd, #S3C2410_UTRSTAT_TXFE
	beq	1001b

1002:	@ exit waituarttxrdy
	.endm
aixcc-public/challenge-001-exemplar-source
1,068
arch/arm/include/debug/8250.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/debug/8250.S
 *
 * Copyright (C) 1994-2013 Russell King
 */
#include <linux/serial_reg.h>

		.macro	addruart, rp, rv, tmp
		ldr	\rp, =CONFIG_DEBUG_UART_PHYS
		ldr	\rv, =CONFIG_DEBUG_UART_VIRT
		.endm

#ifdef CONFIG_DEBUG_UART_8250_WORD
		.macro	store, rd, rx:vararg
ARM_BE8(	rev	\rd, \rd )
		str	\rd, \rx
ARM_BE8(	rev	\rd, \rd )
		.endm

		.macro	load, rd, rx:vararg
		ldr	\rd, \rx
ARM_BE8(	rev	\rd, \rd )
		.endm
#else
		.macro	store, rd, rx:vararg
		strb	\rd, \rx
		.endm

		.macro	load, rd, rx:vararg
		ldrb	\rd, \rx
		.endm
#endif

#define UART_SHIFT CONFIG_DEBUG_UART_8250_SHIFT

		.macro	senduart, rd, rx
		store	\rd, [\rx, #UART_TX << UART_SHIFT]
		.endm

		.macro	busyuart, rd, rx
1002:		load	\rd, [\rx, #UART_LSR << UART_SHIFT]
		and	\rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
		teq	\rd, #UART_LSR_TEMT | UART_LSR_THRE
		bne	1002b
		.endm

		.macro	waituarttxrdy, rd, rx
		.endm

		.macro	waituartcts, rd, rx
1001:		load	\rd, [\rx, #UART_MSR << UART_SHIFT]
		tst	\rd, #UART_MSR_CTS
		beq	1001b
		.endm
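@
@ Illustrative sketch (editorial addition, not in the original file) of
@ how the generic low-level debug code in arch/arm/kernel/debug.S is
@ expected to compose these macros to emit one character; the register
@ choices below are hypothetical:
@
@	addruart	r3, r2, r1	@ r3 = phys base, r2 = virt base
@	waituartcts	r2, r3		@ honour CTS (here: poll UART_MSR)
@	waituarttxrdy	r2, r3		@ no-op for the 8250 back end
@	senduart	r1, r3		@ store the character to UART_TX
@	busyuart	r2, r3		@ poll UART_LSR until THR/TSR drain
@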
aixcc-public/challenge-001-exemplar-source
5,550
arch/arm/include/debug/brcmstb.S
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2016 Broadcom */

#include <linux/serial_reg.h>
#include <asm/cputype.h>

/* Physical register offset and virtual register offset */
#define REG_PHYS_BASE		0xf0000000
#define REG_PHYS_BASE_V7	0x08000000
#define REG_VIRT_BASE		0xfc000000
#define REG_PHYS_ADDR(x)	((x) + REG_PHYS_BASE)
#define REG_PHYS_ADDR_V7(x)	((x) + REG_PHYS_BASE_V7)

/* Product id can be read from here */
#define SUN_TOP_CTRL_BASE	REG_PHYS_ADDR(0x404000)
#define SUN_TOP_CTRL_BASE_V7	REG_PHYS_ADDR_V7(0x404000)

#define UARTA_3390		REG_PHYS_ADDR(0x40a900)
#define UARTA_72116		UARTA_7255
#define UARTA_7250		REG_PHYS_ADDR(0x40b400)
#define UARTA_7255		REG_PHYS_ADDR(0x40c000)
#define UARTA_7260		UARTA_7255
#define UARTA_7268		UARTA_7255
#define UARTA_7271		UARTA_7268
#define UARTA_7278		REG_PHYS_ADDR_V7(0x40c000)
#define UARTA_7216		UARTA_7278
#define UARTA_72164		UARTA_7278
#define UARTA_72165		UARTA_7278
#define UARTA_7364		REG_PHYS_ADDR(0x40b000)
#define UARTA_7366		UARTA_7364
#define UARTA_74371		REG_PHYS_ADDR(0x406b00)
#define UARTA_7439		REG_PHYS_ADDR(0x40a900)
#define UARTA_7445		REG_PHYS_ADDR(0x40ab00)

#define UART_SHIFT		2

#define checkuart(rp, rv, family_id, family) \
		/* Load family id */ \
		ldr	rp, =family_id ; \
		/* Compare SUN_TOP_CTRL value against it */ \
		cmp	rp, rv ; \
		/* Passed test, load address */ \
		ldreq	rp, =UARTA_##family ; \
		/* Jump to save UART address */ \
		beq	91f

		.macro	addruart, rp, rv, tmp
		adr	\rp, 99f		@ actual addr of 99f
		ldr	\rv, [\rp]		@ linked addr is stored there
		sub	\rv, \rv, \rp		@ offset between the two
		ldr	\rp, [\rp, #4]		@ linked brcmstb_uart_config
		sub	\tmp, \rp, \rv		@ actual brcmstb_uart_config
		ldr	\rp, [\tmp]		@ Load brcmstb_uart_config
		cmp	\rp, #1			@ needs initialization?
		bne	100f			@ no; go load the addresses
		mov	\rv, #0			@ yes; record init is done
		str	\rv, [\tmp]

		/* Check for V7 memory map if B53 */
		mrc	p15, 0, \rv, c0, c0, 0	@ get Main ID register
		ldr	\rp, =ARM_CPU_PART_MASK
		and	\rv, \rv, \rp
		ldr	\rp, =ARM_CPU_PART_BRAHMA_B53	@ check for B53 CPU
		cmp	\rv, \rp
		bne	10f

		/* if PERIPHBASE doesn't overlap REG_PHYS_BASE use V7 map */
		mrc	p15, 1, \rv, c15, c3, 0	@ get PERIPHBASE from CBAR
		ands	\rv, \rv, #REG_PHYS_BASE
		ldreq	\rp, =SUN_TOP_CTRL_BASE_V7

		/* Check SUN_TOP_CTRL base */
10:		ldrne	\rp, =SUN_TOP_CTRL_BASE	@ load SUN_TOP_CTRL PA
		ldr	\rv, [\rp, #0]		@ get register contents
ARM_BE8(	rev	\rv, \rv )
		and	\rv, \rv, #0xffffff00	@ strip revision bits [7:0]

		/* Chip specific detection starts here */
20:		checkuart(\rp, \rv, 0x33900000, 3390)
21:		checkuart(\rp, \rv, 0x07211600, 72116)
22:		checkuart(\rp, \rv, 0x72160000, 7216)
23:		checkuart(\rp, \rv, 0x07216400, 72164)
24:		checkuart(\rp, \rv, 0x07216500, 72165)
25:		checkuart(\rp, \rv, 0x72500000, 7250)
26:		checkuart(\rp, \rv, 0x72550000, 7255)
27:		checkuart(\rp, \rv, 0x72600000, 7260)
28:		checkuart(\rp, \rv, 0x72680000, 7268)
29:		checkuart(\rp, \rv, 0x72710000, 7271)
30:		checkuart(\rp, \rv, 0x72780000, 7278)
31:		checkuart(\rp, \rv, 0x73640000, 7364)
32:		checkuart(\rp, \rv, 0x73660000, 7366)
33:		checkuart(\rp, \rv, 0x07437100, 74371)
34:		checkuart(\rp, \rv, 0x74390000, 7439)
35:		checkuart(\rp, \rv, 0x74450000, 7445)

		/* No valid UART found */
90:		mov	\rp, #0
		/* fall through */

		/* Record whichever UART we chose */
91:		str	\rp, [\tmp, #4]		@ Store in brcmstb_uart_phys
		cmp	\rp, #0			@ Valid UART address?
		bne	92f			@ Yes, go process it
		str	\rp, [\tmp, #8]		@ Store 0 in brcmstb_uart_virt
		b	100f			@ Done
92:		and	\rv, \rp, #0xffffff	@ offset within 16MB section
		add	\rv, \rv, #REG_VIRT_BASE
		str	\rv, [\tmp, #8]		@ Store in brcmstb_uart_virt
		b	100f

		.align
99:		.word	.
		.word	brcmstb_uart_config
		.ltorg

		/* Load previously selected UART address */
100:		ldr	\rp, [\tmp, #4]		@ Load brcmstb_uart_phys
		ldr	\rv, [\tmp, #8]		@ Load brcmstb_uart_virt
		.endm

		.macro	store, rd, rx:vararg
ARM_BE8(	rev	\rd, \rd )
		str	\rd, \rx
		.endm

		.macro	load, rd, rx:vararg
		ldr	\rd, \rx
ARM_BE8(	rev	\rd, \rd )
		.endm

		.macro	senduart, rd, rx
		store	\rd, [\rx, #UART_TX << UART_SHIFT]
		.endm

		.macro	busyuart, rd, rx
1002:		load	\rd, [\rx, #UART_LSR << UART_SHIFT]
		and	\rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
		teq	\rd, #UART_LSR_TEMT | UART_LSR_THRE
		bne	1002b
		.endm

		.macro	waituarttxrdy, rd, rx
		.endm

		.macro	waituartcts, rd, rx
		.endm

/*
 * Storage for the state maintained by the macros above.
 *
 * In the kernel proper, this data is located in arch/arm/mach-bcm/brcmstb.c.
 * That's because this header is included from multiple files, and we only
 * want a single copy of the data. In particular, the UART probing code above
 * assumes it's running using physical addresses. This is true when this file
 * is included from head.o, but not when included from debug.o. So we need
 * to share the probe results between the two copies, rather than having
 * to re-run the probing again later.
 *
 * In the decompressor, we put the symbol/storage right here, since common.c
 * isn't included in the decompressor build. This symbol gets put in .text
 * even though it's really data, since .data is discarded from the
 * decompressor. Luckily, .text is writeable in the decompressor, unless
 * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
 */
#if defined(ZIMAGE)
brcmstb_uart_config:
	/* Debug UART initialization required */
	.word	1
	/* Debug UART physical address */
	.word	0
	/* Debug UART virtual address */
	.word	0
#endif