| repo_id | size | file_path | content |
|---|---|---|---|
AirFortressIlikara/LS2K0300-linux-4.19
| 14,306
|
arch/arm/kernel/head-nommu.S
|
/*
* linux/arch/arm/kernel/head-nommu.S
*
* Copyright (C) 1994-2002 Russell King
* Copyright (C) 2003-2006 Hyok S. Choi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Common kernel startup code (non-paged MM)
*
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/cp15.h>
#include <asm/thread_info.h>
#include <asm/v7m.h>
#include <asm/mpu.h>
#include <asm/page.h>
/*
* Kernel startup entry point.
* ---------------------------
*
* This is normally called from the decompressor code. The requirements
* are: MMU = off, D-cache = off, I-cache = don't care, r0 = 0,
* r1 = machine nr.
*
* See linux/arch/arm/tools/mach-types for the complete list of machine
* numbers for r1.
*
*/
__HEAD
#ifdef CONFIG_CPU_THUMBONLY
.thumb
ENTRY(stext)
#else
.arm
ENTRY(stext)
THUMB( badr r9, 1f ) @ Kernel is always entered in ARM.
THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
THUMB( .thumb ) @ switch to Thumb now.
THUMB(1: )
#endif
#ifdef CONFIG_ARM_VIRT_EXT
bl __hyp_stub_install
#endif
@ ensure svc mode and all interrupts masked
safe_svcmode_maskall r9
@ and irqs disabled
#if defined(CONFIG_CPU_CP15)
mrc p15, 0, r9, c0, c0 @ get processor id
#elif defined(CONFIG_CPU_V7M)
ldr r9, =BASEADDR_V7M_SCB
ldr r9, [r9, V7M_SCB_CPUID]
#else
ldr r9, =CONFIG_PROCESSOR_ID
#endif
bl __lookup_processor_type @ r5=procinfo r9=cpuid
movs r10, r5 @ invalid processor (r5=0)?
beq __error_p @ yes, error 'p'
#ifdef CONFIG_ARM_MPU
bl __setup_mpu
#endif
badr lr, 1f @ return (PIC) address
ldr r12, [r10, #PROCINFO_INITFUNC]
add r12, r12, r10
ret r12
1: ldr lr, =__mmap_switched
b __after_proc_init
ENDPROC(stext)
#ifdef CONFIG_SMP
.text
ENTRY(secondary_startup)
/*
* Common entry point for secondary CPUs.
*
* Ensure that we're in SVC mode, and IRQs are disabled. Look up
* the processor type - there is no need to check the machine type
* as it has already been validated by the primary processor.
*/
#ifdef CONFIG_ARM_VIRT_EXT
bl __hyp_stub_install_secondary
#endif
safe_svcmode_maskall r9
#ifndef CONFIG_CPU_CP15
ldr r9, =CONFIG_PROCESSOR_ID
#else
mrc p15, 0, r9, c0, c0 @ get processor id
#endif
bl __lookup_processor_type @ r5=procinfo r9=cpuid
movs r10, r5 @ invalid processor?
beq __error_p @ yes, error 'p'
ldr r7, __secondary_data
#ifdef CONFIG_ARM_MPU
bl __secondary_setup_mpu @ Initialize the MPU
#endif
badr lr, 1f @ return (PIC) address
ldr r12, [r10, #PROCINFO_INITFUNC]
add r12, r12, r10
ret r12
1: bl __after_proc_init
ldr sp, [r7, #12] @ set up the stack pointer
mov fp, #0
b secondary_start_kernel
ENDPROC(secondary_startup)
.type __secondary_data, %object
__secondary_data:
.long secondary_data
#endif /* CONFIG_SMP */
/*
* Set the Control Register and read the processor ID.
*/
.text
__after_proc_init:
M_CLASS(movw r12, #:lower16:BASEADDR_V7M_SCB)
M_CLASS(movt r12, #:upper16:BASEADDR_V7M_SCB)
#ifdef CONFIG_ARM_MPU
M_CLASS(ldr r3, [r12, 0x50])
AR_CLASS(mrc p15, 0, r3, c0, c1, 4) @ Read ID_MMFR0
and r3, r3, #(MMFR0_PMSA) @ PMSA field
teq r3, #(MMFR0_PMSAv7) @ PMSA v7
beq 1f
teq r3, #(MMFR0_PMSAv8) @ PMSA v8
/*
* Memory region attributes for PMSAv8:
*
* n = AttrIndx[2:0]
* n MAIR
* DEVICE_nGnRnE 000 00000000
* NORMAL 001 11111111
*/
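/*
 * Illustration (derived from the table above, assuming PMSAv8_MAIR(attr, n)
 * places attr in byte n of the register): with DEVICE_nGnRnE in AttrIndx 0
 * and NORMAL in AttrIndx 1, the value loaded below is MAIR0 = 0x0000ff00.
 */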
ldreq r3, =PMSAv8_MAIR(0x00, PMSAv8_RGN_DEVICE_nGnRnE) | \
PMSAv8_MAIR(0xff, PMSAv8_RGN_NORMAL)
AR_CLASS(mcreq p15, 0, r3, c10, c2, 0) @ MAIR 0
M_CLASS(streq r3, [r12, #PMSAv8_MAIR0])
moveq r3, #0
AR_CLASS(mcreq p15, 0, r3, c10, c2, 1) @ MAIR 1
M_CLASS(streq r3, [r12, #PMSAv8_MAIR1])
1:
#endif
#ifdef CONFIG_CPU_CP15
/*
* CP15 system control register value returned in r0 from
* the CPU init function.
*/
#ifdef CONFIG_ARM_MPU
biceq r0, r0, #CR_BR @ Disable the 'default mem-map'
orreq r0, r0, #CR_M @ Set SCTLR.M (MPU on)
#endif
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
orr r0, r0, #CR_A
#else
bic r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
bic r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
bic r0, r0, #CR_I
#endif
mcr p15, 0, r0, c1, c0, 0 @ write control reg
instr_sync
#elif defined (CONFIG_CPU_V7M)
#ifdef CONFIG_ARM_MPU
ldreq r3, [r12, MPU_CTRL]
biceq r3, #MPU_CTRL_PRIVDEFENA
orreq r3, #MPU_CTRL_ENABLE
streq r3, [r12, MPU_CTRL]
isb
#endif
/* For V7M systems we want to modify the CCR similarly to the SCTLR */
#ifdef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #V7M_SCB_CCR_DC
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
bic r0, r0, #V7M_SCB_CCR_BP
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
bic r0, r0, #V7M_SCB_CCR_IC
#endif
str r0, [r12, V7M_SCB_CCR]
/* Pass exc_ret to __mmap_switched */
mov r0, r10
#endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */
ret lr
ENDPROC(__after_proc_init)
.ltorg
#ifdef CONFIG_ARM_MPU
#ifndef CONFIG_CPU_V7M
/* Set which MPU region should be programmed */
.macro set_region_nr tmp, rgnr, unused
mov \tmp, \rgnr @ Use static region numbers
mcr p15, 0, \tmp, c6, c2, 0 @ Write RGNR
.endm
/* Setup a single MPU region, either D or I side (D-side for unified) */
.macro setup_region bar, acr, sr, side = PMSAv7_DATA_SIDE, unused
mcr p15, 0, \bar, c6, c1, (0 + \side) @ I/DRBAR
mcr p15, 0, \acr, c6, c1, (4 + \side) @ I/DRACR
mcr p15, 0, \sr, c6, c1, (2 + \side) @ I/DRSR
.endm
#else
.macro set_region_nr tmp, rgnr, base
mov \tmp, \rgnr
str \tmp, [\base, #PMSAv7_RNR]
.endm
.macro setup_region bar, acr, sr, unused, base
lsl \acr, \acr, #16
orr \acr, \acr, \sr
str \bar, [\base, #PMSAv7_RBAR]
str \acr, [\base, #PMSAv7_RASR]
.endm
#endif
/*
* Setup the MPU and initial MPU Regions. We create the following regions:
* Region 0: Use this for probing the MPU details, so leave disabled.
* Region 1: Background region - covers the whole of RAM as strongly ordered
* Region 2: Normal, Shared, cacheable for RAM. From PHYS_OFFSET, size from r6
* Region 3: Normal, shared, inaccessible from PL0 to protect the vectors page
*
* r6: Value to be written to DRSR (and IRSR if required) for PMSAv7_RAM_REGION
*/
__HEAD
ENTRY(__setup_mpu)
/* Probe for v7 PMSA compliance */
M_CLASS(movw r12, #:lower16:BASEADDR_V7M_SCB)
M_CLASS(movt r12, #:upper16:BASEADDR_V7M_SCB)
AR_CLASS(mrc p15, 0, r0, c0, c1, 4) @ Read ID_MMFR0
M_CLASS(ldr r0, [r12, 0x50])
and r0, r0, #(MMFR0_PMSA) @ PMSA field
teq r0, #(MMFR0_PMSAv7) @ PMSA v7
beq __setup_pmsa_v7
teq r0, #(MMFR0_PMSAv8) @ PMSA v8
beq __setup_pmsa_v8
ret lr
ENDPROC(__setup_mpu)
ENTRY(__setup_pmsa_v7)
/* Calculate the size of a region covering just the kernel */
ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET
ldr r6, =(_end) @ Cover whole kernel
sub r6, r6, r5 @ Minimum size of region to map
clz r6, r6 @ Region size must be 2^N...
rsb r6, r6, #31 @ ...so round up region size
lsl r6, r6, #PMSAv7_RSR_SZ @ Put size in right field
orr r6, r6, #(1 << PMSAv7_RSR_EN) @ Set region enabled bit
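/*
 * Worked example (hypothetical size; assumes the PMSAv7 DRSR encoding in
 * which a size-field value N selects a 2^(N+1)-byte region): for a 5 MiB
 * kernel image, clz gives 9, so r6 = 31 - 9 = 22, i.e. an 8 MiB region,
 * the smallest power-of-two region that covers the image.
 */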
/* Determine whether the D/I-side memory map is unified. We set the
* flags here and continue to use them for the rest of this function */
AR_CLASS(mrc p15, 0, r0, c0, c0, 4) @ MPUIR
M_CLASS(ldr r0, [r12, #MPU_TYPE])
ands r5, r0, #MPUIR_DREGION_SZMASK @ 0 size d region => No MPU
bxeq lr
tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified
/* Setup second region first to free up r6 */
set_region_nr r0, #PMSAv7_RAM_REGION, r12
isb
/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET
ldr r5,=(PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL)
setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ PHYS_OFFSET, shared, enabled
beq 1f @ Memory-map not unified
setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12 @ PHYS_OFFSET, shared, enabled
1: isb
/* First/background region */
set_region_nr r0, #PMSAv7_BG_REGION, r12
isb
/* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */
mov r0, #0 @ BG region starts at 0x0
ldr r5,=(PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0NA)
mov r6, #PMSAv7_RSR_ALL_MEM @ 4GB region, enabled
setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ 0x0, BG region, enabled
beq 2f @ Memory-map not unified
setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12 @ 0x0, BG region, enabled
2: isb
#ifdef CONFIG_XIP_KERNEL
set_region_nr r0, #PMSAv7_ROM_REGION, r12
isb
ldr r5,=(PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL)
ldr r0, =CONFIG_XIP_PHYS_ADDR @ ROM start
ldr r6, =(_exiprom) @ ROM end
sub r6, r6, r0 @ Minimum size of region to map
clz r6, r6 @ Region size must be 2^N...
rsb r6, r6, #31 @ ...so round up region size
lsl r6, r6, #PMSAv7_RSR_SZ @ Put size in right field
orr r6, r6, #(1 << PMSAv7_RSR_EN) @ Set region enabled bit
setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled
beq 3f @ Memory-map not unified
setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled
3: isb
#endif
ret lr
ENDPROC(__setup_pmsa_v7)
ENTRY(__setup_pmsa_v8)
mov r0, #0
AR_CLASS(mcr p15, 0, r0, c6, c2, 1) @ PRSEL
M_CLASS(str r0, [r12, #PMSAv8_RNR])
isb
#ifdef CONFIG_XIP_KERNEL
ldr r5, =CONFIG_XIP_PHYS_ADDR @ ROM start
ldr r6, =(_exiprom) @ ROM end
sub r6, r6, #1
bic r6, r6, #(PMSAv8_MINALIGN - 1)
orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)
AR_CLASS(mcr p15, 0, r5, c6, c8, 0) @ PRBAR0
AR_CLASS(mcr p15, 0, r6, c6, c8, 1) @ PRLAR0
M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(0)])
M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(0)])
#endif
ldr r5, =KERNEL_START
ldr r6, =KERNEL_END
sub r6, r6, #1
bic r6, r6, #(PMSAv8_MINALIGN - 1)
orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)
AR_CLASS(mcr p15, 0, r5, c6, c8, 4) @ PRBAR1
AR_CLASS(mcr p15, 0, r6, c6, c8, 5) @ PRLAR1
M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(1)])
M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(1)])
/* Setup Background: 0x0 - min(KERNEL_START, XIP_PHYS_ADDR) */
#ifdef CONFIG_XIP_KERNEL
ldr r6, =KERNEL_START
ldr r5, =CONFIG_XIP_PHYS_ADDR
cmp r6, r5
movcs r6, r5
#else
ldr r6, =KERNEL_START
#endif
cmp r6, #0
beq 1f
mov r5, #0
sub r6, r6, #1
bic r6, r6, #(PMSAv8_MINALIGN - 1)
orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
AR_CLASS(mcr p15, 0, r5, c6, c9, 0) @ PRBAR2
AR_CLASS(mcr p15, 0, r6, c6, c9, 1) @ PRLAR2
M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(2)])
M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(2)])
1:
/* Setup Background: max(KERNEL_END, _exiprom) - 0xffffffff */
#ifdef CONFIG_XIP_KERNEL
ldr r5, =KERNEL_END
ldr r6, =(_exiprom)
cmp r5, r6
movcc r5, r6
#else
ldr r5, =KERNEL_END
#endif
mov r6, #0xffffffff
bic r6, r6, #(PMSAv8_MINALIGN - 1)
orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
AR_CLASS(mcr p15, 0, r5, c6, c9, 4) @ PRBAR3
AR_CLASS(mcr p15, 0, r6, c6, c9, 5) @ PRLAR3
M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(3)])
M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(3)])
#ifdef CONFIG_XIP_KERNEL
/* Setup Background: min(_exiprom, KERNEL_END) - max(KERNEL_START, XIP_PHYS_ADDR) */
ldr r5, =(_exiprom)
ldr r6, =KERNEL_END
cmp r5, r6
movcs r5, r6
ldr r6, =KERNEL_START
ldr r0, =CONFIG_XIP_PHYS_ADDR
cmp r6, r0
movcc r6, r0
sub r6, r6, #1
bic r6, r6, #(PMSAv8_MINALIGN - 1)
orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
#ifdef CONFIG_CPU_V7M
/* There is no alias for n == 4 */
mov r0, #4
str r0, [r12, #PMSAv8_RNR] @ PRSEL
isb
str r5, [r12, #PMSAv8_RBAR_A(0)]
str r6, [r12, #PMSAv8_RLAR_A(0)]
#else
mcr p15, 0, r5, c6, c10, 0 @ PRBAR4
mcr p15, 0, r6, c6, c10, 1 @ PRLAR4
#endif
#endif
ret lr
ENDPROC(__setup_pmsa_v8)
#ifdef CONFIG_SMP
/*
* r6: pointer at mpu_rgn_info
*/
.text
ENTRY(__secondary_setup_mpu)
/* Use MPU region info supplied by __cpu_up */
ldr r6, [r7] @ get secondary_data.mpu_rgn_info
/* Probe for v7 PMSA compliance */
mrc p15, 0, r0, c0, c1, 4 @ Read ID_MMFR0
and r0, r0, #(MMFR0_PMSA) @ PMSA field
teq r0, #(MMFR0_PMSAv7) @ PMSA v7
beq __secondary_setup_pmsa_v7
teq r0, #(MMFR0_PMSAv8) @ PMSA v8
beq __secondary_setup_pmsa_v8
b __error_p
ENDPROC(__secondary_setup_mpu)
/*
* r6: pointer at mpu_rgn_info
*/
ENTRY(__secondary_setup_pmsa_v7)
/* Determine whether the D/I-side memory map is unified. We set the
* flags here and continue to use them for the rest of this function */
mrc p15, 0, r0, c0, c0, 4 @ MPUIR
ands r5, r0, #MPUIR_DREGION_SZMASK @ 0 size d region => No MPU
beq __error_p
ldr r4, [r6, #MPU_RNG_INFO_USED]
mov r5, #MPU_RNG_SIZE
add r3, r6, #MPU_RNG_INFO_RNGS
mla r3, r4, r5, r3
1:
tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified
sub r3, r3, #MPU_RNG_SIZE
sub r4, r4, #1
set_region_nr r0, r4
isb
ldr r0, [r3, #MPU_RGN_DRBAR]
ldr r6, [r3, #MPU_RGN_DRSR]
ldr r5, [r3, #MPU_RGN_DRACR]
setup_region r0, r5, r6, PMSAv7_DATA_SIDE
beq 2f
setup_region r0, r5, r6, PMSAv7_INSTR_SIDE
2: isb
mrc p15, 0, r0, c0, c0, 4 @ Reevaluate the MPUIR
cmp r4, #0
bgt 1b
ret lr
ENDPROC(__secondary_setup_pmsa_v7)
ENTRY(__secondary_setup_pmsa_v8)
ldr r4, [r6, #MPU_RNG_INFO_USED]
#ifndef CONFIG_XIP_KERNEL
add r4, r4, #1
#endif
mov r5, #MPU_RNG_SIZE
add r3, r6, #MPU_RNG_INFO_RNGS
mla r3, r4, r5, r3
1:
sub r3, r3, #MPU_RNG_SIZE
sub r4, r4, #1
mcr p15, 0, r4, c6, c2, 1 @ PRSEL
isb
ldr r5, [r3, #MPU_RGN_PRBAR]
ldr r6, [r3, #MPU_RGN_PRLAR]
mcr p15, 0, r5, c6, c3, 0 @ PRBAR
mcr p15, 0, r6, c6, c3, 1 @ PRLAR
cmp r4, #0
bgt 1b
ret lr
ENDPROC(__secondary_setup_pmsa_v8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_ARM_MPU */
#include "head-common.S"
|
AirFortressIlikara/LS2K0300-linux-4.19
| 7,806
|
arch/arm/kernel/entry-ftrace.S
|
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/assembler.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#include "entry-header.S"
/*
* When compiling with -pg, gcc inserts a call to the mcount routine at the
* start of every function. In mcount, apart from the function's address (in
* lr), we need to get hold of the function's caller's address.
*
* Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
*
* bl mcount
*
* These versions have the limitation that in order for the mcount routine to
* be able to determine the function's caller's address, an APCS-style frame
* pointer (which is set up with something like the code below) is required.
*
* mov ip, sp
* push {fp, ip, lr, pc}
* sub fp, ip, #4
*
* With EABI, these frame pointers are not available unless -mapcs-frame is
* specified, and if building as Thumb-2, not even then.
*
* Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
* with call sites like:
*
* push {lr}
* bl __gnu_mcount_nc
*
* With these compilers, frame pointers are not necessary.
*
* mcount can be thought of as a function called in the middle of a subroutine
* call. As such, it needs to be transparent for both the caller and the
* callee: the original lr needs to be restored when leaving mcount, and no
* registers should be clobbered. (In the __gnu_mcount_nc implementation, we
* clobber the ip register. This is OK because the ARM calling convention
* allows it to be clobbered in subroutines and doesn't use it to hold
* parameters.)
*
* When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
* for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
* arch/arm/kernel/ftrace.c).
*/
#ifndef CONFIG_OLD_MCOUNT
#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
#endif
#endif
.macro mcount_adjust_addr rd, rn
bic \rd, \rn, #1 @ clear the Thumb bit if present
sub \rd, \rd, #MCOUNT_INSN_SIZE
.endm
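/*
 * Example (illustrative): for a Thumb-mode caller, lr arrives with bit 0 set
 * and points just past the call into the profiling code, so clearing the
 * Thumb bit and subtracting MCOUNT_INSN_SIZE recovers the address of the
 * mcount call site within the instrumented function.
 */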
.macro __mcount suffix
mcount_enter
ldr r0, =ftrace_trace_function
ldr r2, [r0]
adr r0, .Lftrace_stub
cmp r0, r2
bne 1f
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ldr r1, =ftrace_graph_return
ldr r2, [r1]
cmp r0, r2
bne ftrace_graph_caller\suffix
ldr r1, =ftrace_graph_entry
ldr r2, [r1]
ldr r0, =ftrace_graph_entry_stub
cmp r0, r2
bne ftrace_graph_caller\suffix
#endif
mcount_exit
1: mcount_get_lr r1 @ lr of instrumented func
mcount_adjust_addr r0, lr @ instrumented function
badr lr, 2f
mov pc, r2
2: mcount_exit
.endm
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
.macro __ftrace_regs_caller
sub sp, sp, #8 @ space for PC and CPSR;
@ OLD_R0 will overwrite previous LR
add ip, sp, #12 @ move in IP the value of SP as it was
@ before the push {lr} of the mcount mechanism
str lr, [sp, #0] @ store LR instead of PC
ldr lr, [sp, #8] @ get previous LR
str r0, [sp, #8] @ write r0 as OLD_R0 over previous LR
stmdb sp!, {ip, lr}
stmdb sp!, {r0-r11, lr}
@ stack content at this point:
@ 0: R0, 4: R1, ..., 48: LR, 52: SP + 4, 56: previous LR,
@ 60: LR, 64: PSR, 68: OLD_R0 (72 bytes in total)
mov r3, sp @ struct pt_regs*
ldr r2, =function_trace_op
ldr r2, [r2] @ pointer to the current
@ function tracing op
ldr r1, [sp, #S_LR] @ lr of instrumented func
ldr lr, [sp, #S_PC] @ get LR
mcount_adjust_addr r0, lr @ instrumented function
.globl ftrace_regs_call
ftrace_regs_call:
bl ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_regs_call
ftrace_graph_regs_call:
mov r0, r0
#endif
@ pop saved regs
ldmia sp!, {r0-r12} @ restore r0 through r12
ldr ip, [sp, #8] @ restore PC
ldr lr, [sp, #4] @ restore LR
ldr sp, [sp, #0] @ restore SP
mov pc, ip @ return
.endm
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.macro __ftrace_graph_regs_caller
sub r0, fp, #4 @ lr of instrumented routine (parent)
@ called from __ftrace_regs_caller
ldr r1, [sp, #S_PC] @ instrumented routine (func)
mcount_adjust_addr r1, r1
mov r2, fp @ frame pointer
bl prepare_ftrace_return
@ pop registers saved in ftrace_regs_caller
ldmia sp!, {r0-r12} @ restore r0 through r12
ldr ip, [sp, #8] @ restore PC
ldr lr, [sp, #4] @ restore LR
ldr sp, [sp, #0] @ restore SP
mov pc, ip @ return
.endm
#endif
#endif
.macro __ftrace_caller suffix
mcount_enter
mcount_get_lr r1 @ lr of instrumented func
mcount_adjust_addr r0, lr @ instrumented function
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
ldr r2, =function_trace_op
ldr r2, [r2] @ pointer to the current
@ function tracing op
mov r3, #0 @ regs is NULL
#endif
.globl ftrace_call\suffix
ftrace_call\suffix:
bl ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
mov r0, r0
#endif
mcount_exit
.endm
.macro __ftrace_graph_caller
sub r0, fp, #4 @ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
@ called from __ftrace_caller, saved in mcount_enter
ldr r1, [sp, #16] @ instrumented routine (func)
mcount_adjust_addr r1, r1
#else
@ called from __mcount, untouched in lr
mcount_adjust_addr r1, lr @ instrumented routine (func)
#endif
mov r2, fp @ frame pointer
bl prepare_ftrace_return
mcount_exit
.endm
#ifdef CONFIG_OLD_MCOUNT
/*
* mcount
*/
.macro mcount_enter
stmdb sp!, {r0-r3, lr}
.endm
.macro mcount_get_lr reg
ldr \reg, [fp, #-4]
.endm
.macro mcount_exit
ldr lr, [fp, #-4]
ldmia sp!, {r0-r3, pc}
.endm
ENTRY(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
stmdb sp!, {lr}
ldr lr, [fp, #-4]
ldmia sp!, {pc}
#else
__mcount _old
#endif
ENDPROC(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
__ftrace_caller _old
ENDPROC(ftrace_caller_old)
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
__ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif
.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#endif
/*
* __gnu_mcount_nc
*/
.macro mcount_enter
/*
* This pad compensates for the push {lr} at the call site. Note that we are
* unable to unwind through a function which does not otherwise save its lr.
*/
UNWIND(.pad #4)
stmdb sp!, {r0-r3, lr}
UNWIND(.save {r0-r3, lr})
.endm
.macro mcount_get_lr reg
ldr \reg, [sp, #20]
.endm
.macro mcount_exit
ldmia sp!, {r0-r3, ip, lr}
ret ip
.endm
ENTRY(__gnu_mcount_nc)
UNWIND(.fnstart)
#ifdef CONFIG_DYNAMIC_FTRACE
mov ip, lr
ldmia sp!, {lr}
ret ip
#else
__mcount
#endif
UNWIND(.fnend)
ENDPROC(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
UNWIND(.fnstart)
__ftrace_caller
UNWIND(.fnend)
ENDPROC(ftrace_caller)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
ENTRY(ftrace_regs_caller)
UNWIND(.fnstart)
__ftrace_regs_caller
UNWIND(.fnend)
ENDPROC(ftrace_regs_caller)
#endif
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
UNWIND(.fnstart)
__ftrace_graph_caller
UNWIND(.fnend)
ENDPROC(ftrace_graph_caller)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
ENTRY(ftrace_graph_regs_caller)
UNWIND(.fnstart)
__ftrace_graph_regs_caller
UNWIND(.fnend)
ENDPROC(ftrace_graph_regs_caller)
#endif
#endif
.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl return_to_handler
return_to_handler:
stmdb sp!, {r0-r3}
mov r0, fp @ frame pointer
bl ftrace_return_to_handler
mov lr, r0 @ r0 has real ret addr
ldmia sp!, {r0-r3}
ret lr
#endif
ENTRY(ftrace_stub)
.Lftrace_stub:
ret lr
ENDPROC(ftrace_stub)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,935
|
arch/arm/kernel/vmlinux-xip.lds.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* ld script to make ARM Linux kernel
* taken from the i386 version by Russell King
* Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
*/
/* No __ro_after_init data in the .rodata section - which will always be ro */
#define RO_AFTER_INIT_DATA
#include <linux/sizes.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/mpu.h>
#include <asm/page.h>
#include "vmlinux.lds.h"
OUTPUT_ARCH(arm)
ENTRY(stext)
#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif
SECTIONS
{
/*
* XXX: The linker does not define how output sections are
* assigned to input sections when there are multiple statements
* matching the same input section name. There is no documented
* order of matching.
*
* unwind exit sections must be discarded before the rest of the
* unwind sections get included.
*/
/DISCARD/ : {
ARM_DISCARD
*(.alt.smp.init)
*(.pv_table)
}
. = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
_xiprom = .; /* XIP ROM area to be mapped */
.head.text : {
_text = .;
HEAD_TEXT
}
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
ARM_TEXT
}
RO_DATA(PAGE_SIZE)
. = ALIGN(4);
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
ARM_MMU_KEEP(*(__ex_table))
__stop___ex_table = .;
}
#ifdef CONFIG_ARM_UNWIND
ARM_UNWIND_SECTIONS
#endif
NOTES
_etext = .; /* End of text and rodata section */
ARM_VECTORS
INIT_TEXT_SECTION(8)
.exit.text : {
ARM_EXIT_KEEP(EXIT_TEXT)
}
.init.proc.info : {
ARM_CPU_DISCARD(PROC_INFO)
}
.init.arch.info : {
__arch_info_begin = .;
*(.arch.info.init)
__arch_info_end = .;
}
.init.tagtable : {
__tagtable_begin = .;
*(.taglist.init)
__tagtable_end = .;
}
.init.rodata : {
INIT_SETUP(16)
INIT_CALLS
CON_INITCALL
SECURITY_INITCALL
INIT_RAM_FS
}
#ifdef CONFIG_ARM_MPU
. = ALIGN(SZ_128K);
#endif
_exiprom = .; /* End of XIP ROM area */
/*
* From this point, stuff is considered writable and will be copied to RAM
*/
__data_loc = ALIGN(4); /* location in file */
. = PAGE_OFFSET + TEXT_OFFSET; /* location in memory */
#undef LOAD_OFFSET
#define LOAD_OFFSET (PAGE_OFFSET + TEXT_OFFSET - __data_loc)
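/*
 * Illustrative note: with this definition, AT(ADDR(section) - LOAD_OFFSET)
 * evaluates to __data_loc + (ADDR(section) - PAGE_OFFSET - TEXT_OFFSET),
 * i.e. the sections that execute from RAM are laid out consecutively in
 * ROM starting at __data_loc, from where they are copied at boot.
 */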
. = ALIGN(THREAD_SIZE);
_sdata = .;
RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
.data.ro_after_init : AT(ADDR(.data.ro_after_init) - LOAD_OFFSET) {
*(.data..ro_after_init)
}
_edata = .;
. = ALIGN(PAGE_SIZE);
__init_begin = .;
.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
INIT_DATA
}
.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
ARM_EXIT_KEEP(EXIT_DATA)
}
#ifdef CONFIG_SMP
PERCPU_SECTION(L1_CACHE_BYTES)
#endif
#ifdef CONFIG_HAVE_TCM
ARM_TCM
#endif
/*
* End of copied data. We need a dummy section to get its LMA.
* Also located before the final ALIGN() as trailing padding is not stored
* in the resulting binary file and is useless to copy.
*/
.data.endmark : AT(ADDR(.data.endmark) - LOAD_OFFSET) { }
_edata_loc = LOADADDR(.data.endmark);
. = ALIGN(PAGE_SIZE);
__init_end = .;
BSS_SECTION(0, 0, 8)
#ifdef CONFIG_ARM_MPU
. = ALIGN(PMSAv8_MINALIGN);
#endif
_end = .;
STABS_DEBUG
}
/*
* These must never be empty
* If you have to comment these two assert statements out, your
* binutils is too old (for other reasons as well)
*/
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
/*
* The HYP init code can't be more than a page long,
* and should not cross a page boundary.
* The above comment applies as well.
*/
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
"HYP init code too big or misaligned")
#ifdef CONFIG_XIP_DEFLATED_DATA
/*
* The .bss is used as a stack area for __inflate_kernel_data() whose stack
* frame is 9568 bytes. Make sure it has extra room left.
*/
ASSERT((_end - __bss_start) >= 12288, ".bss too small for CONFIG_XIP_DEFLATED_DATA")
#endif
#ifdef CONFIG_ARM_MPU
/*
* Due to PMSAv7 restrictions on base address and size we have to
* enforce minimal alignment. It was seen that a weaker alignment
* restriction on _xiprom will likely force the XIP address space to
* span multiple MPU regions; we could then find ourselves reprogramming
* the MPU region we are executing from with settings that do not cover
* the reprogramming code itself, so as soon as the MPU settings are
* updated we would immediately try to execute straight from the
* background region, which is XN.
* It seems that 1M alignment should suit most users.
* _exiprom is aligned to 1/8 of 1M so it can be covered by subregion
* disable.
*/
ASSERT(!(_xiprom & (SZ_1M - 1)), "XIP start address may cause MPU programming issues")
ASSERT(!(_exiprom & (SZ_128K - 1)), "XIP end address may cause MPU programming issues")
#endif
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,303
|
arch/arm/kernel/relocate_kernel.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* relocate_kernel.S - put the kernel image in place to boot
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/kexec.h>
.align 3 /* not needed for this code, but keeps fncpy() happy */
ENTRY(relocate_new_kernel)
adr r7, relocate_new_kernel_end
ldr r0, [r7, #KEXEC_INDIR_PAGE]
ldr r1, [r7, #KEXEC_START_ADDR]
/*
* If there is no indirection page (we are doing crashdumps)
* skip any relocation.
*/
cmp r0, #0
beq 2f
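/*
 * Each entry read from the indirection page below is a page address with
 * flag bits in its low bits, handled by the code that follows: bit 0 =
 * destination page, bit 1 = next indirection page, bit 2 = done,
 * bit 3 = source page to copy (1024 words, i.e. one 4K page).
 */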
0: /* top, read another word for the indirection page */
ldr r3, [r0],#4
/* Is it a destination page? If so, put the destination address into r4 */
tst r3,#1,0
beq 1f
bic r4,r3,#1
b 0b
1:
/* Is it an indirection page */
tst r3,#2,0
beq 1f
bic r0,r3,#2
b 0b
1:
/* are we done ? */
tst r3,#4,0
beq 1f
b 2f
1:
/* is it source ? */
tst r3,#8,0
beq 0b
bic r3,r3,#8
mov r6,#1024
9:
ldr r5,[r3],#4
str r5,[r4],#4
subs r6,r6,#1
bne 9b
b 0b
2:
/* Jump to relocated kernel */
mov lr, r1
mov r0, #0
ldr r1, [r7, #KEXEC_MACH_TYPE]
ldr r2, [r7, #KEXEC_R2]
ARM( ret lr )
THUMB( bx lr )
ENDPROC(relocate_new_kernel)
.align 3
relocate_new_kernel_end:
.globl relocate_new_kernel_size
relocate_new_kernel_size:
.long relocate_new_kernel_end - relocate_new_kernel
|
AirFortressIlikara/LS2K0300-linux-4.19
| 8,685
|
arch/arm/kernel/iwmmxt.S
|
/*
* linux/arch/arm/kernel/iwmmxt.S
*
* XScale iWMMXt (Concan) context switching and handling
*
* Initial code:
* Copyright (c) 2003, Intel Corporation
*
* Full lazy switching support, optimizations and more, by Nicolas Pitre
* Copyright (c) 2003-2004, MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
#define PJ4(code...) code
#define XSC(code...)
#elif defined(CONFIG_CPU_MOHAWK) || \
defined(CONFIG_CPU_XSC3) || \
defined(CONFIG_CPU_XSCALE)
#define PJ4(code...)
#define XSC(code...) code
#else
#error "Unsupported iWMMXt architecture"
#endif
#define MMX_WR0 (0x00)
#define MMX_WR1 (0x08)
#define MMX_WR2 (0x10)
#define MMX_WR3 (0x18)
#define MMX_WR4 (0x20)
#define MMX_WR5 (0x28)
#define MMX_WR6 (0x30)
#define MMX_WR7 (0x38)
#define MMX_WR8 (0x40)
#define MMX_WR9 (0x48)
#define MMX_WR10 (0x50)
#define MMX_WR11 (0x58)
#define MMX_WR12 (0x60)
#define MMX_WR13 (0x68)
#define MMX_WR14 (0x70)
#define MMX_WR15 (0x78)
#define MMX_WCSSF (0x80)
#define MMX_WCASF (0x84)
#define MMX_WCGR0 (0x88)
#define MMX_WCGR1 (0x8C)
#define MMX_WCGR2 (0x90)
#define MMX_WCGR3 (0x94)
#define MMX_SIZE (0x98)
.text
.arm
/*
* Lazy switching of Concan coprocessor context
*
* r10 = struct thread_info pointer
* r9 = ret_from_exception
* lr = undefined instr exit
*
* called from prefetch exception handler with interrupts enabled
*/
ENTRY(iwmmxt_task_enable)
inc_preempt_count r10, r3
XSC(mrc p15, 0, r2, c15, c1, 0)
PJ4(mrc p15, 0, r2, c1, c0, 2)
@ CP0 and CP1 accessible?
XSC(tst r2, #0x3)
PJ4(tst r2, #0xf)
bne 4f @ if so no business here
@ enable access to CP0 and CP1
XSC(orr r2, r2, #0x3)
XSC(mcr p15, 0, r2, c15, c1, 0)
PJ4(orr r2, r2, #0xf)
PJ4(mcr p15, 0, r2, c1, c0, 2)
ldr r3, =concan_owner
add r0, r10, #TI_IWMMXT_STATE @ get task Concan save area
ldr r2, [sp, #60] @ current task pc value
ldr r1, [r3] @ get current Concan owner
str r0, [r3] @ this task now owns Concan regs
sub r2, r2, #4 @ adjust pc back
str r2, [sp, #60]
mrc p15, 0, r2, c2, c0, 0
mov r2, r2 @ cpwait
bl concan_save
#ifdef CONFIG_PREEMPT_COUNT
get_thread_info r10
#endif
4: dec_preempt_count r10, r3
ret r9 @ normal exit from exception
concan_save:
teq r1, #0 @ test for last ownership
beq concan_load @ no owner, skip save
tmrc r2, wCon
@ CUP? wCx
tst r2, #0x1
beq 1f
concan_dump:
wstrw wCSSF, [r1, #MMX_WCSSF]
wstrw wCASF, [r1, #MMX_WCASF]
wstrw wCGR0, [r1, #MMX_WCGR0]
wstrw wCGR1, [r1, #MMX_WCGR1]
wstrw wCGR2, [r1, #MMX_WCGR2]
wstrw wCGR3, [r1, #MMX_WCGR3]
1: @ MUP? wRn
tst r2, #0x2
beq 2f
wstrd wR0, [r1, #MMX_WR0]
wstrd wR1, [r1, #MMX_WR1]
wstrd wR2, [r1, #MMX_WR2]
wstrd wR3, [r1, #MMX_WR3]
wstrd wR4, [r1, #MMX_WR4]
wstrd wR5, [r1, #MMX_WR5]
wstrd wR6, [r1, #MMX_WR6]
wstrd wR7, [r1, #MMX_WR7]
wstrd wR8, [r1, #MMX_WR8]
wstrd wR9, [r1, #MMX_WR9]
wstrd wR10, [r1, #MMX_WR10]
wstrd wR11, [r1, #MMX_WR11]
wstrd wR12, [r1, #MMX_WR12]
wstrd wR13, [r1, #MMX_WR13]
wstrd wR14, [r1, #MMX_WR14]
wstrd wR15, [r1, #MMX_WR15]
2: teq r0, #0 @ anything to load?
reteq lr @ if not, return
concan_load:
@ Load wRn
wldrd wR0, [r0, #MMX_WR0]
wldrd wR1, [r0, #MMX_WR1]
wldrd wR2, [r0, #MMX_WR2]
wldrd wR3, [r0, #MMX_WR3]
wldrd wR4, [r0, #MMX_WR4]
wldrd wR5, [r0, #MMX_WR5]
wldrd wR6, [r0, #MMX_WR6]
wldrd wR7, [r0, #MMX_WR7]
wldrd wR8, [r0, #MMX_WR8]
wldrd wR9, [r0, #MMX_WR9]
wldrd wR10, [r0, #MMX_WR10]
wldrd wR11, [r0, #MMX_WR11]
wldrd wR12, [r0, #MMX_WR12]
wldrd wR13, [r0, #MMX_WR13]
wldrd wR14, [r0, #MMX_WR14]
wldrd wR15, [r0, #MMX_WR15]
@ Load wCx
wldrw wCSSF, [r0, #MMX_WCSSF]
wldrw wCASF, [r0, #MMX_WCASF]
wldrw wCGR0, [r0, #MMX_WCGR0]
wldrw wCGR1, [r0, #MMX_WCGR1]
wldrw wCGR2, [r0, #MMX_WCGR2]
wldrw wCGR3, [r0, #MMX_WCGR3]
@ clear CUP/MUP (only if r1 != 0)
teq r1, #0
mov r2, #0
reteq lr
tmcr wCon, r2
ret lr
ENDPROC(iwmmxt_task_enable)
/*
* Back up Concan regs to save area and disable access to them
* (mainly for gdb or sleep mode usage)
*
* r0 = struct thread_info pointer of target task or NULL for any
*/
ENTRY(iwmmxt_task_disable)
stmfd sp!, {r4, lr}
mrs ip, cpsr
orr r2, ip, #PSR_I_BIT @ disable interrupts
msr cpsr_c, r2
ldr r3, =concan_owner
add r2, r0, #TI_IWMMXT_STATE @ get task Concan save area
ldr r1, [r3] @ get current Concan owner
teq r1, #0 @ any current owner?
beq 1f @ no: quit
teq r0, #0 @ any owner?
teqne r1, r2 @ or specified one?
bne 1f @ no: quit
@ enable access to CP0 and CP1
XSC(mrc p15, 0, r4, c15, c1, 0)
XSC(orr r4, r4, #0x3)
XSC(mcr p15, 0, r4, c15, c1, 0)
PJ4(mrc p15, 0, r4, c1, c0, 2)
PJ4(orr r4, r4, #0xf)
PJ4(mcr p15, 0, r4, c1, c0, 2)
mov r0, #0 @ nothing to load
str r0, [r3] @ no more current owner
mrc p15, 0, r2, c2, c0, 0
mov r2, r2 @ cpwait
bl concan_save
@ disable access to CP0 and CP1
XSC(bic r4, r4, #0x3)
XSC(mcr p15, 0, r4, c15, c1, 0)
PJ4(bic r4, r4, #0xf)
PJ4(mcr p15, 0, r4, c1, c0, 2)
mrc p15, 0, r2, c2, c0, 0
mov r2, r2 @ cpwait
1: msr cpsr_c, ip @ restore interrupt mode
ldmfd sp!, {r4, pc}
ENDPROC(iwmmxt_task_disable)
/*
* Copy Concan state to given memory address
*
* r0 = struct thread_info pointer of target task
* r1 = memory address where to store Concan state
*
* this is called mainly in the creation of signal stack frames
*/
ENTRY(iwmmxt_task_copy)
mrs ip, cpsr
orr r2, ip, #PSR_I_BIT @ disable interrupts
msr cpsr_c, r2
ldr r3, =concan_owner
add r2, r0, #TI_IWMMXT_STATE @ get task Concan save area
ldr r3, [r3] @ get current Concan owner
teq r2, r3 @ does this task own it...
beq 1f
@ current Concan values are in the task save area
msr cpsr_c, ip @ restore interrupt mode
mov r0, r1
mov r1, r2
mov r2, #MMX_SIZE
b memcpy
1: @ this task owns Concan regs -- grab a copy from there
mov r0, #0 @ nothing to load
mov r2, #3 @ save all regs
mov r3, lr @ preserve return address
bl concan_dump
msr cpsr_c, ip @ restore interrupt mode
ret r3
ENDPROC(iwmmxt_task_copy)
/*
* Restore Concan state from given memory address
*
* r0 = struct thread_info pointer of target task
* r1 = memory address where to get Concan state from
*
* this is used to restore Concan state when unwinding a signal stack frame
*/
ENTRY(iwmmxt_task_restore)
mrs ip, cpsr
orr r2, ip, #PSR_I_BIT @ disable interrupts
msr cpsr_c, r2
ldr r3, =concan_owner
add r2, r0, #TI_IWMMXT_STATE @ get task Concan save area
ldr r3, [r3] @ get current Concan owner
bic r2, r2, #0x7 @ 64-bit alignment
teq r2, r3 @ does this task own it...
beq 1f
@ this task doesn't own Concan regs -- use its save area
msr cpsr_c, ip @ restore interrupt mode
mov r0, r2
mov r2, #MMX_SIZE
b memcpy
1: @ this task owns Concan regs -- load them directly
mov r0, r1
mov r1, #0 @ don't clear CUP/MUP
mov r3, lr @ preserve return address
bl concan_load
msr cpsr_c, ip @ restore interrupt mode
ret r3
ENDPROC(iwmmxt_task_restore)
/*
* Concan handling on task switch
*
* r0 = next thread_info pointer
*
* Called only from the iwmmxt notifier with task preemption disabled.
*/
ENTRY(iwmmxt_task_switch)
XSC(mrc p15, 0, r1, c15, c1, 0)
PJ4(mrc p15, 0, r1, c1, c0, 2)
@ CP0 and CP1 accessible?
XSC(tst r1, #0x3)
PJ4(tst r1, #0xf)
bne 1f @ yes: block them for next task
ldr r2, =concan_owner
add r3, r0, #TI_IWMMXT_STATE @ get next task Concan save area
ldr r2, [r2] @ get current Concan owner
teq r2, r3 @ next task owns it?
retne lr @ no: leave Concan disabled
1: @ flip Concan access
XSC(eor r1, r1, #0x3)
XSC(mcr p15, 0, r1, c15, c1, 0)
PJ4(eor r1, r1, #0xf)
PJ4(mcr p15, 0, r1, c1, c0, 2)
mrc p15, 0, r1, c2, c0, 0
sub pc, lr, r1, lsr #32 @ cpwait and return
ENDPROC(iwmmxt_task_switch)
/*
* Remove Concan ownership of given task
*
* r0 = struct thread_info pointer
*/
ENTRY(iwmmxt_task_release)
mrs r2, cpsr
orr ip, r2, #PSR_I_BIT @ disable interrupts
msr cpsr_c, ip
ldr r3, =concan_owner
add r0, r0, #TI_IWMMXT_STATE @ get task Concan save area
ldr r1, [r3] @ get current Concan owner
eors r0, r0, r1 @ if equal...
streq r0, [r3] @ then clear ownership
msr cpsr_c, r2 @ restore interrupts
ret lr
ENDPROC(iwmmxt_task_release)
.data
.align 2
concan_owner:
.word 0
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,579
|
arch/arm/kernel/entry-v7m.S
|
/*
* linux/arch/arm/kernel/entry-v7m.S
*
* Copyright (C) 2008 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Low-level vector interface routines for the ARMv7-M architecture
*/
#include <asm/memory.h>
#include <asm/glue.h>
#include <asm/thread_notify.h>
#include <asm/v7m.h>
#include "entry-header.S"
#ifdef CONFIG_TRACE_IRQFLAGS
#error "CONFIG_TRACE_IRQFLAGS not supported on the current ARMv7M implementation"
#endif
__invalid_entry:
v7m_exception_entry
#ifdef CONFIG_PRINTK
adr r0, strerr
mrs r1, ipsr
mov r2, lr
bl printk
#endif
mov r0, sp
bl show_regs
1: b 1b
ENDPROC(__invalid_entry)
strerr: .asciz "\nUnhandled exception: IPSR = %08lx LR = %08lx\n"
.align 2
__irq_entry:
v7m_exception_entry
@
@ Invoke the IRQ handler
@
mrs r0, ipsr
ldr r1, =V7M_xPSR_EXCEPTIONNO
and r0, r1
sub r0, #16
mov r1, sp
stmdb sp!, {lr}
@ routine called with r0 = irq number, r1 = struct pt_regs *
bl nvic_handle_irq
pop {lr}
@
@ Check for any pending work if returning to user
@
ldr r1, =BASEADDR_V7M_SCB
ldr r0, [r1, V7M_SCB_ICSR]
tst r0, V7M_SCB_ICSR_RETTOBASE
beq 2f
get_thread_info tsk
ldr r2, [tsk, #TI_FLAGS]
tst r2, #_TIF_WORK_MASK
beq 2f @ no work pending
mov r0, #V7M_SCB_ICSR_PENDSVSET
str r0, [r1, V7M_SCB_ICSR] @ raise PendSV
2:
@ registers r0-r3 and r12 are automatically restored on exception
@ return. r4-r7 were not clobbered in v7m_exception_entry so for
@ correctness they don't need to be restored. So only r8-r11 must be
@ restored here. The easiest way to do so is to restore r0-r7, too.
ldmia sp!, {r0-r11}
add sp, #PT_REGS_SIZE-S_IP
cpsie i
bx lr
ENDPROC(__irq_entry)
__pendsv_entry:
v7m_exception_entry
ldr r1, =BASEADDR_V7M_SCB
mov r0, #V7M_SCB_ICSR_PENDSVCLR
str r0, [r1, V7M_SCB_ICSR] @ clear PendSV
@ execute the pending work, including reschedule
get_thread_info tsk
mov why, #0
b ret_to_user_from_irq
ENDPROC(__pendsv_entry)
/*
* Register switch for ARMv7-M processors.
* r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
* previous and next are guaranteed not to be the same.
*/
ENTRY(__switch_to)
.fnstart
.cantunwind
add ip, r1, #TI_CPU_SAVE
stmia ip!, {r4 - r11} @ Store most regs on stack
str sp, [ip], #4
str lr, [ip], #4
mov r5, r0
add r4, r2, #TI_CPU_SAVE
ldr r0, =thread_notify_head
mov r1, #THREAD_NOTIFY_SWITCH
bl atomic_notifier_call_chain
mov ip, r4
mov r0, r5
ldmia ip!, {r4 - r11} @ Load all regs saved previously
ldr sp, [ip]
ldr pc, [ip, #4]!
.fnend
ENDPROC(__switch_to)
.data
#if CONFIG_CPU_V7M_NUM_IRQ <= 112
.align 9
#else
.align 10
#endif
/*
* Vector table (natural alignment needs to be ensured)
*/
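/*
 * Sizing note (illustrative): the table holds 16 system entries plus
 * CONFIG_CPU_V7M_NUM_IRQ external entries of 4 bytes each, and ARMv7-M
 * requires the table base to be aligned to its size rounded up to a power
 * of two: 16 + 112 = 128 entries (512 bytes) is why .align 9 suffices
 * above, with .align 10 used for larger interrupt counts.
 */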
ENTRY(vector_table)
.long 0 @ 0 - Reset stack pointer
.long __invalid_entry @ 1 - Reset
.long __invalid_entry @ 2 - NMI
.long __invalid_entry @ 3 - HardFault
.long __invalid_entry @ 4 - MemManage
.long __invalid_entry @ 5 - BusFault
.long __invalid_entry @ 6 - UsageFault
.long __invalid_entry @ 7 - Reserved
.long __invalid_entry @ 8 - Reserved
.long __invalid_entry @ 9 - Reserved
.long __invalid_entry @ 10 - Reserved
.long vector_swi @ 11 - SVCall
.long __invalid_entry @ 12 - Debug Monitor
.long __invalid_entry @ 13 - Reserved
.long __pendsv_entry @ 14 - PendSV
.long __invalid_entry @ 15 - SysTick
.rept CONFIG_CPU_V7M_NUM_IRQ
.long __irq_entry @ External Interrupts
.endr
.align 2
.globl exc_ret
exc_ret:
.space 4
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,906
|
arch/arm/kernel/sigreturn_codes.S
|
/*
* sigreturn_codes.S - code snippets for sigreturn syscalls
*
* Created by: Victor Kamensky, 2013-08-13
* Copyright: (C) 2013 Linaro Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
/*
* For ARM syscalls, we encode the syscall number into the instruction.
* With EABI, the syscall number has to be loaded into r7. As a result,
* the ARM syscall sequence snippet will have a move and an svc in .arm encoding.
*
* For Thumb syscalls, we pass the syscall number via r7. We therefore
* need two 16-bit instructions in .thumb encoding
*
* Please note that the sigreturn_codes snippets are not executed in place.
* Instead they are just copied by the kernel into appropriate places. Code
* inside arch/arm/kernel/signal.c is very sensitive to the layout of these
* code snippets.
*/
/*
* In the CPU_THUMBONLY case, ARM opcodes are not allowed in the kernel.
* Note that in this case the code skips those instructions but uses the
* .org directive to keep the correct layout of the sigreturn_codes array.
*/
#ifndef CONFIG_CPU_THUMBONLY
#define ARM_OK(code...) code
#else
#define ARM_OK(code...)
#endif
.macro arm_slot n
.org sigreturn_codes + 12 * (\n)
ARM_OK( .arm )
.endm
.macro thumb_slot n
.org sigreturn_codes + 12 * (\n) + 8
.thumb
.endm
.macro arm_fdpic_slot n
.org sigreturn_codes + 24 + 20 * (\n)
ARM_OK( .arm )
.endm
.macro thumb_fdpic_slot n
.org sigreturn_codes + 24 + 20 * (\n) + 12
.thumb
.endm
#if __LINUX_ARM_ARCH__ <= 4
/*
* Note that we manually select the minimum architecture that supports
* the required Thumb opcodes, for the benefit of early architecture
* versions. It is OK for this file to be used in combination with other,
* lower architecture variants, since these code snippets are only used
* as input data.
*/
.arch armv4t
#endif
.section .rodata
.global sigreturn_codes
.type sigreturn_codes, #object
.align
sigreturn_codes:
/* ARM sigreturn syscall code snippet */
arm_slot 0
ARM_OK( mov r7, #(__NR_sigreturn - __NR_SYSCALL_BASE) )
ARM_OK( swi #(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE) )
/* Thumb sigreturn syscall code snippet */
thumb_slot 0
movs r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
swi #0
/* ARM sigreturn_rt syscall code snippet */
arm_slot 1
ARM_OK( mov r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE) )
ARM_OK( swi #(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE) )
/* Thumb sigreturn_rt syscall code snippet */
thumb_slot 1
movs r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
swi #0
/* ARM sigreturn restorer FDPIC bounce code snippet */
arm_fdpic_slot 0
ARM_OK( ldr r3, [sp, #SIGFRAME_RC3_OFFSET] )
ARM_OK( ldmia r3, {r3, r9} )
#ifdef CONFIG_ARM_THUMB
ARM_OK( bx r3 )
#else
ARM_OK( ret r3 )
#endif
/* Thumb sigreturn restorer FDPIC bounce code snippet */
thumb_fdpic_slot 0
ldr r3, [sp, #SIGFRAME_RC3_OFFSET]
ldmia r3, {r2, r3}
mov r9, r3
bx r2
/* ARM sigreturn_rt restorer FDPIC bounce code snippet */
arm_fdpic_slot 1
ARM_OK( ldr r3, [sp, #RT_SIGFRAME_RC3_OFFSET] )
ARM_OK( ldmia r3, {r3, r9} )
#ifdef CONFIG_ARM_THUMB
ARM_OK( bx r3 )
#else
ARM_OK( ret r3 )
#endif
/* Thumb sigreturn_rt restorer FDPIC bounce code snippet */
thumb_fdpic_slot 1
ldr r3, [sp, #RT_SIGFRAME_RC3_OFFSET]
ldmia r3, {r2, r3}
mov r9, r3
bx r2
/*
* Note on additional space: setup_return in signal.c
* always copies the same number of words regardless whether
* it is thumb case or not, so we need one additional padding
* word after the last entry.
*/
.space 4
.size sigreturn_codes, . - sigreturn_codes
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,621
|
arch/arm/kernel/smccc-call.S
|
/*
* Copyright (c) 2015, Linaro Limited
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/linkage.h>
#include <asm/opcodes-sec.h>
#include <asm/opcodes-virt.h>
#include <asm/unwind.h>
/*
* Wrap c macros in asm macros to delay expansion until after the
* SMCCC asm macro is expanded.
*/
.macro SMCCC_SMC
__SMC(0)
.endm
.macro SMCCC_HVC
__HVC(0)
.endm
.macro SMCCC instr
UNWIND( .fnstart)
mov r12, sp
push {r4-r7}
UNWIND( .save {r4-r7})
ldm r12, {r4-r7}
\instr
pop {r4-r7}
ldr r12, [sp, #(4 * 4)]
stm r12, {r0-r3}
bx lr
UNWIND( .fnend)
.endm
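/*
 * Calling-convention note (illustrative): per the AAPCS, arguments a0-a3
 * arrive in r0-r3 and a4-a7 on the stack. The macro above snapshots sp in
 * r12, loads a4-a7 into r4-r7 around the SMC/HVC, then, after the pop,
 * reads the struct arm_smccc_res pointer (the ninth argument) from
 * [sp, #16] and stores the returned r0-r3 through it.
 */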
/*
* void smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
* unsigned long a3, unsigned long a4, unsigned long a5,
* unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
* struct arm_smccc_quirk *quirk)
*/
ENTRY(__arm_smccc_smc)
SMCCC SMCCC_SMC
ENDPROC(__arm_smccc_smc)
/*
* void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
* unsigned long a3, unsigned long a4, unsigned long a5,
* unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
* struct arm_smccc_quirk *quirk)
*/
ENTRY(__arm_smccc_hvc)
SMCCC SMCCC_HVC
ENDPROC(__arm_smccc_hvc)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,250
|
arch/arm/kernel/fiqasm.S
|
/*
* linux/arch/arm/kernel/fiqasm.S
*
* Derived from code originally in linux/arch/arm/kernel/fiq.c:
*
* Copyright (C) 1998 Russell King
* Copyright (C) 1998, 1999 Phil Blundell
* Copyright (C) 2011, Linaro Limited
*
* FIQ support written by Philip Blundell <philb@gnu.org>, 1998.
*
* FIQ support re-written by Russell King to be more generic
*
* v7/Thumb-2 compatibility modifications by Linaro Limited, 2011.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
* Taking an interrupt in FIQ mode is death, so both these functions
* disable irqs for the duration.
*/
ENTRY(__set_fiq_regs)
mov r2, #PSR_I_BIT | PSR_F_BIT | FIQ_MODE
mrs r1, cpsr
msr cpsr_c, r2 @ select FIQ mode
mov r0, r0 @ avoid hazard prior to ARMv4
ldmia r0!, {r8 - r12}
ldr sp, [r0], #4
ldr lr, [r0]
msr cpsr_c, r1 @ return to SVC mode
mov r0, r0 @ avoid hazard prior to ARMv4
ret lr
ENDPROC(__set_fiq_regs)
ENTRY(__get_fiq_regs)
mov r2, #PSR_I_BIT | PSR_F_BIT | FIQ_MODE
mrs r1, cpsr
msr cpsr_c, r2 @ select FIQ mode
mov r0, r0 @ avoid hazard prior to ARMv4
stmia r0!, {r8 - r12}
str sp, [r0], #4
str lr, [r0]
msr cpsr_c, r1 @ return to SVC mode
mov r0, r0 @ avoid hazard prior to ARMv4
ret lr
ENDPROC(__get_fiq_regs)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 11,521
|
arch/arm/kernel/entry-common.S
|
/*
* linux/arch/arm/kernel/entry-common.S
*
* Copyright (C) 2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/assembler.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#include <asm/memory.h>
#ifdef CONFIG_AEABI
#include <asm/unistd-oabi.h>
#endif
.equ NR_syscalls, __NR_syscalls
#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
.macro arch_ret_to_user, tmp1, tmp2
.endm
#endif
#include "entry-header.S"
saved_psr .req r8
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
saved_pc .req r9
#define TRACE(x...) x
#else
saved_pc .req lr
#define TRACE(x...)
#endif
.section .entry.text,"ax",%progbits
.align 5
#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING) || \
IS_ENABLED(CONFIG_DEBUG_RSEQ))
/*
* This is the fast syscall return path. We do as little as possible here,
* such as avoiding writing r0 to the stack. We only use this path if we
* have tracing, context tracking and rseq debug disabled - the overheads
* from those features make this path too inefficient.
*/
ret_fast_syscall:
__ret_fast_syscall:
UNWIND(.fnstart )
UNWIND(.cantunwind )
disable_irq_notrace @ disable interrupts
ldr r2, [tsk, #TI_ADDR_LIMIT]
cmp r2, #TASK_SIZE
blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
bne fast_work_pending
/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
restore_user_regs fast = 1, offset = S_OFF
UNWIND(.fnend )
ENDPROC(ret_fast_syscall)
/* Ok, we need to do extra processing, enter the slow path. */
fast_work_pending:
str r0, [sp, #S_R0+S_OFF]! @ returned r0
/* fall through to work_pending */
#else
/*
* The "replacement" ret_fast_syscall for when tracing, context tracking,
* or rseq debug is enabled. As we will need to call out to some C functions,
* we save r0 first to avoid needing to save registers around each C function
* call.
*/
ret_fast_syscall:
__ret_fast_syscall:
UNWIND(.fnstart )
UNWIND(.cantunwind )
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
/* do_rseq_syscall needs interrupts enabled. */
mov r0, sp @ 'regs'
bl do_rseq_syscall
#endif
disable_irq_notrace @ disable interrupts
ldr r2, [tsk, #TI_ADDR_LIMIT]
cmp r2, #TASK_SIZE
blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
beq no_work_pending
UNWIND(.fnend )
ENDPROC(ret_fast_syscall)
/* Slower path - fall through to work_pending */
#endif
tst r1, #_TIF_SYSCALL_WORK
bne __sys_trace_return_nosave
slow_work_pending:
mov r0, sp @ 'regs'
mov r2, why @ 'syscall'
bl do_work_pending
cmp r0, #0
beq no_work_pending
movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
ldmia sp, {r0 - r6} @ have to reload r0 - r6
b local_restart @ ... and off we go
ENDPROC(ret_fast_syscall)
/*
* "slow" syscall return path. "why" tells us if this was a real syscall.
* IRQs may be enabled here, so always disable them. Note that we use the
* "notrace" version to avoid calling into the tracing code unnecessarily.
* do_work_pending() will update this state if necessary.
*/
ENTRY(ret_to_user)
ret_slow_syscall:
#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
/* do_rseq_syscall needs interrupts enabled. */
enable_irq_notrace @ enable interrupts
mov r0, sp @ 'regs'
bl do_rseq_syscall
#endif
disable_irq_notrace @ disable interrupts
ENTRY(ret_to_user_from_irq)
ldr r2, [tsk, #TI_ADDR_LIMIT]
cmp r2, #TASK_SIZE
blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK
bne slow_work_pending
no_work_pending:
asm_trace_hardirqs_on save = 0
/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
ct_user_enter save = 0
restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)
/*
* This is how we return from a fork.
*/
ENTRY(ret_from_fork)
bl schedule_tail
cmp r5, #0
movne r0, r4
badrne lr, 1f
retne r5
1: get_thread_info tsk
b ret_slow_syscall
ENDPROC(ret_from_fork)
/*=============================================================================
* SWI handler
*-----------------------------------------------------------------------------
*/
.align 5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
v7m_exception_entry
#else
sub sp, sp, #PT_REGS_SIZE
stmia sp, {r0 - r12} @ Calling r0 - r12
ARM( add r8, sp, #S_PC )
ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr
THUMB( mov r8, sp )
THUMB( store_user_sp_lr r8, r10, S_SP ) @ calling sp, lr
mrs saved_psr, spsr @ called from non-FIQ mode, so ok.
TRACE( mov saved_pc, lr )
str saved_pc, [sp, #S_PC] @ Save calling PC
str saved_psr, [sp, #S_PSR] @ Save CPSR
str r0, [sp, #S_OLD_R0] @ Save OLD_R0
#endif
zero_fp
alignment_trap r10, ip, __cr_alignment
asm_trace_hardirqs_on save=0
enable_irq_notrace
ct_user_exit save=0
/*
* Get the system call number.
*/
#if defined(CONFIG_OABI_COMPAT)
/*
* If we have CONFIG_OABI_COMPAT then we need to look at the swi
* value to determine if it is an EABI or an old ABI call.
*/
#ifdef CONFIG_ARM_THUMB
tst saved_psr, #PSR_T_BIT
movne r10, #0 @ no thumb OABI emulation
USER( ldreq r10, [saved_pc, #-4] ) @ get SWI instruction
#else
USER( ldr r10, [saved_pc, #-4] ) @ get SWI instruction
#endif
ARM_BE8(rev r10, r10) @ little endian instruction
#elif defined(CONFIG_AEABI)
/*
* Pure EABI user space always puts the syscall number into scno (r7).
*/
#elif defined(CONFIG_ARM_THUMB)
/* Legacy ABI only, possibly thumb mode. */
tst saved_psr, #PSR_T_BIT @ this is SPSR from save_user_regs
addne scno, r7, #__NR_SYSCALL_BASE @ put OS number in
USER( ldreq scno, [saved_pc, #-4] )
#else
/* Legacy ABI only. */
USER( ldr scno, [saved_pc, #-4] ) @ get SWI instruction
#endif
/* saved_psr and saved_pc are now dead */
uaccess_disable tbl
adr tbl, sys_call_table @ load syscall table pointer
#if defined(CONFIG_OABI_COMPAT)
/*
* If the swi argument is zero, this is an EABI call and we do nothing.
*
* If this is an old ABI call, get the syscall number into scno and
* get the old ABI syscall table address.
*/
bics r10, r10, #0xff000000
eorne scno, r10, #__NR_OABI_SYSCALL_BASE
ldrne tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
bic scno, scno, #0xff000000 @ mask off SWI op-code
eor scno, scno, #__NR_SYSCALL_BASE @ check OS number
#endif
get_thread_info tsk
/*
* Reload the registers that may have been corrupted on entry to
* the syscall assembly (by tracing or context tracking.)
*/
TRACE( ldmia sp, {r0 - r3} )
local_restart:
ldr r10, [tsk, #TI_FLAGS] @ check for syscall tracing
stmdb sp!, {r4, r5} @ push fifth and sixth args
tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
bne __sys_trace
invoke_syscall tbl, scno, r10, __ret_fast_syscall
add r1, sp, #S_OFF
2: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back
bcs arm_syscall
mov why, #0 @ no longer a real syscall
b sys_ni_syscall @ not private func
#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
/*
* We failed to handle a fault trying to access the page
* containing the swi instruction, but we're not really in a
* position to return -EFAULT. Instead, return back to the
* instruction and re-enter the user fault handling path trying
* to page it in. This will likely result in sending SEGV to the
* current task.
*/
9001:
sub lr, saved_pc, #4
str lr, [sp, #S_PC]
get_thread_info tsk
b ret_fast_syscall
#endif
ENDPROC(vector_swi)
/*
* This is the really slow path. We're going to be doing
* context switches, and waiting for our parent to respond.
*/
__sys_trace:
mov r1, scno
add r0, sp, #S_OFF
bl syscall_trace_enter
mov scno, r0
invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
cmp scno, #-1 @ skip the syscall?
bne 2b
add sp, sp, #S_OFF @ restore stack
__sys_trace_return_nosave:
enable_irq_notrace
mov r0, sp
bl syscall_trace_exit
b ret_slow_syscall
__sys_trace_return:
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
mov r0, sp
bl syscall_trace_exit
b ret_slow_syscall
.align 5
#ifdef CONFIG_ALIGNMENT_TRAP
.type __cr_alignment, #object
__cr_alignment:
.word cr_alignment
#endif
.ltorg
.macro syscall_table_start, sym
.equ __sys_nr, 0
.type \sym, #object
ENTRY(\sym)
.endm
.macro syscall, nr, func
.ifgt __sys_nr - \nr
.error "Duplicated/unordered system call entry"
.endif
.rept \nr - __sys_nr
.long sys_ni_syscall
.endr
.long \func
.equ __sys_nr, \nr + 1
.endm
.macro syscall_table_end, sym
.ifgt __sys_nr - __NR_syscalls
.error "System call table too big"
.endif
.rept __NR_syscalls - __sys_nr
.long sys_ni_syscall
.endr
.size \sym, . - \sym
.endm
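/*
 * Expansion example (hypothetical entry): if __sys_nr is 3 when
 * "syscall 5, sys_foo" is processed, the .rept above emits two
 * sys_ni_syscall entries for the unused slots 3 and 4, then a
 * .long sys_foo for slot 5, and __sys_nr becomes 6; syscall_table_end
 * pads the remaining slots up to __NR_syscalls the same way.
 */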
#define NATIVE(nr, func) syscall nr, func
/*
* This is the syscall table declaration for native ABI syscalls.
* With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
*/
syscall_table_start sys_call_table
#define COMPAT(nr, native, compat) syscall nr, native
#ifdef CONFIG_AEABI
#include <calls-eabi.S>
#else
#include <calls-oabi.S>
#endif
#undef COMPAT
syscall_table_end sys_call_table
/*============================================================================
* Special system call wrappers
*/
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
bic scno, r0, #__NR_OABI_SYSCALL_BASE
cmp scno, #__NR_syscall - __NR_SYSCALL_BASE
cmpne scno, #NR_syscalls @ check range
#ifdef CONFIG_CPU_SPECTRE
movhs scno, #0
csdb
#endif
stmloia sp, {r5, r6} @ shuffle args
movlo r0, r1
movlo r1, r2
movlo r2, r3
movlo r3, r4
ldrlo pc, [tbl, scno, lsl #2]
b sys_ni_syscall
ENDPROC(sys_syscall)
sys_sigreturn_wrapper:
add r0, sp, #S_OFF
mov why, #0 @ prevent syscall restart handling
b sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)
sys_rt_sigreturn_wrapper:
add r0, sp, #S_OFF
mov why, #0 @ prevent syscall restart handling
b sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
sys_statfs64_wrapper:
teq r1, #88
moveq r1, #84
b sys_statfs64
ENDPROC(sys_statfs64_wrapper)
sys_fstatfs64_wrapper:
teq r1, #88
moveq r1, #84
b sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)
/*
* Note: off_4k (r5) is always in units of 4K. If we can't do the requested
* offset, we return EINVAL.
*/
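/*
 * Illustrative note: ARM uses 4K pages, so the 4K-unit offset in r5 is
 * already the page offset that sys_mmap_pgoff expects; the wrapper below
 * simply stores it into the sixth argument slot on the stack and branches.
 */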
sys_mmap2:
str r5, [sp, #4]
b sys_mmap_pgoff
ENDPROC(sys_mmap2)
#ifdef CONFIG_OABI_COMPAT
/*
* These are syscalls with argument register differences
*/
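/*
 * Illustrative note: the OABI user side passes 64-bit file offsets
 * unaligned (e.g. in r3+r4 for pread64/pwrite64), while the EABI-built
 * kernel functions expect them in an even/odd register pair or in the
 * fifth/sixth argument slots, so the wrappers below shuffle registers and
 * stack slots accordingly before branching to the EABI implementations.
 */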
sys_oabi_pread64:
stmia sp, {r3, r4}
b sys_pread64
ENDPROC(sys_oabi_pread64)
sys_oabi_pwrite64:
stmia sp, {r3, r4}
b sys_pwrite64
ENDPROC(sys_oabi_pwrite64)
sys_oabi_truncate64:
mov r3, r2
mov r2, r1
b sys_truncate64
ENDPROC(sys_oabi_truncate64)
sys_oabi_ftruncate64:
mov r3, r2
mov r2, r1
b sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)
sys_oabi_readahead:
str r3, [sp]
mov r3, r2
mov r2, r1
b sys_readahead
ENDPROC(sys_oabi_readahead)
/*
* Let's declare a second syscall table for old ABI binaries
* using the compatibility syscall entries.
*/
syscall_table_start sys_oabi_call_table
#define COMPAT(nr, native, compat) syscall nr, compat
#include <calls-oabi.S>
syscall_table_end sys_oabi_call_table
#endif
arch/arm/kernel/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */
/* ld script to make ARM Linux kernel
* taken from the i386 version by Russell King
* Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
*/
#ifdef CONFIG_XIP_KERNEL
#include "vmlinux-xip.lds.S"
#else
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/mpu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include "vmlinux.lds.h"
OUTPUT_ARCH(arm)
ENTRY(stext)
#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif
SECTIONS
{
/*
* XXX: The linker does not define how output sections are
* assigned to input sections when there are multiple statements
* matching the same input section name. There is no documented
* order of matching.
*
* unwind exit sections must be discarded before the rest of the
* unwind sections get included.
*/
/DISCARD/ : {
ARM_DISCARD
#ifndef CONFIG_SMP_ON_UP
*(.alt.smp.init)
#endif
}
. = PAGE_OFFSET + TEXT_OFFSET;
.head.text : {
_text = .;
HEAD_TEXT
}
#ifdef CONFIG_STRICT_KERNEL_RWX
. = ALIGN(1<<SECTION_SHIFT);
#endif
#ifdef CONFIG_ARM_MPU
. = ALIGN(PMSAv8_MINALIGN);
#endif
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
ARM_TEXT
}
#ifdef CONFIG_DEBUG_ALIGN_RODATA
. = ALIGN(1<<SECTION_SHIFT);
#endif
_etext = .; /* End of text section */
RO_DATA(PAGE_SIZE)
. = ALIGN(4);
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
ARM_MMU_KEEP(*(__ex_table))
__stop___ex_table = .;
}
#ifdef CONFIG_ARM_UNWIND
ARM_UNWIND_SECTIONS
#endif
NOTES
#ifdef CONFIG_STRICT_KERNEL_RWX
. = ALIGN(1<<SECTION_SHIFT);
#else
. = ALIGN(PAGE_SIZE);
#endif
__init_begin = .;
ARM_VECTORS
INIT_TEXT_SECTION(8)
.exit.text : {
ARM_EXIT_KEEP(EXIT_TEXT)
}
.init.proc.info : {
ARM_CPU_DISCARD(PROC_INFO)
}
.init.arch.info : {
__arch_info_begin = .;
*(.arch.info.init)
__arch_info_end = .;
}
.init.tagtable : {
__tagtable_begin = .;
*(.taglist.init)
__tagtable_end = .;
}
#ifdef CONFIG_SMP_ON_UP
.init.smpalt : {
__smpalt_begin = .;
*(.alt.smp.init)
__smpalt_end = .;
}
#endif
.init.pv_table : {
__pv_table_begin = .;
*(.pv_table)
__pv_table_end = .;
}
INIT_DATA_SECTION(16)
.exit.data : {
ARM_EXIT_KEEP(EXIT_DATA)
}
#ifdef CONFIG_SMP
PERCPU_SECTION(L1_CACHE_BYTES)
#endif
#ifdef CONFIG_HAVE_TCM
ARM_TCM
#endif
#ifdef CONFIG_STRICT_KERNEL_RWX
. = ALIGN(1<<SECTION_SHIFT);
#else
. = ALIGN(THREAD_SIZE);
#endif
__init_end = .;
_sdata = .;
RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
_edata = .;
BSS_SECTION(0, 0, 0)
#ifdef CONFIG_ARM_MPU
. = ALIGN(PMSAv8_MINALIGN);
#endif
_end = .;
STABS_DEBUG
}
#ifdef CONFIG_STRICT_KERNEL_RWX
/*
* Without CONFIG_DEBUG_ALIGN_RODATA, __start_rodata_section_aligned will
* be the first section-aligned location after __start_rodata. Otherwise,
* it will be equal to __start_rodata.
*/
__start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
#endif
/*
* These must never be empty
* If you have to comment these two assert statements out, your
* binutils is too old (for other reasons as well)
*/
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
/*
* The HYP init code can't be more than a page long,
* and should not cross a page boundary.
* The above comment applies as well.
*/
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
"HYP init code too big or misaligned")
#endif /* CONFIG_XIP_KERNEL */
arch/arm/kernel/sleep.S
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/glue-cache.h>
#include <asm/glue-proc.h>
.text
/*
* Implementation of MPIDR hash algorithm through shifting
* and OR'ing.
*
* @dst: register containing hash result
* @rs0: register containing affinity level 0 bit shift
* @rs1: register containing affinity level 1 bit shift
* @rs2: register containing affinity level 2 bit shift
* @mpidr: register containing MPIDR value
* @mask: register containing MPIDR mask
*
* Pseudo C-code:
*
*u32 dst;
*
*compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 mpidr, u32 mask) {
* u32 aff0, aff1, aff2;
* u32 mpidr_masked = mpidr & mask;
* aff0 = mpidr_masked & 0xff;
* aff1 = mpidr_masked & 0xff00;
* aff2 = mpidr_masked & 0xff0000;
* dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2);
*}
* Input registers: rs0, rs1, rs2, mpidr, mask
* Output register: dst
* Note: input and output registers must be disjoint register sets
(eg: a macro instance with mpidr = r1 and dst = r1 is invalid)
*/
.macro compute_mpidr_hash dst, rs0, rs1, rs2, mpidr, mask
and \mpidr, \mpidr, \mask @ mask out MPIDR bits
and \dst, \mpidr, #0xff @ mask=aff0
ARM( mov \dst, \dst, lsr \rs0 ) @ dst=aff0>>rs0
THUMB( lsr \dst, \dst, \rs0 )
and \mask, \mpidr, #0xff00 @ mask = aff1
ARM( orr \dst, \dst, \mask, lsr \rs1 ) @ dst|=(aff1>>rs1)
THUMB( lsr \mask, \mask, \rs1 )
THUMB( orr \dst, \dst, \mask )
and \mask, \mpidr, #0xff0000 @ mask = aff2
ARM( orr \dst, \dst, \mask, lsr \rs2 ) @ dst|=(aff2>>rs2)
THUMB( lsr \mask, \mask, \rs2 )
THUMB( orr \dst, \dst, \mask )
.endm
/*
* Save CPU state for a suspend. This saves the CPU general purpose
* registers, and allocates space on the kernel stack to save the CPU
* specific registers and some other data for resume.
* r0 = suspend function arg0
* r1 = suspend function
* r2 = MPIDR value the resuming CPU will use
*/
ENTRY(__cpu_suspend)
stmfd sp!, {r4 - r11, lr}
#ifdef MULTI_CPU
ldr r10, =processor
ldr r4, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
#else
ldr r4, =cpu_suspend_size
#endif
mov r5, sp @ current virtual SP
add r4, r4, #12 @ Space for pgd, virt sp, phys resume fn
sub sp, sp, r4 @ allocate CPU state on stack
ldr r3, =sleep_save_sp
stmfd sp!, {r0, r1} @ save suspend func arg and pointer
ldr r3, [r3, #SLEEP_SAVE_SP_VIRT]
ALT_SMP(ldr r0, =mpidr_hash)
ALT_UP_B(1f)
/* This ldmia relies on the memory layout of the mpidr_hash struct */
ldmia r0, {r1, r6-r8} @ r1 = mpidr mask (r6,r7,r8) = l[0,1,2] shifts
compute_mpidr_hash r0, r6, r7, r8, r2, r1
add r3, r3, r0, lsl #2
1: mov r2, r5 @ virtual SP
mov r1, r4 @ size of save block
add r0, sp, #8 @ pointer to save block
bl __cpu_suspend_save
badr lr, cpu_suspend_abort
ldmfd sp!, {r0, pc} @ call suspend fn
ENDPROC(__cpu_suspend)
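/*
 * Rough picture of the frame built above (illustrative, lowest address
 * first):
 *
 *	sp + 0:	suspend function argument (r0 on entry)
 *	sp + 4:	suspend function pointer  (r1 on entry)
 *	sp + 8:	save block handed to __cpu_suspend_save(): 12 bytes for
 *		pgd, virtual SP and physical resume address, followed by
 *		the CPU-specific sleep state whose size was loaded into
 *		r4 above
 *
 * __cpu_suspend_save() is expected to record the physical address of
 * that block in the sleep_save_sp slot it is given in r3 (indexed by
 * the MPIDR hash on SMP), which is the value cpu_resume reads back.
 */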
.ltorg
cpu_suspend_abort:
ldmia sp!, {r1 - r3} @ pop phys pgd, virt SP, phys resume fn
teq r0, #0
moveq r0, #1 @ force non-zero value
mov sp, r2
ldmfd sp!, {r4 - r11, pc}
ENDPROC(cpu_suspend_abort)
/*
* r0 = control register value
*/
.align 5
.pushsection .idmap.text,"ax"
ENTRY(cpu_resume_mmu)
ldr r3, =cpu_resume_after_mmu
instr_sync
mcr p15, 0, r0, c1, c0, 0 @ turn on MMU, I-cache, etc
mrc p15, 0, r0, c0, c0, 0 @ read id reg
instr_sync
mov r0, r0
mov r0, r0
ret r3 @ jump to virtual address
ENDPROC(cpu_resume_mmu)
.popsection
cpu_resume_after_mmu:
bl cpu_init @ restore the und/abt/irq banked regs
mov r0, #0 @ return zero on success
ldmfd sp!, {r4 - r11, pc}
ENDPROC(cpu_resume_after_mmu)
.text
.align
#ifdef CONFIG_MCPM
.arm
THUMB( .thumb )
ENTRY(cpu_resume_no_hyp)
ARM_BE8(setend be) @ ensure we are in BE mode
b no_hyp
#endif
#ifdef CONFIG_MMU
.arm
ENTRY(cpu_resume_arm)
THUMB( badr r9, 1f ) @ Kernel is entered in ARM.
THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
THUMB( .thumb ) @ switch to Thumb now.
THUMB(1: )
#endif
ENTRY(cpu_resume)
ARM_BE8(setend be) @ ensure we are in BE mode
#ifdef CONFIG_ARM_VIRT_EXT
bl __hyp_stub_install_secondary
#endif
safe_svcmode_maskall r1
no_hyp:
mov r1, #0
ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
ALT_UP_B(1f)
adr r2, mpidr_hash_ptr
ldr r3, [r2]
add r2, r2, r3 @ r2 = struct mpidr_hash phys address
	/*
	 * This ldmia relies on the memory layout of struct mpidr_hash.
	 */
ldmia r2, { r3-r6 } @ r3 = mpidr mask (r4,r5,r6) = l[0,1,2] shifts
compute_mpidr_hash r1, r4, r5, r6, r0, r3
1:
adr r0, _sleep_save_sp
ldr r2, [r0]
add r0, r0, r2
ldr r0, [r0, #SLEEP_SAVE_SP_PHYS]
ldr r0, [r0, r1, lsl #2]
@ load phys pgd, stack, resume fn
ARM( ldmia r0!, {r1, sp, pc} )
THUMB( ldmia r0!, {r1, r2, r3} )
THUMB( mov sp, r2 )
THUMB( bx r3 )
ENDPROC(cpu_resume)
#ifdef CONFIG_MMU
ENDPROC(cpu_resume_arm)
#endif
#ifdef CONFIG_MCPM
ENDPROC(cpu_resume_no_hyp)
#endif
.align 2
_sleep_save_sp:
.long sleep_save_sp - .
mpidr_hash_ptr:
.long mpidr_hash - . @ mpidr_hash struct offset
.data
.align 2
.type sleep_save_sp, #object
ENTRY(sleep_save_sp)
.space SLEEP_SAVE_SP_SZ @ struct sleep_save_sp
arch/arm/kernel/hyp-stub.S
/*
* Copyright (c) 2012 Linaro Limited.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/init.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/virt.h>
#ifndef ZIMAGE
/*
* For the kernel proper, we need to find out the CPU boot mode long after
* boot, so we need to store it in a writable variable.
*
* This is not in .bss, because we set it sufficiently early that the boot-time
* zeroing of .bss would clobber it.
*/
.data
.align 2
ENTRY(__boot_cpu_mode)
.long 0
.text
/*
* Save the primary CPU boot mode. Requires 3 scratch registers.
*/
.macro store_primary_cpu_mode reg1, reg2, reg3
mrs \reg1, cpsr
and \reg1, \reg1, #MODE_MASK
adr \reg2, .L__boot_cpu_mode_offset
ldr \reg3, [\reg2]
str \reg1, [\reg2, \reg3]
.endm
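/*
 * What the macro above does, in rough C (illustrative only): because
 * it can run with the MMU still off, the variable is reached via a
 * PC-relative offset rather than its link-time address:
 *
 *	reg2 = &.L__boot_cpu_mode_offset;	   // adr: PC-relative
 *	reg3 = *reg2;				   // offset to the variable
 *	*(u32 *)(reg2 + reg3) = cpsr & MODE_MASK;  // record boot mode
 *
 * so the write lands on __boot_cpu_mode wherever the image is
 * currently executing from.
 */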
/*
* Compare the current mode with the one saved on the primary CPU.
* If they don't match, record that fact. The Z bit indicates
* if there's a match or not.
 * Requires 3 additional scratch registers.
*/
.macro compare_cpu_mode_with_primary mode, reg1, reg2, reg3
adr \reg2, .L__boot_cpu_mode_offset
ldr \reg3, [\reg2]
ldr \reg1, [\reg2, \reg3]
cmp \mode, \reg1 @ matches primary CPU boot mode?
orrne \reg1, \reg1, #BOOT_CPU_MODE_MISMATCH
strne \reg1, [\reg2, \reg3] @ record what happened and give up
.endm
#else /* ZIMAGE */
.macro store_primary_cpu_mode reg1:req, reg2:req, reg3:req
.endm
/*
 * The zImage loader only runs on one CPU, so we don't bother with multi-CPU
* consistency checking:
*/
.macro compare_cpu_mode_with_primary mode, reg1, reg2, reg3
cmp \mode, \mode
.endm
#endif /* ZIMAGE */
/*
* Hypervisor stub installation functions.
*
* These must be called with the MMU and D-cache off.
* They are not ABI compliant and are only intended to be called from the kernel
* entry points in head.S.
*/
@ Call this from the primary CPU
ENTRY(__hyp_stub_install)
store_primary_cpu_mode r4, r5, r6
ENDPROC(__hyp_stub_install)
@ fall through...
@ Secondary CPUs should call here
ENTRY(__hyp_stub_install_secondary)
mrs r4, cpsr
and r4, r4, #MODE_MASK
/*
* If the secondary has booted with a different mode, give up
* immediately.
*/
compare_cpu_mode_with_primary r4, r5, r6, r7
retne lr
/*
* Once we have given up on one CPU, we do not try to install the
* stub hypervisor on the remaining ones: because the saved boot mode
* is modified, it can't compare equal to the CPSR mode field any
* more.
*
* Otherwise...
*/
cmp r4, #HYP_MODE
retne lr @ give up if the CPU is not in HYP mode
/*
* Configure HSCTLR to set correct exception endianness/instruction set
* state etc.
* Turn off all traps
* Eventually, CPU-specific code might be needed -- assume not for now
*
* This code relies on the "eret" instruction to synchronize the
* various coprocessor accesses. This is done when we switch to SVC
* (see safe_svcmode_maskall).
*/
@ Now install the hypervisor stub:
W(adr) r7, __hyp_stub_vectors
mcr p15, 4, r7, c12, c0, 0 @ set hypervisor vector base (HVBAR)
@ Disable all traps, so we don't get any nasty surprise
mov r7, #0
mcr p15, 4, r7, c1, c1, 0 @ HCR
mcr p15, 4, r7, c1, c1, 2 @ HCPTR
mcr p15, 4, r7, c1, c1, 3 @ HSTR
THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE
ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE
mcr p15, 4, r7, c1, c0, 0 @ HSCTLR
mrc p15, 4, r7, c1, c1, 1 @ HDCR
and r7, #0x1f @ Preserve HPMN
mcr p15, 4, r7, c1, c1, 1 @ HDCR
@ Make sure NS-SVC is initialised appropriately
mrc p15, 0, r7, c1, c0, 0 @ SCTLR
orr r7, #(1 << 5) @ CP15 barriers enabled
bic r7, #(3 << 7) @ Clear SED/ITD for v8 (RES0 for v7)
bic r7, #(3 << 19) @ WXN and UWXN disabled
mcr p15, 0, r7, c1, c0, 0 @ SCTLR
mrc p15, 0, r7, c0, c0, 0 @ MIDR
mcr p15, 4, r7, c0, c0, 0 @ VPIDR
mrc p15, 0, r7, c0, c0, 5 @ MPIDR
mcr p15, 4, r7, c0, c0, 5 @ VMPIDR
#if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER)
@ make CNTP_* and CNTPCT accessible from PL1
mrc p15, 0, r7, c0, c1, 1 @ ID_PFR1
ubfx r7, r7, #16, #4
teq r7, #0
beq 1f
mrc p15, 4, r7, c14, c1, 0 @ CNTHCTL
orr r7, r7, #3 @ PL1PCEN | PL1PCTEN
mcr p15, 4, r7, c14, c1, 0 @ CNTHCTL
mov r7, #0
mcrr p15, 4, r7, r7, c14 @ CNTVOFF
@ Disable virtual timer in case it was counting
mrc p15, 0, r7, c14, c3, 1 @ CNTV_CTL
bic r7, #1 @ Clear ENABLE
mcr p15, 0, r7, c14, c3, 1 @ CNTV_CTL
1:
#endif
#ifdef CONFIG_ARM_GIC_V3
@ Check whether GICv3 system registers are available
mrc p15, 0, r7, c0, c1, 1 @ ID_PFR1
ubfx r7, r7, #28, #4
teq r7, #0
beq 2f
@ Enable system register accesses
mrc p15, 4, r7, c12, c9, 5 @ ICC_HSRE
orr r7, r7, #(ICC_SRE_EL2_ENABLE | ICC_SRE_EL2_SRE)
mcr p15, 4, r7, c12, c9, 5 @ ICC_HSRE
isb
@ SRE bit could be forced to 0 by firmware.
@ Check whether it sticks before accessing any other sysreg
mrc p15, 4, r7, c12, c9, 5 @ ICC_HSRE
tst r7, #ICC_SRE_EL2_SRE
beq 2f
mov r7, #0
mcr p15, 4, r7, c12, c11, 0 @ ICH_HCR
2:
#endif
bx lr @ The boot CPU mode is left in r4.
ENDPROC(__hyp_stub_install_secondary)
__hyp_stub_do_trap:
teq r0, #HVC_SET_VECTORS
bne 1f
mcr p15, 4, r1, c12, c0, 0 @ set HVBAR
b __hyp_stub_exit
1: teq r0, #HVC_SOFT_RESTART
bne 1f
bx r1
1: teq r0, #HVC_RESET_VECTORS
beq __hyp_stub_exit
ldr r0, =HVC_STUB_ERR
__ERET
__hyp_stub_exit:
mov r0, #0
__ERET
ENDPROC(__hyp_stub_do_trap)
/*
* __hyp_set_vectors: Call this after boot to set the initial hypervisor
* vectors as part of hypervisor installation. On an SMP system, this should
* be called on each CPU.
*
* r0 must be the physical address of the new vector table (which must lie in
 * the bottom 4GB of physical address space).
*
* r0 must be 32-byte aligned.
*
* Before calling this, you must check that the stub hypervisor is installed
* everywhere, by waiting for any secondary CPUs to be brought up and then
* checking that BOOT_CPU_MODE_HAVE_HYP(__boot_cpu_mode) is true.
*
* If not, there is a pre-existing hypervisor, some CPUs failed to boot, or
* something else went wrong... in such cases, trying to install a new
* hypervisor is unlikely to work as desired.
*
* When you call into your shiny new hypervisor, sp_hyp will contain junk,
* so you will need to set that to something sensible at the new hypervisor's
* initialisation entry point.
*/
ENTRY(__hyp_set_vectors)
mov r1, r0
mov r0, #HVC_SET_VECTORS
__HVC(0)
ret lr
ENDPROC(__hyp_set_vectors)
ENTRY(__hyp_soft_restart)
mov r1, r0
mov r0, #HVC_SOFT_RESTART
__HVC(0)
ret lr
ENDPROC(__hyp_soft_restart)
ENTRY(__hyp_reset_vectors)
mov r0, #HVC_RESET_VECTORS
__HVC(0)
ret lr
ENDPROC(__hyp_reset_vectors)
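/*
 * Summary of the stub call protocol used by the three helpers above
 * (descriptive only): from SVC the caller issues "hvc #0" with a
 * function code in r0 and an optional argument in r1; __hyp_stub_do_trap
 * dispatches on r0 and returns 0 in r0 on success or HVC_STUB_ERR for
 * an unrecognised code:
 *
 *	HVC_SET_VECTORS:  r1 = phys vector base  ->  HVBAR := r1
 *	HVC_SOFT_RESTART: r1 = phys entry point  ->  branch to r1 in HYP
 *	HVC_RESET_VECTORS:                           treated as a no-op
 */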
#ifndef ZIMAGE
.align 2
.L__boot_cpu_mode_offset:
.long __boot_cpu_mode - .
#endif
.align 5
ENTRY(__hyp_stub_vectors)
__hyp_stub_reset: W(b) .
__hyp_stub_und: W(b) .
__hyp_stub_svc: W(b) .
__hyp_stub_pabort: W(b) .
__hyp_stub_dabort: W(b) .
__hyp_stub_trap: W(b) __hyp_stub_do_trap
__hyp_stub_irq: W(b) .
__hyp_stub_fiq: W(b) .
ENDPROC(__hyp_stub_vectors)
arch/arm/kernel/debug.S
/*
* linux/arch/arm/kernel/debug.S
*
* Copyright (C) 1994-1999 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* 32-bit debugging code
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
/*
* Some debugging routines (useful if you've got MM problems and
* printk isn't working). For DEBUGGING ONLY!!! Do not leave
* references to these in a production kernel!
*/
#if !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
#endif
#ifdef CONFIG_MMU
.macro addruart_current, rx, tmp1, tmp2
addruart \tmp1, \tmp2, \rx
mrc p15, 0, \rx, c1, c0
tst \rx, #1
moveq \rx, \tmp1
movne \rx, \tmp2
.endm
#else /* !CONFIG_MMU */
.macro addruart_current, rx, tmp1, tmp2
addruart \rx, \tmp1, \tmp2
.endm
#endif /* CONFIG_MMU */
/*
* Useful debugging routines
*/
ENTRY(printhex8)
mov r1, #8
b printhex
ENDPROC(printhex8)
ENTRY(printhex4)
mov r1, #4
b printhex
ENDPROC(printhex4)
ENTRY(printhex2)
mov r1, #2
printhex: adr r2, hexbuf_rel
ldr r3, [r2]
add r2, r2, r3
add r3, r2, r1
mov r1, #0
strb r1, [r3]
1: and r1, r0, #15
mov r0, r0, lsr #4
cmp r1, #10
addlt r1, r1, #'0'
addge r1, r1, #'a' - 10
strb r1, [r3, #-1]!
teq r3, r2
bne 1b
mov r0, r2
b printascii
ENDPROC(printhex2)
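/*
 * Rough C equivalent of printhex above (illustrative only): digits are
 * written into hexbuf from the least significant nibble backwards and
 * the result is handed to printascii:
 *
 *	char *p = hexbuf + ndigits;
 *	*p = '\0';
 *	do {
 *		unsigned int n = val & 15;
 *		*--p = (n < 10) ? '0' + n : 'a' + (n - 10);
 *		val >>= 4;
 *	} while (p != hexbuf);
 *	printascii(hexbuf);
 */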
.pushsection .bss
hexbuf_addr: .space 16
.popsection
.align
hexbuf_rel: .long hexbuf_addr - .
.ltorg
#ifndef CONFIG_DEBUG_SEMIHOSTING
ENTRY(printascii)
addruart_current r3, r1, r2
1: teq r0, #0
ldrneb r1, [r0], #1
teqne r1, #0
reteq lr
2: teq r1, #'\n'
bne 3f
mov r1, #'\r'
waituart r2, r3
senduart r1, r3
busyuart r2, r3
mov r1, #'\n'
3: waituart r2, r3
senduart r1, r3
busyuart r2, r3
b 1b
ENDPROC(printascii)
ENTRY(printch)
addruart_current r3, r1, r2
mov r1, r0
mov r0, #0
b 2b
ENDPROC(printch)
#ifdef CONFIG_MMU
ENTRY(debug_ll_addr)
addruart r2, r3, ip
str r2, [r0]
str r3, [r1]
ret lr
ENDPROC(debug_ll_addr)
#endif
#else
ENTRY(printascii)
mov r1, r0
mov r0, #0x04 @ SYS_WRITE0
ARM( svc #0x123456 )
#ifdef CONFIG_CPU_V7M
THUMB( bkpt #0xab )
#else
THUMB( svc #0xab )
#endif
ret lr
ENDPROC(printascii)
ENTRY(printch)
adr r1, hexbuf_rel
ldr r2, [r1]
add r1, r1, r2
strb r0, [r1]
mov r0, #0x03 @ SYS_WRITEC
ARM( svc #0x123456 )
#ifdef CONFIG_CPU_V7M
THUMB( bkpt #0xab )
#else
THUMB( svc #0xab )
#endif
ret lr
ENDPROC(printch)
ENTRY(debug_ll_addr)
mov r2, #0
str r2, [r0]
str r2, [r1]
ret lr
ENDPROC(debug_ll_addr)
#endif
arch/arm/crypto/sha1-armv7-neon.S
/* sha1-armv7-neon.S - ARM/NEON accelerated SHA-1 transform function
*
* Copyright © 2013-2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.syntax unified
.fpu neon
.text
/* Context structure */
#define state_h0 0
#define state_h1 4
#define state_h2 8
#define state_h3 12
#define state_h4 16
/* Constants */
#define K1 0x5A827999
#define K2 0x6ED9EBA1
#define K3 0x8F1BBCDC
#define K4 0xCA62C1D6
.align 4
.LK_VEC:
.LK1: .long K1, K1, K1, K1
.LK2: .long K2, K2, K2, K2
.LK3: .long K3, K3, K3, K3
.LK4: .long K4, K4, K4, K4
/* Register macros */
#define RSTATE r0
#define RDATA r1
#define RNBLKS r2
#define ROLDSTACK r3
#define RWK lr
#define _a r4
#define _b r5
#define _c r6
#define _d r7
#define _e r8
#define RT0 r9
#define RT1 r10
#define RT2 r11
#define RT3 r12
#define W0 q0
#define W1 q7
#define W2 q2
#define W3 q3
#define W4 q4
#define W5 q6
#define W6 q5
#define W7 q1
#define tmp0 q8
#define tmp1 q9
#define tmp2 q10
#define tmp3 q11
#define qK1 q12
#define qK2 q13
#define qK3 q14
#define qK4 q15
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ARM_LE(code...)
#else
#define ARM_LE(code...) code
#endif
/* Round function macros. */
#define WK_offs(i) (((i) & 15) * 4)
#define _R_F1(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
ldr RT3, [sp, WK_offs(i)]; \
pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
bic RT0, d, b; \
add e, e, a, ror #(32 - 5); \
and RT1, c, b; \
pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
add RT0, RT0, RT3; \
add e, e, RT1; \
ror b, #(32 - 30); \
pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
add e, e, RT0;
#define _R_F2(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
ldr RT3, [sp, WK_offs(i)]; \
pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
eor RT0, d, b; \
add e, e, a, ror #(32 - 5); \
eor RT0, RT0, c; \
pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
add e, e, RT3; \
ror b, #(32 - 30); \
pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
add e, e, RT0; \
#define _R_F3(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
ldr RT3, [sp, WK_offs(i)]; \
pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
eor RT0, b, c; \
and RT1, b, c; \
add e, e, a, ror #(32 - 5); \
pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
and RT0, RT0, d; \
add RT1, RT1, RT3; \
add e, e, RT0; \
ror b, #(32 - 30); \
pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
add e, e, RT1;
#define _R_F4(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
_R_F2(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
#define _R(a,b,c,d,e,f,i,pre1,pre2,pre3,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
_R_##f(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
#define R(a,b,c,d,e,f,i) \
_R_##f(a,b,c,d,e,i,dummy,dummy,dummy,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
#define dummy(...)
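/*
 * For reference (standard SHA-1 definition, shown to document what the
 * _R_F1.._R_F4 macros compute): the four round functions are
 *
 *	F1(b,c,d) = (b & c) | (~b & d)			rounds  0..19
 *	F2(b,c,d) = b ^ c ^ d				rounds 20..39
 *	F3(b,c,d) = (b & c) | (b & d) | (c & d)		rounds 40..59
 *	F4(b,c,d) = b ^ c ^ d				rounds 60..79
 *
 * and each round performs e += rol32(a, 5) + F(b,c,d) + WK[i] followed
 * by b = rol32(b, 30), where WK[i] = W[i] + K was precomputed into the
 * stack work area by the W_PRECALC macros; the ror #(32 - 5) and
 * ror #(32 - 30) operands are how those left-rotations are written.
 */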
/* Input expansion macros. */
/********* Precalc macros for rounds 0-15 *************************************/
#define W_PRECALC_00_15() \
add RWK, sp, #(WK_offs(0)); \
\
vld1.32 {W0, W7}, [RDATA]!; \
ARM_LE(vrev32.8 W0, W0; ) /* big => little */ \
vld1.32 {W6, W5}, [RDATA]!; \
vadd.u32 tmp0, W0, curK; \
ARM_LE(vrev32.8 W7, W7; ) /* big => little */ \
ARM_LE(vrev32.8 W6, W6; ) /* big => little */ \
vadd.u32 tmp1, W7, curK; \
ARM_LE(vrev32.8 W5, W5; ) /* big => little */ \
vadd.u32 tmp2, W6, curK; \
vst1.32 {tmp0, tmp1}, [RWK]!; \
vadd.u32 tmp3, W5, curK; \
vst1.32 {tmp2, tmp3}, [RWK]; \
#define WPRECALC_00_15_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vld1.32 {W0, W7}, [RDATA]!; \
#define WPRECALC_00_15_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
add RWK, sp, #(WK_offs(0)); \
#define WPRECALC_00_15_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
ARM_LE(vrev32.8 W0, W0; ) /* big => little */ \
#define WPRECALC_00_15_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vld1.32 {W6, W5}, [RDATA]!; \
#define WPRECALC_00_15_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vadd.u32 tmp0, W0, curK; \
#define WPRECALC_00_15_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
ARM_LE(vrev32.8 W7, W7; ) /* big => little */ \
#define WPRECALC_00_15_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
ARM_LE(vrev32.8 W6, W6; ) /* big => little */ \
#define WPRECALC_00_15_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vadd.u32 tmp1, W7, curK; \
#define WPRECALC_00_15_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
ARM_LE(vrev32.8 W5, W5; ) /* big => little */ \
#define WPRECALC_00_15_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vadd.u32 tmp2, W6, curK; \
#define WPRECALC_00_15_10(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vst1.32 {tmp0, tmp1}, [RWK]!; \
#define WPRECALC_00_15_11(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vadd.u32 tmp3, W5, curK; \
#define WPRECALC_00_15_12(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vst1.32 {tmp2, tmp3}, [RWK]; \
/********* Precalc macros for rounds 16-31 ************************************/
#define WPRECALC_16_31_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor tmp0, tmp0; \
vext.8 W, W_m16, W_m12, #8; \
#define WPRECALC_16_31_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
add RWK, sp, #(WK_offs(i)); \
vext.8 tmp0, W_m04, tmp0, #4; \
#define WPRECALC_16_31_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor tmp0, tmp0, W_m16; \
veor.32 W, W, W_m08; \
#define WPRECALC_16_31_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor tmp1, tmp1; \
veor W, W, tmp0; \
#define WPRECALC_16_31_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vshl.u32 tmp0, W, #1; \
#define WPRECALC_16_31_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vext.8 tmp1, tmp1, W, #(16-12); \
vshr.u32 W, W, #31; \
#define WPRECALC_16_31_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vorr tmp0, tmp0, W; \
vshr.u32 W, tmp1, #30; \
#define WPRECALC_16_31_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vshl.u32 tmp1, tmp1, #2; \
#define WPRECALC_16_31_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor tmp0, tmp0, W; \
#define WPRECALC_16_31_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor W, tmp0, tmp1; \
#define WPRECALC_16_31_10(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vadd.u32 tmp0, W, curK; \
#define WPRECALC_16_31_11(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vst1.32 {tmp0}, [RWK];
/********* Precalc macros for rounds 32-79 ************************************/
#define WPRECALC_32_79_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor W, W_m28; \
#define WPRECALC_32_79_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vext.8 tmp0, W_m08, W_m04, #8; \
#define WPRECALC_32_79_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor W, W_m16; \
#define WPRECALC_32_79_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor W, tmp0; \
#define WPRECALC_32_79_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
add RWK, sp, #(WK_offs(i&~3)); \
#define WPRECALC_32_79_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vshl.u32 tmp1, W, #2; \
#define WPRECALC_32_79_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vshr.u32 tmp0, W, #30; \
#define WPRECALC_32_79_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vorr W, tmp0, tmp1; \
#define WPRECALC_32_79_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vadd.u32 tmp0, W, curK; \
#define WPRECALC_32_79_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vst1.32 {tmp0}, [RWK];
/*
* Transform nblks*64 bytes (nblks*16 32-bit words) at DATA.
*
* unsigned int
* sha1_transform_neon (void *ctx, const unsigned char *data,
* unsigned int nblks)
*/
.align 3
ENTRY(sha1_transform_neon)
/* input:
* r0: ctx, CTX
* r1: data (64*nblks bytes)
* r2: nblks
*/
cmp RNBLKS, #0;
beq .Ldo_nothing;
push {r4-r12, lr};
/*vpush {q4-q7};*/
adr RT3, .LK_VEC;
mov ROLDSTACK, sp;
/* Align stack. */
sub RT0, sp, #(16*4);
and RT0, #(~(16-1));
mov sp, RT0;
vld1.32 {qK1-qK2}, [RT3]!; /* Load K1,K2 */
/* Get the values of the chaining variables. */
ldm RSTATE, {_a-_e};
vld1.32 {qK3-qK4}, [RT3]; /* Load K3,K4 */
#undef curK
#define curK qK1
/* Precalc 0-15. */
W_PRECALC_00_15();
.Loop:
/* Transform 0-15 + Precalc 16-31. */
_R( _a, _b, _c, _d, _e, F1, 0,
WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 16,
W4, W5, W6, W7, W0, _, _, _ );
_R( _e, _a, _b, _c, _d, F1, 1,
WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 16,
W4, W5, W6, W7, W0, _, _, _ );
_R( _d, _e, _a, _b, _c, F1, 2,
WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 16,
W4, W5, W6, W7, W0, _, _, _ );
_R( _c, _d, _e, _a, _b, F1, 3,
WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,16,
W4, W5, W6, W7, W0, _, _, _ );
#undef curK
#define curK qK2
_R( _b, _c, _d, _e, _a, F1, 4,
WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 20,
W3, W4, W5, W6, W7, _, _, _ );
_R( _a, _b, _c, _d, _e, F1, 5,
WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 20,
W3, W4, W5, W6, W7, _, _, _ );
_R( _e, _a, _b, _c, _d, F1, 6,
WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 20,
W3, W4, W5, W6, W7, _, _, _ );
_R( _d, _e, _a, _b, _c, F1, 7,
WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,20,
W3, W4, W5, W6, W7, _, _, _ );
_R( _c, _d, _e, _a, _b, F1, 8,
WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 24,
W2, W3, W4, W5, W6, _, _, _ );
_R( _b, _c, _d, _e, _a, F1, 9,
WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 24,
W2, W3, W4, W5, W6, _, _, _ );
_R( _a, _b, _c, _d, _e, F1, 10,
WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 24,
W2, W3, W4, W5, W6, _, _, _ );
_R( _e, _a, _b, _c, _d, F1, 11,
WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,24,
W2, W3, W4, W5, W6, _, _, _ );
_R( _d, _e, _a, _b, _c, F1, 12,
WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 28,
W1, W2, W3, W4, W5, _, _, _ );
_R( _c, _d, _e, _a, _b, F1, 13,
WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 28,
W1, W2, W3, W4, W5, _, _, _ );
_R( _b, _c, _d, _e, _a, F1, 14,
WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 28,
W1, W2, W3, W4, W5, _, _, _ );
_R( _a, _b, _c, _d, _e, F1, 15,
WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,28,
W1, W2, W3, W4, W5, _, _, _ );
/* Transform 16-63 + Precalc 32-79. */
_R( _e, _a, _b, _c, _d, F1, 16,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 32,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _d, _e, _a, _b, _c, F1, 17,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 32,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _c, _d, _e, _a, _b, F1, 18,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 32,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _b, _c, _d, _e, _a, F1, 19,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 32,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _a, _b, _c, _d, _e, F2, 20,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 36,
W7, W0, W1, W2, W3, W4, W5, W6);
_R( _e, _a, _b, _c, _d, F2, 21,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 36,
W7, W0, W1, W2, W3, W4, W5, W6);
_R( _d, _e, _a, _b, _c, F2, 22,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 36,
W7, W0, W1, W2, W3, W4, W5, W6);
_R( _c, _d, _e, _a, _b, F2, 23,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 36,
W7, W0, W1, W2, W3, W4, W5, W6);
#undef curK
#define curK qK3
_R( _b, _c, _d, _e, _a, F2, 24,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 40,
W6, W7, W0, W1, W2, W3, W4, W5);
_R( _a, _b, _c, _d, _e, F2, 25,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 40,
W6, W7, W0, W1, W2, W3, W4, W5);
_R( _e, _a, _b, _c, _d, F2, 26,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 40,
W6, W7, W0, W1, W2, W3, W4, W5);
_R( _d, _e, _a, _b, _c, F2, 27,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 40,
W6, W7, W0, W1, W2, W3, W4, W5);
_R( _c, _d, _e, _a, _b, F2, 28,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 44,
W5, W6, W7, W0, W1, W2, W3, W4);
_R( _b, _c, _d, _e, _a, F2, 29,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 44,
W5, W6, W7, W0, W1, W2, W3, W4);
_R( _a, _b, _c, _d, _e, F2, 30,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 44,
W5, W6, W7, W0, W1, W2, W3, W4);
_R( _e, _a, _b, _c, _d, F2, 31,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 44,
W5, W6, W7, W0, W1, W2, W3, W4);
_R( _d, _e, _a, _b, _c, F2, 32,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 48,
W4, W5, W6, W7, W0, W1, W2, W3);
_R( _c, _d, _e, _a, _b, F2, 33,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 48,
W4, W5, W6, W7, W0, W1, W2, W3);
_R( _b, _c, _d, _e, _a, F2, 34,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 48,
W4, W5, W6, W7, W0, W1, W2, W3);
_R( _a, _b, _c, _d, _e, F2, 35,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 48,
W4, W5, W6, W7, W0, W1, W2, W3);
_R( _e, _a, _b, _c, _d, F2, 36,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 52,
W3, W4, W5, W6, W7, W0, W1, W2);
_R( _d, _e, _a, _b, _c, F2, 37,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 52,
W3, W4, W5, W6, W7, W0, W1, W2);
_R( _c, _d, _e, _a, _b, F2, 38,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 52,
W3, W4, W5, W6, W7, W0, W1, W2);
_R( _b, _c, _d, _e, _a, F2, 39,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 52,
W3, W4, W5, W6, W7, W0, W1, W2);
_R( _a, _b, _c, _d, _e, F3, 40,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 56,
W2, W3, W4, W5, W6, W7, W0, W1);
_R( _e, _a, _b, _c, _d, F3, 41,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 56,
W2, W3, W4, W5, W6, W7, W0, W1);
_R( _d, _e, _a, _b, _c, F3, 42,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 56,
W2, W3, W4, W5, W6, W7, W0, W1);
_R( _c, _d, _e, _a, _b, F3, 43,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 56,
W2, W3, W4, W5, W6, W7, W0, W1);
#undef curK
#define curK qK4
_R( _b, _c, _d, _e, _a, F3, 44,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 60,
W1, W2, W3, W4, W5, W6, W7, W0);
_R( _a, _b, _c, _d, _e, F3, 45,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 60,
W1, W2, W3, W4, W5, W6, W7, W0);
_R( _e, _a, _b, _c, _d, F3, 46,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 60,
W1, W2, W3, W4, W5, W6, W7, W0);
_R( _d, _e, _a, _b, _c, F3, 47,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 60,
W1, W2, W3, W4, W5, W6, W7, W0);
_R( _c, _d, _e, _a, _b, F3, 48,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 64,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _b, _c, _d, _e, _a, F3, 49,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 64,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _a, _b, _c, _d, _e, F3, 50,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 64,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _e, _a, _b, _c, _d, F3, 51,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 64,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _d, _e, _a, _b, _c, F3, 52,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 68,
W7, W0, W1, W2, W3, W4, W5, W6);
_R( _c, _d, _e, _a, _b, F3, 53,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 68,
W7, W0, W1, W2, W3, W4, W5, W6);
_R( _b, _c, _d, _e, _a, F3, 54,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 68,
W7, W0, W1, W2, W3, W4, W5, W6);
_R( _a, _b, _c, _d, _e, F3, 55,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 68,
W7, W0, W1, W2, W3, W4, W5, W6);
_R( _e, _a, _b, _c, _d, F3, 56,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 72,
W6, W7, W0, W1, W2, W3, W4, W5);
_R( _d, _e, _a, _b, _c, F3, 57,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 72,
W6, W7, W0, W1, W2, W3, W4, W5);
_R( _c, _d, _e, _a, _b, F3, 58,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 72,
W6, W7, W0, W1, W2, W3, W4, W5);
_R( _b, _c, _d, _e, _a, F3, 59,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 72,
W6, W7, W0, W1, W2, W3, W4, W5);
subs RNBLKS, #1;
_R( _a, _b, _c, _d, _e, F4, 60,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 76,
W5, W6, W7, W0, W1, W2, W3, W4);
_R( _e, _a, _b, _c, _d, F4, 61,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 76,
W5, W6, W7, W0, W1, W2, W3, W4);
_R( _d, _e, _a, _b, _c, F4, 62,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 76,
W5, W6, W7, W0, W1, W2, W3, W4);
_R( _c, _d, _e, _a, _b, F4, 63,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 76,
W5, W6, W7, W0, W1, W2, W3, W4);
beq .Lend;
/* Transform 64-79 + Precalc 0-15 of next block. */
#undef curK
#define curK qK1
_R( _b, _c, _d, _e, _a, F4, 64,
WPRECALC_00_15_0, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _a, _b, _c, _d, _e, F4, 65,
WPRECALC_00_15_1, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _e, _a, _b, _c, _d, F4, 66,
WPRECALC_00_15_2, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _d, _e, _a, _b, _c, F4, 67,
WPRECALC_00_15_3, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _c, _d, _e, _a, _b, F4, 68,
dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _b, _c, _d, _e, _a, F4, 69,
dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _a, _b, _c, _d, _e, F4, 70,
WPRECALC_00_15_4, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _e, _a, _b, _c, _d, F4, 71,
WPRECALC_00_15_5, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _d, _e, _a, _b, _c, F4, 72,
dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _c, _d, _e, _a, _b, F4, 73,
dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _b, _c, _d, _e, _a, F4, 74,
WPRECALC_00_15_6, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _a, _b, _c, _d, _e, F4, 75,
WPRECALC_00_15_7, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _e, _a, _b, _c, _d, F4, 76,
WPRECALC_00_15_8, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _d, _e, _a, _b, _c, F4, 77,
WPRECALC_00_15_9, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _c, _d, _e, _a, _b, F4, 78,
WPRECALC_00_15_10, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _b, _c, _d, _e, _a, F4, 79,
WPRECALC_00_15_11, dummy, WPRECALC_00_15_12, _, _, _, _, _, _, _, _, _ );
/* Update the chaining variables. */
ldm RSTATE, {RT0-RT3};
add _a, RT0;
ldr RT0, [RSTATE, #state_h4];
add _b, RT1;
add _c, RT2;
add _d, RT3;
add _e, RT0;
stm RSTATE, {_a-_e};
b .Loop;
.Lend:
/* Transform 64-79 */
R( _b, _c, _d, _e, _a, F4, 64 );
R( _a, _b, _c, _d, _e, F4, 65 );
R( _e, _a, _b, _c, _d, F4, 66 );
R( _d, _e, _a, _b, _c, F4, 67 );
R( _c, _d, _e, _a, _b, F4, 68 );
R( _b, _c, _d, _e, _a, F4, 69 );
R( _a, _b, _c, _d, _e, F4, 70 );
R( _e, _a, _b, _c, _d, F4, 71 );
R( _d, _e, _a, _b, _c, F4, 72 );
R( _c, _d, _e, _a, _b, F4, 73 );
R( _b, _c, _d, _e, _a, F4, 74 );
R( _a, _b, _c, _d, _e, F4, 75 );
R( _e, _a, _b, _c, _d, F4, 76 );
R( _d, _e, _a, _b, _c, F4, 77 );
R( _c, _d, _e, _a, _b, F4, 78 );
R( _b, _c, _d, _e, _a, F4, 79 );
mov sp, ROLDSTACK;
/* Update the chaining variables. */
ldm RSTATE, {RT0-RT3};
add _a, RT0;
ldr RT0, [RSTATE, #state_h4];
add _b, RT1;
add _c, RT2;
add _d, RT3;
/*vpop {q4-q7};*/
add _e, RT0;
stm RSTATE, {_a-_e};
pop {r4-r12, pc};
.Ldo_nothing:
bx lr
ENDPROC(sha1_transform_neon)
arch/arm/crypto/sha1-ce-core.S
/*
* sha1-ce-core.S - SHA-1 secure hash using ARMv8 Crypto Extensions
*
* Copyright (C) 2015 Linaro Ltd.
* Author: Ard Biesheuvel <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
.fpu crypto-neon-fp-armv8
k0 .req q0
k1 .req q1
k2 .req q2
k3 .req q3
ta0 .req q4
ta1 .req q5
tb0 .req q5
tb1 .req q4
dga .req q6
dgb .req q7
dgbs .req s28
dg0 .req q12
dg1a0 .req q13
dg1a1 .req q14
dg1b0 .req q14
dg1b1 .req q13
.macro add_only, op, ev, rc, s0, dg1
.ifnb \s0
vadd.u32 tb\ev, q\s0, \rc
.endif
sha1h.32 dg1b\ev, dg0
.ifb \dg1
sha1\op\().32 dg0, dg1a\ev, ta\ev
.else
sha1\op\().32 dg0, \dg1, ta\ev
.endif
.endm
.macro add_update, op, ev, rc, s0, s1, s2, s3, dg1
sha1su0.32 q\s0, q\s1, q\s2
add_only \op, \ev, \rc, \s1, \dg1
sha1su1.32 q\s0, q\s3
.endm
.align 6
.Lsha1_rcon:
.word 0x5a827999, 0x5a827999, 0x5a827999, 0x5a827999
.word 0x6ed9eba1, 0x6ed9eba1, 0x6ed9eba1, 0x6ed9eba1
.word 0x8f1bbcdc, 0x8f1bbcdc, 0x8f1bbcdc, 0x8f1bbcdc
.word 0xca62c1d6, 0xca62c1d6, 0xca62c1d6, 0xca62c1d6
/*
* void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
* int blocks);
*/
ENTRY(sha1_ce_transform)
/* load round constants */
adr ip, .Lsha1_rcon
vld1.32 {k0-k1}, [ip, :128]!
vld1.32 {k2-k3}, [ip, :128]
/* load state */
vld1.32 {dga}, [r0]
vldr dgbs, [r0, #16]
/* load input */
0: vld1.32 {q8-q9}, [r1]!
vld1.32 {q10-q11}, [r1]!
subs r2, r2, #1
#ifndef CONFIG_CPU_BIG_ENDIAN
vrev32.8 q8, q8
vrev32.8 q9, q9
vrev32.8 q10, q10
vrev32.8 q11, q11
#endif
vadd.u32 ta0, q8, k0
vmov dg0, dga
add_update c, 0, k0, 8, 9, 10, 11, dgb
add_update c, 1, k0, 9, 10, 11, 8
add_update c, 0, k0, 10, 11, 8, 9
add_update c, 1, k0, 11, 8, 9, 10
add_update c, 0, k1, 8, 9, 10, 11
add_update p, 1, k1, 9, 10, 11, 8
add_update p, 0, k1, 10, 11, 8, 9
add_update p, 1, k1, 11, 8, 9, 10
add_update p, 0, k1, 8, 9, 10, 11
add_update p, 1, k2, 9, 10, 11, 8
add_update m, 0, k2, 10, 11, 8, 9
add_update m, 1, k2, 11, 8, 9, 10
add_update m, 0, k2, 8, 9, 10, 11
add_update m, 1, k2, 9, 10, 11, 8
add_update m, 0, k3, 10, 11, 8, 9
add_update p, 1, k3, 11, 8, 9, 10
add_only p, 0, k3, 9
add_only p, 1, k3, 10
add_only p, 0, k3, 11
add_only p, 1
/* update state */
vadd.u32 dga, dga, dg0
vadd.u32 dgb, dgb, dg1a0
bne 0b
/* store new state */
vst1.32 {dga}, [r0]
vstr dgbs, [r0, #16]
bx lr
ENDPROC(sha1_ce_transform)
arch/arm/crypto/sha1-armv4-large.S
#define __ARM_ARCH__ __LINUX_ARM_ARCH__
@ SPDX-License-Identifier: GPL-2.0
@ This code is taken from the OpenSSL project but the author (Andy Polyakov)
@ has relicensed it under the GPLv2. Therefore this program is free software;
@ you can redistribute it and/or modify it under the terms of the GNU General
@ Public License version 2 as published by the Free Software Foundation.
@
@ The original headers, including the original license headers, are
@ included below for completeness.
@ ====================================================================
@ Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
@ project. The module is, however, dual licensed under OpenSSL and
@ CRYPTOGAMS licenses depending on where you obtain it. For further
@ details see http://www.openssl.org/~appro/cryptogams/.
@ ====================================================================
@ sha1_block procedure for ARMv4.
@
@ January 2007.
@ Size/performance trade-off
@ ====================================================================
@ impl size in bytes comp cycles[*] measured performance
@ ====================================================================
@ thumb 304 3212 4420
@ armv4-small 392/+29% 1958/+64% 2250/+96%
@ armv4-compact 740/+89% 1552/+26% 1840/+22%
@ armv4-large 1420/+92% 1307/+19% 1370/+34%[***]
@ full unroll ~5100/+260% ~1260/+4% ~1300/+5%
@ ====================================================================
@ thumb = same as 'small' but in Thumb instructions[**] and
@ with recurring code in two private functions;
@ small = detached Xload/update, loops are folded;
@ compact = detached Xload/update, 5x unroll;
@ large = interleaved Xload/update, 5x unroll;
@ full unroll = interleaved Xload/update, full unroll, estimated[!];
@
@ [*] Manually counted instructions in "grand" loop body. Measured
@ performance is affected by prologue and epilogue overhead,
@ i-cache availability, branch penalties, etc.
@ [**] While each Thumb instruction is twice smaller, they are not as
@ diverse as ARM ones: e.g., there are only two arithmetic
@ instructions with 3 arguments, no [fixed] rotate, addressing
@ modes are limited. As result it takes more instructions to do
@ the same job in Thumb, therefore the code is never twice as
@ small and always slower.
@ [***] which is also ~35% better than compiler generated code. Dual-
@ issue Cortex A8 core was measured to process input block in
@ ~990 cycles.
@ August 2010.
@
@ Rescheduling for dual-issue pipeline resulted in 13% improvement on
@ Cortex A8 core and in absolute terms ~870 cycles per input block
@ [or 13.6 cycles per byte].
@ February 2011.
@
@ Profiler-assisted and platform-specific optimization resulted in 10%
@ improvement on Cortex A8 core and 12.2 cycles per byte.
#include <linux/linkage.h>
.text
.align 2
ENTRY(sha1_block_data_order)
stmdb sp!,{r4-r12,lr}
add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
ldmia r0,{r3,r4,r5,r6,r7}
.Lloop:
ldr r8,.LK_00_19
mov r14,sp
sub sp,sp,#15*4
mov r5,r5,ror#30
mov r6,r6,ror#30
mov r7,r7,ror#30 @ [6]
.L_00_15:
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r7,r8,r7,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r5,r6 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r7,r8,r7,ror#2 @ E+=K_00_19
eor r10,r5,r6 @ F_xx_xx
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r4,r10,ror#2
add r7,r7,r9 @ E+=X[i]
eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r7,r7,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r6,r8,r6,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r4,r5 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r6,r8,r6,ror#2 @ E+=K_00_19
eor r10,r4,r5 @ F_xx_xx
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r3,r10,ror#2
add r6,r6,r9 @ E+=X[i]
eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r6,r6,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r5,r8,r5,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r3,r4 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r5,r8,r5,ror#2 @ E+=K_00_19
eor r10,r3,r4 @ F_xx_xx
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r7,r10,ror#2
add r5,r5,r9 @ E+=X[i]
eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r5,r5,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r4,r8,r4,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r7,r3 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r4,r8,r4,ror#2 @ E+=K_00_19
eor r10,r7,r3 @ F_xx_xx
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r6,r10,ror#2
add r4,r4,r9 @ E+=X[i]
eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r4,r4,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r3,r8,r3,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r6,r7 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r3,r8,r3,ror#2 @ E+=K_00_19
eor r10,r6,r7 @ F_xx_xx
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r5,r10,ror#2
add r3,r3,r9 @ E+=X[i]
eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r3,r3,r10 @ E+=F_00_19(B,C,D)
cmp r14,sp
bne .L_00_15 @ [((11+4)*5+2)*3]
sub sp,sp,#25*4
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r7,r8,r7,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r5,r6 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r7,r8,r7,ror#2 @ E+=K_00_19
eor r10,r5,r6 @ F_xx_xx
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r4,r10,ror#2
add r7,r7,r9 @ E+=X[i]
eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r7,r7,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r6,r8,r6,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r4,r5 @ F_xx_xx
mov r9,r9,ror#31
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r3,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r6,r6,r9 @ E+=X[i]
eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
add r6,r6,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r5,r8,r5,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r3,r4 @ F_xx_xx
mov r9,r9,ror#31
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r7,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r5,r5,r9 @ E+=X[i]
eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
add r5,r5,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r4,r8,r4,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r7,r3 @ F_xx_xx
mov r9,r9,ror#31
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r6,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r4,r4,r9 @ E+=X[i]
eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
add r4,r4,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r3,r8,r3,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r6,r7 @ F_xx_xx
mov r9,r9,ror#31
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r5,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r3,r3,r9 @ E+=X[i]
eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
add r3,r3,r10 @ E+=F_00_19(B,C,D)
ldr r8,.LK_20_39 @ [+15+16*4]
cmn sp,#0 @ [+3], clear carry to denote 20_39
.L_20_39_or_60_79:
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r7,r8,r7,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r5,r6 @ F_xx_xx
mov r9,r9,ror#31
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r4,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r7,r7,r9 @ E+=X[i]
add r7,r7,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r6,r8,r6,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r4,r5 @ F_xx_xx
mov r9,r9,ror#31
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r3,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r6,r6,r9 @ E+=X[i]
add r6,r6,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r5,r8,r5,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r3,r4 @ F_xx_xx
mov r9,r9,ror#31
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r7,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r5,r5,r9 @ E+=X[i]
add r5,r5,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r4,r8,r4,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r7,r3 @ F_xx_xx
mov r9,r9,ror#31
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r6,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r4,r4,r9 @ E+=X[i]
add r4,r4,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r3,r8,r3,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r6,r7 @ F_xx_xx
mov r9,r9,ror#31
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r5,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r3,r3,r9 @ E+=X[i]
add r3,r3,r10 @ E+=F_20_39(B,C,D)
ARM( teq r14,sp ) @ preserve carry
THUMB( mov r11,sp )
THUMB( teq r14,r11 ) @ preserve carry
bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4]
bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes
ldr r8,.LK_40_59
sub sp,sp,#20*4 @ [+2]
.L_40_59:
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r7,r8,r7,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r5,r6 @ F_xx_xx
mov r9,r9,ror#31
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r4,r10,ror#2 @ F_xx_xx
and r11,r5,r6 @ F_xx_xx
add r7,r7,r9 @ E+=X[i]
add r7,r7,r10 @ E+=F_40_59(B,C,D)
add r7,r7,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r6,r8,r6,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r4,r5 @ F_xx_xx
mov r9,r9,ror#31
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r3,r10,ror#2 @ F_xx_xx
and r11,r4,r5 @ F_xx_xx
add r6,r6,r9 @ E+=X[i]
add r6,r6,r10 @ E+=F_40_59(B,C,D)
add r6,r6,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r5,r8,r5,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r3,r4 @ F_xx_xx
mov r9,r9,ror#31
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r7,r10,ror#2 @ F_xx_xx
and r11,r3,r4 @ F_xx_xx
add r5,r5,r9 @ E+=X[i]
add r5,r5,r10 @ E+=F_40_59(B,C,D)
add r5,r5,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r4,r8,r4,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r7,r3 @ F_xx_xx
mov r9,r9,ror#31
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r6,r10,ror#2 @ F_xx_xx
and r11,r7,r3 @ F_xx_xx
add r4,r4,r9 @ E+=X[i]
add r4,r4,r10 @ E+=F_40_59(B,C,D)
add r4,r4,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r3,r8,r3,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r6,r7 @ F_xx_xx
mov r9,r9,ror#31
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r5,r10,ror#2 @ F_xx_xx
and r11,r6,r7 @ F_xx_xx
add r3,r3,r9 @ E+=X[i]
add r3,r3,r10 @ E+=F_40_59(B,C,D)
add r3,r3,r11,ror#2
cmp r14,sp
bne .L_40_59 @ [+((12+5)*5+2)*4]
ldr r8,.LK_60_79
sub sp,sp,#20*4
cmp sp,#0 @ set carry to denote 60_79
b .L_20_39_or_60_79 @ [+4], spare 300 bytes
.L_done:
add sp,sp,#80*4 @ "deallocate" stack frame
ldmia r0,{r8,r9,r10,r11,r12}
add r3,r8,r3
add r4,r9,r4
add r5,r10,r5,ror#2
add r6,r11,r6,ror#2
add r7,r12,r7,ror#2
stmia r0,{r3,r4,r5,r6,r7}
teq r1,r2
bne .Lloop @ [+18], total 1307
ldmia sp!,{r4-r12,pc}
.align 2
.LK_00_19: .word 0x5a827999
.LK_20_39: .word 0x6ed9eba1
.LK_40_59: .word 0x8f1bbcdc
.LK_60_79: .word 0xca62c1d6
ENDPROC(sha1_block_data_order)
.asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
.align 2
arch/arm/crypto/chacha20-neon-core.S
/*
* ChaCha20 256-bit cipher algorithm, RFC7539, ARM NEON functions
*
* Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Based on:
* ChaCha20 256-bit cipher algorithm, RFC7539, x64 SSE3 functions
*
* Copyright (C) 2015 Martin Willi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/linkage.h>
.text
.fpu neon
.align 5
ENTRY(chacha20_block_xor_neon)
// r0: Input state matrix, s
// r1: 1 data block output, o
// r2: 1 data block input, i
//
// This function encrypts one ChaCha20 block by loading the state matrix
// in four NEON registers. It performs matrix operations on four words in
// parallel, but requires shuffling to rearrange the words after each
// round.
//
// x0..3 = s0..3
add ip, r0, #0x20
vld1.32 {q0-q1}, [r0]
vld1.32 {q2-q3}, [ip]
vmov q8, q0
vmov q9, q1
vmov q10, q2
vmov q11, q3
mov r3, #10
.Ldoubleround:
// x0 += x1, x3 = rotl32(x3 ^ x0, 16)
vadd.i32 q0, q0, q1
veor q3, q3, q0
vrev32.16 q3, q3
// x2 += x3, x1 = rotl32(x1 ^ x2, 12)
vadd.i32 q2, q2, q3
veor q4, q1, q2
vshl.u32 q1, q4, #12
vsri.u32 q1, q4, #20
// x0 += x1, x3 = rotl32(x3 ^ x0, 8)
vadd.i32 q0, q0, q1
veor q4, q3, q0
vshl.u32 q3, q4, #8
vsri.u32 q3, q4, #24
// x2 += x3, x1 = rotl32(x1 ^ x2, 7)
vadd.i32 q2, q2, q3
veor q4, q1, q2
vshl.u32 q1, q4, #7
vsri.u32 q1, q4, #25
// x1 = shuffle32(x1, MASK(0, 3, 2, 1))
vext.8 q1, q1, q1, #4
// x2 = shuffle32(x2, MASK(1, 0, 3, 2))
vext.8 q2, q2, q2, #8
// x3 = shuffle32(x3, MASK(2, 1, 0, 3))
vext.8 q3, q3, q3, #12
// x0 += x1, x3 = rotl32(x3 ^ x0, 16)
vadd.i32 q0, q0, q1
veor q3, q3, q0
vrev32.16 q3, q3
// x2 += x3, x1 = rotl32(x1 ^ x2, 12)
vadd.i32 q2, q2, q3
veor q4, q1, q2
vshl.u32 q1, q4, #12
vsri.u32 q1, q4, #20
// x0 += x1, x3 = rotl32(x3 ^ x0, 8)
vadd.i32 q0, q0, q1
veor q4, q3, q0
vshl.u32 q3, q4, #8
vsri.u32 q3, q4, #24
// x2 += x3, x1 = rotl32(x1 ^ x2, 7)
vadd.i32 q2, q2, q3
veor q4, q1, q2
vshl.u32 q1, q4, #7
vsri.u32 q1, q4, #25
// x1 = shuffle32(x1, MASK(2, 1, 0, 3))
vext.8 q1, q1, q1, #12
// x2 = shuffle32(x2, MASK(1, 0, 3, 2))
vext.8 q2, q2, q2, #8
// x3 = shuffle32(x3, MASK(0, 3, 2, 1))
vext.8 q3, q3, q3, #4
subs r3, r3, #1
bne .Ldoubleround
add ip, r2, #0x20
vld1.8 {q4-q5}, [r2]
vld1.8 {q6-q7}, [ip]
// o0 = i0 ^ (x0 + s0)
vadd.i32 q0, q0, q8
veor q0, q0, q4
// o1 = i1 ^ (x1 + s1)
vadd.i32 q1, q1, q9
veor q1, q1, q5
// o2 = i2 ^ (x2 + s2)
vadd.i32 q2, q2, q10
veor q2, q2, q6
// o3 = i3 ^ (x3 + s3)
vadd.i32 q3, q3, q11
veor q3, q3, q7
add ip, r1, #0x20
vst1.8 {q0-q1}, [r1]
vst1.8 {q2-q3}, [ip]
bx lr
ENDPROC(chacha20_block_xor_neon)
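// For reference (standard ChaCha20 quarter round, shown to document
// what the vector code above computes four lanes at a time):
//
//	#define QR(a, b, c, d)				\
//		a += b;  d = rol32(d ^ a, 16);		\
//		c += d;  b = rol32(b ^ c, 12);		\
//		a += b;  d = rol32(d ^ a,  8);		\
//		c += d;  b = rol32(b ^ c,  7)
//
// One .Ldoubleround iteration applies QR to the four columns of the
// state and then, after the vext.8 lane rotations, to the four
// diagonals; ten iterations give the 20 rounds of RFC 7539.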
.align 5
ENTRY(chacha20_4block_xor_neon)
push {r4-r6, lr}
mov ip, sp // preserve the stack pointer
sub r3, sp, #0x20 // allocate a 32 byte buffer
bic r3, r3, #0x1f // aligned to 32 bytes
mov sp, r3
// r0: Input state matrix, s
// r1: 4 data blocks output, o
// r2: 4 data blocks input, i
//
// This function encrypts four consecutive ChaCha20 blocks by loading
// the state matrix in NEON registers four times. The algorithm performs
// each operation on the corresponding word of each state matrix, hence
// requires no word shuffling. For the final XORing step we transpose the
// matrix by interleaving 32- and then 64-bit words, which allows us to
// do XOR in NEON registers.
//
// x0..15[0-3] = s0..3[0..3]
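//
// Layout note (hedged, for reference only): each of the 16 state words is
// broadcast across the four 32-bit lanes of one q register, so lane j of
// every register belongs to block j; only the counter word x12 differs per
// lane (CTRINC adds 0,1,2,3). The vzip.32/vswp pairs used before the final
// XOR are, taken together, a plain 4x4 transpose of 32-bit lanes, e.g.:
//
//	static void transpose_4x4(u32 m[4][4])
//	{
//		for (int i = 0; i < 4; i++)
//			for (int j = i + 1; j < 4; j++) {
//				u32 t = m[i][j];
//				m[i][j] = m[j][i];
//				m[j][i] = t;
//			}
//	}
//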
add r3, r0, #0x20
vld1.32 {q0-q1}, [r0]
vld1.32 {q2-q3}, [r3]
adr r3, CTRINC
vdup.32 q15, d7[1]
vdup.32 q14, d7[0]
vld1.32 {q11}, [r3, :128]
vdup.32 q13, d6[1]
vdup.32 q12, d6[0]
vadd.i32 q12, q12, q11 // x12 += counter values 0-3
vdup.32 q11, d5[1]
vdup.32 q10, d5[0]
vdup.32 q9, d4[1]
vdup.32 q8, d4[0]
vdup.32 q7, d3[1]
vdup.32 q6, d3[0]
vdup.32 q5, d2[1]
vdup.32 q4, d2[0]
vdup.32 q3, d1[1]
vdup.32 q2, d1[0]
vdup.32 q1, d0[1]
vdup.32 q0, d0[0]
mov r3, #10
.Ldoubleround4:
// x0 += x4, x12 = rotl32(x12 ^ x0, 16)
// x1 += x5, x13 = rotl32(x13 ^ x1, 16)
// x2 += x6, x14 = rotl32(x14 ^ x2, 16)
// x3 += x7, x15 = rotl32(x15 ^ x3, 16)
vadd.i32 q0, q0, q4
vadd.i32 q1, q1, q5
vadd.i32 q2, q2, q6
vadd.i32 q3, q3, q7
veor q12, q12, q0
veor q13, q13, q1
veor q14, q14, q2
veor q15, q15, q3
vrev32.16 q12, q12
vrev32.16 q13, q13
vrev32.16 q14, q14
vrev32.16 q15, q15
// x8 += x12, x4 = rotl32(x4 ^ x8, 12)
// x9 += x13, x5 = rotl32(x5 ^ x9, 12)
// x10 += x14, x6 = rotl32(x6 ^ x10, 12)
// x11 += x15, x7 = rotl32(x7 ^ x11, 12)
vadd.i32 q8, q8, q12
vadd.i32 q9, q9, q13
vadd.i32 q10, q10, q14
vadd.i32 q11, q11, q15
vst1.32 {q8-q9}, [sp, :256]
veor q8, q4, q8
veor q9, q5, q9
vshl.u32 q4, q8, #12
vshl.u32 q5, q9, #12
vsri.u32 q4, q8, #20
vsri.u32 q5, q9, #20
veor q8, q6, q10
veor q9, q7, q11
vshl.u32 q6, q8, #12
vshl.u32 q7, q9, #12
vsri.u32 q6, q8, #20
vsri.u32 q7, q9, #20
// x0 += x4, x12 = rotl32(x12 ^ x0, 8)
// x1 += x5, x13 = rotl32(x13 ^ x1, 8)
// x2 += x6, x14 = rotl32(x14 ^ x2, 8)
// x3 += x7, x15 = rotl32(x15 ^ x3, 8)
vadd.i32 q0, q0, q4
vadd.i32 q1, q1, q5
vadd.i32 q2, q2, q6
vadd.i32 q3, q3, q7
veor q8, q12, q0
veor q9, q13, q1
vshl.u32 q12, q8, #8
vshl.u32 q13, q9, #8
vsri.u32 q12, q8, #24
vsri.u32 q13, q9, #24
veor q8, q14, q2
veor q9, q15, q3
vshl.u32 q14, q8, #8
vshl.u32 q15, q9, #8
vsri.u32 q14, q8, #24
vsri.u32 q15, q9, #24
vld1.32 {q8-q9}, [sp, :256]
// x8 += x12, x4 = rotl32(x4 ^ x8, 7)
// x9 += x13, x5 = rotl32(x5 ^ x9, 7)
// x10 += x14, x6 = rotl32(x6 ^ x10, 7)
// x11 += x15, x7 = rotl32(x7 ^ x11, 7)
vadd.i32 q8, q8, q12
vadd.i32 q9, q9, q13
vadd.i32 q10, q10, q14
vadd.i32 q11, q11, q15
vst1.32 {q8-q9}, [sp, :256]
veor q8, q4, q8
veor q9, q5, q9
vshl.u32 q4, q8, #7
vshl.u32 q5, q9, #7
vsri.u32 q4, q8, #25
vsri.u32 q5, q9, #25
veor q8, q6, q10
veor q9, q7, q11
vshl.u32 q6, q8, #7
vshl.u32 q7, q9, #7
vsri.u32 q6, q8, #25
vsri.u32 q7, q9, #25
vld1.32 {q8-q9}, [sp, :256]
// x0 += x5, x15 = rotl32(x15 ^ x0, 16)
// x1 += x6, x12 = rotl32(x12 ^ x1, 16)
// x2 += x7, x13 = rotl32(x13 ^ x2, 16)
// x3 += x4, x14 = rotl32(x14 ^ x3, 16)
vadd.i32 q0, q0, q5
vadd.i32 q1, q1, q6
vadd.i32 q2, q2, q7
vadd.i32 q3, q3, q4
veor q15, q15, q0
veor q12, q12, q1
veor q13, q13, q2
veor q14, q14, q3
vrev32.16 q15, q15
vrev32.16 q12, q12
vrev32.16 q13, q13
vrev32.16 q14, q14
// x10 += x15, x5 = rotl32(x5 ^ x10, 12)
// x11 += x12, x6 = rotl32(x6 ^ x11, 12)
// x8 += x13, x7 = rotl32(x7 ^ x8, 12)
// x9 += x14, x4 = rotl32(x4 ^ x9, 12)
vadd.i32 q10, q10, q15
vadd.i32 q11, q11, q12
vadd.i32 q8, q8, q13
vadd.i32 q9, q9, q14
vst1.32 {q8-q9}, [sp, :256]
veor q8, q7, q8
veor q9, q4, q9
vshl.u32 q7, q8, #12
vshl.u32 q4, q9, #12
vsri.u32 q7, q8, #20
vsri.u32 q4, q9, #20
veor q8, q5, q10
veor q9, q6, q11
vshl.u32 q5, q8, #12
vshl.u32 q6, q9, #12
vsri.u32 q5, q8, #20
vsri.u32 q6, q9, #20
// x0 += x5, x15 = rotl32(x15 ^ x0, 8)
// x1 += x6, x12 = rotl32(x12 ^ x1, 8)
// x2 += x7, x13 = rotl32(x13 ^ x2, 8)
// x3 += x4, x14 = rotl32(x14 ^ x3, 8)
vadd.i32 q0, q0, q5
vadd.i32 q1, q1, q6
vadd.i32 q2, q2, q7
vadd.i32 q3, q3, q4
veor q8, q15, q0
veor q9, q12, q1
vshl.u32 q15, q8, #8
vshl.u32 q12, q9, #8
vsri.u32 q15, q8, #24
vsri.u32 q12, q9, #24
veor q8, q13, q2
veor q9, q14, q3
vshl.u32 q13, q8, #8
vshl.u32 q14, q9, #8
vsri.u32 q13, q8, #24
vsri.u32 q14, q9, #24
vld1.32 {q8-q9}, [sp, :256]
// x10 += x15, x5 = rotl32(x5 ^ x10, 7)
// x11 += x12, x6 = rotl32(x6 ^ x11, 7)
// x8 += x13, x7 = rotl32(x7 ^ x8, 7)
// x9 += x14, x4 = rotl32(x4 ^ x9, 7)
vadd.i32 q10, q10, q15
vadd.i32 q11, q11, q12
vadd.i32 q8, q8, q13
vadd.i32 q9, q9, q14
vst1.32 {q8-q9}, [sp, :256]
veor q8, q7, q8
veor q9, q4, q9
vshl.u32 q7, q8, #7
vshl.u32 q4, q9, #7
vsri.u32 q7, q8, #25
vsri.u32 q4, q9, #25
veor q8, q5, q10
veor q9, q6, q11
vshl.u32 q5, q8, #7
vshl.u32 q6, q9, #7
vsri.u32 q5, q8, #25
vsri.u32 q6, q9, #25
subs r3, r3, #1
beq 0f
vld1.32 {q8-q9}, [sp, :256]
b .Ldoubleround4
// x0[0-3] += s0[0]
// x1[0-3] += s0[1]
// x2[0-3] += s0[2]
// x3[0-3] += s0[3]
0: ldmia r0!, {r3-r6}
vdup.32 q8, r3
vdup.32 q9, r4
vadd.i32 q0, q0, q8
vadd.i32 q1, q1, q9
vdup.32 q8, r5
vdup.32 q9, r6
vadd.i32 q2, q2, q8
vadd.i32 q3, q3, q9
// x4[0-3] += s1[0]
// x5[0-3] += s1[1]
// x6[0-3] += s1[2]
// x7[0-3] += s1[3]
ldmia r0!, {r3-r6}
vdup.32 q8, r3
vdup.32 q9, r4
vadd.i32 q4, q4, q8
vadd.i32 q5, q5, q9
vdup.32 q8, r5
vdup.32 q9, r6
vadd.i32 q6, q6, q8
vadd.i32 q7, q7, q9
// interleave 32-bit words in state n, n+1
vzip.32 q0, q1
vzip.32 q2, q3
vzip.32 q4, q5
vzip.32 q6, q7
// interleave 64-bit words in state n, n+2
vswp d1, d4
vswp d3, d6
vswp d9, d12
vswp d11, d14
// xor with corresponding input, write to output
vld1.8 {q8-q9}, [r2]!
veor q8, q8, q0
veor q9, q9, q4
vst1.8 {q8-q9}, [r1]!
vld1.32 {q8-q9}, [sp, :256]
// x8[0-3] += s2[0]
// x9[0-3] += s2[1]
// x10[0-3] += s2[2]
// x11[0-3] += s2[3]
ldmia r0!, {r3-r6}
vdup.32 q0, r3
vdup.32 q4, r4
vadd.i32 q8, q8, q0
vadd.i32 q9, q9, q4
vdup.32 q0, r5
vdup.32 q4, r6
vadd.i32 q10, q10, q0
vadd.i32 q11, q11, q4
// x12[0-3] += s3[0]
// x13[0-3] += s3[1]
// x14[0-3] += s3[2]
// x15[0-3] += s3[3]
ldmia r0!, {r3-r6}
vdup.32 q0, r3
vdup.32 q4, r4
adr r3, CTRINC
vadd.i32 q12, q12, q0
vld1.32 {q0}, [r3, :128]
vadd.i32 q13, q13, q4
vadd.i32 q12, q12, q0 // x12 += counter values 0-3
vdup.32 q0, r5
vdup.32 q4, r6
vadd.i32 q14, q14, q0
vadd.i32 q15, q15, q4
// interleave 32-bit words in state n, n+1
vzip.32 q8, q9
vzip.32 q10, q11
vzip.32 q12, q13
vzip.32 q14, q15
// interleave 64-bit words in state n, n+2
vswp d17, d20
vswp d19, d22
vswp d25, d28
vswp d27, d30
vmov q4, q1
vld1.8 {q0-q1}, [r2]!
veor q0, q0, q8
veor q1, q1, q12
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
veor q0, q0, q2
veor q1, q1, q6
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
veor q0, q0, q10
veor q1, q1, q14
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
veor q0, q0, q4
veor q1, q1, q5
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
veor q0, q0, q9
veor q1, q1, q13
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
veor q0, q0, q3
veor q1, q1, q7
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]
veor q0, q0, q11
veor q1, q1, q15
vst1.8 {q0-q1}, [r1]
mov sp, ip
pop {r4-r6, pc}
ENDPROC(chacha20_4block_xor_neon)
.align 4
CTRINC: .word 0, 1, 2, 3
arch/arm/crypto/ghash-ce-core.S
/*
* Accelerated GHASH implementation with NEON/ARMv8 vmull.p8/64 instructions.
*
* Copyright (C) 2015 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
SHASH .req q0
T1 .req q1
XL .req q2
XM .req q3
XH .req q4
IN1 .req q4
SHASH_L .req d0
SHASH_H .req d1
T1_L .req d2
T1_H .req d3
XL_L .req d4
XL_H .req d5
XM_L .req d6
XM_H .req d7
XH_L .req d8
t0l .req d10
t0h .req d11
t1l .req d12
t1h .req d13
t2l .req d14
t2h .req d15
t3l .req d16
t3h .req d17
t4l .req d18
t4h .req d19
t0q .req q5
t1q .req q6
t2q .req q7
t3q .req q8
t4q .req q9
T2 .req q9
s1l .req d20
s1h .req d21
s2l .req d22
s2h .req d23
s3l .req d24
s3h .req d25
s4l .req d26
s4h .req d27
MASK .req d28
SHASH2_p8 .req d28
k16 .req d29
k32 .req d30
k48 .req d31
SHASH2_p64 .req d31
.text
.fpu crypto-neon-fp-armv8
.macro __pmull_p64, rd, rn, rm, b1, b2, b3, b4
vmull.p64 \rd, \rn, \rm
.endm
/*
* This implementation of 64x64 -> 128 bit polynomial multiplication
* using vmull.p8 instructions (8x8 -> 16) is taken from the paper
* "Fast Software Polynomial Multiplication on ARM Processors Using
* the NEON Engine" by Danilo Camara, Conrado Gouvea, Julio Lopez and
* Ricardo Dahab (https://hal.inria.fr/hal-01506572)
*
* It has been slightly tweaked for in-order performance, and to allow
* 'rq' to overlap with 'ad' or 'bd'.
*/
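/*
 * For reference, a hedged C model (illustrative only) of the 64x64 -> 128
 * bit carry-less multiplication that the macro below builds out of 8x8
 * vmull.p8 partial products:
 *
 *	static void clmul64_ref(u64 a, u64 b, u64 *hi, u64 *lo)
 *	{
 *		u64 h = 0, l = 0;
 *		int i;
 *
 *		for (i = 0; i < 64; i++)
 *			if ((b >> i) & 1) {
 *				l ^= a << i;
 *				if (i)
 *					h ^= a >> (64 - i);
 *			}
 *		*hi = h;
 *		*lo = l;
 *	}
 */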
.macro __pmull_p8, rq, ad, bd, b1=t4l, b2=t3l, b3=t4l, b4=t3l
vext.8 t0l, \ad, \ad, #1 @ A1
.ifc \b1, t4l
vext.8 t4l, \bd, \bd, #1 @ B1
.endif
vmull.p8 t0q, t0l, \bd @ F = A1*B
vext.8 t1l, \ad, \ad, #2 @ A2
vmull.p8 t4q, \ad, \b1 @ E = A*B1
.ifc \b2, t3l
vext.8 t3l, \bd, \bd, #2 @ B2
.endif
vmull.p8 t1q, t1l, \bd @ H = A2*B
vext.8 t2l, \ad, \ad, #3 @ A3
vmull.p8 t3q, \ad, \b2 @ G = A*B2
veor t0q, t0q, t4q @ L = E + F
.ifc \b3, t4l
vext.8 t4l, \bd, \bd, #3 @ B3
.endif
vmull.p8 t2q, t2l, \bd @ J = A3*B
veor t0l, t0l, t0h @ t0 = (L) (P0 + P1) << 8
veor t1q, t1q, t3q @ M = G + H
.ifc \b4, t3l
vext.8 t3l, \bd, \bd, #4 @ B4
.endif
vmull.p8 t4q, \ad, \b3 @ I = A*B3
veor t1l, t1l, t1h @ t1 = (M) (P2 + P3) << 16
vmull.p8 t3q, \ad, \b4 @ K = A*B4
vand t0h, t0h, k48
vand t1h, t1h, k32
veor t2q, t2q, t4q @ N = I + J
veor t0l, t0l, t0h
veor t1l, t1l, t1h
veor t2l, t2l, t2h @ t2 = (N) (P4 + P5) << 24
vand t2h, t2h, k16
veor t3l, t3l, t3h @ t3 = (K) (P6 + P7) << 32
vmov.i64 t3h, #0
vext.8 t0q, t0q, t0q, #15
veor t2l, t2l, t2h
vext.8 t1q, t1q, t1q, #14
vmull.p8 \rq, \ad, \bd @ D = A*B
vext.8 t2q, t2q, t2q, #13
vext.8 t3q, t3q, t3q, #12
veor t0q, t0q, t1q
veor t2q, t2q, t3q
veor \rq, \rq, t0q
veor \rq, \rq, t2q
.endm
//
// PMULL (64x64->128) based reduction for CPUs that can do
// it in a single instruction.
//
.macro __pmull_reduce_p64
vmull.p64 T1, XL_L, MASK
veor XH_L, XH_L, XM_H
vext.8 T1, T1, T1, #8
veor XL_H, XL_H, XM_L
veor T1, T1, XL
vmull.p64 XL, T1_H, MASK
.endm
//
// Alternative reduction for CPUs that lack support for the
// 64x64->128 PMULL instruction
//
.macro __pmull_reduce_p8
veor XL_H, XL_H, XM_L
veor XH_L, XH_L, XM_H
vshl.i64 T1, XL, #57
vshl.i64 T2, XL, #62
veor T1, T1, T2
vshl.i64 T2, XL, #63
veor T1, T1, T2
veor XL_H, XL_H, T1_L
veor XH_L, XH_L, T1_H
vshr.u64 T1, XL, #1
veor XH, XH, XL
veor XL, XL, T1
vshr.u64 T1, T1, #6
vshr.u64 XL, XL, #1
.endm
.macro ghash_update, pn
vld1.64 {XL}, [r1]
/* do the head block first, if supplied */
ldr ip, [sp]
teq ip, #0
beq 0f
vld1.64 {T1}, [ip]
teq r0, #0
b 1f
0: vld1.64 {T1}, [r2]!
subs r0, r0, #1
1: /* multiply XL by SHASH in GF(2^128) */
#ifndef CONFIG_CPU_BIG_ENDIAN
vrev64.8 T1, T1
#endif
vext.8 IN1, T1, T1, #8
veor T1_L, T1_L, XL_H
veor XL, XL, IN1
__pmull_\pn XH, XL_H, SHASH_H, s1h, s2h, s3h, s4h @ a1 * b1
veor T1, T1, XL
__pmull_\pn XL, XL_L, SHASH_L, s1l, s2l, s3l, s4l @ a0 * b0
__pmull_\pn XM, T1_L, SHASH2_\pn @ (a1+a0)(b1+b0)
veor T1, XL, XH
veor XM, XM, T1
__pmull_reduce_\pn
veor T1, T1, XH
veor XL, XL, T1
bne 0b
vst1.64 {XL}, [r1]
bx lr
.endm
/*
* void pmull_ghash_update(int blocks, u64 dg[], const char *src,
* struct ghash_key const *k, const char *head)
*/
ENTRY(pmull_ghash_update_p64)
vld1.64 {SHASH}, [r3]
veor SHASH2_p64, SHASH_L, SHASH_H
vmov.i8 MASK, #0xe1
vshl.u64 MASK, MASK, #57
ghash_update p64
ENDPROC(pmull_ghash_update_p64)
ENTRY(pmull_ghash_update_p8)
vld1.64 {SHASH}, [r3]
veor SHASH2_p8, SHASH_L, SHASH_H
vext.8 s1l, SHASH_L, SHASH_L, #1
vext.8 s2l, SHASH_L, SHASH_L, #2
vext.8 s3l, SHASH_L, SHASH_L, #3
vext.8 s4l, SHASH_L, SHASH_L, #4
vext.8 s1h, SHASH_H, SHASH_H, #1
vext.8 s2h, SHASH_H, SHASH_H, #2
vext.8 s3h, SHASH_H, SHASH_H, #3
vext.8 s4h, SHASH_H, SHASH_H, #4
vmov.i64 k16, #0xffff
vmov.i64 k32, #0xffffffff
vmov.i64 k48, #0xffffffffffff
ghash_update p8
ENDPROC(pmull_ghash_update_p8)
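/*
 * For reference, a hedged C model (illustrative only, not the kernel's
 * gf128mul API) of the per-block update both entry points above perform:
 * dg = (dg ^ block) * H in GF(2^128), using the GHASH bit ordering whose
 * reduction term corresponds to the 0xe1-based MASK constant loaded above.
 *
 *	struct u128_ref { u64 hi, lo; };	// hi = first 8 bytes (big endian)
 *
 *	static struct u128_ref ghash_mul_ref(struct u128_ref x, struct u128_ref h)
 *	{
 *		struct u128_ref z = { 0, 0 };
 *		int i;
 *
 *		for (i = 0; i < 128; i++) {
 *			u64 bit = (i < 64) ? (x.hi >> (63 - i)) & 1
 *					   : (x.lo >> (127 - i)) & 1;
 *			u64 carry = h.lo & 1;
 *
 *			if (bit) {
 *				z.hi ^= h.hi;
 *				z.lo ^= h.lo;
 *			}
 *			h.lo = (h.lo >> 1) | (h.hi << 63);
 *			h.hi >>= 1;
 *			if (carry)
 *				h.hi ^= 0xe100000000000000ULL;
 *		}
 *		return z;
 *	}
 */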
arch/arm/crypto/aes-neonbs-core.S
/*
* Bit sliced AES using NEON instructions
*
* Copyright (C) 2017 Linaro Ltd.
* Author: Ard Biesheuvel <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* The algorithm implemented here is described in detail by the paper
* 'Faster and Timing-Attack Resistant AES-GCM' by Emilia Kaesper and
* Peter Schwabe (https://eprint.iacr.org/2009/129.pdf)
*
* This implementation is based primarily on the OpenSSL implementation
* for 32-bit ARM written by Andy Polyakov <appro@openssl.org>
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
.fpu neon
rounds .req ip
bskey .req r4
q0l .req d0
q0h .req d1
q1l .req d2
q1h .req d3
q2l .req d4
q2h .req d5
q3l .req d6
q3h .req d7
q4l .req d8
q4h .req d9
q5l .req d10
q5h .req d11
q6l .req d12
q6h .req d13
q7l .req d14
q7h .req d15
q8l .req d16
q8h .req d17
q9l .req d18
q9h .req d19
q10l .req d20
q10h .req d21
q11l .req d22
q11h .req d23
q12l .req d24
q12h .req d25
q13l .req d26
q13h .req d27
q14l .req d28
q14h .req d29
q15l .req d30
q15h .req d31
.macro __tbl, out, tbl, in, tmp
.ifc \out, \tbl
.ifb \tmp
.error __tbl needs temp register if out == tbl
.endif
vmov \tmp, \out
.endif
vtbl.8 \out\()l, {\tbl}, \in\()l
.ifc \out, \tbl
vtbl.8 \out\()h, {\tmp}, \in\()h
.else
vtbl.8 \out\()h, {\tbl}, \in\()h
.endif
.endm
.macro __ldr, out, sym
vldr \out\()l, \sym
vldr \out\()h, \sym + 8
.endm
.macro __adr, reg, lbl
adr \reg, \lbl
THUMB( orr \reg, \reg, #1 )
.endm
.macro in_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
veor \b2, \b2, \b1
veor \b5, \b5, \b6
veor \b3, \b3, \b0
veor \b6, \b6, \b2
veor \b5, \b5, \b0
veor \b6, \b6, \b3
veor \b3, \b3, \b7
veor \b7, \b7, \b5
veor \b3, \b3, \b4
veor \b4, \b4, \b5
veor \b2, \b2, \b7
veor \b3, \b3, \b1
veor \b1, \b1, \b5
.endm
.macro out_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
veor \b0, \b0, \b6
veor \b1, \b1, \b4
veor \b4, \b4, \b6
veor \b2, \b2, \b0
veor \b6, \b6, \b1
veor \b1, \b1, \b5
veor \b5, \b5, \b3
veor \b3, \b3, \b7
veor \b7, \b7, \b5
veor \b2, \b2, \b5
veor \b4, \b4, \b7
.endm
.macro inv_in_bs_ch, b6, b1, b2, b4, b7, b0, b3, b5
veor \b1, \b1, \b7
veor \b4, \b4, \b7
veor \b7, \b7, \b5
veor \b1, \b1, \b3
veor \b2, \b2, \b5
veor \b3, \b3, \b7
veor \b6, \b6, \b1
veor \b2, \b2, \b0
veor \b5, \b5, \b3
veor \b4, \b4, \b6
veor \b0, \b0, \b6
veor \b1, \b1, \b4
.endm
.macro inv_out_bs_ch, b6, b5, b0, b3, b7, b1, b4, b2
veor \b1, \b1, \b5
veor \b2, \b2, \b7
veor \b3, \b3, \b1
veor \b4, \b4, \b5
veor \b7, \b7, \b5
veor \b3, \b3, \b4
veor \b5, \b5, \b0
veor \b3, \b3, \b7
veor \b6, \b6, \b2
veor \b2, \b2, \b1
veor \b6, \b6, \b3
veor \b3, \b3, \b0
veor \b5, \b5, \b6
.endm
.macro mul_gf4, x0, x1, y0, y1, t0, t1
veor \t0, \y0, \y1
vand \t0, \t0, \x0
veor \x0, \x0, \x1
vand \t1, \x1, \y0
vand \x0, \x0, \y1
veor \x1, \t1, \t0
veor \x0, \x0, \t1
.endm
.macro mul_gf4_n_gf4, x0, x1, y0, y1, t0, x2, x3, y2, y3, t1
veor \t0, \y0, \y1
veor \t1, \y2, \y3
vand \t0, \t0, \x0
vand \t1, \t1, \x2
veor \x0, \x0, \x1
veor \x2, \x2, \x3
vand \x1, \x1, \y0
vand \x3, \x3, \y2
vand \x0, \x0, \y1
vand \x2, \x2, \y3
veor \x1, \x1, \x0
veor \x2, \x2, \x3
veor \x0, \x0, \t0
veor \x3, \x3, \t1
.endm
.macro mul_gf16_2, x0, x1, x2, x3, x4, x5, x6, x7, \
y0, y1, y2, y3, t0, t1, t2, t3
veor \t0, \x0, \x2
veor \t1, \x1, \x3
mul_gf4 \x0, \x1, \y0, \y1, \t2, \t3
veor \y0, \y0, \y2
veor \y1, \y1, \y3
mul_gf4_n_gf4 \t0, \t1, \y0, \y1, \t3, \x2, \x3, \y2, \y3, \t2
veor \x0, \x0, \t0
veor \x2, \x2, \t0
veor \x1, \x1, \t1
veor \x3, \x3, \t1
veor \t0, \x4, \x6
veor \t1, \x5, \x7
mul_gf4_n_gf4 \t0, \t1, \y0, \y1, \t3, \x6, \x7, \y2, \y3, \t2
veor \y0, \y0, \y2
veor \y1, \y1, \y3
mul_gf4 \x4, \x5, \y0, \y1, \t2, \t3
veor \x4, \x4, \t0
veor \x6, \x6, \t0
veor \x5, \x5, \t1
veor \x7, \x7, \t1
.endm
.macro inv_gf256, x0, x1, x2, x3, x4, x5, x6, x7, \
t0, t1, t2, t3, s0, s1, s2, s3
veor \t3, \x4, \x6
veor \t0, \x5, \x7
veor \t1, \x1, \x3
veor \s1, \x7, \x6
veor \s0, \x0, \x2
veor \s3, \t3, \t0
vorr \t2, \t0, \t1
vand \s2, \t3, \s0
vorr \t3, \t3, \s0
veor \s0, \s0, \t1
vand \t0, \t0, \t1
veor \t1, \x3, \x2
vand \s3, \s3, \s0
vand \s1, \s1, \t1
veor \t1, \x4, \x5
veor \s0, \x1, \x0
veor \t3, \t3, \s1
veor \t2, \t2, \s1
vand \s1, \t1, \s0
vorr \t1, \t1, \s0
veor \t3, \t3, \s3
veor \t0, \t0, \s1
veor \t2, \t2, \s2
veor \t1, \t1, \s3
veor \t0, \t0, \s2
vand \s0, \x7, \x3
veor \t1, \t1, \s2
vand \s1, \x6, \x2
vand \s2, \x5, \x1
vorr \s3, \x4, \x0
veor \t3, \t3, \s0
veor \t1, \t1, \s2
veor \s0, \t0, \s3
veor \t2, \t2, \s1
vand \s2, \t3, \t1
veor \s1, \t2, \s2
veor \s3, \s0, \s2
vbsl \s1, \t1, \s0
vmvn \t0, \s0
vbsl \s0, \s1, \s3
vbsl \t0, \s1, \s3
vbsl \s3, \t3, \t2
veor \t3, \t3, \t2
vand \s2, \s0, \s3
veor \t1, \t1, \t0
veor \s2, \s2, \t3
mul_gf16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \
\s3, \s2, \s1, \t1, \s0, \t0, \t2, \t3
.endm
.macro sbox, b0, b1, b2, b3, b4, b5, b6, b7, \
t0, t1, t2, t3, s0, s1, s2, s3
in_bs_ch \b0, \b1, \b2, \b3, \b4, \b5, \b6, \b7
inv_gf256 \b6, \b5, \b0, \b3, \b7, \b1, \b4, \b2, \
\t0, \t1, \t2, \t3, \s0, \s1, \s2, \s3
out_bs_ch \b7, \b1, \b4, \b2, \b6, \b5, \b0, \b3
.endm
.macro inv_sbox, b0, b1, b2, b3, b4, b5, b6, b7, \
t0, t1, t2, t3, s0, s1, s2, s3
inv_in_bs_ch \b0, \b1, \b2, \b3, \b4, \b5, \b6, \b7
inv_gf256 \b5, \b1, \b2, \b6, \b3, \b7, \b0, \b4, \
\t0, \t1, \t2, \t3, \s0, \s1, \s2, \s3
inv_out_bs_ch \b3, \b7, \b0, \b4, \b5, \b1, \b2, \b6
.endm
.macro shift_rows, x0, x1, x2, x3, x4, x5, x6, x7, \
t0, t1, t2, t3, mask
vld1.8 {\t0-\t1}, [bskey, :256]!
veor \t0, \t0, \x0
vld1.8 {\t2-\t3}, [bskey, :256]!
veor \t1, \t1, \x1
__tbl \x0, \t0, \mask
veor \t2, \t2, \x2
__tbl \x1, \t1, \mask
vld1.8 {\t0-\t1}, [bskey, :256]!
veor \t3, \t3, \x3
__tbl \x2, \t2, \mask
__tbl \x3, \t3, \mask
vld1.8 {\t2-\t3}, [bskey, :256]!
veor \t0, \t0, \x4
veor \t1, \t1, \x5
__tbl \x4, \t0, \mask
veor \t2, \t2, \x6
__tbl \x5, \t1, \mask
veor \t3, \t3, \x7
__tbl \x6, \t2, \mask
__tbl \x7, \t3, \mask
.endm
.macro inv_shift_rows, x0, x1, x2, x3, x4, x5, x6, x7, \
t0, t1, t2, t3, mask
__tbl \x0, \x0, \mask, \t0
__tbl \x1, \x1, \mask, \t1
__tbl \x2, \x2, \mask, \t2
__tbl \x3, \x3, \mask, \t3
__tbl \x4, \x4, \mask, \t0
__tbl \x5, \x5, \mask, \t1
__tbl \x6, \x6, \mask, \t2
__tbl \x7, \x7, \mask, \t3
.endm
.macro mix_cols, x0, x1, x2, x3, x4, x5, x6, x7, \
t0, t1, t2, t3, t4, t5, t6, t7, inv
vext.8 \t0, \x0, \x0, #12
vext.8 \t1, \x1, \x1, #12
veor \x0, \x0, \t0
vext.8 \t2, \x2, \x2, #12
veor \x1, \x1, \t1
vext.8 \t3, \x3, \x3, #12
veor \x2, \x2, \t2
vext.8 \t4, \x4, \x4, #12
veor \x3, \x3, \t3
vext.8 \t5, \x5, \x5, #12
veor \x4, \x4, \t4
vext.8 \t6, \x6, \x6, #12
veor \x5, \x5, \t5
vext.8 \t7, \x7, \x7, #12
veor \x6, \x6, \t6
veor \t1, \t1, \x0
veor.8 \x7, \x7, \t7
vext.8 \x0, \x0, \x0, #8
veor \t2, \t2, \x1
veor \t0, \t0, \x7
veor \t1, \t1, \x7
vext.8 \x1, \x1, \x1, #8
veor \t5, \t5, \x4
veor \x0, \x0, \t0
veor \t6, \t6, \x5
veor \x1, \x1, \t1
vext.8 \t0, \x4, \x4, #8
veor \t4, \t4, \x3
vext.8 \t1, \x5, \x5, #8
veor \t7, \t7, \x6
vext.8 \x4, \x3, \x3, #8
veor \t3, \t3, \x2
vext.8 \x5, \x7, \x7, #8
veor \t4, \t4, \x7
vext.8 \x3, \x6, \x6, #8
veor \t3, \t3, \x7
vext.8 \x6, \x2, \x2, #8
veor \x7, \t1, \t5
.ifb \inv
veor \x2, \t0, \t4
veor \x4, \x4, \t3
veor \x5, \x5, \t7
veor \x3, \x3, \t6
veor \x6, \x6, \t2
.else
veor \t3, \t3, \x4
veor \x5, \x5, \t7
veor \x2, \x3, \t6
veor \x3, \t0, \t4
veor \x4, \x6, \t2
vmov \x6, \t3
.endif
.endm
.macro inv_mix_cols, x0, x1, x2, x3, x4, x5, x6, x7, \
t0, t1, t2, t3, t4, t5, t6, t7
vld1.8 {\t0-\t1}, [bskey, :256]!
veor \x0, \x0, \t0
vld1.8 {\t2-\t3}, [bskey, :256]!
veor \x1, \x1, \t1
vld1.8 {\t4-\t5}, [bskey, :256]!
veor \x2, \x2, \t2
vld1.8 {\t6-\t7}, [bskey, :256]
sub bskey, bskey, #224
veor \x3, \x3, \t3
veor \x4, \x4, \t4
veor \x5, \x5, \t5
veor \x6, \x6, \t6
veor \x7, \x7, \t7
vext.8 \t0, \x0, \x0, #8
vext.8 \t6, \x6, \x6, #8
vext.8 \t7, \x7, \x7, #8
veor \t0, \t0, \x0
vext.8 \t1, \x1, \x1, #8
veor \t6, \t6, \x6
vext.8 \t2, \x2, \x2, #8
veor \t7, \t7, \x7
vext.8 \t3, \x3, \x3, #8
veor \t1, \t1, \x1
vext.8 \t4, \x4, \x4, #8
veor \t2, \t2, \x2
vext.8 \t5, \x5, \x5, #8
veor \t3, \t3, \x3
veor \t4, \t4, \x4
veor \t5, \t5, \x5
veor \x0, \x0, \t6
veor \x1, \x1, \t6
veor \x2, \x2, \t0
veor \x4, \x4, \t2
veor \x3, \x3, \t1
veor \x1, \x1, \t7
veor \x2, \x2, \t7
veor \x4, \x4, \t6
veor \x5, \x5, \t3
veor \x3, \x3, \t6
veor \x6, \x6, \t4
veor \x4, \x4, \t7
veor \x5, \x5, \t7
veor \x7, \x7, \t5
mix_cols \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \
\t0, \t1, \t2, \t3, \t4, \t5, \t6, \t7, 1
.endm
.macro swapmove_2x, a0, b0, a1, b1, n, mask, t0, t1
vshr.u64 \t0, \b0, #\n
vshr.u64 \t1, \b1, #\n
veor \t0, \t0, \a0
veor \t1, \t1, \a1
vand \t0, \t0, \mask
vand \t1, \t1, \mask
veor \a0, \a0, \t0
vshl.s64 \t0, \t0, #\n
veor \a1, \a1, \t1
vshl.s64 \t1, \t1, #\n
veor \b0, \b0, \t0
veor \b1, \b1, \t1
.endm
.macro bitslice, x7, x6, x5, x4, x3, x2, x1, x0, t0, t1, t2, t3
vmov.i8 \t0, #0x55
vmov.i8 \t1, #0x33
swapmove_2x \x0, \x1, \x2, \x3, 1, \t0, \t2, \t3
swapmove_2x \x4, \x5, \x6, \x7, 1, \t0, \t2, \t3
vmov.i8 \t0, #0x0f
swapmove_2x \x0, \x2, \x1, \x3, 2, \t1, \t2, \t3
swapmove_2x \x4, \x6, \x5, \x7, 2, \t1, \t2, \t3
swapmove_2x \x0, \x4, \x1, \x5, 4, \t0, \t2, \t3
swapmove_2x \x2, \x6, \x3, \x7, 4, \t0, \t2, \t3
.endm
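/*
 * Reference note (hedged, illustrative C only): after the bitslice macro
 * above, NEON register i holds bit i of every byte of the eight 16-byte
 * input blocks, which is what lets the sbox be computed with plain
 * logical instructions. The elementary step is the classic "swapmove",
 * applied with n = 1, 2, 4 and the 0x55/0x33/0x0f masks; per 64-bit word
 * it is simply:
 *
 *	static void swapmove_ref(u64 *a, u64 *b, int n, u64 mask)
 *	{
 *		u64 t = ((*b >> n) ^ *a) & mask;
 *
 *		*a ^= t;
 *		*b ^= t << n;
 *	}
 */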
.align 4
M0: .quad 0x02060a0e03070b0f, 0x0004080c0105090d
/*
* void aesbs_convert_key(u8 out[], u32 const rk[], int rounds)
*/
ENTRY(aesbs_convert_key)
vld1.32 {q7}, [r1]! // load round 0 key
vld1.32 {q15}, [r1]! // load round 1 key
vmov.i8 q8, #0x01 // bit masks
vmov.i8 q9, #0x02
vmov.i8 q10, #0x04
vmov.i8 q11, #0x08
vmov.i8 q12, #0x10
vmov.i8 q13, #0x20
__ldr q14, M0
sub r2, r2, #1
vst1.8 {q7}, [r0, :128]! // save round 0 key
.Lkey_loop:
__tbl q7, q15, q14
vmov.i8 q6, #0x40
vmov.i8 q15, #0x80
vtst.8 q0, q7, q8
vtst.8 q1, q7, q9
vtst.8 q2, q7, q10
vtst.8 q3, q7, q11
vtst.8 q4, q7, q12
vtst.8 q5, q7, q13
vtst.8 q6, q7, q6
vtst.8 q7, q7, q15
vld1.32 {q15}, [r1]! // load next round key
vmvn q0, q0
vmvn q1, q1
vmvn q5, q5
vmvn q6, q6
subs r2, r2, #1
vst1.8 {q0-q1}, [r0, :256]!
vst1.8 {q2-q3}, [r0, :256]!
vst1.8 {q4-q5}, [r0, :256]!
vst1.8 {q6-q7}, [r0, :256]!
bne .Lkey_loop
vmov.i8 q7, #0x63 // compose .L63
veor q15, q15, q7
vst1.8 {q15}, [r0, :128]
bx lr
ENDPROC(aesbs_convert_key)
.align 4
M0SR: .quad 0x0a0e02060f03070b, 0x0004080c05090d01
aesbs_encrypt8:
vld1.8 {q9}, [bskey, :128]! // round 0 key
__ldr q8, M0SR
veor q10, q0, q9 // xor with round0 key
veor q11, q1, q9
__tbl q0, q10, q8
veor q12, q2, q9
__tbl q1, q11, q8
veor q13, q3, q9
__tbl q2, q12, q8
veor q14, q4, q9
__tbl q3, q13, q8
veor q15, q5, q9
__tbl q4, q14, q8
veor q10, q6, q9
__tbl q5, q15, q8
veor q11, q7, q9
__tbl q6, q10, q8
__tbl q7, q11, q8
bitslice q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11
sub rounds, rounds, #1
b .Lenc_sbox
.align 5
SR: .quad 0x0504070600030201, 0x0f0e0d0c0a09080b
SRM0: .quad 0x0304090e00050a0f, 0x01060b0c0207080d
.Lenc_last:
__ldr q12, SRM0
.Lenc_loop:
shift_rows q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12
.Lenc_sbox:
sbox q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12, \
q13, q14, q15
subs rounds, rounds, #1
bcc .Lenc_done
mix_cols q0, q1, q4, q6, q3, q7, q2, q5, q8, q9, q10, q11, q12, \
q13, q14, q15
beq .Lenc_last
__ldr q12, SR
b .Lenc_loop
.Lenc_done:
vld1.8 {q12}, [bskey, :128] // last round key
bitslice q0, q1, q4, q6, q3, q7, q2, q5, q8, q9, q10, q11
veor q0, q0, q12
veor q1, q1, q12
veor q4, q4, q12
veor q6, q6, q12
veor q3, q3, q12
veor q7, q7, q12
veor q2, q2, q12
veor q5, q5, q12
bx lr
ENDPROC(aesbs_encrypt8)
.align 4
M0ISR: .quad 0x0a0e0206070b0f03, 0x0004080c0d010509
aesbs_decrypt8:
add bskey, bskey, rounds, lsl #7
sub bskey, bskey, #112
vld1.8 {q9}, [bskey, :128] // round 0 key
sub bskey, bskey, #128
__ldr q8, M0ISR
veor q10, q0, q9 // xor with round0 key
veor q11, q1, q9
__tbl q0, q10, q8
veor q12, q2, q9
__tbl q1, q11, q8
veor q13, q3, q9
__tbl q2, q12, q8
veor q14, q4, q9
__tbl q3, q13, q8
veor q15, q5, q9
__tbl q4, q14, q8
veor q10, q6, q9
__tbl q5, q15, q8
veor q11, q7, q9
__tbl q6, q10, q8
__tbl q7, q11, q8
bitslice q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11
sub rounds, rounds, #1
b .Ldec_sbox
.align 5
ISR: .quad 0x0504070602010003, 0x0f0e0d0c080b0a09
ISRM0: .quad 0x01040b0e0205080f, 0x0306090c00070a0d
.Ldec_last:
__ldr q12, ISRM0
.Ldec_loop:
inv_shift_rows q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12
.Ldec_sbox:
inv_sbox q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12, \
q13, q14, q15
subs rounds, rounds, #1
bcc .Ldec_done
inv_mix_cols q0, q1, q6, q4, q2, q7, q3, q5, q8, q9, q10, q11, q12, \
q13, q14, q15
beq .Ldec_last
__ldr q12, ISR
b .Ldec_loop
.Ldec_done:
add bskey, bskey, #112
vld1.8 {q12}, [bskey, :128] // last round key
bitslice q0, q1, q6, q4, q2, q7, q3, q5, q8, q9, q10, q11
veor q0, q0, q12
veor q1, q1, q12
veor q6, q6, q12
veor q4, q4, q12
veor q2, q2, q12
veor q7, q7, q12
veor q3, q3, q12
veor q5, q5, q12
bx lr
ENDPROC(aesbs_decrypt8)
/*
* aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
* int blocks)
* aesbs_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
* int blocks)
*/
.macro __ecb_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7
push {r4-r6, lr}
ldr r5, [sp, #16] // number of blocks
99: __adr ip, 0f
and lr, r5, #7
cmp r5, #8
sub ip, ip, lr, lsl #2
bxlt ip // computed goto if blocks < 8
vld1.8 {q0}, [r1]!
vld1.8 {q1}, [r1]!
vld1.8 {q2}, [r1]!
vld1.8 {q3}, [r1]!
vld1.8 {q4}, [r1]!
vld1.8 {q5}, [r1]!
vld1.8 {q6}, [r1]!
vld1.8 {q7}, [r1]!
0: mov bskey, r2
mov rounds, r3
bl \do8
__adr ip, 1f
and lr, r5, #7
cmp r5, #8
sub ip, ip, lr, lsl #2
bxlt ip // computed goto if blocks < 8
vst1.8 {\o0}, [r0]!
vst1.8 {\o1}, [r0]!
vst1.8 {\o2}, [r0]!
vst1.8 {\o3}, [r0]!
vst1.8 {\o4}, [r0]!
vst1.8 {\o5}, [r0]!
vst1.8 {\o6}, [r0]!
vst1.8 {\o7}, [r0]!
1: subs r5, r5, #8
bgt 99b
pop {r4-r6, pc}
.endm
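// Note on the "computed goto if blocks < 8" idiom used above and in the
// other mode routines below: the code branches into the middle of the
// load (and matching store) sequence so that only the last 'blocks'
// entries are transferred, Duff's-device style. A hedged C sketch of the
// equivalent control flow (helper names are illustrative; u8 and memcpy
// are assumed):
//
//	static void load_last_n_of_8(u8 dst[8][16], const u8 *src, int n)
//	{
//		switch (n) {				// n = 1..8 blocks
//		case 8: memcpy(dst[0], src, 16); src += 16;	// fall through
//		case 7: memcpy(dst[1], src, 16); src += 16;	// fall through
//		case 6: memcpy(dst[2], src, 16); src += 16;	// fall through
//		case 5: memcpy(dst[3], src, 16); src += 16;	// fall through
//		case 4: memcpy(dst[4], src, 16); src += 16;	// fall through
//		case 3: memcpy(dst[5], src, 16); src += 16;	// fall through
//		case 2: memcpy(dst[6], src, 16); src += 16;	// fall through
//		case 1: memcpy(dst[7], src, 16);
//		}
//	}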
.align 4
ENTRY(aesbs_ecb_encrypt)
__ecb_crypt aesbs_encrypt8, q0, q1, q4, q6, q3, q7, q2, q5
ENDPROC(aesbs_ecb_encrypt)
.align 4
ENTRY(aesbs_ecb_decrypt)
__ecb_crypt aesbs_decrypt8, q0, q1, q6, q4, q2, q7, q3, q5
ENDPROC(aesbs_ecb_decrypt)
/*
* aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
* int rounds, int blocks, u8 iv[])
*/
.align 4
ENTRY(aesbs_cbc_decrypt)
mov ip, sp
push {r4-r6, lr}
ldm ip, {r5-r6} // load args 4-5
99: __adr ip, 0f
and lr, r5, #7
cmp r5, #8
sub ip, ip, lr, lsl #2
mov lr, r1
bxlt ip // computed goto if blocks < 8
vld1.8 {q0}, [lr]!
vld1.8 {q1}, [lr]!
vld1.8 {q2}, [lr]!
vld1.8 {q3}, [lr]!
vld1.8 {q4}, [lr]!
vld1.8 {q5}, [lr]!
vld1.8 {q6}, [lr]!
vld1.8 {q7}, [lr]
0: mov bskey, r2
mov rounds, r3
bl aesbs_decrypt8
vld1.8 {q8}, [r6]
vmov q9, q8
vmov q10, q8
vmov q11, q8
vmov q12, q8
vmov q13, q8
vmov q14, q8
vmov q15, q8
__adr ip, 1f
and lr, r5, #7
cmp r5, #8
sub ip, ip, lr, lsl #2
bxlt ip // computed goto if blocks < 8
vld1.8 {q9}, [r1]!
vld1.8 {q10}, [r1]!
vld1.8 {q11}, [r1]!
vld1.8 {q12}, [r1]!
vld1.8 {q13}, [r1]!
vld1.8 {q14}, [r1]!
vld1.8 {q15}, [r1]!
W(nop)
1: __adr ip, 2f
sub ip, ip, lr, lsl #3
bxlt ip // computed goto if blocks < 8
veor q0, q0, q8
vst1.8 {q0}, [r0]!
veor q1, q1, q9
vst1.8 {q1}, [r0]!
veor q6, q6, q10
vst1.8 {q6}, [r0]!
veor q4, q4, q11
vst1.8 {q4}, [r0]!
veor q2, q2, q12
vst1.8 {q2}, [r0]!
veor q7, q7, q13
vst1.8 {q7}, [r0]!
veor q3, q3, q14
vst1.8 {q3}, [r0]!
veor q5, q5, q15
vld1.8 {q8}, [r1]! // load next round's iv
2: vst1.8 {q5}, [r0]!
subs r5, r5, #8
vst1.8 {q8}, [r6] // store next round's iv
bgt 99b
pop {r4-r6, pc}
ENDPROC(aesbs_cbc_decrypt)
.macro next_ctr, q
vmov.32 \q\()h[1], r10
adds r10, r10, #1
vmov.32 \q\()h[0], r9
adcs r9, r9, #0
vmov.32 \q\()l[1], r8
adcs r8, r8, #0
vmov.32 \q\()l[0], r7
adc r7, r7, #0
vrev32.8 \q, \q
.endm
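// For reference (hedged, illustrative C only): the counter block is big
// endian in memory, so next_ctr emits the current value held in r7-r10
// into a q register (byte-swapped back via vrev32.8) and then increments
// the 128-bit value, with the carry rippling up from the lowest word:
//
//	static void ctr128_inc_ref(u32 w[4])	// w[0] = most significant word
//	{
//		int i;
//
//		for (i = 3; i >= 0; i--)
//			if (++w[i] != 0)
//				break;		// no carry left, stop
//	}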
/*
* aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
* int rounds, int blocks, u8 ctr[], u8 final[])
*/
ENTRY(aesbs_ctr_encrypt)
mov ip, sp
push {r4-r10, lr}
ldm ip, {r5-r7} // load args 4-6
teq r7, #0
addne r5, r5, #1 // one extra block if final != 0
vld1.8 {q0}, [r6] // load counter
vrev32.8 q1, q0
vmov r9, r10, d3
vmov r7, r8, d2
adds r10, r10, #1
adcs r9, r9, #0
adcs r8, r8, #0
adc r7, r7, #0
99: vmov q1, q0
vmov q2, q0
vmov q3, q0
vmov q4, q0
vmov q5, q0
vmov q6, q0
vmov q7, q0
__adr ip, 0f
sub lr, r5, #1
and lr, lr, #7
cmp r5, #8
sub ip, ip, lr, lsl #5
sub ip, ip, lr, lsl #2
bxlt ip // computed goto if blocks < 8
next_ctr q1
next_ctr q2
next_ctr q3
next_ctr q4
next_ctr q5
next_ctr q6
next_ctr q7
0: mov bskey, r2
mov rounds, r3
bl aesbs_encrypt8
__adr ip, 1f
and lr, r5, #7
cmp r5, #8
movgt r4, #0
ldrle r4, [sp, #40] // load final in the last round
sub ip, ip, lr, lsl #2
bxlt ip // computed goto if blocks < 8
vld1.8 {q8}, [r1]!
vld1.8 {q9}, [r1]!
vld1.8 {q10}, [r1]!
vld1.8 {q11}, [r1]!
vld1.8 {q12}, [r1]!
vld1.8 {q13}, [r1]!
vld1.8 {q14}, [r1]!
teq r4, #0 // skip last block if 'final'
1: bne 2f
vld1.8 {q15}, [r1]!
2: __adr ip, 3f
cmp r5, #8
sub ip, ip, lr, lsl #3
bxlt ip // computed goto if blocks < 8
veor q0, q0, q8
vst1.8 {q0}, [r0]!
veor q1, q1, q9
vst1.8 {q1}, [r0]!
veor q4, q4, q10
vst1.8 {q4}, [r0]!
veor q6, q6, q11
vst1.8 {q6}, [r0]!
veor q3, q3, q12
vst1.8 {q3}, [r0]!
veor q7, q7, q13
vst1.8 {q7}, [r0]!
veor q2, q2, q14
vst1.8 {q2}, [r0]!
teq r4, #0 // skip last block if 'final'
W(bne) 5f
3: veor q5, q5, q15
vst1.8 {q5}, [r0]!
4: next_ctr q0
subs r5, r5, #8
bgt 99b
vst1.8 {q0}, [r6]
pop {r4-r10, pc}
5: vst1.8 {q5}, [r4]
b 4b
ENDPROC(aesbs_ctr_encrypt)
.macro next_tweak, out, in, const, tmp
vshr.s64 \tmp, \in, #63
vand \tmp, \tmp, \const
vadd.u64 \out, \in, \in
vext.8 \tmp, \tmp, \tmp, #8
veor \out, \out, \tmp
.endm
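// For reference (hedged, illustrative C only): next_tweak multiplies the
// XTS tweak by x in GF(2^128), i.e. shifts the 128-bit value left one bit
// and, if a bit falls off the top, folds it back in with the 0x87 term
// kept in .Lxts_mul_x below:
//
//	static void xts_next_tweak_ref(u64 t[2])	// t[0] holds the low 64 bits
//	{
//		int carry = t[1] >> 63;
//
//		t[1] = (t[1] << 1) | (t[0] >> 63);
//		t[0] <<= 1;
//		if (carry)
//			t[0] ^= 0x87;
//	}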
.align 4
.Lxts_mul_x:
.quad 1, 0x87
/*
* aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
* int blocks, u8 iv[])
* aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
* int blocks, u8 iv[])
*/
__xts_prepare8:
vld1.8 {q14}, [r7] // load iv
__ldr q15, .Lxts_mul_x // load tweak mask
vmov q12, q14
__adr ip, 0f
and r4, r6, #7
cmp r6, #8
sub ip, ip, r4, lsl #5
mov r4, sp
bxlt ip // computed goto if blocks < 8
vld1.8 {q0}, [r1]!
next_tweak q12, q14, q15, q13
veor q0, q0, q14
vst1.8 {q14}, [r4, :128]!
vld1.8 {q1}, [r1]!
next_tweak q14, q12, q15, q13
veor q1, q1, q12
vst1.8 {q12}, [r4, :128]!
vld1.8 {q2}, [r1]!
next_tweak q12, q14, q15, q13
veor q2, q2, q14
vst1.8 {q14}, [r4, :128]!
vld1.8 {q3}, [r1]!
next_tweak q14, q12, q15, q13
veor q3, q3, q12
vst1.8 {q12}, [r4, :128]!
vld1.8 {q4}, [r1]!
next_tweak q12, q14, q15, q13
veor q4, q4, q14
vst1.8 {q14}, [r4, :128]!
vld1.8 {q5}, [r1]!
next_tweak q14, q12, q15, q13
veor q5, q5, q12
vst1.8 {q12}, [r4, :128]!
vld1.8 {q6}, [r1]!
next_tweak q12, q14, q15, q13
veor q6, q6, q14
vst1.8 {q14}, [r4, :128]!
vld1.8 {q7}, [r1]!
next_tweak q14, q12, q15, q13
veor q7, q7, q12
vst1.8 {q12}, [r4, :128]
0: vst1.8 {q14}, [r7] // store next iv
bx lr
ENDPROC(__xts_prepare8)
.macro __xts_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7
push {r4-r8, lr}
mov r5, sp // preserve sp
ldrd r6, r7, [sp, #24] // get blocks and iv args
sub ip, sp, #128 // make room for 8x tweak
bic ip, ip, #0xf // align sp to 16 bytes
mov sp, ip
99: bl __xts_prepare8
mov bskey, r2
mov rounds, r3
bl \do8
__adr ip, 0f
and lr, r6, #7
cmp r6, #8
sub ip, ip, lr, lsl #2
mov r4, sp
bxlt ip // computed goto if blocks < 8
vld1.8 {q8}, [r4, :128]!
vld1.8 {q9}, [r4, :128]!
vld1.8 {q10}, [r4, :128]!
vld1.8 {q11}, [r4, :128]!
vld1.8 {q12}, [r4, :128]!
vld1.8 {q13}, [r4, :128]!
vld1.8 {q14}, [r4, :128]!
vld1.8 {q15}, [r4, :128]
0: __adr ip, 1f
sub ip, ip, lr, lsl #3
bxlt ip // computed goto if blocks < 8
veor \o0, \o0, q8
vst1.8 {\o0}, [r0]!
veor \o1, \o1, q9
vst1.8 {\o1}, [r0]!
veor \o2, \o2, q10
vst1.8 {\o2}, [r0]!
veor \o3, \o3, q11
vst1.8 {\o3}, [r0]!
veor \o4, \o4, q12
vst1.8 {\o4}, [r0]!
veor \o5, \o5, q13
vst1.8 {\o5}, [r0]!
veor \o6, \o6, q14
vst1.8 {\o6}, [r0]!
veor \o7, \o7, q15
vst1.8 {\o7}, [r0]!
1: subs r6, r6, #8
bgt 99b
mov sp, r5
pop {r4-r8, pc}
.endm
ENTRY(aesbs_xts_encrypt)
__xts_crypt aesbs_encrypt8, q0, q1, q4, q6, q3, q7, q2, q5
ENDPROC(aesbs_xts_encrypt)
ENTRY(aesbs_xts_decrypt)
__xts_crypt aesbs_decrypt8, q0, q1, q6, q4, q2, q7, q3, q5
ENDPROC(aesbs_xts_decrypt)
arch/arm/crypto/crc32-ce-core.S
/*
* Accelerated CRC32(C) using ARM CRC, NEON and Crypto Extensions instructions
*
* Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 only,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see http://www.gnu.org/licenses
*
* Please visit http://www.xyratex.com/contact if you need additional
* information or have any questions.
*
* GPL HEADER END
*/
/*
* Copyright 2012 Xyratex Technology Limited
*
* Using hardware provided PCLMULQDQ instruction to accelerate the CRC32
* calculation.
* CRC32 polynomial:0x04c11db7(BE)/0xEDB88320(LE)
* PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found
* at:
* http://www.intel.com/products/processor/manuals/
* Intel(R) 64 and IA-32 Architectures Software Developer's Manual
* Volume 2B: Instruction Set Reference, N-Z
*
* Authors: Gregory Prestas <Gregory_Prestas@us.xyratex.com>
* Alexander Boyko <Alexander_Boyko@xyratex.com>
*/
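/*
 * For reference, a hedged bit-at-a-time C model of the CRC update the
 * routines below accelerate (illustrative only, not the kernel crc32
 * API). Both variants use the reflected form of their polynomial:
 * 0xEDB88320 for CRC32 as noted above, 0x82F63B78 for CRC32C.
 *
 *	static u32 crc32_bitwise_ref(u32 crc, const u8 *p, unsigned int len,
 *				     u32 poly)
 *	{
 *		while (len--) {
 *			int i;
 *
 *			crc ^= *p++;
 *			for (i = 0; i < 8; i++)
 *				crc = (crc >> 1) ^ ((crc & 1) ? poly : 0);
 *		}
 *		return crc;
 *	}
 */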
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
.align 6
.arch armv8-a
.arch_extension crc
.fpu crypto-neon-fp-armv8
.Lcrc32_constants:
/*
* [(x4*128+32 mod P(x) << 32)]' << 1 = 0x154442bd4
* #define CONSTANT_R1 0x154442bd4LL
*
* [(x4*128-32 mod P(x) << 32)]' << 1 = 0x1c6e41596
* #define CONSTANT_R2 0x1c6e41596LL
*/
.quad 0x0000000154442bd4
.quad 0x00000001c6e41596
/*
* [(x128+32 mod P(x) << 32)]' << 1 = 0x1751997d0
* #define CONSTANT_R3 0x1751997d0LL
*
* [(x128-32 mod P(x) << 32)]' << 1 = 0x0ccaa009e
* #define CONSTANT_R4 0x0ccaa009eLL
*/
.quad 0x00000001751997d0
.quad 0x00000000ccaa009e
/*
* [(x64 mod P(x) << 32)]' << 1 = 0x163cd6124
* #define CONSTANT_R5 0x163cd6124LL
*/
.quad 0x0000000163cd6124
.quad 0x00000000FFFFFFFF
/*
* #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL
*
* Barrett Reduction constant (u64`) = u` = (x**64 / P(x))`
* = 0x1F7011641LL
* #define CONSTANT_RU 0x1F7011641LL
*/
.quad 0x00000001DB710641
.quad 0x00000001F7011641
.Lcrc32c_constants:
.quad 0x00000000740eef02
.quad 0x000000009e4addf8
.quad 0x00000000f20c0dfe
.quad 0x000000014cd00bd6
.quad 0x00000000dd45aab8
.quad 0x00000000FFFFFFFF
.quad 0x0000000105ec76f0
.quad 0x00000000dea713f1
dCONSTANTl .req d0
dCONSTANTh .req d1
qCONSTANT .req q0
BUF .req r0
LEN .req r1
CRC .req r2
qzr .req q9
/**
* Calculate crc32
* BUF - buffer
* LEN - sizeof buffer (multiple of 16 bytes), LEN should be > 63
* CRC - initial crc32
* return in r0: crc32
* uint crc32_pmull_le(unsigned char const *buffer,
* size_t len, uint crc32)
*/
ENTRY(crc32_pmull_le)
adr r3, .Lcrc32_constants
b 0f
ENTRY(crc32c_pmull_le)
adr r3, .Lcrc32c_constants
0: bic LEN, LEN, #15
vld1.8 {q1-q2}, [BUF, :128]!
vld1.8 {q3-q4}, [BUF, :128]!
vmov.i8 qzr, #0
vmov.i8 qCONSTANT, #0
vmov.32 dCONSTANTl[0], CRC
veor.8 d2, d2, dCONSTANTl
sub LEN, LEN, #0x40
cmp LEN, #0x40
blt less_64
vld1.64 {qCONSTANT}, [r3]
loop_64: /* 64 bytes Full cache line folding */
sub LEN, LEN, #0x40
vmull.p64 q5, d3, dCONSTANTh
vmull.p64 q6, d5, dCONSTANTh
vmull.p64 q7, d7, dCONSTANTh
vmull.p64 q8, d9, dCONSTANTh
vmull.p64 q1, d2, dCONSTANTl
vmull.p64 q2, d4, dCONSTANTl
vmull.p64 q3, d6, dCONSTANTl
vmull.p64 q4, d8, dCONSTANTl
veor.8 q1, q1, q5
vld1.8 {q5}, [BUF, :128]!
veor.8 q2, q2, q6
vld1.8 {q6}, [BUF, :128]!
veor.8 q3, q3, q7
vld1.8 {q7}, [BUF, :128]!
veor.8 q4, q4, q8
vld1.8 {q8}, [BUF, :128]!
veor.8 q1, q1, q5
veor.8 q2, q2, q6
veor.8 q3, q3, q7
veor.8 q4, q4, q8
cmp LEN, #0x40
bge loop_64
less_64: /* Folding cache line into 128bit */
vldr dCONSTANTl, [r3, #16]
vldr dCONSTANTh, [r3, #24]
vmull.p64 q5, d3, dCONSTANTh
vmull.p64 q1, d2, dCONSTANTl
veor.8 q1, q1, q5
veor.8 q1, q1, q2
vmull.p64 q5, d3, dCONSTANTh
vmull.p64 q1, d2, dCONSTANTl
veor.8 q1, q1, q5
veor.8 q1, q1, q3
vmull.p64 q5, d3, dCONSTANTh
vmull.p64 q1, d2, dCONSTANTl
veor.8 q1, q1, q5
veor.8 q1, q1, q4
teq LEN, #0
beq fold_64
loop_16: /* Folding rest buffer into 128bit */
subs LEN, LEN, #0x10
vld1.8 {q2}, [BUF, :128]!
vmull.p64 q5, d3, dCONSTANTh
vmull.p64 q1, d2, dCONSTANTl
veor.8 q1, q1, q5
veor.8 q1, q1, q2
bne loop_16
fold_64:
/* perform the last 64 bit fold, also adds 32 zeroes
* to the input stream */
vmull.p64 q2, d2, dCONSTANTh
vext.8 q1, q1, qzr, #8
veor.8 q1, q1, q2
/* final 32-bit fold */
vldr dCONSTANTl, [r3, #32]
vldr d6, [r3, #40]
vmov.i8 d7, #0
vext.8 q2, q1, qzr, #4
vand.8 d2, d2, d6
vmull.p64 q1, d2, dCONSTANTl
veor.8 q1, q1, q2
/* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */
vldr dCONSTANTl, [r3, #48]
vldr dCONSTANTh, [r3, #56]
vand.8 q2, q1, q3
vext.8 q2, qzr, q2, #8
vmull.p64 q2, d5, dCONSTANTh
vand.8 q2, q2, q3
vmull.p64 q2, d4, dCONSTANTl
veor.8 q1, q1, q2
vmov r0, s5
bx lr
ENDPROC(crc32_pmull_le)
ENDPROC(crc32c_pmull_le)
.macro __crc32, c
subs ip, r2, #8
bmi .Ltail\c
tst r1, #3
bne .Lunaligned\c
teq ip, #0
.Laligned8\c:
ldrd r2, r3, [r1], #8
ARM_BE8(rev r2, r2 )
ARM_BE8(rev r3, r3 )
crc32\c\()w r0, r0, r2
crc32\c\()w r0, r0, r3
bxeq lr
subs ip, ip, #8
bpl .Laligned8\c
.Ltail\c:
tst ip, #4
beq 2f
ldr r3, [r1], #4
ARM_BE8(rev r3, r3 )
crc32\c\()w r0, r0, r3
2: tst ip, #2
beq 1f
ldrh r3, [r1], #2
ARM_BE8(rev16 r3, r3 )
crc32\c\()h r0, r0, r3
1: tst ip, #1
bxeq lr
ldrb r3, [r1]
crc32\c\()b r0, r0, r3
bx lr
.Lunaligned\c:
tst r1, #1
beq 2f
ldrb r3, [r1], #1
subs r2, r2, #1
crc32\c\()b r0, r0, r3
tst r1, #2
beq 0f
2: ldrh r3, [r1], #2
subs r2, r2, #2
ARM_BE8(rev16 r3, r3 )
crc32\c\()h r0, r0, r3
0: subs ip, r2, #8
bpl .Laligned8\c
b .Ltail\c
.endm
.align 5
ENTRY(crc32_armv8_le)
__crc32
ENDPROC(crc32_armv8_le)
.align 5
ENTRY(crc32c_armv8_le)
__crc32 c
ENDPROC(crc32c_armv8_le)
arch/arm/crypto/sha2-ce-core.S
/*
* sha2-ce-core.S - SHA-224/256 secure hash using ARMv8 Crypto Extensions
*
* Copyright (C) 2015 Linaro Ltd.
* Author: Ard Biesheuvel <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
.fpu crypto-neon-fp-armv8
k0 .req q7
k1 .req q8
rk .req r3
ta0 .req q9
ta1 .req q10
tb0 .req q10
tb1 .req q9
dga .req q11
dgb .req q12
dg0 .req q13
dg1 .req q14
dg2 .req q15
.macro add_only, ev, s0
vmov dg2, dg0
.ifnb \s0
vld1.32 {k\ev}, [rk, :128]!
.endif
sha256h.32 dg0, dg1, tb\ev
sha256h2.32 dg1, dg2, tb\ev
.ifnb \s0
vadd.u32 ta\ev, q\s0, k\ev
.endif
.endm
.macro add_update, ev, s0, s1, s2, s3
sha256su0.32 q\s0, q\s1
add_only \ev, \s1
sha256su1.32 q\s0, q\s2, q\s3
.endm
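/*
 * Reference note (hedged, illustrative C only): the sha256su0/sha256su1
 * pair in add_update extends the message schedule four words at a time,
 * i.e. the FIPS 180-4 recurrence
 * w[t] = sigma1(w[t-2]) + w[t-7] + sigma0(w[t-15]) + w[t-16], with
 *
 *	static inline u32 ror32_ref(u32 x, int n)
 *	{
 *		return (x >> n) | (x << (32 - n));
 *	}
 *
 *	static inline u32 sigma0_ref(u32 x)
 *	{
 *		return ror32_ref(x, 7) ^ ror32_ref(x, 18) ^ (x >> 3);
 *	}
 *
 *	static inline u32 sigma1_ref(u32 x)
 *	{
 *		return ror32_ref(x, 17) ^ ror32_ref(x, 19) ^ (x >> 10);
 *	}
 */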
.align 6
.Lsha256_rcon:
.word 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
.word 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
.word 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
.word 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
.word 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
.word 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
.word 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
.word 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
.word 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
.word 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
.word 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
.word 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
.word 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
.word 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
.word 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
.word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
/*
* void sha2_ce_transform(struct sha256_state *sst, u8 const *src,
int blocks);
*/
ENTRY(sha2_ce_transform)
/* load state */
vld1.32 {dga-dgb}, [r0]
/* load input */
0: vld1.32 {q0-q1}, [r1]!
vld1.32 {q2-q3}, [r1]!
subs r2, r2, #1
#ifndef CONFIG_CPU_BIG_ENDIAN
vrev32.8 q0, q0
vrev32.8 q1, q1
vrev32.8 q2, q2
vrev32.8 q3, q3
#endif
/* load first round constant */
adr rk, .Lsha256_rcon
vld1.32 {k0}, [rk, :128]!
vadd.u32 ta0, q0, k0
vmov dg0, dga
vmov dg1, dgb
add_update 1, 0, 1, 2, 3
add_update 0, 1, 2, 3, 0
add_update 1, 2, 3, 0, 1
add_update 0, 3, 0, 1, 2
add_update 1, 0, 1, 2, 3
add_update 0, 1, 2, 3, 0
add_update 1, 2, 3, 0, 1
add_update 0, 3, 0, 1, 2
add_update 1, 0, 1, 2, 3
add_update 0, 1, 2, 3, 0
add_update 1, 2, 3, 0, 1
add_update 0, 3, 0, 1, 2
add_only 1, 1
add_only 0, 2
add_only 1, 3
add_only 0
/* update state */
vadd.u32 dga, dga, dg0
vadd.u32 dgb, dgb, dg1
bne 0b
/* store new state */
vst1.32 {dga-dgb}, [r0]
bx lr
ENDPROC(sha2_ce_transform)
arch/arm/crypto/aes-ce-core.S
/*
* aes-ce-core.S - AES in CBC/CTR/XTS mode using ARMv8 Crypto Extensions
*
* Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
.fpu crypto-neon-fp-armv8
.align 3
.macro enc_round, state, key
aese.8 \state, \key
aesmc.8 \state, \state
.endm
.macro dec_round, state, key
aesd.8 \state, \key
aesimc.8 \state, \state
.endm
.macro enc_dround, key1, key2
enc_round q0, \key1
enc_round q0, \key2
.endm
.macro dec_dround, key1, key2
dec_round q0, \key1
dec_round q0, \key2
.endm
.macro enc_fround, key1, key2, key3
enc_round q0, \key1
aese.8 q0, \key2
veor q0, q0, \key3
.endm
.macro dec_fround, key1, key2, key3
dec_round q0, \key1
aesd.8 q0, \key2
veor q0, q0, \key3
.endm
.macro enc_dround_3x, key1, key2
enc_round q0, \key1
enc_round q1, \key1
enc_round q2, \key1
enc_round q0, \key2
enc_round q1, \key2
enc_round q2, \key2
.endm
.macro dec_dround_3x, key1, key2
dec_round q0, \key1
dec_round q1, \key1
dec_round q2, \key1
dec_round q0, \key2
dec_round q1, \key2
dec_round q2, \key2
.endm
.macro enc_fround_3x, key1, key2, key3
enc_round q0, \key1
enc_round q1, \key1
enc_round q2, \key1
aese.8 q0, \key2
aese.8 q1, \key2
aese.8 q2, \key2
veor q0, q0, \key3
veor q1, q1, \key3
veor q2, q2, \key3
.endm
.macro dec_fround_3x, key1, key2, key3
dec_round q0, \key1
dec_round q1, \key1
dec_round q2, \key1
aesd.8 q0, \key2
aesd.8 q1, \key2
aesd.8 q2, \key2
veor q0, q0, \key3
veor q1, q1, \key3
veor q2, q2, \key3
.endm
.macro do_block, dround, fround
cmp r3, #12 @ which key size?
vld1.8 {q10-q11}, [ip]!
\dround q8, q9
vld1.8 {q12-q13}, [ip]!
\dround q10, q11
vld1.8 {q10-q11}, [ip]!
\dround q12, q13
vld1.8 {q12-q13}, [ip]!
\dround q10, q11
blo 0f @ AES-128: 10 rounds
vld1.8 {q10-q11}, [ip]!
\dround q12, q13
beq 1f @ AES-192: 12 rounds
vld1.8 {q12-q13}, [ip]
\dround q10, q11
0: \fround q12, q13, q14
bx lr
1: \fround q10, q11, q14
bx lr
.endm
/*
* Internal, non-AAPCS compliant functions that implement the core AES
* transforms. These should preserve all registers except q0 - q2 and ip
* Arguments:
* q0 : first in/output block
* q1 : second in/output block (_3x version only)
* q2 : third in/output block (_3x version only)
* q8 : first round key
* q9 : second round key
* q14 : final round key
* r2 : address of round key array
* r3 : number of rounds
*/
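@ Reference note (hedged): the round count in r3 follows the usual AES
@ key-size relationship, which is what the 'cmp r3, #12' dispatch inside
@ do_block relies on. As an illustrative C helper (not kernel API):
@
@	static int aes_nrounds_ref(int key_bytes)
@	{
@		return key_bytes / 4 + 6;	/* 16 -> 10, 24 -> 12, 32 -> 14 */
@	}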
.align 6
aes_encrypt:
add ip, r2, #32 @ 3rd round key
.Laes_encrypt_tweak:
do_block enc_dround, enc_fround
ENDPROC(aes_encrypt)
.align 6
aes_decrypt:
add ip, r2, #32 @ 3rd round key
do_block dec_dround, dec_fround
ENDPROC(aes_decrypt)
.align 6
aes_encrypt_3x:
add ip, r2, #32 @ 3rd round key
do_block enc_dround_3x, enc_fround_3x
ENDPROC(aes_encrypt_3x)
.align 6
aes_decrypt_3x:
add ip, r2, #32 @ 3rd round key
do_block dec_dround_3x, dec_fround_3x
ENDPROC(aes_decrypt_3x)
.macro prepare_key, rk, rounds
add ip, \rk, \rounds, lsl #4
vld1.8 {q8-q9}, [\rk] @ load first 2 round keys
vld1.8 {q14}, [ip] @ load last round key
.endm
/*
* aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
* int blocks)
* aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
* int blocks)
*/
ENTRY(ce_aes_ecb_encrypt)
push {r4, lr}
ldr r4, [sp, #8]
prepare_key r2, r3
.Lecbencloop3x:
subs r4, r4, #3
bmi .Lecbenc1x
vld1.8 {q0-q1}, [r1]!
vld1.8 {q2}, [r1]!
bl aes_encrypt_3x
vst1.8 {q0-q1}, [r0]!
vst1.8 {q2}, [r0]!
b .Lecbencloop3x
.Lecbenc1x:
adds r4, r4, #3
beq .Lecbencout
.Lecbencloop:
vld1.8 {q0}, [r1]!
bl aes_encrypt
vst1.8 {q0}, [r0]!
subs r4, r4, #1
bne .Lecbencloop
.Lecbencout:
pop {r4, pc}
ENDPROC(ce_aes_ecb_encrypt)
ENTRY(ce_aes_ecb_decrypt)
push {r4, lr}
ldr r4, [sp, #8]
prepare_key r2, r3
.Lecbdecloop3x:
subs r4, r4, #3
bmi .Lecbdec1x
vld1.8 {q0-q1}, [r1]!
vld1.8 {q2}, [r1]!
bl aes_decrypt_3x
vst1.8 {q0-q1}, [r0]!
vst1.8 {q2}, [r0]!
b .Lecbdecloop3x
.Lecbdec1x:
adds r4, r4, #3
beq .Lecbdecout
.Lecbdecloop:
vld1.8 {q0}, [r1]!
bl aes_decrypt
vst1.8 {q0}, [r0]!
subs r4, r4, #1
bne .Lecbdecloop
.Lecbdecout:
pop {r4, pc}
ENDPROC(ce_aes_ecb_decrypt)
/*
* aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
* int blocks, u8 iv[])
* aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
* int blocks, u8 iv[])
*/
ENTRY(ce_aes_cbc_encrypt)
push {r4-r6, lr}
ldrd r4, r5, [sp, #16]
vld1.8 {q0}, [r5]
prepare_key r2, r3
.Lcbcencloop:
vld1.8 {q1}, [r1]! @ get next pt block
veor q0, q0, q1 @ ..and xor with iv
bl aes_encrypt
vst1.8 {q0}, [r0]!
subs r4, r4, #1
bne .Lcbcencloop
vst1.8 {q0}, [r5]
pop {r4-r6, pc}
ENDPROC(ce_aes_cbc_encrypt)
ENTRY(ce_aes_cbc_decrypt)
push {r4-r6, lr}
ldrd r4, r5, [sp, #16]
vld1.8 {q6}, [r5] @ keep iv in q6
prepare_key r2, r3
.Lcbcdecloop3x:
subs r4, r4, #3
bmi .Lcbcdec1x
vld1.8 {q0-q1}, [r1]!
vld1.8 {q2}, [r1]!
vmov q3, q0
vmov q4, q1
vmov q5, q2
bl aes_decrypt_3x
veor q0, q0, q6
veor q1, q1, q3
veor q2, q2, q4
vmov q6, q5
vst1.8 {q0-q1}, [r0]!
vst1.8 {q2}, [r0]!
b .Lcbcdecloop3x
.Lcbcdec1x:
adds r4, r4, #3
beq .Lcbcdecout
vmov q15, q14 @ preserve last round key
.Lcbcdecloop:
vld1.8 {q0}, [r1]! @ get next ct block
veor q14, q15, q6 @ combine prev ct with last key
vmov q6, q0
bl aes_decrypt
vst1.8 {q0}, [r0]!
subs r4, r4, #1
bne .Lcbcdecloop
.Lcbcdecout:
vst1.8 {q6}, [r5] @ keep iv in q6
pop {r4-r6, pc}
ENDPROC(ce_aes_cbc_decrypt)
/*
* aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
* int blocks, u8 ctr[])
*/
ENTRY(ce_aes_ctr_encrypt)
push {r4-r6, lr}
ldrd r4, r5, [sp, #16]
vld1.8 {q6}, [r5] @ load ctr
prepare_key r2, r3
vmov r6, s27 @ keep swabbed ctr in r6
rev r6, r6
cmn r6, r4 @ 32 bit overflow?
bcs .Lctrloop
.Lctrloop3x:
subs r4, r4, #3
bmi .Lctr1x
add r6, r6, #1
vmov q0, q6
vmov q1, q6
rev ip, r6
add r6, r6, #1
vmov q2, q6
vmov s7, ip
rev ip, r6
add r6, r6, #1
vmov s11, ip
vld1.8 {q3-q4}, [r1]!
vld1.8 {q5}, [r1]!
bl aes_encrypt_3x
veor q0, q0, q3
veor q1, q1, q4
veor q2, q2, q5
rev ip, r6
vst1.8 {q0-q1}, [r0]!
vst1.8 {q2}, [r0]!
vmov s27, ip
b .Lctrloop3x
.Lctr1x:
adds r4, r4, #3
beq .Lctrout
.Lctrloop:
vmov q0, q6
bl aes_encrypt
subs r4, r4, #1
bmi .Lctrtailblock @ blocks < 0 means tail block
vld1.8 {q3}, [r1]!
veor q3, q0, q3
vst1.8 {q3}, [r0]!
adds r6, r6, #1 @ increment BE ctr
rev ip, r6
vmov s27, ip
bcs .Lctrcarry
teq r4, #0
bne .Lctrloop
.Lctrout:
vst1.8 {q6}, [r5]
pop {r4-r6, pc}
.Lctrtailblock:
vst1.8 {q0}, [r0, :64] @ return just the key stream
pop {r4-r6, pc}
.Lctrcarry:
.irp sreg, s26, s25, s24
vmov ip, \sreg @ load next word of ctr
rev ip, ip @ ... to handle the carry
adds ip, ip, #1
rev ip, ip
vmov \sreg, ip
bcc 0f
.endr
0: teq r4, #0
beq .Lctrout
b .Lctrloop
ENDPROC(ce_aes_ctr_encrypt)
/*
* aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
* int blocks, u8 iv[], u8 const rk2[], int first)
* aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
* int blocks, u8 iv[], u8 const rk2[], int first)
*/
.macro next_tweak, out, in, const, tmp
vshr.s64 \tmp, \in, #63
vand \tmp, \tmp, \const
vadd.u64 \out, \in, \in
vext.8 \tmp, \tmp, \tmp, #8
veor \out, \out, \tmp
.endm
.align 3
.Lxts_mul_x:
.quad 1, 0x87
ce_aes_xts_init:
vldr d14, .Lxts_mul_x
vldr d15, .Lxts_mul_x + 8
ldrd r4, r5, [sp, #16] @ load args
ldr r6, [sp, #28]
vld1.8 {q0}, [r5] @ load iv
teq r6, #1 @ start of a block?
bxne lr
@ Encrypt the IV in q0 with the second AES key. This should only
@ be done at the start of a block.
ldr r6, [sp, #24] @ load AES key 2
prepare_key r6, r3
add ip, r6, #32 @ 3rd round key of key 2
b .Laes_encrypt_tweak @ tail call
ENDPROC(ce_aes_xts_init)
ENTRY(ce_aes_xts_encrypt)
push {r4-r6, lr}
bl ce_aes_xts_init @ run shared prologue
prepare_key r2, r3
vmov q3, q0
teq r6, #0 @ start of a block?
bne .Lxtsenc3x
.Lxtsencloop3x:
next_tweak q3, q3, q7, q6
.Lxtsenc3x:
subs r4, r4, #3
bmi .Lxtsenc1x
vld1.8 {q0-q1}, [r1]! @ get 3 pt blocks
vld1.8 {q2}, [r1]!
next_tweak q4, q3, q7, q6
veor q0, q0, q3
next_tweak q5, q4, q7, q6
veor q1, q1, q4
veor q2, q2, q5
bl aes_encrypt_3x
veor q0, q0, q3
veor q1, q1, q4
veor q2, q2, q5
vst1.8 {q0-q1}, [r0]! @ write 3 ct blocks
vst1.8 {q2}, [r0]!
vmov q3, q5
teq r4, #0
beq .Lxtsencout
b .Lxtsencloop3x
.Lxtsenc1x:
adds r4, r4, #3
beq .Lxtsencout
.Lxtsencloop:
vld1.8 {q0}, [r1]!
veor q0, q0, q3
bl aes_encrypt
veor q0, q0, q3
vst1.8 {q0}, [r0]!
subs r4, r4, #1
beq .Lxtsencout
next_tweak q3, q3, q7, q6
b .Lxtsencloop
.Lxtsencout:
vst1.8 {q3}, [r5]
pop {r4-r6, pc}
ENDPROC(ce_aes_xts_encrypt)
ENTRY(ce_aes_xts_decrypt)
push {r4-r6, lr}
bl ce_aes_xts_init @ run shared prologue
prepare_key r2, r3
vmov q3, q0
teq r6, #0 @ start of a block?
bne .Lxtsdec3x
.Lxtsdecloop3x:
next_tweak q3, q3, q7, q6
.Lxtsdec3x:
subs r4, r4, #3
bmi .Lxtsdec1x
vld1.8 {q0-q1}, [r1]! @ get 3 ct blocks
vld1.8 {q2}, [r1]!
next_tweak q4, q3, q7, q6
veor q0, q0, q3
next_tweak q5, q4, q7, q6
veor q1, q1, q4
veor q2, q2, q5
bl aes_decrypt_3x
veor q0, q0, q3
veor q1, q1, q4
veor q2, q2, q5
vst1.8 {q0-q1}, [r0]! @ write 3 pt blocks
vst1.8 {q2}, [r0]!
vmov q3, q5
teq r4, #0
beq .Lxtsdecout
b .Lxtsdecloop3x
.Lxtsdec1x:
adds r4, r4, #3
beq .Lxtsdecout
.Lxtsdecloop:
vld1.8 {q0}, [r1]!
veor q0, q0, q3
add ip, r2, #32 @ 3rd round key
bl aes_decrypt
veor q0, q0, q3
vst1.8 {q0}, [r0]!
subs r4, r4, #1
beq .Lxtsdecout
next_tweak q3, q3, q7, q6
b .Lxtsdecloop
.Lxtsdecout:
vst1.8 {q3}, [r5]
pop {r4-r6, pc}
ENDPROC(ce_aes_xts_decrypt)
/*
* u32 ce_aes_sub(u32 input) - use the aese instruction to perform the
* AES sbox substitution on each byte in
* 'input'
*/
ENTRY(ce_aes_sub)
vdup.32 q1, r0
veor q0, q0, q0
aese.8 q0, q1
vmov r0, s0
bx lr
ENDPROC(ce_aes_sub)
/*
* void ce_aes_invert(u8 *dst, u8 *src) - perform the Inverse MixColumns
* operation on round key *src
*/
ENTRY(ce_aes_invert)
vld1.8 {q0}, [r1]
aesimc.8 q0, q0
vst1.8 {q0}, [r0]
bx lr
ENDPROC(ce_aes_invert)
arch/arm/crypto/crct10dif-ce-core.S
//
// Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions instructions
//
// Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation.
//
//
// Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
//
// Copyright (c) 2013, Intel Corporation
//
// Authors:
// Erdinc Ozturk <erdinc.ozturk@intel.com>
// Vinodh Gopal <vinodh.gopal@intel.com>
// James Guilford <james.guilford@intel.com>
// Tim Chen <tim.c.chen@linux.intel.com>
//
// This software is available to you under a choice of one of two
// licenses. You may choose to be licensed under the terms of the GNU
// General Public License (GPL) Version 2, available from the file
// COPYING in the main directory of this source tree, or the
// OpenIB.org BSD license below:
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// * Neither the name of the Intel Corporation nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
//
// THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Function API:
// UINT16 crc_t10dif_pcl(
// UINT16 init_crc, //initial CRC value, 16 bits
// const unsigned char *buf, //buffer pointer to calculate CRC on
// UINT64 len //buffer length in bytes (64-bit data)
// );
//
// Reference paper titled "Fast CRC Computation for Generic
// Polynomials Using PCLMULQDQ Instruction"
// URL: http://www.intel.com/content/dam/www/public/us/en/documents
// /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
//
//
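//
// For reference, a hedged bit-at-a-time C model of the CRC computed here
// (illustrative only): CRC-T10DIF uses the non-reflected 16-bit
// polynomial 0x8bb7, as also noted next to the constants at the end of
// this file.
//
//	static u16 crc_t10dif_bitwise_ref(u16 crc, const u8 *p, unsigned int len)
//	{
//		while (len--) {
//			int i;
//
//			crc ^= (u16)*p++ << 8;
//			for (i = 0; i < 8; i++)
//				crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7
//						     : crc << 1;
//		}
//		return crc;
//	}
//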
#include <linux/linkage.h>
#include <asm/assembler.h>
#ifdef CONFIG_CPU_ENDIAN_BE8
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif
.text
.fpu crypto-neon-fp-armv8
arg1_low32 .req r0
arg2 .req r1
arg3 .req r2
qzr .req q13
q0l .req d0
q0h .req d1
q1l .req d2
q1h .req d3
q2l .req d4
q2h .req d5
q3l .req d6
q3h .req d7
q4l .req d8
q4h .req d9
q5l .req d10
q5h .req d11
q6l .req d12
q6h .req d13
q7l .req d14
q7h .req d15
ENTRY(crc_t10dif_pmull)
vmov.i8 qzr, #0 // init zero register
// adjust the 16-bit initial_crc value, scale it to 32 bits
lsl arg1_low32, arg1_low32, #16
// check if smaller than 256
cmp arg3, #256
// for sizes less than 128, we can't fold 64B at a time...
blt _less_than_128
// load the initial crc value
// the crc value does not need to be byte-reflected, but it needs to be
// moved to the high part of the register, because the data will be
// byte-reflected and will then line up with the initial crc in the
// correct place.
vmov s0, arg1_low32 // initial crc
vext.8 q10, qzr, q0, #4
// receive the initial 64B data, xor the initial crc value
vld1.64 {q0-q1}, [arg2]!
vld1.64 {q2-q3}, [arg2]!
vld1.64 {q4-q5}, [arg2]!
vld1.64 {q6-q7}, [arg2]!
CPU_LE( vrev64.8 q0, q0 )
CPU_LE( vrev64.8 q1, q1 )
CPU_LE( vrev64.8 q2, q2 )
CPU_LE( vrev64.8 q3, q3 )
CPU_LE( vrev64.8 q4, q4 )
CPU_LE( vrev64.8 q5, q5 )
CPU_LE( vrev64.8 q6, q6 )
CPU_LE( vrev64.8 q7, q7 )
vswp d0, d1
vswp d2, d3
vswp d4, d5
vswp d6, d7
vswp d8, d9
vswp d10, d11
vswp d12, d13
vswp d14, d15
// XOR the initial_crc value
veor.8 q0, q0, q10
adr ip, rk3
vld1.64 {q10}, [ip, :128] // q10 has rk3 and rk4
//
// we subtract 256 instead of 128 to save one instruction from the loop
//
sub arg3, arg3, #256
// at this point 128 bytes of input are already held in q0-q7, and
// _fold_64_B_loop below folds another 128 bytes of buffer per iteration
// into them, until fewer than 128 unprocessed bytes remain
// fold 128 bytes at a time, four register pairs in parallel
_fold_64_B_loop:
.macro fold64, reg1, reg2
vld1.64 {q11-q12}, [arg2]!
vmull.p64 q8, \reg1\()h, d21
vmull.p64 \reg1, \reg1\()l, d20
vmull.p64 q9, \reg2\()h, d21
vmull.p64 \reg2, \reg2\()l, d20
CPU_LE( vrev64.8 q11, q11 )
CPU_LE( vrev64.8 q12, q12 )
vswp d22, d23
vswp d24, d25
veor.8 \reg1, \reg1, q8
veor.8 \reg2, \reg2, q9
veor.8 \reg1, \reg1, q11
veor.8 \reg2, \reg2, q12
.endm
fold64 q0, q1
fold64 q2, q3
fold64 q4, q5
fold64 q6, q7
subs arg3, arg3, #128
// check if there is another 64B in the buffer to be able to fold
bge _fold_64_B_loop
// at this point, the buffer pointer is pointing at the last y bytes of
// the buffer, and the 128 bytes of folded data are held in the eight
// vector registers q0-q7
// fold the 8 vector registers to 1 vector register with different
// constants
adr ip, rk9
vld1.64 {q10}, [ip, :128]!
.macro fold16, reg, rk
vmull.p64 q8, \reg\()l, d20
vmull.p64 \reg, \reg\()h, d21
.ifnb \rk
vld1.64 {q10}, [ip, :128]!
.endif
veor.8 q7, q7, q8
veor.8 q7, q7, \reg
.endm
fold16 q0, rk11
fold16 q1, rk13
fold16 q2, rk15
fold16 q3, rk17
fold16 q4, rk19
fold16 q5, rk1
fold16 q6
// instead of 128, we add 112 (128-16) to the loop counter to save 1
// instruction from the loop; instead of a cmp instruction, we use the
// negative flag with the blt instruction
adds arg3, arg3, #(128-16)
blt _final_reduction_for_128
// now we have 16+y bytes left to reduce. 16 bytes are in register q7
// and the rest is in memory. We can fold 16 bytes at a time if y>=16
// continue folding 16B at a time
_16B_reduction_loop:
vmull.p64 q8, d14, d20
vmull.p64 q7, d15, d21
veor.8 q7, q7, q8
vld1.64 {q0}, [arg2]!
CPU_LE( vrev64.8 q0, q0 )
vswp d0, d1
veor.8 q7, q7, q0
subs arg3, arg3, #16
// instead of a cmp instruction, we utilize the flags with the
// bge instruction, equivalent of: cmp arg3, 16-16
// check if there is any more 16B in the buffer to be able to fold
bge _16B_reduction_loop
// now we have 16+z bytes left to reduce, where 0 <= z < 16.
// first, we reduce the data in the q7 register
_final_reduction_for_128:
// check if any more data to fold. If not, compute the CRC of
// the final 128 bits
adds arg3, arg3, #16
beq _128_done
// here we are getting data that is less than 16 bytes.
// since we know that there was data before the pointer, we can
// offset the input pointer before the actual point, to receive
// exactly 16 bytes. after that the registers need to be adjusted.
_get_last_two_regs:
add arg2, arg2, arg3
sub arg2, arg2, #16
vld1.64 {q1}, [arg2]
CPU_LE( vrev64.8 q1, q1 )
vswp d2, d3
// get rid of the extra data that was loaded before
// load the shift constant
adr ip, tbl_shf_table + 16
sub ip, ip, arg3
vld1.8 {q0}, [ip]
// shift v2 to the left by arg3 bytes
vtbl.8 d4, {d14-d15}, d0
vtbl.8 d5, {d14-d15}, d1
// shift v7 to the right by 16-arg3 bytes
vmov.i8 q9, #0x80
veor.8 q0, q0, q9
vtbl.8 d18, {d14-d15}, d0
vtbl.8 d19, {d14-d15}, d1
// blend
vshr.s8 q0, q0, #7 // convert to 8-bit mask
vbsl.8 q0, q2, q1
// fold 16 Bytes
vmull.p64 q8, d18, d20
vmull.p64 q7, d19, d21
veor.8 q7, q7, q8
veor.8 q7, q7, q0
_128_done:
// compute crc of a 128-bit value
vldr d20, rk5
vldr d21, rk6 // rk5 and rk6 in xmm10
// 64b fold
vext.8 q0, qzr, q7, #8
vmull.p64 q7, d15, d20
veor.8 q7, q7, q0
// 32b fold
vext.8 q0, q7, qzr, #12
vmov s31, s3
vmull.p64 q0, d0, d21
veor.8 q7, q0, q7
// barrett reduction
_barrett:
vldr d20, rk7
vldr d21, rk8
vmull.p64 q0, d15, d20
vext.8 q0, qzr, q0, #12
vmull.p64 q0, d1, d21
vext.8 q0, qzr, q0, #12
veor.8 q7, q7, q0
vmov r0, s29
_cleanup:
// scale the result back to 16 bits
lsr r0, r0, #16
bx lr
_less_than_128:
teq arg3, #0
beq _cleanup
vmov.i8 q0, #0
vmov s3, arg1_low32 // get the initial crc value
vld1.64 {q7}, [arg2]!
CPU_LE( vrev64.8 q7, q7 )
vswp d14, d15
veor.8 q7, q7, q0
cmp arg3, #16
beq _128_done // exactly 16 left
blt _less_than_16_left
// more than 16 bytes left: load the fold constants
vldr d20, rk1
vldr d21, rk2 // rk1 and rk2 in xmm10
// check if there is enough buffer to be able to fold 16B at a time
subs arg3, arg3, #32
addlt arg3, arg3, #16
blt _get_last_two_regs
b _16B_reduction_loop
_less_than_16_left:
adr ip, tbl_shf_table + 16
sub ip, ip, arg3
vld1.8 {q0}, [ip]
vmov.i8 q9, #0x80
veor.8 q0, q0, q9
vtbl.8 d18, {d14-d15}, d0
vtbl.8 d15, {d14-d15}, d1
vmov d14, d18
b _128_done
ENDPROC(crc_t10dif_pmull)
// precomputed constants
// these constants are precomputed from the poly:
// 0x8bb70000 (0x8bb7 scaled to 32 bits)
.align 4
// Q = 0x18BB70000
// rk1 = 2^(32*3) mod Q << 32
// rk2 = 2^(32*5) mod Q << 32
// rk3 = 2^(32*15) mod Q << 32
// rk4 = 2^(32*17) mod Q << 32
// rk5 = 2^(32*3) mod Q << 32
// rk6 = 2^(32*2) mod Q << 32
// rk7 = floor(2^64/Q)
// rk8 = Q
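//
// Illustrative only: the "2^(32*k) mod Q" parts of these constants can be
// reproduced with a small GF(2) helper like the C sketch below (the exact
// placement of the 16-bit remainder inside each .quad follows the paper and
// this port's conventions, and rk7 needs a polynomial division instead):
//
//	#include <stdint.h>
//
//	static uint32_t xn_mod_q(unsigned int n)	// x^n mod 0x18bb7
//	{
//		uint32_t r = 1;				// the polynomial "1"
//
//		while (n--) {
//			r <<= 1;			// multiply by x
//			if (r & 0x10000)		// degree 16: reduce
//				r ^= 0x18bb7;
//		}
//		return r;				// 16-bit remainder
//	}
//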
rk3: .quad 0x9d9d000000000000
rk4: .quad 0x7cf5000000000000
rk5: .quad 0x2d56000000000000
rk6: .quad 0x1368000000000000
rk7: .quad 0x00000001f65a57f8
rk8: .quad 0x000000018bb70000
rk9: .quad 0xceae000000000000
rk10: .quad 0xbfd6000000000000
rk11: .quad 0x1e16000000000000
rk12: .quad 0x713c000000000000
rk13: .quad 0xf7f9000000000000
rk14: .quad 0x80a6000000000000
rk15: .quad 0x044c000000000000
rk16: .quad 0xe658000000000000
rk17: .quad 0xad18000000000000
rk18: .quad 0xa497000000000000
rk19: .quad 0x6ee3000000000000
rk20: .quad 0xe7b5000000000000
rk1: .quad 0x2d56000000000000
rk2: .quad 0x06df000000000000
tbl_shf_table:
// use these values for shift constants for the tbl/tbx instruction
// different alignments result in values as shown:
// DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1
// DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-2) / shr2
// DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-3) / shr3
// DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4
// DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5
// DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6
// DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7
// DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8 (16-8) / shr8
// DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9
// DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10
// DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11
// DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12
// DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13
// DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14
// DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15
.byte 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87
.byte 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
.byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
.byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe , 0x0
|
AirFortressIlikara/LS2K0300-linux-4.19
| 5,685
|
arch/arm/crypto/aes-cipher-core.S
|
/*
* Scalar AES core transform
*
* Copyright (C) 2017 Linaro Ltd.
* Author: Ard Biesheuvel <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/cache.h>
.text
.align 5
rk .req r0
rounds .req r1
in .req r2
out .req r3
ttab .req ip
t0 .req lr
t1 .req r2
t2 .req r3
.macro __select, out, in, idx
.if __LINUX_ARM_ARCH__ < 7
and \out, \in, #0xff << (8 * \idx)
.else
ubfx \out, \in, #(8 * \idx), #8
.endif
.endm
.macro __load, out, in, idx, sz, op
.if __LINUX_ARM_ARCH__ < 7 && \idx > 0
ldr\op \out, [ttab, \in, lsr #(8 * \idx) - \sz]
.else
ldr\op \out, [ttab, \in, lsl #\sz]
.endif
.endm
.macro __hround, out0, out1, in0, in1, in2, in3, t3, t4, enc, sz, op
__select \out0, \in0, 0
__select t0, \in1, 1
__load \out0, \out0, 0, \sz, \op
__load t0, t0, 1, \sz, \op
.if \enc
__select \out1, \in1, 0
__select t1, \in2, 1
.else
__select \out1, \in3, 0
__select t1, \in0, 1
.endif
__load \out1, \out1, 0, \sz, \op
__select t2, \in2, 2
__load t1, t1, 1, \sz, \op
__load t2, t2, 2, \sz, \op
eor \out0, \out0, t0, ror #24
__select t0, \in3, 3
.if \enc
__select \t3, \in3, 2
__select \t4, \in0, 3
.else
__select \t3, \in1, 2
__select \t4, \in2, 3
.endif
__load \t3, \t3, 2, \sz, \op
__load t0, t0, 3, \sz, \op
__load \t4, \t4, 3, \sz, \op
eor \out1, \out1, t1, ror #24
eor \out0, \out0, t2, ror #16
ldm rk!, {t1, t2}
eor \out1, \out1, \t3, ror #16
eor \out0, \out0, t0, ror #8
eor \out1, \out1, \t4, ror #8
eor \out0, \out0, t1
eor \out1, \out1, t2
.endm
.macro fround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op
__hround \out0, \out1, \in0, \in1, \in2, \in3, \out2, \out3, 1, \sz, \op
__hround \out2, \out3, \in2, \in3, \in0, \in1, \in1, \in2, 1, \sz, \op
.endm
.macro iround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op
__hround \out0, \out1, \in0, \in3, \in2, \in1, \out2, \out3, 0, \sz, \op
__hround \out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0, \sz, \op
.endm
.macro __rev, out, in
.if __LINUX_ARM_ARCH__ < 6
lsl t0, \in, #24
and t1, \in, #0xff00
and t2, \in, #0xff0000
orr \out, t0, \in, lsr #24
orr \out, \out, t1, lsl #8
orr \out, \out, t2, lsr #8
.else
rev \out, \in
.endif
.endm
.macro __adrl, out, sym, c
.if __LINUX_ARM_ARCH__ < 7
ldr\c \out, =\sym
.else
movw\c \out, #:lower16:\sym
movt\c \out, #:upper16:\sym
.endif
.endm
.macro do_crypt, round, ttab, ltab, bsz
push {r3-r11, lr}
ldr r4, [in]
ldr r5, [in, #4]
ldr r6, [in, #8]
ldr r7, [in, #12]
ldm rk!, {r8-r11}
#ifdef CONFIG_CPU_BIG_ENDIAN
__rev r4, r4
__rev r5, r5
__rev r6, r6
__rev r7, r7
#endif
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
__adrl ttab, \ttab
tst rounds, #2
bne 1f
0: \round r8, r9, r10, r11, r4, r5, r6, r7
\round r4, r5, r6, r7, r8, r9, r10, r11
1: subs rounds, rounds, #4
\round r8, r9, r10, r11, r4, r5, r6, r7
bls 2f
\round r4, r5, r6, r7, r8, r9, r10, r11
b 0b
2: __adrl ttab, \ltab
\round r4, r5, r6, r7, r8, r9, r10, r11, \bsz, b
#ifdef CONFIG_CPU_BIG_ENDIAN
__rev r4, r4
__rev r5, r5
__rev r6, r6
__rev r7, r7
#endif
ldr out, [sp]
str r4, [out]
str r5, [out, #4]
str r6, [out, #8]
str r7, [out, #12]
pop {r3-r11, pc}
.align 3
.ltorg
.endm
ENTRY(__aes_arm_encrypt)
do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2
ENDPROC(__aes_arm_encrypt)
.align 5
ENTRY(__aes_arm_decrypt)
do_crypt iround, crypto_it_tab, __aes_arm_inverse_sbox, 0
ENDPROC(__aes_arm_decrypt)
.section ".rodata", "a"
.align L1_CACHE_SHIFT
.type __aes_arm_inverse_sbox, %object
__aes_arm_inverse_sbox:
.byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
.byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
.byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
.byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
.byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
.byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
.byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
.byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
.byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
.byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
.byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
.byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
.byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
.byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
.byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
.byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
.byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
.byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
.byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
.byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
.byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
.byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
.byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
.byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
.byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
.byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
.byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
.byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
.byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
.byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
.byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
.byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
.size __aes_arm_inverse_sbox, . - __aes_arm_inverse_sbox
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,458
|
arch/arm/vfp/entry.S
|
/*
* linux/arch/arm/vfp/entry.S
*
* Copyright (C) 2004 ARM Limited.
* Written by Deep Blue Solutions Limited.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/vfpmacros.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
@ VFP entry point.
@
@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
@ r2 = PC value to resume execution after successful emulation
@ r9 = normal "successful" return address
@ r10 = this thread's thread_info structure
@ lr = unrecognised instruction return address
@ IRQs enabled.
@
ENTRY(do_vfp)
inc_preempt_count r10, r4
ldr r4, .LCvfp
ldr r11, [r10, #TI_CPU] @ CPU number
add r10, r10, #TI_VFPSTATE @ r10 = workspace
ldr pc, [r4] @ call VFP entry point
ENDPROC(do_vfp)
ENTRY(vfp_null_entry)
dec_preempt_count_ti r10, r4
ret lr
ENDPROC(vfp_null_entry)
.align 2
.LCvfp:
.word vfp_vector
@ This code is called if the VFP does not exist. It needs to flag the
@ failure to the VFP initialisation code.
__INIT
ENTRY(vfp_testing_entry)
dec_preempt_count_ti r10, r4
ldr r0, VFP_arch_address
str r0, [r0] @ set to non-zero value
ret r9 @ we have handled the fault
ENDPROC(vfp_testing_entry)
.align 2
VFP_arch_address:
.word VFP_arch
__FINIT
|
AirFortressIlikara/LS2K0300-linux-4.19
| 8,906
|
arch/arm/vfp/vfphw.S
|
/*
* linux/arch/arm/vfp/vfphw.S
*
* Copyright (C) 2004 ARM Limited.
* Written by Deep Blue Solutions Limited.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This code is called from the kernel's undefined instruction trap.
* r9 holds the return address for successful handling.
* lr holds the return address for unrecognised instructions.
* r10 points at the start of the private FP workspace in the thread structure
* sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h)
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/vfpmacros.h>
#include <linux/kern_levels.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
.macro DBGSTR, str
#ifdef DEBUG
stmfd sp!, {r0-r3, ip, lr}
ldr r0, =1f
bl printk
ldmfd sp!, {r0-r3, ip, lr}
.pushsection .rodata, "a"
1: .ascii KERN_DEBUG "VFP: \str\n"
.byte 0
.previous
#endif
.endm
.macro DBGSTR1, str, arg
#ifdef DEBUG
stmfd sp!, {r0-r3, ip, lr}
mov r1, \arg
ldr r0, =1f
bl printk
ldmfd sp!, {r0-r3, ip, lr}
.pushsection .rodata, "a"
1: .ascii KERN_DEBUG "VFP: \str\n"
.byte 0
.previous
#endif
.endm
.macro DBGSTR3, str, arg1, arg2, arg3
#ifdef DEBUG
stmfd sp!, {r0-r3, ip, lr}
mov r3, \arg3
mov r2, \arg2
mov r1, \arg1
ldr r0, =1f
bl printk
ldmfd sp!, {r0-r3, ip, lr}
.pushsection .rodata, "a"
1: .ascii KERN_DEBUG "VFP: \str\n"
.byte 0
.previous
#endif
.endm
@ VFP hardware support entry point.
@
@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
@ r2 = PC value to resume execution after successful emulation
@ r9 = normal "successful" return address
@ r10 = vfp_state union
@ r11 = CPU number
@ lr = unrecognised instruction return address
@ IRQs enabled.
ENTRY(vfp_support_entry)
DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10
ldr r3, [sp, #S_PSR] @ Neither lazy restore nor FP exceptions
and r3, r3, #MODE_MASK @ are supported in kernel mode
teq r3, #USR_MODE
bne vfp_kmode_exception @ Returns through lr
VFPFMRX r1, FPEXC @ Is the VFP enabled?
DBGSTR1 "fpexc %08x", r1
tst r1, #FPEXC_EN
bne look_for_VFP_exceptions @ VFP is already enabled
DBGSTR1 "enable %x", r10
ldr r3, vfp_current_hw_state_address
orr r1, r1, #FPEXC_EN @ user FPEXC has the enable bit set
ldr r4, [r3, r11, lsl #2] @ vfp_current_hw_state pointer
bic r5, r1, #FPEXC_EX @ make sure exceptions are disabled
cmp r4, r10 @ this thread owns the hw context?
#ifndef CONFIG_SMP
@ For UP, checking that this thread owns the hw context is
@ sufficient to determine that the hardware state is valid.
beq vfp_hw_state_valid
@ On UP, we lazily save the VFP context. As a different
@ thread wants ownership of the VFP hardware, save the old
@ state if there was a previous (valid) owner.
VFPFMXR FPEXC, r5 @ enable VFP, disable any pending
@ exceptions, so we can get at the
@ rest of it
DBGSTR1 "save old state %p", r4
cmp r4, #0 @ if the vfp_current_hw_state is NULL
beq vfp_reload_hw @ then the hw state needs reloading
VFPFSTMIA r4, r5 @ save the working registers
VFPFMRX r5, FPSCR @ current status
#ifndef CONFIG_CPU_FEROCEON
tst r1, #FPEXC_EX @ is there additional state to save?
beq 1f
VFPFMRX r6, FPINST @ FPINST (only if FPEXC.EX is set)
tst r1, #FPEXC_FP2V @ is there an FPINST2 to read?
beq 1f
VFPFMRX r8, FPINST2 @ FPINST2 if needed (and present)
1:
#endif
stmia r4, {r1, r5, r6, r8} @ save FPEXC, FPSCR, FPINST, FPINST2
vfp_reload_hw:
#else
@ For SMP, if this thread does not own the hw context, then we
@ need to reload it. No need to save the old state as on SMP,
@ we always save the state when we switch away from a thread.
bne vfp_reload_hw
@ This thread has ownership of the current hardware context.
@ However, it may have been migrated to another CPU, in which
@ case the saved state is newer than the hardware context.
@ Check this by looking at the CPU number which the state was
@ last loaded onto.
ldr ip, [r10, #VFP_CPU]
teq ip, r11
beq vfp_hw_state_valid
vfp_reload_hw:
@ We're loading this thread's state into the VFP hardware. Update
@ the CPU number which contains the most up to date VFP context.
str r11, [r10, #VFP_CPU]
VFPFMXR FPEXC, r5 @ enable VFP, disable any pending
@ exceptions, so we can get at the
@ rest of it
#endif
DBGSTR1 "load state %p", r10
str r10, [r3, r11, lsl #2] @ update the vfp_current_hw_state pointer
@ Load the saved state back into the VFP
VFPFLDMIA r10, r5 @ reload the working registers while
@ FPEXC is in a safe state
ldmia r10, {r1, r5, r6, r8} @ load FPEXC, FPSCR, FPINST, FPINST2
#ifndef CONFIG_CPU_FEROCEON
tst r1, #FPEXC_EX @ is there additional state to restore?
beq 1f
VFPFMXR FPINST, r6 @ restore FPINST (only if FPEXC.EX is set)
tst r1, #FPEXC_FP2V @ is there an FPINST2 to write?
beq 1f
VFPFMXR FPINST2, r8 @ FPINST2 if needed (and present)
1:
#endif
VFPFMXR FPSCR, r5 @ restore status
@ The context stored in the VFP hardware is up to date with this thread
vfp_hw_state_valid:
tst r1, #FPEXC_EX
bne process_exception @ might as well handle the pending
@ exception before retrying branch
@ out before setting an FPEXC that
@ stops us reading stuff
VFPFMXR FPEXC, r1 @ Restore FPEXC last
sub r2, r2, #4 @ Retry current instruction - if Thumb
str r2, [sp, #S_PC] @ mode it's two 16-bit instructions,
@ else it's one 32-bit instruction, so
@ always subtract 4 from the following
@ instruction address.
dec_preempt_count_ti r10, r4
ret r9 @ we think we have handled things
look_for_VFP_exceptions:
@ Check for synchronous or asynchronous exception
tst r1, #FPEXC_EX | FPEXC_DEX
bne process_exception
@ On some implementations of the VFP subarch 1, setting FPSCR.IXE
@ causes all the CDP instructions to be bounced synchronously without
@ setting the FPEXC.EX bit
VFPFMRX r5, FPSCR
tst r5, #FPSCR_IXE
bne process_exception
tst r5, #FPSCR_LENGTH_MASK
beq skip
orr r1, r1, #FPEXC_DEX
b process_exception
skip:
@ Fall through and hand on to the next handler - appropriate coproc
@ instr not recognised by VFP
DBGSTR "not VFP"
dec_preempt_count_ti r10, r4
ret lr
process_exception:
DBGSTR "bounce"
mov r2, sp @ nothing stacked - regdump is at TOS
mov lr, r9 @ setup for a return to the user code.
@ Now call the C code to package up the bounce to the support code
@ r0 holds the trigger instruction
@ r1 holds the FPEXC value
@ r2 pointer to register dump
b VFP_bounce @ we have handled this - the support
@ code will raise an exception if
@ required. If not, the user code will
@ retry the faulted instruction
ENDPROC(vfp_support_entry)
ENTRY(vfp_save_state)
@ Save the current VFP state
@ r0 - save location
@ r1 - FPEXC
DBGSTR1 "save VFP state %p", r0
VFPFSTMIA r0, r2 @ save the working registers
VFPFMRX r2, FPSCR @ current status
tst r1, #FPEXC_EX @ is there additional state to save?
beq 1f
VFPFMRX r3, FPINST @ FPINST (only if FPEXC.EX is set)
tst r1, #FPEXC_FP2V @ is there an FPINST2 to read?
beq 1f
VFPFMRX r12, FPINST2 @ FPINST2 if needed (and present)
1:
stmia r0, {r1, r2, r3, r12} @ save FPEXC, FPSCR, FPINST, FPINST2
ret lr
ENDPROC(vfp_save_state)
.align
vfp_current_hw_state_address:
.word vfp_current_hw_state
.macro tbl_branch, base, tmp, shift
#ifdef CONFIG_THUMB2_KERNEL
adr \tmp, 1f
add \tmp, \tmp, \base, lsl \shift
ret \tmp
#else
add pc, pc, \base, lsl \shift
mov r0, r0
#endif
1:
.endm
ENTRY(vfp_get_float)
tbl_branch r0, r3, #3
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1: mrc p10, 0, r0, c\dr, c0, 0 @ fmrs r0, s0
ret lr
.org 1b + 8
1: mrc p10, 0, r0, c\dr, c0, 4 @ fmrs r0, s1
ret lr
.org 1b + 8
.endr
ENDPROC(vfp_get_float)
ENTRY(vfp_put_float)
tbl_branch r1, r3, #3
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1: mcr p10, 0, r0, c\dr, c0, 0 @ fmsr r0, s0
ret lr
.org 1b + 8
1: mcr p10, 0, r0, c\dr, c0, 4 @ fmsr r0, s1
ret lr
.org 1b + 8
.endr
ENDPROC(vfp_put_float)
ENTRY(vfp_get_double)
tbl_branch r0, r3, #3
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1: fmrrd r0, r1, d\dr
ret lr
.org 1b + 8
.endr
#ifdef CONFIG_VFPv3
@ d16 - d31 registers
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1: mrrc p11, 3, r0, r1, c\dr @ fmrrd r0, r1, d\dr
ret lr
.org 1b + 8
.endr
#endif
@ virtual register 16 (or 32 if VFPv3) for compare with zero
mov r0, #0
mov r1, #0
ret lr
ENDPROC(vfp_get_double)
ENTRY(vfp_put_double)
tbl_branch r2, r3, #3
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1: fmdrr d\dr, r0, r1
ret lr
.org 1b + 8
.endr
#ifdef CONFIG_VFPv3
@ d16 - d31 registers
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1: mcrr p11, 3, r0, r1, c\dr @ fmdrr r0, r1, d\dr
ret lr
.org 1b + 8
.endr
#endif
ENDPROC(vfp_put_double)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,118
|
arch/arm/mach-spear/headsmp.S
|
/*
* arch/arm/mach-spear13XX/headsmp.S
*
* Picked from realview
* Copyright (c) 2012 ST Microelectronics Limited
* Shiraz Hashim <shiraz.linux.kernel@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <linux/init.h>
__INIT
/*
* spear13xx specific entry point for secondary CPUs. This provides
* a "holding pen" into which all secondary cores are held until we're
* ready for them to initialise.
*/
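/*
 * Hedged C model of the pen loop below (illustrative only; the boot CPU
 * releases a core by writing its hardware CPU id into pen_release):
 *
 *	while (READ_ONCE(pen_release) != cpu_id)
 *		;			// spin in the holding pen
 *	// then re-enable coherency and jump to secondary_startup()
 */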
ENTRY(spear13xx_secondary_startup)
mrc p15, 0, r0, c0, c0, 5
and r0, r0, #15
adr r4, 1f
ldmia r4, {r5, r6}
sub r4, r4, r5
add r6, r6, r4
pen: ldr r7, [r6]
cmp r7, r0
bne pen
/* re-enable coherency */
mrc p15, 0, r0, c1, c0, 1
orr r0, r0, #(1 << 6) | (1 << 0)
mcr p15, 0, r0, c1, c0, 1
/*
* we've been released from the holding pen: secondary_stack
* should now contain the SVC stack for this core
*/
b secondary_startup
.align
1: .long .
.long pen_release
ENDPROC(spear13xx_secondary_startup)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,959
|
arch/arm/mach-lpc32xx/suspend.S
|
/*
* arch/arm/mach-lpc32xx/suspend.S
*
* Original authors: Dmitry Chigirev, Vitaly Wool <source@mvista.com>
* Modified by Kevin Wells <kevin.wells@nxp.com>
*
* 2005 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <mach/platform.h>
#include <mach/hardware.h>
/* Using named register defines makes the code easier to follow */
#define WORK1_REG r0
#define WORK2_REG r1
#define SAVED_HCLK_DIV_REG r2
#define SAVED_HCLK_PLL_REG r3
#define SAVED_DRAM_CLKCTRL_REG r4
#define SAVED_PWR_CTRL_REG r5
#define CLKPWRBASE_REG r6
#define EMCBASE_REG r7
#define LPC32XX_EMC_STATUS_OFFS 0x04
#define LPC32XX_EMC_STATUS_BUSY 0x1
#define LPC32XX_EMC_STATUS_SELF_RFSH 0x4
#define LPC32XX_CLKPWR_PWR_CTRL_OFFS 0x44
#define LPC32XX_CLKPWR_HCLK_DIV_OFFS 0x40
#define LPC32XX_CLKPWR_HCLKPLL_CTRL_OFFS 0x58
#define CLKPWR_PCLK_DIV_MASK 0xFFFFFE7F
.text
ENTRY(lpc32xx_sys_suspend)
@ Save a copy of the used registers in IRAM, r0 is corrupted
adr r0, tmp_stack_end
stmfd r0!, {r3 - r7, sp, lr}
@ Load a few common register addresses
adr WORK1_REG, reg_bases
ldr CLKPWRBASE_REG, [WORK1_REG, #0]
ldr EMCBASE_REG, [WORK1_REG, #4]
ldr SAVED_PWR_CTRL_REG, [CLKPWRBASE_REG,\
#LPC32XX_CLKPWR_PWR_CTRL_OFFS]
orr WORK1_REG, SAVED_PWR_CTRL_REG, #LPC32XX_CLKPWR_SDRAM_SELF_RFSH
@ Wait for SDRAM busy status to go busy and then idle
@ This guarantees a small window where DRAM isn't busy
1:
ldr WORK2_REG, [EMCBASE_REG, #LPC32XX_EMC_STATUS_OFFS]
and WORK2_REG, WORK2_REG, #LPC32XX_EMC_STATUS_BUSY
cmp WORK2_REG, #LPC32XX_EMC_STATUS_BUSY
bne 1b @ Branch while idle
2:
ldr WORK2_REG, [EMCBASE_REG, #LPC32XX_EMC_STATUS_OFFS]
and WORK2_REG, WORK2_REG, #LPC32XX_EMC_STATUS_BUSY
cmp WORK2_REG, #LPC32XX_EMC_STATUS_BUSY
beq 2b @ Branch until idle
@ Setup self-refresh with support for manual exit of
@ self-refresh mode
str WORK1_REG, [CLKPWRBASE_REG, #LPC32XX_CLKPWR_PWR_CTRL_OFFS]
orr WORK2_REG, WORK1_REG, #LPC32XX_CLKPWR_UPD_SDRAM_SELF_RFSH
str WORK2_REG, [CLKPWRBASE_REG, #LPC32XX_CLKPWR_PWR_CTRL_OFFS]
str WORK1_REG, [CLKPWRBASE_REG, #LPC32XX_CLKPWR_PWR_CTRL_OFFS]
@ Wait for self-refresh acknowledge, clocks to the DRAM device
@ will automatically stop on start of self-refresh
3:
ldr WORK2_REG, [EMCBASE_REG, #LPC32XX_EMC_STATUS_OFFS]
and WORK2_REG, WORK2_REG, #LPC32XX_EMC_STATUS_SELF_RFSH
cmp WORK2_REG, #LPC32XX_EMC_STATUS_SELF_RFSH
bne 3b @ Branch until self-refresh mode starts
@ Enter direct-run mode from run mode
bic WORK1_REG, WORK1_REG, #LPC32XX_CLKPWR_SELECT_RUN_MODE
str WORK1_REG, [CLKPWRBASE_REG, #LPC32XX_CLKPWR_PWR_CTRL_OFFS]
@ Safe disable of DRAM clock in EMC block, prevents DDR sync
@ issues on restart
ldr SAVED_HCLK_DIV_REG, [CLKPWRBASE_REG,\
#LPC32XX_CLKPWR_HCLK_DIV_OFFS]
and WORK2_REG, SAVED_HCLK_DIV_REG, #CLKPWR_PCLK_DIV_MASK
str WORK2_REG, [CLKPWRBASE_REG, #LPC32XX_CLKPWR_HCLK_DIV_OFFS]
@ Save HCLK PLL state and disable HCLK PLL
ldr SAVED_HCLK_PLL_REG, [CLKPWRBASE_REG,\
#LPC32XX_CLKPWR_HCLKPLL_CTRL_OFFS]
bic WORK2_REG, SAVED_HCLK_PLL_REG, #LPC32XX_CLKPWR_HCLKPLL_POWER_UP
str WORK2_REG, [CLKPWRBASE_REG, #LPC32XX_CLKPWR_HCLKPLL_CTRL_OFFS]
@ Enter stop mode until an enabled event occurs
orr WORK1_REG, WORK1_REG, #LPC32XX_CLKPWR_STOP_MODE_CTRL
str WORK1_REG, [CLKPWRBASE_REG, #LPC32XX_CLKPWR_PWR_CTRL_OFFS]
.rept 9
nop
.endr
@ Clear stop status
bic WORK1_REG, WORK1_REG, #LPC32XX_CLKPWR_STOP_MODE_CTRL
@ Restore original HCLK PLL value and wait for PLL lock
str SAVED_HCLK_PLL_REG, [CLKPWRBASE_REG,\
#LPC32XX_CLKPWR_HCLKPLL_CTRL_OFFS]
4:
ldr WORK2_REG, [CLKPWRBASE_REG, #LPC32XX_CLKPWR_HCLKPLL_CTRL_OFFS]
and WORK2_REG, WORK2_REG, #LPC32XX_CLKPWR_HCLKPLL_PLL_STS
bne 4b
@ Re-enter run mode with self-refresh flag cleared, but no DRAM
@ update yet. DRAM is still in self-refresh
str SAVED_PWR_CTRL_REG, [CLKPWRBASE_REG,\
#LPC32XX_CLKPWR_PWR_CTRL_OFFS]
@ Restore original DRAM clock mode to restore DRAM clocks
str SAVED_HCLK_DIV_REG, [CLKPWRBASE_REG,\
#LPC32XX_CLKPWR_HCLK_DIV_OFFS]
@ Clear self-refresh mode
orr WORK1_REG, SAVED_PWR_CTRL_REG,\
#LPC32XX_CLKPWR_UPD_SDRAM_SELF_RFSH
str WORK1_REG, [CLKPWRBASE_REG, #LPC32XX_CLKPWR_PWR_CTRL_OFFS]
str SAVED_PWR_CTRL_REG, [CLKPWRBASE_REG,\
#LPC32XX_CLKPWR_PWR_CTRL_OFFS]
@ Wait for EMC to clear self-refresh mode
5:
ldr WORK2_REG, [EMCBASE_REG, #LPC32XX_EMC_STATUS_OFFS]
and WORK2_REG, WORK2_REG, #LPC32XX_EMC_STATUS_SELF_RFSH
bne 5b @ Branch until self-refresh has exited
@ restore regs and return
adr r0, tmp_stack
ldmfd r0!, {r3 - r7, sp, pc}
reg_bases:
.long IO_ADDRESS(LPC32XX_CLK_PM_BASE)
.long IO_ADDRESS(LPC32XX_EMC_BASE)
tmp_stack:
.long 0, 0, 0, 0, 0, 0, 0
tmp_stack_end:
ENTRY(lpc32xx_sys_suspend_sz)
.word . - lpc32xx_sys_suspend
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,615
|
arch/arm/nwfpe/entry.S
|
/*
NetWinder Floating Point Emulator
(c) Rebel.COM, 1998
(c) 1998, 1999 Philip Blundell
Direct questions, comments to Scott Bambrough <scottb@netwinder.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <asm/assembler.h>
#include <asm/opcodes.h>
/* This is the kernel's entry point into the floating point emulator.
It is called from the kernel with code similar to this:
sub r4, r5, #4
ldrt r0, [r4] @ r0 = instruction
adrsvc al, r9, ret_from_exception @ r9 = normal FP return
adrsvc al, lr, fpundefinstr @ lr = undefined instr return
get_current_task r10
mov r8, #1
strb r8, [r10, #TSK_USED_MATH] @ set current->used_math
add r10, r10, #TSS_FPESAVE @ r10 = workspace
ldr r4, .LC2
ldr pc, [r4] @ Call FP emulator entry point
The kernel expects the emulator to return via one of two possible
points of return it passes to the emulator. The emulator, if
successful in its emulation, jumps to ret_from_exception (passed in
r9) and the kernel takes care of returning control from the trap to
the user code. If the emulator is unable to emulate the instruction,
it returns via _fpundefinstr (passed via lr) and the kernel halts the
user program with a core dump.
On entry to the emulator r10 points to an area of private FP workspace
reserved in the thread structure for this process. This is where the
emulator saves its registers across calls. The first word of this area
is used as a flag to detect the first time a process uses floating point,
so that the emulator startup cost can be avoided for tasks that don't
want it.
This routine does three things:
1) The kernel has created a struct pt_regs on the stack and saved the
user registers into it. See /usr/include/asm/proc/ptrace.h for details.
2) It calls EmulateAll to emulate a floating point instruction.
EmulateAll returns 1 if the emulation was successful, or 0 if not.
3) If an instruction has been emulated successfully, it looks ahead at
the next instruction. If it is a floating point instruction, it
executes the instruction, without returning to user space. In this
way it repeatedly looks ahead and executes floating point instructions
until it encounters a non floating point instruction, at which time it
returns via _fpreturn.
This is done to reduce the effect of the trap overhead on each
floating point instructions. GCC attempts to group floating point
instructions to allow the emulator to spread the cost of the trap over
several floating point instructions. */
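/* Hedged C paraphrase of the loop described above, for orientation only
   (helper names other than EmulateAll and arm_check_condition are
   illustrative):

	for (;;) {
		if (arm_check_condition(insn, psr) == ARM_OPCODE_CONDTEST_PASS
		    && !EmulateAll(insn))
			return_via_saved_lr();	// kernel signals the task
		insn = load_next_user_insn(&pc);	// may fault
		if (!is_fp_coproc_insn(insn))
			return_via_r9();	// ret_from_exception
		update_saved_pc(pc);
	}
*/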
#include <asm/asm-offsets.h>
.globl nwfpe_enter
nwfpe_enter:
mov r4, lr @ save the failure-return addresses
mov sl, sp @ we access the registers via 'sl'
ldr r5, [sp, #S_PC] @ get contents of PC;
mov r6, r0 @ save the opcode
emulate:
ldr r1, [sp, #S_PSR] @ fetch the PSR
bl arm_check_condition @ check the condition
cmp r0, #ARM_OPCODE_CONDTEST_PASS @ condition passed?
@ if condition code failed to match, next insn
bne next @ get the next instruction;
mov r0, r6 @ prepare for EmulateAll()
bl EmulateAll @ emulate the instruction
cmp r0, #0 @ was emulation successful
reteq r4 @ no, return failure
next:
uaccess_enable r3
.Lx1: ldrt r6, [r5], #4 @ get the next instruction and
@ increment PC
uaccess_disable r3
and r2, r6, #0x0F000000 @ test for FP insns
teq r2, #0x0C000000
teqne r2, #0x0D000000
teqne r2, #0x0E000000
retne r9 @ return ok if not a fp insn
str r5, [sp, #S_PC] @ update PC copy in regs
mov r0, r6 @ save a copy
b emulate @ check condition and emulate
@ We need to be prepared for the instructions at .Lx1 and .Lx2
@ to fault. Emit the appropriate exception gunk to fix things up.
@ ??? For some reason, faults can happen at .Lx2 even with a
@ plain LDR instruction. Weird, but it seems harmless.
.pushsection .text.fixup,"ax"
.align 2
.Lfix: ret r9 @ let the user eat segfaults
.popsection
.pushsection __ex_table,"a"
.align 3
.long .Lx1, .Lfix
.popsection
|
AirFortressIlikara/LS2K0300-linux-4.19
| 9,089
|
arch/arm/mach-omap1/ams-delta-fiq-handler.S
|
/*
* linux/arch/arm/mach-omap1/ams-delta-fiq-handler.S
*
* Based on linux/arch/arm/lib/floppydma.S
* Renamed and modified to work with 2.6 kernel by Matt Callow
* Copyright (C) 1995, 1996 Russell King
* Copyright (C) 2004 Pete Trapps
* Copyright (C) 2006 Matt Callow
* Copyright (C) 2010 Janusz Krzysztofik
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <linux/platform_data/ams-delta-fiq.h>
#include <asm/assembler.h>
#include <mach/board-ams-delta.h>
#include "ams-delta-fiq.h"
#include "iomap.h"
#include "soc.h"
/*
* GPIO related definitions, copied from arch/arm/plat-omap/gpio.c.
* Unfortunately, those were not placed in a separate header file.
*/
#define OMAP1510_GPIO_BASE 0xFFFCE000
#define OMAP1510_GPIO_DATA_INPUT 0x00
#define OMAP1510_GPIO_DATA_OUTPUT 0x04
#define OMAP1510_GPIO_DIR_CONTROL 0x08
#define OMAP1510_GPIO_INT_CONTROL 0x0c
#define OMAP1510_GPIO_INT_MASK 0x10
#define OMAP1510_GPIO_INT_STATUS 0x14
#define OMAP1510_GPIO_PIN_CONTROL 0x18
/* GPIO register bitmasks */
#define KEYBRD_DATA_MASK (0x1 << AMS_DELTA_GPIO_PIN_KEYBRD_DATA)
#define KEYBRD_CLK_MASK (0x1 << AMS_DELTA_GPIO_PIN_KEYBRD_CLK)
#define MODEM_IRQ_MASK (0x1 << AMS_DELTA_GPIO_PIN_MODEM_IRQ)
#define HOOK_SWITCH_MASK (0x1 << AMS_DELTA_GPIO_PIN_HOOK_SWITCH)
#define OTHERS_MASK (MODEM_IRQ_MASK | HOOK_SWITCH_MASK)
/* IRQ handler register bitmasks */
#define DEFERRED_FIQ_MASK OMAP_IRQ_BIT(INT_DEFERRED_FIQ)
#define GPIO_BANK1_MASK OMAP_IRQ_BIT(INT_GPIO_BANK1)
/* Driver buffer byte offsets */
#define BUF_MASK (FIQ_MASK * 4)
#define BUF_STATE (FIQ_STATE * 4)
#define BUF_KEYS_CNT (FIQ_KEYS_CNT * 4)
#define BUF_TAIL_OFFSET (FIQ_TAIL_OFFSET * 4)
#define BUF_HEAD_OFFSET (FIQ_HEAD_OFFSET * 4)
#define BUF_BUF_LEN (FIQ_BUF_LEN * 4)
#define BUF_KEY (FIQ_KEY * 4)
#define BUF_MISSED_KEYS (FIQ_MISSED_KEYS * 4)
#define BUF_BUFFER_START (FIQ_BUFFER_START * 4)
#define BUF_GPIO_INT_MASK (FIQ_GPIO_INT_MASK * 4)
#define BUF_KEYS_HICNT (FIQ_KEYS_HICNT * 4)
#define BUF_IRQ_PEND (FIQ_IRQ_PEND * 4)
#define BUF_SIR_CODE_L1 (FIQ_SIR_CODE_L1 * 4)
#define BUF_SIR_CODE_L2 (IRQ_SIR_CODE_L2 * 4)
#define BUF_CNT_INT_00 (FIQ_CNT_INT_00 * 4)
#define BUF_CNT_INT_KEY (FIQ_CNT_INT_KEY * 4)
#define BUF_CNT_INT_MDM (FIQ_CNT_INT_MDM * 4)
#define BUF_CNT_INT_03 (FIQ_CNT_INT_03 * 4)
#define BUF_CNT_INT_HSW (FIQ_CNT_INT_HSW * 4)
#define BUF_CNT_INT_05 (FIQ_CNT_INT_05 * 4)
#define BUF_CNT_INT_06 (FIQ_CNT_INT_06 * 4)
#define BUF_CNT_INT_07 (FIQ_CNT_INT_07 * 4)
#define BUF_CNT_INT_08 (FIQ_CNT_INT_08 * 4)
#define BUF_CNT_INT_09 (FIQ_CNT_INT_09 * 4)
#define BUF_CNT_INT_10 (FIQ_CNT_INT_10 * 4)
#define BUF_CNT_INT_11 (FIQ_CNT_INT_11 * 4)
#define BUF_CNT_INT_12 (FIQ_CNT_INT_12 * 4)
#define BUF_CNT_INT_13 (FIQ_CNT_INT_13 * 4)
#define BUF_CNT_INT_14 (FIQ_CNT_INT_14 * 4)
#define BUF_CNT_INT_15 (FIQ_CNT_INT_15 * 4)
#define BUF_CIRC_BUFF (FIQ_CIRC_BUFF * 4)
/*
* Register usage
* r8 - temporary
* r9 - the driver buffer
* r10 - temporary
* r11 - interrupts mask
* r12 - base pointers
* r13 - interrupts status
*/
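/*
 * Hedged C model of the keycode ring-buffer append done further down in the
 * keyboard path (field names mirror the BUF_* offsets above; illustrative
 * only):
 *
 *	if (buf->keys_cnt < buf->buf_len) {
 *		buf->keys_cnt++;
 *		if (buf->tail_offset == buf->buf_len)
 *			buf->tail_offset = 0;
 *		buf->buffer_start[buf->tail_offset++] = buf->key;
 *		buf->cnt_int_key++;
 *	}			// else: buffer full, the keycode is dropped
 */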
.text
.global qwerty_fiqin_end
ENTRY(qwerty_fiqin_start)
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ FIQ interrupt handler
ldr r12, omap_ih1_base @ set pointer to level1 handler
ldr r11, [r12, #IRQ_MIR_REG_OFFSET] @ fetch interrupts mask
ldr r13, [r12, #IRQ_ITR_REG_OFFSET] @ fetch interrupts status
bics r13, r13, r11 @ clear masked - any left?
beq exit @ none - spurious FIQ? exit
ldr r10, [r12, #IRQ_SIR_FIQ_REG_OFFSET] @ get requested interrupt number
mov r8, #2 @ reset FIQ agreement
str r8, [r12, #IRQ_CONTROL_REG_OFFSET]
cmp r10, #(INT_GPIO_BANK1 - NR_IRQS_LEGACY) @ is it GPIO interrupt?
beq gpio @ yes - process it
mov r8, #1
orr r8, r11, r8, lsl r10 @ mask spurious interrupt
str r8, [r12, #IRQ_MIR_REG_OFFSET]
exit:
subs pc, lr, #4 @ return from FIQ
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@
gpio: @ GPIO bank interrupt handler
ldr r12, omap1510_gpio_base @ set base pointer to GPIO bank
ldr r11, [r12, #OMAP1510_GPIO_INT_MASK] @ fetch GPIO interrupts mask
restart:
ldr r13, [r12, #OMAP1510_GPIO_INT_STATUS] @ fetch status bits
bics r13, r13, r11 @ clear masked - any left?
beq exit @ no - spurious interrupt? exit
orr r11, r11, r13 @ mask all requested interrupts
str r11, [r12, #OMAP1510_GPIO_INT_MASK]
str r13, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack all requested interrupts
ands r10, r13, #KEYBRD_CLK_MASK @ extract keyboard status - set?
beq hksw @ no - try next source
@@@@@@@@@@@@@@@@@@@@@@
@ Keyboard clock FIQ mode interrupt handler
@ r10 now contains KEYBRD_CLK_MASK, use it
bic r11, r11, r10 @ unmask it
str r11, [r12, #OMAP1510_GPIO_INT_MASK]
@ Process keyboard data
ldr r8, [r12, #OMAP1510_GPIO_DATA_INPUT] @ fetch GPIO input
ldr r10, [r9, #BUF_STATE] @ fetch kbd interface state
cmp r10, #0 @ are we expecting start bit?
bne data @ no - go to data processing
ands r8, r8, #KEYBRD_DATA_MASK @ check start bit - detected?
beq hksw @ no - try next source
@ r8 contains KEYBRD_DATA_MASK, use it
str r8, [r9, #BUF_STATE] @ enter data processing state
@ r10 already contains 0, reuse it
str r10, [r9, #BUF_KEY] @ clear keycode
mov r10, #2 @ reset input bit mask
str r10, [r9, #BUF_MASK]
@ Mask other GPIO line interrupts till key done
str r11, [r9, #BUF_GPIO_INT_MASK] @ save mask for later restore
mvn r11, #KEYBRD_CLK_MASK @ prepare all except kbd mask
str r11, [r12, #OMAP1510_GPIO_INT_MASK] @ store into the mask register
b restart @ restart
data: ldr r10, [r9, #BUF_MASK] @ fetch current input bit mask
@ r8 still contains GPIO input bits
ands r8, r8, #KEYBRD_DATA_MASK @ is keyboard data line low?
ldreq r8, [r9, #BUF_KEY] @ yes - fetch collected so far,
orreq r8, r8, r10 @ set 1 at current mask position
streq r8, [r9, #BUF_KEY] @ and save back
mov r10, r10, lsl #1 @ shift mask left
bics r10, r10, #0x800 @ have we got all the bits?
strne r10, [r9, #BUF_MASK] @ not yet - store the mask
bne restart @ and restart
@ r10 already contains 0, reuse it
str r10, [r9, #BUF_STATE] @ reset state to start
@ Key done - restore interrupt mask
ldr r10, [r9, #BUF_GPIO_INT_MASK] @ fetch saved mask
and r11, r11, r10 @ unmask all saved as unmasked
str r11, [r12, #OMAP1510_GPIO_INT_MASK] @ restore into the mask register
@ Try appending the keycode to the circular buffer
ldr r10, [r9, #BUF_KEYS_CNT] @ get saved keystrokes count
ldr r8, [r9, #BUF_BUF_LEN] @ get buffer size
cmp r10, r8 @ is buffer full?
beq hksw @ yes - key lost, next source
add r10, r10, #1 @ increment keystrokes counter
str r10, [r9, #BUF_KEYS_CNT]
ldr r10, [r9, #BUF_TAIL_OFFSET] @ get buffer tail offset
@ r8 already contains buffer size
cmp r10, r8 @ end of buffer?
moveq r10, #0 @ yes - rewind to buffer start
ldr r12, [r9, #BUF_BUFFER_START] @ get buffer start address
add r12, r12, r10, LSL #2 @ calculate buffer tail address
ldr r8, [r9, #BUF_KEY] @ get last keycode
str r8, [r12] @ append it to the buffer tail
add r10, r10, #1 @ increment buffer tail offset
str r10, [r9, #BUF_TAIL_OFFSET]
ldr r10, [r9, #BUF_CNT_INT_KEY] @ increment interrupts counter
add r10, r10, #1
str r10, [r9, #BUF_CNT_INT_KEY]
@@@@@@@@@@@@@@@@@@@@@@@@
hksw: @Is hook switch interrupt requested?
tst r13, #HOOK_SWITCH_MASK @ is hook switch status bit set?
beq mdm @ no - try next source
@@@@@@@@@@@@@@@@@@@@@@@@
@ Hook switch interrupt FIQ mode simple handler
@ Don't toggle active edge, the switch always bounces
@ Increment hook switch interrupt counter
ldr r10, [r9, #BUF_CNT_INT_HSW]
add r10, r10, #1
str r10, [r9, #BUF_CNT_INT_HSW]
@@@@@@@@@@@@@@@@@@@@@@@@
mdm: @Is it a modem interrupt?
tst r13, #MODEM_IRQ_MASK @ is modem status bit set?
beq irq @ no - check for next interrupt
@@@@@@@@@@@@@@@@@@@@@@@@
@ Modem FIQ mode interrupt handler stub
@ Increment modem interrupt counter
ldr r10, [r9, #BUF_CNT_INT_MDM]
add r10, r10, #1
str r10, [r9, #BUF_CNT_INT_MDM]
@@@@@@@@@@@@@@@@@@@@@@@@
irq: @ Place deferred_fiq interrupt request
ldr r12, deferred_fiq_ih_base @ set pointer to IRQ handler
mov r10, #DEFERRED_FIQ_MASK @ set deferred_fiq bit
str r10, [r12, #IRQ_ISR_REG_OFFSET] @ place it in the ISR register
ldr r12, omap1510_gpio_base @ set pointer back to GPIO bank
b restart @ check for next GPIO interrupt
@@@@@@@@@@@@@@@@@@@@@@@@@@@
/*
* Virtual addresses for IO
*/
omap_ih1_base:
.word OMAP1_IO_ADDRESS(OMAP_IH1_BASE)
deferred_fiq_ih_base:
.word OMAP1_IO_ADDRESS(DEFERRED_FIQ_IH_BASE)
omap1510_gpio_base:
.word OMAP1_IO_ADDRESS(OMAP1510_GPIO_BASE)
qwerty_fiqin_end:
/*
* Check the size of the FIQ,
* it cannot go beyond 0xffff0200, and is copied to 0xffff001c
*/
.if (qwerty_fiqin_end - qwerty_fiqin_start) > (0x200 - 0x1c)
.err
.endif
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,554
|
arch/arm/mach-omap1/sram.S
|
/*
* linux/arch/arm/plat-omap/sram-fn.S
*
* Functions that need to be run in internal SRAM
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <mach/hardware.h>
#include "iomap.h"
.text
/*
* Reprograms ULPD and CKCTL.
*/
.align 3
ENTRY(omap1_sram_reprogram_clock)
stmfd sp!, {r0 - r12, lr} @ save registers on stack
mov r2, #OMAP1_IO_ADDRESS(DPLL_CTL) & 0xff000000
orr r2, r2, #OMAP1_IO_ADDRESS(DPLL_CTL) & 0x00ff0000
orr r2, r2, #OMAP1_IO_ADDRESS(DPLL_CTL) & 0x0000ff00
mov r3, #OMAP1_IO_ADDRESS(ARM_CKCTL) & 0xff000000
orr r3, r3, #OMAP1_IO_ADDRESS(ARM_CKCTL) & 0x00ff0000
orr r3, r3, #OMAP1_IO_ADDRESS(ARM_CKCTL) & 0x0000ff00
tst r0, #1 << 4 @ want lock mode?
beq newck @ nope
bic r0, r0, #1 << 4 @ else clear lock bit
strh r0, [r2] @ set dpll into bypass mode
orr r0, r0, #1 << 4 @ set lock bit again
newck:
strh r1, [r3] @ write new ckctl value
strh r0, [r2] @ write new dpll value
mov r4, #0x0700 @ let the clocks settle
orr r4, r4, #0x00ff
delay: sub r4, r4, #1
cmp r4, #0
bne delay
lock: ldrh r4, [r2], #0 @ read back dpll value
tst r0, #1 << 4 @ want lock mode?
beq out @ nope
tst r4, #1 << 0 @ dpll rate locked?
beq lock @ try again
out:
ldmfd sp!, {r0 - r12, pc} @ restore regs and return
ENTRY(omap1_sram_reprogram_clock_sz)
.word . - omap1_sram_reprogram_clock
|
AirFortressIlikara/LS2K0300-linux-4.19
| 9,340
|
arch/arm/mach-omap1/sleep.S
|
/*
* linux/arch/arm/mach-omap1/sleep.S
*
* Low-level OMAP7XX/1510/1610 sleep/wakeUp support
*
* Initial SA1110 code:
* Copyright (c) 2001 Cliff Brake <cbrake@accelent.com>
*
* Adapted for PXA by Nicolas Pitre:
* Copyright (c) 2002 Monta Vista Software, Inc.
*
* Support for OMAP1510/1610 by Dirk Behme <dirk.behme@de.bosch.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <mach/hardware.h>
#include "iomap.h"
#include "pm.h"
.text
/*
* Forces OMAP into deep sleep state
*
* omapXXXX_cpu_suspend()
*
* The values of the registers ARM_IDLECT1 and ARM_IDLECT2 are passed
* as arg0 and arg1 from caller. arg0 is stored in register r0 and arg1
* in register r1.
*
* Note: This code gets copied to internal SRAM at boot. When the OMAP
* wakes up it continues execution at the point it went to sleep.
*
* Note: Because of errata workarounds we have processor-specific functions
* here. They are mostly the same, but slightly different.
*
*/
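/*
 * Hedged sketch of how these routines are typically invoked (assumption:
 * this mirrors the omap_sram_push() pattern used by the OMAP1 PM code; the
 * exact call site is illustrative):
 *
 *	static void (*omap_sram_suspend)(unsigned long, unsigned long);
 *
 *	omap_sram_suspend = omap_sram_push(omap1510_cpu_suspend,
 *					   omap1510_cpu_suspend_sz);
 *	...
 *	omap_sram_suspend(omap_readl(ARM_IDLECT1), omap_readl(ARM_IDLECT2));
 */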
#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
.align 3
ENTRY(omap7xx_cpu_suspend)
@ save registers on stack
stmfd sp!, {r0 - r12, lr}
@ Drain write cache
mov r4, #0
mcr p15, 0, r0, c7, c10, 4
nop
@ load base address of Traffic Controller
mov r6, #TCMIF_ASM_BASE & 0xff000000
orr r6, r6, #TCMIF_ASM_BASE & 0x00ff0000
orr r6, r6, #TCMIF_ASM_BASE & 0x0000ff00
@ prepare to put SDRAM into self-refresh manually
ldr r7, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
orr r9, r7, #SELF_REFRESH_MODE & 0xff000000
orr r9, r9, #SELF_REFRESH_MODE & 0x000000ff
str r9, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
@ prepare to put EMIFS to Sleep
ldr r8, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
orr r9, r8, #IDLE_EMIFS_REQUEST & 0xff
str r9, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
@ load base address of ARM_IDLECT1 and ARM_IDLECT2
mov r4, #CLKGEN_REG_ASM_BASE & 0xff000000
orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x00ff0000
orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x0000ff00
@ turn off clock domains
@ do not disable PERCK (0x04)
mov r5, #OMAP7XX_IDLECT2_SLEEP_VAL & 0xff
orr r5, r5, #OMAP7XX_IDLECT2_SLEEP_VAL & 0xff00
strh r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
@ request ARM idle
mov r3, #OMAP7XX_IDLECT1_SLEEP_VAL & 0xff
orr r3, r3, #OMAP7XX_IDLECT1_SLEEP_VAL & 0xff00
strh r3, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
@ disable instruction cache
mrc p15, 0, r9, c1, c0, 0
bic r2, r9, #0x1000
mcr p15, 0, r2, c1, c0, 0
nop
/*
* Let's wait for the next wake up event to wake us up. r0 can't be
* used here because r0 holds ARM_IDLECT1
*/
mov r2, #0
mcr p15, 0, r2, c7, c0, 4 @ wait for interrupt
/*
* omap7xx_cpu_suspend()'s resume point.
*
* It will just start executing here, so we'll restore stuff from the
* stack.
*/
@ re-enable Icache
mcr p15, 0, r9, c1, c0, 0
@ reset the ARM_IDLECT1 and ARM_IDLECT2.
strh r1, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
strh r0, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
@ Restore EMIFF controls
str r7, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
str r8, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
@ restore regs and return
ldmfd sp!, {r0 - r12, pc}
ENTRY(omap7xx_cpu_suspend_sz)
.word . - omap7xx_cpu_suspend
#endif /* CONFIG_ARCH_OMAP730 || CONFIG_ARCH_OMAP850 */
#ifdef CONFIG_ARCH_OMAP15XX
.align 3
ENTRY(omap1510_cpu_suspend)
@ save registers on stack
stmfd sp!, {r0 - r12, lr}
@ load base address of Traffic Controller
mov r4, #TCMIF_ASM_BASE & 0xff000000
orr r4, r4, #TCMIF_ASM_BASE & 0x00ff0000
orr r4, r4, #TCMIF_ASM_BASE & 0x0000ff00
@ work around errata of OMAP1510 PDE bit for TC shut down
@ clear PDE bit
ldr r5, [r4, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
bic r5, r5, #PDE_BIT & 0xff
str r5, [r4, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
@ set PWD_EN bit
and r5, r5, #PWD_EN_BIT & 0xff
str r5, [r4, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
@ prepare to put SDRAM into self-refresh manually
ldr r5, [r4, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
orr r5, r5, #SELF_REFRESH_MODE & 0xff000000
orr r5, r5, #SELF_REFRESH_MODE & 0x000000ff
str r5, [r4, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
@ prepare to put EMIFS to Sleep
ldr r5, [r4, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
orr r5, r5, #IDLE_EMIFS_REQUEST & 0xff
str r5, [r4, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
@ load base address of ARM_IDLECT1 and ARM_IDLECT2
mov r4, #CLKGEN_REG_ASM_BASE & 0xff000000
orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x00ff0000
orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x0000ff00
@ turn off clock domains
mov r5, #OMAP1510_IDLE_CLOCK_DOMAINS & 0xff
orr r5, r5, #OMAP1510_IDLE_CLOCK_DOMAINS & 0xff00
strh r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
@ request ARM idle
mov r3, #OMAP1510_DEEP_SLEEP_REQUEST & 0xff
orr r3, r3, #OMAP1510_DEEP_SLEEP_REQUEST & 0xff00
strh r3, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
mov r5, #IDLE_WAIT_CYCLES & 0xff
orr r5, r5, #IDLE_WAIT_CYCLES & 0xff00
l_1510_2:
subs r5, r5, #1
bne l_1510_2
/*
* Let's wait for the next wake up event to wake us up. r0 can't be
* used here because r0 holds ARM_IDLECT1
*/
mov r2, #0
mcr p15, 0, r2, c7, c0, 4 @ wait for interrupt
/*
* omap1510_cpu_suspend()'s resume point.
*
* It will just start executing here, so we'll restore stuff from the
* stack, reset the ARM_IDLECT1 and ARM_IDLECT2.
*/
strh r1, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
strh r0, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
@ restore regs and return
ldmfd sp!, {r0 - r12, pc}
ENTRY(omap1510_cpu_suspend_sz)
.word . - omap1510_cpu_suspend
#endif /* CONFIG_ARCH_OMAP15XX */
#if defined(CONFIG_ARCH_OMAP16XX)
.align 3
ENTRY(omap1610_cpu_suspend)
@ save registers on stack
stmfd sp!, {r0 - r12, lr}
@ Drain write cache
mov r4, #0
mcr p15, 0, r0, c7, c10, 4
nop
@ Load base address of Traffic Controller
mov r6, #TCMIF_ASM_BASE & 0xff000000
orr r6, r6, #TCMIF_ASM_BASE & 0x00ff0000
orr r6, r6, #TCMIF_ASM_BASE & 0x0000ff00
@ Prepare to put SDRAM into self-refresh manually
ldr r7, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
orr r9, r7, #SELF_REFRESH_MODE & 0xff000000
orr r9, r9, #SELF_REFRESH_MODE & 0x000000ff
str r9, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
@ Prepare to put EMIFS to Sleep
ldr r8, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
orr r9, r8, #IDLE_EMIFS_REQUEST & 0xff
str r9, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
@ Load base address of ARM_IDLECT1 and ARM_IDLECT2
mov r4, #CLKGEN_REG_ASM_BASE & 0xff000000
orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x00ff0000
orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x0000ff00
@ Turn off clock domains
@ Do not disable PERCK (0x04)
mov r5, #OMAP1610_IDLECT2_SLEEP_VAL & 0xff
orr r5, r5, #OMAP1610_IDLECT2_SLEEP_VAL & 0xff00
strh r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
@ Request ARM idle
mov r3, #OMAP1610_IDLECT1_SLEEP_VAL & 0xff
orr r3, r3, #OMAP1610_IDLECT1_SLEEP_VAL & 0xff00
strh r3, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
/*
* Let's wait for the next wake up event to wake us up. r0 can't be
* used here because r0 holds ARM_IDLECT1
*/
mov r2, #0
mcr p15, 0, r2, c7, c0, 4 @ wait for interrupt
@ Errata (HEL3SU467, section 1.4.4) specifies nop-instructions
@ according to this formula:
@ 2 + (4*DPLL_MULT)/DPLL_DIV/ARMDIV
@ Max DPLL_MULT = 18
@ DPLL_DIV = 1
@ ARMDIV = 1
@ => 74 nop-instructions
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop @10
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop @20
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop @30
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop @40
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop @50
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop @60
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop @70
nop
nop
nop
nop @74
/*
* omap1610_cpu_suspend()'s resume point.
*
* It will just start executing here, so we'll restore stuff from the
* stack.
*/
@ Restore the ARM_IDLECT1 and ARM_IDLECT2.
strh r1, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
strh r0, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
@ Restore EMIFF controls
str r7, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
str r8, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
@ Restore regs and return
ldmfd sp!, {r0 - r12, pc}
ENTRY(omap1610_cpu_suspend_sz)
.word . - omap1610_cpu_suspend
#endif /* CONFIG_ARCH_OMAP16XX */
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,276
|
arch/arm/mach-shmobile/headsmp-scu.S
|
/*
* Shared SCU setup for mach-shmobile
*
* Copyright (C) 2012 Bastian Hecht
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/memory.h>
/*
* Boot code for secondary CPUs.
*
* First we turn on L1 cache coherency for our CPU. Then we jump to
* secondary_startup that invalidates the cache and hands over control
* to the common ARM startup code.
*/
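/*
 * Hedged C model of the SCU update below (offset 0x08 is the SCU CPU Power
 * Status register; illustrative only):
 *
 *	u32 pwrst = readl(scu_base + 0x08);
 *	pwrst &= ~(0x3 << (cpu_id * 8));	// 0b00 = normal (run) mode
 *	writel(pwrst, scu_base + 0x08);
 */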
ENTRY(shmobile_boot_scu)
@ r0 = SCU base address
mrc p15, 0, r1, c0, c0, 5 @ read MPIDR
and r1, r1, #3 @ mask out cpu ID
lsl r1, r1, #3 @ we will shift by cpu_id * 8 bits
ldr r2, [r0, #8] @ SCU Power Status Register
mov r3, #3
lsl r3, r3, r1
bic r2, r2, r3 @ Clear bits of our CPU (Run Mode)
str r2, [r0, #8] @ write back
b secondary_startup
ENDPROC(shmobile_boot_scu)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,240
|
arch/arm/mach-shmobile/headsmp.S
|
/*
* SMP support for R-Mobile / SH-Mobile
*
* Copyright (C) 2010 Magnus Damm
* Copyright (C) 2010 Takashi Yoshii
*
* Based on vexpress, Copyright (c) 2003 ARM Limited, All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#define SCTLR_MMU 0x01
#define BOOTROM_ADDRESS 0xE6340000
#define RWTCSRA_ADDRESS 0xE6020004
#define RWTCSRA_WOVF 0x10
/*
* Reset vector for secondary CPUs.
* This will be mapped at address 0 by SBAR register.
* We need _long_ jump to the physical address.
*/
.arm
.align 12
ENTRY(shmobile_boot_vector)
ldr r1, 1f
bx r1
ENDPROC(shmobile_boot_vector)
.align 2
.globl shmobile_boot_fn
shmobile_boot_fn:
1: .space 4
.globl shmobile_boot_size
shmobile_boot_size:
.long . - shmobile_boot_vector
#ifdef CONFIG_ARCH_RCAR_GEN2
/*
* Reset vector for R-Car Gen2 and RZ/G1 secondary CPUs.
* This will be mapped at address 0 by SBAR register.
*/
ENTRY(shmobile_boot_vector_gen2)
mrc p15, 0, r0, c0, c0, 5 @ r0 = MPIDR
ldr r1, shmobile_boot_cpu_gen2
cmp r0, r1
bne shmobile_smp_continue_gen2
mrc p15, 0, r1, c1, c0, 0 @ r1 = SCTLR
and r0, r1, #SCTLR_MMU
cmp r0, #SCTLR_MMU
beq shmobile_smp_continue_gen2
ldr r0, rwtcsra
mov r1, #0
ldrb r1, [r0]
and r0, r1, #RWTCSRA_WOVF
cmp r0, #RWTCSRA_WOVF
bne shmobile_smp_continue_gen2
ldr r0, bootrom
bx r0
shmobile_smp_continue_gen2:
ldr r1, shmobile_boot_fn_gen2
bx r1
ENDPROC(shmobile_boot_vector_gen2)
.align 4
rwtcsra:
.word RWTCSRA_ADDRESS
bootrom:
.word BOOTROM_ADDRESS
.globl shmobile_boot_cpu_gen2
shmobile_boot_cpu_gen2:
.word 0x00000000
.align 2
.globl shmobile_boot_fn_gen2
shmobile_boot_fn_gen2:
.space 4
.globl shmobile_boot_size_gen2
shmobile_boot_size_gen2:
.long . - shmobile_boot_vector_gen2
#endif /* CONFIG_ARCH_RCAR_GEN2 */
/*
* Per-CPU SMP boot function/argument selection code based on MPIDR
*/
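/*
 * Hedged C model of the table walk below (illustrative only):
 *
 *	cpu_hwid = mpidr & 0xffffff;
 *	for (i = 0; i < NR_CPUS; i++)
 *		if (shmobile_smp_mpidr[i] == cpu_hwid && shmobile_smp_fn[i])
 *			shmobile_smp_fn[i](shmobile_smp_arg[i]); // no return
 *	shmobile_smp_sleep();	// wfi, then retry the scan
 */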
ENTRY(shmobile_smp_boot)
mrc p15, 0, r1, c0, c0, 5 @ r1 = MPIDR
and r0, r1, #0xffffff @ MPIDR_HWID_BITMASK
@ r0 = cpu_logical_map() value
mov r1, #0 @ r1 = CPU index
adr r2, 1f
ldmia r2, {r5, r6, r7}
add r5, r5, r2 @ array of per-cpu mpidr values
add r6, r6, r2 @ array of per-cpu functions
add r7, r7, r2 @ array of per-cpu arguments
shmobile_smp_boot_find_mpidr:
ldr r8, [r5, r1, lsl #2]
cmp r8, r0
bne shmobile_smp_boot_next
ldr r9, [r6, r1, lsl #2]
cmp r9, #0
bne shmobile_smp_boot_found
shmobile_smp_boot_next:
add r1, r1, #1
cmp r1, #NR_CPUS
blo shmobile_smp_boot_find_mpidr
b shmobile_smp_sleep
shmobile_smp_boot_found:
ldr r0, [r7, r1, lsl #2]
ret r9
ENDPROC(shmobile_smp_boot)
ENTRY(shmobile_smp_sleep)
wfi
b shmobile_smp_boot
ENDPROC(shmobile_smp_sleep)
.align 2
1: .long shmobile_smp_mpidr - .
.long shmobile_smp_fn - 1b
.long shmobile_smp_arg - 1b
.bss
.globl shmobile_smp_mpidr
shmobile_smp_mpidr:
.space NR_CPUS * 4
.globl shmobile_smp_fn
shmobile_smp_fn:
.space NR_CPUS * 4
.globl shmobile_smp_arg
shmobile_smp_arg:
.space NR_CPUS * 4
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,296
|
arch/arm/lib/copy_to_user.S
|
/*
* linux/arch/arm/lib/copy_to_user.S
*
* Author: Nicolas Pitre
* Created: Sep 29, 2005
* Copyright: MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
/*
* Prototype:
*
* size_t arm_copy_to_user(void *to, const void *from, size_t n)
*
* Purpose:
*
* copy a block to user memory from kernel memory
*
* Params:
*
* to = user memory
* from = kernel memory
* n = number of bytes to copy
*
* Return value:
*
* Number of bytes NOT copied.
*/
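/*
 * Hedged usage sketch (callers normally reach this via copy_to_user(); the
 * return-value convention is the part worth remembering):
 *
 *	unsigned long left = arm_copy_to_user(uptr, kbuf, n);
 *	if (left)		// 'left' trailing bytes were NOT copied
 *		return -EFAULT;
 */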
#define LDR1W_SHIFT 0
#ifndef CONFIG_THUMB2_KERNEL
#define STR1W_SHIFT 0
#else
#define STR1W_SHIFT 1
#endif
.macro ldr1w ptr reg abort
W(ldr) \reg, [\ptr], #4
.endm
.macro ldr4w ptr reg1 reg2 reg3 reg4 abort
ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4}
.endm
.macro ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8}
.endm
.macro ldr1b ptr reg cond=al abort
ldr\cond\()b \reg, [\ptr], #1
.endm
.macro str1w ptr reg abort
strusr \reg, \ptr, 4, abort=\abort
.endm
.macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
str1w \ptr, \reg1, \abort
str1w \ptr, \reg2, \abort
str1w \ptr, \reg3, \abort
str1w \ptr, \reg4, \abort
str1w \ptr, \reg5, \abort
str1w \ptr, \reg6, \abort
str1w \ptr, \reg7, \abort
str1w \ptr, \reg8, \abort
.endm
.macro str1b ptr reg cond=al abort
strusr \reg, \ptr, 1, \cond, abort=\abort
.endm
.macro enter reg1 reg2
mov r3, #0
stmdb sp!, {r0, r2, r3, \reg1, \reg2}
.endm
.macro usave reg1 reg2
UNWIND( .save {r0, r2, r3, \reg1, \reg2} )
.endm
.macro exit reg1 reg2
add sp, sp, #8
ldmfd sp!, {r0, \reg1, \reg2}
.endm
.text
ENTRY(__copy_to_user_std)
WEAK(arm_copy_to_user)
#ifdef CONFIG_CPU_SPECTRE
get_thread_info r3
ldr r3, [r3, #TI_ADDR_LIMIT]
uaccess_mask_range_ptr r0, r2, r3, ip
#endif
#include "copy_template.S"
ENDPROC(arm_copy_to_user)
ENDPROC(__copy_to_user_std)
.pushsection .text.fixup,"ax"
.align 0
copy_abort_preamble
ldmfd sp!, {r1, r2, r3}
sub r0, r0, r1
rsb r0, r0, r2
copy_abort_end
.popsection
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,440
|
arch/arm/lib/clear_user.S
|
/*
* linux/arch/arm/lib/clear_user.S
*
* Copyright (C) 1995, 1996,1997,1998 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
.text
/* Prototype: unsigned long arm_clear_user(void *addr, size_t sz)
* Purpose : clear some user memory
* Params : addr - user memory address to clear
* : sz - number of bytes to clear
* Returns : number of bytes NOT cleared
*/
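/*
* Illustrative usage sketch (an assumption, not part of the original file):
* clearing a user buffer via the generic clear_user() wrapper and turning a
* partial clear into a fault; buffer names are for illustration only.
*
*	if (clear_user(user_buf, len))	// returns the number of bytes NOT cleared
*		return -EFAULT;
*/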
ENTRY(__clear_user_std)
WEAK(arm_clear_user)
UNWIND(.fnstart)
UNWIND(.save {r1, lr})
stmfd sp!, {r1, lr}
mov r2, #0
cmp r1, #4
blt 2f
ands ip, r0, #3
beq 1f
cmp ip, #2
strusr r2, r0, 1
strusr r2, r0, 1, le
strusr r2, r0, 1, lt
rsb ip, ip, #4
sub r1, r1, ip @ 7 6 5 4 3 2 1
1: subs r1, r1, #8 @ -1 -2 -3 -4 -5 -6 -7
strusr r2, r0, 4, pl, rept=2
bpl 1b
adds r1, r1, #4 @ 3 2 1 0 -1 -2 -3
strusr r2, r0, 4, pl
2: tst r1, #2 @ 1x 1x 0x 0x 1x 1x 0x
strusr r2, r0, 1, ne, rept=2
tst r1, #1 @ x1 x0 x1 x0 x1 x0 x1
it ne @ explicit IT needed for the label
USER( strnebt r2, [r0])
mov r0, #0
ldmfd sp!, {r1, pc}
UNWIND(.fnend)
ENDPROC(arm_clear_user)
ENDPROC(__clear_user_std)
.pushsection .text.fixup,"ax"
.align 0
9001: ldmfd sp!, {r0, pc}
.popsection
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,122
|
arch/arm/lib/csumpartial.S
|
/*
* linux/arch/arm/lib/csumpartial.S
*
* Copyright (C) 1995-1998 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
/*
* Function: __u32 csum_partial(const char *src, int len, __u32 sum)
* Params : r0 = buffer, r1 = len, r2 = checksum
* Returns : r0 = new checksum
*/
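/*
* Illustrative sketch (an assumption, not part of the original file): callers
* normally fold the 32-bit partial sum returned here into the final 16-bit
* ones'-complement Internet checksum, e.g.
*
*	__wsum sum = csum_partial(buf, len, 0);
*	__sum16 csum = csum_fold(sum);	// fold 32 bits down to 16
*/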
buf .req r0
len .req r1
sum .req r2
td0 .req r3
td1 .req r4 @ save before use
td2 .req r5 @ save before use
td3 .req lr
.Lzero: mov r0, sum
add sp, sp, #4
ldr pc, [sp], #4
/*
* Handle 0 to 7 bytes, with any alignment of source and
* destination pointers. Note that when we get here, C = 0
*/
.Lless8: teq len, #0 @ check for zero count
beq .Lzero
/* we must have at least one byte. */
tst buf, #1 @ odd address?
movne sum, sum, ror #8
ldrneb td0, [buf], #1
subne len, len, #1
adcnes sum, sum, td0, put_byte_1
.Lless4: tst len, #6
beq .Lless8_byte
/* we are now half-word aligned */
.Lless8_wordlp:
#if __LINUX_ARM_ARCH__ >= 4
ldrh td0, [buf], #2
sub len, len, #2
#else
ldrb td0, [buf], #1
ldrb td3, [buf], #1
sub len, len, #2
#ifndef __ARMEB__
orr td0, td0, td3, lsl #8
#else
orr td0, td3, td0, lsl #8
#endif
#endif
adcs sum, sum, td0
tst len, #6
bne .Lless8_wordlp
.Lless8_byte: tst len, #1 @ odd number of bytes
ldrneb td0, [buf], #1 @ include last byte
adcnes sum, sum, td0, put_byte_0 @ update checksum
.Ldone: adc r0, sum, #0 @ collect up the last carry
ldr td0, [sp], #4
tst td0, #1 @ check buffer alignment
movne r0, r0, ror #8 @ rotate checksum by 8 bits
ldr pc, [sp], #4 @ return
.Lnot_aligned: tst buf, #1 @ odd address
ldrneb td0, [buf], #1 @ make even
subne len, len, #1
adcnes sum, sum, td0, put_byte_1 @ update checksum
tst buf, #2 @ 32-bit aligned?
#if __LINUX_ARM_ARCH__ >= 4
ldrneh td0, [buf], #2 @ make 32-bit aligned
subne len, len, #2
#else
ldrneb td0, [buf], #1
ldrneb ip, [buf], #1
subne len, len, #2
#ifndef __ARMEB__
orrne td0, td0, ip, lsl #8
#else
orrne td0, ip, td0, lsl #8
#endif
#endif
adcnes sum, sum, td0 @ update checksum
ret lr
ENTRY(csum_partial)
stmfd sp!, {buf, lr}
cmp len, #8 @ Ensure that we have at least
blo .Lless8 @ 8 bytes to copy.
tst buf, #1
movne sum, sum, ror #8
adds sum, sum, #0 @ C = 0
tst buf, #3 @ Test destination alignment
blne .Lnot_aligned @ align destination, return here
1: bics ip, len, #31
beq 3f
stmfd sp!, {r4 - r5}
2: ldmia buf!, {td0, td1, td2, td3}
adcs sum, sum, td0
adcs sum, sum, td1
adcs sum, sum, td2
adcs sum, sum, td3
ldmia buf!, {td0, td1, td2, td3}
adcs sum, sum, td0
adcs sum, sum, td1
adcs sum, sum, td2
adcs sum, sum, td3
sub ip, ip, #32
teq ip, #0
bne 2b
ldmfd sp!, {r4 - r5}
3: tst len, #0x1c @ should not change C
beq .Lless4
4: ldr td0, [buf], #4
sub len, len, #4
adcs sum, sum, td0
tst len, #0x1c
bne 4b
b .Lless4
ENDPROC(csum_partial)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,061
|
arch/arm/lib/div64.S
|
/*
* linux/arch/arm/lib/div64.S
*
* Optimized computation of 64-bit dividend / 32-bit divisor
*
* Author: Nicolas Pitre
* Created: Oct 5, 2003
* Copyright: Monta Vista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#ifdef __ARMEB__
#define xh r0
#define xl r1
#define yh r2
#define yl r3
#else
#define xl r0
#define xh r1
#define yl r2
#define yh r3
#endif
/*
* __do_div64: perform a division with 64-bit dividend and 32-bit divisor.
*
* Note: Calling convention is totally non standard for optimal code.
* This is meant to be used by do_div() from include/asm/div64.h only.
*
* Input parameters:
* xh-xl = dividend (clobbered)
* r4 = divisor (preserved)
*
* Output values:
* yh-yl = result
* xh = remainder
*
* Clobbered regs: xl, ip
*/
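/*
* Illustrative sketch (an assumption, not part of the original file): this
* routine is only reached through the do_div() macro mentioned above, which
* is roughly used as
*
*	u64 n = 1000000123ULL;
*	u32 rem = do_div(n, 60);	// n becomes the quotient, rem the remainder
*/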
ENTRY(__do_div64)
UNWIND(.fnstart)
@ Test for easy paths first.
subs ip, r4, #1
bls 9f @ divisor is 0 or 1
tst ip, r4
beq 8f @ divisor is power of 2
@ See if we need to handle upper 32-bit result.
cmp xh, r4
mov yh, #0
blo 3f
@ Align divisor with upper part of dividend.
@ The aligned divisor is stored in yl preserving the original.
@ The bit position is stored in ip.
#if __LINUX_ARM_ARCH__ >= 5
clz yl, r4
clz ip, xh
sub yl, yl, ip
mov ip, #1
mov ip, ip, lsl yl
mov yl, r4, lsl yl
#else
mov yl, r4
mov ip, #1
1: cmp yl, #0x80000000
cmpcc yl, xh
movcc yl, yl, lsl #1
movcc ip, ip, lsl #1
bcc 1b
#endif
@ The division loop for needed upper bit positions.
@ Break out early if dividend reaches 0.
2: cmp xh, yl
orrcs yh, yh, ip
subcss xh, xh, yl
movnes ip, ip, lsr #1
mov yl, yl, lsr #1
bne 2b
@ See if we need to handle lower 32-bit result.
3: cmp xh, #0
mov yl, #0
cmpeq xl, r4
movlo xh, xl
retlo lr
@ The division loop for lower bit positions.
@ Here we shift remainder bits leftwards rather than moving the
@ divisor for comparisons, considering the carry-out bit as well.
mov ip, #0x80000000
4: movs xl, xl, lsl #1
adcs xh, xh, xh
beq 6f
cmpcc xh, r4
5: orrcs yl, yl, ip
subcs xh, xh, r4
movs ip, ip, lsr #1
bne 4b
ret lr
@ The top part of remainder became zero. If carry is set
@ (the 33rd bit) this is a false positive so resume the loop.
@ Otherwise, if lower part is also null then we are done.
6: bcs 5b
cmp xl, #0
reteq lr
@ We still have remainder bits in the low part. Bring them up.
#if __LINUX_ARM_ARCH__ >= 5
clz xh, xl @ we know xh is zero here so...
add xh, xh, #1
mov xl, xl, lsl xh
mov ip, ip, lsr xh
#else
7: movs xl, xl, lsl #1
mov ip, ip, lsr #1
bcc 7b
#endif
@ Current remainder is now 1. It is worthless to compare with
@ divisor at this point since divisor can not be smaller than 3 here.
@ If possible, branch for another shift in the division loop.
@ If no bit position left then we are done.
movs ip, ip, lsr #1
mov xh, #1
bne 4b
ret lr
8: @ Division by a power of 2: determine what that divisor order is
@ then simply shift values around
#if __LINUX_ARM_ARCH__ >= 5
clz ip, r4
rsb ip, ip, #31
#else
mov yl, r4
cmp r4, #(1 << 16)
mov ip, #0
movhs yl, yl, lsr #16
movhs ip, #16
cmp yl, #(1 << 8)
movhs yl, yl, lsr #8
addhs ip, ip, #8
cmp yl, #(1 << 4)
movhs yl, yl, lsr #4
addhs ip, ip, #4
cmp yl, #(1 << 2)
addhi ip, ip, #3
addls ip, ip, yl, lsr #1
#endif
mov yh, xh, lsr ip
mov yl, xl, lsr ip
rsb ip, ip, #32
ARM( orr yl, yl, xh, lsl ip )
THUMB( lsl xh, xh, ip )
THUMB( orr yl, yl, xh )
mov xh, xl, lsl ip
mov xh, xh, lsr ip
ret lr
@ eq -> division by 1: obvious enough...
9: moveq yl, xl
moveq yh, xh
moveq xh, #0
reteq lr
UNWIND(.fnend)
UNWIND(.fnstart)
UNWIND(.pad #4)
UNWIND(.save {lr})
Ldiv0_64:
@ Division by 0:
str lr, [sp, #-8]!
bl __div0
@ as wrong as it could be...
mov yl, #0
mov yh, #0
mov xh, #0
ldr pc, [sp], #8
UNWIND(.fnend)
ENDPROC(__do_div64)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 6,965
|
arch/arm/lib/csumpartialcopygeneric.S
|
/*
* linux/arch/arm/lib/csumpartialcopygeneric.S
*
* Copyright (C) 1995-2001 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/assembler.h>
/*
* unsigned int
* csum_partial_copy_xxx(const char *src, char *dst, int len, int sum)
* r0 = src, r1 = dst, r2 = len, r3 = sum
* Returns : r0 = checksum
*
* Note that 'tst' and 'teq' preserve the carry flag.
*/
src .req r0
dst .req r1
len .req r2
sum .req r3
.Lzero: mov r0, sum
load_regs
/*
* Align an unaligned destination pointer. We know that
* we have >= 8 bytes here, so we don't need to check
* the length. Note that the source pointer hasn't been
* aligned yet.
*/
.Ldst_unaligned:
tst dst, #1
beq .Ldst_16bit
load1b ip
sub len, len, #1
adcs sum, sum, ip, put_byte_1 @ update checksum
strb ip, [dst], #1
tst dst, #2
reteq lr @ dst is now 32bit aligned
.Ldst_16bit: load2b r8, ip
sub len, len, #2
adcs sum, sum, r8, put_byte_0
strb r8, [dst], #1
adcs sum, sum, ip, put_byte_1
strb ip, [dst], #1
ret lr @ dst is now 32bit aligned
/*
* Handle 0 to 7 bytes, with any alignment of source and
* destination pointers. Note that when we get here, C = 0
*/
.Lless8: teq len, #0 @ check for zero count
beq .Lzero
/* we must have at least one byte. */
tst dst, #1 @ dst 16-bit aligned
beq .Lless8_aligned
/* Align dst */
load1b ip
sub len, len, #1
adcs sum, sum, ip, put_byte_1 @ update checksum
strb ip, [dst], #1
tst len, #6
beq .Lless8_byteonly
1: load2b r8, ip
sub len, len, #2
adcs sum, sum, r8, put_byte_0
strb r8, [dst], #1
adcs sum, sum, ip, put_byte_1
strb ip, [dst], #1
.Lless8_aligned:
tst len, #6
bne 1b
.Lless8_byteonly:
tst len, #1
beq .Ldone
load1b r8
adcs sum, sum, r8, put_byte_0 @ update checksum
strb r8, [dst], #1
b .Ldone
FN_ENTRY
save_regs
cmp len, #8 @ Ensure that we have at least
blo .Lless8 @ 8 bytes to copy.
adds sum, sum, #0 @ C = 0
tst dst, #3 @ Test destination alignment
blne .Ldst_unaligned @ align destination, return here
/*
* Ok, the dst pointer is now 32bit aligned, and we know
* that we must have more than 4 bytes to copy. Note
* that C contains the carry from the dst alignment above.
*/
tst src, #3 @ Test source alignment
bne .Lsrc_not_aligned
/* Routine for src & dst aligned */
bics ip, len, #15
beq 2f
1: load4l r4, r5, r6, r7
stmia dst!, {r4, r5, r6, r7}
adcs sum, sum, r4
adcs sum, sum, r5
adcs sum, sum, r6
adcs sum, sum, r7
sub ip, ip, #16
teq ip, #0
bne 1b
2: ands ip, len, #12
beq 4f
tst ip, #8
beq 3f
load2l r4, r5
stmia dst!, {r4, r5}
adcs sum, sum, r4
adcs sum, sum, r5
tst ip, #4
beq 4f
3: load1l r4
str r4, [dst], #4
adcs sum, sum, r4
4: ands len, len, #3
beq .Ldone
load1l r4
tst len, #2
mov r5, r4, get_byte_0
beq .Lexit
adcs sum, sum, r4, lspush #16
strb r5, [dst], #1
mov r5, r4, get_byte_1
strb r5, [dst], #1
mov r5, r4, get_byte_2
.Lexit: tst len, #1
strneb r5, [dst], #1
andne r5, r5, #255
adcnes sum, sum, r5, put_byte_0
/*
* If the dst pointer was not 16-bit aligned, we
* need to rotate the checksum here to get around
* the inefficient byte manipulations in the
* architecture independent code.
*/
.Ldone: adc r0, sum, #0
ldr sum, [sp, #0] @ dst
tst sum, #1
movne r0, r0, ror #8
load_regs
.Lsrc_not_aligned:
adc sum, sum, #0 @ include C from dst alignment
and ip, src, #3
bic src, src, #3
load1l r5
cmp ip, #2
beq .Lsrc2_aligned
bhi .Lsrc3_aligned
mov r4, r5, lspull #8 @ C = 0
bics ip, len, #15
beq 2f
1: load4l r5, r6, r7, r8
orr r4, r4, r5, lspush #24
mov r5, r5, lspull #8
orr r5, r5, r6, lspush #24
mov r6, r6, lspull #8
orr r6, r6, r7, lspush #24
mov r7, r7, lspull #8
orr r7, r7, r8, lspush #24
stmia dst!, {r4, r5, r6, r7}
adcs sum, sum, r4
adcs sum, sum, r5
adcs sum, sum, r6
adcs sum, sum, r7
mov r4, r8, lspull #8
sub ip, ip, #16
teq ip, #0
bne 1b
2: ands ip, len, #12
beq 4f
tst ip, #8
beq 3f
load2l r5, r6
orr r4, r4, r5, lspush #24
mov r5, r5, lspull #8
orr r5, r5, r6, lspush #24
stmia dst!, {r4, r5}
adcs sum, sum, r4
adcs sum, sum, r5
mov r4, r6, lspull #8
tst ip, #4
beq 4f
3: load1l r5
orr r4, r4, r5, lspush #24
str r4, [dst], #4
adcs sum, sum, r4
mov r4, r5, lspull #8
4: ands len, len, #3
beq .Ldone
mov r5, r4, get_byte_0
tst len, #2
beq .Lexit
adcs sum, sum, r4, lspush #16
strb r5, [dst], #1
mov r5, r4, get_byte_1
strb r5, [dst], #1
mov r5, r4, get_byte_2
b .Lexit
.Lsrc2_aligned: mov r4, r5, lspull #16
adds sum, sum, #0
bics ip, len, #15
beq 2f
1: load4l r5, r6, r7, r8
orr r4, r4, r5, lspush #16
mov r5, r5, lspull #16
orr r5, r5, r6, lspush #16
mov r6, r6, lspull #16
orr r6, r6, r7, lspush #16
mov r7, r7, lspull #16
orr r7, r7, r8, lspush #16
stmia dst!, {r4, r5, r6, r7}
adcs sum, sum, r4
adcs sum, sum, r5
adcs sum, sum, r6
adcs sum, sum, r7
mov r4, r8, lspull #16
sub ip, ip, #16
teq ip, #0
bne 1b
2: ands ip, len, #12
beq 4f
tst ip, #8
beq 3f
load2l r5, r6
orr r4, r4, r5, lspush #16
mov r5, r5, lspull #16
orr r5, r5, r6, lspush #16
stmia dst!, {r4, r5}
adcs sum, sum, r4
adcs sum, sum, r5
mov r4, r6, lspull #16
tst ip, #4
beq 4f
3: load1l r5
orr r4, r4, r5, lspush #16
str r4, [dst], #4
adcs sum, sum, r4
mov r4, r5, lspull #16
4: ands len, len, #3
beq .Ldone
mov r5, r4, get_byte_0
tst len, #2
beq .Lexit
adcs sum, sum, r4
strb r5, [dst], #1
mov r5, r4, get_byte_1
strb r5, [dst], #1
tst len, #1
beq .Ldone
load1b r5
b .Lexit
.Lsrc3_aligned: mov r4, r5, lspull #24
adds sum, sum, #0
bics ip, len, #15
beq 2f
1: load4l r5, r6, r7, r8
orr r4, r4, r5, lspush #8
mov r5, r5, lspull #24
orr r5, r5, r6, lspush #8
mov r6, r6, lspull #24
orr r6, r6, r7, lspush #8
mov r7, r7, lspull #24
orr r7, r7, r8, lspush #8
stmia dst!, {r4, r5, r6, r7}
adcs sum, sum, r4
adcs sum, sum, r5
adcs sum, sum, r6
adcs sum, sum, r7
mov r4, r8, lspull #24
sub ip, ip, #16
teq ip, #0
bne 1b
2: ands ip, len, #12
beq 4f
tst ip, #8
beq 3f
load2l r5, r6
orr r4, r4, r5, lspush #8
mov r5, r5, lspull #24
orr r5, r5, r6, lspush #8
stmia dst!, {r4, r5}
adcs sum, sum, r4
adcs sum, sum, r5
mov r4, r6, lspull #24
tst ip, #4
beq 4f
3: load1l r5
orr r4, r4, r5, lspush #8
str r4, [dst], #4
adcs sum, sum, r4
mov r4, r5, lspull #24
4: ands len, len, #3
beq .Ldone
mov r5, r4, get_byte_0
tst len, #2
beq .Lexit
strb r5, [dst], #1
adcs sum, sum, r4
load1l r4
mov r5, r4, get_byte_0
strb r5, [dst], #1
adcs sum, sum, r4, lspush #24
mov r5, r4, get_byte_1
b .Lexit
FN_EXIT
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,263
|
arch/arm/lib/backtrace.S
|
/*
* linux/arch/arm/lib/backtrace.S
*
* Copyright (C) 1995, 1996 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* 27/03/03 Ian Molton Clean up CONFIG_CPU
*
*/
#include <linux/kern_levels.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
@ fp is 0 or stack frame
#define frame r4
#define sv_fp r5
#define sv_pc r6
#define mask r7
#define offset r8
ENTRY(c_backtrace)
#if !defined(CONFIG_FRAME_POINTER) || !defined(CONFIG_PRINTK)
ret lr
ENDPROC(c_backtrace)
#else
stmfd sp!, {r4 - r8, lr} @ Save an extra register so we have a location...
movs frame, r0 @ if frame pointer is zero
beq no_frame @ we have no stack frames
tst r1, #0x10 @ 26 or 32-bit mode?
ARM( moveq mask, #0xfc000003 )
THUMB( moveq mask, #0xfc000000 )
THUMB( orreq mask, #0x03 )
movne mask, #0 @ mask for 32-bit
1: stmfd sp!, {pc} @ calculate offset of PC stored
ldr r0, [sp], #4 @ by stmfd for this CPU
adr r1, 1b
sub offset, r0, r1
/*
* Stack frame layout:
* optionally saved caller registers (r4 - r10)
* saved fp
* saved sp
* saved lr
* frame => saved pc
* optionally saved arguments (r0 - r3)
* saved sp => <next word>
*
* Functions start with the following code sequence:
* mov ip, sp
* stmfd sp!, {r0 - r3} (optional)
* corrected pc => stmfd sp!, {..., fp, ip, lr, pc}
*/
for_each_frame: tst frame, mask @ Check for address exceptions
bne no_frame
1001: ldr sv_pc, [frame, #0] @ get saved pc
1002: ldr sv_fp, [frame, #-12] @ get saved fp
sub sv_pc, sv_pc, offset @ Correct PC for prefetching
bic sv_pc, sv_pc, mask @ mask PC/LR for the mode
1003: ldr r2, [sv_pc, #-4] @ if stmfd sp!, {args} exists,
ldr r3, .Ldsi+4 @ adjust saved 'pc' back one
teq r3, r2, lsr #10 @ instruction
subne r0, sv_pc, #4 @ allow for mov
subeq r0, sv_pc, #8 @ allow for mov + stmia
ldr r1, [frame, #-4] @ get saved lr
mov r2, frame
bic r1, r1, mask @ mask PC/LR for the mode
bl dump_backtrace_entry
ldr r1, [sv_pc, #-4] @ if stmfd sp!, {args} exists,
ldr r3, .Ldsi+4
teq r3, r1, lsr #11
ldreq r0, [frame, #-8] @ get sp
subeq r0, r0, #4 @ point at the last arg
bleq dump_backtrace_stm @ dump saved registers
1004: ldr r1, [sv_pc, #0] @ if stmfd sp!, {..., fp, ip, lr, pc}
ldr r3, .Ldsi @ instruction exists,
teq r3, r1, lsr #11
subeq r0, frame, #16
bleq dump_backtrace_stm @ dump saved registers
teq sv_fp, #0 @ zero saved fp means
beq no_frame @ no further frames
cmp sv_fp, frame @ next frame must be
mov frame, sv_fp @ above the current frame
bhi for_each_frame
1006: adr r0, .Lbad
mov r1, frame
bl printk
no_frame: ldmfd sp!, {r4 - r8, pc}
ENDPROC(c_backtrace)
.pushsection __ex_table,"a"
.align 3
.long 1001b, 1006b
.long 1002b, 1006b
.long 1003b, 1006b
.long 1004b, 1006b
.popsection
.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n"
.align
.Ldsi: .word 0xe92dd800 >> 11 @ stmfd sp!, {... fp, ip, lr, pc}
.word 0xe92d0000 >> 11 @ stmfd sp!, {}
#endif
|
AirFortressIlikara/LS2K0300-linux-4.19
| 7,018
|
arch/arm/lib/copy_template.S
|
/*
* linux/arch/arm/lib/copy_template.s
*
* Code template for optimized memory copy functions
*
* Author: Nicolas Pitre
* Created: Sep 28, 2005
* Copyright: MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* Theory of operation
* -------------------
*
* This file provides the core code for a forward memory copy used in
* the implementation of memcpy(), copy_to_user() and copy_from_user().
*
* The including file must define the following accessor macros
* according to the need of the given function:
*
* ldr1w ptr reg abort
*
* This loads one word from 'ptr', stores it in 'reg' and increments
* 'ptr' to the next word. The 'abort' argument is used for fixup tables.
*
* ldr4w ptr reg1 reg2 reg3 reg4 abort
* ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
*
* This loads four or eight words starting from 'ptr', stores them
* in provided registers and increments 'ptr' past those words.
* The 'abort' argument is used for fixup tables.
*
* ldr1b ptr reg cond abort
*
* Similar to ldr1w, but it loads a byte and increments 'ptr' one byte.
* It also must apply the condition code if provided, otherwise the
* "al" condition is assumed by default.
*
* str1w ptr reg abort
* str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
* str1b ptr reg cond abort
*
* Same as their ldr* counterparts, but data is stored to 'ptr' location
* rather than being loaded.
*
* enter reg1 reg2
*
* Preserve the provided registers on the stack plus any additional
* data as needed by the implementation including this code. Called
* upon code entry.
*
* usave reg1 reg2
*
* Unwind annotation macro corresponding to the 'enter' macro.
* It tells the unwinder which registers and additional data a prior
* 'enter' macro preserved on the stack.
*
* exit reg1 reg2
*
* Restore registers with the values previously saved with the
* 'enter' macro. Called upon code termination.
*
* LDR1W_SHIFT
* STR1W_SHIFT
*
* Correction to be applied to the "ip" register when branching into
* the ldr1w or str1w instructions (some of these macros may expand to
* more than one 32bit instruction in Thumb-2)
*/
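/*
* Illustrative sketch (an assumption, not part of the original file): the
* simplest accessor macros an including file can supply -- plain loads and
* stores with no fault handling, essentially what memcpy.S provides:
*
*	.macro ldr1w ptr reg abort
*	W(ldr) \reg, [\ptr], #4
*	.endm
*	.macro str1w ptr reg abort
*	W(str) \reg, [\ptr], #4
*	.endm
*/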
UNWIND( .fnstart )
enter r4, lr
UNWIND( .fnend )
UNWIND( .fnstart )
usave r4, lr @ in first stmdb block
subs r2, r2, #4
blt 8f
ands ip, r0, #3
PLD( pld [r1, #0] )
bne 9f
ands ip, r1, #3
bne 10f
1: subs r2, r2, #(28)
stmfd sp!, {r5 - r8}
UNWIND( .fnend )
UNWIND( .fnstart )
usave r4, lr
UNWIND( .save {r5 - r8} ) @ in second stmfd block
blt 5f
CALGN( ands ip, r0, #31 )
CALGN( rsb r3, ip, #32 )
CALGN( sbcnes r4, r3, r2 ) @ C is always set here
CALGN( bcs 2f )
CALGN( adr r4, 6f )
CALGN( subs r2, r2, r3 ) @ C gets set
CALGN( add pc, r4, ip )
PLD( pld [r1, #0] )
2: PLD( subs r2, r2, #96 )
PLD( pld [r1, #28] )
PLD( blt 4f )
PLD( pld [r1, #60] )
PLD( pld [r1, #92] )
3: PLD( pld [r1, #124] )
4: ldr8w r1, r3, r4, r5, r6, r7, r8, ip, lr, abort=20f
subs r2, r2, #32
str8w r0, r3, r4, r5, r6, r7, r8, ip, lr, abort=20f
bge 3b
PLD( cmn r2, #96 )
PLD( bge 4b )
5: ands ip, r2, #28
rsb ip, ip, #32
#if LDR1W_SHIFT > 0
lsl ip, ip, #LDR1W_SHIFT
#endif
addne pc, pc, ip @ C is always clear here
b 7f
6:
.rept (1 << LDR1W_SHIFT)
W(nop)
.endr
ldr1w r1, r3, abort=20f
ldr1w r1, r4, abort=20f
ldr1w r1, r5, abort=20f
ldr1w r1, r6, abort=20f
ldr1w r1, r7, abort=20f
ldr1w r1, r8, abort=20f
ldr1w r1, lr, abort=20f
#if LDR1W_SHIFT < STR1W_SHIFT
lsl ip, ip, #STR1W_SHIFT - LDR1W_SHIFT
#elif LDR1W_SHIFT > STR1W_SHIFT
lsr ip, ip, #LDR1W_SHIFT - STR1W_SHIFT
#endif
add pc, pc, ip
nop
.rept (1 << STR1W_SHIFT)
W(nop)
.endr
str1w r0, r3, abort=20f
str1w r0, r4, abort=20f
str1w r0, r5, abort=20f
str1w r0, r6, abort=20f
str1w r0, r7, abort=20f
str1w r0, r8, abort=20f
str1w r0, lr, abort=20f
CALGN( bcs 2b )
7: ldmfd sp!, {r5 - r8}
UNWIND( .fnend ) @ end of second stmfd block
UNWIND( .fnstart )
usave r4, lr @ still in first stmdb block
8: movs r2, r2, lsl #31
ldr1b r1, r3, ne, abort=21f
ldr1b r1, r4, cs, abort=21f
ldr1b r1, ip, cs, abort=21f
str1b r0, r3, ne, abort=21f
str1b r0, r4, cs, abort=21f
str1b r0, ip, cs, abort=21f
exit r4, pc
9: rsb ip, ip, #4
cmp ip, #2
ldr1b r1, r3, gt, abort=21f
ldr1b r1, r4, ge, abort=21f
ldr1b r1, lr, abort=21f
str1b r0, r3, gt, abort=21f
str1b r0, r4, ge, abort=21f
subs r2, r2, ip
str1b r0, lr, abort=21f
blt 8b
ands ip, r1, #3
beq 1b
10: bic r1, r1, #3
cmp ip, #2
ldr1w r1, lr, abort=21f
beq 17f
bgt 18f
UNWIND( .fnend )
.macro forward_copy_shift pull push
UNWIND( .fnstart )
usave r4, lr @ still in first stmdb block
subs r2, r2, #28
blt 14f
CALGN( ands ip, r0, #31 )
CALGN( rsb ip, ip, #32 )
CALGN( sbcnes r4, ip, r2 ) @ C is always set here
CALGN( subcc r2, r2, ip )
CALGN( bcc 15f )
11: stmfd sp!, {r5 - r9}
UNWIND( .fnend )
UNWIND( .fnstart )
usave r4, lr
UNWIND( .save {r5 - r9} ) @ in new second stmfd block
PLD( pld [r1, #0] )
PLD( subs r2, r2, #96 )
PLD( pld [r1, #28] )
PLD( blt 13f )
PLD( pld [r1, #60] )
PLD( pld [r1, #92] )
12: PLD( pld [r1, #124] )
13: ldr4w r1, r4, r5, r6, r7, abort=19f
mov r3, lr, lspull #\pull
subs r2, r2, #32
ldr4w r1, r8, r9, ip, lr, abort=19f
orr r3, r3, r4, lspush #\push
mov r4, r4, lspull #\pull
orr r4, r4, r5, lspush #\push
mov r5, r5, lspull #\pull
orr r5, r5, r6, lspush #\push
mov r6, r6, lspull #\pull
orr r6, r6, r7, lspush #\push
mov r7, r7, lspull #\pull
orr r7, r7, r8, lspush #\push
mov r8, r8, lspull #\pull
orr r8, r8, r9, lspush #\push
mov r9, r9, lspull #\pull
orr r9, r9, ip, lspush #\push
mov ip, ip, lspull #\pull
orr ip, ip, lr, lspush #\push
str8w r0, r3, r4, r5, r6, r7, r8, r9, ip, , abort=19f
bge 12b
PLD( cmn r2, #96 )
PLD( bge 13b )
ldmfd sp!, {r5 - r9}
UNWIND( .fnend ) @ end of the second stmfd block
UNWIND( .fnstart )
usave r4, lr @ still in first stmdb block
14: ands ip, r2, #28
beq 16f
15: mov r3, lr, lspull #\pull
ldr1w r1, lr, abort=21f
subs ip, ip, #4
orr r3, r3, lr, lspush #\push
str1w r0, r3, abort=21f
bgt 15b
CALGN( cmp r2, #0 )
CALGN( bge 11b )
16: sub r1, r1, #(\push / 8)
b 8b
UNWIND( .fnend )
.endm
forward_copy_shift pull=8 push=24
17: forward_copy_shift pull=16 push=16
18: forward_copy_shift pull=24 push=8
/*
* Abort preamble and completion macros.
* If a fixup handler is required then those macros must surround it.
* It is assumed that the fixup code will handle the private part of
* the exit macro.
*/
.macro copy_abort_preamble
19: ldmfd sp!, {r5 - r9}
b 21f
20: ldmfd sp!, {r5 - r8}
21:
.endm
.macro copy_abort_end
ldmfd sp!, {r4, pc}
.endm
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,469
|
arch/arm/lib/memcpy.S
|
/*
* linux/arch/arm/lib/memcpy.S
*
* Author: Nicolas Pitre
* Created: Sep 28, 2005
* Copyright: MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#define LDR1W_SHIFT 0
#define STR1W_SHIFT 0
.macro ldr1w ptr reg abort
W(ldr) \reg, [\ptr], #4
.endm
.macro ldr4w ptr reg1 reg2 reg3 reg4 abort
ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4}
.endm
.macro ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8}
.endm
.macro ldr1b ptr reg cond=al abort
ldr\cond\()b \reg, [\ptr], #1
.endm
.macro str1w ptr reg abort
W(str) \reg, [\ptr], #4
.endm
.macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
stmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8}
.endm
.macro str1b ptr reg cond=al abort
str\cond\()b \reg, [\ptr], #1
.endm
.macro enter reg1 reg2
stmdb sp!, {r0, \reg1, \reg2}
.endm
.macro usave reg1 reg2
UNWIND( .save {r0, \reg1, \reg2} )
.endm
.macro exit reg1 reg2
ldmfd sp!, {r0, \reg1, \reg2}
.endm
.text
/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
ENTRY(mmiocpy)
ENTRY(memcpy)
#include "copy_template.S"
ENDPROC(memcpy)
ENDPROC(mmiocpy)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,680
|
arch/arm/lib/ashrdi3.S
|
/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005
Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
#ifdef __ARMEB__
#define al r1
#define ah r0
#else
#define al r0
#define ah r1
#endif
ENTRY(__ashrdi3)
ENTRY(__aeabi_lasr)
subs r3, r2, #32
rsb ip, r2, #32
movmi al, al, lsr r2
movpl al, ah, asr r3
ARM( orrmi al, al, ah, lsl ip )
THUMB( lslmi r3, ah, ip )
THUMB( orrmi al, al, r3 )
mov ah, ah, asr r2
ret lr
ENDPROC(__ashrdi3)
ENDPROC(__aeabi_lasr)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,685
|
arch/arm/lib/io-writesb.S
|
/*
* linux/arch/arm/lib/io-writesb.S
*
* Copyright (C) 1995-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.macro outword, rd
#ifndef __ARMEB__
strb \rd, [r0]
mov \rd, \rd, lsr #8
strb \rd, [r0]
mov \rd, \rd, lsr #8
strb \rd, [r0]
mov \rd, \rd, lsr #8
strb \rd, [r0]
#else
mov lr, \rd, lsr #24
strb lr, [r0]
mov lr, \rd, lsr #16
strb lr, [r0]
mov lr, \rd, lsr #8
strb lr, [r0]
strb \rd, [r0]
#endif
.endm
.Loutsb_align: rsb ip, ip, #4
cmp ip, r2
movgt ip, r2
cmp ip, #2
ldrb r3, [r1], #1
strb r3, [r0]
ldrgeb r3, [r1], #1
strgeb r3, [r0]
ldrgtb r3, [r1], #1
strgtb r3, [r0]
subs r2, r2, ip
bne .Loutsb_aligned
ENTRY(__raw_writesb)
teq r2, #0 @ do we have to check for the zero len?
reteq lr
ands ip, r1, #3
bne .Loutsb_align
.Loutsb_aligned:
stmfd sp!, {r4, r5, lr}
subs r2, r2, #16
bmi .Loutsb_no_16
.Loutsb_16_lp: ldmia r1!, {r3, r4, r5, ip}
outword r3
outword r4
outword r5
outword ip
subs r2, r2, #16
bpl .Loutsb_16_lp
tst r2, #15
ldmeqfd sp!, {r4, r5, pc}
.Loutsb_no_16: tst r2, #8
beq .Loutsb_no_8
ldmia r1!, {r3, r4}
outword r3
outword r4
.Loutsb_no_8: tst r2, #4
beq .Loutsb_no_4
ldr r3, [r1], #4
outword r3
.Loutsb_no_4: ands r2, r2, #3
ldmeqfd sp!, {r4, r5, pc}
cmp r2, #2
ldrb r3, [r1], #1
strb r3, [r0]
ldrgeb r3, [r1], #1
strgeb r3, [r0]
ldrgtb r3, [r1]
strgtb r3, [r0]
ldmfd sp!, {r4, r5, pc}
ENDPROC(__raw_writesb)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,680
|
arch/arm/lib/lshrdi3.S
|
/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005
Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
#ifdef __ARMEB__
#define al r1
#define ah r0
#else
#define al r0
#define ah r1
#endif
ENTRY(__lshrdi3)
ENTRY(__aeabi_llsr)
subs r3, r2, #32
rsb ip, r2, #32
movmi al, al, lsr r2
movpl al, ah, lsr r3
ARM( orrmi al, al, ah, lsl ip )
THUMB( lslmi r3, ah, ip )
THUMB( orrmi al, al, r3 )
mov ah, ah, lsr r2
ret lr
ENDPROC(__lshrdi3)
ENDPROC(__aeabi_llsr)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,242
|
arch/arm/lib/csumpartialcopyuser.S
|
/*
* linux/arch/arm/lib/csumpartialcopyuser.S
*
* Copyright (C) 1995-1998 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* 27/03/03 Ian Molton Clean up CONFIG_CPU
*
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
.text
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
.macro save_regs
mrc p15, 0, ip, c3, c0, 0
stmfd sp!, {r1, r2, r4 - r8, ip, lr}
uaccess_enable ip
.endm
.macro load_regs
ldmfd sp!, {r1, r2, r4 - r8, ip, lr}
mcr p15, 0, ip, c3, c0, 0
ret lr
.endm
#else
.macro save_regs
stmfd sp!, {r1, r2, r4 - r8, lr}
.endm
.macro load_regs
ldmfd sp!, {r1, r2, r4 - r8, pc}
.endm
#endif
.macro load1b, reg1
ldrusr \reg1, r0, 1
.endm
.macro load2b, reg1, reg2
ldrusr \reg1, r0, 1
ldrusr \reg2, r0, 1
.endm
.macro load1l, reg1
ldrusr \reg1, r0, 4
.endm
.macro load2l, reg1, reg2
ldrusr \reg1, r0, 4
ldrusr \reg2, r0, 4
.endm
.macro load4l, reg1, reg2, reg3, reg4
ldrusr \reg1, r0, 4
ldrusr \reg2, r0, 4
ldrusr \reg3, r0, 4
ldrusr \reg4, r0, 4
.endm
/*
* unsigned int
* csum_partial_copy_from_user(const char *src, char *dst, int len, int sum, int *err_ptr)
* r0 = src, r1 = dst, r2 = len, r3 = sum, [sp] = *err_ptr
* Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
*/
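/*
* Illustrative usage sketch (an assumption, not part of the original file):
* a typical caller inspects *err_ptr after the copy to distinguish a fault
* from a valid checksum; variable names are for illustration only.
*
*	int err = 0;
*	sum = csum_partial_copy_from_user(src, dst, len, sum, &err);
*	if (err)		// -EFAULT was stored through err_ptr
*		goto fault;
*/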
#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
#include "csumpartialcopygeneric.S"
/*
* FIXME: minor buglet here
* We don't return the checksum for the data present in the buffer. To do
* so properly, we would have to add in whatever registers were loaded before
* the fault, which, with the current asm above is not predictable.
*/
.pushsection .text.fixup,"ax"
.align 4
9001: mov r4, #-EFAULT
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
ldr r5, [sp, #9*4] @ *err_ptr
#else
ldr r5, [sp, #8*4] @ *err_ptr
#endif
str r4, [r5]
ldmia sp, {r1, r2} @ retrieve dst, len
add r2, r2, r1
mov r0, #0 @ zero the buffer
9002: teq r2, r1
strneb r0, [r1], #1
bne 9002b
load_regs
.popsection
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,807
|
arch/arm/lib/io-readsw-armv3.S
|
/*
* linux/arch/arm/lib/io-readsw-armv3.S
*
* Copyright (C) 1995-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.Linsw_bad_alignment:
adr r0, .Linsw_bad_align_msg
mov r2, lr
b panic
.Linsw_bad_align_msg:
.asciz "insw: bad buffer alignment (0x%p, lr=0x%08lX)\n"
.align
.Linsw_align: tst r1, #1
bne .Linsw_bad_alignment
ldr r3, [r0]
strb r3, [r1], #1
mov r3, r3, lsr #8
strb r3, [r1], #1
subs r2, r2, #1
reteq lr
ENTRY(__raw_readsw)
teq r2, #0 @ do we have to check for the zero len?
reteq lr
tst r1, #3
bne .Linsw_align
.Linsw_aligned: mov ip, #0xff
orr ip, ip, ip, lsl #8
stmfd sp!, {r4, r5, r6, lr}
subs r2, r2, #8
bmi .Lno_insw_8
.Linsw_8_lp: ldr r3, [r0]
and r3, r3, ip
ldr r4, [r0]
orr r3, r3, r4, lsl #16
ldr r4, [r0]
and r4, r4, ip
ldr r5, [r0]
orr r4, r4, r5, lsl #16
ldr r5, [r0]
and r5, r5, ip
ldr r6, [r0]
orr r5, r5, r6, lsl #16
ldr r6, [r0]
and r6, r6, ip
ldr lr, [r0]
orr r6, r6, lr, lsl #16
stmia r1!, {r3 - r6}
subs r2, r2, #8
bpl .Linsw_8_lp
tst r2, #7
ldmeqfd sp!, {r4, r5, r6, pc}
.Lno_insw_8: tst r2, #4
beq .Lno_insw_4
ldr r3, [r0]
and r3, r3, ip
ldr r4, [r0]
orr r3, r3, r4, lsl #16
ldr r4, [r0]
and r4, r4, ip
ldr r5, [r0]
orr r4, r4, r5, lsl #16
stmia r1!, {r3, r4}
.Lno_insw_4: tst r2, #2
beq .Lno_insw_2
ldr r3, [r0]
and r3, r3, ip
ldr r4, [r0]
orr r3, r3, r4, lsl #16
str r3, [r1], #4
.Lno_insw_2: tst r2, #1
ldrne r3, [r0]
strneb r3, [r1], #1
movne r3, r3, lsr #8
strneb r3, [r1]
ldmfd sp!, {r4, r5, r6, pc}
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,915
|
arch/arm/lib/memmove.S
|
/*
* linux/arch/arm/lib/memmove.S
*
* Author: Nicolas Pitre
* Created: Sep 28, 2005
* Copyright: (C) MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
.text
/*
* Prototype: void *memmove(void *dest, const void *src, size_t n);
*
* Note:
*
* If the memory regions don't overlap, we simply branch to memcpy which is
* normally a bit faster. Otherwise the copy is done going downwards. This
* is a transposition of the code from copy_template.S but with the copy
* occurring in the opposite direction.
*/
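/*
* Illustrative sketch (an assumption, not part of the original file) of the
* overlap test performed by the first three instructions below, in C terms:
*
*	if (dest <= src || (size_t)(dest - src) >= n)
*		return memcpy(dest, src, n);	// forward copy is safe
*	// otherwise dest lies inside the source region: copy backwards
*/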
ENTRY(memmove)
UNWIND( .fnstart )
subs ip, r0, r1
cmphi r2, ip
bls memcpy
stmfd sp!, {r0, r4, lr}
UNWIND( .fnend )
UNWIND( .fnstart )
UNWIND( .save {r0, r4, lr} ) @ in first stmfd block
add r1, r1, r2
add r0, r0, r2
subs r2, r2, #4
blt 8f
ands ip, r0, #3
PLD( pld [r1, #-4] )
bne 9f
ands ip, r1, #3
bne 10f
1: subs r2, r2, #(28)
stmfd sp!, {r5 - r8}
UNWIND( .fnend )
UNWIND( .fnstart )
UNWIND( .save {r0, r4, lr} )
UNWIND( .save {r5 - r8} ) @ in second stmfd block
blt 5f
CALGN( ands ip, r0, #31 )
CALGN( sbcnes r4, ip, r2 ) @ C is always set here
CALGN( bcs 2f )
CALGN( adr r4, 6f )
CALGN( subs r2, r2, ip ) @ C is set here
CALGN( rsb ip, ip, #32 )
CALGN( add pc, r4, ip )
PLD( pld [r1, #-4] )
2: PLD( subs r2, r2, #96 )
PLD( pld [r1, #-32] )
PLD( blt 4f )
PLD( pld [r1, #-64] )
PLD( pld [r1, #-96] )
3: PLD( pld [r1, #-128] )
4: ldmdb r1!, {r3, r4, r5, r6, r7, r8, ip, lr}
subs r2, r2, #32
stmdb r0!, {r3, r4, r5, r6, r7, r8, ip, lr}
bge 3b
PLD( cmn r2, #96 )
PLD( bge 4b )
5: ands ip, r2, #28
rsb ip, ip, #32
addne pc, pc, ip @ C is always clear here
b 7f
6: W(nop)
W(ldr) r3, [r1, #-4]!
W(ldr) r4, [r1, #-4]!
W(ldr) r5, [r1, #-4]!
W(ldr) r6, [r1, #-4]!
W(ldr) r7, [r1, #-4]!
W(ldr) r8, [r1, #-4]!
W(ldr) lr, [r1, #-4]!
add pc, pc, ip
nop
W(nop)
W(str) r3, [r0, #-4]!
W(str) r4, [r0, #-4]!
W(str) r5, [r0, #-4]!
W(str) r6, [r0, #-4]!
W(str) r7, [r0, #-4]!
W(str) r8, [r0, #-4]!
W(str) lr, [r0, #-4]!
CALGN( bcs 2b )
7: ldmfd sp!, {r5 - r8}
UNWIND( .fnend ) @ end of second stmfd block
UNWIND( .fnstart )
UNWIND( .save {r0, r4, lr} ) @ still in first stmfd block
8: movs r2, r2, lsl #31
ldrneb r3, [r1, #-1]!
ldrcsb r4, [r1, #-1]!
ldrcsb ip, [r1, #-1]
strneb r3, [r0, #-1]!
strcsb r4, [r0, #-1]!
strcsb ip, [r0, #-1]
ldmfd sp!, {r0, r4, pc}
9: cmp ip, #2
ldrgtb r3, [r1, #-1]!
ldrgeb r4, [r1, #-1]!
ldrb lr, [r1, #-1]!
strgtb r3, [r0, #-1]!
strgeb r4, [r0, #-1]!
subs r2, r2, ip
strb lr, [r0, #-1]!
blt 8b
ands ip, r1, #3
beq 1b
10: bic r1, r1, #3
cmp ip, #2
ldr r3, [r1, #0]
beq 17f
blt 18f
UNWIND( .fnend )
.macro backward_copy_shift push pull
UNWIND( .fnstart )
UNWIND( .save {r0, r4, lr} ) @ still in first stmfd block
subs r2, r2, #28
blt 14f
CALGN( ands ip, r0, #31 )
CALGN( sbcnes r4, ip, r2 ) @ C is always set here
CALGN( subcc r2, r2, ip )
CALGN( bcc 15f )
11: stmfd sp!, {r5 - r9}
UNWIND( .fnend )
UNWIND( .fnstart )
UNWIND( .save {r0, r4, lr} )
UNWIND( .save {r5 - r9} ) @ in new second stmfd block
PLD( pld [r1, #-4] )
PLD( subs r2, r2, #96 )
PLD( pld [r1, #-32] )
PLD( blt 13f )
PLD( pld [r1, #-64] )
PLD( pld [r1, #-96] )
12: PLD( pld [r1, #-128] )
13: ldmdb r1!, {r7, r8, r9, ip}
mov lr, r3, lspush #\push
subs r2, r2, #32
ldmdb r1!, {r3, r4, r5, r6}
orr lr, lr, ip, lspull #\pull
mov ip, ip, lspush #\push
orr ip, ip, r9, lspull #\pull
mov r9, r9, lspush #\push
orr r9, r9, r8, lspull #\pull
mov r8, r8, lspush #\push
orr r8, r8, r7, lspull #\pull
mov r7, r7, lspush #\push
orr r7, r7, r6, lspull #\pull
mov r6, r6, lspush #\push
orr r6, r6, r5, lspull #\pull
mov r5, r5, lspush #\push
orr r5, r5, r4, lspull #\pull
mov r4, r4, lspush #\push
orr r4, r4, r3, lspull #\pull
stmdb r0!, {r4 - r9, ip, lr}
bge 12b
PLD( cmn r2, #96 )
PLD( bge 13b )
ldmfd sp!, {r5 - r9}
UNWIND( .fnend ) @ end of the second stmfd block
UNWIND( .fnstart )
UNWIND( .save {r0, r4, lr} ) @ still in first stmfd block
14: ands ip, r2, #28
beq 16f
15: mov lr, r3, lspush #\push
ldr r3, [r1, #-4]!
subs ip, ip, #4
orr lr, lr, r3, lspull #\pull
str lr, [r0, #-4]!
bgt 15b
CALGN( cmp r2, #0 )
CALGN( bge 11b )
16: add r1, r1, #(\pull / 8)
b 8b
UNWIND( .fnend )
.endm
backward_copy_shift push=8 pull=24
17: backward_copy_shift push=16 pull=16
18: backward_copy_shift push=24 pull=8
ENDPROC(memmove)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,184
|
arch/arm/lib/io-writesw-armv3.S
|
/*
* linux/arch/arm/lib/io-writesw-armv3.S
*
* Copyright (C) 1995-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.Loutsw_bad_alignment:
adr r0, .Loutsw_bad_align_msg
mov r2, lr
b panic
.Loutsw_bad_align_msg:
.asciz "outsw: bad buffer alignment (0x%p, lr=0x%08lX)\n"
.align
.Loutsw_align: tst r1, #1
bne .Loutsw_bad_alignment
add r1, r1, #2
ldr r3, [r1, #-4]
mov r3, r3, lsr #16
orr r3, r3, r3, lsl #16
str r3, [r0]
subs r2, r2, #1
reteq lr
ENTRY(__raw_writesw)
teq r2, #0 @ do we have to check for the zero len?
reteq lr
tst r1, #3
bne .Loutsw_align
stmfd sp!, {r4, r5, r6, lr}
subs r2, r2, #8
bmi .Lno_outsw_8
.Loutsw_8_lp: ldmia r1!, {r3, r4, r5, r6}
mov ip, r3, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]
mov ip, r3, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]
mov ip, r4, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]
mov ip, r4, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]
mov ip, r5, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]
mov ip, r5, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]
mov ip, r6, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]
mov ip, r6, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]
subs r2, r2, #8
bpl .Loutsw_8_lp
tst r2, #7
ldmeqfd sp!, {r4, r5, r6, pc}
.Lno_outsw_8: tst r2, #4
beq .Lno_outsw_4
ldmia r1!, {r3, r4}
mov ip, r3, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]
mov ip, r3, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]
mov ip, r4, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]
mov ip, r4, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]
.Lno_outsw_4: tst r2, #2
beq .Lno_outsw_2
ldr r3, [r1], #4
mov ip, r3, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]
mov ip, r3, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]
.Lno_outsw_2: tst r2, #1
ldrne r3, [r1]
movne ip, r3, lsl #16
orrne ip, ip, ip, lsr #16
strne ip, [r0]
ldmfd sp!, {r4, r5, r6, pc}
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,151
|
arch/arm/lib/csumpartialcopy.S
|
/*
* linux/arch/arm/lib/csumpartialcopy.S
*
* Copyright (C) 1995-1998 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
/* Function: __u32 csum_partial_copy_nocheck(const char *src, char *dst, int len, __u32 sum)
* Params : r0 = src, r1 = dst, r2 = len, r3 = checksum
* Returns : r0 = new checksum
*/
.macro save_regs
stmfd sp!, {r1, r4 - r8, lr}
.endm
.macro load_regs
ldmfd sp!, {r1, r4 - r8, pc}
.endm
.macro load1b, reg1
ldrb \reg1, [r0], #1
.endm
.macro load2b, reg1, reg2
ldrb \reg1, [r0], #1
ldrb \reg2, [r0], #1
.endm
.macro load1l, reg1
ldr \reg1, [r0], #4
.endm
.macro load2l, reg1, reg2
ldr \reg1, [r0], #4
ldr \reg2, [r0], #4
.endm
.macro load4l, reg1, reg2, reg3, reg4
ldmia r0!, {\reg1, \reg2, \reg3, \reg4}
.endm
#define FN_ENTRY ENTRY(csum_partial_copy_nocheck)
#define FN_EXIT ENDPROC(csum_partial_copy_nocheck)
#include "csumpartialcopygeneric.S"
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,945
|
arch/arm/lib/findbit.S
|
/*
* linux/arch/arm/lib/findbit.S
*
* Copyright (C) 1995-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* 16th March 2001 - John Ripley <jripley@sonicblue.com>
* Fixed so that "size" is an exclusive not an inclusive quantity.
* All users of these functions expect exclusive sizes, and may
* also call with zero size.
* Reworked by rmk.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
/*
* Purpose : Find a 'zero' bit
* Prototype: int find_first_zero_bit(void *addr, unsigned int maxbit);
*/
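/*
* Illustrative sketch (an assumption, not part of the original file) of the
* semantics implemented by the routines below, in C: return the index of the
* first zero bit, or maxbit when no zero bit exists below maxbit.
*
*	for (i = 0; i < maxbit; i++)
*		if (!test_bit(i, addr))
*			return i;
*	return maxbit;
*/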
ENTRY(_find_first_zero_bit_le)
teq r1, #0
beq 3f
mov r2, #0
1:
ARM( ldrb r3, [r0, r2, lsr #3] )
THUMB( lsr r3, r2, #3 )
THUMB( ldrb r3, [r0, r3] )
eors r3, r3, #0xff @ invert bits
bne .L_found @ any now set - found zero bit
add r2, r2, #8 @ next bit pointer
2: cmp r2, r1 @ any more?
blo 1b
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_zero_bit_le)
/*
* Purpose : Find next 'zero' bit
* Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset)
*/
ENTRY(_find_next_zero_bit_le)
teq r1, #0
beq 3b
ands ip, r2, #7
beq 1b @ If new byte, goto old routine
ARM( ldrb r3, [r0, r2, lsr #3] )
THUMB( lsr r3, r2, #3 )
THUMB( ldrb r3, [r0, r3] )
eor r3, r3, #0xff @ now looking for a 1 bit
movs r3, r3, lsr ip @ shift off unused bits
bne .L_found
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_zero_bit_le)
/*
* Purpose : Find a 'one' bit
* Prototype: int find_first_bit(const unsigned long *addr, unsigned int maxbit);
*/
ENTRY(_find_first_bit_le)
teq r1, #0
beq 3f
mov r2, #0
1:
ARM( ldrb r3, [r0, r2, lsr #3] )
THUMB( lsr r3, r2, #3 )
THUMB( ldrb r3, [r0, r3] )
movs r3, r3
bne .L_found @ any now set - found one bit
add r2, r2, #8 @ next bit pointer
2: cmp r2, r1 @ any more?
blo 1b
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_bit_le)
/*
* Purpose : Find next 'one' bit
* Prototype: int find_next_bit(void *addr, unsigned int maxbit, int offset)
*/
ENTRY(_find_next_bit_le)
teq r1, #0
beq 3b
ands ip, r2, #7
beq 1b @ If new byte, goto old routine
ARM( ldrb r3, [r0, r2, lsr #3] )
THUMB( lsr r3, r2, #3 )
THUMB( ldrb r3, [r0, r3] )
movs r3, r3, lsr ip @ shift off unused bits
bne .L_found
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_bit_le)
#ifdef __ARMEB__
ENTRY(_find_first_zero_bit_be)
teq r1, #0
beq 3f
mov r2, #0
1: eor r3, r2, #0x18 @ big endian byte ordering
ARM( ldrb r3, [r0, r3, lsr #3] )
THUMB( lsr r3, #3 )
THUMB( ldrb r3, [r0, r3] )
eors r3, r3, #0xff @ invert bits
bne .L_found @ any now set - found zero bit
add r2, r2, #8 @ next bit pointer
2: cmp r2, r1 @ any more?
blo 1b
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_zero_bit_be)
ENTRY(_find_next_zero_bit_be)
teq r1, #0
beq 3b
ands ip, r2, #7
beq 1b @ If new byte, goto old routine
eor r3, r2, #0x18 @ big endian byte ordering
ARM( ldrb r3, [r0, r3, lsr #3] )
THUMB( lsr r3, #3 )
THUMB( ldrb r3, [r0, r3] )
eor r3, r3, #0xff @ now looking for a 1 bit
movs r3, r3, lsr ip @ shift off unused bits
bne .L_found
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_zero_bit_be)
ENTRY(_find_first_bit_be)
teq r1, #0
beq 3f
mov r2, #0
1: eor r3, r2, #0x18 @ big endian byte ordering
ARM( ldrb r3, [r0, r3, lsr #3] )
THUMB( lsr r3, #3 )
THUMB( ldrb r3, [r0, r3] )
movs r3, r3
bne .L_found @ any now set - found one bit
add r2, r2, #8 @ next bit pointer
2: cmp r2, r1 @ any more?
blo 1b
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_bit_be)
ENTRY(_find_next_bit_be)
teq r1, #0
beq 3b
ands ip, r2, #7
beq 1b @ If new byte, goto old routine
eor r3, r2, #0x18 @ big endian byte ordering
ARM( ldrb r3, [r0, r3, lsr #3] )
THUMB( lsr r3, #3 )
THUMB( ldrb r3, [r0, r3] )
movs r3, r3, lsr ip @ shift off unused bits
bne .L_found
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_bit_be)
#endif
/*
* One or more bits in the LSB of r3 are assumed to be set.
*/
.L_found:
#if __LINUX_ARM_ARCH__ >= 5
rsb r0, r3, #0
and r3, r3, r0
clz r3, r3
rsb r3, r3, #31
add r0, r2, r3
#else
tst r3, #0x0f
addeq r2, r2, #4
movne r3, r3, lsl #4
tst r3, #0x30
addeq r2, r2, #2
movne r3, r3, lsl #2
tst r3, #0x40
addeq r2, r2, #1
mov r0, r2
#endif
cmp r1, r0 @ Clamp to maxbit
movlo r0, r1
ret lr
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,287
|
arch/arm/lib/copy_page.S
|
/*
* linux/arch/arm/lib/copypage.S
*
* Copyright (C) 1995-1999 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ASM optimised string functions
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#define COPY_COUNT (PAGE_SZ / (2 * L1_CACHE_BYTES) PLD( -1 ))
.text
.align 5
/*
* StrongARM optimised copy_page routine
* now 1.78 bytes/cycle, was 1.60 bytes/cycle (50MHz bus -> 89MB/s)
* Note that we probably achieve closer to the 100MB/s target with
* the core clock switching.
*/
ENTRY(copy_page)
stmfd sp!, {r4, lr} @ 2
PLD( pld [r1, #0] )
PLD( pld [r1, #L1_CACHE_BYTES] )
mov r2, #COPY_COUNT @ 1
ldmia r1!, {r3, r4, ip, lr} @ 4+1
1: PLD( pld [r1, #2 * L1_CACHE_BYTES])
PLD( pld [r1, #3 * L1_CACHE_BYTES])
2:
.rept (2 * L1_CACHE_BYTES / 16 - 1)
stmia r0!, {r3, r4, ip, lr} @ 4
ldmia r1!, {r3, r4, ip, lr} @ 4
.endr
subs r2, r2, #1 @ 1
stmia r0!, {r3, r4, ip, lr} @ 4
ldmgtia r1!, {r3, r4, ip, lr} @ 4
bgt 1b @ 1
PLD( ldmeqia r1!, {r3, r4, ip, lr} )
PLD( beq 2b )
ldmfd sp!, {r4, pc} @ 3
ENDPROC(copy_page)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,680
|
arch/arm/lib/ashldi3.S
|
/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005
Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
#ifdef __ARMEB__
#define al r1
#define ah r0
#else
#define al r0
#define ah r1
#endif
ENTRY(__ashldi3)
ENTRY(__aeabi_llsl)
subs r3, r2, #32
rsb ip, r2, #32
movmi ah, ah, lsl r2
movpl ah, al, lsl r3
ARM( orrmi ah, ah, al, lsr ip )
THUMB( lsrmi r3, al, ip )
THUMB( orrmi ah, ah, r3 )
mov al, al, lsl r2
ret lr
ENDPROC(__ashldi3)
ENDPROC(__aeabi_llsl)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,504
|
arch/arm/lib/io-readsb.S
|
/*
* linux/arch/arm/lib/io-readsb.S
*
* Copyright (C) 1995-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.Linsb_align: rsb ip, ip, #4
cmp ip, r2
movgt ip, r2
cmp ip, #2
ldrb r3, [r0]
strb r3, [r1], #1
ldrgeb r3, [r0]
strgeb r3, [r1], #1
ldrgtb r3, [r0]
strgtb r3, [r1], #1
subs r2, r2, ip
bne .Linsb_aligned
ENTRY(__raw_readsb)
teq r2, #0 @ do we have to check for the zero len?
reteq lr
ands ip, r1, #3
bne .Linsb_align
.Linsb_aligned: stmfd sp!, {r4 - r6, lr}
subs r2, r2, #16
bmi .Linsb_no_16
.Linsb_16_lp: ldrb r3, [r0]
ldrb r4, [r0]
ldrb r5, [r0]
mov r3, r3, put_byte_0
ldrb r6, [r0]
orr r3, r3, r4, put_byte_1
ldrb r4, [r0]
orr r3, r3, r5, put_byte_2
ldrb r5, [r0]
orr r3, r3, r6, put_byte_3
ldrb r6, [r0]
mov r4, r4, put_byte_0
ldrb ip, [r0]
orr r4, r4, r5, put_byte_1
ldrb r5, [r0]
orr r4, r4, r6, put_byte_2
ldrb r6, [r0]
orr r4, r4, ip, put_byte_3
ldrb ip, [r0]
mov r5, r5, put_byte_0
ldrb lr, [r0]
orr r5, r5, r6, put_byte_1
ldrb r6, [r0]
orr r5, r5, ip, put_byte_2
ldrb ip, [r0]
orr r5, r5, lr, put_byte_3
ldrb lr, [r0]
mov r6, r6, put_byte_0
orr r6, r6, ip, put_byte_1
ldrb ip, [r0]
orr r6, r6, lr, put_byte_2
orr r6, r6, ip, put_byte_3
stmia r1!, {r3 - r6}
subs r2, r2, #16
bpl .Linsb_16_lp
tst r2, #15
ldmeqfd sp!, {r4 - r6, pc}
.Linsb_no_16: tst r2, #8
beq .Linsb_no_8
ldrb r3, [r0]
ldrb r4, [r0]
ldrb r5, [r0]
mov r3, r3, put_byte_0
ldrb r6, [r0]
orr r3, r3, r4, put_byte_1
ldrb r4, [r0]
orr r3, r3, r5, put_byte_2
ldrb r5, [r0]
orr r3, r3, r6, put_byte_3
ldrb r6, [r0]
mov r4, r4, put_byte_0
ldrb ip, [r0]
orr r4, r4, r5, put_byte_1
orr r4, r4, r6, put_byte_2
orr r4, r4, ip, put_byte_3
stmia r1!, {r3, r4}
.Linsb_no_8: tst r2, #4
beq .Linsb_no_4
ldrb r3, [r0]
ldrb r4, [r0]
ldrb r5, [r0]
ldrb r6, [r0]
mov r3, r3, put_byte_0
orr r3, r3, r4, put_byte_1
orr r3, r3, r5, put_byte_2
orr r3, r3, r6, put_byte_3
str r3, [r1], #4
.Linsb_no_4: ands r2, r2, #3
ldmeqfd sp!, {r4 - r6, pc}
cmp r2, #2
ldrb r3, [r0]
strb r3, [r1], #1
ldrgeb r3, [r0]
strgeb r3, [r1], #1
ldrgtb r3, [r0]
strgtb r3, [r1]
ldmfd sp!, {r4 - r6, pc}
ENDPROC(__raw_readsb)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,733
|
arch/arm/lib/io-writesw-armv4.S
|
/*
* linux/arch/arm/lib/io-writesw-armv4.S
*
* Copyright (C) 1995-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.macro outword, rd
#ifndef __ARMEB__
strh \rd, [r0]
mov \rd, \rd, lsr #16
strh \rd, [r0]
#else
mov lr, \rd, lsr #16
strh lr, [r0]
strh \rd, [r0]
#endif
.endm
.Loutsw_align: movs ip, r1, lsl #31
bne .Loutsw_noalign
ldrh r3, [r1], #2
sub r2, r2, #1
strh r3, [r0]
ENTRY(__raw_writesw)
teq r2, #0
reteq lr
ands r3, r1, #3
bne .Loutsw_align
stmfd sp!, {r4, r5, lr}
subs r2, r2, #8
bmi .Lno_outsw_8
.Loutsw_8_lp: ldmia r1!, {r3, r4, r5, ip}
subs r2, r2, #8
outword r3
outword r4
outword r5
outword ip
bpl .Loutsw_8_lp
.Lno_outsw_8: tst r2, #4
beq .Lno_outsw_4
ldmia r1!, {r3, ip}
outword r3
outword ip
.Lno_outsw_4: movs r2, r2, lsl #31
bcc .Lno_outsw_2
ldr r3, [r1], #4
outword r3
.Lno_outsw_2: ldrneh r3, [r1]
strneh r3, [r0]
ldmfd sp!, {r4, r5, pc}
#ifdef __ARMEB__
#define pull_hbyte0 lsl #8
#define push_hbyte1 lsr #24
#else
#define pull_hbyte0 lsr #24
#define push_hbyte1 lsl #8
#endif
.Loutsw_noalign:
ARM( ldr r3, [r1, -r3]! )
THUMB( rsb r3, r3, #0 )
THUMB( ldr r3, [r1, r3] )
THUMB( sub r1, r3 )
subcs r2, r2, #1
bcs 2f
subs r2, r2, #2
bmi 3f
1: mov ip, r3, lsr #8
strh ip, [r0]
2: mov ip, r3, pull_hbyte0
ldr r3, [r1, #4]!
subs r2, r2, #2
orr ip, ip, r3, push_hbyte1
strh ip, [r0]
bpl 1b
tst r2, #1
3: movne ip, r3, lsr #8
strneh ip, [r0]
ret lr
ENDPROC(__raw_writesw)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,078
|
arch/arm/lib/memset.S
|
/*
* linux/arch/arm/lib/memset.S
*
* Copyright (C) 1995-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ASM optimised string functions
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
.text
.align 5
ENTRY(mmioset)
ENTRY(memset)
UNWIND( .fnstart )
ands r3, r0, #3 @ 1 unaligned?
mov ip, r0 @ preserve r0 as return value
bne 6f @ 1
/*
* we know that the pointer in ip is aligned to a word boundary.
*/
1: orr r1, r1, r1, lsl #8
orr r1, r1, r1, lsl #16
mov r3, r1
7: cmp r2, #16
blt 4f
#if ! CALGN(1)+0
/*
* We need 2 extra registers for this loop - use r8 and the LR
*/
stmfd sp!, {r8, lr}
UNWIND( .fnend )
UNWIND( .fnstart )
UNWIND( .save {r8, lr} )
mov r8, r1
mov lr, r3
2: subs r2, r2, #64
stmgeia ip!, {r1, r3, r8, lr} @ 64 bytes at a time.
stmgeia ip!, {r1, r3, r8, lr}
stmgeia ip!, {r1, r3, r8, lr}
stmgeia ip!, {r1, r3, r8, lr}
bgt 2b
ldmeqfd sp!, {r8, pc} @ Now <64 bytes to go.
/*
* No need to correct the count; we're only testing bits from now on
*/
tst r2, #32
stmneia ip!, {r1, r3, r8, lr}
stmneia ip!, {r1, r3, r8, lr}
tst r2, #16
stmneia ip!, {r1, r3, r8, lr}
ldmfd sp!, {r8, lr}
UNWIND( .fnend )
#else
/*
* This version aligns the destination pointer in order to write
* whole cache lines at once.
*/
stmfd sp!, {r4-r8, lr}
UNWIND( .fnend )
UNWIND( .fnstart )
UNWIND( .save {r4-r8, lr} )
mov r4, r1
mov r5, r3
mov r6, r1
mov r7, r3
mov r8, r1
mov lr, r3
cmp r2, #96
tstgt ip, #31
ble 3f
and r8, ip, #31
rsb r8, r8, #32
sub r2, r2, r8
movs r8, r8, lsl #(32 - 4)
stmcsia ip!, {r4, r5, r6, r7}
stmmiia ip!, {r4, r5}
tst r8, #(1 << 30)
mov r8, r1
strne r1, [ip], #4
3: subs r2, r2, #64
stmgeia ip!, {r1, r3-r8, lr}
stmgeia ip!, {r1, r3-r8, lr}
bgt 3b
ldmeqfd sp!, {r4-r8, pc}
tst r2, #32
stmneia ip!, {r1, r3-r8, lr}
tst r2, #16
stmneia ip!, {r4-r7}
ldmfd sp!, {r4-r8, lr}
UNWIND( .fnend )
#endif
UNWIND( .fnstart )
4: tst r2, #8
stmneia ip!, {r1, r3}
tst r2, #4
strne r1, [ip], #4
/*
* When we get here, we've got less than 4 bytes to set. We
* may have an unaligned pointer as well.
*/
5: tst r2, #2
strneb r1, [ip], #1
strneb r1, [ip], #1
tst r2, #1
strneb r1, [ip], #1
ret lr
6: subs r2, r2, #4 @ 1 do we have enough
blt 5b @ 1 bytes to align with?
cmp r3, #2 @ 1
strltb r1, [ip], #1 @ 1
strleb r1, [ip], #1 @ 1
strb r1, [ip], #1 @ 1
add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
b 1b
UNWIND( .fnend )
ENDPROC(memset)
ENDPROC(mmioset)
ENTRY(__memset32)
UNWIND( .fnstart )
mov r3, r1 @ copy r1 to r3 and fall into memset64
UNWIND( .fnend )
ENDPROC(__memset32)
ENTRY(__memset64)
UNWIND( .fnstart )
mov ip, r0 @ preserve r0 as return value
b 7b @ jump into the middle of memset
UNWIND( .fnend )
ENDPROC(__memset64)
arch/arm/lib/io-writesl.S
/*
* linux/arch/arm/lib/io-writesl.S
*
* Copyright (C) 1995-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
ENTRY(__raw_writesl)
teq r2, #0 @ do we have to check for the zero len?
reteq lr
ands ip, r1, #3
bne 3f
subs r2, r2, #4
bmi 2f
stmfd sp!, {r4, lr}
1: ldmia r1!, {r3, r4, ip, lr}
subs r2, r2, #4
str r3, [r0, #0]
str r4, [r0, #0]
str ip, [r0, #0]
str lr, [r0, #0]
bpl 1b
ldmfd sp!, {r4, lr}
2: movs r2, r2, lsl #31
ldmcsia r1!, {r3, ip}
strcs r3, [r0, #0]
ldrne r3, [r1, #0]
strcs ip, [r0, #0]
strne r3, [r0, #0]
ret lr
3: bic r1, r1, #3
ldr r3, [r1], #4
cmp ip, #2
blt 5f
bgt 6f
4: mov ip, r3, lspull #16
ldr r3, [r1], #4
subs r2, r2, #1
orr ip, ip, r3, lspush #16
str ip, [r0]
bne 4b
ret lr
5: mov ip, r3, lspull #8
ldr r3, [r1], #4
subs r2, r2, #1
orr ip, ip, r3, lspush #24
str ip, [r0]
bne 5b
ret lr
6: mov ip, r3, lspull #24
ldr r3, [r1], #4
subs r2, r2, #1
orr ip, ip, r3, lspush #8
str ip, [r0]
bne 6b
ret lr
ENDPROC(__raw_writesl)
arch/arm/lib/delay-loop.S
/*
* linux/arch/arm/lib/delay.S
*
* Copyright (C) 1995, 1996 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/delay.h>
.text
.LC0: .word loops_per_jiffy
.LC1: .word UDELAY_MULT
/*
* loops = r0 * HZ * loops_per_jiffy / 1000000
*
* r0 <= 2000
* HZ <= 1000
*/
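/*
 * Worked example (the numbers are illustrative only): with HZ = 100 and
 * loops_per_jiffy = 1000000, a request of r0 = 10 (10us) gives
 * loops = 10 * 100 * 1000000 / 1000000 = 1000 passes of __loop_delay.
 */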
ENTRY(__loop_udelay)
ldr r2, .LC1
mul r0, r2, r0 @ r0 = delay_us * UDELAY_MULT
ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0xfffffaf0
ldr r2, .LC0
ldr r2, [r2]
umull r1, r0, r2, r0 @ r0-r1 = r0 * loops_per_jiffy
adds r1, r1, #0xffffffff @ rounding up ...
adcs r0, r0, r0 @ and right shift by 31
reteq lr
.align 3
@ Delay routine
ENTRY(__loop_delay)
subs r0, r0, #1
#if 0
retls lr
subs r0, r0, #1
retls lr
subs r0, r0, #1
retls lr
subs r0, r0, #1
retls lr
subs r0, r0, #1
retls lr
subs r0, r0, #1
retls lr
subs r0, r0, #1
retls lr
subs r0, r0, #1
#endif
bhi __loop_delay
ret lr
ENDPROC(__loop_udelay)
ENDPROC(__loop_const_udelay)
ENDPROC(__loop_delay)
arch/arm/lib/io-readsl.S
/*
* linux/arch/arm/lib/io-readsl.S
*
* Copyright (C) 1995-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
ENTRY(__raw_readsl)
teq r2, #0 @ do we have to check for the zero len?
reteq lr
ands ip, r1, #3
bne 3f
subs r2, r2, #4
bmi 2f
stmfd sp!, {r4, lr}
1: ldr r3, [r0, #0]
ldr r4, [r0, #0]
ldr ip, [r0, #0]
ldr lr, [r0, #0]
subs r2, r2, #4
stmia r1!, {r3, r4, ip, lr}
bpl 1b
ldmfd sp!, {r4, lr}
2: movs r2, r2, lsl #31
ldrcs r3, [r0, #0]
ldrcs ip, [r0, #0]
stmcsia r1!, {r3, ip}
ldrne r3, [r0, #0]
strne r3, [r1, #0]
ret lr
3: ldr r3, [r0]
cmp ip, #2
mov ip, r3, get_byte_0
strb ip, [r1], #1
bgt 6f
mov ip, r3, get_byte_1
strb ip, [r1], #1
beq 5f
mov ip, r3, get_byte_2
strb ip, [r1], #1
4: subs r2, r2, #1
mov ip, r3, lspull #24
ldrne r3, [r0]
orrne ip, ip, r3, lspush #8
strne ip, [r1], #4
bne 4b
b 8f
5: subs r2, r2, #1
mov ip, r3, lspull #16
ldrne r3, [r0]
orrne ip, ip, r3, lspush #16
strne ip, [r1], #4
bne 5b
b 7f
6: subs r2, r2, #1
mov ip, r3, lspull #8
ldrne r3, [r0]
orrne ip, ip, r3, lspush #24
strne ip, [r1], #4
bne 6b
mov r3, ip, get_byte_2
strb r3, [r1, #2]
7: mov r3, ip, get_byte_1
strb r3, [r1, #1]
8: mov r3, ip, get_byte_0
strb r3, [r1, #0]
ret lr
ENDPROC(__raw_readsl)
arch/arm/lib/copy_from_user.S
/*
* linux/arch/arm/lib/copy_from_user.S
*
* Author: Nicolas Pitre
* Created: Sep 29, 2005
* Copyright: MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
/*
* Prototype:
*
* size_t arm_copy_from_user(void *to, const void *from, size_t n)
*
* Purpose:
*
* copy a block to kernel memory from user memory
*
* Params:
*
* to = kernel memory
* from = user memory
* n = number of bytes to copy
*
* Return value:
*
* Number of bytes NOT copied.
*/
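/*
 * Illustrative only: callers normally reach this routine through the
 * generic copy_from_user() wrapper rather than calling it directly.
 * The "bytes not copied" return value is typically handled like this
 * (a sketch; buf, ubuf and len are made-up names):
 *
 *	char buf[64];
 *
 *	if (copy_from_user(buf, ubuf, len))
 *		return -EFAULT;		// some bytes were not copied
 */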
#ifndef CONFIG_THUMB2_KERNEL
#define LDR1W_SHIFT 0
#else
#define LDR1W_SHIFT 1
#endif
#define STR1W_SHIFT 0
.macro ldr1w ptr reg abort
ldrusr \reg, \ptr, 4, abort=\abort
.endm
.macro ldr4w ptr reg1 reg2 reg3 reg4 abort
ldr1w \ptr, \reg1, \abort
ldr1w \ptr, \reg2, \abort
ldr1w \ptr, \reg3, \abort
ldr1w \ptr, \reg4, \abort
.endm
.macro ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
ldr4w \ptr, \reg1, \reg2, \reg3, \reg4, \abort
ldr4w \ptr, \reg5, \reg6, \reg7, \reg8, \abort
.endm
.macro ldr1b ptr reg cond=al abort
ldrusr \reg, \ptr, 1, \cond, abort=\abort
.endm
.macro str1w ptr reg abort
W(str) \reg, [\ptr], #4
.endm
.macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
stmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8}
.endm
.macro str1b ptr reg cond=al abort
str\cond\()b \reg, [\ptr], #1
.endm
.macro enter reg1 reg2
mov r3, #0
stmdb sp!, {r0, r2, r3, \reg1, \reg2}
.endm
.macro usave reg1 reg2
UNWIND( .save {r0, r2, r3, \reg1, \reg2} )
.endm
.macro exit reg1 reg2
add sp, sp, #8
ldmfd sp!, {r0, \reg1, \reg2}
.endm
.text
ENTRY(arm_copy_from_user)
#ifdef CONFIG_CPU_SPECTRE
get_thread_info r3
ldr r3, [r3, #TI_ADDR_LIMIT]
uaccess_mask_range_ptr r1, r2, r3, ip
#endif
#include "copy_template.S"
ENDPROC(arm_copy_from_user)
.pushsection .text.fixup,"ax"
.align 0
copy_abort_preamble
ldmfd sp!, {r1, r2, r3}
sub r0, r0, r1
rsb r0, r0, r2
copy_abort_end
.popsection
arch/arm/lib/lib1funcs.S
/*
* linux/arch/arm/lib/lib1funcs.S: Optimized ARM division routines
*
* Author: Nicolas Pitre <nico@fluxnic.net>
* - contributed to gcc-3.4 on Sep 30, 2003
* - adapted for the Linux kernel on Oct 2, 2003
*/
/* Copyright 1995, 1996, 1998, 1999, 2000, 2003 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
.macro ARM_DIV_BODY dividend, divisor, result, curbit
#if __LINUX_ARM_ARCH__ >= 5
clz \curbit, \divisor
clz \result, \dividend
sub \result, \curbit, \result
mov \curbit, #1
mov \divisor, \divisor, lsl \result
mov \curbit, \curbit, lsl \result
mov \result, #0
#else
@ Initially shift the divisor left 3 bits if possible,
@ set curbit accordingly. This allows for curbit to be located
	@ at the left end of each 4-bit nibble in the division loop
@ to save one loop in most cases.
tst \divisor, #0xe0000000
moveq \divisor, \divisor, lsl #3
moveq \curbit, #8
movne \curbit, #1
@ Unless the divisor is very big, shift it up in multiples of
@ four bits, since this is the amount of unwinding in the main
@ division loop. Continue shifting until the divisor is
@ larger than the dividend.
1: cmp \divisor, #0x10000000
cmplo \divisor, \dividend
movlo \divisor, \divisor, lsl #4
movlo \curbit, \curbit, lsl #4
blo 1b
@ For very big divisors, we must shift it a bit at a time, or
@ we will be in danger of overflowing.
1: cmp \divisor, #0x80000000
cmplo \divisor, \dividend
movlo \divisor, \divisor, lsl #1
movlo \curbit, \curbit, lsl #1
blo 1b
mov \result, #0
#endif
@ Division loop
1: cmp \dividend, \divisor
subhs \dividend, \dividend, \divisor
orrhs \result, \result, \curbit
cmp \dividend, \divisor, lsr #1
subhs \dividend, \dividend, \divisor, lsr #1
orrhs \result, \result, \curbit, lsr #1
cmp \dividend, \divisor, lsr #2
subhs \dividend, \dividend, \divisor, lsr #2
orrhs \result, \result, \curbit, lsr #2
cmp \dividend, \divisor, lsr #3
subhs \dividend, \dividend, \divisor, lsr #3
orrhs \result, \result, \curbit, lsr #3
cmp \dividend, #0 @ Early termination?
movnes \curbit, \curbit, lsr #4 @ No, any more bits to do?
movne \divisor, \divisor, lsr #4
bne 1b
.endm
.macro ARM_DIV2_ORDER divisor, order
#if __LINUX_ARM_ARCH__ >= 5
clz \order, \divisor
rsb \order, \order, #31
#else
cmp \divisor, #(1 << 16)
movhs \divisor, \divisor, lsr #16
movhs \order, #16
movlo \order, #0
cmp \divisor, #(1 << 8)
movhs \divisor, \divisor, lsr #8
addhs \order, \order, #8
cmp \divisor, #(1 << 4)
movhs \divisor, \divisor, lsr #4
addhs \order, \order, #4
cmp \divisor, #(1 << 2)
addhi \order, \order, #3
addls \order, \order, \divisor, lsr #1
#endif
.endm
.macro ARM_MOD_BODY dividend, divisor, order, spare
#if __LINUX_ARM_ARCH__ >= 5
clz \order, \divisor
clz \spare, \dividend
sub \order, \order, \spare
mov \divisor, \divisor, lsl \order
#else
mov \order, #0
@ Unless the divisor is very big, shift it up in multiples of
@ four bits, since this is the amount of unwinding in the main
@ division loop. Continue shifting until the divisor is
@ larger than the dividend.
1: cmp \divisor, #0x10000000
cmplo \divisor, \dividend
movlo \divisor, \divisor, lsl #4
addlo \order, \order, #4
blo 1b
@ For very big divisors, we must shift it a bit at a time, or
@ we will be in danger of overflowing.
1: cmp \divisor, #0x80000000
cmplo \divisor, \dividend
movlo \divisor, \divisor, lsl #1
addlo \order, \order, #1
blo 1b
#endif
	@ Perform all needed subtractions to keep only the remainder.
@ Do comparisons in batch of 4 first.
subs \order, \order, #3 @ yes, 3 is intended here
blt 2f
1: cmp \dividend, \divisor
subhs \dividend, \dividend, \divisor
cmp \dividend, \divisor, lsr #1
subhs \dividend, \dividend, \divisor, lsr #1
cmp \dividend, \divisor, lsr #2
subhs \dividend, \dividend, \divisor, lsr #2
cmp \dividend, \divisor, lsr #3
subhs \dividend, \dividend, \divisor, lsr #3
cmp \dividend, #1
mov \divisor, \divisor, lsr #4
subges \order, \order, #4
bge 1b
tst \order, #3
teqne \dividend, #0
beq 5f
@ Either 1, 2 or 3 comparison/subtractions are left.
2: cmn \order, #2
blt 4f
beq 3f
cmp \dividend, \divisor
subhs \dividend, \dividend, \divisor
mov \divisor, \divisor, lsr #1
3: cmp \dividend, \divisor
subhs \dividend, \dividend, \divisor
mov \divisor, \divisor, lsr #1
4: cmp \dividend, \divisor
subhs \dividend, \dividend, \divisor
5:
.endm
#ifdef CONFIG_ARM_PATCH_IDIV
.align 3
#endif
ENTRY(__udivsi3)
ENTRY(__aeabi_uidiv)
UNWIND(.fnstart)
subs r2, r1, #1
reteq lr
bcc Ldiv0
cmp r0, r1
bls 11f
tst r1, r2
beq 12f
ARM_DIV_BODY r0, r1, r2, r3
mov r0, r2
ret lr
11: moveq r0, #1
movne r0, #0
ret lr
12: ARM_DIV2_ORDER r1, r2
mov r0, r0, lsr r2
ret lr
UNWIND(.fnend)
ENDPROC(__udivsi3)
ENDPROC(__aeabi_uidiv)
ENTRY(__umodsi3)
UNWIND(.fnstart)
subs r2, r1, #1 @ compare divisor with 1
bcc Ldiv0
cmpne r0, r1 @ compare dividend with divisor
moveq r0, #0
tsthi r1, r2 @ see if divisor is power of 2
andeq r0, r0, r2
retls lr
ARM_MOD_BODY r0, r1, r2, r3
ret lr
UNWIND(.fnend)
ENDPROC(__umodsi3)
#ifdef CONFIG_ARM_PATCH_IDIV
.align 3
#endif
ENTRY(__divsi3)
ENTRY(__aeabi_idiv)
UNWIND(.fnstart)
cmp r1, #0
eor ip, r0, r1 @ save the sign of the result.
beq Ldiv0
rsbmi r1, r1, #0 @ loops below use unsigned.
subs r2, r1, #1 @ division by 1 or -1 ?
beq 10f
movs r3, r0
rsbmi r3, r0, #0 @ positive dividend value
cmp r3, r1
bls 11f
tst r1, r2 @ divisor is power of 2 ?
beq 12f
ARM_DIV_BODY r3, r1, r0, r2
cmp ip, #0
rsbmi r0, r0, #0
ret lr
10: teq ip, r0 @ same sign ?
rsbmi r0, r0, #0
ret lr
11: movlo r0, #0
moveq r0, ip, asr #31
orreq r0, r0, #1
ret lr
12: ARM_DIV2_ORDER r1, r2
cmp ip, #0
mov r0, r3, lsr r2
rsbmi r0, r0, #0
ret lr
UNWIND(.fnend)
ENDPROC(__divsi3)
ENDPROC(__aeabi_idiv)
ENTRY(__modsi3)
UNWIND(.fnstart)
cmp r1, #0
beq Ldiv0
rsbmi r1, r1, #0 @ loops below use unsigned.
movs ip, r0 @ preserve sign of dividend
rsbmi r0, r0, #0 @ if negative make positive
subs r2, r1, #1 @ compare divisor with 1
cmpne r0, r1 @ compare dividend with divisor
moveq r0, #0
tsthi r1, r2 @ see if divisor is power of 2
andeq r0, r0, r2
bls 10f
ARM_MOD_BODY r0, r1, r2, r3
10: cmp ip, #0
rsbmi r0, r0, #0
ret lr
UNWIND(.fnend)
ENDPROC(__modsi3)
#ifdef CONFIG_AEABI
ENTRY(__aeabi_uidivmod)
UNWIND(.fnstart)
UNWIND(.save {r0, r1, ip, lr} )
stmfd sp!, {r0, r1, ip, lr}
bl __aeabi_uidiv
ldmfd sp!, {r1, r2, ip, lr}
mul r3, r0, r2
sub r1, r1, r3
ret lr
UNWIND(.fnend)
ENDPROC(__aeabi_uidivmod)
ENTRY(__aeabi_idivmod)
UNWIND(.fnstart)
UNWIND(.save {r0, r1, ip, lr} )
stmfd sp!, {r0, r1, ip, lr}
bl __aeabi_idiv
ldmfd sp!, {r1, r2, ip, lr}
mul r3, r0, r2
sub r1, r1, r3
ret lr
UNWIND(.fnend)
ENDPROC(__aeabi_idivmod)
#endif
Ldiv0:
UNWIND(.fnstart)
UNWIND(.pad #4)
UNWIND(.save {lr})
str lr, [sp, #-8]!
bl __div0
mov r0, #0 @ About as wrong as it could be.
ldr pc, [sp], #8
UNWIND(.fnend)
ENDPROC(Ldiv0)
arch/arm/lib/io-readsw-armv4.S
/*
* linux/arch/arm/lib/io-readsw-armv4.S
*
* Copyright (C) 1995-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.macro pack, rd, hw1, hw2
#ifndef __ARMEB__
orr \rd, \hw1, \hw2, lsl #16
#else
orr \rd, \hw2, \hw1, lsl #16
#endif
.endm
.Linsw_align: movs ip, r1, lsl #31
bne .Linsw_noalign
ldrh ip, [r0]
sub r2, r2, #1
strh ip, [r1], #2
ENTRY(__raw_readsw)
teq r2, #0
reteq lr
tst r1, #3
bne .Linsw_align
stmfd sp!, {r4, r5, lr}
subs r2, r2, #8
bmi .Lno_insw_8
.Linsw_8_lp: ldrh r3, [r0]
ldrh r4, [r0]
pack r3, r3, r4
ldrh r4, [r0]
ldrh r5, [r0]
pack r4, r4, r5
ldrh r5, [r0]
ldrh ip, [r0]
pack r5, r5, ip
ldrh ip, [r0]
ldrh lr, [r0]
pack ip, ip, lr
subs r2, r2, #8
stmia r1!, {r3 - r5, ip}
bpl .Linsw_8_lp
.Lno_insw_8: tst r2, #4
beq .Lno_insw_4
ldrh r3, [r0]
ldrh r4, [r0]
pack r3, r3, r4
ldrh r4, [r0]
ldrh ip, [r0]
pack r4, r4, ip
stmia r1!, {r3, r4}
.Lno_insw_4: movs r2, r2, lsl #31
bcc .Lno_insw_2
ldrh r3, [r0]
ldrh ip, [r0]
pack r3, r3, ip
str r3, [r1], #4
.Lno_insw_2: ldrneh r3, [r0]
strneh r3, [r1]
ldmfd sp!, {r4, r5, pc}
#ifdef __ARMEB__
#define _BE_ONLY_(code...) code
#define _LE_ONLY_(code...)
#define push_hbyte0 lsr #8
#define pull_hbyte1 lsl #24
#else
#define _BE_ONLY_(code...)
#define _LE_ONLY_(code...) code
#define push_hbyte0 lsl #24
#define pull_hbyte1 lsr #8
#endif
.Linsw_noalign: stmfd sp!, {r4, lr}
ldrccb ip, [r1, #-1]!
bcc 1f
ldrh ip, [r0]
sub r2, r2, #1
_BE_ONLY_( mov ip, ip, ror #8 )
strb ip, [r1], #1
_LE_ONLY_( mov ip, ip, lsr #8 )
_BE_ONLY_( mov ip, ip, lsr #24 )
1: subs r2, r2, #2
bmi 3f
_BE_ONLY_( mov ip, ip, lsl #24 )
2: ldrh r3, [r0]
ldrh r4, [r0]
subs r2, r2, #2
orr ip, ip, r3, lsl #8
orr ip, ip, r4, push_hbyte0
str ip, [r1], #4
mov ip, r4, pull_hbyte1
bpl 2b
_BE_ONLY_( mov ip, ip, lsr #24 )
3: tst r2, #1
strb ip, [r1], #1
ldrneh ip, [r0]
_BE_ONLY_( movne ip, ip, ror #8 )
strneb ip, [r1], #1
_LE_ONLY_( movne ip, ip, lsr #8 )
_BE_ONLY_( movne ip, ip, lsr #24 )
strneb ip, [r1]
ldmfd sp!, {r4, pc}
ENDPROC(__raw_readsw)
arch/arm/lib/getuser.S
/*
* linux/arch/arm/lib/getuser.S
*
* Copyright (C) 2001 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Idea from x86 version, (C) Copyright 1998 Linus Torvalds
*
* These functions have a non-standard call interface to make them more
* efficient, especially as they return an error value in addition to
* the "real" return value.
*
* __get_user_X
*
* Inputs: r0 contains the address
* r1 contains the address limit, which must be preserved
* Outputs: r0 is the error code
* r2, r3 contains the zero-extended value
* lr corrupted
*
* No other registers must be altered. (see <asm/uaccess.h>
* for specific ASM register usage).
*
* Note that ADDR_LIMIT is either 0 or 0xc0000000.
* Note also that it is intended that __get_user_bad is not global.
*/
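/*
 * Illustrative only: C code does not call __get_user_X directly; it uses
 * the get_user() macro from <asm/uaccess.h>, which picks one of these
 * routines by the size of the access. A minimal sketch (val and uptr are
 * made-up names; a u32 load roughly ends up in __get_user_4):
 *
 *	u32 val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */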
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/domain.h>
ENTRY(__get_user_1)
check_uaccess r0, 1, r1, r2, __get_user_bad
1: TUSER(ldrb) r2, [r0]
mov r0, #0
ret lr
ENDPROC(__get_user_1)
_ASM_NOKPROBE(__get_user_1)
ENTRY(__get_user_2)
check_uaccess r0, 2, r1, r2, __get_user_bad
#if __LINUX_ARM_ARCH__ >= 6
2: TUSER(ldrh) r2, [r0]
#else
#ifdef CONFIG_CPU_USE_DOMAINS
rb .req ip
2: ldrbt r2, [r0], #1
3: ldrbt rb, [r0], #0
#else
rb .req r0
2: ldrb r2, [r0]
3: ldrb rb, [r0, #1]
#endif
#ifndef __ARMEB__
orr r2, r2, rb, lsl #8
#else
orr r2, rb, r2, lsl #8
#endif
#endif /* __LINUX_ARM_ARCH__ >= 6 */
mov r0, #0
ret lr
ENDPROC(__get_user_2)
_ASM_NOKPROBE(__get_user_2)
ENTRY(__get_user_4)
check_uaccess r0, 4, r1, r2, __get_user_bad
4: TUSER(ldr) r2, [r0]
mov r0, #0
ret lr
ENDPROC(__get_user_4)
_ASM_NOKPROBE(__get_user_4)
ENTRY(__get_user_8)
check_uaccess r0, 8, r1, r2, __get_user_bad8
#ifdef CONFIG_THUMB2_KERNEL
5: TUSER(ldr) r2, [r0]
6: TUSER(ldr) r3, [r0, #4]
#else
5: TUSER(ldr) r2, [r0], #4
6: TUSER(ldr) r3, [r0]
#endif
mov r0, #0
ret lr
ENDPROC(__get_user_8)
_ASM_NOKPROBE(__get_user_8)
#ifdef __ARMEB__
ENTRY(__get_user_32t_8)
check_uaccess r0, 8, r1, r2, __get_user_bad
#ifdef CONFIG_CPU_USE_DOMAINS
add r0, r0, #4
7: ldrt r2, [r0]
#else
7: ldr r2, [r0, #4]
#endif
mov r0, #0
ret lr
ENDPROC(__get_user_32t_8)
_ASM_NOKPROBE(__get_user_32t_8)
ENTRY(__get_user_64t_1)
check_uaccess r0, 1, r1, r2, __get_user_bad8
8: TUSER(ldrb) r3, [r0]
mov r0, #0
ret lr
ENDPROC(__get_user_64t_1)
_ASM_NOKPROBE(__get_user_64t_1)
ENTRY(__get_user_64t_2)
check_uaccess r0, 2, r1, r2, __get_user_bad8
#ifdef CONFIG_CPU_USE_DOMAINS
rb .req ip
9: ldrbt r3, [r0], #1
10: ldrbt rb, [r0], #0
#else
rb .req r0
9: ldrb r3, [r0]
10: ldrb rb, [r0, #1]
#endif
orr r3, rb, r3, lsl #8
mov r0, #0
ret lr
ENDPROC(__get_user_64t_2)
_ASM_NOKPROBE(__get_user_64t_2)
ENTRY(__get_user_64t_4)
check_uaccess r0, 4, r1, r2, __get_user_bad8
11: TUSER(ldr) r3, [r0]
mov r0, #0
ret lr
ENDPROC(__get_user_64t_4)
_ASM_NOKPROBE(__get_user_64t_4)
#endif
__get_user_bad8:
mov r3, #0
__get_user_bad:
mov r2, #0
mov r0, #-EFAULT
ret lr
ENDPROC(__get_user_bad)
ENDPROC(__get_user_bad8)
_ASM_NOKPROBE(__get_user_bad)
_ASM_NOKPROBE(__get_user_bad8)
.pushsection __ex_table, "a"
.long 1b, __get_user_bad
.long 2b, __get_user_bad
#if __LINUX_ARM_ARCH__ < 6
.long 3b, __get_user_bad
#endif
.long 4b, __get_user_bad
.long 5b, __get_user_bad8
.long 6b, __get_user_bad8
#ifdef __ARMEB__
.long 7b, __get_user_bad
.long 8b, __get_user_bad8
.long 9b, __get_user_bad8
.long 10b, __get_user_bad8
.long 11b, __get_user_bad8
#endif
.popsection
arch/arm/lib/putuser.S
/*
* linux/arch/arm/lib/putuser.S
*
* Copyright (C) 2001 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Idea from x86 version, (C) Copyright 1998 Linus Torvalds
*
* These functions have a non-standard call interface to make
* them more efficient, especially as they return an error
* value in addition to the "real" return value.
*
* __put_user_X
*
* Inputs: r0 contains the address
* r1 contains the address limit, which must be preserved
* r2, r3 contains the value
* Outputs: r0 is the error code
* lr corrupted
*
* No other registers must be altered. (see <asm/uaccess.h>
* for specific ASM register usage).
*
* Note that ADDR_LIMIT is either 0 or 0xc0000000
* Note also that it is intended that __put_user_bad is not global.
*/
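/*
 * Illustrative only: as with getuser.S, C code reaches these routines
 * through the put_user() macro from <asm/uaccess.h>, selected by access
 * size. A minimal sketch (val and uptr are made-up names):
 *
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 */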
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/domain.h>
ENTRY(__put_user_1)
check_uaccess r0, 1, r1, ip, __put_user_bad
1: TUSER(strb) r2, [r0]
mov r0, #0
ret lr
ENDPROC(__put_user_1)
ENTRY(__put_user_2)
check_uaccess r0, 2, r1, ip, __put_user_bad
#if __LINUX_ARM_ARCH__ >= 6
2: TUSER(strh) r2, [r0]
#else
mov ip, r2, lsr #8
#ifndef __ARMEB__
2: TUSER(strb) r2, [r0], #1
3: TUSER(strb) ip, [r0]
#else
2: TUSER(strb) ip, [r0], #1
3: TUSER(strb) r2, [r0]
#endif
#endif /* __LINUX_ARM_ARCH__ >= 6 */
mov r0, #0
ret lr
ENDPROC(__put_user_2)
ENTRY(__put_user_4)
check_uaccess r0, 4, r1, ip, __put_user_bad
4: TUSER(str) r2, [r0]
mov r0, #0
ret lr
ENDPROC(__put_user_4)
ENTRY(__put_user_8)
check_uaccess r0, 8, r1, ip, __put_user_bad
#ifdef CONFIG_THUMB2_KERNEL
5: TUSER(str) r2, [r0]
6: TUSER(str) r3, [r0, #4]
#else
5: TUSER(str) r2, [r0], #4
6: TUSER(str) r3, [r0]
#endif
mov r0, #0
ret lr
ENDPROC(__put_user_8)
__put_user_bad:
mov r0, #-EFAULT
ret lr
ENDPROC(__put_user_bad)
.pushsection __ex_table, "a"
.long 1b, __put_user_bad
.long 2b, __put_user_bad
#if __LINUX_ARM_ARCH__ < 6
.long 3b, __put_user_bad
#endif
.long 4b, __put_user_bad
.long 5b, __put_user_bad
.long 6b, __put_user_bad
.popsection
arch/arm/lib/call_with_stack.S
/*
* arch/arm/lib/call_with_stack.S
*
* Copyright (C) 2011 ARM Ltd.
* Written by Will Deacon <will.deacon@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
* void call_with_stack(void (*fn)(void *), void *arg, void *sp)
*
* Change the stack to that pointed at by sp, then invoke fn(arg) with
* the new stack.
*/
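/*
 * A minimal usage sketch (illustrative; the stack buffer and callback
 * names are made up). Since ARM stacks grow downwards, sp should point
 * just past the top of the new stack area:
 *
 *	static u8 new_stack[1024];
 *
 *	static void my_fn(void *arg) { ... }
 *
 *	call_with_stack(my_fn, arg, new_stack + sizeof(new_stack));
 */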
ENTRY(call_with_stack)
str sp, [r2, #-4]!
str lr, [r2, #-4]!
mov sp, r2
mov r2, r0
mov r0, r1
badr lr, 1f
ret r2
1: ldr lr, [sp]
ldr sp, [sp, #4]
ret lr
ENDPROC(call_with_stack)
arch/arm/kvm/init.S
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unified.h>
#include <asm/asm-offsets.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/virt.h>
/********************************************************************
* Hypervisor initialization
* - should be called with:
* r0 = top of Hyp stack (kernel VA)
* r1 = pointer to hyp vectors
* r2,r3 = Hypervisor pgd pointer
*
* The init scenario is:
* - We jump in HYP with 3 parameters: runtime HYP pgd, runtime stack,
* runtime vectors
* - Invalidate TLBs
* - Set stack and vectors
* - Setup the page tables
* - Enable the MMU
* - Profit! (or eret, if you only care about the code).
*
* Another possibility is to get a HYP stub hypercall.
* We discriminate between the two by checking if r0 contains a value
* that is less than HVC_STUB_HCALL_NR.
*/
.text
.pushsection .hyp.idmap.text,"ax"
.align 5
__kvm_hyp_init:
.globl __kvm_hyp_init
@ Hyp-mode exception vector
W(b) .
W(b) .
W(b) .
W(b) .
W(b) .
W(b) __do_hyp_init
W(b) .
W(b) .
__do_hyp_init:
@ Check for a stub hypercall
cmp r0, #HVC_STUB_HCALL_NR
blo __kvm_handle_stub_hvc
@ Set stack pointer
mov sp, r0
@ Set HVBAR to point to the HYP vectors
mcr p15, 4, r1, c12, c0, 0 @ HVBAR
@ Set the HTTBR to point to the hypervisor PGD pointer passed
mcrr p15, 4, rr_lo_hi(r2, r3), c2
@ Set the HTCR and VTCR to the same shareability and cacheability
@ settings as the non-secure TTBCR and with T0SZ == 0.
mrc p15, 4, r0, c2, c0, 2 @ HTCR
ldr r2, =HTCR_MASK
bic r0, r0, r2
mrc p15, 0, r1, c2, c0, 2 @ TTBCR
and r1, r1, #(HTCR_MASK & ~TTBCR_T0SZ)
orr r0, r0, r1
mcr p15, 4, r0, c2, c0, 2 @ HTCR
@ Use the same memory attributes for hyp. accesses as the kernel
	@ (copy MAIRx to HMAIRx).
mrc p15, 0, r0, c10, c2, 0
mcr p15, 4, r0, c10, c2, 0
mrc p15, 0, r0, c10, c2, 1
mcr p15, 4, r0, c10, c2, 1
@ Invalidate the stale TLBs from Bootloader
mcr p15, 4, r0, c8, c7, 0 @ TLBIALLH
dsb ish
@ Set the HSCTLR to:
@ - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel)
@ - Endianness: Kernel config
@ - Fast Interrupt Features: Kernel config
@ - Write permission implies XN: disabled
@ - Instruction cache: enabled
@ - Data/Unified cache: enabled
@ - MMU: enabled (this code must be run from an identity mapping)
mrc p15, 4, r0, c1, c0, 0 @ HSCR
ldr r2, =HSCTLR_MASK
bic r0, r0, r2
mrc p15, 0, r1, c1, c0, 0 @ SCTLR
ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
and r1, r1, r2
ARM( ldr r2, =(HSCTLR_M) )
THUMB( ldr r2, =(HSCTLR_M | HSCTLR_TE) )
orr r1, r1, r2
orr r0, r0, r1
mcr p15, 4, r0, c1, c0, 0 @ HSCR
isb
eret
ENTRY(__kvm_handle_stub_hvc)
cmp r0, #HVC_SOFT_RESTART
bne 1f
/* The target is expected in r1 */
msr ELR_hyp, r1
mrs r0, cpsr
bic r0, r0, #MODE_MASK
orr r0, r0, #HYP_MODE
THUMB( orr r0, r0, #PSR_T_BIT )
msr spsr_cxsf, r0
b reset
1: cmp r0, #HVC_RESET_VECTORS
bne 1f
reset:
/* We're now in idmap, disable MMU */
mrc p15, 4, r1, c1, c0, 0 @ HSCTLR
ldr r0, =(HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I)
bic r1, r1, r0
mcr p15, 4, r1, c1, c0, 0 @ HSCTLR
/*
* Install stub vectors, using ardb's VA->PA trick.
*/
0: adr r0, 0b @ PA(0)
movw r1, #:lower16:__hyp_stub_vectors - 0b @ VA(stub) - VA(0)
movt r1, #:upper16:__hyp_stub_vectors - 0b
add r1, r1, r0 @ PA(stub)
mcr p15, 4, r1, c12, c0, 0 @ HVBAR
b exit
1: ldr r0, =HVC_STUB_ERR
eret
exit:
mov r0, #0
eret
ENDPROC(__kvm_handle_stub_hvc)
.ltorg
.globl __kvm_hyp_init_end
__kvm_hyp_init_end:
.popsection
arch/arm/kvm/interrupts.S
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/linkage.h>
.text
/********************************************************************
* Call function in Hyp mode
*
*
* unsigned long kvm_call_hyp(void *hypfn, ...);
*
* This is not really a variadic function in the classic C-way and care must
* be taken when calling this to ensure parameters are passed in registers
* only, since the stack will change between the caller and the callee.
*
* Call the function with the first argument containing a pointer to the
* function you wish to call in Hyp mode, and subsequent arguments will be
* passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the
* function pointer can be passed). The function being called must be mapped
* in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
* passed in r0 (strictly 32bit).
*
* The calling convention follows the standard AAPCS:
* r0 - r3: caller save
* r12: caller save
* rest: callee save
*/
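/*
 * A minimal usage sketch (illustrative; hyp_fn and its arguments are
 * made-up names). The first argument is the HYP-mapped function to run,
 * the remaining arguments travel in r0-r2:
 *
 *	ret = kvm_call_hyp(hyp_fn, arg0, arg1);
 */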
ENTRY(kvm_call_hyp)
hvc #0
bx lr
ENDPROC(kvm_call_hyp)
arch/arm/mach-exynos/sleep.S
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Exynos low-level resume code
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/hardware/cache-l2x0.h>
#include "smc.h"
#define CPU_MASK 0xff0ffff0
#define CPU_CORTEX_A9 0x410fc090
.text
.align
/*
 * sleep magic, to allow the bootloader to check for a valid
* image to resume to. Must be the first word before the
* exynos_cpu_resume entry.
*/
.word 0x2bedf00d
/*
* exynos_cpu_resume
*
* resume code entry for bootloader to call
*/
ENTRY(exynos_cpu_resume)
#ifdef CONFIG_CACHE_L2X0
mrc p15, 0, r0, c0, c0, 0
ldr r1, =CPU_MASK
and r0, r0, r1
ldr r1, =CPU_CORTEX_A9
cmp r0, r1
bleq l2c310_early_resume
#endif
b cpu_resume
ENDPROC(exynos_cpu_resume)
.align
ENTRY(exynos_cpu_resume_ns)
mrc p15, 0, r0, c0, c0, 0
ldr r1, =CPU_MASK
and r0, r0, r1
ldr r1, =CPU_CORTEX_A9
cmp r0, r1
bne skip_cp15
adr r0, _cp15_save_power
ldr r1, [r0]
ldr r1, [r0, r1]
adr r0, _cp15_save_diag
ldr r2, [r0]
ldr r2, [r0, r2]
mov r0, #SMC_CMD_C15RESUME
dsb
smc #0
#ifdef CONFIG_CACHE_L2X0
adr r0, 1f
ldr r2, [r0]
add r0, r2, r0
/* Check that the address has been initialised. */
ldr r1, [r0, #L2X0_R_PHY_BASE]
teq r1, #0
beq skip_l2x0
/* Check if controller has been enabled. */
ldr r2, [r1, #L2X0_CTRL]
tst r2, #0x1
bne skip_l2x0
ldr r1, [r0, #L2X0_R_TAG_LATENCY]
ldr r2, [r0, #L2X0_R_DATA_LATENCY]
ldr r3, [r0, #L2X0_R_PREFETCH_CTRL]
mov r0, #SMC_CMD_L2X0SETUP1
smc #0
/* Reload saved regs pointer because smc corrupts registers. */
adr r0, 1f
ldr r2, [r0]
add r0, r2, r0
ldr r1, [r0, #L2X0_R_PWR_CTRL]
ldr r2, [r0, #L2X0_R_AUX_CTRL]
mov r0, #SMC_CMD_L2X0SETUP2
smc #0
mov r0, #SMC_CMD_L2X0INVALL
smc #0
mov r1, #1
mov r0, #SMC_CMD_L2X0CTRL
smc #0
skip_l2x0:
#endif /* CONFIG_CACHE_L2X0 */
skip_cp15:
b cpu_resume
ENDPROC(exynos_cpu_resume_ns)
.align
_cp15_save_power:
.long cp15_save_power - .
_cp15_save_diag:
.long cp15_save_diag - .
#ifdef CONFIG_CACHE_L2X0
1: .long l2x0_saved_regs - .
#endif /* CONFIG_CACHE_L2X0 */
.data
.align 2
.globl cp15_save_diag
cp15_save_diag:
.long 0 @ cp15 diagnostic
.globl cp15_save_power
cp15_save_power:
.long 0 @ cp15 power control
arch/arm/mach-sa1100/sleep.S
/*
* SA11x0 Assembler Sleep/WakeUp Management Routines
*
* Copyright (c) 2001 Cliff Brake <cbrake@accelent.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License.
*
* History:
*
* 2001-02-06: Cliff Brake Initial code
*
* 2001-08-29: Nicolas Pitre Simplified.
*
* 2002-05-27: Nicolas Pitre Revisited, more cleanup and simplification.
* Storage is on the stack now.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <mach/hardware.h>
.text
/*
* sa1100_finish_suspend()
*
* Causes sa11x0 to enter sleep state
*
* Must be aligned to a cacheline.
*/
.balign 32
ENTRY(sa1100_finish_suspend)
@ disable clock switching
mcr p15, 0, r1, c15, c2, 2
ldr r6, =MDREFR
ldr r4, [r6]
orr r4, r4, #MDREFR_K1DB2
ldr r5, =PPCR
@ Pre-load __loop_udelay into the I-cache
mov r0, #1
bl __loop_udelay
mov r0, r0
@ The following must all exist in a single cache line to
@ avoid accessing memory until this sequence is complete,
@ otherwise we occasionally hang.
@ Adjust memory timing before lowering CPU clock
str r4, [r6]
@ delay 90us and set CPU PLL to lowest speed
@ fixes resume problem on high speed SA1110
mov r0, #90
bl __loop_udelay
mov r1, #0
str r1, [r5]
mov r0, #90
bl __loop_udelay
/*
* SA1110 SDRAM controller workaround. register values:
*
* r0 = &MSC0
* r1 = &MSC1
* r2 = &MSC2
* r3 = MSC0 value
* r4 = MSC1 value
* r5 = MSC2 value
* r6 = &MDREFR
* r7 = first MDREFR value
* r8 = second MDREFR value
* r9 = &MDCNFG
* r10 = MDCNFG value
* r11 = third MDREFR value
* r12 = &PMCR
* r13 = PMCR value (1)
*/
ldr r0, =MSC0
ldr r1, =MSC1
ldr r2, =MSC2
ldr r3, [r0]
bic r3, r3, #FMsk(MSC_RT)
bic r3, r3, #FMsk(MSC_RT)<<16
ldr r4, [r1]
bic r4, r4, #FMsk(MSC_RT)
bic r4, r4, #FMsk(MSC_RT)<<16
ldr r5, [r2]
bic r5, r5, #FMsk(MSC_RT)
bic r5, r5, #FMsk(MSC_RT)<<16
ldr r7, [r6]
bic r7, r7, #0x0000FF00
bic r7, r7, #0x000000F0
orr r8, r7, #MDREFR_SLFRSH
ldr r9, =MDCNFG
ldr r10, [r9]
bic r10, r10, #(MDCNFG_DE0+MDCNFG_DE1)
bic r10, r10, #(MDCNFG_DE2+MDCNFG_DE3)
bic r11, r8, #MDREFR_SLFRSH
bic r11, r11, #MDREFR_E1PIN
ldr r12, =PMCR
mov r13, #PMCR_SF
b sa1110_sdram_controller_fix
.align 5
sa1110_sdram_controller_fix:
@ Step 1 clear RT field of all MSCx registers
str r3, [r0]
str r4, [r1]
str r5, [r2]
@ Step 2 clear DRI field in MDREFR
str r7, [r6]
@ Step 3 set SLFRSH bit in MDREFR
str r8, [r6]
	@ Step 4 clear DE bits in MDCNFG
str r10, [r9]
@ Step 5 clear DRAM refresh control register
str r11, [r6]
@ Wow, now the hardware suspend request pins can be used, that makes them functional for
@ about 7 ns out of the entire time that the CPU is running!
@ Step 6 set force sleep bit in PMCR
str r13, [r12]
20: b 20b @ loop waiting for sleep
arch/arm/mach-imx/suspend-imx53.S
/*
* Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
*/
/*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/linkage.h>
#define M4IF_MCR0_OFFSET (0x008C)
#define M4IF_MCR0_FDVFS (0x1 << 11)
#define M4IF_MCR0_FDVACK (0x1 << 27)
.align 3
/*
* ==================== low level suspend ====================
*
* On entry
* r0: pm_info structure address;
*
* suspend ocram space layout:
* ======================== high address ======================
* .
* .
* .
* ^
* ^
* ^
* imx53_suspend code
* PM_INFO structure(imx53_suspend_info)
* ======================== low address =======================
*/
/* Offsets of members of struct imx53_suspend_info */
#define SUSPEND_INFO_MX53_M4IF_V_OFFSET 0x0
#define SUSPEND_INFO_MX53_IOMUXC_V_OFFSET 0x4
#define SUSPEND_INFO_MX53_IO_COUNT_OFFSET 0x8
#define SUSPEND_INFO_MX53_IO_STATE_OFFSET 0xc
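/*
 * Layout sketch inferred from the offsets above and from how the loops
 * below walk the io_state area (each entry is four words: IOMUXC register
 * offset, clear mask, set mask, saved value). Field names here are
 * illustrative; the C code that fills this structure is authoritative.
 *
 *	struct imx53_suspend_info {		// sketch only
 *		void __iomem *m4if_vbase;	// 0x0
 *		void __iomem *iomuxc_vbase;	// 0x4
 *		u32 io_count;			// 0x8
 *		u32 io_state[][4];		// 0xc: {offset, clear, set, saved}
 *	};
 */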
ENTRY(imx53_suspend)
stmfd sp!, {r4,r5,r6,r7}
/* Save pad config */
ldr r1, [r0, #SUSPEND_INFO_MX53_IO_COUNT_OFFSET]
cmp r1, #0
beq skip_pad_conf_1
add r2, r0, #SUSPEND_INFO_MX53_IO_STATE_OFFSET
ldr r3, [r0, #SUSPEND_INFO_MX53_IOMUXC_V_OFFSET]
1:
ldr r5, [r2], #12 /* IOMUXC register offset */
ldr r6, [r3, r5] /* current value */
str r6, [r2], #4 /* save area */
subs r1, r1, #1
bne 1b
skip_pad_conf_1:
/* Set FDVFS bit of M4IF_MCR0 to request DDR to enter self-refresh */
ldr r1, [r0, #SUSPEND_INFO_MX53_M4IF_V_OFFSET]
ldr r2,[r1, #M4IF_MCR0_OFFSET]
orr r2, r2, #M4IF_MCR0_FDVFS
str r2,[r1, #M4IF_MCR0_OFFSET]
/* Poll FDVACK bit of M4IF_MCR to wait for DDR to enter self-refresh */
wait_sr_ack:
ldr r2,[r1, #M4IF_MCR0_OFFSET]
ands r2, r2, #M4IF_MCR0_FDVACK
beq wait_sr_ack
/* Set pad config */
ldr r1, [r0, #SUSPEND_INFO_MX53_IO_COUNT_OFFSET]
cmp r1, #0
beq skip_pad_conf_2
add r2, r0, #SUSPEND_INFO_MX53_IO_STATE_OFFSET
ldr r3, [r0, #SUSPEND_INFO_MX53_IOMUXC_V_OFFSET]
2:
ldr r5, [r2], #4 /* IOMUXC register offset */
ldr r6, [r2], #4 /* clear */
ldr r7, [r3, r5]
bic r7, r7, r6
ldr r6, [r2], #8 /* set */
orr r7, r7, r6
str r7, [r3, r5]
subs r1, r1, #1
bne 2b
skip_pad_conf_2:
/* Zzz, enter stop mode */
wfi
nop
nop
nop
nop
/* Restore pad config */
ldr r1, [r0, #SUSPEND_INFO_MX53_IO_COUNT_OFFSET]
cmp r1, #0
beq skip_pad_conf_3
add r2, r0, #SUSPEND_INFO_MX53_IO_STATE_OFFSET
ldr r3, [r0, #SUSPEND_INFO_MX53_IOMUXC_V_OFFSET]
3:
ldr r5, [r2], #12 /* IOMUXC register offset */
ldr r6, [r2], #4 /* saved value */
str r6, [r3, r5]
subs r1, r1, #1
bne 3b
skip_pad_conf_3:
/* Clear FDVFS bit of M4IF_MCR0 to request DDR to exit self-refresh */
ldr r1, [r0, #SUSPEND_INFO_MX53_M4IF_V_OFFSET]
ldr r2,[r1, #M4IF_MCR0_OFFSET]
bic r2, r2, #M4IF_MCR0_FDVFS
str r2,[r1, #M4IF_MCR0_OFFSET]
/* Poll FDVACK bit of M4IF_MCR to wait for DDR to exit self-refresh */
wait_ar_ack:
ldr r2,[r1, #M4IF_MCR0_OFFSET]
ands r2, r2, #M4IF_MCR0_FDVACK
bne wait_ar_ack
/* Restore registers */
ldmfd sp!, {r4,r5,r6,r7}
mov pc, lr
ENDPROC(imx53_suspend)
ENTRY(imx53_suspend_sz)
.word . - imx53_suspend
arch/arm/mach-imx/ssi-fiq.S
/*
* Copyright (C) 2009 Sascha Hauer <s.hauer@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
* r8 = bit 0-15: tx offset, bit 16-31: tx buffer size
* r9 = bit 0-15: rx offset, bit 16-31: rx buffer size
*/
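/*
 * In C terms (illustrative only): tx_offset = r8 & 0xffff and
 * tx_size = r8 >> 16, and likewise rx_offset/rx_size in r9. The handler
 * below advances the offset by 8 bytes per FIQ and wraps it back to 0
 * once it would pass the buffer size.
 */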
#define SSI_STX0 0x00
#define SSI_SRX0 0x08
#define SSI_SISR 0x14
#define SSI_SIER 0x18
#define SSI_SACNT 0x38
#define SSI_SACNT_AC97EN (1 << 0)
#define SSI_SIER_TFE0_EN (1 << 0)
#define SSI_SISR_TFE0 (1 << 0)
#define SSI_SISR_RFF0 (1 << 2)
#define SSI_SIER_RFF0_EN (1 << 2)
.text
.global imx_ssi_fiq_start
.global imx_ssi_fiq_end
.global imx_ssi_fiq_base
.global imx_ssi_fiq_rx_buffer
.global imx_ssi_fiq_tx_buffer
/*
* imx_ssi_fiq_start is _intentionally_ not marked as a function symbol
* using ENDPROC(). imx_ssi_fiq_start and imx_ssi_fiq_end are used to
* mark the function body so that it can be copied to the FIQ vector in
* the vectors page. imx_ssi_fiq_start should only be called as the result
* of an FIQ: calling it directly will not work.
*/
imx_ssi_fiq_start:
ldr r12, .L_imx_ssi_fiq_base
/* TX */
ldr r13, .L_imx_ssi_fiq_tx_buffer
/* shall we send? */
ldr r11, [r12, #SSI_SIER]
tst r11, #SSI_SIER_TFE0_EN
beq 1f
/* TX FIFO empty? */
ldr r11, [r12, #SSI_SISR]
tst r11, #SSI_SISR_TFE0
beq 1f
mov r10, #0x10000
sub r10, #1
and r10, r10, r8 /* r10: current buffer offset */
add r13, r13, r10
ldrh r11, [r13]
strh r11, [r12, #SSI_STX0]
ldrh r11, [r13, #2]
strh r11, [r12, #SSI_STX0]
ldrh r11, [r13, #4]
strh r11, [r12, #SSI_STX0]
ldrh r11, [r13, #6]
strh r11, [r12, #SSI_STX0]
add r10, #8
lsr r11, r8, #16 /* r11: buffer size */
cmp r10, r11
lslgt r8, r11, #16
addle r8, #8
1:
/* RX */
/* shall we receive? */
ldr r11, [r12, #SSI_SIER]
tst r11, #SSI_SIER_RFF0_EN
beq 1f
/* RX FIFO full? */
ldr r11, [r12, #SSI_SISR]
tst r11, #SSI_SISR_RFF0
beq 1f
ldr r13, .L_imx_ssi_fiq_rx_buffer
mov r10, #0x10000
sub r10, #1
and r10, r10, r9 /* r10: current buffer offset */
add r13, r13, r10
ldr r11, [r12, #SSI_SACNT]
tst r11, #SSI_SACNT_AC97EN
ldr r11, [r12, #SSI_SRX0]
strh r11, [r13]
ldr r11, [r12, #SSI_SRX0]
strh r11, [r13, #2]
/* dummy read to skip slot 12 */
ldrne r11, [r12, #SSI_SRX0]
ldr r11, [r12, #SSI_SRX0]
strh r11, [r13, #4]
ldr r11, [r12, #SSI_SRX0]
strh r11, [r13, #6]
/* dummy read to skip slot 12 */
ldrne r11, [r12, #SSI_SRX0]
add r10, #8
lsr r11, r9, #16 /* r11: buffer size */
cmp r10, r11
lslgt r9, r11, #16
addle r9, #8
1:
@ return from FIQ
subs pc, lr, #4
.align
.L_imx_ssi_fiq_base:
imx_ssi_fiq_base:
.word 0x0
.L_imx_ssi_fiq_rx_buffer:
imx_ssi_fiq_rx_buffer:
.word 0x0
.L_imx_ssi_fiq_tx_buffer:
imx_ssi_fiq_tx_buffer:
.word 0x0
.L_imx_ssi_fiq_end:
imx_ssi_fiq_end:
arch/arm/mach-imx/suspend-imx6.S
/*
* Copyright 2014 Freescale Semiconductor, Inc.
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hardware/cache-l2x0.h>
#include "hardware.h"
/*
* ==================== low level suspend ====================
*
* Better to follow below rules to use ARM registers:
* r0: pm_info structure address;
* r1 ~ r4: for saving pm_info members;
* r5 ~ r10: free registers;
* r11: io base address.
*
* suspend ocram space layout:
* ======================== high address ======================
* .
* .
* .
* ^
* ^
* ^
* imx6_suspend code
* PM_INFO structure(imx6_cpu_pm_info)
* ======================== low address =======================
*/
/*
 * The offsets below are based on struct imx6_cpu_pm_info,
 * which is defined in arch/arm/mach-imx/pm-imx6q.c; this
 * structure contains the PM info needed by the low level
 * suspend code.
*/
#define PM_INFO_PBASE_OFFSET 0x0
#define PM_INFO_RESUME_ADDR_OFFSET 0x4
#define PM_INFO_DDR_TYPE_OFFSET 0x8
#define PM_INFO_PM_INFO_SIZE_OFFSET 0xC
#define PM_INFO_MX6Q_MMDC_P_OFFSET 0x10
#define PM_INFO_MX6Q_MMDC_V_OFFSET 0x14
#define PM_INFO_MX6Q_SRC_P_OFFSET 0x18
#define PM_INFO_MX6Q_SRC_V_OFFSET 0x1C
#define PM_INFO_MX6Q_IOMUXC_P_OFFSET 0x20
#define PM_INFO_MX6Q_IOMUXC_V_OFFSET 0x24
#define PM_INFO_MX6Q_CCM_P_OFFSET 0x28
#define PM_INFO_MX6Q_CCM_V_OFFSET 0x2C
#define PM_INFO_MX6Q_GPC_P_OFFSET 0x30
#define PM_INFO_MX6Q_GPC_V_OFFSET 0x34
#define PM_INFO_MX6Q_L2_P_OFFSET 0x38
#define PM_INFO_MX6Q_L2_V_OFFSET 0x3C
#define PM_INFO_MMDC_IO_NUM_OFFSET 0x40
#define PM_INFO_MMDC_IO_VAL_OFFSET 0x44
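/*
 * Layout sketch inferred from the offsets above (field names are
 * illustrative; struct imx6_cpu_pm_info in pm-imx6q.c is authoritative).
 * Each peripheral gets a {physical base, virtual base} pair, and the MMDC
 * IO list is a sequence of {register offset, value} word pairs:
 *
 *	struct imx6_cpu_pm_info {		// sketch only
 *		u32 pbase;			// 0x00: physical address of this struct
 *		u32 resume_addr;		// 0x04
 *		u32 ddr_type;			// 0x08
 *		u32 pm_info_size;		// 0x0c
 *		u32 mmdc_p, mmdc_v;		// 0x10, 0x14
 *		u32 src_p, src_v;		// 0x18, 0x1c
 *		u32 iomuxc_p, iomuxc_v;		// 0x20, 0x24
 *		u32 ccm_p, ccm_v;		// 0x28, 0x2c
 *		u32 gpc_p, gpc_v;		// 0x30, 0x34
 *		u32 l2_p, l2_v;			// 0x38, 0x3c
 *		u32 mmdc_io_num;		// 0x40
 *		u32 mmdc_io_val[][2];		// 0x44: {offset, value} pairs
 *	};
 */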
#define MX6Q_SRC_GPR1 0x20
#define MX6Q_SRC_GPR2 0x24
#define MX6Q_MMDC_MAPSR 0x404
#define MX6Q_MMDC_MPDGCTRL0 0x83c
#define MX6Q_GPC_IMR1 0x08
#define MX6Q_GPC_IMR2 0x0c
#define MX6Q_GPC_IMR3 0x10
#define MX6Q_GPC_IMR4 0x14
#define MX6Q_CCM_CCR 0x0
.align 3
.arm
.macro sync_l2_cache
/* sync L2 cache to drain L2's buffers to DRAM. */
#ifdef CONFIG_CACHE_L2X0
ldr r11, [r0, #PM_INFO_MX6Q_L2_V_OFFSET]
teq r11, #0
beq 6f
mov r6, #0x0
str r6, [r11, #L2X0_CACHE_SYNC]
1:
ldr r6, [r11, #L2X0_CACHE_SYNC]
ands r6, r6, #0x1
bne 1b
6:
#endif
.endm
.macro resume_mmdc
/* restore MMDC IO */
cmp r5, #0x0
ldreq r11, [r0, #PM_INFO_MX6Q_IOMUXC_V_OFFSET]
ldrne r11, [r0, #PM_INFO_MX6Q_IOMUXC_P_OFFSET]
ldr r6, [r0, #PM_INFO_MMDC_IO_NUM_OFFSET]
ldr r7, =PM_INFO_MMDC_IO_VAL_OFFSET
add r7, r7, r0
1:
ldr r8, [r7], #0x4
ldr r9, [r7], #0x4
str r9, [r11, r8]
subs r6, r6, #0x1
bne 1b
cmp r5, #0x0
ldreq r11, [r0, #PM_INFO_MX6Q_MMDC_V_OFFSET]
ldrne r11, [r0, #PM_INFO_MX6Q_MMDC_P_OFFSET]
cmp r3, #IMX_DDR_TYPE_LPDDR2
bne 4f
/* reset read FIFO, RST_RD_FIFO */
ldr r7, =MX6Q_MMDC_MPDGCTRL0
ldr r6, [r11, r7]
orr r6, r6, #(1 << 31)
str r6, [r11, r7]
2:
ldr r6, [r11, r7]
ands r6, r6, #(1 << 31)
bne 2b
/* reset FIFO a second time */
ldr r6, [r11, r7]
orr r6, r6, #(1 << 31)
str r6, [r11, r7]
3:
ldr r6, [r11, r7]
ands r6, r6, #(1 << 31)
bne 3b
4:
/* let DDR out of self-refresh */
ldr r7, [r11, #MX6Q_MMDC_MAPSR]
bic r7, r7, #(1 << 21)
str r7, [r11, #MX6Q_MMDC_MAPSR]
5:
ldr r7, [r11, #MX6Q_MMDC_MAPSR]
ands r7, r7, #(1 << 25)
bne 5b
/* enable DDR auto power saving */
ldr r7, [r11, #MX6Q_MMDC_MAPSR]
bic r7, r7, #0x1
str r7, [r11, #MX6Q_MMDC_MAPSR]
.endm
ENTRY(imx6_suspend)
ldr r1, [r0, #PM_INFO_PBASE_OFFSET]
ldr r2, [r0, #PM_INFO_RESUME_ADDR_OFFSET]
ldr r3, [r0, #PM_INFO_DDR_TYPE_OFFSET]
ldr r4, [r0, #PM_INFO_PM_INFO_SIZE_OFFSET]
/*
 * compute the resume address in IRAM
 * so that it can be set in the SRC register.
*/
ldr r6, =imx6_suspend
ldr r7, =resume
sub r7, r7, r6
add r8, r1, r4
add r9, r8, r7
/*
 * make sure the TLB contains the addresses we want,
 * as we will access them after the MMDC IOs are floated.
*/
ldr r11, [r0, #PM_INFO_MX6Q_CCM_V_OFFSET]
ldr r6, [r11, #0x0]
ldr r11, [r0, #PM_INFO_MX6Q_GPC_V_OFFSET]
ldr r6, [r11, #0x0]
ldr r11, [r0, #PM_INFO_MX6Q_IOMUXC_V_OFFSET]
ldr r6, [r11, #0x0]
/* use r11 to store the IO address */
ldr r11, [r0, #PM_INFO_MX6Q_SRC_V_OFFSET]
/* store physical resume addr and pm_info address. */
str r9, [r11, #MX6Q_SRC_GPR1]
str r1, [r11, #MX6Q_SRC_GPR2]
/* need to sync L2 cache before DSM. */
sync_l2_cache
ldr r11, [r0, #PM_INFO_MX6Q_MMDC_V_OFFSET]
/*
* put DDR explicitly into self-refresh and
* disable automatic power savings.
*/
ldr r7, [r11, #MX6Q_MMDC_MAPSR]
orr r7, r7, #0x1
str r7, [r11, #MX6Q_MMDC_MAPSR]
/* make the DDR explicitly enter self-refresh. */
ldr r7, [r11, #MX6Q_MMDC_MAPSR]
orr r7, r7, #(1 << 21)
str r7, [r11, #MX6Q_MMDC_MAPSR]
poll_dvfs_set:
ldr r7, [r11, #MX6Q_MMDC_MAPSR]
ands r7, r7, #(1 << 25)
beq poll_dvfs_set
ldr r11, [r0, #PM_INFO_MX6Q_IOMUXC_V_OFFSET]
ldr r6, =0x0
ldr r7, [r0, #PM_INFO_MMDC_IO_NUM_OFFSET]
ldr r8, =PM_INFO_MMDC_IO_VAL_OFFSET
add r8, r8, r0
/* LPDDR2's last 3 IOs need special setting */
cmp r3, #IMX_DDR_TYPE_LPDDR2
subeq r7, r7, #0x3
set_mmdc_io_lpm:
ldr r9, [r8], #0x8
str r6, [r11, r9]
subs r7, r7, #0x1
bne set_mmdc_io_lpm
cmp r3, #IMX_DDR_TYPE_LPDDR2
bne set_mmdc_io_lpm_done
ldr r6, =0x1000
ldr r9, [r8], #0x8
str r6, [r11, r9]
ldr r9, [r8], #0x8
str r6, [r11, r9]
ldr r6, =0x80000
ldr r9, [r8]
str r6, [r11, r9]
set_mmdc_io_lpm_done:
/*
* mask all GPC interrupts before
* enabling the RBC counters to
* avoid the counter starting too
 * early if an interrupt is already
* pending.
*/
ldr r11, [r0, #PM_INFO_MX6Q_GPC_V_OFFSET]
ldr r6, [r11, #MX6Q_GPC_IMR1]
ldr r7, [r11, #MX6Q_GPC_IMR2]
ldr r8, [r11, #MX6Q_GPC_IMR3]
ldr r9, [r11, #MX6Q_GPC_IMR4]
ldr r10, =0xffffffff
str r10, [r11, #MX6Q_GPC_IMR1]
str r10, [r11, #MX6Q_GPC_IMR2]
str r10, [r11, #MX6Q_GPC_IMR3]
str r10, [r11, #MX6Q_GPC_IMR4]
/*
* enable the RBC bypass counter here
* to hold off the interrupts. RBC counter
* = 32 (1ms), Minimum RBC delay should be
* 400us for the analog LDOs to power down.
*/
ldr r11, [r0, #PM_INFO_MX6Q_CCM_V_OFFSET]
ldr r10, [r11, #MX6Q_CCM_CCR]
bic r10, r10, #(0x3f << 21)
orr r10, r10, #(0x20 << 21)
str r10, [r11, #MX6Q_CCM_CCR]
/* enable the counter. */
ldr r10, [r11, #MX6Q_CCM_CCR]
orr r10, r10, #(0x1 << 27)
str r10, [r11, #MX6Q_CCM_CCR]
/* unmask all the GPC interrupts. */
ldr r11, [r0, #PM_INFO_MX6Q_GPC_V_OFFSET]
str r6, [r11, #MX6Q_GPC_IMR1]
str r7, [r11, #MX6Q_GPC_IMR2]
str r8, [r11, #MX6Q_GPC_IMR3]
str r9, [r11, #MX6Q_GPC_IMR4]
/*
* now delay for a short while (3usec)
* ARM is at 1GHz at this point
* so a short loop should be enough.
* this delay is required to ensure that
* the RBC counter can start counting in
* case an interrupt is already pending
* or in case an interrupt arrives just
* as ARM is about to assert DSM_request.
*/
ldr r6, =2000
rbc_loop:
subs r6, r6, #0x1
bne rbc_loop
/* Zzz, enter stop mode */
wfi
nop
nop
nop
nop
/*
 * reaching here means a wakeup source is
 * pending and the system will resume
 * automatically; restore the MMDC IO first
*/
mov r5, #0x0
resume_mmdc
/* return to suspend finish */
ret lr
resume:
/* invalidate L1 I-cache first */
mov r6, #0x0
mcr p15, 0, r6, c7, c5, 0
mcr p15, 0, r6, c7, c5, 6
/* enable the Icache and branch prediction */
mov r6, #0x1800
mcr p15, 0, r6, c1, c0, 0
isb
/* get physical resume address from pm_info. */
ldr lr, [r0, #PM_INFO_RESUME_ADDR_OFFSET]
/* clear core0's entry and parameter */
ldr r11, [r0, #PM_INFO_MX6Q_SRC_P_OFFSET]
mov r7, #0x0
str r7, [r11, #MX6Q_SRC_GPR1]
str r7, [r11, #MX6Q_SRC_GPR2]
ldr r3, [r0, #PM_INFO_DDR_TYPE_OFFSET]
mov r5, #0x1
resume_mmdc
ret lr
ENDPROC(imx6_suspend)
arch/arm/mach-s3c64xx/sleep.S
/* SPDX-License-Identifier: GPL-2.0 */
/* linux/arch/arm/plat-s3c64xx/sleep.S
*
* Copyright 2008 Openmoko, Inc.
* Copyright 2008 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
* http://armlinux.simtec.co.uk/
*
* S3C64XX CPU sleep code
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <mach/map.h>
#undef S3C64XX_VA_GPIO
#define S3C64XX_VA_GPIO (0x0)
#include <mach/regs-gpio.h>
#define LL_UART (S3C_PA_UART + (0x400 * CONFIG_S3C_LOWLEVEL_UART_PORT))
.text
/* Sleep magic, the word before the resume entry point so that the
 * bootloader can check for a resumable image. */
.word 0x2bedf00d
/* s3c_cpu_resume
 *
 * This is the entry point, stored by whatever method the bootloader
 * requires to get the kernel running again. This code expects to be
* entered with no caches live and the MMU disabled. It will then
* restore the MMU and other basic CP registers saved and restart
* the kernel C code to finish the resume code.
*/
ENTRY(s3c_cpu_resume)
msr cpsr_c, #PSR_I_BIT | PSR_F_BIT | SVC_MODE
ldr r2, =LL_UART /* for debug */
#ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK
#define S3C64XX_GPNCON (S3C64XX_GPN_BASE + 0x00)
#define S3C64XX_GPNDAT (S3C64XX_GPN_BASE + 0x04)
#define S3C64XX_GPN_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
#define S3C64XX_GPN_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
/* Initialise the GPIO state if we are debugging via the SMDK LEDs,
* as the uboot version supplied resets these to inputs during the
* resume checks.
*/
ldr r3, =S3C64XX_PA_GPIO
ldr r0, [ r3, #S3C64XX_GPNCON ]
bic r0, r0, #(S3C64XX_GPN_CONMASK(12) | S3C64XX_GPN_CONMASK(13) | \
S3C64XX_GPN_CONMASK(14) | S3C64XX_GPN_CONMASK(15))
orr r0, r0, #(S3C64XX_GPN_OUTPUT(12) | S3C64XX_GPN_OUTPUT(13) | \
S3C64XX_GPN_OUTPUT(14) | S3C64XX_GPN_OUTPUT(15))
str r0, [ r3, #S3C64XX_GPNCON ]
ldr r0, [ r3, #S3C64XX_GPNDAT ]
bic r0, r0, #0xf << 12 @ GPN12..15
orr r0, r0, #1 << 15 @ GPN15
str r0, [ r3, #S3C64XX_GPNDAT ]
#endif
b cpu_resume
arch/arm/common/vlock.S
/*
* vlock.S - simple voting lock implementation for ARM
*
* Created by: Dave Martin, 2012-08-16
* Copyright: (C) 2012-2013 Linaro Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* This algorithm is described in more detail in
* Documentation/arm/vlocks.txt.
*/
#include <linux/linkage.h>
#include "vlock.h"
/* Select different code if voting flags can fit in a single word. */
#if VLOCK_VOTING_SIZE > 4
#define FEW(x...)
#define MANY(x...) x
#else
#define FEW(x...) x
#define MANY(x...)
#endif
@ voting lock for first-man coordination
.macro voting_begin rbase:req, rcpu:req, rscratch:req
mov \rscratch, #1
strb \rscratch, [\rbase, \rcpu]
dmb
.endm
.macro voting_end rbase:req, rcpu:req, rscratch:req
dmb
mov \rscratch, #0
strb \rscratch, [\rbase, \rcpu]
dsb st
sev
.endm
/*
* The vlock structure must reside in Strongly-Ordered or Device memory.
* This implementation deliberately eliminates most of the barriers which
* would be required for other memory types, and assumes that independent
* writes to neighbouring locations within a cacheline do not interfere
* with one another.
*/
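/*
 * Usage sketch (illustrative, C-like pseudocode; the real caller is
 * mcpm_head.S): the first-man election boils down to
 *
 *	if (vlock_trylock(lock_base, cpu) == 0) {
 *		// won the vote: this CPU performs cluster setup
 *		vlock_unlock(lock_base);
 *	} else {
 *		// lost: wait for the winner to finish setup
 *	}
 */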
@ r0: lock structure base
@ r1: CPU ID (0-based index within cluster)
ENTRY(vlock_trylock)
add r1, r1, #VLOCK_VOTING_OFFSET
voting_begin r0, r1, r2
ldrb r2, [r0, #VLOCK_OWNER_OFFSET] @ check whether lock is held
cmp r2, #VLOCK_OWNER_NONE
bne trylock_fail @ fail if so
@ Control dependency implies strb not observable before previous ldrb.
strb r1, [r0, #VLOCK_OWNER_OFFSET] @ submit my vote
voting_end r0, r1, r2 @ implies DMB
@ Wait for the current round of voting to finish:
MANY( mov r3, #VLOCK_VOTING_OFFSET )
0:
MANY( ldr r2, [r0, r3] )
FEW( ldr r2, [r0, #VLOCK_VOTING_OFFSET] )
cmp r2, #0
wfene
bne 0b
MANY( add r3, r3, #4 )
MANY( cmp r3, #VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE )
MANY( bne 0b )
@ Check who won:
dmb
ldrb r2, [r0, #VLOCK_OWNER_OFFSET]
eor r0, r1, r2 @ zero if I won, else nonzero
bx lr
trylock_fail:
voting_end r0, r1, r2
mov r0, #1 @ nonzero indicates that I lost
bx lr
ENDPROC(vlock_trylock)
@ r0: lock structure base
ENTRY(vlock_unlock)
dmb
mov r1, #VLOCK_OWNER_NONE
strb r1, [r0, #VLOCK_OWNER_OFFSET]
dsb st
sev
bx lr
ENDPROC(vlock_unlock)
arch/arm/common/mcpm_head.S
/*
* arch/arm/common/mcpm_head.S -- kernel entry point for multi-cluster PM
*
* Created by: Nicolas Pitre, March 2012
* Copyright: (C) 2012-2013 Linaro Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* Refer to Documentation/arm/cluster-pm-race-avoidance.txt
* for details of the synchronisation algorithms used here.
*/
#include <linux/linkage.h>
#include <asm/mcpm.h>
#include <asm/assembler.h>
#include "vlock.h"
.if MCPM_SYNC_CLUSTER_CPUS
.error "cpus must be the first member of struct mcpm_sync_struct"
.endif
.macro pr_dbg string
#if defined(CONFIG_DEBUG_LL) && defined(DEBUG)
b 1901f
1902: .asciz "CPU"
1903: .asciz " cluster"
1904: .asciz ": \string"
.align
1901: adr r0, 1902b
bl printascii
mov r0, r9
bl printhex2
adr r0, 1903b
bl printascii
mov r0, r10
bl printhex2
adr r0, 1904b
bl printascii
#endif
.endm
.arm
.align
ENTRY(mcpm_entry_point)
ARM_BE8(setend be)
THUMB( badr r12, 1f )
THUMB( bx r12 )
THUMB( .thumb )
1:
mrc p15, 0, r0, c0, c0, 5 @ MPIDR
ubfx r9, r0, #0, #8 @ r9 = cpu
ubfx r10, r0, #8, #8 @ r10 = cluster
mov r3, #MAX_CPUS_PER_CLUSTER
mla r4, r3, r10, r9 @ r4 = canonical CPU index
cmp r4, #(MAX_CPUS_PER_CLUSTER * MAX_NR_CLUSTERS)
blo 2f
/* We didn't expect this CPU. Try to cheaply make it quiet. */
1: wfi
wfe
b 1b
2: pr_dbg "kernel mcpm_entry_point\n"
/*
* MMU is off so we need to get to various variables in a
* position independent way.
*/
adr r5, 3f
ldmia r5, {r0, r6, r7, r8, r11}
add r0, r5, r0 @ r0 = mcpm_entry_early_pokes
add r6, r5, r6 @ r6 = mcpm_entry_vectors
ldr r7, [r5, r7] @ r7 = mcpm_power_up_setup_phys
add r8, r5, r8 @ r8 = mcpm_sync
add r11, r5, r11 @ r11 = first_man_locks
@ Perform an early poke, if any
add r0, r0, r4, lsl #3
ldmia r0, {r0, r1}
teq r0, #0
strne r1, [r0]
mov r0, #MCPM_SYNC_CLUSTER_SIZE
mla r8, r0, r10, r8 @ r8 = sync cluster base
@ Signal that this CPU is coming UP:
mov r0, #CPU_COMING_UP
mov r5, #MCPM_SYNC_CPU_SIZE
mla r5, r9, r5, r8 @ r5 = sync cpu address
strb r0, [r5]
@ At this point, the cluster cannot unexpectedly enter the GOING_DOWN
@ state, because there is at least one active CPU (this CPU).
mov r0, #VLOCK_SIZE
mla r11, r0, r10, r11 @ r11 = cluster first man lock
mov r0, r11
mov r1, r9 @ cpu
bl vlock_trylock @ implies DMB
cmp r0, #0 @ failed to get the lock?
bne mcpm_setup_wait @ wait for cluster setup if so
ldrb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
cmp r0, #CLUSTER_UP @ cluster already up?
bne mcpm_setup @ if not, set up the cluster
@ Otherwise, release the first man lock and skip setup:
mov r0, r11
bl vlock_unlock
b mcpm_setup_complete
mcpm_setup:
@ Control dependency implies strb not observable before previous ldrb.
@ Signal that the cluster is being brought up:
mov r0, #INBOUND_COMING_UP
strb r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
dmb
@ Any CPU trying to take the cluster into CLUSTER_GOING_DOWN from this
@ point onwards will observe INBOUND_COMING_UP and abort.
@ Wait for any previously-pending cluster teardown operations to abort
@ or complete:
mcpm_teardown_wait:
ldrb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
cmp r0, #CLUSTER_GOING_DOWN
bne first_man_setup
wfe
b mcpm_teardown_wait
first_man_setup:
dmb
@ If the outbound gave up before teardown started, skip cluster setup:
cmp r0, #CLUSTER_UP
beq mcpm_setup_leave
@ power_up_setup is now responsible for setting up the cluster:
cmp r7, #0
mov r0, #1 @ second (cluster) affinity level
blxne r7 @ Call power_up_setup if defined
dmb
mov r0, #CLUSTER_UP
strb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
dmb
mcpm_setup_leave:
@ Leave the cluster setup critical section:
mov r0, #INBOUND_NOT_COMING_UP
strb r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
dsb st
sev
mov r0, r11
bl vlock_unlock @ implies DMB
b mcpm_setup_complete
@ In the contended case, non-first men wait here for cluster setup
@ to complete:
mcpm_setup_wait:
ldrb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
cmp r0, #CLUSTER_UP
wfene
bne mcpm_setup_wait
dmb
mcpm_setup_complete:
@ If a platform-specific CPU setup hook is needed, it is
@ called from here.
cmp r7, #0
mov r0, #0 @ first (CPU) affinity level
blxne r7 @ Call power_up_setup if defined
dmb
@ Mark the CPU as up:
mov r0, #CPU_UP
strb r0, [r5]
@ Observability order of CPU_UP and opening of the gate does not matter.
mcpm_entry_gated:
ldr r5, [r6, r4, lsl #2] @ r5 = CPU entry vector
cmp r5, #0
wfeeq
beq mcpm_entry_gated
dmb
pr_dbg "released\n"
bx r5
.align 2
3: .word mcpm_entry_early_pokes - .
.word mcpm_entry_vectors - 3b
.word mcpm_power_up_setup_phys - 3b
.word mcpm_sync - 3b
.word first_man_locks - 3b
ENDPROC(mcpm_entry_point)
.bss
.align CACHE_WRITEBACK_ORDER
.type first_man_locks, #object
first_man_locks:
.space VLOCK_SIZE * MAX_NR_CLUSTERS
.align CACHE_WRITEBACK_ORDER
.type mcpm_entry_vectors, #object
ENTRY(mcpm_entry_vectors)
.space 4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
.type mcpm_entry_early_pokes, #object
ENTRY(mcpm_entry_early_pokes)
.space 8 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
.type mcpm_power_up_setup_phys, #object
ENTRY(mcpm_power_up_setup_phys)
.space 4 @ set by mcpm_sync_init()
|
AirFortressIlikara/LS2K0300-linux-4.19
| 9,707
|
arch/arm/mach-at91/pm_suspend.S
|
/*
 * arch/arm/mach-at91/pm_suspend.S
*
* Copyright (C) 2006 Savin Zlobec
*
* AT91SAM9 support:
* Copyright (C) 2007 Anti Sullin <anti.sullin@artecdesign.ee>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/linkage.h>
#include <linux/clk/at91_pmc.h>
#include "pm.h"
#include "generated/at91_pm_data-offsets.h"
#define SRAMC_SELF_FRESH_ACTIVE 0x01
#define SRAMC_SELF_FRESH_EXIT 0x00
pmc .req r0
tmp1 .req r4
tmp2 .req r5
/*
* Wait until master clock is ready (after switching master clock source)
*/
.macro wait_mckrdy
1: ldr tmp1, [pmc, #AT91_PMC_SR]
tst tmp1, #AT91_PMC_MCKRDY
beq 1b
.endm
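/*
 * Rough C equivalent of the polling above (an illustrative sketch only;
 * readl() stands in for the MMIO load done with ldr):
 *
 *	while (!(readl(pmc + AT91_PMC_SR) & AT91_PMC_MCKRDY))
 *		;
 */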
/*
* Wait until master oscillator has stabilized.
*/
.macro wait_moscrdy
1: ldr tmp1, [pmc, #AT91_PMC_SR]
tst tmp1, #AT91_PMC_MOSCS
beq 1b
.endm
/*
 * Wait until main oscillator selection is done
*/
.macro wait_moscsels
1: ldr tmp1, [pmc, #AT91_PMC_SR]
tst tmp1, #AT91_PMC_MOSCSELS
beq 1b
.endm
/*
* Wait until PLLA has locked.
*/
.macro wait_pllalock
1: ldr tmp1, [pmc, #AT91_PMC_SR]
tst tmp1, #AT91_PMC_LOCKA
beq 1b
.endm
/*
 * Put the processor into the idle state
*/
.macro at91_cpu_idle
#if defined(CONFIG_CPU_V7)
mov tmp1, #AT91_PMC_PCK
str tmp1, [pmc, #AT91_PMC_SCDR]
dsb
wfi @ Wait For Interrupt
#else
mcr p15, 0, tmp1, c7, c0, 4
#endif
.endm
.text
.arm
/*
* void at91_suspend_sram_fn(struct at91_pm_data*)
* @input param:
* @r0: base address of struct at91_pm_data
*/
/* at91_pm_suspend_in_sram must be 8-byte aligned per the requirements of fncpy() */
.align 3
ENTRY(at91_pm_suspend_in_sram)
/* Save registers on stack */
stmfd sp!, {r4 - r12, lr}
/* Drain write buffer */
mov tmp1, #0
mcr p15, 0, tmp1, c7, c10, 4
ldr tmp1, [r0, #PM_DATA_PMC]
str tmp1, .pmc_base
ldr tmp1, [r0, #PM_DATA_RAMC0]
str tmp1, .sramc_base
ldr tmp1, [r0, #PM_DATA_RAMC1]
str tmp1, .sramc1_base
ldr tmp1, [r0, #PM_DATA_MEMCTRL]
str tmp1, .memtype
ldr tmp1, [r0, #PM_DATA_MODE]
str tmp1, .pm_mode
/* Both ldrne instructions below are here to preload their addresses into the TLB */
ldr tmp1, [r0, #PM_DATA_SHDWC]
str tmp1, .shdwc
cmp tmp1, #0
ldrne tmp2, [tmp1, #0]
ldr tmp1, [r0, #PM_DATA_SFRBU]
str tmp1, .sfr
cmp tmp1, #0
ldrne tmp2, [tmp1, #0x10]
/* Activate the self-refresh mode */
mov r0, #SRAMC_SELF_FRESH_ACTIVE
bl at91_sramc_self_refresh
ldr r0, .pm_mode
cmp r0, #AT91_PM_STANDBY
beq standby
cmp r0, #AT91_PM_BACKUP
beq backup_mode
bl at91_ulp_mode
b exit_suspend
standby:
/* Wait for interrupt */
ldr pmc, .pmc_base
at91_cpu_idle
b exit_suspend
backup_mode:
bl at91_backup_mode
b exit_suspend
exit_suspend:
/* Exit the self-refresh mode */
mov r0, #SRAMC_SELF_FRESH_EXIT
bl at91_sramc_self_refresh
/* Restore registers, and return */
ldmfd sp!, {r4 - r12, pc}
ENDPROC(at91_pm_suspend_in_sram)
ENTRY(at91_backup_mode)
/*BUMEN*/
ldr r0, .sfr
mov tmp1, #0x1
str tmp1, [r0, #0x10]
/* Shutdown */
ldr r0, .shdwc
mov tmp1, #0xA5000000
add tmp1, tmp1, #0x1
str tmp1, [r0, #0]
ENDPROC(at91_backup_mode)
.macro at91_pm_ulp0_mode
ldr pmc, .pmc_base
/* Turn off the crystal oscillator */
ldr tmp1, [pmc, #AT91_CKGR_MOR]
bic tmp1, tmp1, #AT91_PMC_MOSCEN
orr tmp1, tmp1, #AT91_PMC_KEY
str tmp1, [pmc, #AT91_CKGR_MOR]
/* Wait for interrupt */
at91_cpu_idle
/* Turn on the crystal oscillator */
ldr tmp1, [pmc, #AT91_CKGR_MOR]
orr tmp1, tmp1, #AT91_PMC_MOSCEN
orr tmp1, tmp1, #AT91_PMC_KEY
str tmp1, [pmc, #AT91_CKGR_MOR]
wait_moscrdy
.endm
/**
 * Note: This procedure only applies to platforms which use
 * the external crystal oscillator as the main clock source.
*/
.macro at91_pm_ulp1_mode
ldr pmc, .pmc_base
/* Switch the main clock source to 12-MHz RC oscillator */
ldr tmp1, [pmc, #AT91_CKGR_MOR]
bic tmp1, tmp1, #AT91_PMC_MOSCSEL
bic tmp1, tmp1, #AT91_PMC_KEY_MASK
orr tmp1, tmp1, #AT91_PMC_KEY
str tmp1, [pmc, #AT91_CKGR_MOR]
wait_moscsels
/* Disable the crystal oscillator */
ldr tmp1, [pmc, #AT91_CKGR_MOR]
bic tmp1, tmp1, #AT91_PMC_MOSCEN
bic tmp1, tmp1, #AT91_PMC_KEY_MASK
orr tmp1, tmp1, #AT91_PMC_KEY
str tmp1, [pmc, #AT91_CKGR_MOR]
/* Switch the master clock source to main clock */
ldr tmp1, [pmc, #AT91_PMC_MCKR]
bic tmp1, tmp1, #AT91_PMC_CSS
orr tmp1, tmp1, #AT91_PMC_CSS_MAIN
str tmp1, [pmc, #AT91_PMC_MCKR]
wait_mckrdy
/* Enter ULP1 mode by setting the WAITMODE bit in CKGR_MOR */
ldr tmp1, [pmc, #AT91_CKGR_MOR]
orr tmp1, tmp1, #AT91_PMC_WAITMODE
bic tmp1, tmp1, #AT91_PMC_KEY_MASK
orr tmp1, tmp1, #AT91_PMC_KEY
str tmp1, [pmc, #AT91_CKGR_MOR]
/* Quirk for SAM9X60's PMC */
nop
nop
wait_mckrdy
/* Enable the crystal oscillator */
ldr tmp1, [pmc, #AT91_CKGR_MOR]
orr tmp1, tmp1, #AT91_PMC_MOSCEN
bic tmp1, tmp1, #AT91_PMC_KEY_MASK
orr tmp1, tmp1, #AT91_PMC_KEY
str tmp1, [pmc, #AT91_CKGR_MOR]
wait_moscrdy
/* Switch the master clock source to slow clock */
ldr tmp1, [pmc, #AT91_PMC_MCKR]
bic tmp1, tmp1, #AT91_PMC_CSS
str tmp1, [pmc, #AT91_PMC_MCKR]
wait_mckrdy
/* Switch main clock source to crystal oscillator */
ldr tmp1, [pmc, #AT91_CKGR_MOR]
orr tmp1, tmp1, #AT91_PMC_MOSCSEL
bic tmp1, tmp1, #AT91_PMC_KEY_MASK
orr tmp1, tmp1, #AT91_PMC_KEY
str tmp1, [pmc, #AT91_CKGR_MOR]
wait_moscsels
/* Switch the master clock source to main clock */
ldr tmp1, [pmc, #AT91_PMC_MCKR]
bic tmp1, tmp1, #AT91_PMC_CSS
orr tmp1, tmp1, #AT91_PMC_CSS_MAIN
str tmp1, [pmc, #AT91_PMC_MCKR]
wait_mckrdy
.endm
ENTRY(at91_ulp_mode)
ldr pmc, .pmc_base
/* Save Master clock setting */
ldr tmp1, [pmc, #AT91_PMC_MCKR]
str tmp1, .saved_mckr
/*
* Set the Master clock source to slow clock
*/
bic tmp1, tmp1, #AT91_PMC_CSS
str tmp1, [pmc, #AT91_PMC_MCKR]
wait_mckrdy
/* Save PLLA setting and disable it */
ldr tmp1, [pmc, #AT91_CKGR_PLLAR]
str tmp1, .saved_pllar
mov tmp1, #AT91_PMC_PLLCOUNT
orr tmp1, tmp1, #(1 << 29) /* bit 29 always set */
str tmp1, [pmc, #AT91_CKGR_PLLAR]
ldr r0, .pm_mode
cmp r0, #AT91_PM_ULP1
beq ulp1_mode
at91_pm_ulp0_mode
b ulp_exit
ulp1_mode:
at91_pm_ulp1_mode
b ulp_exit
ulp_exit:
ldr pmc, .pmc_base
/* Restore PLLA setting */
ldr tmp1, .saved_pllar
str tmp1, [pmc, #AT91_CKGR_PLLAR]
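/*
 * Only wait for PLLA to lock if the restored setting actually enables
 * the PLL, i.e. if any bit of its multiplier field (MUL) is non-zero.
 */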
tst tmp1, #(AT91_PMC_MUL & 0xff0000)
bne 3f
tst tmp1, #(AT91_PMC_MUL & ~0xff0000)
beq 4f
3:
wait_pllalock
4:
/*
* Restore master clock setting
*/
ldr tmp1, .saved_mckr
str tmp1, [pmc, #AT91_PMC_MCKR]
wait_mckrdy
mov pc, lr
ENDPROC(at91_ulp_mode)
/*
* void at91_sramc_self_refresh(unsigned int is_active)
*
* @input param:
 * @r0: 1 - activate self-refresh mode
* 0 - exit self-refresh mode
* register usage:
* @r1: memory type
* @r2: base address of the sram controller
*/
ENTRY(at91_sramc_self_refresh)
ldr r1, .memtype
ldr r2, .sramc_base
cmp r1, #AT91_MEMCTRL_MC
bne ddrc_sf
/*
* at91rm9200 Memory controller
*/
/*
 * When exiting the self-refresh mode, nothing needs to be done:
 * the controller exits self-refresh automatically.
*/
tst r0, #SRAMC_SELF_FRESH_ACTIVE
beq exit_sramc_sf
/* Activate SDRAM self-refresh mode */
mov r3, #1
str r3, [r2, #AT91_MC_SDRAMC_SRR]
b exit_sramc_sf
ddrc_sf:
cmp r1, #AT91_MEMCTRL_DDRSDR
bne sdramc_sf
/*
* DDR Memory controller
*/
tst r0, #SRAMC_SELF_FRESH_ACTIVE
beq ddrc_exit_sf
/* LPDDR1 --> force DDR2 mode during self-refresh */
ldr r3, [r2, #AT91_DDRSDRC_MDR]
str r3, .saved_sam9_mdr
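/* Keep only the memory device field and check whether it is LPDDR1 */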
bic r3, r3, #~AT91_DDRSDRC_MD
cmp r3, #AT91_DDRSDRC_MD_LOW_POWER_DDR
ldreq r3, [r2, #AT91_DDRSDRC_MDR]
biceq r3, r3, #AT91_DDRSDRC_MD
orreq r3, r3, #AT91_DDRSDRC_MD_DDR2
streq r3, [r2, #AT91_DDRSDRC_MDR]
/* Activate DDRC self-refresh mode */
ldr r3, [r2, #AT91_DDRSDRC_LPR]
str r3, .saved_sam9_lpr
bic r3, r3, #AT91_DDRSDRC_LPCB
orr r3, r3, #AT91_DDRSDRC_LPCB_SELF_REFRESH
str r3, [r2, #AT91_DDRSDRC_LPR]
/* If using the 2nd ddr controller */
ldr r2, .sramc1_base
cmp r2, #0
beq no_2nd_ddrc
ldr r3, [r2, #AT91_DDRSDRC_MDR]
str r3, .saved_sam9_mdr1
bic r3, r3, #~AT91_DDRSDRC_MD
cmp r3, #AT91_DDRSDRC_MD_LOW_POWER_DDR
ldreq r3, [r2, #AT91_DDRSDRC_MDR]
biceq r3, r3, #AT91_DDRSDRC_MD
orreq r3, r3, #AT91_DDRSDRC_MD_DDR2
streq r3, [r2, #AT91_DDRSDRC_MDR]
/* Activate DDRC self-refresh mode */
ldr r3, [r2, #AT91_DDRSDRC_LPR]
str r3, .saved_sam9_lpr1
bic r3, r3, #AT91_DDRSDRC_LPCB
orr r3, r3, #AT91_DDRSDRC_LPCB_SELF_REFRESH
str r3, [r2, #AT91_DDRSDRC_LPR]
no_2nd_ddrc:
b exit_sramc_sf
ddrc_exit_sf:
/* Restore MDR in case of LPDDR1 */
ldr r3, .saved_sam9_mdr
str r3, [r2, #AT91_DDRSDRC_MDR]
/* Restore LPR on AT91 with DDRAM */
ldr r3, .saved_sam9_lpr
str r3, [r2, #AT91_DDRSDRC_LPR]
/* If using the 2nd ddr controller */
ldr r2, .sramc1_base
cmp r2, #0
ldrne r3, .saved_sam9_mdr1
strne r3, [r2, #AT91_DDRSDRC_MDR]
ldrne r3, .saved_sam9_lpr1
strne r3, [r2, #AT91_DDRSDRC_LPR]
b exit_sramc_sf
/*
* SDRAMC Memory controller
*/
sdramc_sf:
tst r0, #SRAMC_SELF_FRESH_ACTIVE
beq sdramc_exit_sf
/* Activate SDRAMC self-refresh mode */
ldr r3, [r2, #AT91_SDRAMC_LPR]
str r3, .saved_sam9_lpr
bic r3, r3, #AT91_SDRAMC_LPCB
orr r3, r3, #AT91_SDRAMC_LPCB_SELF_REFRESH
str r3, [r2, #AT91_SDRAMC_LPR]
sdramc_exit_sf:
ldr r3, .saved_sam9_lpr
str r3, [r2, #AT91_SDRAMC_LPR]
exit_sramc_sf:
mov pc, lr
ENDPROC(at91_sramc_self_refresh)
.pmc_base:
.word 0
.sramc_base:
.word 0
.sramc1_base:
.word 0
.shdwc:
.word 0
.sfr:
.word 0
.memtype:
.word 0
.pm_mode:
.word 0
.saved_mckr:
.word 0
.saved_pllar:
.word 0
.saved_sam9_lpr:
.word 0
.saved_sam9_lpr1:
.word 0
.saved_sam9_mdr:
.word 0
.saved_sam9_mdr1:
.word 0
ENTRY(at91_pm_suspend_in_sram_sz)
.word .-at91_pm_suspend_in_sram
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,686
|
arch/arm/mach-socfpga/self-refresh.S
|
/*
* Copyright (C) 2014-2015 Altera Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#define MAX_LOOP_COUNT 1000
/* Register offset */
#define SDR_CTRLGRP_LOWPWREQ_ADDR 0x54
#define SDR_CTRLGRP_LOWPWRACK_ADDR 0x58
/* Bitfield positions */
#define SELFRSHREQ_POS 3
#define SELFRSHREQ_MASK 0x8
#define SELFRFSHACK_POS 1
#define SELFRFSHACK_MASK 0x2
/*
* This code assumes that when the bootloader configured
* the sdram controller for the DDR on the board it
* configured the following fields depending on the DDR
* vendor/configuration:
*
* sdr.ctrlcfg.lowpwreq.selfrfshmask
* sdr.ctrlcfg.lowpwrtiming.clkdisablecycles
* sdr.ctrlcfg.dramtiming4.selfrfshexit
*/
.arch armv7-a
.text
.align 3
/*
* socfpga_sdram_self_refresh
*
* r0 : sdr_ctl_base_addr
* r1 : temp storage of return value
* r2 : temp storage of register values
* r3 : loop counter
*
* return value: lower 16 bits: loop count going into self refresh
* upper 16 bits: loop count exiting self refresh
*/
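/*
 * Seen from C, this routine behaves roughly like the following (a sketch
 * only; the actual declaration lives in the platform PM code and may
 * differ):
 *
 *	unsigned int socfpga_sdram_self_refresh(u32 sdr_ctl_base_addr);
 */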
ENTRY(socfpga_sdram_self_refresh)
/* Enable dynamic clock gating in the Power Control Register. */
mrc p15, 0, r2, c15, c0, 0
orr r2, r2, #1
mcr p15, 0, r2, c15, c0, 0
/* Enable self refresh: set sdr.ctrlgrp.lowpwreq.selfrshreq = 1 */
ldr r2, [r0, #SDR_CTRLGRP_LOWPWREQ_ADDR]
orr r2, r2, #SELFRSHREQ_MASK
str r2, [r0, #SDR_CTRLGRP_LOWPWREQ_ADDR]
/* Poll until sdr.ctrlgrp.lowpwrack.selfrfshack == 1 or hit max loops */
mov r3, #0
while_ack_0:
ldr r2, [r0, #SDR_CTRLGRP_LOWPWRACK_ADDR]
and r2, r2, #SELFRFSHACK_MASK
cmp r2, #SELFRFSHACK_MASK
beq ack_1
add r3, #1
cmp r3, #MAX_LOOP_COUNT
bne while_ack_0
ack_1:
mov r1, r3
/*
* Execute an ISB instruction to ensure that all of the
* CP15 register changes have been committed.
*/
isb
/*
* Execute a barrier instruction to ensure that all cache,
* TLB and branch predictor maintenance operations issued
* by any CPU in the cluster have completed.
*/
dsb
dmb
wfi
/* Disable self-refresh: set sdr.ctrlgrp.lowpwreq.selfrshreq = 0 */
ldr r2, [r0, #SDR_CTRLGRP_LOWPWREQ_ADDR]
bic r2, r2, #SELFRSHREQ_MASK
str r2, [r0, #SDR_CTRLGRP_LOWPWREQ_ADDR]
/* Poll until sdr.ctrlgrp.lowpwrack.selfrfshack == 0 or hit max loops */
mov r3, #0
while_ack_1:
ldr r2, [r0, #SDR_CTRLGRP_LOWPWRACK_ADDR]
and r2, r2, #SELFRFSHACK_MASK
cmp r2, #SELFRFSHACK_MASK
bne ack_0
add r3, #1
cmp r3, #MAX_LOOP_COUNT
bne while_ack_1
ack_0:
/*
* Prepare return value:
* Shift loop count for exiting self refresh into upper 16 bits.
* Leave loop count for requesting self refresh in lower 16 bits.
*/
mov r3, r3, lsl #16
add r1, r1, r3
/* Disable dynamic clock gating in the Power Control Register. */
mrc p15, 0, r2, c15, c0, 0
bic r2, r2, #1
mcr p15, 0, r2, c15, c0, 0
mov r0, r1 @ return value
bx lr @ return
ENDPROC(socfpga_sdram_self_refresh)
ENTRY(socfpga_sdram_self_refresh_sz)
.word . - socfpga_sdram_self_refresh
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,403
|
arch/arm/mach-mvebu/coherency_ll.S
|
/*
* Coherency fabric: low level functions
*
* Copyright (C) 2012 Marvell
*
* Gregory CLEMENT <gregory.clement@free-electrons.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*
* This file implements the assembly function to add a CPU to the
* coherency fabric. This function is called by each of the secondary
 * CPUs during their early boot in an SMP kernel, which is why this
 * function has to be callable from assembly. It can also be called by a
* primary CPU from C code during its boot.
*/
#include <linux/linkage.h>
#define ARMADA_XP_CFB_CTL_REG_OFFSET 0x0
#define ARMADA_XP_CFB_CFG_REG_OFFSET 0x4
#include <asm/assembler.h>
#include <asm/cp15.h>
.text
/*
* Returns the coherency base address in r1 (r0 is untouched), or 0 if
* the coherency fabric is not enabled.
*/
ENTRY(ll_get_coherency_base)
mrc p15, 0, r1, c1, c0, 0
tst r1, #CR_M @ Check MMU bit enabled
bne 1f
/*
* MMU is disabled, use the physical address of the coherency
* base address. However, if the coherency fabric isn't mapped
 * (i.e. its virtual address is zero), it means coherency is
* not enabled, so we return 0.
*/
ldr r1, =coherency_base
cmp r1, #0
beq 2f
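/*
 * 3f below stores the link-time offset of coherency_phys_base relative
 * to itself; adding that offset to the run-time address of 3f locates
 * coherency_phys_base without relying on the MMU, and the second load
 * fetches its value.
 */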
adr r1, 3f
ldr r3, [r1]
ldr r1, [r1, r3]
b 2f
1:
/*
* MMU is enabled, use the virtual address of the coherency
* base address.
*/
ldr r1, =coherency_base
ldr r1, [r1]
2:
ret lr
ENDPROC(ll_get_coherency_base)
/*
* Returns the coherency CPU mask in r3 (r0 is untouched). This
* coherency CPU mask can be used with the coherency fabric
* configuration and control registers. Note that the mask is already
* endian-swapped as appropriate so that the calling functions do not
* have to care about endianness issues while accessing the coherency
 * fabric registers.
*/
ENTRY(ll_get_coherency_cpumask)
mrc 15, 0, r3, cr0, cr0, 5
and r3, r3, #15
mov r2, #(1 << 24)
lsl r3, r2, r3
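@ r3 now holds (1 << (24 + cpu)), the per-CPU bit expected by the
@ coherency fabric configuration and control registers.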
ARM_BE8(rev r3, r3)
ret lr
ENDPROC(ll_get_coherency_cpumask)
/*
* ll_add_cpu_to_smp_group(), ll_enable_coherency() and
* ll_disable_coherency() use the strex/ldrex instructions while the
* MMU can be disabled. The Armada XP SoC has an exclusive monitor
* that tracks transactions to Device and/or SO memory and thanks to
* that, exclusive transactions are functional even when the MMU is
* disabled.
*/
ENTRY(ll_add_cpu_to_smp_group)
/*
* As r0 is not modified by ll_get_coherency_base() and
 * ll_get_coherency_cpumask(), we use it to temporarily save lr
* and avoid it being modified by the branch and link
* calls. This function is used very early in the secondary
* CPU boot, and no stack is available at this point.
*/
mov r0, lr
bl ll_get_coherency_base
/* Bail out if the coherency is not enabled */
cmp r1, #0
reteq r0
bl ll_get_coherency_cpumask
mov lr, r0
add r0, r1, #ARMADA_XP_CFB_CFG_REG_OFFSET
1:
ldrex r2, [r0]
orr r2, r2, r3
strex r1, r2, [r0]
cmp r1, #0
bne 1b
ret lr
ENDPROC(ll_add_cpu_to_smp_group)
ENTRY(ll_enable_coherency)
/*
* As r0 is not modified by ll_get_coherency_base() and
 * ll_get_coherency_cpumask(), we use it to temporarily save lr
* and avoid it being modified by the branch and link
* calls. This function is used very early in the secondary
* CPU boot, and no stack is available at this point.
*/
mov r0, lr
bl ll_get_coherency_base
/* Bail out if the coherency is not enabled */
cmp r1, #0
reteq r0
bl ll_get_coherency_cpumask
mov lr, r0
add r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
1:
ldrex r2, [r0]
orr r2, r2, r3
strex r1, r2, [r0]
cmp r1, #0
bne 1b
dsb
mov r0, #0
ret lr
ENDPROC(ll_enable_coherency)
ENTRY(ll_disable_coherency)
/*
* As r0 is not modified by ll_get_coherency_base() and
 * ll_get_coherency_cpumask(), we use it to temporarily save lr
* and avoid it being modified by the branch and link
* calls. This function is used very early in the secondary
* CPU boot, and no stack is available at this point.
*/
mov r0, lr
bl ll_get_coherency_base
/* Bail out if the coherency is not enabled */
cmp r1, #0
reteq r0
bl ll_get_coherency_cpumask
mov lr, r0
add r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
1:
ldrex r2, [r0]
bic r2, r2, r3
strex r1, r2, [r0]
cmp r1, #0
bne 1b
dsb
ret lr
ENDPROC(ll_disable_coherency)
.align 2
3:
.long coherency_phys_base - .
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,147
|
arch/arm/mach-mvebu/headsmp.S
|
/*
* SMP support: Entry point for secondary CPUs
*
* Copyright (C) 2012 Marvell
*
* Yehuda Yitschak <yehuday@marvell.com>
* Gregory CLEMENT <gregory.clement@free-electrons.com>
* Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*
* This file implements the assembly entry point for secondary CPUs in
* an SMP kernel. The only thing we need to do is to add the CPU to
* the coherency fabric by writing to 2 registers. Currently the base
* register addresses are hard coded due to the early initialisation
* problems.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
/*
* Armada XP specific entry point for secondary CPUs.
* We add the CPU to the coherency fabric and then jump to secondary
* startup
*/
ENTRY(armada_xp_secondary_startup)
ARM_BE8(setend be ) @ go BE8 if entered LE
bl ll_add_cpu_to_smp_group
bl ll_enable_coherency
b secondary_startup
ENDPROC(armada_xp_secondary_startup)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,020
|
arch/arm/mach-mvebu/pmsu_ll.S
|
/*
* Copyright (C) 2014 Marvell
*
* Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
* Gregory Clement <gregory.clement@free-electrons.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
ENTRY(armada_38x_scu_power_up)
mrc p15, 4, r1, c15, c0 @ get SCU base address
orr r1, r1, #0x8 @ SCU CPU Power Status Register
mrc 15, 0, r0, cr0, cr0, 5 @ get the CPU ID
and r0, r0, #15
add r1, r1, r0
mov r0, #0x0
strb r0, [r1] @ switch SCU power state to Normal mode
ret lr
ENDPROC(armada_38x_scu_power_up)
/*
* This is the entry point through which CPUs exiting cpuidle deep
* idle state are going.
*/
ENTRY(armada_370_xp_cpu_resume)
ARM_BE8(setend be ) @ go BE8 if entered LE
/*
* Disable the MMU that might have been enabled in BootROM if
* this code is used in the resume path of a suspend/resume
* cycle.
*/
mrc p15, 0, r1, c1, c0, 0
bic r1, #1
mcr p15, 0, r1, c1, c0, 0
bl ll_add_cpu_to_smp_group
bl ll_enable_coherency
b cpu_resume
ENDPROC(armada_370_xp_cpu_resume)
ENTRY(armada_38x_cpu_resume)
/* do we need it for Armada 38x? */
ARM_BE8(setend be ) @ go BE8 if entered LE
bl v7_invalidate_l1
bl armada_38x_scu_power_up
b cpu_resume
ENDPROC(armada_38x_cpu_resume)
.global mvebu_boot_wa_start
.global mvebu_boot_wa_end
/* The following code will be executed from SRAM */
ENTRY(mvebu_boot_wa_start)
mvebu_boot_wa_start:
ARM_BE8(setend be)
adr r0, 1f
ldr r0, [r0] @ load the address of the
@ resume register
ldr r0, [r0] @ load the value in the
@ resume register
ARM_BE8(rev r0, r0) @ the value is stored LE
mov pc, r0 @ jump to this value
/*
* the last word of this piece of code will be filled by the physical
* address of the boot address register just after being copied in SRAM
*/
1:
.long .
mvebu_boot_wa_end:
ENDPROC(mvebu_boot_wa_end)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,145
|
arch/arm/mach-s3c24xx/sleep-s3c2412.S
|
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 2007 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C2412 Power Manager low-level sleep support
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <mach/hardware.h>
#include <mach/map.h>
#include <mach/regs-irq.h>
.text
.global s3c2412_sleep_enter
s3c2412_sleep_enter:
mov r0, #0 /* argument for coprocessors */
ldr r1, =S3C2410_INTPND
ldr r2, =S3C2410_SRCPND
ldr r3, =S3C2410_EINTPEND
teq r0, r0
bl s3c2412_sleep_enter1
teq pc, r0
bl s3c2412_sleep_enter1
.align 5
/* this is called twice, first with the Z flag to ensure that the
* instructions have been loaded into the cache, and the second
* time to try and suspend the system.
*/
s3c2412_sleep_enter1:
mcr p15, 0, r0, c7, c10, 4
mcrne p15, 0, r0, c7, c0, 4
/* if we return from here, it is because an interrupt was
* active when we tried to shutdown. Try and ack the IRQ and
* retry, as simply returning causes the system to lock.
*/
ldrne r9, [r1]
strne r9, [r1]
ldrne r9, [r2]
strne r9, [r2]
ldrne r9, [r3]
strne r9, [r3]
bne s3c2412_sleep_enter1
ret lr
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,512
|
arch/arm/mach-s3c24xx/sleep.S
|
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 2004 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C2410 Power Manager (Suspend-To-RAM) support
*
* Based on PXA/SA1100 sleep code by:
* Nicolas Pitre, (c) 2002 Monta Vista Software Inc
* Cliff Brake, (c) 2001
*/
#include <linux/linkage.h>
#include <linux/serial_s3c.h>
#include <asm/assembler.h>
#include <mach/hardware.h>
#include <mach/map.h>
#include <mach/regs-gpio.h>
#include <mach/regs-clock.h>
/*
* S3C24XX_DEBUG_RESUME is dangerous if your bootloader does not
* reset the UART configuration, only enable if you really need this!
*/
//#define S3C24XX_DEBUG_RESUME
.text
/* sleep magic, to allow the bootloader to check for a valid
* image to resume to. Must be the first word before the
* s3c_cpu_resume entry.
*/
.word 0x2bedf00d
/* s3c_cpu_resume
*
* resume code entry for bootloader to call
*/
ENTRY(s3c_cpu_resume)
mov r0, #PSR_I_BIT | PSR_F_BIT | SVC_MODE
msr cpsr_c, r0
@@ load UART to allow us to print the two characters for
@@ resume debug
mov r2, #S3C24XX_PA_UART & 0xff000000
orr r2, r2, #S3C24XX_PA_UART & 0xff000
#if 0
/* SMDK2440 LED set */
mov r14, #S3C24XX_PA_GPIO
ldr r12, [ r14, #0x54 ]
bic r12, r12, #3<<4
orr r12, r12, #1<<7
str r12, [ r14, #0x54 ]
#endif
#ifdef S3C24XX_DEBUG_RESUME
mov r3, #'L'
strb r3, [ r2, #S3C2410_UTXH ]
1001:
ldrb r14, [ r3, #S3C2410_UTRSTAT ]
tst r14, #S3C2410_UTRSTAT_TXE
beq 1001b
#endif /* S3C24XX_DEBUG_RESUME */
b cpu_resume
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,356
|
arch/arm/mach-s3c24xx/sleep-s3c2410.S
|
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 2004 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C2410 Power Manager (Suspend-To-RAM) support
*
* Based on PXA/SA1100 sleep code by:
* Nicolas Pitre, (c) 2002 Monta Vista Software Inc
* Cliff Brake, (c) 2001
*/
#include <linux/linkage.h>
#include <linux/serial_s3c.h>
#include <asm/assembler.h>
#include <mach/hardware.h>
#include <mach/map.h>
#include <mach/regs-gpio.h>
#include <mach/regs-clock.h>
#include "regs-mem.h"
/* s3c2410_cpu_suspend
*
* put the cpu into sleep mode
*/
ENTRY(s3c2410_cpu_suspend)
@@ prepare cpu to sleep
ldr r4, =S3C2410_REFRESH
ldr r5, =S3C24XX_MISCCR
ldr r6, =S3C2410_CLKCON
ldr r7, [r4] @ get REFRESH (and ensure in TLB)
ldr r8, [r5] @ get MISCCR (and ensure in TLB)
ldr r9, [r6] @ get CLKCON (and ensure in TLB)
orr r7, r7, #S3C2410_REFRESH_SELF @ SDRAM sleep command
orr r8, r8, #S3C2410_MISCCR_SDSLEEP @ SDRAM power-down signals
orr r9, r9, #S3C2410_CLKCON_POWER @ power down command
teq pc, #0 @ first as a trial-run to load cache
bl s3c2410_do_sleep
teq r0, r0 @ now do it for real
b s3c2410_do_sleep @
@@ align next bit of code to cache line
.align 5
s3c2410_do_sleep:
streq r7, [r4] @ SDRAM sleep command
streq r8, [r5] @ SDRAM power-down config
streq r9, [r6] @ CPU sleep
1: beq 1b
ret lr
|
AirFortressIlikara/LS2K0300-linux-4.19
| 18,808
|
arch/arm/mm/proc-xscale.S
|
/*
* linux/arch/arm/mm/proc-xscale.S
*
* Author: Nicolas Pitre
* Created: November 2000
* Copyright: (C) 2000, 2001 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* MMU functions for the Intel XScale CPUs
*
* 2001 Aug 21:
* some contributions by Brett Gaines <brett.w.gaines@intel.com>
* Copyright 2001 by Intel Corp.
*
* 2001 Sep 08:
* Completely revisited, many important fixes
* Nicolas Pitre <nico@fluxnic.net>
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
/*
* This is the maximum size of an area which will be flushed. If the area
* is larger than this, then we flush the whole cache
*/
#define MAX_AREA_SIZE 32768
/*
* the cache line size of the I and D cache
*/
#define CACHELINESIZE 32
/*
* the size of the data cache
*/
#define CACHESIZE 32768
/*
* Virtual address used to allocate the cache when flushed
*
* This must be an address range which is _never_ used. It should
* apparently have a mapping in the corresponding page table for
 * compatibility with future CPUs that _could_ require it. For now we
 * don't care.
*
* This must be aligned on a 2*CACHESIZE boundary. The code selects one of
 * the 2 areas alternately each time the clean_d_cache macro is used.
* Without this the XScale core exhibits cache eviction problems and no one
* knows why.
*
* Reminder: the vector table is located at 0xffff0000-0xffff0fff.
*/
#define CLEAN_ADDR 0xfffe0000
/*
* This macro is used to wait for a CP15 write and is needed
* when we have to ensure that the last operation to the co-pro
 * was completed before continuing with other operations.
*/
.macro cpwait, rd
mrc p15, 0, \rd, c2, c0, 0 @ arbitrary read of cp15
mov \rd, \rd @ wait for completion
sub pc, pc, #4 @ flush instruction pipeline
.endm
.macro cpwait_ret, lr, rd
mrc p15, 0, \rd, c2, c0, 0 @ arbitrary read of cp15
sub pc, \lr, \rd, LSR #32 @ wait for completion and
@ flush instruction pipeline
.endm
/*
* This macro cleans the entire dcache using line allocate.
* The main loop has been unrolled to reduce loop overhead.
* rd and rs are two scratch registers.
*/
.macro clean_d_cache, rd, rs
ldr \rs, =clean_addr
ldr \rd, [\rs]
eor \rd, \rd, #CACHESIZE
str \rd, [\rs]
add \rs, \rd, #CACHESIZE
1: mcr p15, 0, \rd, c7, c2, 5 @ allocate D cache line
add \rd, \rd, #CACHELINESIZE
mcr p15, 0, \rd, c7, c2, 5 @ allocate D cache line
add \rd, \rd, #CACHELINESIZE
mcr p15, 0, \rd, c7, c2, 5 @ allocate D cache line
add \rd, \rd, #CACHELINESIZE
mcr p15, 0, \rd, c7, c2, 5 @ allocate D cache line
add \rd, \rd, #CACHELINESIZE
teq \rd, \rs
bne 1b
.endm
.data
.align 2
clean_addr: .word CLEAN_ADDR
.text
/*
* cpu_xscale_proc_init()
*
* Nothing too exciting at the moment
*/
ENTRY(cpu_xscale_proc_init)
@ enable write buffer coalescing. Some bootloaders disable it
mrc p15, 0, r1, c1, c0, 1
bic r1, r1, #1
mcr p15, 0, r1, c1, c0, 1
ret lr
/*
* cpu_xscale_proc_fin()
*/
ENTRY(cpu_xscale_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1800 @ ...IZ...........
bic r0, r0, #0x0006 @ .............CA.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
/*
* cpu_xscale_reset(loc)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* loc: location to jump to for soft reset
*
* Beware PXA270 erratum E7.
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_xscale_reset)
mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
msr cpsr_c, r1 @ reset CPSR
mcr p15, 0, r1, c10, c4, 1 @ unlock I-TLB
mcr p15, 0, r1, c8, c5, 0 @ invalidate I-TLB
mrc p15, 0, r1, c1, c0, 0 @ ctrl register
bic r1, r1, #0x0086 @ ........B....CA.
bic r1, r1, #0x3900 @ ..VIZ..S........
sub pc, pc, #4 @ flush pipeline
@ *** cache line aligned ***
mcr p15, 0, r1, c1, c0, 0 @ ctrl register
bic r1, r1, #0x0001 @ ...............M
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches & BTB
mcr p15, 0, r1, c1, c0, 0 @ ctrl register
@ CAUTION: MMU turned off from this point. We count on the pipeline
@ already containing those two last instructions to survive.
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
ret r0
ENDPROC(cpu_xscale_reset)
.popsection
/*
* cpu_xscale_do_idle()
*
* Cause the processor to idle
*
* For now we do nothing but go to idle mode for every case
*
* XScale supports clock switching, but using idle mode support
* allows external hardware to react to system state changes.
*/
.align 5
ENTRY(cpu_xscale_do_idle)
mov r0, #1
mcr p14, 0, r0, c7, c0, 0 @ Go to IDLE
ret lr
/* ================================= CACHE ================================ */
/*
* flush_icache_all()
*
* Unconditionally clean and invalidate the entire icache.
*/
ENTRY(xscale_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
ENDPROC(xscale_flush_icache_all)
/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
* space.
*/
ENTRY(xscale_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(xscale_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
clean_d_cache r0, r1
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB
mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
ret lr
/*
* flush_user_cache_range(start, end, vm_flags)
*
* Invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
* - vma - vma_area_struct describing address space
*/
.align 5
ENTRY(xscale_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #MAX_AREA_SIZE
bhs __flush_whole_cache
1: tst r2, #VM_EXEC
mcrne p15, 0, r0, c7, c5, 1 @ Invalidate I cache line
mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
mcr p15, 0, r0, c7, c6, 1 @ Invalidate D cache line
add r0, r0, #CACHELINESIZE
cmp r0, r1
blo 1b
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c5, 6 @ Invalidate BTB
mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
ret lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
 * region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*
* Note: single I-cache line invalidation isn't used here since
* it also trashes the mini I-cache used by JTAG debuggers.
*/
ENTRY(xscale_coherent_kern_range)
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHELINESIZE
cmp r0, r1
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB
mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
ret lr
/*
* coherent_user_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
 * region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(xscale_coherent_user_range)
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache entry
add r0, r0, #CACHELINESIZE
cmp r0, r1
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c5, 6 @ Invalidate BTB
mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
ret lr
/*
* flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - kernel address
* - size - region size
*/
ENTRY(xscale_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHELINESIZE
cmp r0, r1
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB
mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
ret lr
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*/
xscale_dma_inv_range:
tst r0, #CACHELINESIZE - 1
bic r0, r0, #CACHELINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHELINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHELINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
ret lr
/*
* dma_clean_range(start, end)
*
* Clean the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
xscale_dma_clean_range:
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHELINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
ret lr
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(xscale_dma_flush_range)
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHELINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
ret lr
/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
* - dir - DMA direction
*/
ENTRY(xscale_dma_map_area)
add r1, r1, r0
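@ Dispatch on the DMA direction:
@ DMA_TO_DEVICE -> clean only, DMA_FROM_DEVICE -> invalidate only,
@ DMA_BIDIRECTIONAL -> clean + invalidate.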
cmp r2, #DMA_TO_DEVICE
beq xscale_dma_clean_range
bcs xscale_dma_inv_range
b xscale_dma_flush_range
ENDPROC(xscale_dma_map_area)
/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
* - dir - DMA direction
*/
ENTRY(xscale_80200_A0_A1_dma_map_area)
add r1, r1, r0
teq r2, #DMA_TO_DEVICE
beq xscale_dma_clean_range
b xscale_dma_flush_range
ENDPROC(xscale_80200_A0_A1_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
* - dir - DMA direction
*/
ENTRY(xscale_dma_unmap_area)
ret lr
ENDPROC(xscale_dma_unmap_area)
.globl xscale_flush_kern_cache_louis
.equ xscale_flush_kern_cache_louis, xscale_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions xscale
/*
* On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
* clear the dirty bits, which means that if we invalidate a dirty line,
* the dirty data can still be written back to external memory later on.
*
* The recommended workaround is to always do a clean D-cache line before
* doing an invalidate D-cache line, so on the affected processors,
* dma_inv_range() is implemented as dma_flush_range().
*
* See erratum #25 of "Intel 80200 Processor Specification Update",
* revision January 22, 2003, available at:
* http://www.intel.com/design/iio/specupdt/273415.htm
*/
.macro a0_alias basename
.globl xscale_80200_A0_A1_\basename
.type xscale_80200_A0_A1_\basename , %function
.equ xscale_80200_A0_A1_\basename , xscale_\basename
.endm
/*
* Most of the cache functions are unchanged for these processor revisions.
* Export suitable alias symbols for the unchanged functions:
*/
a0_alias flush_icache_all
a0_alias flush_user_cache_all
a0_alias flush_kern_cache_all
a0_alias flush_kern_cache_louis
a0_alias flush_user_cache_range
a0_alias coherent_kern_range
a0_alias coherent_user_range
a0_alias flush_kern_dcache_area
a0_alias dma_flush_range
a0_alias dma_unmap_area
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions xscale_80200_A0_A1
ENTRY(cpu_xscale_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHELINESIZE
subs r1, r1, #CACHELINESIZE
bhi 1b
ret lr
/* =============================== PageTable ============================== */
/*
* cpu_xscale_switch_mm(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
ENTRY(cpu_xscale_switch_mm)
clean_d_cache r1, r2
mcr p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
cpwait_ret lr, ip
/*
* cpu_xscale_set_pte_ext(ptep, pte, ext)
*
* Set a PTE and flush it out
*
 * Erratum 40: must set memory to write-through for user read-only pages.
*/
cpu_xscale_mt_table:
.long 0x00 @ L_PTE_MT_UNCACHED
.long PTE_BUFFERABLE @ L_PTE_MT_BUFFERABLE
.long PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH
.long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK
.long PTE_EXT_TEX(1) | PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED
.long 0x00 @ unused
.long PTE_EXT_TEX(1) | PTE_CACHEABLE @ L_PTE_MT_MINICACHE
.long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC
.long 0x00 @ unused
.long PTE_BUFFERABLE @ L_PTE_MT_DEV_WC
.long 0x00 @ unused
.long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_DEV_CACHED
.long 0x00 @ L_PTE_MT_DEV_NONSHARED
.long 0x00 @ unused
.long 0x00 @ unused
.long 0x00 @ unused
.align 5
ENTRY(cpu_xscale_set_pte_ext)
xscale_set_pte_ext_prologue
@
@ Erratum 40: must set memory to write-through for user read-only pages
@
and ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_RDONLY) & ~(4 << 2)
teq ip, #L_PTE_MT_WRITEBACK | L_PTE_USER | L_PTE_RDONLY
moveq r1, #L_PTE_MT_WRITETHROUGH
and r1, r1, #L_PTE_MT_MASK
adr ip, cpu_xscale_mt_table
ldr ip, [ip, r1]
bic r2, r2, #0x0c
orr r2, r2, ip
xscale_set_pte_ext_epilogue
ret lr
.ltorg
.align
.globl cpu_xscale_suspend_size
.equ cpu_xscale_suspend_size, 4 * 6
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_xscale_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
mrc p15, 0, r5, c15, c1, 0 @ CP access reg
mrc p15, 0, r6, c13, c0, 0 @ PID
mrc p15, 0, r7, c3, c0, 0 @ domain ID
mrc p15, 0, r8, c1, c0, 1 @ auxiliary control reg
mrc p15, 0, r9, c1, c0, 0 @ control reg
bic r4, r4, #2 @ clear frequency change bit
stmia r0, {r4 - r9} @ store cp regs
ldmfd sp!, {r4 - r9, pc}
ENDPROC(cpu_xscale_do_suspend)
ENTRY(cpu_xscale_do_resume)
ldmia r0, {r4 - r9} @ load cp regs
mov ip, #0
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB
mcr p14, 0, r4, c6, c0, 0 @ clock configuration, turbo mode.
mcr p15, 0, r5, c15, c1, 0 @ CP access reg
mcr p15, 0, r6, c13, c0, 0 @ PID
mcr p15, 0, r7, c3, c0, 0 @ domain ID
mcr p15, 0, r1, c2, c0, 0 @ translation table base addr
mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg
mov r0, r9 @ control register
b cpu_resume_mmu
ENDPROC(cpu_xscale_do_resume)
#endif
.type __xscale_setup, #function
__xscale_setup:
mcr p15, 0, ip, c7, c7, 0 @ invalidate I, D caches & BTB
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I, D TLBs
mov r0, #1 << 6 @ cp6 for IOP3xx and Bulverde
orr r0, r0, #1 << 13 @ It's undefined whether this
mcr p15, 0, r0, c15, c1, 0 @ affects USR or SVC modes
adr r5, xscale_crval
ldmia r5, {r5, r6}
mrc p15, 0, r0, c1, c0, 0 @ get control register
bic r0, r0, r5
orr r0, r0, r6
ret lr
.size __xscale_setup, . - __xscale_setup
/*
* R
* .RVI ZFRS BLDP WCAM
* ..11 1.01 .... .101
*
*/
.type xscale_crval, #object
xscale_crval:
crval clear=0x00003b07, mmuset=0x00003905, ucset=0x00001900
__INITDATA
@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
define_processor_functions xscale, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1
.section ".rodata"
string cpu_arch_name, "armv5te"
string cpu_elf_name, "v5"
string cpu_80200_A0_A1_name, "XScale-80200 A0/A1"
string cpu_80200_name, "XScale-80200"
string cpu_80219_name, "XScale-80219"
string cpu_8032x_name, "XScale-IOP8032x Family"
string cpu_8033x_name, "XScale-IOP8033x Family"
string cpu_pxa250_name, "XScale-PXA250"
string cpu_pxa210_name, "XScale-PXA210"
string cpu_ixp42x_name, "XScale-IXP42x Family"
string cpu_ixp43x_name, "XScale-IXP43x Family"
string cpu_ixp46x_name, "XScale-IXP46x Family"
string cpu_ixp2400_name, "XScale-IXP2400"
string cpu_ixp2800_name, "XScale-IXP2800"
string cpu_pxa255_name, "XScale-PXA255"
string cpu_pxa270_name, "XScale-PXA270"
.align
.section ".proc.info.init", #alloc
.macro xscale_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
.type __\name\()_proc_info,#object
__\name\()_proc_info:
.long \cpu_val
.long \cpu_mask
.long PMD_TYPE_SECT | \
PMD_SECT_BUFFERABLE | \
PMD_SECT_CACHEABLE | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
.long PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
initfn __xscale_setup, __\name\()_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
.long \cpu_name
.long xscale_processor_functions
.long v4wbi_tlb_fns
.long xscale_mc_user_fns
.ifb \cache
.long xscale_cache_fns
.else
.long \cache
.endif
.size __\name\()_proc_info, . - __\name\()_proc_info
.endm
xscale_proc_info 80200_A0_A1, 0x69052000, 0xfffffffe, cpu_80200_name, \
cache=xscale_80200_A0_A1_cache_fns
xscale_proc_info 80200, 0x69052000, 0xfffffff0, cpu_80200_name
xscale_proc_info 80219, 0x69052e20, 0xffffffe0, cpu_80219_name
xscale_proc_info 8032x, 0x69052420, 0xfffff7e0, cpu_8032x_name
xscale_proc_info 8033x, 0x69054010, 0xfffffd30, cpu_8033x_name
xscale_proc_info pxa250, 0x69052100, 0xfffff7f0, cpu_pxa250_name
xscale_proc_info pxa210, 0x69052120, 0xfffff3f0, cpu_pxa210_name
xscale_proc_info ixp2400, 0x69054190, 0xfffffff0, cpu_ixp2400_name
xscale_proc_info ixp2800, 0x690541a0, 0xfffffff0, cpu_ixp2800_name
xscale_proc_info ixp42x, 0x690541c0, 0xffffffc0, cpu_ixp42x_name
xscale_proc_info ixp43x, 0x69054040, 0xfffffff0, cpu_ixp43x_name
xscale_proc_info ixp46x, 0x69054200, 0xffffff00, cpu_ixp46x_name
xscale_proc_info pxa255, 0x69052d00, 0xfffffff0, cpu_pxa255_name
xscale_proc_info pxa270, 0x69054110, 0xfffffff0, cpu_pxa270_name
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,664
|
arch/arm/mm/tlb-v6.S
|
/*
* linux/arch/arm/mm/tlb-v6.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ARM architecture version 6 TLB handling functions.
* These assume a split I/D TLB.
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"
#define HARVARD_TLB
/*
* v6wbi_flush_user_tlb_range(start, end, vma)
*
* Invalidate a range of TLB entries in the specified address space.
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
* - vma - vma_struct describing address range
*
* It is assumed that:
* - the "Invalidate single entry" instruction will invalidate
* both the I and the D TLBs on Harvard-style TLBs
*/
ENTRY(v6wbi_flush_user_tlb_range)
vma_vm_mm r3, r2 @ get vma->vm_mm
mov ip, #0
mmid r3, r3 @ get vm_mm->context.id
mcr p15, 0, ip, c7, c10, 4 @ drain write buffer
mov r0, r0, lsr #PAGE_SHIFT @ align address
mov r1, r1, lsr #PAGE_SHIFT
asid r3, r3 @ mask ASID
orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
mov r1, r1, lsl #PAGE_SHIFT
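@ For the invalidate-by-MVA operations below, the MVA combines the
@ page-aligned virtual address with the ASID in its low bits.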
vma_vm_flags r2, r2 @ get vma->vm_flags
1:
#ifdef HARVARD_TLB
mcr p15, 0, r0, c8, c6, 1 @ TLB invalidate D MVA (was 1)
tst r2, #VM_EXEC @ Executable area ?
mcrne p15, 0, r0, c8, c5, 1 @ TLB invalidate I MVA (was 1)
#else
mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate MVA (was 1)
#endif
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
mcr p15, 0, ip, c7, c10, 4 @ data synchronization barrier
ret lr
/*
* v6wbi_flush_kern_tlb_range(start,end)
*
* Invalidate a range of kernel TLB entries
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*/
ENTRY(v6wbi_flush_kern_tlb_range)
mov r2, #0
mcr p15, 0, r2, c7, c10, 4 @ drain write buffer
mov r0, r0, lsr #PAGE_SHIFT @ align address
mov r1, r1, lsr #PAGE_SHIFT
mov r0, r0, lsl #PAGE_SHIFT
mov r1, r1, lsl #PAGE_SHIFT
1:
#ifdef HARVARD_TLB
mcr p15, 0, r0, c8, c6, 1 @ TLB invalidate D MVA
mcr p15, 0, r0, c8, c5, 1 @ TLB invalidate I MVA
#else
mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate MVA
#endif
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
mcr p15, 0, r2, c7, c10, 4 @ data synchronization barrier
mcr p15, 0, r2, c7, c5, 4 @ prefetch flush (isb)
ret lr
__INIT
/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
define_tlb_functions v6wbi, v6wbi_tlb_flags
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,732
|
arch/arm/mm/tlb-v4wbi.S
|
/*
 * linux/arch/arm/mm/tlb-v4wbi.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ARM architecture version 4 and version 5 TLB handling functions.
 * These assume split I/D TLBs, with a write buffer.
*
* Processors: ARM920 ARM922 ARM925 ARM926 XScale
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"
/*
 * v4wbi_flush_user_tlb_range(start, end, mm)
*
* Invalidate a range of TLB entries in the specified address space.
*
* - start - range start address
* - end - range end address
* - mm - mm_struct describing address space
*/
.align 5
ENTRY(v4wbi_flush_user_tlb_range)
vma_vm_mm ip, r2
act_mm r3 @ get current->active_mm
eors r3, ip, r3 @ == mm ?
retne lr @ no, we don't do anything
mov r3, #0
mcr p15, 0, r3, c7, c10, 4 @ drain WB
vma_vm_flags r2, r2
bic r0, r0, #0x0ff
bic r0, r0, #0xf00
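@ (The two bic instructions above page-align the start address by
@ clearing its low 12 bits with two immediates.)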
1: tst r2, #VM_EXEC
mcrne p15, 0, r0, c8, c5, 1 @ invalidate I TLB entry
mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
ret lr
ENTRY(v4wbi_flush_kern_tlb_range)
mov r3, #0
mcr p15, 0, r3, c7, c10, 4 @ drain WB
bic r0, r0, #0x0ff
bic r0, r0, #0xf00
1: mcr p15, 0, r0, c8, c5, 1 @ invalidate I TLB entry
mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
ret lr
__INITDATA
/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
define_tlb_functions v4wbi, v4wbi_tlb_flags
|
AirFortressIlikara/LS2K0300-linux-4.19
| 16,397
|
arch/arm/mm/proc-feroceon.S
|
/*
* linux/arch/arm/mm/proc-feroceon.S: MMU functions for Feroceon
*
* Heavily based on proc-arm926.S
* Maintainer: Assaf Hoffman <hoffman@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
/*
* This is the maximum size of an area which will be invalidated
* using the single invalidate entry instructions. Anything larger
* than this, and we go for the whole cache.
*
* This value should be chosen such that we choose the cheapest
* alternative.
*/
#define CACHE_DLIMIT 16384
/*
* the cache line size of the I and D cache
*/
#define CACHE_DLINESIZE 32
.bss
.align 3
__cache_params_loc:
.space 8
.text
__cache_params:
.word __cache_params_loc
/*
* cpu_feroceon_proc_init()
*/
ENTRY(cpu_feroceon_proc_init)
mrc p15, 0, r0, c0, c0, 1 @ read cache type register
ldr r1, __cache_params
mov r2, #(16 << 5)
tst r0, #(1 << 16) @ get way
mov r0, r0, lsr #18 @ get cache size order
movne r3, #((4 - 1) << 30) @ 4-way
and r0, r0, #0xf
moveq r3, #0 @ 1-way
mov r2, r2, lsl r0 @ actual cache size
movne r2, r2, lsr #2 @ turned into # of sets
sub r2, r2, #(1 << 5)
stmia r1, {r2, r3}
ret lr
/*
* cpu_feroceon_proc_fin()
*/
ENTRY(cpu_feroceon_proc_fin)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
mov r0, #0
mcr p15, 1, r0, c15, c9, 0 @ clean L2
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
/*
* cpu_feroceon_reset(loc)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_feroceon_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
ENDPROC(cpu_feroceon_reset)
.popsection
/*
* cpu_feroceon_do_idle()
*
* Called with IRQs disabled
*/
.align 5
ENTRY(cpu_feroceon_do_idle)
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr
/*
* flush_icache_all()
*
* Unconditionally clean and invalidate the entire icache.
*/
ENTRY(feroceon_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
ENDPROC(feroceon_flush_icache_all)
/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular
* address space.
*/
.align 5
ENTRY(feroceon_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(feroceon_flush_kern_cache_all)
mov r2, #VM_EXEC
__flush_whole_cache:
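@ __cache_params holds the highest set index (already shifted into the
@ set/way format) and the highest way number (bits 31:30), as computed
@ by cpu_feroceon_proc_init(); the nested loop below issues a
@ clean+invalidate by set/way for every combination.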
ldr r1, __cache_params
ldmia r1, {r1, r3}
1: orr ip, r1, r3
2: mcr p15, 0, ip, c7, c14, 2 @ clean + invalidate D set/way
subs ip, ip, #(1 << 30) @ next way
bcs 2b
subs r1, r1, #(1 << 5) @ next set
bcs 1b
tst r2, #VM_EXEC
mov ip, #0
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
/*
* flush_user_cache_range(start, end, flags)
*
* Clean and invalidate a range of cache entries in the
* specified address range.
*
* - start - start address (inclusive)
* - end - end address (exclusive)
* - flags - vm_flags describing address space
*/
.align 5
ENTRY(feroceon_flush_user_cache_range)
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
bgt __flush_whole_cache
1: tst r2, #VM_EXEC
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
tst r2, #VM_EXEC
mov ip, #0
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
.align 5
ENTRY(feroceon_coherent_kern_range)
/* FALLTHROUGH */
/*
* coherent_user_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(feroceon_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
/*
* flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - kernel address
* - size - region size
*/
.align 5
ENTRY(feroceon_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
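/*
 * The *_range_* variants below use Feroceon's dedicated cp15 range
 * operations, which clean and/or invalidate an inclusive [start, top]
 * virtual address range with a single pair of cp15 writes; interrupts
 * are masked around each start/top pair.
 */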
.align 5
ENTRY(feroceon_range_flush_kern_dcache_area)
mrs r2, cpsr
add r1, r0, #PAGE_SZ - CACHE_DLINESIZE @ top addr is inclusive
orr r3, r2, #PSR_I_BIT
msr cpsr_c, r3 @ disable interrupts
mcr p15, 5, r0, c15, c15, 0 @ D clean/inv range start
mcr p15, 5, r1, c15, c15, 1 @ D clean/inv range top
msr cpsr_c, r2 @ restore interrupts
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/
.align 5
feroceon_dma_inv_range:
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
.align 5
feroceon_range_dma_inv_range:
mrs r2, cpsr
tst r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
cmp r1, r0
subne r1, r1, #1 @ top address is inclusive
orr r3, r2, #PSR_I_BIT
msr cpsr_c, r3 @ disable interrupts
mcr p15, 5, r0, c15, c14, 0 @ D inv range start
mcr p15, 5, r1, c15, c14, 1 @ D inv range top
msr cpsr_c, r2 @ restore interrupts
ret lr
/*
* dma_clean_range(start, end)
*
* Clean the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/
.align 5
feroceon_dma_clean_range:
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
.align 5
feroceon_range_dma_clean_range:
mrs r2, cpsr
cmp r1, r0
subne r1, r1, #1 @ top address is inclusive
orr r3, r2, #PSR_I_BIT
msr cpsr_c, r3 @ disable interrupts
mcr p15, 5, r0, c15, c13, 0 @ D clean range start
mcr p15, 5, r1, c15, c13, 1 @ D clean range top
msr cpsr_c, r2 @ restore interrupts
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
.align 5
ENTRY(feroceon_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
.align 5
ENTRY(feroceon_range_dma_flush_range)
mrs r2, cpsr
cmp r1, r0
subne r1, r1, #1 @ top address is inclusive
orr r3, r2, #PSR_I_BIT
msr cpsr_c, r3 @ disable interrupts
mcr p15, 5, r0, c15, c15, 0 @ D clean/inv range start
mcr p15, 5, r1, c15, c15, 1 @ D clean/inv range top
msr cpsr_c, r2 @ restore interrupts
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
* - dir - DMA direction
*/
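/*
 * The dispatch below follows the usual enum dma_data_direction values
 * (DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2):
 * "beq" takes the TO_DEVICE case (clean only), "bcs" then catches
 * FROM_DEVICE (invalidate only), and the fall-through handles
 * BIDIRECTIONAL (clean + invalidate).
 */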
ENTRY(feroceon_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq feroceon_dma_clean_range
bcs feroceon_dma_inv_range
b feroceon_dma_flush_range
ENDPROC(feroceon_dma_map_area)
/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
* - dir - DMA direction
*/
ENTRY(feroceon_range_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq feroceon_range_dma_clean_range
bcs feroceon_range_dma_inv_range
b feroceon_range_dma_flush_range
ENDPROC(feroceon_range_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
* - dir - DMA direction
*/
ENTRY(feroceon_dma_unmap_area)
ret lr
ENDPROC(feroceon_dma_unmap_area)
.globl feroceon_flush_kern_cache_louis
.equ feroceon_flush_kern_cache_louis, feroceon_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions feroceon
.macro range_alias basename
.globl feroceon_range_\basename
.type feroceon_range_\basename , %function
.equ feroceon_range_\basename , feroceon_\basename
.endm
/*
* Most of the cache functions are unchanged for this case.
* Export suitable alias symbols for the unchanged functions:
*/
range_alias flush_icache_all
range_alias flush_user_cache_all
range_alias flush_kern_cache_all
range_alias flush_kern_cache_louis
range_alias flush_user_cache_range
range_alias coherent_kern_range
range_alias coherent_user_range
range_alias dma_unmap_area
define_cache_functions feroceon_range
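/*
 * dcache_clean_area cleans the L1 range first and then, when a
 * write-back L2 is configured, cleans the matching L2 range as well,
 * so the data is pushed all the way out to memory.
 */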
.align 5
ENTRY(cpu_feroceon_dcache_clean_area)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
mov r2, r0
mov r3, r1
#endif
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
1: mcr p15, 1, r2, c15, c9, 1 @ clean L2 entry
add r2, r2, #CACHE_DLINESIZE
subs r3, r3, #CACHE_DLINESIZE
bhi 1b
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
/* =============================== PageTable ============================== */
/*
* cpu_feroceon_switch_mm(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
ENTRY(cpu_feroceon_switch_mm)
#ifdef CONFIG_MMU
/*
* Note: we wish to call __flush_whole_cache but we need to preserve
* lr to do so. The only way without touching main memory is to
* use r2 which is normally used to test the VM_EXEC flag, and
* compensate locally for the skipped ops if it is not set.
*/
mov r2, lr @ abuse r2 to preserve lr
bl __flush_whole_cache
@ if r2 contains the VM_EXEC bit then the next 2 ops are done already
tst r2, #VM_EXEC
mcreq p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcreq p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
ret r2
#else
ret lr
#endif
/*
* cpu_feroceon_set_pte_ext(ptep, pte, ext)
*
* Set a PTE and flush it out
*/
.align 5
ENTRY(cpu_feroceon_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext wc_disable=0
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
mcr p15, 1, r0, c15, c9, 1 @ clean L2 entry
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif
ret lr
/* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */
.globl cpu_feroceon_suspend_size
.equ cpu_feroceon_suspend_size, 4 * 3
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_feroceon_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
mrc p15, 0, r5, c3, c0, 0 @ Domain ID
mrc p15, 0, r6, c1, c0, 0 @ Control register
stmia r0, {r4 - r6}
ldmfd sp!, {r4 - r6, pc}
ENDPROC(cpu_feroceon_do_suspend)
ENTRY(cpu_feroceon_do_resume)
mov ip, #0
mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs
mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches
ldmia r0, {r4 - r6}
mcr p15, 0, r4, c13, c0, 0 @ PID
mcr p15, 0, r5, c3, c0, 0 @ Domain ID
mcr p15, 0, r1, c2, c0, 0 @ TTB address
mov r0, r6 @ control register
b cpu_resume_mmu
ENDPROC(cpu_feroceon_do_resume)
#endif
.type __feroceon_setup, #function
__feroceon_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
#endif
adr r5, feroceon_crval
ldmia r5, {r5, r6}
mrc p15, 0, r0, c1, c0 @ get control register v4
bic r0, r0, r5
orr r0, r0, r6
ret lr
.size __feroceon_setup, . - __feroceon_setup
/*
* B
* R P
* .RVI UFRS BLDP WCAM
* .011 .001 ..11 0101
*
*/
.type feroceon_crval, #object
feroceon_crval:
crval clear=0x0000773f, mmuset=0x00003135, ucset=0x00001134
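/*
 * crval (defined in proc-macros.S) emits two words: the mask of
 * control register bits to clear, followed by either the "mmuset" or
 * the "ucset" value depending on CONFIG_MMU; __feroceon_setup loads
 * them and applies the bic/orr pair above.
 */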
__INITDATA
@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
define_processor_functions feroceon, dabort=v5t_early_abort, pabort=legacy_pabort
.section ".rodata"
string cpu_arch_name, "armv5te"
string cpu_elf_name, "v5"
string cpu_feroceon_name, "Feroceon"
string cpu_88fr531_name, "Feroceon 88FR531-vd"
string cpu_88fr571_name, "Feroceon 88FR571-vd"
string cpu_88fr131_name, "Feroceon 88FR131"
.align
.section ".proc.info.init", #alloc
.macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req
.type __\name\()_proc_info,#object
__\name\()_proc_info:
.long \cpu_val
.long \cpu_mask
.long PMD_TYPE_SECT | \
PMD_SECT_BUFFERABLE | \
PMD_SECT_CACHEABLE | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
.long PMD_TYPE_SECT | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
initfn __feroceon_setup, __\name\()_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
.long \cpu_name
.long feroceon_processor_functions
.long v4wbi_tlb_fns
.long feroceon_user_fns
.long \cache
.size __\name\()_proc_info, . - __\name\()_proc_info
.endm
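/*
 * Each proc_info record below starts with a CPU id value and mask;
 * the boot-time processor lookup selects the first record for which
 * (MIDR & cpu_mask) == cpu_val, which is how the plain and "range"
 * cache function variants are chosen per Feroceon implementation.
 */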
#ifdef CONFIG_CPU_FEROCEON_OLD_ID
feroceon_proc_info feroceon_old_id, 0x41009260, 0xff00fff0, \
cpu_name=cpu_feroceon_name, cache=feroceon_cache_fns
#endif
feroceon_proc_info 88fr531, 0x56055310, 0xfffffff0, cpu_88fr531_name, \
cache=feroceon_cache_fns
feroceon_proc_info 88fr571, 0x56155710, 0xfffffff0, cpu_88fr571_name, \
cache=feroceon_range_cache_fns
feroceon_proc_info 88fr131, 0x56251310, 0xfffffff0, cpu_88fr131_name, \
cache=feroceon_range_cache_fns
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,253
|
arch/arm/mm/abort-ev6.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include "abort-macro.S"
/*
* Function: v6_early_abort
*
* Params : r2 = pt_regs
* : r4 = aborted context pc
* : r5 = aborted context psr
*
* Returns : r4 - r11, r13 preserved
*
* Purpose : obtain information about current aborted instruction.
* Note: we read user space. This means we might cause a data
* abort here if the I-TLB and D-TLB aren't seeing the same
* picture. Unfortunately, this does happen. We live with it.
*/
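/*
 * On exit the registers match the do_DataAbort(addr, fsr, regs)
 * prototype: r0 = fault address (FAR), r1 = fault status (FSR),
 * r2 = pt_regs.
 */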
.align 5
ENTRY(v6_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
/*
* Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR.
*/
#ifdef CONFIG_ARM_ERRATA_326103
ldr ip, =0x4107b36
mrc p15, 0, r3, c0, c0, 0 @ get processor id
teq ip, r3, lsr #4 @ ARM 1136?
bne 1f
tst r5, #PSR_J_BIT @ Java?
tsteq r5, #PSR_T_BIT @ Thumb?
bne 1f
bic r1, r1, #1 << 11 @ clear bit 11 of FSR
ldr r3, [r4] @ read aborted ARM instruction
ARM_BE8(rev r3, r3)
teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
beq 1f @ yes
tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes.
#endif
1: uaccess_disable ip @ disable userspace access
b do_DataAbort
|
AirFortressIlikara/LS2K0300-linux-4.19
| 11,826
|
arch/arm/mm/proc-arm920.S
|
/*
* linux/arch/arm/mm/proc-arm920.S: MMU functions for ARM920
*
* Copyright (C) 1999,2000 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
* hacked for non-paged-MM by Hyok S. Choi, 2003.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
* These are the low level assembler for performing cache and TLB
* functions on the arm920.
*
* CONFIG_CPU_ARM920_CPU_IDLE -> nohlt
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
/*
* The size of one data cache line.
*/
#define CACHE_DLINESIZE 32
/*
* The number of data cache segments.
*/
#define CACHE_DSEGMENTS 8
/*
* The number of lines in a cache segment.
*/
#define CACHE_DENTRIES 64
/*
* This is the size at which it becomes more efficient to
* clean the whole cache, rather than using the individual
* cache line maintenance instructions.
*/
#define CACHE_DLIMIT 65536
.text
/*
* cpu_arm920_proc_init()
*/
ENTRY(cpu_arm920_proc_init)
ret lr
/*
* cpu_arm920_proc_fin()
*/
ENTRY(cpu_arm920_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
/*
* cpu_arm920_reset(loc)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm920_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
ENDPROC(cpu_arm920_reset)
.popsection
/*
* cpu_arm920_do_idle()
*/
.align 5
ENTRY(cpu_arm920_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
/*
* flush_icache_all()
*
* Unconditionally clean and invalidate the entire icache.
*/
ENTRY(arm920_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
ENDPROC(arm920_flush_icache_all)
/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
* space.
*/
ENTRY(arm920_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
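/*
 * The "clean+invalidate D index" op below encodes the line index in
 * bits [31:26] (64 entries per segment) and the segment in bits [7:5]
 * (8 segments); the two nested subs loops step through every
 * index/segment combination.
 */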
ENTRY(arm920_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 8 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index
subs r3, r3, #1 << 26
bcs 2b @ entries 63 to 0
subs r1, r1, #1 << 5
bcs 1b @ segments 7 to 0
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
/*
* flush_user_cache_range(start, end, flags)
*
* Invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (inclusive)
* - end - end address (exclusive)
* - flags - vm_flags for address space
*/
ENTRY(arm920_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
bhs __flush_whole_cache
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
tst r2, #VM_EXEC
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm920_coherent_kern_range)
/* FALLTHROUGH */
/*
* coherent_user_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm920_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
/*
* flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - kernel address
* - size - region size
*/
ENTRY(arm920_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/
arm920_dma_inv_range:
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
/*
* dma_clean_range(start, end)
*
* Clean the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/
arm920_dma_clean_range:
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm920_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
* - dir - DMA direction
*/
ENTRY(arm920_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq arm920_dma_clean_range
bcs arm920_dma_inv_range
b arm920_dma_flush_range
ENDPROC(arm920_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
* - dir - DMA direction
*/
ENTRY(arm920_dma_unmap_area)
ret lr
ENDPROC(arm920_dma_unmap_area)
.globl arm920_flush_kern_cache_louis
.equ arm920_flush_kern_cache_louis, arm920_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm920
#endif
ENTRY(cpu_arm920_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
ret lr
/* =============================== PageTable ============================== */
/*
* cpu_arm920_switch_mm(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
ENTRY(cpu_arm920_switch_mm)
#ifdef CONFIG_MMU
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else
@ && 'Clean & Invalidate whole DCache'
@ && Re-written to use Index Ops.
@ && Uses registers r1, r3 and ip
mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 8 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index
subs r3, r3, #1 << 26
bcs 2b @ entries 63 to 0
subs r1, r1, #1 << 5
bcs 1b @ segments 7 to 0
#endif
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif
ret lr
/*
* cpu_arm920_set_pte(ptep, pte, ext)
*
* Set a PTE and flush it out
*/
.align 5
ENTRY(cpu_arm920_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif
ret lr
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm920_suspend_size
.equ cpu_arm920_suspend_size, 4 * 3
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_arm920_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
mrc p15, 0, r5, c3, c0, 0 @ Domain ID
mrc p15, 0, r6, c1, c0, 0 @ Control register
stmia r0, {r4 - r6}
ldmfd sp!, {r4 - r6, pc}
ENDPROC(cpu_arm920_do_suspend)
ENTRY(cpu_arm920_do_resume)
mov ip, #0
mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs
mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches
ldmia r0, {r4 - r6}
mcr p15, 0, r4, c13, c0, 0 @ PID
mcr p15, 0, r5, c3, c0, 0 @ Domain ID
mcr p15, 0, r1, c2, c0, 0 @ TTB address
mov r0, r6 @ control register
b cpu_resume_mmu
ENDPROC(cpu_arm920_do_resume)
#endif
.type __arm920_setup, #function
__arm920_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
#endif
adr r5, arm920_crval
ldmia r5, {r5, r6}
mrc p15, 0, r0, c1, c0 @ get control register v4
bic r0, r0, r5
orr r0, r0, r6
ret lr
.size __arm920_setup, . - __arm920_setup
/*
* R
* .RVI ZFRS BLDP WCAM
* ..11 0001 ..11 0101
*
*/
.type arm920_crval, #object
arm920_crval:
crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130
__INITDATA
@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
define_processor_functions arm920, dabort=v4t_early_abort, pabort=legacy_pabort, suspend=1
.section ".rodata"
string cpu_arch_name, "armv4t"
string cpu_elf_name, "v4"
string cpu_arm920_name, "ARM920T"
.align
.section ".proc.info.init", #alloc
.type __arm920_proc_info,#object
__arm920_proc_info:
.long 0x41009200
.long 0xff00fff0
.long PMD_TYPE_SECT | \
PMD_SECT_BUFFERABLE | \
PMD_SECT_CACHEABLE | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
.long PMD_TYPE_SECT | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
initfn __arm920_setup, __arm920_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
.long cpu_arm920_name
.long arm920_processor_functions
.long v4wbi_tlb_fns
.long v4wb_user_fns
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
.long arm920_cache_fns
#else
.long v4wt_cache_fns
#endif
.size __arm920_proc_info, . - __arm920_proc_info
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,603
|
arch/arm/mm/tlb-v4.S
|
/*
* linux/arch/arm/mm/tlb-v4.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ARM architecture version 4 TLB handling functions.
* These assume split I/D TLBs and no write buffer.
*
* Processors: ARM720T
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"
.align 5
/*
* v4_flush_user_tlb_range(start, end, mm)
*
* Invalidate a range of TLB entries in the specified user address space.
*
* - start - range start address
* - end - range end address
* - mm - mm_struct describing address space
*/
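/*
 * ARM720T has no ASIDs, so the TLB only ever holds entries for the
 * currently active mm; if the mm being flushed is not the currently
 * active mm there is nothing to invalidate and the routine returns
 * immediately.
 */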
.align 5
ENTRY(v4_flush_user_tlb_range)
vma_vm_mm ip, r2
act_mm r3 @ get current->active_mm
eors r3, ip, r3 @ == mm ?
retne lr @ no, we dont do anything
.v4_flush_kern_tlb_range:
bic r0, r0, #0x0ff
bic r0, r0, #0xf00
1: mcr p15, 0, r0, c8, c7, 1 @ invalidate TLB entry
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
ret lr
/*
* v4_flush_kern_tlb_range(start, end)
*
* Invalidate a range of TLB entries in the specified kernel
* address range.
*
* - start - virtual address (may not be aligned)
* - end - virtual address (may not be aligned)
*/
.globl v4_flush_kern_tlb_range
.equ v4_flush_kern_tlb_range, .v4_flush_kern_tlb_range
__INITDATA
/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
define_tlb_functions v4, v4_tlb_flags
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,999
|
arch/arm/mm/pv-fixup-asm.S
|
/*
* Copyright (C) 2015 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This assembly is required to safely remap the physical address space
* for Keystone 2
*/
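/*
 * Register usage, as used below: r0/r1 hold the 64-bit physical
 * address delta added to every table entry, r2 points to the level 1
 * tables (with the level 2 tables at r2 + 0x1000), and r3 is the
 * address of the boot data whose level 2 entries are patched
 * separately.
 */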
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/cp15.h>
#include <asm/memory.h>
#include <asm/pgtable.h>
.section ".idmap.text", "ax"
#define L1_ORDER 3
#define L2_ORDER 3
ENTRY(lpae_pgtables_remap_asm)
stmfd sp!, {r4-r8, lr}
mrc p15, 0, r8, c1, c0, 0 @ read control reg
bic ip, r8, #CR_M @ disable caches and MMU
mcr p15, 0, ip, c1, c0, 0
dsb
isb
/* Update level 2 entries covering the kernel */
ldr r6, =(_end - 1)
add r7, r2, #0x1000
add r6, r7, r6, lsr #SECTION_SHIFT - L2_ORDER
add r7, r7, #PAGE_OFFSET >> (SECTION_SHIFT - L2_ORDER)
1: ldrd r4, [r7]
adds r4, r4, r0
adc r5, r5, r1
strd r4, [r7], #1 << L2_ORDER
cmp r7, r6
bls 1b
/* Update level 2 entries for the boot data */
add r7, r2, #0x1000
add r7, r7, r3, lsr #SECTION_SHIFT - L2_ORDER
bic r7, r7, #(1 << L2_ORDER) - 1
ldrd r4, [r7]
adds r4, r4, r0
adc r5, r5, r1
strd r4, [r7], #1 << L2_ORDER
ldrd r4, [r7]
adds r4, r4, r0
adc r5, r5, r1
strd r4, [r7]
/* Update level 1 entries */
mov r6, #4
mov r7, r2
2: ldrd r4, [r7]
adds r4, r4, r0
adc r5, r5, r1
strd r4, [r7], #1 << L1_ORDER
subs r6, r6, #1
bne 2b
mrrc p15, 0, r4, r5, c2 @ read TTBR0
adds r4, r4, r0 @ update physical address
adc r5, r5, r1
mcrr p15, 0, r4, r5, c2 @ write back TTBR0
mrrc p15, 1, r4, r5, c2 @ read TTBR1
adds r4, r4, r0 @ update physical address
adc r5, r5, r1
mcrr p15, 1, r4, r5, c2 @ write back TTBR1
dsb
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ I+BTB cache invalidate
mcr p15, 0, ip, c8, c7, 0 @ local_flush_tlb_all()
dsb
isb
mcr p15, 0, r8, c1, c0, 0 @ re-enable MMU
dsb
isb
ldmfd sp!, {r4-r8, pc}
ENDPROC(lpae_pgtables_remap_asm)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,499
|
arch/arm/mm/l2c-l2x0-resume.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* L2C-310 early resume code. This can be used by platforms to restore
* the settings of their L2 cache controller before restoring the
* processor state.
*
* This code can only be used if you are running in the secure world.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/hardware/cache-l2x0.h>
.text
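/*
 * This may run before the MMU is enabled, so l2x0_saved_regs is
 * reached position-independently: the word at local label 1 holds
 * "l2x0_saved_regs - ." and is added back to the label's runtime
 * address to form a usable pointer.
 */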
ENTRY(l2c310_early_resume)
adr r0, 1f
ldr r2, [r0]
add r0, r2, r0
ldmia r0, {r1, r2, r3, r4, r5, r6, r7, r8}
@ r1 = phys address of L2C-310 controller
@ r2 = aux_ctrl
@ r3 = tag_latency
@ r4 = data_latency
@ r5 = filter_start
@ r6 = filter_end
@ r7 = prefetch_ctrl
@ r8 = pwr_ctrl
@ Check that the address has been initialised
teq r1, #0
reteq lr
@ The prefetch and power control registers are revision dependent
@ and can be written whether or not the L2 cache is enabled
ldr r0, [r1, #L2X0_CACHE_ID]
and r0, r0, #L2X0_CACHE_ID_RTL_MASK
cmp r0, #L310_CACHE_ID_RTL_R2P0
strcs r7, [r1, #L310_PREFETCH_CTRL]
cmp r0, #L310_CACHE_ID_RTL_R3P0
strcs r8, [r1, #L310_POWER_CTRL]
@ Don't setup the L2 cache if it is already enabled
ldr r0, [r1, #L2X0_CTRL]
tst r0, #L2X0_CTRL_EN
retne lr
str r3, [r1, #L310_TAG_LATENCY_CTRL]
str r4, [r1, #L310_DATA_LATENCY_CTRL]
str r6, [r1, #L310_ADDR_FILTER_END]
str r5, [r1, #L310_ADDR_FILTER_START]
str r2, [r1, #L2X0_AUX_CTRL]
mov r9, #L2X0_CTRL_EN
str r9, [r1, #L2X0_CTRL]
ret lr
ENDPROC(l2c310_early_resume)
.align
1: .long l2x0_saved_regs - .
|
AirFortressIlikara/LS2K0300-linux-4.19
| 5,351
|
arch/arm/mm/proc-sa110.S
|
/*
* linux/arch/arm/mm/proc-sa110.S
*
* Copyright (C) 1997-2002 Russell King
* hacked for non-paged-MM by Hyok S. Choi, 2003.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* MMU functions for SA110
*
* These are the low level assembler for performing cache and TLB
* functions on the StrongARM-110.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <mach/hardware.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
/*
* the cache line size of the I and D cache
*/
#define DCACHELINESIZE 32
.text
/*
* cpu_sa110_proc_init()
*/
ENTRY(cpu_sa110_proc_init)
mov r0, #0
mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching
ret lr
/*
* cpu_sa110_proc_fin()
*/
ENTRY(cpu_sa110_proc_fin)
mov r0, #0
mcr p15, 0, r0, c15, c2, 2 @ Disable clock switching
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
/*
* cpu_sa110_reset(loc)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_sa110_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
ENDPROC(cpu_sa110_reset)
.popsection
/*
* cpu_sa110_do_idle(type)
*
* Cause the processor to idle
*
* type: call type:
* 0 = slow idle
* 1 = fast idle
* 2 = switch to slow processor clock
* 3 = switch to fast processor clock
*/
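/*
 * The sequence below disables clock switching and performs a load
 * from UNCACHEABLE_ADDR to force the core onto the memory clock
 * before the cache-aligned wait-for-interrupt; the surrounding
 * "mov r0, r0" instructions are padding/safety nops, and clock
 * switching is re-enabled once the core wakes up.
 */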
.align 5
ENTRY(cpu_sa110_do_idle)
mcr p15, 0, ip, c15, c2, 2 @ disable clock switching
ldr r1, =UNCACHEABLE_ADDR @ load from uncacheable loc
ldr r1, [r1, #0] @ force switch to MCLK
mov r0, r0 @ safety
mov r0, r0 @ safety
mov r0, r0 @ safety
mcr p15, 0, r0, c15, c8, 2 @ Wait for interrupt, cache aligned
mov r0, r0 @ safety
mov r0, r0 @ safety
mov r0, r0 @ safety
mcr p15, 0, r0, c15, c1, 2 @ enable clock switching
ret lr
/* ================================= CACHE ================================ */
/*
* cpu_sa110_dcache_clean_area(addr,sz)
*
* Clean the specified entry of any caches such that the MMU
* translation fetches will obtain correct data.
*
* addr: cache-unaligned virtual address
*/
.align 5
ENTRY(cpu_sa110_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
subs r1, r1, #DCACHELINESIZE
bhi 1b
ret lr
/* =============================== PageTable ============================== */
/*
* cpu_sa110_switch_mm(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
ENTRY(cpu_sa110_switch_mm)
#ifdef CONFIG_MMU
str lr, [sp, #-4]!
bl v4wb_flush_kern_cache_all @ clears IP
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
ldr pc, [sp], #4
#else
ret lr
#endif
/*
* cpu_sa110_set_pte_ext(ptep, pte, ext)
*
* Set a PTE and flush it out
*/
.align 5
ENTRY(cpu_sa110_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext wc_disable=0
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif
ret lr
.type __sa110_setup, #function
__sa110_setup:
mov r10, #0
mcr p15, 0, r10, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r10, c7, c10, 4 @ drain write buffer on v4
#ifdef CONFIG_MMU
mcr p15, 0, r10, c8, c7 @ invalidate I,D TLBs on v4
#endif
adr r5, sa110_crval
ldmia r5, {r5, r6}
mrc p15, 0, r0, c1, c0 @ get control register v4
bic r0, r0, r5
orr r0, r0, r6
ret lr
.size __sa110_setup, . - __sa110_setup
/*
* R
* .RVI ZFRS BLDP WCAM
* ..01 0001 ..11 1101
*
*/
.type sa110_crval, #object
sa110_crval:
crval clear=0x00003f3f, mmuset=0x0000113d, ucset=0x00001130
__INITDATA
@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
define_processor_functions sa110, dabort=v4_early_abort, pabort=legacy_pabort
.section ".rodata"
string cpu_arch_name, "armv4"
string cpu_elf_name, "v4"
string cpu_sa110_name, "StrongARM-110"
.align
.section ".proc.info.init", #alloc
.type __sa110_proc_info,#object
__sa110_proc_info:
.long 0x4401a100
.long 0xfffffff0
.long PMD_TYPE_SECT | \
PMD_SECT_BUFFERABLE | \
PMD_SECT_CACHEABLE | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
.long PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
initfn __sa110_setup, __sa110_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
.long cpu_sa110_name
.long sa110_processor_functions
.long v4wb_tlb_fns
.long v4wb_user_fns
.long v4wb_cache_fns
.size __sa110_proc_info, . - __sa110_proc_info
|
AirFortressIlikara/LS2K0300-linux-4.19
| 25,229
|
arch/arm/mm/proc-v7.S
|
/*
* linux/arch/arm/mm/proc-v7.S
*
* Copyright (C) 2001 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This is the "shell" of the ARMv7 processor support.
*/
#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
#include "proc-macros.S"
#ifdef CONFIG_ARM_LPAE
#include "proc-v7-3level.S"
#else
#include "proc-v7-2level.S"
#endif
ENTRY(cpu_v7_proc_init)
ret lr
ENDPROC(cpu_v7_proc_init)
ENTRY(cpu_v7_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x0006 @ .............ca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
ENDPROC(cpu_v7_proc_fin)
/*
* cpu_v7_reset(loc, hyp)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* - loc - location to jump to for soft reset
* - hyp - indicate if restart occurs in HYP mode
*
* This code must be executed using a flat identity mapping with
* caches disabled.
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_v7_reset)
mrc p15, 0, r2, c1, c0, 0 @ ctrl register
bic r2, r2, #0x1 @ ...............m
THUMB( bic r2, r2, #1 << 30 ) @ SCTLR.TE (Thumb exceptions)
mcr p15, 0, r2, c1, c0, 0 @ disable MMU
isb
#ifdef CONFIG_ARM_VIRT_EXT
teq r1, #0
bne __hyp_soft_restart
#endif
bx r0
ENDPROC(cpu_v7_reset)
.popsection
/*
* cpu_v7_do_idle()
*
* Idle the processor (eg, wait for interrupt).
*
* IRQs are already disabled.
*/
ENTRY(cpu_v7_do_idle)
dsb @ WFI may enter a low-power mode
wfi
ret lr
ENDPROC(cpu_v7_do_idle)
ENTRY(cpu_v7_dcache_clean_area)
ALT_SMP(W(nop)) @ MP extensions imply L1 PTW
ALT_UP_B(1f)
ret lr
1: dcache_line_size r2, r3
2: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, r2
subs r1, r1, r2
bhi 2b
dsb ishst
ret lr
ENDPROC(cpu_v7_dcache_clean_area)
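/*
 * The switch_mm variants below exist for branch predictor hardening
 * (Spectre v2): the smc/hvc versions invoke the firmware
 * ARM_SMCCC_ARCH_WORKAROUND_1 call on context switch, while the
 * iciallu/bpiall versions invalidate the I-cache or the branch
 * predictor locally before falling through to cpu_v7_switch_mm.
 */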
#ifdef CONFIG_ARM_PSCI
.arch_extension sec
ENTRY(cpu_v7_smc_switch_mm)
stmfd sp!, {r0 - r3}
movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
smc #0
ldmfd sp!, {r0 - r3}
b cpu_v7_switch_mm
ENDPROC(cpu_v7_smc_switch_mm)
.arch_extension virt
ENTRY(cpu_v7_hvc_switch_mm)
stmfd sp!, {r0 - r3}
movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
hvc #0
ldmfd sp!, {r0 - r3}
b cpu_v7_switch_mm
ENDPROC(cpu_v7_hvc_switch_mm)
#endif
ENTRY(cpu_v7_iciallu_switch_mm)
mov r3, #0
mcr p15, 0, r3, c7, c5, 0 @ ICIALLU
b cpu_v7_switch_mm
ENDPROC(cpu_v7_iciallu_switch_mm)
ENTRY(cpu_v7_bpiall_switch_mm)
mov r3, #0
mcr p15, 0, r3, c7, c5, 6 @ flush BTAC/BTB
b cpu_v7_switch_mm
ENDPROC(cpu_v7_bpiall_switch_mm)
string cpu_v7_name, "ARMv7 Processor"
.align
/* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
.globl cpu_v7_suspend_size
.equ cpu_v7_suspend_size, 4 * 9
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_v7_do_suspend)
stmfd sp!, {r4 - r11, lr}
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID
stmia r0!, {r4 - r5}
#ifdef CONFIG_MMU
mrc p15, 0, r6, c3, c0, 0 @ Domain ID
#ifdef CONFIG_ARM_LPAE
mrrc p15, 1, r5, r7, c2 @ TTB 1
#else
mrc p15, 0, r7, c2, c0, 1 @ TTB 1
#endif
mrc p15, 0, r11, c2, c0, 2 @ TTB control register
#endif
mrc p15, 0, r8, c1, c0, 0 @ Control register
mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register
mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control
stmia r0, {r5 - r11}
ldmfd sp!, {r4 - r11, pc}
ENDPROC(cpu_v7_do_suspend)
ENTRY(cpu_v7_do_resume)
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID
ldmia r0!, {r4 - r5}
mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID
mcr p15, 0, r5, c13, c0, 3 @ User r/o thread ID
ldmia r0, {r5 - r11}
#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs
mcr p15, 0, r6, c3, c0, 0 @ Domain ID
#ifdef CONFIG_ARM_LPAE
mcrr p15, 0, r1, ip, c2 @ TTB 0
mcrr p15, 1, r5, r7, c2 @ TTB 1
#else
ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP)
ALT_UP(orr r1, r1, #TTB_FLAGS_UP)
mcr p15, 0, r1, c2, c0, 0 @ TTB 0
mcr p15, 0, r7, c2, c0, 1 @ TTB 1
#endif
mcr p15, 0, r11, c2, c0, 2 @ TTB control register
ldr r4, =PRRR @ PRRR
ldr r5, =NMRR @ NMRR
mcr p15, 0, r4, c10, c2, 0 @ write PRRR
mcr p15, 0, r5, c10, c2, 1 @ write NMRR
#endif /* CONFIG_MMU */
mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register
teq r4, r9 @ Is it already set?
mcrne p15, 0, r9, c1, c0, 1 @ No, so write it
mcr p15, 0, r10, c1, c0, 2 @ Co-processor access control
isb
dsb
mov r0, r8 @ control register
b cpu_resume_mmu
ENDPROC(cpu_v7_do_resume)
#endif
.globl cpu_ca9mp_suspend_size
.equ cpu_ca9mp_suspend_size, cpu_v7_suspend_size + 4 * 2
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_ca9mp_do_suspend)
stmfd sp!, {r4 - r5}
mrc p15, 0, r4, c15, c0, 1 @ Diagnostic register
mrc p15, 0, r5, c15, c0, 0 @ Power register
stmia r0!, {r4 - r5}
ldmfd sp!, {r4 - r5}
b cpu_v7_do_suspend
ENDPROC(cpu_ca9mp_do_suspend)
ENTRY(cpu_ca9mp_do_resume)
ldmia r0!, {r4 - r5}
mrc p15, 0, r10, c15, c0, 1 @ Read Diagnostic register
teq r4, r10 @ Already restored?
mcrne p15, 0, r4, c15, c0, 1 @ No, so restore it
mrc p15, 0, r10, c15, c0, 0 @ Read Power register
teq r5, r10 @ Already restored?
mcrne p15, 0, r5, c15, c0, 0 @ No, so restore it
b cpu_v7_do_resume
ENDPROC(cpu_ca9mp_do_resume)
#endif
#ifdef CONFIG_CPU_PJ4B
globl_equ cpu_pj4b_switch_mm, cpu_v7_switch_mm
globl_equ cpu_pj4b_set_pte_ext, cpu_v7_set_pte_ext
globl_equ cpu_pj4b_proc_init, cpu_v7_proc_init
globl_equ cpu_pj4b_proc_fin, cpu_v7_proc_fin
globl_equ cpu_pj4b_reset, cpu_v7_reset
#ifdef CONFIG_PJ4B_ERRATA_4742
ENTRY(cpu_pj4b_do_idle)
dsb @ WFI may enter a low-power mode
wfi
dsb @barrier
ret lr
ENDPROC(cpu_pj4b_do_idle)
#else
globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle
#endif
globl_equ cpu_pj4b_dcache_clean_area, cpu_v7_dcache_clean_area
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_pj4b_do_suspend)
stmfd sp!, {r6 - r10}
mrc p15, 1, r6, c15, c1, 0 @ save CP15 - extra features
mrc p15, 1, r7, c15, c2, 0 @ save CP15 - Aux Func Modes Ctrl 0
mrc p15, 1, r8, c15, c1, 2 @ save CP15 - Aux Debug Modes Ctrl 2
mrc p15, 1, r9, c15, c1, 1 @ save CP15 - Aux Debug Modes Ctrl 1
mrc p15, 0, r10, c9, c14, 0 @ save CP15 - PMC
stmia r0!, {r6 - r10}
ldmfd sp!, {r6 - r10}
b cpu_v7_do_suspend
ENDPROC(cpu_pj4b_do_suspend)
ENTRY(cpu_pj4b_do_resume)
ldmia r0!, {r6 - r10}
mcr p15, 1, r6, c15, c1, 0 @ restore CP15 - extra features
mcr p15, 1, r7, c15, c2, 0 @ restore CP15 - Aux Func Modes Ctrl 0
mcr p15, 1, r8, c15, c1, 2 @ restore CP15 - Aux Debug Modes Ctrl 2
mcr p15, 1, r9, c15, c1, 1 @ restore CP15 - Aux Debug Modes Ctrl 1
mcr p15, 0, r10, c9, c14, 0 @ restore CP15 - PMC
b cpu_v7_do_resume
ENDPROC(cpu_pj4b_do_resume)
#endif
.globl cpu_pj4b_suspend_size
.equ cpu_pj4b_suspend_size, cpu_v7_suspend_size + 4 * 5
#endif
/*
* __v7_setup
*
* Initialise TLB, Caches, and MMU state ready to switch the MMU
* on. Return in r0 the new CP15 C1 control register setting.
*
* r1, r2, r4, r5, r9, r13 must be preserved - r13 is not a stack
* r4: TTBR0 (low word)
* r5: TTBR0 (high word if LPAE)
* r8: TTBR1
* r9: Main ID register
*
* This should be able to cover all ARMv7 cores.
*
* It is assumed that:
* - cache type register is implemented
*/
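/*
 * The setup paths below run with the MMU off and no stack, so they
 * borrow a small static buffer (__v7_setup_stack, 7 words) instead:
 * __v7_setup_stack_ptr stores its PHYS_RELATIVE offset so the buffer's
 * physical address can be formed with a single PC-relative add.
 */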
__v7_ca5mp_setup:
__v7_ca9mp_setup:
__v7_cr7mp_setup:
__v7_cr8mp_setup:
mov r10, #(1 << 0) @ Cache/TLB ops broadcasting
b 1f
__v7_ca7mp_setup:
__v7_ca12mp_setup:
__v7_ca15mp_setup:
__v7_b15mp_setup:
__v7_ca17mp_setup:
mov r10, #0
1: adr r0, __v7_setup_stack_ptr
ldr r12, [r0]
add r12, r12, r0 @ the local stack
stmia r12, {r1-r6, lr} @ v7_invalidate_l1 touches r0-r6
bl v7_invalidate_l1
ldmia r12, {r1-r6, lr}
#ifdef CONFIG_SMP
orr r10, r10, #(1 << 6) @ Enable SMP/nAMP mode
ALT_SMP(mrc p15, 0, r0, c1, c0, 1)
ALT_UP(mov r0, r10) @ fake it for UP
orr r10, r10, r0 @ Set required bits
teq r10, r0 @ Were they already set?
mcrne p15, 0, r10, c1, c0, 1 @ No, update register
#endif
b __v7_setup_cont
/*
* Errata:
* r0, r10 available for use
* r1, r2, r4, r5, r9, r13: must be preserved
* r3: contains MIDR rX number in bits 23-20
* r6: contains MIDR rXpY as 8-bit XY number
* r9: MIDR
*/
__ca8_errata:
#if defined(CONFIG_ARM_ERRATA_430973) && !defined(CONFIG_ARCH_MULTIPLATFORM)
teq r3, #0x00100000 @ only present in r1p*
mrceq p15, 0, r0, c1, c0, 1 @ read aux control register
orreq r0, r0, #(1 << 6) @ set IBE to 1
mcreq p15, 0, r0, c1, c0, 1 @ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_458693
teq r6, #0x20 @ only present in r2p0
mrceq p15, 0, r0, c1, c0, 1 @ read aux control register
orreq r0, r0, #(1 << 5) @ set L1NEON to 1
orreq r0, r0, #(1 << 9) @ set PLDNOP to 1
mcreq p15, 0, r0, c1, c0, 1 @ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_460075
teq r6, #0x20 @ only present in r2p0
mrceq p15, 1, r0, c9, c0, 2 @ read L2 cache aux ctrl register
tsteq r0, #1 << 22
orreq r0, r0, #(1 << 22) @ set the Write Allocate disable bit
mcreq p15, 1, r0, c9, c0, 2 @ write the L2 cache aux ctrl register
#endif
b __errata_finish
__ca9_errata:
#ifdef CONFIG_ARM_ERRATA_742230
cmp r6, #0x22 @ only present up to r2p2
mrcle p15, 0, r0, c15, c0, 1 @ read diagnostic register
orrle r0, r0, #1 << 4 @ set bit #4
mcrle p15, 0, r0, c15, c0, 1 @ write diagnostic register
#endif
#ifdef CONFIG_ARM_ERRATA_742231
teq r6, #0x20 @ present in r2p0
teqne r6, #0x21 @ present in r2p1
teqne r6, #0x22 @ present in r2p2
mrceq p15, 0, r0, c15, c0, 1 @ read diagnostic register
orreq r0, r0, #1 << 12 @ set bit #12
orreq r0, r0, #1 << 22 @ set bit #22
mcreq p15, 0, r0, c15, c0, 1 @ write diagnostic register
#endif
#ifdef CONFIG_ARM_ERRATA_743622
teq r3, #0x00200000 @ only present in r2p*
mrceq p15, 0, r0, c15, c0, 1 @ read diagnostic register
orreq r0, r0, #1 << 6 @ set bit #6
mcreq p15, 0, r0, c15, c0, 1 @ write diagnostic register
#endif
#if defined(CONFIG_ARM_ERRATA_751472) && defined(CONFIG_SMP)
ALT_SMP(cmp r6, #0x30) @ present prior to r3p0
ALT_UP_B(1f)
mrclt p15, 0, r0, c15, c0, 1 @ read diagnostic register
orrlt r0, r0, #1 << 11 @ set bit #11
mcrlt p15, 0, r0, c15, c0, 1 @ write diagnostic register
1:
#endif
b __errata_finish
__ca15_errata:
#ifdef CONFIG_ARM_ERRATA_773022
cmp r6, #0x4 @ only present up to r0p4
mrcle p15, 0, r0, c1, c0, 1 @ read aux control register
orrle r0, r0, #1 << 1 @ disable loop buffer
mcrle p15, 0, r0, c1, c0, 1 @ write aux control register
#endif
b __errata_finish
__ca12_errata:
#ifdef CONFIG_ARM_ERRATA_818325_852422
mrc p15, 0, r10, c15, c0, 1 @ read diagnostic register
orr r10, r10, #1 << 12 @ set bit #12
mcr p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
#ifdef CONFIG_ARM_ERRATA_821420
mrc p15, 0, r10, c15, c0, 2 @ read internal feature reg
orr r10, r10, #1 << 1 @ set bit #1
mcr p15, 0, r10, c15, c0, 2 @ write internal feature reg
#endif
#ifdef CONFIG_ARM_ERRATA_825619
mrc p15, 0, r10, c15, c0, 1 @ read diagnostic register
orr r10, r10, #1 << 24 @ set bit #24
mcr p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
b __errata_finish
__ca17_errata:
#ifdef CONFIG_ARM_ERRATA_852421
cmp r6, #0x12 @ only present up to r1p2
mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register
orrle r10, r10, #1 << 24 @ set bit #24
mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
#ifdef CONFIG_ARM_ERRATA_852423
cmp r6, #0x12 @ only present up to r1p2
mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register
orrle r10, r10, #1 << 12 @ set bit #12
mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
b __errata_finish
__v7_pj4b_setup:
#ifdef CONFIG_CPU_PJ4B
/* Auxiliary Debug Modes Control 1 Register */
#define PJ4B_STATIC_BP (1 << 2) /* Enable Static BP */
#define PJ4B_INTER_PARITY (1 << 8) /* Disable Internal Parity Handling */
#define PJ4B_CLEAN_LINE (1 << 16) /* Disable data transfer for clean line */
/* Auxiliary Debug Modes Control 2 Register */
#define PJ4B_FAST_LDR (1 << 23) /* Disable fast LDR */
#define PJ4B_SNOOP_DATA (1 << 25) /* Do not interleave write and snoop data */
#define PJ4B_CWF (1 << 27) /* Disable Critical Word First feature */
#define PJ4B_OUTSDNG_NC (1 << 29) /* Disable outstanding non cacheable rqst */
#define PJ4B_L1_REP_RR (1 << 30) /* L1 replacement - Strict round robin */
#define PJ4B_AUX_DBG_CTRL2 (PJ4B_SNOOP_DATA | PJ4B_CWF |\
PJ4B_OUTSDNG_NC | PJ4B_L1_REP_RR)
/* Auxiliary Functional Modes Control Register 0 */
#define PJ4B_SMP_CFB (1 << 1) /* Set SMP mode. Join the coherency fabric */
#define PJ4B_L1_PAR_CHK (1 << 2) /* Support L1 parity checking */
#define PJ4B_BROADCAST_CACHE (1 << 8) /* Broadcast Cache and TLB maintenance */
/* Auxiliary Debug Modes Control 0 Register */
#define PJ4B_WFI_WFE (1 << 22) /* WFI/WFE - serve the DVM and back to idle */
/* Auxiliary Debug Modes Control 1 Register */
mrc p15, 1, r0, c15, c1, 1
orr r0, r0, #PJ4B_CLEAN_LINE
orr r0, r0, #PJ4B_INTER_PARITY
bic r0, r0, #PJ4B_STATIC_BP
mcr p15, 1, r0, c15, c1, 1
/* Auxiliary Debug Modes Control 2 Register */
mrc p15, 1, r0, c15, c1, 2
bic r0, r0, #PJ4B_FAST_LDR
orr r0, r0, #PJ4B_AUX_DBG_CTRL2
mcr p15, 1, r0, c15, c1, 2
/* Auxiliary Functional Modes Control Register 0 */
mrc p15, 1, r0, c15, c2, 0
#ifdef CONFIG_SMP
orr r0, r0, #PJ4B_SMP_CFB
#endif
orr r0, r0, #PJ4B_L1_PAR_CHK
orr r0, r0, #PJ4B_BROADCAST_CACHE
mcr p15, 1, r0, c15, c2, 0
/* Auxiliary Debug Modes Control 0 Register */
mrc p15, 1, r0, c15, c1, 0
orr r0, r0, #PJ4B_WFI_WFE
mcr p15, 1, r0, c15, c1, 0
#endif /* CONFIG_CPU_PJ4B */
__v7_setup:
adr r0, __v7_setup_stack_ptr
ldr r12, [r0]
add r12, r12, r0 @ the local stack
stmia r12, {r1-r6, lr} @ v7_invalidate_l1 touches r0-r6
bl v7_invalidate_l1
ldmia r12, {r1-r6, lr}
__v7_setup_cont:
and r0, r9, #0xff000000 @ ARM?
teq r0, #0x41000000
bne __errata_finish
and r3, r9, #0x00f00000 @ variant
and r6, r9, #0x0000000f @ revision
orr r6, r6, r3, lsr #20-4 @ combine variant and revision
ubfx r0, r9, #4, #12 @ primary part number
/* Cortex-A8 Errata */
ldr r10, =0x00000c08 @ Cortex-A8 primary part number
teq r0, r10
beq __ca8_errata
/* Cortex-A9 Errata */
ldr r10, =0x00000c09 @ Cortex-A9 primary part number
teq r0, r10
beq __ca9_errata
/* Cortex-A12 Errata */
ldr r10, =0x00000c0d @ Cortex-A12 primary part number
teq r0, r10
beq __ca12_errata
/* Cortex-A17 Errata */
ldr r10, =0x00000c0e @ Cortex-A17 primary part number
teq r0, r10
beq __ca17_errata
/* Cortex-A15 Errata */
ldr r10, =0x00000c0f @ Cortex-A15 primary part number
teq r0, r10
beq __ca15_errata
__errata_finish:
mov r10, #0
mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
#ifdef CONFIG_MMU
mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
v7_ttb_setup r10, r4, r5, r8, r3 @ TTBCR, TTBRx setup
ldr r3, =PRRR @ PRRR
ldr r6, =NMRR @ NMRR
mcr p15, 0, r3, c10, c2, 0 @ write PRRR
mcr p15, 0, r6, c10, c2, 1 @ write NMRR
#endif
dsb @ Complete invalidations
#ifndef CONFIG_ARM_THUMBEE
mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE
and r0, r0, #(0xf << 12) @ ThumbEE enabled field
teq r0, #(1 << 12) @ check if ThumbEE is present
bne 1f
mov r3, #0
mcr p14, 6, r3, c1, c0, 0 @ Initialize TEEHBR to 0
mrc p14, 6, r0, c0, c0, 0 @ load TEECR
orr r0, r0, #1 @ set the 1st bit in order to
mcr p14, 6, r0, c0, c0, 0 @ stop userspace TEEHBR access
1:
#endif
adr r3, v7_crval
ldmia r3, {r3, r6}
ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables
#ifdef CONFIG_SWP_EMULATE
orr r3, r3, #(1 << 10) @ set SW bit in "clear"
bic r6, r6, #(1 << 10) @ clear it in "mmuset"
#endif
mrc p15, 0, r0, c1, c0, 0 @ read control register
bic r0, r0, r3 @ clear the bits in "clear"
orr r0, r0, r6 @ set the bits in "mmuset"/"ucset"
THUMB( orr r0, r0, #1 << 30 ) @ Thumb exceptions
ret lr @ return to head.S:__ret
.align 2
__v7_setup_stack_ptr:
.word PHYS_RELATIVE(__v7_setup_stack, .)
ENDPROC(__v7_setup)
.bss
.align 2
__v7_setup_stack:
.space 4 * 7 @ 7 registers
__INITDATA
.weak cpu_v7_bugs_init
@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
@ generic v7 bpiall on context switch
globl_equ cpu_v7_bpiall_proc_init, cpu_v7_proc_init
globl_equ cpu_v7_bpiall_proc_fin, cpu_v7_proc_fin
globl_equ cpu_v7_bpiall_reset, cpu_v7_reset
globl_equ cpu_v7_bpiall_do_idle, cpu_v7_do_idle
globl_equ cpu_v7_bpiall_dcache_clean_area, cpu_v7_dcache_clean_area
globl_equ cpu_v7_bpiall_set_pte_ext, cpu_v7_set_pte_ext
globl_equ cpu_v7_bpiall_suspend_size, cpu_v7_suspend_size
#ifdef CONFIG_ARM_CPU_SUSPEND
globl_equ cpu_v7_bpiall_do_suspend, cpu_v7_do_suspend
globl_equ cpu_v7_bpiall_do_resume, cpu_v7_do_resume
#endif
define_processor_functions v7_bpiall, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_bpiall_processor_functions
#else
#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_processor_functions
#endif
#ifndef CONFIG_ARM_LPAE
@ Cortex-A8 - always needs bpiall switch_mm implementation
globl_equ cpu_ca8_proc_init, cpu_v7_proc_init
globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin
globl_equ cpu_ca8_reset, cpu_v7_reset
globl_equ cpu_ca8_do_idle, cpu_v7_do_idle
globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext
globl_equ cpu_ca8_switch_mm, cpu_v7_bpiall_switch_mm
globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size
#ifdef CONFIG_ARM_CPU_SUSPEND
globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend
globl_equ cpu_ca8_do_resume, cpu_v7_do_resume
#endif
define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca8_ibe
@ Cortex-A9 - needs more registers preserved across suspend/resume
@ and bpiall switch_mm for hardening
globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init
globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin
globl_equ cpu_ca9mp_reset, cpu_v7_reset
globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle
globl_equ cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
globl_equ cpu_ca9mp_switch_mm, cpu_v7_bpiall_switch_mm
#else
globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm
#endif
globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext
define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
#endif
@ Cortex-A15 - needs iciallu switch_mm for hardening
globl_equ cpu_ca15_proc_init, cpu_v7_proc_init
globl_equ cpu_ca15_proc_fin, cpu_v7_proc_fin
globl_equ cpu_ca15_reset, cpu_v7_reset
globl_equ cpu_ca15_do_idle, cpu_v7_do_idle
globl_equ cpu_ca15_dcache_clean_area, cpu_v7_dcache_clean_area
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
globl_equ cpu_ca15_switch_mm, cpu_v7_iciallu_switch_mm
#else
globl_equ cpu_ca15_switch_mm, cpu_v7_switch_mm
#endif
globl_equ cpu_ca15_set_pte_ext, cpu_v7_set_pte_ext
globl_equ cpu_ca15_suspend_size, cpu_v7_suspend_size
globl_equ cpu_ca15_do_suspend, cpu_v7_do_suspend
globl_equ cpu_ca15_do_resume, cpu_v7_do_resume
define_processor_functions ca15, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca15_ibe
#ifdef CONFIG_CPU_PJ4B
define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
#endif
.section ".rodata"
string cpu_arch_name, "armv7"
string cpu_elf_name, "v7"
.align
.section ".proc.info.init", #alloc
/*
* Standard v7 proc info content
*/
.macro __v7_proc name, initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions, cache_fns = v7_cache_fns
ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags)
ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
PMD_SECT_AF | PMD_FLAGS_UP | \mm_mmuflags)
.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ | PMD_SECT_AF | \io_mmuflags
initfn \initfunc, \name
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \
HWCAP_EDSP | HWCAP_TLS | \hwcaps
.long cpu_v7_name
.long \proc_fns
.long v7wbi_tlb_fns
.long v6_user_fns
.long \cache_fns
.endm
#ifndef CONFIG_ARM_LPAE
/*
* ARM Ltd. Cortex A5 processor.
*/
.type __v7_ca5mp_proc_info, #object
__v7_ca5mp_proc_info:
.long 0x410fc050
.long 0xff0ffff0
__v7_proc __v7_ca5mp_proc_info, __v7_ca5mp_setup
.size __v7_ca5mp_proc_info, . - __v7_ca5mp_proc_info
/*
* ARM Ltd. Cortex A9 processor.
*/
.type __v7_ca9mp_proc_info, #object
__v7_ca9mp_proc_info:
.long 0x410fc090
.long 0xff0ffff0
__v7_proc __v7_ca9mp_proc_info, __v7_ca9mp_setup, proc_fns = ca9mp_processor_functions
.size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
/*
* ARM Ltd. Cortex A8 processor.
*/
.type __v7_ca8_proc_info, #object
__v7_ca8_proc_info:
.long 0x410fc080
.long 0xff0ffff0
__v7_proc __v7_ca8_proc_info, __v7_setup, proc_fns = ca8_processor_functions
.size __v7_ca8_proc_info, . - __v7_ca8_proc_info
#endif /* CONFIG_ARM_LPAE */
/*
* Marvell PJ4B processor.
*/
#ifdef CONFIG_CPU_PJ4B
.type __v7_pj4b_proc_info, #object
__v7_pj4b_proc_info:
.long 0x560f5800
.long 0xff0fff00
__v7_proc __v7_pj4b_proc_info, __v7_pj4b_setup, proc_fns = pj4b_processor_functions
.size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info
#endif
/*
* ARM Ltd. Cortex R7 processor.
*/
.type __v7_cr7mp_proc_info, #object
__v7_cr7mp_proc_info:
.long 0x410fc170
.long 0xff0ffff0
__v7_proc __v7_cr7mp_proc_info, __v7_cr7mp_setup
.size __v7_cr7mp_proc_info, . - __v7_cr7mp_proc_info
/*
* ARM Ltd. Cortex R8 processor.
*/
.type __v7_cr8mp_proc_info, #object
__v7_cr8mp_proc_info:
.long 0x410fc180
.long 0xff0ffff0
__v7_proc __v7_cr8mp_proc_info, __v7_cr8mp_setup
.size __v7_cr8mp_proc_info, . - __v7_cr8mp_proc_info
/*
* ARM Ltd. Cortex A7 processor.
*/
.type __v7_ca7mp_proc_info, #object
__v7_ca7mp_proc_info:
.long 0x410fc070
.long 0xff0ffff0
__v7_proc __v7_ca7mp_proc_info, __v7_ca7mp_setup
.size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
/*
* ARM Ltd. Cortex A12 processor.
*/
.type __v7_ca12mp_proc_info, #object
__v7_ca12mp_proc_info:
.long 0x410fc0d0
.long 0xff0ffff0
__v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
.size __v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info
/*
* ARM Ltd. Cortex A15 processor.
*/
.type __v7_ca15mp_proc_info, #object
__v7_ca15mp_proc_info:
.long 0x410fc0f0
.long 0xff0ffff0
__v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup, proc_fns = ca15_processor_functions
.size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
/*
* Broadcom Corporation Brahma-B15 processor.
*/
.type __v7_b15mp_proc_info, #object
__v7_b15mp_proc_info:
.long 0x420f00f0
.long 0xff0ffff0
__v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup, proc_fns = ca15_processor_functions, cache_fns = b15_cache_fns
.size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info
/*
* ARM Ltd. Cortex A17 processor.
*/
.type __v7_ca17mp_proc_info, #object
__v7_ca17mp_proc_info:
.long 0x410fc0e0
.long 0xff0ffff0
__v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
.size __v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info
/* ARM Ltd. Cortex A73 processor */
.type __v7_ca73_proc_info, #object
__v7_ca73_proc_info:
.long 0x410fd090
.long 0xff0ffff0
__v7_proc __v7_ca73_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
.size __v7_ca73_proc_info, . - __v7_ca73_proc_info
/* ARM Ltd. Cortex A75 processor */
.type __v7_ca75_proc_info, #object
__v7_ca75_proc_info:
.long 0x410fd0a0
.long 0xff0ffff0
__v7_proc __v7_ca75_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
.size __v7_ca75_proc_info, . - __v7_ca75_proc_info
/*
* Qualcomm Inc. Krait processors.
*/
.type __krait_proc_info, #object
__krait_proc_info:
.long 0x510f0400 @ Required ID value
.long 0xff0ffc00 @ Mask for ID
/*
* Some Krait processors don't indicate support for SDIV and UDIV
* instructions in the ARM instruction set, even though they actually
* do support them. They also don't indicate support for fused multiply
* instructions even though they actually do support them.
*/
__v7_proc __krait_proc_info, __v7_setup, hwcaps = HWCAP_IDIV | HWCAP_VFPv4
.size __krait_proc_info, . - __krait_proc_info
/*
* Match any ARMv7 processor core.
*/
.type __v7_proc_info, #object
__v7_proc_info:
.long 0x000f0000 @ Required ID value
.long 0x000f0000 @ Mask for ID
__v7_proc __v7_proc_info, __v7_setup
.size __v7_proc_info, . - __v7_proc_info
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,653
|
arch/arm/mm/tlb-v7.S
|
/*
* linux/arch/arm/mm/tlb-v7.S
*
* Copyright (C) 1997-2002 Russell King
* Modified for ARMv7 by Catalin Marinas
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ARM architecture version 7 TLB handling functions.
* These assume a split I/D TLB.
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"
/*
* v7wbi_flush_user_tlb_range(start, end, vma)
*
* Invalidate a range of TLB entries in the specified address space.
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
* - vma - vma_struct describing address range
*
* It is assumed that:
* - the "Invalidate single entry" instruction will invalidate
* both the I and the D TLBs on Harvard-style TLBs
*/
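/*
 * Each invalidate below targets one MVA+ASID: the loop address is
 * formed as (page-aligned VA | ASID), so only entries belonging to
 * this mm are hit; with erratum 720789 the ASID is forced to zero and
 * the all-ASID variant of the operation is used instead.
 */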
ENTRY(v7wbi_flush_user_tlb_range)
vma_vm_mm r3, r2 @ get vma->vm_mm
mmid r3, r3 @ get vm_mm->context.id
dsb ish
mov r0, r0, lsr #PAGE_SHIFT @ align address
mov r1, r1, lsr #PAGE_SHIFT
asid r3, r3 @ mask ASID
#ifdef CONFIG_ARM_ERRATA_720789
ALT_SMP(W(mov) r3, #0 )
ALT_UP(W(nop) )
#endif
orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
mov r1, r1, lsl #PAGE_SHIFT
1:
#ifdef CONFIG_ARM_ERRATA_720789
ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable)
#else
ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
#endif
ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
dsb ish
ret lr
ENDPROC(v7wbi_flush_user_tlb_range)
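As a rough user-space model of the address arithmetic above: the start address is truncated to a page boundary, the 8-bit ASID is merged into the low bits to form the MVA operand, and the loop invalidates one page at a time until it reaches the (exclusive, page-truncated) end. The function and helper names below are illustrative, not kernel APIs.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)

/* stand-in for the "TLB invalidate by MVA" coprocessor write */
static void tlb_invalidate_mva(uint32_t mva)
{
	printf("invalidate MVA|ASID = %#010x\n", (unsigned int)mva);
}

static void flush_user_tlb_range(uint32_t start, uint32_t end, uint32_t asid)
{
	/* mirror the assembly: shift the page offset out, then back in */
	uint32_t addr = (start >> PAGE_SHIFT) << PAGE_SHIFT;
	uint32_t last = (end   >> PAGE_SHIFT) << PAGE_SHIFT;
	uint32_t mva  = addr | (asid & 0xff);    /* ASID lives in the low byte */

	do {                                     /* at least one page is flushed */
		tlb_invalidate_mva(mva);
		mva += PAGE_SIZE;
	} while (mva < last);
}

int main(void)
{
	flush_user_tlb_range(0x40001234, 0x40004000, 5);
	return 0;
}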
/*
* v7wbi_flush_kern_tlb_range(start,end)
*
* Invalidate a range of kernel TLB entries
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*/
ENTRY(v7wbi_flush_kern_tlb_range)
dsb ish
mov r0, r0, lsr #PAGE_SHIFT @ align address
mov r1, r1, lsr #PAGE_SHIFT
mov r0, r0, lsl #PAGE_SHIFT
mov r1, r1, lsl #PAGE_SHIFT
1:
#ifdef CONFIG_ARM_ERRATA_720789
ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable)
#else
ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
#endif
ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
dsb ish
isb
ret lr
ENDPROC(v7wbi_flush_kern_tlb_range)
__INIT
/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
define_tlb_functions v7wbi, v7wbi_tlb_flags_up, flags_smp=v7wbi_tlb_flags_smp
arch/arm/mm/proc-arm740.S
/*
 * linux/arch/arm/mm/proc-arm740.S: utility functions for ARM740
*
* Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
.text
/*
* cpu_arm740_proc_init()
* cpu_arm740_do_idle()
* cpu_arm740_dcache_clean_area()
* cpu_arm740_switch_mm()
*
* These are not required.
*/
ENTRY(cpu_arm740_proc_init)
ENTRY(cpu_arm740_do_idle)
ENTRY(cpu_arm740_dcache_clean_area)
ENTRY(cpu_arm740_switch_mm)
ret lr
/*
* cpu_arm740_proc_fin()
*/
ENTRY(cpu_arm740_proc_fin)
mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #0x3f000000 @ bank/f/lock/s
bic r0, r0, #0x0000000c @ w-buffer/cache
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
/*
* cpu_arm740_reset(loc)
* Params : r0 = address to jump to
* Notes : This sets up everything for a reset
*/
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm740_reset)
mov ip, #0
mcr p15, 0, ip, c7, c0, 0 @ invalidate cache
mrc p15, 0, ip, c1, c0, 0 @ get ctrl register
bic ip, ip, #0x0000000c @ ............wc..
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
ENDPROC(cpu_arm740_reset)
.popsection
.type __arm740_setup, #function
__arm740_setup:
mov r0, #0
mcr p15, 0, r0, c7, c0, 0 @ invalidate caches
mcr p15, 0, r0, c6, c3 @ disable area 3~7
mcr p15, 0, r0, c6, c4
mcr p15, 0, r0, c6, c5
mcr p15, 0, r0, c6, c6
mcr p15, 0, r0, c6, c7
mov r0, #0x0000003F @ base = 0, size = 4GB
mcr p15, 0, r0, c6, c0 @ set area 0, default
ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
ldr r3, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
mov r4, #10 @ 11 is the minimum (4KB)
1: add r4, r4, #1 @ area size *= 2
movs r3, r3, lsr #1
bne 1b @ count not zero r-shift
orr r0, r0, r4, lsl #1 @ the area register value
orr r0, r0, #1 @ set enable bit
mcr p15, 0, r0, c6, c1 @ set area 1, RAM
ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
ldr r3, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
cmp r3, #0
moveq r0, #0
beq 2f
mov r4, #10 @ 11 is the minimum (4KB)
1: add r4, r4, #1 @ area size *= 2
movs r3, r3, lsr #1
bne 1b @ count not zero r-shift
orr r0, r0, r4, lsl #1 @ the area register value
orr r0, r0, #1 @ set enable bit
2: mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH
mov r0, #0x06
mcr p15, 0, r0, c2, c0 @ Region 1&2 cacheable
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mov r0, #0x00 @ disable whole write buffer
#else
mov r0, #0x02 @ Region 1 write buffered
#endif
mcr p15, 0, r0, c3, c0
mov r0, #0x10000
sub r0, r0, #1 @ r0 = 0xffff
mcr p15, 0, r0, c5, c0 @ all read/write access
mrc p15, 0, r0, c1, c0 @ get control register
bic r0, r0, #0x3F000000 @ set to standard caching mode
@ need some benchmark
orr r0, r0, #0x0000000d @ MPU/Cache/WB
ret lr
.size __arm740_setup, . - __arm740_setup
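The shift loop above converts a byte count into the ARM740 MPU "area size" field, where a field value of 11 means 4KB and each increment doubles the region. A hedged C sketch of how the region-register value comes together, assuming (as the assembly does) a power-of-two size of at least 4KB; the function name is a placeholder, not a kernel helper.

#include <stdint.h>
#include <stdio.h>

static uint32_t mpu_region(uint32_t base, uint32_t size)
{
	uint32_t pages = size >> 12;   /* size in 4KB pages */
	uint32_t field = 10;           /* 11 is the minimum (4KB) */

	while (pages) {                /* field ends up as log2(size) - 1 */
		field++;
		pages >>= 1;
	}
	/* base[31:12] | size field in bits [5:1] | enable in bit 0 */
	return (base & 0xFFFFF000) | (field << 1) | 1;
}

int main(void)
{
	/* e.g. 64MB of RAM at physical address 0 */
	printf("area 1 register = %#010x\n", (unsigned int)mpu_region(0x00000000, 64u << 20));
	return 0;
}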
__INITDATA
@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
define_processor_functions arm740, dabort=v4t_late_abort, pabort=legacy_pabort, nommu=1
.section ".rodata"
string cpu_arch_name, "armv4"
string cpu_elf_name, "v4"
string cpu_arm740_name, "ARM740T"
.align
.section ".proc.info.init", #alloc
.type __arm740_proc_info,#object
__arm740_proc_info:
.long 0x41807400
.long 0xfffffff0
.long 0
.long 0
initfn __arm740_setup, __arm740_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT
.long cpu_arm740_name
.long arm740_processor_functions
.long 0
.long 0
.long v4_cache_fns @ cache model
.size __arm740_proc_info, . - __arm740_proc_info
arch/arm/mm/proc-fa526.S
/*
* linux/arch/arm/mm/proc-fa526.S: MMU functions for FA526
*
* Written by : Luke Lee
* Copyright (C) 2005 Faraday Corp.
* Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*
* These are the low level assembler for performing cache and TLB
* functions on the fa526.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
#define CACHE_DLINESIZE 16
.text
/*
* cpu_fa526_proc_init()
*/
ENTRY(cpu_fa526_proc_init)
ret lr
/*
* cpu_fa526_proc_fin()
*/
ENTRY(cpu_fa526_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
nop
nop
ret lr
/*
* cpu_fa526_reset(loc)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* loc: location to jump to for soft reset
*/
.align 4
.pushsection .idmap.text, "ax"
ENTRY(cpu_fa526_reset)
/* TODO: Use CP8 if possible... */
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
bic ip, ip, #0x0800 @ BTB off
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
nop
nop
ret r0
ENDPROC(cpu_fa526_reset)
.popsection
/*
* cpu_fa526_do_idle()
*/
.align 4
ENTRY(cpu_fa526_do_idle)
ret lr
ENTRY(cpu_fa526_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
/* =============================== PageTable ============================== */
/*
* cpu_fa526_switch_mm(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 4
ENTRY(cpu_fa526_switch_mm)
#ifdef CONFIG_MMU
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else
mcr p15, 0, ip, c7, c14, 0 @ clean and invalidate whole D cache
#endif
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c5, 6 @ invalidate BTB since mm changed
mcr p15, 0, ip, c7, c10, 4 @ data write barrier
mcr p15, 0, ip, c7, c5, 4 @ prefetch flush
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate UTLB
#endif
ret lr
/*
* cpu_fa526_set_pte_ext(ptep, pte, ext)
*
* Set a PTE and flush it out
*/
.align 4
ENTRY(cpu_fa526_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif
ret lr
.type __fa526_setup, #function
__fa526_setup:
/* On return of this routine, r0 must carry correct flags for CFG register */
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
#endif
mcr p15, 0, r0, c7, c5, 5 @ invalidate IScratchpad RAM
mov r0, #1
mcr p15, 0, r0, c1, c1, 0 @ turn-on ECR
mov r0, #0
mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB All
mcr p15, 0, r0, c7, c10, 4 @ data write barrier
mcr p15, 0, r0, c7, c5, 4 @ prefetch flush
mov r0, #0x1f @ Domains 0, 1 = manager, 2 = client
mcr p15, 0, r0, c3, c0 @ load domain access register
mrc p15, 0, r0, c1, c0 @ get control register v4
ldr r5, fa526_cr1_clear
bic r0, r0, r5
ldr r5, fa526_cr1_set
orr r0, r0, r5
ret lr
.size __fa526_setup, . - __fa526_setup
/*
* .RVI ZFRS BLDP WCAM
* ..11 1001 .111 1101
*
*/
.type fa526_cr1_clear, #object
.type fa526_cr1_set, #object
fa526_cr1_clear:
.word 0x3f3f
fa526_cr1_set:
.word 0x397D
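The clear/set pair above is applied in __fa526_setup the usual crval way: read the control register, clear the bits named by fa526_cr1_clear, then OR in fa526_cr1_set. A tiny C sketch of that read-modify-write; read_cr() and its return value are hypothetical placeholders for the mrc on c1, c0.

#include <stdint.h>
#include <stdio.h>

#define FA526_CR1_CLEAR 0x3f3f
#define FA526_CR1_SET   0x397D

static uint32_t read_cr(void) { return 0x0000107A; }   /* made-up reset-time value */

int main(void)
{
	uint32_t cr = read_cr();

	cr &= ~(uint32_t)FA526_CR1_CLEAR;   /* bic r0, r0, r5 */
	cr |=  (uint32_t)FA526_CR1_SET;     /* orr r0, r0, r5 */

	printf("control register to program: %#010x\n", (unsigned int)cr);
	return 0;
}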
__INITDATA
@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
define_processor_functions fa526, dabort=v4_early_abort, pabort=legacy_pabort
.section ".rodata"
string cpu_arch_name, "armv4"
string cpu_elf_name, "v4"
string cpu_fa526_name, "FA526"
.align
.section ".proc.info.init", #alloc
.type __fa526_proc_info,#object
__fa526_proc_info:
.long 0x66015261
.long 0xff01fff1
.long PMD_TYPE_SECT | \
PMD_SECT_BUFFERABLE | \
PMD_SECT_CACHEABLE | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
.long PMD_TYPE_SECT | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
initfn __fa526_setup, __fa526_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF
.long cpu_fa526_name
.long fa526_processor_functions
.long fa_tlb_fns
.long fa_user_fns
.long fa_cache_fns
.size __fa526_proc_info, . - __fa526_proc_info
arch/arm/mm/proc-arm1020e.S
/*
 * linux/arch/arm/mm/proc-arm1020e.S: MMU functions for ARM1020E
*
* Copyright (C) 2000 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
* hacked for non-paged-MM by Hyok S. Choi, 2003.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
* These are the low level assembler for performing cache and TLB
* functions on the arm1020e.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
/*
* This is the maximum size of an area which will be invalidated
* using the single invalidate entry instructions. Anything larger
* than this, and we go for the whole cache.
*
* This value should be chosen such that we choose the cheapest
* alternative.
*/
#define MAX_AREA_SIZE 32768
/*
* The size of one data cache line.
*/
#define CACHE_DLINESIZE 32
/*
* The number of data cache segments.
*/
#define CACHE_DSEGMENTS 16
/*
* The number of lines in a cache segment.
*/
#define CACHE_DENTRIES 64
/*
* This is the size at which it becomes more efficient to
* clean the whole cache, rather than using the individual
* cache line maintenance instructions.
*/
#define CACHE_DLIMIT 32768
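A small C sketch of the policy these constants express: ranges shorter than CACHE_DLIMIT are maintained line by line, while anything at or above the limit falls back to cleaning and invalidating the whole data cache. The helper functions stand in for the mcr sequences later in this file.

#include <stdint.h>
#include <stdio.h>

#define SKETCH_DLINESIZE 32
#define SKETCH_DLIMIT    32768

static void clean_inv_dcache_line(uint32_t addr) { printf("line  %#x\n", (unsigned int)addr); }
static void clean_inv_whole_dcache(void)         { printf("whole cache\n"); }

static void flush_range(uint32_t start, uint32_t end)
{
	if (end - start >= SKETCH_DLIMIT) {   /* cheaper to do the lot */
		clean_inv_whole_dcache();
		return;
	}
	/* per-line loop; the hardware ignores the line-offset bits of the address */
	for (uint32_t a = start; a < end; a += SKETCH_DLINESIZE)
		clean_inv_dcache_line(a);
}

int main(void)
{
	flush_range(0x8000, 0x8100);    /* small range: per-line maintenance */
	flush_range(0x8000, 0x18000);   /* 64KB: whole-cache fallback */
	return 0;
}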
.text
/*
* cpu_arm1020e_proc_init()
*/
ENTRY(cpu_arm1020e_proc_init)
ret lr
/*
* cpu_arm1020e_proc_fin()
*/
ENTRY(cpu_arm1020e_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
/*
* cpu_arm1020e_reset(loc)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm1020e_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
ENDPROC(cpu_arm1020e_reset)
.popsection
/*
* cpu_arm1020e_do_idle()
*/
.align 5
ENTRY(cpu_arm1020e_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr
/* ================================= CACHE ================================ */
.align 5
/*
* flush_icache_all()
*
* Unconditionally clean and invalidate the entire icache.
*/
ENTRY(arm1020e_flush_icache_all)
#ifndef CONFIG_CPU_ICACHE_DISABLE
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
#endif
ret lr
ENDPROC(arm1020e_flush_icache_all)
/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
* space.
*/
ENTRY(arm1020e_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(arm1020e_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index
subs r3, r3, #1 << 26
bcs 2b @ entries 63 to 0
subs r1, r1, #1 << 5
bcs 1b @ segments 15 to 0
#endif
tst r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
/*
* flush_user_cache_range(start, end, flags)
*
* Invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (inclusive)
* - end - end address (exclusive)
* - flags - vm_flags for this space
*/
ENTRY(arm1020e_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
bhs __flush_whole_cache
#ifndef CONFIG_CPU_DCACHE_DISABLE
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
tst r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm1020e_coherent_kern_range)
/* FALLTHROUGH */
/*
* coherent_user_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm1020e_coherent_user_range)
mov ip, #0
bic r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
#endif
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
/*
* flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - kernel address
* - size - region size
*/
ENTRY(arm1020e_flush_kern_dcache_area)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/
arm1020e_dma_inv_range:
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
/*
* dma_clean_range(start, end)
*
* Clean the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/
arm1020e_dma_clean_range:
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm1020e_dma_flush_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
* - dir - DMA direction
*/
ENTRY(arm1020e_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq arm1020e_dma_clean_range
bcs arm1020e_dma_inv_range
b arm1020e_dma_flush_range
ENDPROC(arm1020e_dma_map_area)
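Expressed as C, the three-way branch above dispatches on the DMA direction: DMA_TO_DEVICE only needs a clean (write back), DMA_FROM_DEVICE only an invalidate, and the bidirectional case gets the combined flush. The enum mirrors the Linux numbering; the range helpers are placeholders for the routines above.

#include <stdio.h>

enum dma_data_direction {            /* matches the Linux numbering */
	DMA_BIDIRECTIONAL = 0,
	DMA_TO_DEVICE     = 1,
	DMA_FROM_DEVICE   = 2,
};

static void dma_clean_range(unsigned long s, unsigned long e) { printf("clean %#lx-%#lx\n", s, e); }
static void dma_inv_range(unsigned long s, unsigned long e)   { printf("inv   %#lx-%#lx\n", s, e); }
static void dma_flush_range(unsigned long s, unsigned long e) { printf("flush %#lx-%#lx\n", s, e); }

static void dma_map_area(unsigned long start, unsigned long size, enum dma_data_direction dir)
{
	unsigned long end = start + size;   /* the assembly does "add r1, r1, r0" first */

	if (dir == DMA_TO_DEVICE)           /* CPU wrote, device will read: write back only */
		dma_clean_range(start, end);
	else if (dir >= DMA_FROM_DEVICE)    /* device will write: discard stale CPU lines */
		dma_inv_range(start, end);
	else                                /* bidirectional: clean and invalidate */
		dma_flush_range(start, end);
}

int main(void)
{
	dma_map_area(0x1000, 0x200, DMA_TO_DEVICE);
	dma_map_area(0x1000, 0x200, DMA_FROM_DEVICE);
	dma_map_area(0x1000, 0x200, DMA_BIDIRECTIONAL);
	return 0;
}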
/*
* dma_unmap_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
* - dir - DMA direction
*/
ENTRY(arm1020e_dma_unmap_area)
ret lr
ENDPROC(arm1020e_dma_unmap_area)
.globl arm1020e_flush_kern_cache_louis
.equ arm1020e_flush_kern_cache_louis, arm1020e_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm1020e
.align 5
ENTRY(cpu_arm1020e_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
mov ip, #0
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
#endif
ret lr
/* =============================== PageTable ============================== */
/*
* cpu_arm1020e_switch_mm(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
ENTRY(cpu_arm1020e_switch_mm)
#ifdef CONFIG_MMU
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r3, c7, c10, 4 @ drain WB
mov r1, #0xF @ 16 segments
1: mov r3, #0x3F @ 64 entries
2: mov ip, r3, LSL #26 @ shift up entry
orr ip, ip, r1, LSL #5 @ shift in/up index
mcr p15, 0, ip, c7, c14, 2 @ Clean & Inval DCache entry
mov ip, #0
subs r3, r3, #1
cmp r3, #0
bge 2b @ entries 3F to 0
subs r1, r1, #1
cmp r1, #0
bge 1b @ segments 15 to 0
#endif
mov r1, #0
#ifndef CONFIG_CPU_ICACHE_DISABLE
mcr p15, 0, r1, c7, c5, 0 @ invalidate I cache
#endif
mcr p15, 0, r1, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
#endif
ret lr
/*
 * cpu_arm1020e_set_pte_ext(ptep, pte, ext)
*
* Set a PTE and flush it out
*/
.align 5
ENTRY(cpu_arm1020e_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#endif
#endif /* CONFIG_MMU */
ret lr
.type __arm1020e_setup, #function
__arm1020e_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
#endif
adr r5, arm1020e_crval
ldmia r5, {r5, r6}
mrc p15, 0, r0, c1, c0 @ get control register v4
bic r0, r0, r5
orr r0, r0, r6
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
orr r0, r0, #0x4000 @ .R.. .... .... ....
#endif
ret lr
.size __arm1020e_setup, . - __arm1020e_setup
/*
* R
* .RVI ZFRS BLDP WCAM
* .011 1001 ..11 0101
*/
.type arm1020e_crval, #object
arm1020e_crval:
crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930
__INITDATA
@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
define_processor_functions arm1020e, dabort=v4t_early_abort, pabort=legacy_pabort
.section ".rodata"
string cpu_arch_name, "armv5te"
string cpu_elf_name, "v5"
string cpu_arm1020e_name, "ARM1020E"
.align
.section ".proc.info.init", #alloc
.type __arm1020e_proc_info,#object
__arm1020e_proc_info:
.long 0x4105a200 @ ARM 1020TE (Architecture v5TE)
.long 0xff0ffff0
.long PMD_TYPE_SECT | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
.long PMD_TYPE_SECT | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
initfn __arm1020e_setup, __arm1020e_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_EDSP
.long cpu_arm1020e_name
.long arm1020e_processor_functions
.long v4wbi_tlb_fns
.long v4wb_user_fns
.long arm1020e_cache_fns
.size __arm1020e_proc_info, . - __arm1020e_proc_info
arch/arm/mm/proc-v7-3level.S
/*
* arch/arm/mm/proc-v7-3level.S
*
* Copyright (C) 2001 Deep Blue Solutions Ltd.
* Copyright (C) 2011 ARM Ltd.
* Author: Catalin Marinas <catalin.marinas@arm.com>
* based on arch/arm/mm/proc-v7-2level.S
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/assembler.h>
#define TTB_IRGN_NC (0 << 8)
#define TTB_IRGN_WBWA (1 << 8)
#define TTB_IRGN_WT (2 << 8)
#define TTB_IRGN_WB (3 << 8)
#define TTB_RGN_NC (0 << 10)
#define TTB_RGN_OC_WBWA (1 << 10)
#define TTB_RGN_OC_WT (2 << 10)
#define TTB_RGN_OC_WB (3 << 10)
#define TTB_S (3 << 12)
#define TTB_EAE (1 << 31)
/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
#define TTB_FLAGS_UP (TTB_IRGN_WB|TTB_RGN_OC_WB)
#define PMD_FLAGS_UP (PMD_SECT_WB)
/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
#define TTB_FLAGS_SMP (TTB_IRGN_WBWA|TTB_S|TTB_RGN_OC_WBWA)
#define PMD_FLAGS_SMP (PMD_SECT_WBWA|PMD_SECT_S)
#ifndef __ARMEB__
# define rpgdl r0
# define rpgdh r1
#else
# define rpgdl r1
# define rpgdh r0
#endif
/*
* cpu_v7_switch_mm(pgd_phys, tsk)
*
* Set the translation table base pointer to be pgd_phys (physical address of
* the new TTB).
*/
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
mmid r2, r2
asid r2, r2
orr rpgdh, rpgdh, r2, lsl #(48 - 32) @ upper 32-bits of pgd
mcrr p15, 0, rpgdl, rpgdh, c2 @ set TTB 0
isb
#endif
ret lr
ENDPROC(cpu_v7_switch_mm)
#ifdef __ARMEB__
#define rl r3
#define rh r2
#else
#define rl r2
#define rh r3
#endif
/*
* cpu_v7_set_pte_ext(ptep, pte)
*
* Set a level 2 translation table entry.
* - ptep - pointer to level 3 translation table entry
* - pte - PTE value to store (64-bit in r2 and r3)
*/
ENTRY(cpu_v7_set_pte_ext)
#ifdef CONFIG_MMU
tst rl, #L_PTE_VALID
beq 1f
tst rh, #1 << (57 - 32) @ L_PTE_NONE
bicne rl, #L_PTE_VALID
bne 1f
eor ip, rh, #1 << (55 - 32) @ toggle L_PTE_DIRTY in temp reg to
@ test for !L_PTE_DIRTY || L_PTE_RDONLY
tst ip, #1 << (55 - 32) | 1 << (58 - 32)
orrne rl, #PTE_AP2
biceq rl, #PTE_AP2
1: strd r2, r3, [r0]
ALT_SMP(W(nop))
ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
#endif
ret lr
ENDPROC(cpu_v7_set_pte_ext)
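In C terms, the AP2 handling above implements software dirty-bit tracking: hardware write permission is granted only when the Linux PTE is valid, writable and already dirty; otherwise AP[2] forces the mapping read-only so the first write faults and the kernel can mark the page dirty. The bit positions below follow the LPAE layout this file assumes, but are restated locally for illustration only.

#include <stdint.h>
#include <stdio.h>

#define L_PTE_VALID   (UINT64_C(1) << 0)
#define L_PTE_DIRTY   (UINT64_C(1) << 55)
#define L_PTE_NONE    (UINT64_C(1) << 57)
#define L_PTE_RDONLY  (UINT64_C(1) << 58)
#define PTE_AP2       (UINT64_C(1) << 7)    /* AP[2]: 1 = read-only at hardware level */

static uint64_t adjust_pte(uint64_t pte)
{
	if (!(pte & L_PTE_VALID))
		return pte;                      /* not present: store as-is */

	if (pte & L_PTE_NONE)
		return pte & ~L_PTE_VALID;       /* PROT_NONE: strip the valid bit */

	if (!(pte & L_PTE_DIRTY) || (pte & L_PTE_RDONLY))
		return pte | PTE_AP2;            /* clean or read-only: hardware read-only */

	return pte & ~PTE_AP2;               /* dirty and writable: allow hardware writes */
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)adjust_pte(L_PTE_VALID | L_PTE_DIRTY));
	printf("%#llx\n", (unsigned long long)adjust_pte(L_PTE_VALID | L_PTE_RDONLY));
	return 0;
}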
/*
* Memory region attributes for LPAE (defined in pgtable-3level.h):
*
* n = AttrIndx[2:0]
*
* n MAIR
* UNCACHED 000 00000000
* BUFFERABLE 001 01000100
* DEV_WC 001 01000100
* WRITETHROUGH 010 10101010
* WRITEBACK 011 11101110
* DEV_CACHED 011 11101110
* DEV_SHARED 100 00000100
* DEV_NONSHARED 100 00000100
* unused 101
* unused 110
* WRITEALLOC 111 11111111
*/
.equ PRRR, 0xeeaa4400 @ MAIR0
.equ NMRR, 0xff000004 @ MAIR1
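The two .equ values are simply the table above packed byte-wise: attribute index n occupies byte n of MAIR0 for n = 0..3 and byte n-4 of MAIR1 for n = 4..7. A quick self-contained C check of that packing, using the attribute bytes from the table:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* attribute byte for each AttrIndx value, copied from the table above */
	const uint8_t attr[8] = {
		0x00,   /* 0: UNCACHED                   */
		0x44,   /* 1: BUFFERABLE / DEV_WC        */
		0xaa,   /* 2: WRITETHROUGH               */
		0xee,   /* 3: WRITEBACK / DEV_CACHED     */
		0x04,   /* 4: DEV_SHARED / DEV_NONSHARED */
		0x00,   /* 5: unused                     */
		0x00,   /* 6: unused                     */
		0xff,   /* 7: WRITEALLOC                 */
	};
	uint32_t mair0 = 0, mair1 = 0;

	for (int n = 0; n < 4; n++)
		mair0 |= (uint32_t)attr[n] << (8 * n);
	for (int n = 4; n < 8; n++)
		mair1 |= (uint32_t)attr[n] << (8 * (n - 4));

	printf("MAIR0 = %#010x (expect 0xeeaa4400)\n", (unsigned int)mair0);
	printf("MAIR1 = %#010x (expect 0xff000004)\n", (unsigned int)mair1);
	return 0;
}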
/*
* Macro for setting up the TTBRx and TTBCR registers.
* - \ttbr1 updated.
*/
.macro v7_ttb_setup, zero, ttbr0l, ttbr0h, ttbr1, tmp
ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address
cmp \ttbr1, \tmp, lsr #12 @ PHYS_OFFSET > PAGE_OFFSET?
mov \tmp, #TTB_EAE @ for TTB control register
ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP)
ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP)
ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP << 16)
ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP << 16)
/*
* Only use split TTBRs if PHYS_OFFSET <= PAGE_OFFSET (cmp above),
* otherwise booting secondary CPUs would end up using TTBR1 for the
* identity mapping set up in TTBR0.
*/
orrls \tmp, \tmp, #TTBR1_SIZE @ TTBCR.T1SZ
mcr p15, 0, \tmp, c2, c0, 2 @ TTBCR
mov \tmp, \ttbr1, lsr #20
mov \ttbr1, \ttbr1, lsl #12
addls \ttbr1, \ttbr1, #TTBR1_OFFSET
mcrr p15, 1, \ttbr1, \tmp, c2 @ load TTBR1
.endm
/*
* AT
* TFR EV X F IHD LR S
* .EEE ..EE PUI. .TAT 4RVI ZWRS BLDP WCAM
* rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
* 11 0 110 0 0011 1100 .111 1101 < we want
*/
.align 2
.type v7_crval, #object
v7_crval:
crval clear=0x0122c302, mmuset=0x30c03c7d, ucset=0x00c01c7c