Columns: repo_id, size, file_path, content
AirFortressIlikara/LS2K0300-linux-4.19
14,364
arch/arm/mm/proc-xsc3.S
/* * linux/arch/arm/mm/proc-xsc3.S * * Original Author: Matthew Gilbert * Current Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> * * Copyright 2004 (C) Intel Corp. * Copyright 2005 (C) MontaVista Software, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * MMU functions for the Intel XScale3 Core (XSC3). The XSC3 core is * an extension to Intel's original XScale core that adds the following * features: * * - ARMv6 Supersections * - Low Locality Reference pages (replaces mini-cache) * - 36-bit addressing * - L2 cache * - Cache coherency if chipset supports it * * Based on original XScale code by Nicolas Pitre. */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/hwcap.h> #include <asm/pgtable.h> #include <asm/pgtable-hwdef.h> #include <asm/page.h> #include <asm/ptrace.h> #include "proc-macros.S" /* * This is the maximum size of an area which will be flushed. If the * area is larger than this, then we flush the whole cache. */ #define MAX_AREA_SIZE 32768 /* * The cache line size of the L1 I, L1 D and unified L2 cache. */ #define CACHELINESIZE 32 /* * The size of the L1 D cache. */ #define CACHESIZE 32768 /* * This macro is used to wait for a CP15 write and is needed when we * have to ensure that the last operation to the coprocessor was * completed before continuing with operation. */ .macro cpwait_ret, lr, rd mrc p15, 0, \rd, c2, c0, 0 @ arbitrary read of cp15 sub pc, \lr, \rd, LSR #32 @ wait for completion and @ flush instruction pipeline .endm /* * This macro cleans and invalidates the entire L1 D cache. */ .macro clean_d_cache rd, rs mov \rd, #0x1f00 orr \rd, \rd, #0x00e0 1: mcr p15, 0, \rd, c7, c14, 2 @ clean/invalidate L1 D line adds \rd, \rd, #0x40000000 bcc 1b subs \rd, \rd, #0x20 bpl 1b .endm .text /* * cpu_xsc3_proc_init() * * Nothing too exciting at the moment */ ENTRY(cpu_xsc3_proc_init) ret lr /* * cpu_xsc3_proc_fin() */ ENTRY(cpu_xsc3_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1800 @ ...IZ........... bic r0, r0, #0x0006 @ .............CA. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_xsc3_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * loc: location to jump to for soft reset */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_xsc3_reset) mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE msr cpsr_c, r1 @ reset CPSR mrc p15, 0, r1, c1, c0, 0 @ ctrl register bic r1, r1, #0x3900 @ ..VIZ..S........ bic r1, r1, #0x0086 @ ........B....CA. mcr p15, 0, r1, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c7, c7, 0 @ invalidate L1 caches and BTB bic r1, r1, #0x0001 @ ...............M mcr p15, 0, r1, c1, c0, 0 @ ctrl register @ CAUTION: MMU turned off from this point. We count on the pipeline @ already containing those two last instructions to survive. mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs ret r0 ENDPROC(cpu_xsc3_reset) .popsection /* * cpu_xsc3_do_idle() * * Cause the processor to idle * * For now we do nothing but go to idle mode for every case * * XScale supports clock switching, but using idle mode support * allows external hardware to react to system state changes. 
*/ .align 5 ENTRY(cpu_xsc3_do_idle) mov r0, #1 mcr p14, 0, r0, c7, c0, 0 @ go to idle ret lr /* ================================= CACHE ================================ */ /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(xsc3_flush_icache_all) mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache ret lr ENDPROC(xsc3_flush_icache_all) /* * flush_user_cache_all() * * Invalidate all cache entries in a particular address * space. */ ENTRY(xsc3_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. */ ENTRY(xsc3_flush_kern_cache_all) mov r2, #VM_EXEC mov ip, #0 __flush_whole_cache: clean_d_cache r0, r1 tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c5, 0 @ invalidate L1 I cache and BTB mcrne p15, 0, ip, c7, c10, 4 @ data write barrier mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush ret lr /* * flush_user_cache_range(start, end, vm_flags) * * Invalidate a range of cache entries in the specified * address space. * * - start - start address (may not be aligned) * - end - end address (exclusive, may not be aligned) * - vma - vma_area_struct describing address space */ .align 5 ENTRY(xsc3_flush_user_cache_range) mov ip, #0 sub r3, r1, r0 @ calculate total size cmp r3, #MAX_AREA_SIZE bhs __flush_whole_cache 1: tst r2, #VM_EXEC mcrne p15, 0, r0, c7, c5, 1 @ invalidate L1 I line mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line add r0, r0, #CACHELINESIZE cmp r0, r1 blo 1b tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB mcrne p15, 0, ip, c7, c10, 4 @ data write barrier mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the I cache and the D cache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address * * Note: single I-cache line invalidation isn't used here since * it also trashes the mini I-cache used by JTAG debuggers. */ ENTRY(xsc3_coherent_kern_range) /* FALLTHROUGH */ ENTRY(xsc3_coherent_user_range) bic r0, r0, #CACHELINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line add r0, r0, #CACHELINESIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate L1 I cache and BTB mcr p15, 0, r0, c7, c10, 4 @ data write barrier mcr p15, 0, r0, c7, c5, 4 @ prefetch flush ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache. * * - addr - kernel address * - size - region size */ ENTRY(xsc3_flush_kern_dcache_area) add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line add r0, r0, #CACHELINESIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate L1 I cache and BTB mcr p15, 0, r0, c7, c10, 4 @ data write barrier mcr p15, 0, r0, c7, c5, 4 @ prefetch flush ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. 
* * - start - virtual start address * - end - virtual end address */ xsc3_dma_inv_range: tst r0, #CACHELINESIZE - 1 bic r0, r0, #CACHELINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean L1 D line tst r1, #CACHELINESIZE - 1 mcrne p15, 0, r1, c7, c10, 1 @ clean L1 D line 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate L1 D line add r0, r0, #CACHELINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ data write barrier ret lr /* * dma_clean_range(start, end) * * Clean the specified virtual address range. * * - start - virtual start address * - end - virtual end address */ xsc3_dma_clean_range: bic r0, r0, #CACHELINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line add r0, r0, #CACHELINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ data write barrier ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. * * - start - virtual start address * - end - virtual end address */ ENTRY(xsc3_dma_flush_range) bic r0, r0, #CACHELINESIZE - 1 1: mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line add r0, r0, #CACHELINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ data write barrier ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(xsc3_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq xsc3_dma_clean_range bcs xsc3_dma_inv_range b xsc3_dma_flush_range ENDPROC(xsc3_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(xsc3_dma_unmap_area) ret lr ENDPROC(xsc3_dma_unmap_area) .globl xsc3_flush_kern_cache_louis .equ xsc3_flush_kern_cache_louis, xsc3_flush_kern_cache_all @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions xsc3 ENTRY(cpu_xsc3_dcache_clean_area) 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line add r0, r0, #CACHELINESIZE subs r1, r1, #CACHELINESIZE bhi 1b ret lr /* =============================== PageTable ============================== */ /* * cpu_xsc3_switch_mm(pgd) * * Set the translation base pointer to be as described by pgd. * * pgd: new page tables */ .align 5 ENTRY(cpu_xsc3_switch_mm) clean_d_cache r1, r2 mcr p15, 0, ip, c7, c5, 0 @ invalidate L1 I cache and BTB mcr p15, 0, ip, c7, c10, 4 @ data write barrier mcr p15, 0, ip, c7, c5, 4 @ prefetch flush orr r0, r0, #0x18 @ cache the page table in L2 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs cpwait_ret lr, ip /* * cpu_xsc3_set_pte_ext(ptep, pte, ext) * * Set a PTE and flush it out */ cpu_xsc3_mt_table: .long 0x00 @ L_PTE_MT_UNCACHED .long PTE_EXT_TEX(1) @ L_PTE_MT_BUFFERABLE .long PTE_EXT_TEX(5) | PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK .long PTE_EXT_TEX(1) | PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED .long 0x00 @ unused .long 0x00 @ L_PTE_MT_MINICACHE (not present) .long PTE_EXT_TEX(5) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC (not present?) .long 0x00 @ unused .long PTE_EXT_TEX(1) @ L_PTE_MT_DEV_WC .long 0x00 @ unused .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_DEV_CACHED .long PTE_EXT_TEX(2) @ L_PTE_MT_DEV_NONSHARED .long 0x00 @ unused .long 0x00 @ unused .long 0x00 @ unused .align 5 ENTRY(cpu_xsc3_set_pte_ext) xscale_set_pte_ext_prologue tst r1, #L_PTE_SHARED @ shared? 
and r1, r1, #L_PTE_MT_MASK adr ip, cpu_xsc3_mt_table ldr ip, [ip, r1] orrne r2, r2, #PTE_EXT_COHERENT @ interlock: mask in coherent bit bic r2, r2, #0x0c @ clear old C,B bits orr r2, r2, ip xscale_set_pte_ext_epilogue ret lr .ltorg .align .globl cpu_xsc3_suspend_size .equ cpu_xsc3_suspend_size, 4 * 6 #ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_xsc3_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode mrc p15, 0, r5, c15, c1, 0 @ CP access reg mrc p15, 0, r6, c13, c0, 0 @ PID mrc p15, 0, r7, c3, c0, 0 @ domain ID mrc p15, 0, r8, c1, c0, 1 @ auxiliary control reg mrc p15, 0, r9, c1, c0, 0 @ control reg bic r4, r4, #2 @ clear frequency change bit stmia r0, {r4 - r9} @ store cp regs ldmia sp!, {r4 - r9, pc} ENDPROC(cpu_xsc3_do_suspend) ENTRY(cpu_xsc3_do_resume) ldmia r0, {r4 - r9} @ load cp regs mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB mcr p15, 0, ip, c7, c10, 4 @ drain write (&fill) buffer mcr p15, 0, ip, c7, c5, 4 @ flush prefetch buffer mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs mcr p14, 0, r4, c6, c0, 0 @ clock configuration, turbo mode. mcr p15, 0, r5, c15, c1, 0 @ CP access reg mcr p15, 0, r6, c13, c0, 0 @ PID mcr p15, 0, r7, c3, c0, 0 @ domain ID orr r1, r1, #0x18 @ cache the page table in L2 mcr p15, 0, r1, c2, c0, 0 @ translation table base addr mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg mov r0, r9 @ control register b cpu_resume_mmu ENDPROC(cpu_xsc3_do_resume) #endif .type __xsc3_setup, #function __xsc3_setup: mov r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE msr cpsr_c, r0 mcr p15, 0, ip, c7, c7, 0 @ invalidate L1 caches and BTB mcr p15, 0, ip, c7, c10, 4 @ data write barrier mcr p15, 0, ip, c7, c5, 4 @ prefetch flush mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs orr r4, r4, #0x18 @ cache the page table in L2 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer mov r0, #1 << 6 @ cp6 access for early sched_clock mcr p15, 0, r0, c15, c1, 0 @ write CP access register mrc p15, 0, r0, c1, c0, 1 @ get auxiliary control reg and r0, r0, #2 @ preserve bit P bit setting orr r0, r0, #(1 << 10) @ enable L2 for LLR cache mcr p15, 0, r0, c1, c0, 1 @ set auxiliary control reg adr r5, xsc3_crval ldmia r5, {r5, r6} #ifdef CONFIG_CACHE_XSC3L2 mrc p15, 1, r0, c0, c0, 1 @ get L2 present information ands r0, r0, #0xf8 orrne r6, r6, #(1 << 26) @ enable L2 if present #endif mrc p15, 0, r0, c1, c0, 0 @ get control register bic r0, r0, r5 @ ..V. ..R. .... ..A. orr r0, r0, r6 @ ..VI Z..S .... .C.M (mmu) @ ...I Z..S .... .... (uc) ret lr .size __xsc3_setup, . 
- __xsc3_setup .type xsc3_crval, #object xsc3_crval: crval clear=0x04002202, mmuset=0x00003905, ucset=0x00001900 __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions xsc3, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1 .section ".rodata" string cpu_arch_name, "armv5te" string cpu_elf_name, "v5" string cpu_xsc3_name, "XScale-V3 based processor" .align .section ".proc.info.init", #alloc .macro xsc3_proc_info name:req, cpu_val:req, cpu_mask:req .type __\name\()_proc_info,#object __\name\()_proc_info: .long \cpu_val .long \cpu_mask .long PMD_TYPE_SECT | \ PMD_SECT_BUFFERABLE | \ PMD_SECT_CACHEABLE | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __xsc3_setup, __\name\()_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP .long cpu_xsc3_name .long xsc3_processor_functions .long v4wbi_tlb_fns .long xsc3_mc_user_fns .long xsc3_cache_fns .size __\name\()_proc_info, . - __\name\()_proc_info .endm xsc3_proc_info xsc3, 0x69056000, 0xffffe000 /* Note: PXA935 changed its implementor ID from Intel to Marvell */ xsc3_proc_info xsc3_pxa935, 0x56056000, 0xffffe000
AirFortressIlikara/LS2K0300-linux-4.19
3,196
arch/arm/mm/proc-arm7tdmi.S
/* * linux/arch/arm/mm/proc-arm7tdmi.S: utility functions for ARM7TDMI * * Copyright (C) 2003-2006 Hyok S. Choi <hyok.choi@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/pgtable.h> #include <asm/ptrace.h> #include "proc-macros.S" .text /* * cpu_arm7tdmi_proc_init() * cpu_arm7tdmi_do_idle() * cpu_arm7tdmi_dcache_clean_area() * cpu_arm7tdmi_switch_mm() * * These are not required. */ ENTRY(cpu_arm7tdmi_proc_init) ENTRY(cpu_arm7tdmi_do_idle) ENTRY(cpu_arm7tdmi_dcache_clean_area) ENTRY(cpu_arm7tdmi_switch_mm) ret lr /* * cpu_arm7tdmi_proc_fin() */ ENTRY(cpu_arm7tdmi_proc_fin) ret lr /* * Function: cpu_arm7tdmi_reset(loc) * Params : loc(r0) address to jump to * Purpose : Sets up everything for a reset and jump to the location for soft reset. */ .pushsection .idmap.text, "ax" ENTRY(cpu_arm7tdmi_reset) ret r0 ENDPROC(cpu_arm7tdmi_reset) .popsection .type __arm7tdmi_setup, #function __arm7tdmi_setup: ret lr .size __arm7tdmi_setup, . - __arm7tdmi_setup __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions arm7tdmi, dabort=v4t_late_abort, pabort=legacy_pabort, nommu=1 .section ".rodata" string cpu_arch_name, "armv4t" string cpu_elf_name, "v4" string cpu_arm7tdmi_name, "ARM7TDMI" string cpu_triscenda7_name, "Triscend-A7x" string cpu_at91_name, "Atmel-AT91M40xxx" string cpu_s3c3410_name, "Samsung-S3C3410" string cpu_s3c44b0x_name, "Samsung-S3C44B0x" string cpu_s3c4510b_name, "Samsung-S3C4510B" string cpu_s3c4530_name, "Samsung-S3C4530" string cpu_netarm_name, "NETARM" .align .section ".proc.info.init", #alloc .macro arm7tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, \ extra_hwcaps=0 .type __\name\()_proc_info, #object __\name\()_proc_info: .long \cpu_val .long \cpu_mask .long 0 .long 0 initfn __arm7tdmi_setup, __\name\()_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_26BIT | ( \extra_hwcaps ) .long \cpu_name .long arm7tdmi_processor_functions .long 0 .long 0 .long v4_cache_fns .size __\name\()_proc_info, . - __\name\()_proc_info .endm arm7tdmi_proc_info arm7tdmi, 0x41007700, 0xfff8ff00, \ cpu_arm7tdmi_name arm7tdmi_proc_info triscenda7, 0x0001d2ff, 0x0001ffff, \ cpu_triscenda7_name, extra_hwcaps=HWCAP_THUMB arm7tdmi_proc_info at91, 0x14000040, 0xfff000e0, \ cpu_at91_name, extra_hwcaps=HWCAP_THUMB arm7tdmi_proc_info s3c4510b, 0x36365000, 0xfffff000, \ cpu_s3c4510b_name, extra_hwcaps=HWCAP_THUMB arm7tdmi_proc_info s3c4530, 0x4c000000, 0xfff000e0, \ cpu_s3c4530_name, extra_hwcaps=HWCAP_THUMB arm7tdmi_proc_info s3c3410, 0x34100000, 0xffff0000, \ cpu_s3c3410_name, extra_hwcaps=HWCAP_THUMB arm7tdmi_proc_info s3c44b0x, 0x44b00000, 0xffff0000, \ cpu_s3c44b0x_name, extra_hwcaps=HWCAP_THUMB
AirFortressIlikara/LS2K0300-linux-4.19
12,794
arch/arm/mm/proc-arm926.S
/* * linux/arch/arm/mm/proc-arm926.S: MMU functions for ARM926EJ-S * * Copyright (C) 1999-2001 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. * hacked for non-paged-MM by Hyok S. Choi, 2003. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * These are the low level assembler for performing cache and TLB * functions on the arm926. * * CONFIG_CPU_ARM926_CPU_IDLE -> nohlt */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/ptrace.h> #include "proc-macros.S" /* * This is the maximum size of an area which will be invalidated * using the single invalidate entry instructions. Anything larger * than this, and we go for the whole cache. * * This value should be chosen such that we choose the cheapest * alternative. */ #define CACHE_DLIMIT 16384 /* * the cache line size of the I and D cache */ #define CACHE_DLINESIZE 32 .text /* * cpu_arm926_proc_init() */ ENTRY(cpu_arm926_proc_init) ret lr /* * cpu_arm926_proc_fin() */ ENTRY(cpu_arm926_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_arm926_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * loc: location to jump to for soft reset */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_arm926_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB #ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_arm926_reset) .popsection /* * cpu_arm926_do_idle() * * Called with IRQs disabled */ .align 10 ENTRY(cpu_arm926_do_idle) mov r0, #0 mrc p15, 0, r1, c1, c0, 0 @ Read control register mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer bic r2, r1, #1 << 12 mrs r3, cpsr @ Disable FIQs while Icache orr ip, r3, #PSR_F_BIT @ is disabled msr cpsr_c, ip mcr p15, 0, r2, c1, c0, 0 @ Disable I cache mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable msr cpsr_c, r3 @ Restore FIQ state ret lr /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(arm926_flush_icache_all) mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache ret lr ENDPROC(arm926_flush_icache_all) /* * flush_user_cache_all() * * Clean and invalidate all cache entries in a particular * address space. 
*/ ENTRY(arm926_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. */ ENTRY(arm926_flush_kern_cache_all) mov r2, #VM_EXEC mov ip, #0 __flush_whole_cache: #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache #else 1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate bne 1b #endif tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * flush_user_cache_range(start, end, flags) * * Clean and invalidate a range of cache entries in the * specified address range. * * - start - start address (inclusive) * - end - end address (exclusive) * - flags - vm_flags describing address space */ ENTRY(arm926_flush_user_cache_range) mov ip, #0 sub r3, r1, r0 @ calculate total size cmp r3, #CACHE_DLIMIT bgt __flush_whole_cache 1: tst r2, #VM_EXEC #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE #else mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE #endif cmp r0, r1 blo 1b tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm926_coherent_kern_range) /* FALLTHROUGH */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm926_coherent_user_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB mov r0, #0 ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size */ ENTRY(arm926_flush_kern_dcache_area) add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. 
* * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm926_dma_inv_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry #endif bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_clean_range(start, end) * * Clean the specified virtual address range. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm926_dma_clean_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm926_dma_flush_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry #else mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry #endif add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm926_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq arm926_dma_clean_range bcs arm926_dma_inv_range b arm926_dma_flush_range ENDPROC(arm926_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm926_dma_unmap_area) ret lr ENDPROC(arm926_dma_unmap_area) .globl arm926_flush_kern_cache_louis .equ arm926_flush_kern_cache_louis, arm926_flush_kern_cache_all @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions arm926 ENTRY(cpu_arm926_dcache_clean_area) #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE subs r1, r1, #CACHE_DLINESIZE bhi 1b #endif mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* =============================== PageTable ============================== */ /* * cpu_arm926_switch_mm(pgd) * * Set the translation base pointer to be as described by pgd. 
* * pgd: new page tables */ .align 5 ENTRY(cpu_arm926_switch_mm) #ifdef CONFIG_MMU mov ip, #0 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache #else @ && 'Clean & Invalidate whole DCache' 1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate bne 1b #endif mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif ret lr /* * cpu_arm926_set_pte_ext(ptep, pte, ext) * * Set a PTE and flush it out */ .align 5 ENTRY(cpu_arm926_set_pte_ext) #ifdef CONFIG_MMU armv3_set_pte_ext mov r0, r0 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, r0, c7, c10, 1 @ clean D entry #endif mcr p15, 0, r0, c7, c10, 4 @ drain WB #endif ret lr /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ .globl cpu_arm926_suspend_size .equ cpu_arm926_suspend_size, 4 * 3 #ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_arm926_do_suspend) stmfd sp!, {r4 - r6, lr} mrc p15, 0, r4, c13, c0, 0 @ PID mrc p15, 0, r5, c3, c0, 0 @ Domain ID mrc p15, 0, r6, c1, c0, 0 @ Control register stmia r0, {r4 - r6} ldmfd sp!, {r4 - r6, pc} ENDPROC(cpu_arm926_do_suspend) ENTRY(cpu_arm926_do_resume) mov ip, #0 mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches ldmia r0, {r4 - r6} mcr p15, 0, r4, c13, c0, 0 @ PID mcr p15, 0, r5, c3, c0, 0 @ Domain ID mcr p15, 0, r1, c2, c0, 0 @ TTB address mov r0, r6 @ control register b cpu_resume_mmu ENDPROC(cpu_arm926_do_resume) #endif .type __arm926_setup, #function __arm926_setup: mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 #endif #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mov r0, #4 @ disable write-back on caches explicitly mcr p15, 7, r0, c15, c0, 0 #endif adr r5, arm926_crval ldmia r5, {r5, r6} mrc p15, 0, r0, c1, c0 @ get control register v4 bic r0, r0, r5 orr r0, r0, r6 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN orr r0, r0, #0x4000 @ .1.. .... .... .... #endif ret lr .size __arm926_setup, . - __arm926_setup /* * R * .RVI ZFRS BLDP WCAM * .011 0001 ..11 0101 * */ .type arm926_crval, #object arm926_crval: crval clear=0x00007f3f, mmuset=0x00003135, ucset=0x00001134 __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions arm926, dabort=v5tj_early_abort, pabort=legacy_pabort, suspend=1 .section ".rodata" string cpu_arch_name, "armv5tej" string cpu_elf_name, "v5" string cpu_arm926_name, "ARM926EJ-S" .align .section ".proc.info.init", #alloc .type __arm926_proc_info,#object __arm926_proc_info: .long 0x41069260 @ ARM926EJ-S (v5TEJ) .long 0xff0ffff0 .long PMD_TYPE_SECT | \ PMD_SECT_BUFFERABLE | \ PMD_SECT_CACHEABLE | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __arm926_setup, __arm926_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA .long cpu_arm926_name .long arm926_processor_functions .long v4wbi_tlb_fns .long v4wb_user_fns .long arm926_cache_fns .size __arm926_proc_info, . - __arm926_proc_info
AirFortressIlikara/LS2K0300-linux-4.19
9,410
arch/arm/mm/proc-macros.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * We need constants.h for: * VMA_VM_MM * VMA_VM_FLAGS * VM_EXEC */ #include <linux/const.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> #ifdef CONFIG_CPU_V7M #include <asm/v7m.h> #endif /* * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm) */ .macro vma_vm_mm, rd, rn ldr \rd, [\rn, #VMA_VM_MM] .endm /* * vma_vm_flags - get vma->vm_flags */ .macro vma_vm_flags, rd, rn ldr \rd, [\rn, #VMA_VM_FLAGS] .endm /* * act_mm - get current->active_mm */ .macro act_mm, rd bic \rd, sp, #(THREAD_SIZE - 1) & ~63 bic \rd, \rd, #63 ldr \rd, [\rd, #TI_TASK] .if (TSK_ACTIVE_MM > IMM12_MASK) add \rd, \rd, #TSK_ACTIVE_MM & ~IMM12_MASK .endif ldr \rd, [\rd, #TSK_ACTIVE_MM & IMM12_MASK] .endm /* * mmid - get context id from mm pointer (mm->context.id) * note, this field is 64bit, so in big-endian the two words are swapped too. */ .macro mmid, rd, rn #ifdef __ARMEB__ ldr \rd, [\rn, #MM_CONTEXT_ID + 4 ] #else ldr \rd, [\rn, #MM_CONTEXT_ID] #endif .endm /* * mask_asid - mask the ASID from the context ID */ .macro asid, rd, rn and \rd, \rn, #255 .endm .macro crval, clear, mmuset, ucset #ifdef CONFIG_MMU .word \clear .word \mmuset #else .word \clear .word \ucset #endif .endm /* * dcache_line_size - get the minimum D-cache line size from the CTR register * on ARMv7. */ .macro dcache_line_size, reg, tmp #ifdef CONFIG_CPU_V7M movw \tmp, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_CTR movt \tmp, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_CTR ldr \tmp, [\tmp] #else mrc p15, 0, \tmp, c0, c0, 1 @ read ctr #endif lsr \tmp, \tmp, #16 and \tmp, \tmp, #0xf @ cache line size encoding mov \reg, #4 @ bytes per word mov \reg, \reg, lsl \tmp @ actual cache line size .endm /* * icache_line_size - get the minimum I-cache line size from the CTR register * on ARMv7. */ .macro icache_line_size, reg, tmp #ifdef CONFIG_CPU_V7M movw \tmp, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_CTR movt \tmp, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_CTR ldr \tmp, [\tmp] #else mrc p15, 0, \tmp, c0, c0, 1 @ read ctr #endif and \tmp, \tmp, #0xf @ cache line size encoding mov \reg, #4 @ bytes per word mov \reg, \reg, lsl \tmp @ actual cache line size .endm /* * Sanity check the PTE configuration for the code below - which makes * certain assumptions about how these bits are laid out. */ #ifdef CONFIG_MMU #if L_PTE_SHARED != PTE_EXT_SHARED #error PTE shared bit mismatch #endif #if !defined (CONFIG_ARM_LPAE) && \ (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\ L_PTE_PRESENT) > L_PTE_SHARED #error Invalid Linux PTE bit settings #endif #endif /* CONFIG_MMU */ /* * The ARMv6 and ARMv7 set_pte_ext translation function. 
* * Permission translation: * YUWD APX AP1 AP0 SVC User * 0xxx 0 0 0 no acc no acc * 100x 1 0 1 r/o no acc * 10x0 1 0 1 r/o no acc * 1011 0 0 1 r/w no acc * 110x 1 1 1 r/o r/o * 11x0 1 1 1 r/o r/o * 1111 0 1 1 r/w r/w */ .macro armv6_mt_table pfx \pfx\()_mt_table: .long 0x00 @ L_PTE_MT_UNCACHED .long PTE_EXT_TEX(1) @ L_PTE_MT_BUFFERABLE .long PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK .long PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED .long 0x00 @ unused .long 0x00 @ L_PTE_MT_MINICACHE (not present) .long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC .long 0x00 @ unused .long PTE_EXT_TEX(1) @ L_PTE_MT_DEV_WC .long 0x00 @ unused .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_DEV_CACHED .long PTE_EXT_TEX(2) @ L_PTE_MT_DEV_NONSHARED .long 0x00 @ unused .long 0x00 @ unused .long PTE_CACHEABLE | PTE_BUFFERABLE | PTE_EXT_APX @ L_PTE_MT_VECTORS .endm .macro armv6_set_pte_ext pfx str r1, [r0], #2048 @ linux version bic r3, r1, #0x000003fc bic r3, r3, #PTE_TYPE_MASK orr r3, r3, r2 orr r3, r3, #PTE_EXT_AP0 | 2 adr ip, \pfx\()_mt_table and r2, r1, #L_PTE_MT_MASK ldr r2, [ip, r2] eor r1, r1, #L_PTE_DIRTY tst r1, #L_PTE_DIRTY|L_PTE_RDONLY orrne r3, r3, #PTE_EXT_APX tst r1, #L_PTE_USER orrne r3, r3, #PTE_EXT_AP1 tstne r3, #PTE_EXT_APX @ user read-only -> kernel read-only bicne r3, r3, #PTE_EXT_AP0 tst r1, #L_PTE_XN orrne r3, r3, #PTE_EXT_XN eor r3, r3, r2 tst r1, #L_PTE_YOUNG tstne r1, #L_PTE_PRESENT moveq r3, #0 tstne r1, #L_PTE_NONE movne r3, #0 str r3, [r0] mcr p15, 0, r0, c7, c10, 1 @ flush_pte .endm /* * The ARMv3, ARMv4 and ARMv5 set_pte_ext translation function, * covering most CPUs except Xscale and Xscale 3. * * Permission translation: * YUWD AP SVC User * 0xxx 0x00 no acc no acc * 100x 0x00 r/o no acc * 10x0 0x00 r/o no acc * 1011 0x55 r/w no acc * 110x 0xaa r/w r/o * 11x0 0xaa r/w r/o * 1111 0xff r/w r/w */ .macro armv3_set_pte_ext wc_disable=1 str r1, [r0], #2048 @ linux version eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY bic r2, r1, #PTE_SMALL_AP_MASK @ keep C, B bits bic r2, r2, #PTE_TYPE_MASK orr r2, r2, #PTE_TYPE_SMALL tst r3, #L_PTE_USER @ user? orrne r2, r2, #PTE_SMALL_AP_URO_SRW tst r3, #L_PTE_RDONLY | L_PTE_DIRTY @ write and dirty? orreq r2, r2, #PTE_SMALL_AP_UNO_SRW tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young? movne r2, #0 .if \wc_disable #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH tst r2, #PTE_CACHEABLE bicne r2, r2, #PTE_BUFFERABLE #endif .endif str r2, [r0] @ hardware version .endm /* * Xscale set_pte_ext translation, split into two halves to cope * with work-arounds. r3 must be preserved by code between these * two macros. * * Permission translation: * YUWD AP SVC User * 0xxx 00 no acc no acc * 100x 00 r/o no acc * 10x0 00 r/o no acc * 1011 01 r/w no acc * 110x 10 r/w r/o * 11x0 10 r/w r/o * 1111 11 r/w r/w */ .macro xscale_set_pte_ext_prologue str r1, [r0] @ linux version eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY bic r2, r1, #PTE_SMALL_AP_MASK @ keep C, B bits orr r2, r2, #PTE_TYPE_EXT @ extended page tst r3, #L_PTE_USER @ user? orrne r2, r2, #PTE_EXT_AP_URO_SRW @ yes -> user r/o, system r/w tst r3, #L_PTE_RDONLY | L_PTE_DIRTY @ write and dirty? orreq r2, r2, #PTE_EXT_AP_UNO_SRW @ yes -> user n/a, system r/w @ combined with user -> user r/w .endm .macro xscale_set_pte_ext_epilogue tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young? movne r2, #0 @ no -> fault str r2, [r0, #2048]! 
@ hardware version mov ip, #0 mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line mcr p15, 0, ip, c7, c10, 4 @ data write barrier .endm .macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0 /* * If we are building for big.Little with branch predictor hardening, * we need the processor function tables to remain available after boot. */ #if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) .section ".rodata" #endif .type \name\()_processor_functions, #object .align 2 ENTRY(\name\()_processor_functions) .word \dabort .word \pabort .word cpu_\name\()_proc_init .word \bugs .word cpu_\name\()_proc_fin .word cpu_\name\()_reset .word cpu_\name\()_do_idle .word cpu_\name\()_dcache_clean_area .word cpu_\name\()_switch_mm .if \nommu .word 0 .else .word cpu_\name\()_set_pte_ext .endif .if \suspend .word cpu_\name\()_suspend_size #ifdef CONFIG_ARM_CPU_SUSPEND .word cpu_\name\()_do_suspend .word cpu_\name\()_do_resume #else .word 0 .word 0 #endif .else .word 0 .word 0 .word 0 .endif .size \name\()_processor_functions, . - \name\()_processor_functions #if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) .previous #endif .endm .macro define_cache_functions name:req .align 2 .type \name\()_cache_fns, #object ENTRY(\name\()_cache_fns) .long \name\()_flush_icache_all .long \name\()_flush_kern_cache_all .long \name\()_flush_kern_cache_louis .long \name\()_flush_user_cache_all .long \name\()_flush_user_cache_range .long \name\()_coherent_kern_range .long \name\()_coherent_user_range .long \name\()_flush_kern_dcache_area .long \name\()_dma_map_area .long \name\()_dma_unmap_area .long \name\()_dma_flush_range .size \name\()_cache_fns, . - \name\()_cache_fns .endm .macro define_tlb_functions name:req, flags_up:req, flags_smp .type \name\()_tlb_fns, #object ENTRY(\name\()_tlb_fns) .long \name\()_flush_user_tlb_range .long \name\()_flush_kern_tlb_range .ifnb \flags_smp ALT_SMP(.long \flags_smp ) ALT_UP(.long \flags_up ) .else .long \flags_up .endif .size \name\()_tlb_fns, . - \name\()_tlb_fns .endm .macro globl_equ x, y .globl \x .equ \x, \y .endm .macro initfn, func, base .long \func - \base .endm /* * Macro to calculate the log2 size for the protection region * registers. This calculates rd = log2(size) - 1. tmp must * not be the same register as rd. */ .macro pr_sz, rd, size, tmp mov \tmp, \size, lsr #12 mov \rd, #11 1: movs \tmp, \tmp, lsr #1 addne \rd, \rd, #1 bne 1b .endm /* * Macro to generate a protection region register value * given a pre-masked address, size, and enable bit. * Corrupts size. */ .macro pr_val, dest, addr, size, enable pr_sz \dest, \size, \size @ calculate log2(size) - 1 orr \dest, \addr, \dest, lsl #1 @ mask in the region size orr \dest, \dest, \enable .endm
AirFortressIlikara/LS2K0300-linux-4.19
10,661
arch/arm/mm/proc-arm946.S
/* * linux/arch/arm/mm/arm946.S: utility functions for ARM946E-S * * Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com) * * (Many of cache codes are from proc-arm926.S) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/pgtable.h> #include <asm/ptrace.h> #include "proc-macros.S" /* * ARM946E-S is synthesizable to have 0KB to 1MB sized D-Cache, * comprising 256 lines of 32 bytes (8 words). */ #define CACHE_DSIZE (CONFIG_CPU_DCACHE_SIZE) /* typically 8KB. */ #define CACHE_DLINESIZE 32 /* fixed */ #define CACHE_DSEGMENTS 4 /* fixed */ #define CACHE_DENTRIES (CACHE_DSIZE / CACHE_DSEGMENTS / CACHE_DLINESIZE) #define CACHE_DLIMIT (CACHE_DSIZE * 4) /* benchmark needed */ .text /* * cpu_arm946_proc_init() * cpu_arm946_switch_mm() * * These are not required. */ ENTRY(cpu_arm946_proc_init) ENTRY(cpu_arm946_switch_mm) ret lr /* * cpu_arm946_proc_fin() */ ENTRY(cpu_arm946_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x00001000 @ i-cache bic r0, r0, #0x00000004 @ d-cache mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_arm946_reset(loc) * Params : r0 = address to jump to * Notes : This sets up everything for a reset */ .pushsection .idmap.text, "ax" ENTRY(cpu_arm946_reset) mov ip, #0 mcr p15, 0, ip, c7, c5, 0 @ flush I cache mcr p15, 0, ip, c7, c6, 0 @ flush D cache mcr p15, 0, ip, c7, c10, 4 @ drain WB mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x00000005 @ .............c.p bic ip, ip, #0x00001000 @ i-cache mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_arm946_reset) .popsection /* * cpu_arm946_do_idle() */ .align 5 ENTRY(cpu_arm946_do_idle) mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt ret lr /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(arm946_flush_icache_all) mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache ret lr ENDPROC(arm946_flush_icache_all) /* * flush_user_cache_all() */ ENTRY(arm946_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. */ ENTRY(arm946_flush_kern_cache_all) mov r2, #VM_EXEC mov ip, #0 __flush_whole_cache: #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, ip, c7, c6, 0 @ flush D cache #else mov r1, #(CACHE_DSEGMENTS - 1) << 29 @ 4 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 4 @ n entries 2: mcr p15, 0, r3, c7, c14, 2 @ clean/flush D index subs r3, r3, #1 << 4 bcs 2b @ entries n to 0 subs r1, r1, #1 << 29 bcs 1b @ segments 3 to 0 #endif tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c5, 0 @ flush I cache mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * flush_user_cache_range(start, end, flags) * * Clean and invalidate a range of cache entries in the * specified address range. 
* * - start - start address (inclusive) * - end - end address (exclusive) * - flags - vm_flags describing address space * (same as arm926) */ ENTRY(arm946_flush_user_cache_range) mov ip, #0 sub r3, r1, r0 @ calculate total size cmp r3, #CACHE_DLIMIT bhs __flush_whole_cache 1: tst r2, #VM_EXEC #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE #else mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE #endif cmp r0, r1 blo 1b tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm946_coherent_kern_range) /* FALLTHROUGH */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address * (same as arm926) */ ENTRY(arm946_coherent_user_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB mov r0, #0 ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size * (same as arm926) */ ENTRY(arm946_flush_kern_dcache_area) add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. * * - start - virtual start address * - end - virtual end address * (same as arm926) */ arm946_dma_inv_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry #endif bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_clean_range(start, end) * * Clean the specified virtual address range. * * - start - virtual start address * - end - virtual end address * * (same as arm926) */ arm946_dma_clean_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. 
* * - start - virtual start address * - end - virtual end address * * (same as arm926) */ ENTRY(arm946_dma_flush_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry #else mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry #endif add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm946_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq arm946_dma_clean_range bcs arm946_dma_inv_range b arm946_dma_flush_range ENDPROC(arm946_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm946_dma_unmap_area) ret lr ENDPROC(arm946_dma_unmap_area) .globl arm946_flush_kern_cache_louis .equ arm946_flush_kern_cache_louis, arm946_flush_kern_cache_all @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions arm946 ENTRY(cpu_arm946_dcache_clean_area) #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE subs r1, r1, #CACHE_DLINESIZE bhi 1b #endif mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr .type __arm946_setup, #function __arm946_setup: mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c6, 0 @ invalidate D cache mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c6, c3, 0 @ disable memory region 3~7 mcr p15, 0, r0, c6, c4, 0 mcr p15, 0, r0, c6, c5, 0 mcr p15, 0, r0, c6, c6, 0 mcr p15, 0, r0, c6, c7, 0 mov r0, #0x0000003F @ base = 0, size = 4GB mcr p15, 0, r0, c6, c0, 0 @ set region 0, default ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM ldr r7, =CONFIG_DRAM_SIZE @ size of RAM (must be >= 4KB) pr_val r3, r0, r7, #1 mcr p15, 0, r3, c6, c1, 0 ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH ldr r7, =CONFIG_FLASH_SIZE @ size of FLASH (must be >= 4KB) pr_val r3, r0, r7, #1 mcr p15, 0, r3, c6, c2, 0 mov r0, #0x06 mcr p15, 0, r0, c2, c0, 0 @ region 1,2 d-cacheable mcr p15, 0, r0, c2, c0, 1 @ region 1,2 i-cacheable #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mov r0, #0x00 @ disable whole write buffer #else mov r0, #0x02 @ region 1 write bufferred #endif mcr p15, 0, r0, c3, c0, 0 /* * Access Permission Settings for future permission control by PU. * * priv. user * region 0 (whole) rw -- : b0001 * region 1 (RAM) rw rw : b0011 * region 2 (FLASH) rw r- : b0010 * region 3~7 (none) -- -- : b0000 */ mov r0, #0x00000031 orr r0, r0, #0x00000200 mcr p15, 0, r0, c5, c0, 2 @ set data access permission mcr p15, 0, r0, c5, c0, 3 @ set inst. access permission mrc p15, 0, r0, c1, c0 @ get control register orr r0, r0, #0x00001000 @ I-cache orr r0, r0, #0x00000005 @ MPU/D-cache #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN orr r0, r0, #0x00004000 @ .1.. .... .... .... #endif ret lr .size __arm946_setup, . 
- __arm946_setup __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions arm946, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1 .section ".rodata" string cpu_arch_name, "armv5te" string cpu_elf_name, "v5t" string cpu_arm946_name, "ARM946E-S" .align .section ".proc.info.init", #alloc .type __arm946_proc_info,#object __arm946_proc_info: .long 0x41009460 .long 0xff00fff0 .long 0 .long 0 initfn __arm946_setup, __arm946_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB .long cpu_arm946_name .long arm946_processor_functions .long 0 .long 0 .long arm946_cache_fns .size __arm946_proc_info, . - __arm946_proc_info
AirFortressIlikara/LS2K0300-linux-4.19
1,092
arch/arm/mm/abort-ev7.S
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
 * Function: v7_early_abort
 *
 * Params  : r2 = pt_regs
 *	   : r4 = aborted context pc
 *	   : r5 = aborted context psr
 *
 * Returns : r4 - r11, r13 preserved
 *
 * Purpose : obtain information about current aborted instruction.
 */
	.align	5
ENTRY(v7_early_abort)
	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
	uaccess_disable ip			@ disable userspace access

	/*
	 * V6 code adjusts the returned DFSR.
	 * New designs should not need to patch up faults.
	 */
#if defined(CONFIG_VERIFY_PERMISSION_FAULT)
	/*
	 * Detect erroneous permission failures and fix
	 */
	ldr	r3, =0x40d			@ On permission fault
	and	r3, r1, r3
	cmp	r3, #0x0d
	bne	do_DataAbort

	mcr	p15, 0, r0, c7, c8, 0		@ Retranslate FAR
	isb
	mrc	p15, 0, ip, c7, c4, 0		@ Read the PAR
	and	r3, ip, #0x7b			@ On translation fault
	cmp	r3, #0x0b
	bne	do_DataAbort
	bic	r1, r1, #0xf			@ Fix up FSR FS[5:0]
	and	ip, ip, #0x7e
	orr	r1, r1, ip, LSR #1
#endif

	b	do_DataAbort
ENDPROC(v7_early_abort)
AirFortressIlikara/LS2K0300-linux-4.19
6,972
arch/arm/mm/proc-sa1100.S
/* * linux/arch/arm/mm/proc-sa1100.S * * Copyright (C) 1997-2002 Russell King * hacked for non-paged-MM by Hyok S. Choi, 2003. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * MMU functions for SA110 * * These are the low level assembler for performing cache and TLB * functions on the StrongARM-1100 and StrongARM-1110. * * Note that SA1100 and SA1110 share everything but their name and CPU ID. * * 12-jun-2000, Erik Mouw (J.A.K.Mouw@its.tudelft.nl): * Flush the read buffer at context switches */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/hwcap.h> #include <mach/hardware.h> #include <asm/pgtable-hwdef.h> #include <asm/pgtable.h> #include "proc-macros.S" /* * the cache line size of the I and D cache */ #define DCACHELINESIZE 32 .section .text /* * cpu_sa1100_proc_init() */ ENTRY(cpu_sa1100_proc_init) mov r0, #0 mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland ret lr /* * cpu_sa1100_proc_fin() * * Prepare the CPU for reset: * - Disable interrupts * - Clean and turn off caches. */ ENTRY(cpu_sa1100_proc_fin) mcr p15, 0, ip, c15, c2, 2 @ Disable clock switching mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_sa1100_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * loc: location to jump to for soft reset */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_sa1100_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB #ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_sa1100_reset) .popsection /* * cpu_sa1100_do_idle(type) * * Cause the processor to idle * * type: call type: * 0 = slow idle * 1 = fast idle * 2 = switch to slow processor clock * 3 = switch to fast processor clock */ .align 5 ENTRY(cpu_sa1100_do_idle) mov r0, r0 @ 4 nop padding mov r0, r0 mov r0, r0 mov r0, r0 @ 4 nop padding mov r0, r0 mov r0, r0 mov r0, #0 ldr r1, =UNCACHEABLE_ADDR @ ptr to uncacheable address @ --- aligned to a cache line mcr p15, 0, r0, c15, c2, 2 @ disable clock switching ldr r1, [r1, #0] @ force switch to MCLK mcr p15, 0, r0, c15, c8, 2 @ wait for interrupt mov r0, r0 @ safety mcr p15, 0, r0, c15, c1, 2 @ enable clock switching ret lr /* ================================= CACHE ================================ */ /* * cpu_sa1100_dcache_clean_area(addr,sz) * * Clean the specified entry of any caches such that the MMU * translation fetches will obtain correct data. * * addr: cache-unaligned virtual address */ .align 5 ENTRY(cpu_sa1100_dcache_clean_area) 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #DCACHELINESIZE subs r1, r1, #DCACHELINESIZE bhi 1b ret lr /* =============================== PageTable ============================== */ /* * cpu_sa1100_switch_mm(pgd) * * Set the translation base pointer to be as described by pgd. 
* * pgd: new page tables */ .align 5 ENTRY(cpu_sa1100_switch_mm) #ifdef CONFIG_MMU str lr, [sp, #-4]! bl v4wb_flush_kern_cache_all @ clears IP mcr p15, 0, ip, c9, c0, 0 @ invalidate RB mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs ldr pc, [sp], #4 #else ret lr #endif /* * cpu_sa1100_set_pte_ext(ptep, pte, ext) * * Set a PTE and flush it out */ .align 5 ENTRY(cpu_sa1100_set_pte_ext) #ifdef CONFIG_MMU armv3_set_pte_ext wc_disable=0 mov r0, r0 mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c10, 4 @ drain WB #endif ret lr .globl cpu_sa1100_suspend_size .equ cpu_sa1100_suspend_size, 4 * 3 #ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_sa1100_do_suspend) stmfd sp!, {r4 - r6, lr} mrc p15, 0, r4, c3, c0, 0 @ domain ID mrc p15, 0, r5, c13, c0, 0 @ PID mrc p15, 0, r6, c1, c0, 0 @ control reg stmia r0, {r4 - r6} @ store cp regs ldmfd sp!, {r4 - r6, pc} ENDPROC(cpu_sa1100_do_suspend) ENTRY(cpu_sa1100_do_resume) ldmia r0, {r4 - r6} @ load cp regs mov ip, #0 mcr p15, 0, ip, c8, c7, 0 @ flush I+D TLBs mcr p15, 0, ip, c7, c7, 0 @ flush I&D cache mcr p15, 0, ip, c9, c0, 0 @ invalidate RB mcr p15, 0, ip, c9, c0, 5 @ allow user space to use RB mcr p15, 0, r4, c3, c0, 0 @ domain ID mcr p15, 0, r1, c2, c0, 0 @ translation table base addr mcr p15, 0, r5, c13, c0, 0 @ PID mov r0, r6 @ control register b cpu_resume_mmu ENDPROC(cpu_sa1100_do_resume) #endif .type __sa1100_setup, #function __sa1100_setup: mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 #endif adr r5, sa1100_crval ldmia r5, {r5, r6} mrc p15, 0, r0, c1, c0 @ get control register v4 bic r0, r0, r5 orr r0, r0, r6 ret lr .size __sa1100_setup, . - __sa1100_setup /* * R * .RVI ZFRS BLDP WCAM * ..11 0001 ..11 1101 * */ .type sa1100_crval, #object sa1100_crval: crval clear=0x00003f3f, mmuset=0x0000313d, ucset=0x00001130 __INITDATA /* * SA1100 and SA1110 share the same function calls */ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions sa1100, dabort=v4_early_abort, pabort=legacy_pabort, suspend=1 .section ".rodata" string cpu_arch_name, "armv4" string cpu_elf_name, "v4" string cpu_sa1100_name, "StrongARM-1100" string cpu_sa1110_name, "StrongARM-1110" .align .section ".proc.info.init", #alloc .macro sa1100_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req .type __\name\()_proc_info,#object __\name\()_proc_info: .long \cpu_val .long \cpu_mask .long PMD_TYPE_SECT | \ PMD_SECT_BUFFERABLE | \ PMD_SECT_CACHEABLE | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __sa1100_setup, __\name\()_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT .long \cpu_name .long sa1100_processor_functions .long v4wb_tlb_fns .long v4_mc_user_fns .long v4wb_cache_fns .size __\name\()_proc_info, . - __\name\()_proc_info .endm sa1100_proc_info sa1100, 0x4401a110, 0xfffffff0, cpu_sa1100_name sa1100_proc_info sa1110, 0x6901b110, 0xfffffff0, cpu_sa1110_name
AirFortressIlikara/LS2K0300-linux-4.19
3,307
arch/arm/mm/cache-v4.S
/* * linux/arch/arm/mm/cache-v4.S * * Copyright (C) 1997-2002 Russell king * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/page.h> #include "proc-macros.S" /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(v4_flush_icache_all) ret lr ENDPROC(v4_flush_icache_all) /* * flush_user_cache_all() * * Invalidate all cache entries in a particular address * space. * * - mm - mm_struct describing address space */ ENTRY(v4_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. */ ENTRY(v4_flush_kern_cache_all) #ifdef CONFIG_CPU_CP15 mov r0, #0 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache ret lr #else /* FALLTHROUGH */ #endif /* * flush_user_cache_range(start, end, flags) * * Invalidate a range of cache entries in the specified * address space. * * - start - start address (may not be aligned) * - end - end address (exclusive, may not be aligned) * - flags - vma_area_struct flags describing address space */ ENTRY(v4_flush_user_cache_range) #ifdef CONFIG_CPU_CP15 mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ flush ID cache ret lr #else /* FALLTHROUGH */ #endif /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(v4_coherent_kern_range) /* FALLTHROUGH */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(v4_coherent_user_range) mov r0, #0 ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size */ ENTRY(v4_flush_kern_dcache_area) /* FALLTHROUGH */ /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. * * - start - virtual start address * - end - virtual end address */ ENTRY(v4_dma_flush_range) #ifdef CONFIG_CPU_CP15 mov r0, #0 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache #endif ret lr /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(v4_dma_unmap_area) teq r2, #DMA_TO_DEVICE bne v4_dma_flush_range /* FALLTHROUGH */ /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(v4_dma_map_area) ret lr ENDPROC(v4_dma_unmap_area) ENDPROC(v4_dma_map_area) .globl v4_flush_kern_cache_louis .equ v4_flush_kern_cache_louis, v4_flush_kern_cache_all __INITDATA @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions v4
AirFortressIlikara/LS2K0300-linux-4.19
13,766
arch/arm/mm/proc-arm925.S
/* * linux/arch/arm/mm/arm925.S: MMU functions for ARM925 * * Copyright (C) 1999,2000 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. * Copyright (C) 2002 RidgeRun, Inc. * Copyright (C) 2002-2003 MontaVista Software, Inc. * * Update for Linux-2.6 and cache flush improvements * Copyright (C) 2004 Nokia Corporation by Tony Lindgren <tony@atomide.com> * * hacked for non-paged-MM by Hyok S. Choi, 2004. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * These are the low level assembler for performing cache and TLB * functions on the arm925. * * CONFIG_CPU_ARM925_CPU_IDLE -> nohlt * * Some additional notes based on deciphering the TI TRM on OMAP-5910: * * NOTE1: The TI925T Configuration Register bit "D-cache clean and flush * entry mode" must be 0 to flush the entries in both segments * at once. This is the default value. See TRM 2-20 and 2-24 for * more information. * * NOTE2: Default is the "D-cache clean and flush entry mode". It looks * like the "Transparent mode" must be on for partial cache flushes * to work in this mode. This mode only works with 16-bit external * memory. See TRM 2-24 for more information. * * NOTE3: Write-back cache flushing seems to be flakey with devices using * direct memory access, such as USB OHCI. The workaround is to use * write-through cache with CONFIG_CPU_DCACHE_WRITETHROUGH (this is * the default for OMAP-1510). */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/ptrace.h> #include "proc-macros.S" /* * The size of one data cache line. */ #define CACHE_DLINESIZE 16 /* * The number of data cache segments. */ #define CACHE_DSEGMENTS 2 /* * The number of lines in a cache segment. */ #define CACHE_DENTRIES 256 /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual * cache line maintenance instructions. */ #define CACHE_DLIMIT 8192 .text /* * cpu_arm925_proc_init() */ ENTRY(cpu_arm925_proc_init) ret lr /* * cpu_arm925_proc_fin() */ ENTRY(cpu_arm925_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_arm925_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. 
* * loc: location to jump to for soft reset */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_arm925_reset) /* Send software reset to MPU and DSP */ mov ip, #0xff000000 orr ip, ip, #0x00fe0000 orr ip, ip, #0x0000ce00 mov r4, #1 strh r4, [ip, #0x10] ENDPROC(cpu_arm925_reset) .popsection mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB #ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 /* * cpu_arm925_do_idle() * * Called with IRQs disabled */ .align 10 ENTRY(cpu_arm925_do_idle) mov r0, #0 mrc p15, 0, r1, c1, c0, 0 @ Read control register mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer bic r2, r1, #1 << 12 mcr p15, 0, r2, c1, c0, 0 @ Disable I cache mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable ret lr /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(arm925_flush_icache_all) mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache ret lr ENDPROC(arm925_flush_icache_all) /* * flush_user_cache_all() * * Clean and invalidate all cache entries in a particular * address space. */ ENTRY(arm925_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. */ ENTRY(arm925_flush_kern_cache_all) mov r2, #VM_EXEC mov ip, #0 __flush_whole_cache: #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache #else /* Flush entries in both segments at once, see NOTE1 above */ mov r3, #(CACHE_DENTRIES - 1) << 4 @ 256 entries in segment 2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index subs r3, r3, #1 << 4 bcs 2b @ entries 255 to 0 #endif tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * flush_user_cache_range(start, end, flags) * * Clean and invalidate a range of cache entries in the * specified address range. * * - start - start address (inclusive) * - end - end address (exclusive) * - flags - vm_flags describing address space */ ENTRY(arm925_flush_user_cache_range) mov ip, #0 sub r3, r1, r0 @ calculate total size cmp r3, #CACHE_DLIMIT bgt __flush_whole_cache 1: tst r2, #VM_EXEC #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE #else mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE #endif cmp r0, r1 blo 1b tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm925_coherent_kern_range) /* FALLTHROUGH */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. 
If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm925_coherent_user_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB mov r0, #0 ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size */ ENTRY(arm925_flush_kern_dcache_area) add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm925_dma_inv_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry #endif bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_clean_range(start, end) * * Clean the specified virtual address range. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm925_dma_clean_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm925_dma_flush_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry #else mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry #endif add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm925_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq arm925_dma_clean_range bcs arm925_dma_inv_range b arm925_dma_flush_range ENDPROC(arm925_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm925_dma_unmap_area) ret lr ENDPROC(arm925_dma_unmap_area) .globl arm925_flush_kern_cache_louis .equ arm925_flush_kern_cache_louis, arm925_flush_kern_cache_all @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions arm925 ENTRY(cpu_arm925_dcache_clean_area) #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE subs r1, r1, #CACHE_DLINESIZE bhi 1b #endif mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* =============================== PageTable ============================== */ /* * cpu_arm925_switch_mm(pgd) * * Set the translation base pointer to be as described by pgd. 
* * pgd: new page tables */ .align 5 ENTRY(cpu_arm925_switch_mm) #ifdef CONFIG_MMU mov ip, #0 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache #else /* Flush entries in bothe segments at once, see NOTE1 above */ mov r3, #(CACHE_DENTRIES - 1) << 4 @ 256 entries in segment 2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index subs r3, r3, #1 << 4 bcs 2b @ entries 255 to 0 #endif mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif ret lr /* * cpu_arm925_set_pte_ext(ptep, pte, ext) * * Set a PTE and flush it out */ .align 5 ENTRY(cpu_arm925_set_pte_ext) #ifdef CONFIG_MMU armv3_set_pte_ext mov r0, r0 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, r0, c7, c10, 1 @ clean D entry #endif mcr p15, 0, r0, c7, c10, 4 @ drain WB #endif /* CONFIG_MMU */ ret lr .type __arm925_setup, #function __arm925_setup: mov r0, #0 /* Transparent on, D-cache clean & flush mode. See NOTE2 above */ orr r0,r0,#1 << 1 @ transparent mode on mcr p15, 0, r0, c15, c1, 0 @ write TI config register mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 #endif #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mov r0, #4 @ disable write-back on caches explicitly mcr p15, 7, r0, c15, c0, 0 #endif adr r5, arm925_crval ldmia r5, {r5, r6} mrc p15, 0, r0, c1, c0 @ get control register v4 bic r0, r0, r5 orr r0, r0, r6 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN orr r0, r0, #0x4000 @ .1.. .... .... .... #endif ret lr .size __arm925_setup, . - __arm925_setup /* * R * .RVI ZFRS BLDP WCAM * .011 0001 ..11 1101 * */ .type arm925_crval, #object arm925_crval: crval clear=0x00007f3f, mmuset=0x0000313d, ucset=0x00001130 __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions arm925, dabort=v4t_early_abort, pabort=legacy_pabort .section ".rodata" string cpu_arch_name, "armv4t" string cpu_elf_name, "v4" string cpu_arm925_name, "ARM925T" .align .section ".proc.info.init", #alloc .macro arm925_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache .type __\name\()_proc_info,#object __\name\()_proc_info: .long \cpu_val .long \cpu_mask .long PMD_TYPE_SECT | \ PMD_SECT_CACHEABLE | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __arm925_setup, __\name\()_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB .long cpu_arm925_name .long arm925_processor_functions .long v4wbi_tlb_fns .long v4wb_user_fns .long arm925_cache_fns .size __\name\()_proc_info, . - __\name\()_proc_info .endm arm925_proc_info arm925, 0x54029250, 0xfffffff0, cpu_arm925_name arm925_proc_info arm915, 0x54029150, 0xfffffff0, cpu_arm925_name
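The clean+invalidate loop used by arm925_flush_kern_cache_all and cpu_arm925_switch_mm walks the D-cache by index: r3 starts at (CACHE_DENTRIES - 1) << 4 and steps down by 1 << 4 until the subtraction borrows, so one pass issues 256 index operations and, per NOTE1, covers both segments at once. A small stand-alone C sketch of that same counting, assuming nothing beyond the constants in the file:

#include <stdio.h>

#define CACHE_DENTRIES 256   /* from proc-arm925.S */

int main(void)
{
    /* Mirror of the "clean+invalidate D index" loop: start at
     * (CACHE_DENTRIES - 1) << 4, step down by 1 << 4, stop once the
     * value would go negative (the assembly's "subs ... bcs 2b"). */
    unsigned int ops = 0;
    for (int idx = (CACHE_DENTRIES - 1) << 4; idx >= 0; idx -= 1 << 4) {
        /* idx is what would be written with: mcr p15, 0, r3, c7, c14, 2 */
        ops++;
    }
    printf("%u index operations (entries 255..0, both segments at once)\n", ops);
    return 0;
}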
AirFortressIlikara/LS2K0300-linux-4.19
11,796
arch/arm/mm/proc-mohawk.S
/* * linux/arch/arm/mm/proc-mohawk.S: MMU functions for Marvell PJ1 core * * PJ1 (codename Mohawk) is a hybrid of the xscale3 and Marvell's own core. * * Heavily based on proc-arm926.S and proc-xsc3.S * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/ptrace.h> #include "proc-macros.S" /* * This is the maximum size of an area which will be flushed. If the * area is larger than this, then we flush the whole cache. */ #define CACHE_DLIMIT 32768 /* * The cache line size of the L1 D cache. */ #define CACHE_DLINESIZE 32 /* * cpu_mohawk_proc_init() */ ENTRY(cpu_mohawk_proc_init) ret lr /* * cpu_mohawk_proc_fin() */ ENTRY(cpu_mohawk_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1800 @ ...iz........... bic r0, r0, #0x0006 @ .............ca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_mohawk_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * loc: location to jump to for soft reset * * (same as arm926) */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_mohawk_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x0007 @ .............cam bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_mohawk_reset) .popsection /* * cpu_mohawk_do_idle() * * Called with IRQs disabled */ .align 5 ENTRY(cpu_mohawk_do_idle) mov r0, #0 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer mcr p15, 0, r0, c7, c0, 4 @ wait for interrupt ret lr /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(mohawk_flush_icache_all) mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache ret lr ENDPROC(mohawk_flush_icache_all) /* * flush_user_cache_all() * * Clean and invalidate all cache entries in a particular * address space. */ ENTRY(mohawk_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. */ ENTRY(mohawk_flush_kern_cache_all) mov r2, #VM_EXEC mov ip, #0 __flush_whole_cache: mcr p15, 0, ip, c7, c14, 0 @ clean & invalidate all D cache tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcrne p15, 0, ip, c7, c10, 0 @ drain write buffer ret lr /* * flush_user_cache_range(start, end, flags) * * Clean and invalidate a range of cache entries in the * specified address range. 
* * - start - start address (inclusive) * - end - end address (exclusive) * - flags - vm_flags describing address space * * (same as arm926) */ ENTRY(mohawk_flush_user_cache_range) mov ip, #0 sub r3, r1, r0 @ calculate total size cmp r3, #CACHE_DLIMIT bgt __flush_whole_cache 1: tst r2, #VM_EXEC mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(mohawk_coherent_kern_range) /* FALLTHROUGH */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address * * (same as arm926) */ ENTRY(mohawk_coherent_user_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB mov r0, #0 ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size */ ENTRY(mohawk_flush_kern_dcache_area) add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ mohawk_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_clean_range(start, end) * * Clean the specified virtual address range. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ mohawk_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. 
* * - start - virtual start address * - end - virtual end address */ ENTRY(mohawk_dma_flush_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(mohawk_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq mohawk_dma_clean_range bcs mohawk_dma_inv_range b mohawk_dma_flush_range ENDPROC(mohawk_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(mohawk_dma_unmap_area) ret lr ENDPROC(mohawk_dma_unmap_area) .globl mohawk_flush_kern_cache_louis .equ mohawk_flush_kern_cache_louis, mohawk_flush_kern_cache_all @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions mohawk ENTRY(cpu_mohawk_dcache_clean_area) 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE subs r1, r1, #CACHE_DLINESIZE bhi 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * cpu_mohawk_switch_mm(pgd) * * Set the translation base pointer to be as described by pgd. * * pgd: new page tables */ .align 5 ENTRY(cpu_mohawk_switch_mm) mov ip, #0 mcr p15, 0, ip, c7, c14, 0 @ clean & invalidate all D cache mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache mcr p15, 0, ip, c7, c10, 4 @ drain WB orr r0, r0, #0x18 @ cache the page table in L2 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs ret lr /* * cpu_mohawk_set_pte_ext(ptep, pte, ext) * * Set a PTE and flush it out */ .align 5 ENTRY(cpu_mohawk_set_pte_ext) #ifdef CONFIG_MMU armv3_set_pte_ext mov r0, r0 mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr #endif .globl cpu_mohawk_suspend_size .equ cpu_mohawk_suspend_size, 4 * 6 #ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_mohawk_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode mrc p15, 0, r5, c15, c1, 0 @ CP access reg mrc p15, 0, r6, c13, c0, 0 @ PID mrc p15, 0, r7, c3, c0, 0 @ domain ID mrc p15, 0, r8, c1, c0, 1 @ auxiliary control reg mrc p15, 0, r9, c1, c0, 0 @ control reg bic r4, r4, #2 @ clear frequency change bit stmia r0, {r4 - r9} @ store cp regs ldmia sp!, {r4 - r9, pc} ENDPROC(cpu_mohawk_do_suspend) ENTRY(cpu_mohawk_do_resume) ldmia r0, {r4 - r9} @ load cp regs mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB mcr p15, 0, ip, c7, c10, 4 @ drain write (&fill) buffer mcr p15, 0, ip, c7, c5, 4 @ flush prefetch buffer mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs mcr p14, 0, r4, c6, c0, 0 @ clock configuration, turbo mode. 
mcr p15, 0, r5, c15, c1, 0 @ CP access reg mcr p15, 0, r6, c13, c0, 0 @ PID mcr p15, 0, r7, c3, c0, 0 @ domain ID orr r1, r1, #0x18 @ cache the page table in L2 mcr p15, 0, r1, c2, c0, 0 @ translation table base addr mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg mov r0, r9 @ control register b cpu_resume_mmu ENDPROC(cpu_mohawk_do_resume) #endif .type __mohawk_setup, #function __mohawk_setup: mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches mcr p15, 0, r0, c7, c10, 4 @ drain write buffer mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs orr r4, r4, #0x18 @ cache the page table in L2 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer mov r0, #0 @ don't allow CP access mcr p15, 0, r0, c15, c1, 0 @ write CP access register adr r5, mohawk_crval ldmia r5, {r5, r6} mrc p15, 0, r0, c1, c0 @ get control register bic r0, r0, r5 orr r0, r0, r6 ret lr .size __mohawk_setup, . - __mohawk_setup /* * R * .RVI ZFRS BLDP WCAM * .011 1001 ..00 0101 * */ .type mohawk_crval, #object mohawk_crval: crval clear=0x00007f3f, mmuset=0x00003905, ucset=0x00001134 __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions mohawk, dabort=v5t_early_abort, pabort=legacy_pabort .section ".rodata" string cpu_arch_name, "armv5te" string cpu_elf_name, "v5" string cpu_mohawk_name, "Marvell 88SV331x" .align .section ".proc.info.init", #alloc .type __88sv331x_proc_info,#object __88sv331x_proc_info: .long 0x56158000 @ Marvell 88SV331x (MOHAWK) .long 0xfffff000 .long PMD_TYPE_SECT | \ PMD_SECT_BUFFERABLE | \ PMD_SECT_CACHEABLE | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __mohawk_setup, __88sv331x_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP .long cpu_mohawk_name .long mohawk_processor_functions .long v4wbi_tlb_fns .long v4wb_user_fns .long mohawk_cache_fns .size __88sv331x_proc_info, . - __88sv331x_proc_info
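mohawk_dma_map_area picks the cache operation from the DMA direction with a single compare: equal to DMA_TO_DEVICE means clean, higher (the bcs case, i.e. DMA_FROM_DEVICE) means invalidate, and anything lower (DMA_BIDIRECTIONAL) falls through to clean+invalidate. A hedged C restatement of that decision, using the standard enum dma_data_direction values:

#include <stdio.h>

/* Values match enum dma_data_direction in the kernel headers. */
enum { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2 };

static const char *map_op(int dir)
{
    /* Same decision tree as the cmp/beq/bcs sequence in mohawk_dma_map_area */
    if (dir == DMA_TO_DEVICE)
        return "clean (write back dirty lines before the device reads)";
    if (dir > DMA_TO_DEVICE)   /* the "bcs" case: DMA_FROM_DEVICE */
        return "invalidate (discard stale lines before the device writes)";
    return "clean+invalidate (DMA_BIDIRECTIONAL)";
}

int main(void)
{
    for (int d = DMA_BIDIRECTIONAL; d <= DMA_FROM_DEVICE; d++)
        printf("dir=%d -> %s\n", d, map_op(d));
    return 0;
}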
AirFortressIlikara/LS2K0300-linux-4.19
1,065
arch/arm/mm/abort-ev5tj.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> #include <asm/assembler.h> #include "abort-macro.S" /* * Function: v5tj_early_abort * * Params : r2 = pt_regs * : r4 = aborted context pc * : r5 = aborted context psr * * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current aborted instruction. * Note: we read user space. This means we might cause a data * abort here if the I-TLB and D-TLB aren't seeing the same * picture. Unfortunately, this does happen. We live with it. */ .align 5 ENTRY(v5tj_early_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR tst r5, #PSR_J_BIT @ Java? bne do_DataAbort do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 ldreq r3, [r4] @ read aborted ARM instruction uaccess_disable ip @ disable userspace access teq_ldrd tmp=ip, insn=r3 @ insn was LDRD? beq do_DataAbort @ yes tst r3, #1 << 20 @ L = 0 -> write orreq r1, r1, #1 << 11 @ yes. b do_DataAbort
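v5tj_early_abort decides read vs. write by testing bit 20 (the L bit) of the aborted load/store instruction and, for a store, setting bit 11 of the FSR before calling do_DataAbort. A small stand-alone C sketch of that bit test; the helper name and the two example encodings are illustrative only:

#include <stdio.h>
#include <stdint.h>

/* Same test as "tst r3, #1 << 20" / "orreq r1, r1, #1 << 11": for an ARM
 * load/store, bit 20 (L) is 0 for a store, so the abort was a write. */
static uint32_t mark_write_fault(uint32_t fsr, uint32_t insn)
{
    if ((insn & (1u << 20)) == 0)
        fsr |= 1u << 11;        /* FSR bit 11: faulting access was a write */
    return fsr;
}

int main(void)
{
    uint32_t str = 0xe5801000;  /* STR r1, [r0]  (bit 20 clear) */
    uint32_t ldr = 0xe5901000;  /* LDR r1, [r0]  (bit 20 set)   */
    printf("STR -> fsr=%#x\n", mark_write_fault(0, str));
    printf("LDR -> fsr=%#x\n", mark_write_fault(0, ldr));
    return 0;
}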
AirFortressIlikara/LS2K0300-linux-4.19
1,853
arch/arm/mm/tlb-fa.S
/* * linux/arch/arm/mm/tlb-fa.S * * Copyright (C) 2005 Faraday Corp. * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt> * * Based on tlb-v4wbi.S: * Copyright (C) 1997-2002 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * ARM architecture version 4, Faraday variation. * This assumes a unified TLB, with a write buffer and branch target buffer (BTB) * * Processors: FA520 FA526 FA626 */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/tlbflush.h> #include "proc-macros.S" /* * flush_user_tlb_range(start, end, mm) * * Invalidate a range of TLB entries in the specified address space. * * - start - range start address * - end - range end address * - mm - mm_struct describing address space */ .align 4 ENTRY(fa_flush_user_tlb_range) vma_vm_mm ip, r2 act_mm r3 @ get current->active_mm eors r3, ip, r3 @ == mm ? retne lr @ no, we don't do anything mov r3, #0 mcr p15, 0, r3, c7, c10, 4 @ drain WB bic r0, r0, #0x0ff bic r0, r0, #0xf00 1: mcr p15, 0, r0, c8, c7, 1 @ invalidate UTLB entry add r0, r0, #PAGE_SZ cmp r0, r1 blo 1b mcr p15, 0, r3, c7, c10, 4 @ data write barrier ret lr ENTRY(fa_flush_kern_tlb_range) mov r3, #0 mcr p15, 0, r3, c7, c10, 4 @ drain WB bic r0, r0, #0x0ff bic r0, r0, #0xf00 1: mcr p15, 0, r0, c8, c7, 1 @ invalidate UTLB entry add r0, r0, #PAGE_SZ cmp r0, r1 blo 1b mcr p15, 0, r3, c7, c10, 4 @ data write barrier mcr p15, 0, r3, c7, c5, 4 @ prefetch flush (isb) ret lr __INITDATA /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ define_tlb_functions fa, fa_tlb_flags
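fa_flush_user_tlb_range rounds the start address down to a page boundary (the two bic instructions clear the low 12 bits) and then issues one UTLB invalidate per page up to the exclusive end. A quick C model of that iteration, assuming 4 KiB pages for PAGE_SZ:

#include <stdio.h>

#define PAGE_SZ 4096u   /* 4 KiB pages, as assumed by the two bic instructions */

int main(void)
{
    /* Start is rounded down to a page boundary (bic #0x0ff then bic #0xf00
     * clears bits 0..11), then one "invalidate UTLB entry" per page. */
    unsigned long start = 0x8000123, end = 0x8003000;   /* example range */
    unsigned long ops = 0;
    for (unsigned long va = start & ~(unsigned long)(PAGE_SZ - 1); va < end; va += PAGE_SZ)
        ops++;
    printf("%lu TLB invalidate operations\n", ops);
    return 0;
}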
AirFortressIlikara/LS2K0300-linux-4.19
4,764
arch/arm/mm/cache-v4wt.S
/* * linux/arch/arm/mm/cache-v4wt.S * * Copyright (C) 1997-2002 Russell king * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * ARMv4 write through cache operations support. * * We assume that the write buffer is not enabled. */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/page.h> #include "proc-macros.S" /* * The size of one data cache line. */ #define CACHE_DLINESIZE 32 /* * The number of data cache segments. */ #define CACHE_DSEGMENTS 8 /* * The number of lines in a cache segment. */ #define CACHE_DENTRIES 64 /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual * cache line maintenance instructions. * * *** This needs benchmarking */ #define CACHE_DLIMIT 16384 /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(v4wt_flush_icache_all) mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache ret lr ENDPROC(v4wt_flush_icache_all) /* * flush_user_cache_all() * * Invalidate all cache entries in a particular address * space. */ ENTRY(v4wt_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. */ ENTRY(v4wt_flush_kern_cache_all) mov r2, #VM_EXEC mov ip, #0 __flush_whole_cache: tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache ret lr /* * flush_user_cache_range(start, end, flags) * * Clean and invalidate a range of cache entries in the specified * address space. * * - start - start address (inclusive, page aligned) * - end - end address (exclusive, page aligned) * - flags - vma_area_struct flags describing address space */ ENTRY(v4wt_flush_user_cache_range) sub r3, r1, r0 @ calculate total size cmp r3, #CACHE_DLIMIT bhs __flush_whole_cache 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry tst r2, #VM_EXEC mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(v4wt_coherent_kern_range) /* FALLTRHOUGH */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(v4wt_coherent_user_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mov r0, #0 ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size */ ENTRY(v4wt_flush_kern_dcache_area) mov r2, #0 mcr p15, 0, r2, c7, c5, 0 @ invalidate I cache add r1, r0, r1 /* fallthrough */ /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. 
* * - start - virtual start address * - end - virtual end address */ v4wt_dma_inv_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. * * - start - virtual start address * - end - virtual end address */ .globl v4wt_dma_flush_range .equ v4wt_dma_flush_range, v4wt_dma_inv_range /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(v4wt_dma_unmap_area) add r1, r1, r0 teq r2, #DMA_TO_DEVICE bne v4wt_dma_inv_range /* FALLTHROUGH */ /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(v4wt_dma_map_area) ret lr ENDPROC(v4wt_dma_unmap_area) ENDPROC(v4wt_dma_map_area) .globl v4wt_flush_kern_cache_louis .equ v4wt_flush_kern_cache_louis, v4wt_flush_kern_cache_all __INITDATA @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions v4wt
AirFortressIlikara/LS2K0300-linux-4.19
12,875
arch/arm/mm/proc-arm1020.S
/* * linux/arch/arm/mm/proc-arm1020.S: MMU functions for ARM1020 * * Copyright (C) 2000 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. * hacked for non-paged-MM by Hyok S. Choi, 2003. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * These are the low level assembler for performing cache and TLB * functions on the arm1020. */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/pgtable.h> #include <asm/ptrace.h> #include "proc-macros.S" /* * This is the maximum size of an area which will be invalidated * using the single invalidate entry instructions. Anything larger * than this, and we go for the whole cache. * * This value should be chosen such that we choose the cheapest * alternative. */ #define MAX_AREA_SIZE 32768 /* * The size of one data cache line. */ #define CACHE_DLINESIZE 32 /* * The number of data cache segments. */ #define CACHE_DSEGMENTS 16 /* * The number of lines in a cache segment. */ #define CACHE_DENTRIES 64 /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual * cache line maintenance instructions. */ #define CACHE_DLIMIT 32768 .text /* * cpu_arm1020_proc_init() */ ENTRY(cpu_arm1020_proc_init) ret lr /* * cpu_arm1020_proc_fin() */ ENTRY(cpu_arm1020_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_arm1020_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * loc: location to jump to for soft reset */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_arm1020_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB #ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_arm1020_reset) .popsection /* * cpu_arm1020_do_idle() */ .align 5 ENTRY(cpu_arm1020_do_idle) mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt ret lr /* ================================= CACHE ================================ */ .align 5 /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(arm1020_flush_icache_all) #ifndef CONFIG_CPU_ICACHE_DISABLE mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache #endif ret lr ENDPROC(arm1020_flush_icache_all) /* * flush_user_cache_all() * * Invalidate all cache entries in a particular address * space. 
*/ ENTRY(arm1020_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. */ ENTRY(arm1020_flush_kern_cache_all) mov r2, #VM_EXEC mov ip, #0 __flush_whole_cache: #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, ip, c7, c10, 4 @ drain WB mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries 2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index mcr p15, 0, ip, c7, c10, 4 @ drain WB subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 5 bcs 1b @ segments 15 to 0 #endif tst r2, #VM_EXEC #ifndef CONFIG_CPU_ICACHE_DISABLE mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache #endif mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * flush_user_cache_range(start, end, flags) * * Invalidate a range of cache entries in the specified * address space. * * - start - start address (inclusive) * - end - end address (exclusive) * - flags - vm_flags for this space */ ENTRY(arm1020_flush_user_cache_range) mov ip, #0 sub r3, r1, r0 @ calculate total size cmp r3, #CACHE_DLIMIT bhs __flush_whole_cache #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, ip, c7, c10, 4 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry mcr p15, 0, ip, c7, c10, 4 @ drain WB add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif tst r2, #VM_EXEC #ifndef CONFIG_CPU_ICACHE_DISABLE mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache #endif mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm1020_coherent_kern_range) /* FALLTRHOUGH */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm1020_coherent_user_range) mov ip, #0 bic r0, r0, #CACHE_DLINESIZE - 1 mcr p15, 0, ip, c7, c10, 4 1: #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, ip, c7, c10, 4 @ drain WB #endif #ifndef CONFIG_CPU_ICACHE_DISABLE mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry #endif add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, ip, c7, c10, 4 @ drain WB mov r0, #0 ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size */ ENTRY(arm1020_flush_kern_dcache_area) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry mcr p15, 0, ip, c7, c10, 4 @ drain WB add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. 
* * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm1020_dma_inv_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, ip, c7, c10, 4 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry mcrne p15, 0, ip, c7, c10, 4 @ drain WB tst r1, #CACHE_DLINESIZE - 1 mcrne p15, 0, ip, c7, c10, 4 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry mcrne p15, 0, ip, c7, c10, 4 @ drain WB 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_clean_range(start, end) * * Clean the specified virtual address range. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm1020_dma_clean_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, ip, c7, c10, 4 @ drain WB add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm1020_dma_flush_range) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 mcr p15, 0, ip, c7, c10, 4 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry mcr p15, 0, ip, c7, c10, 4 @ drain WB add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm1020_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq arm1020_dma_clean_range bcs arm1020_dma_inv_range b arm1020_dma_flush_range ENDPROC(arm1020_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm1020_dma_unmap_area) ret lr ENDPROC(arm1020_dma_unmap_area) .globl arm1020_flush_kern_cache_louis .equ arm1020_flush_kern_cache_louis, arm1020_flush_kern_cache_all @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions arm1020 .align 5 ENTRY(cpu_arm1020_dcache_clean_area) #ifndef CONFIG_CPU_DCACHE_DISABLE mov ip, #0 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, ip, c7, c10, 4 @ drain WB add r0, r0, #CACHE_DLINESIZE subs r1, r1, #CACHE_DLINESIZE bhi 1b #endif ret lr /* =============================== PageTable ============================== */ /* * cpu_arm1020_switch_mm(pgd) * * Set the translation base pointer to be as described by pgd. 
* * pgd: new page tables */ .align 5 ENTRY(cpu_arm1020_switch_mm) #ifdef CONFIG_MMU #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, r3, c7, c10, 4 mov r1, #0xF @ 16 segments 1: mov r3, #0x3F @ 64 entries 2: mov ip, r3, LSL #26 @ shift up entry orr ip, ip, r1, LSL #5 @ shift in/up index mcr p15, 0, ip, c7, c14, 2 @ Clean & Inval DCache entry mov ip, #0 mcr p15, 0, ip, c7, c10, 4 subs r3, r3, #1 cmp r3, #0 bge 2b @ entries 3F to 0 subs r1, r1, #1 cmp r1, #0 bge 1b @ segments 15 to 0 #endif mov r1, #0 #ifndef CONFIG_CPU_ICACHE_DISABLE mcr p15, 0, r1, c7, c5, 0 @ invalidate I cache #endif mcr p15, 0, r1, c7, c10, 4 @ drain WB mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs #endif /* CONFIG_MMU */ ret lr /* * cpu_arm1020_set_pte(ptep, pte) * * Set a PTE and flush it out */ .align 5 ENTRY(cpu_arm1020_set_pte_ext) #ifdef CONFIG_MMU armv3_set_pte_ext mov r0, r0 #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, r0, c7, c10, 4 mcr p15, 0, r0, c7, c10, 1 @ clean D entry #endif mcr p15, 0, r0, c7, c10, 4 @ drain WB #endif /* CONFIG_MMU */ ret lr .type __arm1020_setup, #function __arm1020_setup: mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 #endif adr r5, arm1020_crval ldmia r5, {r5, r6} mrc p15, 0, r0, c1, c0 @ get control register v4 bic r0, r0, r5 orr r0, r0, r6 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN orr r0, r0, #0x4000 @ .R.. .... .... .... #endif ret lr .size __arm1020_setup, . - __arm1020_setup /* * R * .RVI ZFRS BLDP WCAM * .011 1001 ..11 0101 */ .type arm1020_crval, #object arm1020_crval: crval clear=0x0000593f, mmuset=0x00003935, ucset=0x00001930 __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions arm1020, dabort=v4t_early_abort, pabort=legacy_pabort .section ".rodata" string cpu_arch_name, "armv5t" string cpu_elf_name, "v5" .type cpu_arm1020_name, #object cpu_arm1020_name: .ascii "ARM1020" #ifndef CONFIG_CPU_ICACHE_DISABLE .ascii "i" #endif #ifndef CONFIG_CPU_DCACHE_DISABLE .ascii "d" #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH .ascii "(wt)" #else .ascii "(wb)" #endif #endif #ifndef CONFIG_CPU_BPREDICT_DISABLE .ascii "B" #endif #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN .ascii "RR" #endif .ascii "\0" .size cpu_arm1020_name, . - cpu_arm1020_name .align .section ".proc.info.init", #alloc .type __arm1020_proc_info,#object __arm1020_proc_info: .long 0x4104a200 @ ARM 1020T (Architecture v5T) .long 0xff0ffff0 .long PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __arm1020_setup, __arm1020_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB .long cpu_arm1020_name .long arm1020_processor_functions .long v4wbi_tlb_fns .long v4wb_user_fns .long arm1020_cache_fns .size __arm1020_proc_info, . - __arm1020_proc_info
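cpu_arm1020_switch_mm cleans the D-cache by index, building each operand as (entry << 26) | (segment << 5) and walking 64 entries in each of 16 segments. A stand-alone C sketch that reproduces the same enumeration and operation count:

#include <stdio.h>

int main(void)
{
    /* Mirror of the nested r1/r3 loops: 16 segments (0xF..0), 64 entries
     * (0x3F..0), each combined into the "clean & invalidate by index"
     * operand exactly as the assembly does. */
    unsigned int ops = 0;
    for (int seg = 0xF; seg >= 0; seg--)
        for (int entry = 0x3F; entry >= 0; entry--) {
            unsigned int val = ((unsigned int)entry << 26) | ((unsigned int)seg << 5);
            (void)val;   /* would be written with: mcr p15, 0, ip, c7, c14, 2 */
            ops++;
        }
    printf("%u cache index operations (16 segments x 64 entries)\n", ops);
    return 0;
}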
AirFortressIlikara/LS2K0300-linux-4.19
13,107
arch/arm/mm/cache-v7.S
/* * linux/arch/arm/mm/cache-v7.S * * Copyright (C) 2001 Deep Blue Solutions Ltd. * Copyright (C) 2005 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This is the "shell" of the ARMv7 processor support. */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/errno.h> #include <asm/unwind.h> #include <asm/hardware/cache-b15-rac.h> #include "proc-macros.S" /* * The secondary kernel init calls v7_flush_dcache_all before it enables * the L1; however, the L1 comes out of reset in an undefined state, so * the clean + invalidate performed by v7_flush_dcache_all causes a bunch * of cache lines with uninitialized data and uninitialized tags to get * written out to memory, which does really unpleasant things to the main * processor. We fix this by performing an invalidate, rather than a * clean + invalidate, before jumping into the kernel. * * This function is cloned from arch/arm/mach-tegra/headsmp.S, and needs * to be called for both secondary cores startup and primary core resume * procedures. */ ENTRY(v7_invalidate_l1) mov r0, #0 mcr p15, 2, r0, c0, c0, 0 mrc p15, 1, r0, c0, c0, 0 movw r1, #0x7fff and r2, r1, r0, lsr #13 movw r1, #0x3ff and r3, r1, r0, lsr #3 @ NumWays - 1 add r2, r2, #1 @ NumSets and r0, r0, #0x7 add r0, r0, #4 @ SetShift clz r1, r3 @ WayShift add r4, r3, #1 @ NumWays 1: sub r2, r2, #1 @ NumSets-- mov r3, r4 @ Temp = NumWays 2: subs r3, r3, #1 @ Temp-- mov r5, r3, lsl r1 mov r6, r2, lsl r0 orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift) mcr p15, 0, r5, c7, c6, 2 bgt 2b cmp r2, #0 bgt 1b dsb st isb ret lr ENDPROC(v7_invalidate_l1) /* * v7_flush_icache_all() * * Flush the whole I-cache. * * Registers: * r0 - set to 0 */ ENTRY(v7_flush_icache_all) mov r0, #0 ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate ret lr ENDPROC(v7_flush_icache_all) /* * v7_flush_dcache_louis() * * Flush the D-cache up to the Level of Unification Inner Shareable * * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode) */ ENTRY(v7_flush_dcache_louis) dmb @ ensure ordering with previous memory accesses mrc p15, 1, r0, c0, c0, 1 @ read clidr, r0 = clidr ALT_SMP(mov r3, r0, lsr #20) @ move LoUIS into position ALT_UP( mov r3, r0, lsr #26) @ move LoUU into position ands r3, r3, #7 << 1 @ extract LoU*2 field from clidr bne start_flush_levels @ LoU != 0, start flushing #ifdef CONFIG_ARM_ERRATA_643719 ALT_SMP(mrc p15, 0, r2, c0, c0, 0) @ read main ID register ALT_UP( ret lr) @ LoUU is zero, so nothing to do movw r1, #:lower16:(0x410fc090 >> 4) @ ID of ARM Cortex A9 r0p? movt r1, #:upper16:(0x410fc090 >> 4) teq r1, r2, lsr #4 @ test for errata affected core and if so... moveq r3, #1 << 1 @ fix LoUIS value beq start_flush_levels @ start flushing cache levels #endif ret lr ENDPROC(v7_flush_dcache_louis) /* * v7_flush_dcache_all() * * Flush the whole D-cache. 
* * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode) * * - mm - mm_struct describing address space */ ENTRY(v7_flush_dcache_all) dmb @ ensure ordering with previous memory accesses mrc p15, 1, r0, c0, c0, 1 @ read clidr mov r3, r0, lsr #23 @ move LoC into position ands r3, r3, #7 << 1 @ extract LoC*2 from clidr beq finished @ if loc is 0, then no need to clean start_flush_levels: mov r10, #0 @ start clean at cache level 0 flush_levels: add r2, r10, r10, lsr #1 @ work out 3x current cache level mov r1, r0, lsr r2 @ extract cache type bits from clidr and r1, r1, #7 @ mask of the bits for current cache only cmp r1, #2 @ see what cache we have at this level blt skip @ skip if no cache, or just i-cache #ifdef CONFIG_PREEMPT save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic #endif mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr isb @ isb to sych the new cssr&csidr mrc p15, 1, r1, c0, c0, 0 @ read the new csidr #ifdef CONFIG_PREEMPT restore_irqs_notrace r9 #endif and r2, r1, #7 @ extract the length of the cache lines add r2, r2, #4 @ add 4 (line length offset) movw r4, #0x3ff ands r4, r4, r1, lsr #3 @ find maximum number on the way size clz r5, r4 @ find bit position of way size increment movw r7, #0x7fff ands r7, r7, r1, lsr #13 @ extract max number of the index size loop1: mov r9, r7 @ create working copy of max index loop2: ARM( orr r11, r10, r4, lsl r5 ) @ factor way and cache number into r11 THUMB( lsl r6, r4, r5 ) THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11 ARM( orr r11, r11, r9, lsl r2 ) @ factor index number into r11 THUMB( lsl r6, r9, r2 ) THUMB( orr r11, r11, r6 ) @ factor index number into r11 mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way subs r9, r9, #1 @ decrement the index bge loop2 subs r4, r4, #1 @ decrement the way bge loop1 skip: add r10, r10, #2 @ increment cache number cmp r3, r10 bgt flush_levels finished: mov r10, #0 @ switch back to cache level 0 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr dsb st isb ret lr ENDPROC(v7_flush_dcache_all) /* * v7_flush_cache_all() * * Flush the entire cache system. * The data cache flush is now achieved using atomic clean / invalidates * working outwards from L1 cache. This is done using Set/Way based cache * maintenance instructions. * The instruction cache can still be invalidated back to the point of * unification in a single instruction. * */ ENTRY(v7_flush_kern_cache_all) ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} ) THUMB( stmfd sp!, {r4-r7, r9-r11, lr} ) bl v7_flush_dcache_all mov r0, #0 ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} ) THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} ) ret lr ENDPROC(v7_flush_kern_cache_all) /* * v7_flush_kern_cache_louis(void) * * Flush the data cache up to Level of Unification Inner Shareable. * Invalidate the I-cache to the point of unification. 
*/ ENTRY(v7_flush_kern_cache_louis) ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} ) THUMB( stmfd sp!, {r4-r7, r9-r11, lr} ) bl v7_flush_dcache_louis mov r0, #0 ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} ) THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} ) ret lr ENDPROC(v7_flush_kern_cache_louis) /* * v7_flush_cache_all() * * Flush all TLB entries in a particular address space * * - mm - mm_struct describing address space */ ENTRY(v7_flush_user_cache_all) /*FALLTHROUGH*/ /* * v7_flush_cache_range(start, end, flags) * * Flush a range of TLB entries in the specified address space. * * - start - start address (may not be aligned) * - end - end address (exclusive, may not be aligned) * - flags - vm_area_struct flags describing address space * * It is assumed that: * - we have a VIPT cache. */ ENTRY(v7_flush_user_cache_range) ret lr ENDPROC(v7_flush_user_cache_all) ENDPROC(v7_flush_user_cache_range) /* * v7_coherent_kern_range(start,end) * * Ensure that the I and D caches are coherent within specified * region. This is typically used when code has been written to * a memory region, and will be executed. * * - start - virtual start address of region * - end - virtual end address of region * * It is assumed that: * - the Icache does not read data from the write buffer */ ENTRY(v7_coherent_kern_range) /* FALLTHROUGH */ /* * v7_coherent_user_range(start,end) * * Ensure that the I and D caches are coherent within specified * region. This is typically used when code has been written to * a memory region, and will be executed. * * - start - virtual start address of region * - end - virtual end address of region * * It is assumed that: * - the Icache does not read data from the write buffer */ ENTRY(v7_coherent_user_range) UNWIND(.fnstart ) dcache_line_size r2, r3 sub r3, r2, #1 bic r12, r0, r3 #ifdef CONFIG_ARM_ERRATA_764369 ALT_SMP(W(dsb)) ALT_UP(W(nop)) #endif 1: USER( mcr p15, 0, r12, c7, c11, 1 ) @ clean D line to the point of unification add r12, r12, r2 cmp r12, r1 blo 1b dsb ishst icache_line_size r2, r3 sub r3, r2, #1 bic r12, r0, r3 2: USER( mcr p15, 0, r12, c7, c5, 1 ) @ invalidate I line add r12, r12, r2 cmp r12, r1 blo 2b mov r0, #0 ALT_SMP(mcr p15, 0, r0, c7, c1, 6) @ invalidate BTB Inner Shareable ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB dsb ishst isb ret lr /* * Fault handling for the cache operation above. If the virtual address in r0 * isn't mapped, fail with -EFAULT. */ 9001: #ifdef CONFIG_ARM_ERRATA_775420 dsb #endif mov r0, #-EFAULT ret lr UNWIND(.fnend ) ENDPROC(v7_coherent_kern_range) ENDPROC(v7_coherent_user_range) /* * v7_flush_kern_dcache_area(void *addr, size_t size) * * Ensure that the data held in the page kaddr is written back * to the page in question. * * - addr - kernel address * - size - region size */ ENTRY(v7_flush_kern_dcache_area) dcache_line_size r2, r3 add r1, r0, r1 sub r3, r2, #1 bic r0, r0, r3 #ifdef CONFIG_ARM_ERRATA_764369 ALT_SMP(W(dsb)) ALT_UP(W(nop)) #endif 1: mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line add r0, r0, r2 cmp r0, r1 blo 1b dsb st ret lr ENDPROC(v7_flush_kern_dcache_area) /* * v7_dma_inv_range(start,end) * * Invalidate the data cache within the specified region; we will * be performing a DMA operation in this region and we want to * purge old data in the cache. 
* * - start - virtual start address of region * - end - virtual end address of region */ v7_dma_inv_range: dcache_line_size r2, r3 sub r3, r2, #1 tst r0, r3 bic r0, r0, r3 #ifdef CONFIG_ARM_ERRATA_764369 ALT_SMP(W(dsb)) ALT_UP(W(nop)) #endif mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line addne r0, r0, r2 tst r1, r3 bic r1, r1, r3 mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D / U line cmp r0, r1 1: mcrlo p15, 0, r0, c7, c6, 1 @ invalidate D / U line addlo r0, r0, r2 cmplo r0, r1 blo 1b dsb st ret lr ENDPROC(v7_dma_inv_range) /* * v7_dma_clean_range(start,end) * - start - virtual start address of region * - end - virtual end address of region */ v7_dma_clean_range: dcache_line_size r2, r3 sub r3, r2, #1 bic r0, r0, r3 #ifdef CONFIG_ARM_ERRATA_764369 ALT_SMP(W(dsb)) ALT_UP(W(nop)) #endif 1: mcr p15, 0, r0, c7, c10, 1 @ clean D / U line add r0, r0, r2 cmp r0, r1 blo 1b dsb st ret lr ENDPROC(v7_dma_clean_range) /* * v7_dma_flush_range(start,end) * - start - virtual start address of region * - end - virtual end address of region */ ENTRY(v7_dma_flush_range) dcache_line_size r2, r3 sub r3, r2, #1 bic r0, r0, r3 #ifdef CONFIG_ARM_ERRATA_764369 ALT_SMP(W(dsb)) ALT_UP(W(nop)) #endif 1: mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line add r0, r0, r2 cmp r0, r1 blo 1b dsb st ret lr ENDPROC(v7_dma_flush_range) /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(v7_dma_map_area) add r1, r1, r0 teq r2, #DMA_FROM_DEVICE beq v7_dma_inv_range b v7_dma_clean_range ENDPROC(v7_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(v7_dma_unmap_area) add r1, r1, r0 teq r2, #DMA_TO_DEVICE bne v7_dma_inv_range ret lr ENDPROC(v7_dma_unmap_area) __INITDATA @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions v7 /* The Broadcom Brahma-B15 read-ahead cache requires some modifications * to the v7_cache_fns, we only override the ones we need */ #ifndef CONFIG_CACHE_B15_RAC globl_equ b15_flush_kern_cache_all, v7_flush_kern_cache_all #endif globl_equ b15_flush_icache_all, v7_flush_icache_all globl_equ b15_flush_kern_cache_louis, v7_flush_kern_cache_louis globl_equ b15_flush_user_cache_all, v7_flush_user_cache_all globl_equ b15_flush_user_cache_range, v7_flush_user_cache_range globl_equ b15_coherent_kern_range, v7_coherent_kern_range globl_equ b15_coherent_user_range, v7_coherent_user_range globl_equ b15_flush_kern_dcache_area, v7_flush_kern_dcache_area globl_equ b15_dma_map_area, v7_dma_map_area globl_equ b15_dma_unmap_area, v7_dma_unmap_area globl_equ b15_dma_flush_range, v7_dma_flush_range define_cache_functions b15
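flush_levels in cache-v7.S derives its set/way operands from CCSIDR: the low 3 bits plus 4 give the set shift (log2 of the line size in bytes), bits [12:3] give NumWays - 1 (whose CLZ is the way shift), and bits [27:13] give NumSets - 1. A C sketch of that decoding with a made-up CCSIDR value describing a 32 KiB, 4-way, 32-byte-line cache; real code reads the register after selecting the level in CSSELR:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t ccsidr    = 0x001FE019;                    /* hypothetical example value */
    unsigned line_log2 = (ccsidr & 7) + 4;              /* set shift, as in the .S file */
    unsigned num_ways  = ((ccsidr >> 3) & 0x3ff) + 1;
    unsigned num_sets  = ((ccsidr >> 13) & 0x7fff) + 1;
    /* The "clz" in flush_levels; num_ways == 1 would need special-casing here,
     * since ARM's clz of 0 is defined but C's __builtin_clz(0) is not. */
    unsigned way_shift = __builtin_clz(num_ways - 1);

    printf("line size %u bytes, %u ways, %u sets\n",
           1u << line_log2, num_ways, num_sets);

    /* One example operand for "clean & invalidate by set/way", level 0,
     * way 1, set 2 -- matching (way << WayShift) | (set << SetShift) | (level << 1). */
    unsigned level = 0, way = 1, set = 2;
    uint32_t op = (way << way_shift) | (set << line_log2) | (level << 1);
    printf("set/way operand: %#x\n", op);
    return 0;
}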
AirFortressIlikara/LS2K0300-linux-4.19
11,116
arch/arm/mm/proc-arm1026.S
/* * linux/arch/arm/mm/proc-arm1026.S: MMU functions for ARM1026EJ-S * * Copyright (C) 2000 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. * hacked for non-paged-MM by Hyok S. Choi, 2003. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * * These are the low level assembler for performing cache and TLB * functions on the ARM1026EJ-S. */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/pgtable.h> #include <asm/ptrace.h> #include "proc-macros.S" /* * This is the maximum size of an area which will be invalidated * using the single invalidate entry instructions. Anything larger * than this, and we go for the whole cache. * * This value should be chosen such that we choose the cheapest * alternative. */ #define MAX_AREA_SIZE 32768 /* * The size of one data cache line. */ #define CACHE_DLINESIZE 32 /* * The number of data cache segments. */ #define CACHE_DSEGMENTS 16 /* * The number of lines in a cache segment. */ #define CACHE_DENTRIES 64 /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual * cache line maintenance instructions. */ #define CACHE_DLIMIT 32768 .text /* * cpu_arm1026_proc_init() */ ENTRY(cpu_arm1026_proc_init) ret lr /* * cpu_arm1026_proc_fin() */ ENTRY(cpu_arm1026_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_arm1026_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * loc: location to jump to for soft reset */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_arm1026_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB #ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_arm1026_reset) .popsection /* * cpu_arm1026_do_idle() */ .align 5 ENTRY(cpu_arm1026_do_idle) mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt ret lr /* ================================= CACHE ================================ */ .align 5 /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(arm1026_flush_icache_all) #ifndef CONFIG_CPU_ICACHE_DISABLE mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache #endif ret lr ENDPROC(arm1026_flush_icache_all) /* * flush_user_cache_all() * * Invalidate all cache entries in a particular address * space. */ ENTRY(arm1026_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. 
*/ ENTRY(arm1026_flush_kern_cache_all) mov r2, #VM_EXEC mov ip, #0 __flush_whole_cache: #ifndef CONFIG_CPU_DCACHE_DISABLE 1: mrc p15, 0, r15, c7, c14, 3 @ test, clean, invalidate bne 1b #endif tst r2, #VM_EXEC #ifndef CONFIG_CPU_ICACHE_DISABLE mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache #endif mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * flush_user_cache_range(start, end, flags) * * Invalidate a range of cache entries in the specified * address space. * * - start - start address (inclusive) * - end - end address (exclusive) * - flags - vm_flags for this space */ ENTRY(arm1026_flush_user_cache_range) mov ip, #0 sub r3, r1, r0 @ calculate total size cmp r3, #CACHE_DLIMIT bhs __flush_whole_cache #ifndef CONFIG_CPU_DCACHE_DISABLE 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif tst r2, #VM_EXEC #ifndef CONFIG_CPU_ICACHE_DISABLE mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache #endif mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm1026_coherent_kern_range) /* FALLTHROUGH */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm1026_coherent_user_range) mov ip, #0 bic r0, r0, #CACHE_DLINESIZE - 1 1: #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, r0, c7, c10, 1 @ clean D entry #endif #ifndef CONFIG_CPU_ICACHE_DISABLE mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry #endif add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, ip, c7, c10, 4 @ drain WB mov r0, #0 ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size */ ENTRY(arm1026_flush_kern_dcache_area) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm1026_dma_inv_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_clean_range(start, end) * * Clean the specified virtual address range. 
* * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm1026_dma_clean_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm1026_dma_flush_range) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm1026_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq arm1026_dma_clean_range bcs arm1026_dma_inv_range b arm1026_dma_flush_range ENDPROC(arm1026_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm1026_dma_unmap_area) ret lr ENDPROC(arm1026_dma_unmap_area) .globl arm1026_flush_kern_cache_louis .equ arm1026_flush_kern_cache_louis, arm1026_flush_kern_cache_all @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions arm1026 .align 5 ENTRY(cpu_arm1026_dcache_clean_area) #ifndef CONFIG_CPU_DCACHE_DISABLE mov ip, #0 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE subs r1, r1, #CACHE_DLINESIZE bhi 1b #endif ret lr /* =============================== PageTable ============================== */ /* * cpu_arm1026_switch_mm(pgd) * * Set the translation base pointer to be as described by pgd. * * pgd: new page tables */ .align 5 ENTRY(cpu_arm1026_switch_mm) #ifdef CONFIG_MMU mov r1, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE 1: mrc p15, 0, r15, c7, c14, 3 @ test, clean, invalidate bne 1b #endif #ifndef CONFIG_CPU_ICACHE_DISABLE mcr p15, 0, r1, c7, c5, 0 @ invalidate I cache #endif mcr p15, 0, r1, c7, c10, 4 @ drain WB mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs #endif ret lr /* * cpu_arm1026_set_pte_ext(ptep, pte, ext) * * Set a PTE and flush it out */ .align 5 ENTRY(cpu_arm1026_set_pte_ext) #ifdef CONFIG_MMU armv3_set_pte_ext mov r0, r0 #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, r0, c7, c10, 1 @ clean D entry #endif #endif /* CONFIG_MMU */ ret lr .type __arm1026_setup, #function __arm1026_setup: mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 mcr p15, 0, r4, c2, c0 @ load page table pointer #endif #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mov r0, #4 @ explicitly disable writeback mcr p15, 7, r0, c15, c0, 0 #endif adr r5, arm1026_crval ldmia r5, {r5, r6} mrc p15, 0, r0, c1, c0 @ get control register v4 bic r0, r0, r5 orr r0, r0, r6 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN orr r0, r0, #0x4000 @ .R.. .... .... .... #endif ret lr .size __arm1026_setup, . 
- __arm1026_setup /* * R * .RVI ZFRS BLDP WCAM * .011 1001 ..11 0101 * */ .type arm1026_crval, #object arm1026_crval: crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001934 __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions arm1026, dabort=v5t_early_abort, pabort=legacy_pabort .section .rodata string cpu_arch_name, "armv5tej" string cpu_elf_name, "v5" .align string cpu_arm1026_name, "ARM1026EJ-S" .align .section ".proc.info.init", #alloc .type __arm1026_proc_info,#object __arm1026_proc_info: .long 0x4106a260 @ ARM 1026EJ-S (v5TEJ) .long 0xff0ffff0 .long PMD_TYPE_SECT | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __arm1026_setup, __arm1026_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA .long cpu_arm1026_name .long arm1026_processor_functions .long v4wbi_tlb_fns .long v4wb_user_fns .long arm1026_cache_fns .size __arm1026_proc_info, . - __arm1026_proc_info
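arm1026_flush_user_cache_range above compares the range size against CACHE_DLIMIT and falls back to the whole-cache path when walking the range line by line would be more expensive. The C sketch below models that cut-off only; it is not kernel code, it borrows the CACHE_DLINESIZE and CACHE_DLIMIT constants defined at the top of the file, and the helper name range_flush_ops is made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define CACHE_DLINESIZE 32
#define CACHE_DLIMIT    32768

/* Number of per-line ops a range flush would issue, or 0 if the
 * whole-cache path would be taken instead. */
static unsigned long range_flush_ops(uint32_t start, uint32_t end)
{
        if (end - start >= CACHE_DLIMIT)
                return 0;               /* cheaper to clean+invalidate the whole D-cache */

        unsigned long ops = 0;
        for (uint32_t mva = start; mva < end; mva += CACHE_DLINESIZE)
                ops++;                  /* one "clean+invalidate D entry" (c7, c14, 1) per step */
        return ops;
}

int main(void)
{
        printf("4 KiB range : %lu line ops\n", range_flush_ops(0x8000, 0x9000));
        printf("64 KiB range: %lu line ops (whole-cache path)\n", range_flush_ops(0x8000, 0x18000));
        return 0;
}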
AirFortressIlikara/LS2K0300-linux-4.19
1,188
arch/arm/mm/abort-macro.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * The ARM LDRD and Thumb LDRSB instructions use bit 20/11 (ARM/Thumb) * differently than every other instruction, so it is set to 0 (write) * even though the instructions are read instructions. This means that * during an abort the instructions will be treated as a write and the * handler will raise a signal from unwriteable locations if they * fault. We have to specifically check for these instructions * from the abort handlers to treat them properly. * */ .macro do_thumb_abort, fsr, pc, psr, tmp tst \psr, #PSR_T_BIT beq not_thumb ldrh \tmp, [\pc] @ Read aborted Thumb instruction uaccess_disable ip @ disable userspace access and \tmp, \tmp, # 0xfe00 @ Mask opcode field cmp \tmp, # 0x5600 @ Is it ldrsb? orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes tst \tmp, #1 << 11 @ L = 0 -> write orreq \fsr, \fsr, #1 << 11 @ yes. b do_DataAbort not_thumb: .endm /* * We check for the following instruction encoding for LDRD. * * [27:25] == 000 * [7:4] == 1101 * [20] == 0 */ .macro teq_ldrd, tmp, insn mov \tmp, #0x0e100000 orr \tmp, #0x000000f0 and \tmp, \insn, \tmp teq \tmp, #0x000000d0 .endm
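Both macros above boil down to a mask-and-compare on the faulting instruction: the Thumb check masks the opcode field with 0xfe00 and compares against 0x5600 (LDRSB), and teq_ldrd masks the ARM instruction with 0x0e1000f0 and compares against 0x000000d0 (LDRD). The C sketch below repeats the same two tests; it is not kernel code, and the two example encodings (0x5678 and 0xe1c200d0) are mine, chosen because they decode to an LDRSB and an LDRD respectively.

#include <stdint.h>
#include <stdio.h>

/* Thumb: bits [15:9] == 0101011 (LDRSB) -- mask 0xfe00, value 0x5600 */
static int is_thumb_ldrsb(uint16_t insn)
{
        return (insn & 0xfe00) == 0x5600;
}

/* ARM: bits [27:25] == 000, [7:4] == 1101, [20] == 0 -- mask 0x0e1000f0, value 0x000000d0 */
static int is_arm_ldrd(uint32_t insn)
{
        return (insn & 0x0e1000f0) == 0x000000d0;
}

int main(void)
{
        printf("0x5678 ldrsb? %d\n", is_thumb_ldrsb(0x5678));       /* ldrsb r0, [r7, r1] -> 1 */
        printf("0xe1c200d0 ldrd? %d\n", is_arm_ldrd(0xe1c200d0));   /* ldrd r0, r1, [r2]  -> 1 */
        return 0;
}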
AirFortressIlikara/LS2K0300-linux-4.19
6,309
arch/arm/mm/cache-v4wb.S
/* * linux/arch/arm/mm/cache-v4wb.S * * Copyright (C) 1997-2002 Russell king * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/memory.h> #include <asm/page.h> #include "proc-macros.S" /* * The size of one data cache line. */ #define CACHE_DLINESIZE 32 /* * The total size of the data cache. */ #if defined(CONFIG_CPU_SA110) # define CACHE_DSIZE 16384 #elif defined(CONFIG_CPU_SA1100) # define CACHE_DSIZE 8192 #else # error Unknown cache size #endif /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual * cache line maintenance instructions. * * Size Clean (ticks) Dirty (ticks) * 4096 21 20 21 53 55 54 * 8192 40 41 40 106 100 102 * 16384 77 77 76 140 140 138 * 32768 150 149 150 214 216 212 <--- * 65536 296 297 296 351 358 361 * 131072 591 591 591 656 657 651 * Whole 132 136 132 221 217 207 <--- */ #define CACHE_DLIMIT (CACHE_DSIZE * 4) .data .align 2 flush_base: .long FLUSH_BASE .text /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(v4wb_flush_icache_all) mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache ret lr ENDPROC(v4wb_flush_icache_all) /* * flush_user_cache_all() * * Clean and invalidate all cache entries in a particular address * space. */ ENTRY(v4wb_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. */ ENTRY(v4wb_flush_kern_cache_all) mov ip, #0 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache __flush_whole_cache: ldr r3, =flush_base ldr r1, [r3, #0] eor r1, r1, #CACHE_DSIZE str r1, [r3, #0] add r2, r1, #CACHE_DSIZE 1: ldr r3, [r1], #32 cmp r1, r2 blo 1b #ifdef FLUSH_BASE_MINICACHE add r2, r2, #FLUSH_BASE_MINICACHE - FLUSH_BASE sub r1, r2, #512 @ only 512 bytes 1: ldr r3, [r1], #32 cmp r1, r2 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain write buffer ret lr /* * flush_user_cache_range(start, end, flags) * * Invalidate a range of cache entries in the specified * address space. * * - start - start address (inclusive, page aligned) * - end - end address (exclusive, page aligned) * - flags - vma_area_struct flags describing address space */ ENTRY(v4wb_flush_user_cache_range) mov ip, #0 sub r3, r1, r0 @ calculate total size tst r2, #VM_EXEC @ executable region? mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache cmp r3, #CACHE_DLIMIT @ total size >= limit? bhs __flush_whole_cache @ flush whole D cache 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size */ ENTRY(v4wb_flush_kern_dcache_area) add r1, r0, r1 /* fall through */ /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. 
* * - start - virtual start address * - end - virtual end address */ ENTRY(v4wb_coherent_kern_range) /* fall through */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(v4wb_coherent_user_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. * * - start - virtual start address * - end - virtual end address */ v4wb_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain write buffer ret lr /* * dma_clean_range(start, end) * * Clean (write back) the specified virtual address range. * * - start - virtual start address * - end - virtual end address */ v4wb_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain write buffer ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. * * - start - virtual start address * - end - virtual end address * * This is actually the same as v4wb_coherent_kern_range() */ .globl v4wb_dma_flush_range .set v4wb_dma_flush_range, v4wb_coherent_kern_range /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(v4wb_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq v4wb_dma_clean_range bcs v4wb_dma_inv_range b v4wb_dma_flush_range ENDPROC(v4wb_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(v4wb_dma_unmap_area) ret lr ENDPROC(v4wb_dma_unmap_area) .globl v4wb_flush_kern_cache_louis .equ v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all __INITDATA @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions v4wb
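v4wb_dma_inv_range above cleans the partially covered cache line at an unaligned start or end before invalidating the range, so that bytes outside the DMA buffer which happen to share a line are written back rather than thrown away. The sketch below prints the sequence of operations that logic produces; it is a model only, not kernel code, and the addresses in main are illustrative.

#include <stdint.h>
#include <stdio.h>

#define CACHE_DLINESIZE 32

/* Print the ops a dma_inv_range-style routine would issue for [start, end). */
static void dma_inv_range_model(uint32_t start, uint32_t end)
{
        if (start & (CACHE_DLINESIZE - 1))
                printf("clean      0x%08x (partial first line)\n",
                       (unsigned)(start & ~(uint32_t)(CACHE_DLINESIZE - 1)));
        if (end & (CACHE_DLINESIZE - 1))
                printf("clean      0x%08x (partial last line)\n",
                       (unsigned)(end & ~(uint32_t)(CACHE_DLINESIZE - 1)));

        for (uint32_t mva = start & ~(uint32_t)(CACHE_DLINESIZE - 1); mva < end; mva += CACHE_DLINESIZE)
                printf("invalidate 0x%08x\n", (unsigned)mva);

        printf("drain write buffer\n");
}

int main(void)
{
        dma_inv_range_model(0x1008, 0x1078);    /* neither end is line aligned */
        return 0;
}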
AirFortressIlikara/LS2K0300-linux-4.19
9,255
arch/arm/mm/proc-arm940.S
/* * linux/arch/arm/mm/arm940.S: utility functions for ARM940T * * Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/pgtable.h> #include <asm/ptrace.h> #include "proc-macros.S" /* ARM940T has a 4KB DCache comprising 256 lines of 4 words */ #define CACHE_DLINESIZE 16 #define CACHE_DSEGMENTS 4 #define CACHE_DENTRIES 64 .text /* * cpu_arm940_proc_init() * cpu_arm940_switch_mm() * * These are not required. */ ENTRY(cpu_arm940_proc_init) ENTRY(cpu_arm940_switch_mm) ret lr /* * cpu_arm940_proc_fin() */ ENTRY(cpu_arm940_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x00001000 @ i-cache bic r0, r0, #0x00000004 @ d-cache mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_arm940_reset(loc) * Params : r0 = address to jump to * Notes : This sets up everything for a reset */ .pushsection .idmap.text, "ax" ENTRY(cpu_arm940_reset) mov ip, #0 mcr p15, 0, ip, c7, c5, 0 @ flush I cache mcr p15, 0, ip, c7, c6, 0 @ flush D cache mcr p15, 0, ip, c7, c10, 4 @ drain WB mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x00000005 @ .............c.p bic ip, ip, #0x00001000 @ i-cache mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_arm940_reset) .popsection /* * cpu_arm940_do_idle() */ .align 5 ENTRY(cpu_arm940_do_idle) mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt ret lr /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(arm940_flush_icache_all) mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache ret lr ENDPROC(arm940_flush_icache_all) /* * flush_user_cache_all() */ ENTRY(arm940_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. */ ENTRY(arm940_flush_kern_cache_all) mov r2, #VM_EXEC /* FALLTHROUGH */ /* * flush_user_cache_range(start, end, flags) * * There is no efficient way to flush a range of cache entries * in the specified address range. Thus, flushes all. * * - start - start address (inclusive) * - end - end address (exclusive) * - flags - vm_flags describing address space */ ENTRY(arm940_flush_user_cache_range) mov ip, #0 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, ip, c7, c6, 0 @ flush D cache #else mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries 2: mcr p15, 0, r3, c7, c14, 2 @ clean/flush D index subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 4 bcs 1b @ segments 3 to 0 #endif tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm940_coherent_kern_range) /* FALLTHROUGH */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. 
* * - start - virtual start address * - end - virtual end address */ ENTRY(arm940_coherent_user_range) /* FALLTHROUGH */ /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size */ ENTRY(arm940_flush_kern_dcache_area) mov r0, #0 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries 2: mcr p15, 0, r3, c7, c14, 2 @ clean/flush D index subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 4 bcs 1b @ segments 7 to 0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_inv_range(start, end) * * There is no efficient way to invalidate a specifid virtual * address range. Thus, invalidates all. * * - start - virtual start address * - end - virtual end address */ arm940_dma_inv_range: mov ip, #0 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries 2: mcr p15, 0, r3, c7, c6, 2 @ flush D entry subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 4 bcs 1b @ segments 7 to 0 mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_clean_range(start, end) * * There is no efficient way to clean a specifid virtual * address range. Thus, cleans all. * * - start - virtual start address * - end - virtual end address */ arm940_dma_clean_range: ENTRY(cpu_arm940_dcache_clean_area) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries 2: mcr p15, 0, r3, c7, c10, 2 @ clean D entry subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 4 bcs 1b @ segments 7 to 0 #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_flush_range(start, end) * * There is no efficient way to clean and invalidate a specifid * virtual address range. 
* * - start - virtual start address * - end - virtual end address */ ENTRY(arm940_dma_flush_range) mov ip, #0 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries 2: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, r3, c7, c14, 2 @ clean/flush D entry #else mcr p15, 0, r3, c7, c6, 2 @ invalidate D entry #endif subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 4 bcs 1b @ segments 7 to 0 mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm940_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq arm940_dma_clean_range bcs arm940_dma_inv_range b arm940_dma_flush_range ENDPROC(arm940_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm940_dma_unmap_area) ret lr ENDPROC(arm940_dma_unmap_area) .globl arm940_flush_kern_cache_louis .equ arm940_flush_kern_cache_louis, arm940_flush_kern_cache_all @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions arm940 .type __arm940_setup, #function __arm940_setup: mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c6, 0 @ invalidate D cache mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c6, c3, 0 @ disable data area 3~7 mcr p15, 0, r0, c6, c4, 0 mcr p15, 0, r0, c6, c5, 0 mcr p15, 0, r0, c6, c6, 0 mcr p15, 0, r0, c6, c7, 0 mcr p15, 0, r0, c6, c3, 1 @ disable instruction area 3~7 mcr p15, 0, r0, c6, c4, 1 mcr p15, 0, r0, c6, c5, 1 mcr p15, 0, r0, c6, c6, 1 mcr p15, 0, r0, c6, c7, 1 mov r0, #0x0000003F @ base = 0, size = 4GB mcr p15, 0, r0, c6, c0, 0 @ set area 0, default mcr p15, 0, r0, c6, c0, 1 ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM ldr r7, =CONFIG_DRAM_SIZE >> 12 @ size of RAM (must be >= 4KB) pr_val r3, r0, r7, #1 mcr p15, 0, r3, c6, c1, 0 @ set area 1, RAM mcr p15, 0, r3, c6, c1, 1 ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH ldr r7, =CONFIG_FLASH_SIZE @ size of FLASH (must be >= 4KB) pr_val r3, r0, r6, #1 mcr p15, 0, r3, c6, c2, 0 @ set area 2, ROM/FLASH mcr p15, 0, r3, c6, c2, 1 mov r0, #0x06 mcr p15, 0, r0, c2, c0, 0 @ Region 1&2 cacheable mcr p15, 0, r0, c2, c0, 1 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mov r0, #0x00 @ disable whole write buffer #else mov r0, #0x02 @ Region 1 write bufferred #endif mcr p15, 0, r0, c3, c0, 0 mov r0, #0x10000 sub r0, r0, #1 @ r0 = 0xffff mcr p15, 0, r0, c5, c0, 0 @ all read/write access mcr p15, 0, r0, c5, c0, 1 mrc p15, 0, r0, c1, c0 @ get control register orr r0, r0, #0x00001000 @ I-cache orr r0, r0, #0x00000005 @ MPU/D-cache ret lr .size __arm940_setup, . - __arm940_setup __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions arm940, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1 .section ".rodata" string cpu_arch_name, "armv4t" string cpu_elf_name, "v4" string cpu_arm940_name, "ARM940T" .align .section ".proc.info.init", #alloc .type __arm940_proc_info,#object __arm940_proc_info: .long 0x41009400 .long 0xff00fff0 .long 0 initfn __arm940_setup, __arm940_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB .long cpu_arm940_name .long arm940_processor_functions .long 0 .long 0 .long arm940_cache_fns .size __arm940_proc_info, . - __arm940_proc_info
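The ARM940T loops above maintain the D-cache by set/way rather than by address: each c7 index word carries the entry number in bits [31:26] and the segment in bits [5:4], and the nested subs/bcs loops count both fields down to zero. The C sketch below walks the same 4 x 64 space and checks that it covers the whole 4 KB D-cache named in the header comment; it is illustrative only, not kernel code.

#include <stdint.h>
#include <stdio.h>

#define CACHE_DLINESIZE  16
#define CACHE_DSEGMENTS  4
#define CACHE_DENTRIES   64

int main(void)
{
        unsigned long lines = 0;

        /* Walk segments 3..0 and entries 63..0, composing the c7 index word:
         * entry index in bits [31:26], segment in bits [5:4]. */
        for (int seg = CACHE_DSEGMENTS - 1; seg >= 0; seg--)
                for (int entry = CACHE_DENTRIES - 1; entry >= 0; entry--) {
                        uint32_t index = ((uint32_t)entry << 26) | ((uint32_t)seg << 4);
                        (void)index;    /* the asm feeds this to "mcr p15, 0, rX, c7, ..., 2" */
                        lines++;
                }

        printf("%lu lines x %d bytes = %lu byte D-cache\n",
               lines, CACHE_DLINESIZE, lines * CACHE_DLINESIZE);
        return 0;
}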
AirFortressIlikara/LS2K0300-linux-4.19
1,932
arch/arm/mm/tlb-v4wb.S
/* * linux/arch/arm/mm/tlbv4wb.S * * Copyright (C) 1997-2002 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * ARM architecture version 4 TLB handling functions. * These assume a split I/D TLBs w/o I TLB entry, with a write buffer. * * Processors: SA110 SA1100 SA1110 */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/tlbflush.h> #include "proc-macros.S" .align 5 /* * v4wb_flush_user_tlb_range(start, end, mm) * * Invalidate a range of TLB entries in the specified address space. * * - start - range start address * - end - range end address * - mm - mm_struct describing address space */ .align 5 ENTRY(v4wb_flush_user_tlb_range) vma_vm_mm ip, r2 act_mm r3 @ get current->active_mm eors r3, ip, r3 @ == mm ? retne lr @ no, we dont do anything vma_vm_flags r2, r2 mcr p15, 0, r3, c7, c10, 4 @ drain WB tst r2, #VM_EXEC mcrne p15, 0, r3, c8, c5, 0 @ invalidate I TLB bic r0, r0, #0x0ff bic r0, r0, #0xf00 1: mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry add r0, r0, #PAGE_SZ cmp r0, r1 blo 1b ret lr /* * v4_flush_kern_tlb_range(start, end) * * Invalidate a range of TLB entries in the specified kernel * address range. * * - start - virtual address (may not be aligned) * - end - virtual address (may not be aligned) */ ENTRY(v4wb_flush_kern_tlb_range) mov r3, #0 mcr p15, 0, r3, c7, c10, 4 @ drain WB bic r0, r0, #0x0ff bic r0, r0, #0xf00 mcr p15, 0, r3, c8, c5, 0 @ invalidate I TLB 1: mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry add r0, r0, #PAGE_SZ cmp r0, r1 blo 1b ret lr __INITDATA /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ define_tlb_functions v4wb, v4wb_tlb_flags
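The pair of bic instructions in both flush routines above clears bits [11:0] of the start address, i.e. rounds it down to a 4 KB page, and the loop then invalidates one D TLB entry per PAGE_SZ step until it passes the end address. A small C sketch of that arithmetic follows; it is not kernel code and the start/end values are illustrative.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096u

int main(void)
{
        uint32_t start = 0xc0012345, end = 0xc0015000;

        /* "bic r0, r0, #0x0ff" + "bic r0, r0, #0xf00" == round down to a page. */
        uint32_t mva = start & ~(PAGE_SZ - 1);

        for (; mva < end; mva += PAGE_SZ)
                printf("invalidate D TLB entry for 0x%08x\n", (unsigned)mva);

        return 0;
}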
AirFortressIlikara/LS2K0300-linux-4.19
11,309
arch/arm/mm/proc-arm1022.S
/* * linux/arch/arm/mm/proc-arm1022.S: MMU functions for ARM1022E * * Copyright (C) 2000 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. * hacked for non-paged-MM by Hyok S. Choi, 2003. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * * These are the low level assembler for performing cache and TLB * functions on the ARM1022E. */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/pgtable.h> #include <asm/ptrace.h> #include "proc-macros.S" /* * This is the maximum size of an area which will be invalidated * using the single invalidate entry instructions. Anything larger * than this, and we go for the whole cache. * * This value should be chosen such that we choose the cheapest * alternative. */ #define MAX_AREA_SIZE 32768 /* * The size of one data cache line. */ #define CACHE_DLINESIZE 32 /* * The number of data cache segments. */ #define CACHE_DSEGMENTS 16 /* * The number of lines in a cache segment. */ #define CACHE_DENTRIES 64 /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual * cache line maintenance instructions. */ #define CACHE_DLIMIT 32768 .text /* * cpu_arm1022_proc_init() */ ENTRY(cpu_arm1022_proc_init) ret lr /* * cpu_arm1022_proc_fin() */ ENTRY(cpu_arm1022_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_arm1022_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * loc: location to jump to for soft reset */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_arm1022_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB #ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_arm1022_reset) .popsection /* * cpu_arm1022_do_idle() */ .align 5 ENTRY(cpu_arm1022_do_idle) mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt ret lr /* ================================= CACHE ================================ */ .align 5 /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(arm1022_flush_icache_all) #ifndef CONFIG_CPU_ICACHE_DISABLE mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache #endif ret lr ENDPROC(arm1022_flush_icache_all) /* * flush_user_cache_all() * * Invalidate all cache entries in a particular address * space. */ ENTRY(arm1022_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. 
*/ ENTRY(arm1022_flush_kern_cache_all) mov r2, #VM_EXEC mov ip, #0 __flush_whole_cache: #ifndef CONFIG_CPU_DCACHE_DISABLE mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries 2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 5 bcs 1b @ segments 15 to 0 #endif tst r2, #VM_EXEC #ifndef CONFIG_CPU_ICACHE_DISABLE mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache #endif mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * flush_user_cache_range(start, end, flags) * * Invalidate a range of cache entries in the specified * address space. * * - start - start address (inclusive) * - end - end address (exclusive) * - flags - vm_flags for this space */ ENTRY(arm1022_flush_user_cache_range) mov ip, #0 sub r3, r1, r0 @ calculate total size cmp r3, #CACHE_DLIMIT bhs __flush_whole_cache #ifndef CONFIG_CPU_DCACHE_DISABLE 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif tst r2, #VM_EXEC #ifndef CONFIG_CPU_ICACHE_DISABLE mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache #endif mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm1022_coherent_kern_range) /* FALLTHROUGH */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm1022_coherent_user_range) mov ip, #0 bic r0, r0, #CACHE_DLINESIZE - 1 1: #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, r0, c7, c10, 1 @ clean D entry #endif #ifndef CONFIG_CPU_ICACHE_DISABLE mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry #endif add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, ip, c7, c10, 4 @ drain WB mov r0, #0 ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size */ ENTRY(arm1022_flush_kern_dcache_area) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm1022_dma_inv_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_clean_range(start, end) * * Clean the specified virtual address range. 
* * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm1022_dma_clean_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm1022_dma_flush_range) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b #endif mcr p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm1022_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq arm1022_dma_clean_range bcs arm1022_dma_inv_range b arm1022_dma_flush_range ENDPROC(arm1022_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm1022_dma_unmap_area) ret lr ENDPROC(arm1022_dma_unmap_area) .globl arm1022_flush_kern_cache_louis .equ arm1022_flush_kern_cache_louis, arm1022_flush_kern_cache_all @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions arm1022 .align 5 ENTRY(cpu_arm1022_dcache_clean_area) #ifndef CONFIG_CPU_DCACHE_DISABLE mov ip, #0 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE subs r1, r1, #CACHE_DLINESIZE bhi 1b #endif ret lr /* =============================== PageTable ============================== */ /* * cpu_arm1022_switch_mm(pgd) * * Set the translation base pointer to be as described by pgd. * * pgd: new page tables */ .align 5 ENTRY(cpu_arm1022_switch_mm) #ifdef CONFIG_MMU #ifndef CONFIG_CPU_DCACHE_DISABLE mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries 2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 5 bcs 1b @ segments 15 to 0 #endif mov r1, #0 #ifndef CONFIG_CPU_ICACHE_DISABLE mcr p15, 0, r1, c7, c5, 0 @ invalidate I cache #endif mcr p15, 0, r1, c7, c10, 4 @ drain WB mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs #endif ret lr /* * cpu_arm1022_set_pte_ext(ptep, pte, ext) * * Set a PTE and flush it out */ .align 5 ENTRY(cpu_arm1022_set_pte_ext) #ifdef CONFIG_MMU armv3_set_pte_ext mov r0, r0 #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, r0, c7, c10, 1 @ clean D entry #endif #endif /* CONFIG_MMU */ ret lr .type __arm1022_setup, #function __arm1022_setup: mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 #endif adr r5, arm1022_crval ldmia r5, {r5, r6} mrc p15, 0, r0, c1, c0 @ get control register v4 bic r0, r0, r5 orr r0, r0, r6 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN orr r0, r0, #0x4000 @ .R.............. #endif ret lr .size __arm1022_setup, . 
- __arm1022_setup /* * R * .RVI ZFRS BLDP WCAM * .011 1001 ..11 0101 * */ .type arm1022_crval, #object arm1022_crval: crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930 __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions arm1022, dabort=v4t_early_abort, pabort=legacy_pabort .section ".rodata" string cpu_arch_name, "armv5te" string cpu_elf_name, "v5" string cpu_arm1022_name, "ARM1022" .align .section ".proc.info.init", #alloc .type __arm1022_proc_info,#object __arm1022_proc_info: .long 0x4105a220 @ ARM 1022E (v5TE) .long 0xff0ffff0 .long PMD_TYPE_SECT | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __arm1022_setup, __arm1022_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_EDSP .long cpu_arm1022_name .long arm1022_processor_functions .long v4wbi_tlb_fns .long v4wb_user_fns .long arm1022_cache_fns .size __arm1022_proc_info, . - __arm1022_proc_info
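The crval line above records two masks for the CP15 control register: a clear mask and the value to set when the MMU is in use. __arm1022_setup applies them with a bic/orr pair before returning the new value in r0. The C sketch below shows the same read-modify-write shape; it is illustrative only, not kernel code, and the starting ctrl value is an invented example rather than real reset state.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Masks from the arm1022_crval line above (MMU configuration). */
        uint32_t clear  = 0x00007f3f;
        uint32_t mmuset = 0x00003935;

        uint32_t ctrl = 0x00000078;     /* example starting value, illustrative only */

        ctrl &= ~clear;                 /* "bic r0, r0, r5" */
        ctrl |= mmuset;                 /* "orr r0, r0, r6" */

        printf("new control register: 0x%08x\n", (unsigned)ctrl);
        return 0;
}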
AirFortressIlikara/LS2K0300-linux-4.19
10,536
arch/arm/mm/cache-v7m.S
/* * linux/arch/arm/mm/cache-v7m.S * * Based on linux/arch/arm/mm/cache-v7.S * * Copyright (C) 2001 Deep Blue Solutions Ltd. * Copyright (C) 2005 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This is the "shell" of the ARMv7M processor support. */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/errno.h> #include <asm/unwind.h> #include <asm/v7m.h> #include "proc-macros.S" /* Generic V7M read/write macros for memory mapped cache operations */ .macro v7m_cache_read, rt, reg movw \rt, #:lower16:BASEADDR_V7M_SCB + \reg movt \rt, #:upper16:BASEADDR_V7M_SCB + \reg ldr \rt, [\rt] .endm .macro v7m_cacheop, rt, tmp, op, c = al movw\c \tmp, #:lower16:BASEADDR_V7M_SCB + \op movt\c \tmp, #:upper16:BASEADDR_V7M_SCB + \op str\c \rt, [\tmp] .endm .macro read_ccsidr, rt v7m_cache_read \rt, V7M_SCB_CCSIDR .endm .macro read_clidr, rt v7m_cache_read \rt, V7M_SCB_CLIDR .endm .macro write_csselr, rt, tmp v7m_cacheop \rt, \tmp, V7M_SCB_CSSELR .endm /* * dcisw: Invalidate data cache by set/way */ .macro dcisw, rt, tmp v7m_cacheop \rt, \tmp, V7M_SCB_DCISW .endm /* * dccisw: Clean and invalidate data cache by set/way */ .macro dccisw, rt, tmp v7m_cacheop \rt, \tmp, V7M_SCB_DCCISW .endm /* * dccimvac: Clean and invalidate data cache line by MVA to PoC. */ .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo .macro dccimvac\c, rt, tmp v7m_cacheop \rt, \tmp, V7M_SCB_DCCIMVAC, \c .endm .endr /* * dcimvac: Invalidate data cache line by MVA to PoC */ .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo .macro dcimvac\c, rt, tmp v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c .endm .endr /* * dccmvau: Clean data cache line by MVA to PoU */ .macro dccmvau, rt, tmp v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAU .endm /* * dccmvac: Clean data cache line by MVA to PoC */ .macro dccmvac, rt, tmp v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAC .endm /* * icimvau: Invalidate instruction caches by MVA to PoU */ .macro icimvau, rt, tmp v7m_cacheop \rt, \tmp, V7M_SCB_ICIMVAU .endm /* * Invalidate the icache, inner shareable if SMP, invalidate BTB for UP. * rt data ignored by ICIALLU(IS), so can be used for the address */ .macro invalidate_icache, rt v7m_cacheop \rt, \rt, V7M_SCB_ICIALLU mov \rt, #0 .endm /* * Invalidate the BTB, inner shareable if SMP. * rt data ignored by BPIALL, so it can be used for the address */ .macro invalidate_bp, rt v7m_cacheop \rt, \rt, V7M_SCB_BPIALL mov \rt, #0 .endm ENTRY(v7m_invalidate_l1) mov r0, #0 write_csselr r0, r1 read_ccsidr r0 movw r1, #0x7fff and r2, r1, r0, lsr #13 movw r1, #0x3ff and r3, r1, r0, lsr #3 @ NumWays - 1 add r2, r2, #1 @ NumSets and r0, r0, #0x7 add r0, r0, #4 @ SetShift clz r1, r3 @ WayShift add r4, r3, #1 @ NumWays 1: sub r2, r2, #1 @ NumSets-- mov r3, r4 @ Temp = NumWays 2: subs r3, r3, #1 @ Temp-- mov r5, r3, lsl r1 mov r6, r2, lsl r0 orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift) dcisw r5, r6 bgt 2b cmp r2, #0 bgt 1b dsb st isb ret lr ENDPROC(v7m_invalidate_l1) /* * v7m_flush_icache_all() * * Flush the whole I-cache. * * Registers: * r0 - set to 0 */ ENTRY(v7m_flush_icache_all) invalidate_icache r0 ret lr ENDPROC(v7m_flush_icache_all) /* * v7m_flush_dcache_all() * * Flush the whole D-cache. 
* * Corrupted registers: r0-r7, r9-r11 */ ENTRY(v7m_flush_dcache_all) dmb @ ensure ordering with previous memory accesses read_clidr r0 mov r3, r0, lsr #23 @ move LoC into position ands r3, r3, #7 << 1 @ extract LoC*2 from clidr beq finished @ if loc is 0, then no need to clean start_flush_levels: mov r10, #0 @ start clean at cache level 0 flush_levels: add r2, r10, r10, lsr #1 @ work out 3x current cache level mov r1, r0, lsr r2 @ extract cache type bits from clidr and r1, r1, #7 @ mask of the bits for current cache only cmp r1, #2 @ see what cache we have at this level blt skip @ skip if no cache, or just i-cache #ifdef CONFIG_PREEMPT save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic #endif write_csselr r10, r1 @ set current cache level isb @ isb to sych the new cssr&csidr read_ccsidr r1 @ read the new csidr #ifdef CONFIG_PREEMPT restore_irqs_notrace r9 #endif and r2, r1, #7 @ extract the length of the cache lines add r2, r2, #4 @ add 4 (line length offset) movw r4, #0x3ff ands r4, r4, r1, lsr #3 @ find maximum number on the way size clz r5, r4 @ find bit position of way size increment movw r7, #0x7fff ands r7, r7, r1, lsr #13 @ extract max number of the index size loop1: mov r9, r7 @ create working copy of max index loop2: lsl r6, r4, r5 orr r11, r10, r6 @ factor way and cache number into r11 lsl r6, r9, r2 orr r11, r11, r6 @ factor index number into r11 dccisw r11, r6 @ clean/invalidate by set/way subs r9, r9, #1 @ decrement the index bge loop2 subs r4, r4, #1 @ decrement the way bge loop1 skip: add r10, r10, #2 @ increment cache number cmp r3, r10 bgt flush_levels finished: mov r10, #0 @ switch back to cache level 0 write_csselr r10, r3 @ select current cache level in cssr dsb st isb ret lr ENDPROC(v7m_flush_dcache_all) /* * v7m_flush_cache_all() * * Flush the entire cache system. * The data cache flush is now achieved using atomic clean / invalidates * working outwards from L1 cache. This is done using Set/Way based cache * maintenance instructions. * The instruction cache can still be invalidated back to the point of * unification in a single instruction. * */ ENTRY(v7m_flush_kern_cache_all) stmfd sp!, {r4-r7, r9-r11, lr} bl v7m_flush_dcache_all invalidate_icache r0 ldmfd sp!, {r4-r7, r9-r11, lr} ret lr ENDPROC(v7m_flush_kern_cache_all) /* * v7m_flush_cache_all() * * Flush all TLB entries in a particular address space * * - mm - mm_struct describing address space */ ENTRY(v7m_flush_user_cache_all) /*FALLTHROUGH*/ /* * v7m_flush_cache_range(start, end, flags) * * Flush a range of TLB entries in the specified address space. * * - start - start address (may not be aligned) * - end - end address (exclusive, may not be aligned) * - flags - vm_area_struct flags describing address space * * It is assumed that: * - we have a VIPT cache. */ ENTRY(v7m_flush_user_cache_range) ret lr ENDPROC(v7m_flush_user_cache_all) ENDPROC(v7m_flush_user_cache_range) /* * v7m_coherent_kern_range(start,end) * * Ensure that the I and D caches are coherent within specified * region. This is typically used when code has been written to * a memory region, and will be executed. * * - start - virtual start address of region * - end - virtual end address of region * * It is assumed that: * - the Icache does not read data from the write buffer */ ENTRY(v7m_coherent_kern_range) /* FALLTHROUGH */ /* * v7m_coherent_user_range(start,end) * * Ensure that the I and D caches are coherent within specified * region. This is typically used when code has been written to * a memory region, and will be executed. 
* * - start - virtual start address of region * - end - virtual end address of region * * It is assumed that: * - the Icache does not read data from the write buffer */ ENTRY(v7m_coherent_user_range) UNWIND(.fnstart ) dcache_line_size r2, r3 sub r3, r2, #1 bic r12, r0, r3 1: /* * We use open coded version of dccmvau otherwise USER() would * point at movw instruction. */ dccmvau r12, r3 add r12, r12, r2 cmp r12, r1 blo 1b dsb ishst icache_line_size r2, r3 sub r3, r2, #1 bic r12, r0, r3 2: icimvau r12, r3 add r12, r12, r2 cmp r12, r1 blo 2b invalidate_bp r0 dsb ishst isb ret lr UNWIND(.fnend ) ENDPROC(v7m_coherent_kern_range) ENDPROC(v7m_coherent_user_range) /* * v7m_flush_kern_dcache_area(void *addr, size_t size) * * Ensure that the data held in the page kaddr is written back * to the page in question. * * - addr - kernel address * - size - region size */ ENTRY(v7m_flush_kern_dcache_area) dcache_line_size r2, r3 add r1, r0, r1 sub r3, r2, #1 bic r0, r0, r3 1: dccimvac r0, r3 @ clean & invalidate D line / unified line add r0, r0, r2 cmp r0, r1 blo 1b dsb st ret lr ENDPROC(v7m_flush_kern_dcache_area) /* * v7m_dma_inv_range(start,end) * * Invalidate the data cache within the specified region; we will * be performing a DMA operation in this region and we want to * purge old data in the cache. * * - start - virtual start address of region * - end - virtual end address of region */ v7m_dma_inv_range: dcache_line_size r2, r3 sub r3, r2, #1 tst r0, r3 bic r0, r0, r3 dccimvacne r0, r3 addne r0, r0, r2 subne r3, r2, #1 @ restore r3, corrupted by v7m's dccimvac tst r1, r3 bic r1, r1, r3 dccimvacne r1, r3 cmp r0, r1 1: dcimvaclo r0, r3 addlo r0, r0, r2 cmplo r0, r1 blo 1b dsb st ret lr ENDPROC(v7m_dma_inv_range) /* * v7m_dma_clean_range(start,end) * - start - virtual start address of region * - end - virtual end address of region */ v7m_dma_clean_range: dcache_line_size r2, r3 sub r3, r2, #1 bic r0, r0, r3 1: dccmvac r0, r3 @ clean D / U line add r0, r0, r2 cmp r0, r1 blo 1b dsb st ret lr ENDPROC(v7m_dma_clean_range) /* * v7m_dma_flush_range(start,end) * - start - virtual start address of region * - end - virtual end address of region */ ENTRY(v7m_dma_flush_range) dcache_line_size r2, r3 sub r3, r2, #1 bic r0, r0, r3 1: dccimvac r0, r3 @ clean & invalidate D / U line add r0, r0, r2 cmp r0, r1 blo 1b dsb st ret lr ENDPROC(v7m_dma_flush_range) /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(v7m_dma_map_area) add r1, r1, r0 teq r2, #DMA_FROM_DEVICE beq v7m_dma_inv_range b v7m_dma_clean_range ENDPROC(v7m_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(v7m_dma_unmap_area) add r1, r1, r0 teq r2, #DMA_TO_DEVICE bne v7m_dma_inv_range ret lr ENDPROC(v7m_dma_unmap_area) .globl v7m_flush_kern_cache_louis .equ v7m_flush_kern_cache_louis, v7m_flush_kern_cache_all __INITDATA @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions v7m
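v7m_invalidate_l1 above decodes the line size, associativity and set count from CCSIDR and then writes one set/way word per cache line to the memory-mapped DCISW register instead of issuing an mcr. The C sketch below performs the same decode and composes the same words; it is not kernel code, the CCSIDR value is an invented example (a plausible 16 KiB, 4-way, 32-byte-line D-cache), and it assumes more than one way so the clz-based way shift is well defined.

#include <stdint.h>
#include <stdio.h>

/* Count leading zeros of a 32-bit value, like the "clz" instruction. */
static unsigned clz32(uint32_t x)
{
        unsigned n = 0;
        while (n < 32 && !(x & (0x80000000u >> n)))
                n++;
        return n;
}

int main(void)
{
        /* Example CCSIDR; the real value is read from the SCB on ARMv7-M. */
        uint32_t ccsidr = (127u << 13) | (3u << 3) | 1u;

        uint32_t sets     = ((ccsidr >> 13) & 0x7fff) + 1;
        uint32_t ways     = ((ccsidr >> 3) & 0x3ff) + 1;
        uint32_t setshift = (ccsidr & 7) + 4;           /* log2(line size in bytes) */
        uint32_t wayshift = clz32(ways - 1);            /* assumes ways > 1 */

        for (int set = sets - 1; set >= 0; set--)
                for (int way = ways - 1; way >= 0; way--) {
                        uint32_t sw = ((uint32_t)way << wayshift) | ((uint32_t)set << setshift);
                        (void)sw;       /* the asm writes this word to the SCB DCISW register */
                }

        printf("%u sets x %u ways x %u B lines = %u B\n",
               (unsigned)sets, (unsigned)ways, (unsigned)(1u << setshift),
               (unsigned)(sets * ways * (1u << setshift)));
        return 0;
}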
AirFortressIlikara/LS2K0300-linux-4.19
8,423
arch/arm/mm/proc-v6.S
/* * linux/arch/arm/mm/proc-v6.S * * Copyright (C) 2001 Deep Blue Solutions Ltd. * Modified by Catalin Marinas for noMMU support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This is the "shell" of the ARMv6 processor support. */ #include <linux/init.h> #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/pgtable.h> #include "proc-macros.S" #define D_CACHE_LINE_SIZE 32 #define TTB_C (1 << 0) #define TTB_S (1 << 1) #define TTB_IMP (1 << 2) #define TTB_RGN_NC (0 << 3) #define TTB_RGN_WBWA (1 << 3) #define TTB_RGN_WT (2 << 3) #define TTB_RGN_WB (3 << 3) #define TTB_FLAGS_UP TTB_RGN_WBWA #define PMD_FLAGS_UP PMD_SECT_WB #define TTB_FLAGS_SMP TTB_RGN_WBWA|TTB_S #define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S ENTRY(cpu_v6_proc_init) ret lr ENTRY(cpu_v6_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x0006 @ .............ca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_v6_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * - loc - location to jump to for soft reset */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_v6_reset) mrc p15, 0, r1, c1, c0, 0 @ ctrl register bic r1, r1, #0x1 @ ...............m mcr p15, 0, r1, c1, c0, 0 @ disable MMU mov r1, #0 mcr p15, 0, r1, c7, c5, 4 @ ISB ret r0 ENDPROC(cpu_v6_reset) .popsection /* * cpu_v6_do_idle() * * Idle the processor (eg, wait for interrupt). * * IRQs are already disabled. */ ENTRY(cpu_v6_do_idle) mov r1, #0 mcr p15, 0, r1, c7, c10, 4 @ DWB - WFI may enter a low-power mode mcr p15, 0, r1, c7, c0, 4 @ wait for interrupt ret lr ENTRY(cpu_v6_dcache_clean_area) 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #D_CACHE_LINE_SIZE subs r1, r1, #D_CACHE_LINE_SIZE bhi 1b ret lr /* * cpu_v6_switch_mm(pgd_phys, tsk) * * Set the translation table base pointer to be pgd_phys * * - pgd_phys - physical address of new TTB * * It is assumed that: * - we are not using split page tables */ ENTRY(cpu_v6_switch_mm) #ifdef CONFIG_MMU mov r2, #0 mmid r1, r1 @ get mm->context.id ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) ALT_UP(orr r0, r0, #TTB_FLAGS_UP) mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB mcr p15, 0, r2, c7, c10, 4 @ drain write buffer mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 #ifdef CONFIG_PID_IN_CONTEXTIDR mrc p15, 0, r2, c13, c0, 1 @ read current context ID bic r2, r2, #0xff @ extract the PID and r1, r1, #0xff orr r1, r1, r2 @ insert into new context ID #endif mcr p15, 0, r1, c13, c0, 1 @ set context ID #endif ret lr /* * cpu_v6_set_pte_ext(ptep, pte, ext) * * Set a level 2 translation table entry. 
* * - ptep - pointer to level 2 translation table entry * (hardware version is stored at -1024 bytes) * - pte - PTE value to store * - ext - value for extended PTE bits */ armv6_mt_table cpu_v6 ENTRY(cpu_v6_set_pte_ext) #ifdef CONFIG_MMU armv6_set_pte_ext cpu_v6 #endif ret lr /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ .globl cpu_v6_suspend_size .equ cpu_v6_suspend_size, 4 * 6 #ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_v6_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID #ifdef CONFIG_MMU mrc p15, 0, r5, c3, c0, 0 @ Domain ID mrc p15, 0, r6, c2, c0, 1 @ Translation table base 1 #endif mrc p15, 0, r7, c1, c0, 1 @ auxiliary control register mrc p15, 0, r8, c1, c0, 2 @ co-processor access control mrc p15, 0, r9, c1, c0, 0 @ control register stmia r0, {r4 - r9} ldmfd sp!, {r4- r9, pc} ENDPROC(cpu_v6_do_suspend) ENTRY(cpu_v6_do_resume) mov ip, #0 mcr p15, 0, ip, c7, c14, 0 @ clean+invalidate D cache mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache mcr p15, 0, ip, c7, c15, 0 @ clean+invalidate cache mcr p15, 0, ip, c7, c10, 4 @ drain write buffer mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID ldmia r0, {r4 - r9} mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID #ifdef CONFIG_MMU mcr p15, 0, r5, c3, c0, 0 @ Domain ID ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP) ALT_UP(orr r1, r1, #TTB_FLAGS_UP) mcr p15, 0, r1, c2, c0, 0 @ Translation table base 0 mcr p15, 0, r6, c2, c0, 1 @ Translation table base 1 mcr p15, 0, ip, c2, c0, 2 @ TTB control register #endif mcr p15, 0, r7, c1, c0, 1 @ auxiliary control register mcr p15, 0, r8, c1, c0, 2 @ co-processor access control mcr p15, 0, ip, c7, c5, 4 @ ISB mov r0, r9 @ control register b cpu_resume_mmu ENDPROC(cpu_v6_do_resume) #endif string cpu_v6_name, "ARMv6-compatible processor" .align /* * __v6_setup * * Initialise TLB, Caches, and MMU state ready to switch the MMU * on. Return in r0 the new CP15 C1 control register setting. * * We automatically detect if we have a Harvard cache, and use the * Harvard cache control instructions insead of the unified cache * control instructions. * * This should be able to cover all ARMv6 cores. * * It is assumed that: * - cache type register is implemented */ __v6_setup: #ifdef CONFIG_SMP ALT_SMP(mrc p15, 0, r0, c1, c0, 1) @ Enable SMP/nAMP mode ALT_UP(nop) orr r0, r0, #0x20 ALT_SMP(mcr p15, 0, r0, c1, c0, 1) ALT_UP(nop) #endif mov r0, #0 mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs mcr p15, 0, r0, c2, c0, 2 @ TTB control register ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP) ALT_UP(orr r4, r4, #TTB_FLAGS_UP) ALT_SMP(orr r8, r8, #TTB_FLAGS_SMP) ALT_UP(orr r8, r8, #TTB_FLAGS_UP) mcr p15, 0, r8, c2, c0, 1 @ load TTB1 #endif /* CONFIG_MMU */ mcr p15, 0, r0, c7, c10, 4 @ drain write buffer and @ complete invalidations adr r5, v6_crval ldmia r5, {r5, r6} ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables mrc p15, 0, r0, c1, c0, 0 @ read control register bic r0, r0, r5 @ clear bits them orr r0, r0, r6 @ set them #ifdef CONFIG_ARM_ERRATA_364296 /* * Workaround for the 364296 ARM1136 r0p2 erratum (possible cache data * corruption with hit-under-miss enabled). The conditional code below * (setting the undocumented bit 31 in the auxiliary control register * and the FI bit in the control register) disables hit-under-miss * without putting the processor into full low interrupt latency mode. 
*/ ldr r6, =0x4107b362 @ id for ARM1136 r0p2 mrc p15, 0, r5, c0, c0, 0 @ get processor id teq r5, r6 @ check for the faulty core mrceq p15, 0, r5, c1, c0, 1 @ load aux control reg orreq r5, r5, #(1 << 31) @ set the undocumented bit 31 mcreq p15, 0, r5, c1, c0, 1 @ write aux control reg orreq r0, r0, #(1 << 21) @ low interrupt latency configuration #endif ret lr @ return to head.S:__ret /* * V X F I D LR * .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced * 0 110 0011 1.00 .111 1101 < we want */ .type v6_crval, #object v6_crval: crval clear=0x01e0fb7f, mmuset=0x00c0387d, ucset=0x00c0187c __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions v6, dabort=v6_early_abort, pabort=v6_pabort, suspend=1 .section ".rodata" string cpu_arch_name, "armv6" string cpu_elf_name, "v6" .align .section ".proc.info.init", #alloc /* * Match any ARMv6 processor core. */ .type __v6_proc_info, #object __v6_proc_info: .long 0x0007b000 .long 0x0007f000 ALT_SMP(.long \ PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ | \ PMD_FLAGS_SMP) ALT_UP(.long \ PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ | \ PMD_FLAGS_UP) .long PMD_TYPE_SECT | \ PMD_SECT_XN | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __v6_setup, __v6_proc_info .long cpu_arch_name .long cpu_elf_name /* See also feat_v6_fixup() for HWCAP_TLS */ .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA|HWCAP_TLS .long cpu_v6_name .long v6_processor_functions .long v6wbi_tlb_fns .long v6_user_fns .long v6_cache_fns .size __v6_proc_info, . - __v6_proc_info
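cpu_v6_switch_mm above does not load the bare pgd physical address into TTBR0: it first ORs in the table-walk attribute bits defined at the top of the file (outer write-back write-allocate, plus the shareable bit on SMP). A minimal C sketch of that composition follows; it is illustrative only, not kernel code, and the pgd address in main is an invented example.

#include <stdint.h>
#include <stdio.h>

#define TTB_S        (1u << 1)
#define TTB_RGN_WBWA (1u << 3)

#define TTB_FLAGS_UP  TTB_RGN_WBWA
#define TTB_FLAGS_SMP (TTB_RGN_WBWA | TTB_S)

/* pgd_phys is the 16 KiB-aligned physical address of the level 1 table. */
static uint32_t make_ttbr0(uint32_t pgd_phys, int smp)
{
        return pgd_phys | (smp ? TTB_FLAGS_SMP : TTB_FLAGS_UP);
}

int main(void)
{
        printf("UP : 0x%08x\n", (unsigned)make_ttbr0(0x80004000, 0));
        printf("SMP: 0x%08x\n", (unsigned)make_ttbr0(0x80004000, 1));
        return 0;
}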
AirFortressIlikara/LS2K0300-linux-4.19
11,201
arch/arm/mm/proc-arm922.S
/* * linux/arch/arm/mm/proc-arm922.S: MMU functions for ARM922 * * Copyright (C) 1999,2000 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. * Copyright (C) 2001 Altera Corporation * hacked for non-paged-MM by Hyok S. Choi, 2003. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * These are the low level assembler for performing cache and TLB * functions on the arm922. * * CONFIG_CPU_ARM922_CPU_IDLE -> nohlt */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/ptrace.h> #include "proc-macros.S" /* * The size of one data cache line. */ #define CACHE_DLINESIZE 32 /* * The number of data cache segments. */ #define CACHE_DSEGMENTS 4 /* * The number of lines in a cache segment. */ #define CACHE_DENTRIES 64 /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual * cache line maintenance instructions. (I think this should * be 32768). */ #define CACHE_DLIMIT 8192 .text /* * cpu_arm922_proc_init() */ ENTRY(cpu_arm922_proc_init) ret lr /* * cpu_arm922_proc_fin() */ ENTRY(cpu_arm922_proc_fin) mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * cpu_arm922_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * loc: location to jump to for soft reset */ .align 5 .pushsection .idmap.text, "ax" ENTRY(cpu_arm922_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB #ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_arm922_reset) .popsection /* * cpu_arm922_do_idle() */ .align 5 ENTRY(cpu_arm922_do_idle) mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt ret lr #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(arm922_flush_icache_all) mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache ret lr ENDPROC(arm922_flush_icache_all) /* * flush_user_cache_all() * * Clean and invalidate all cache entries in a particular * address space. */ ENTRY(arm922_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. 
*/ ENTRY(arm922_flush_kern_cache_all) mov r2, #VM_EXEC mov ip, #0 __flush_whole_cache: mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 8 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries 2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 5 bcs 1b @ segments 7 to 0 tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * flush_user_cache_range(start, end, flags) * * Clean and invalidate a range of cache entries in the * specified address range. * * - start - start address (inclusive) * - end - end address (exclusive) * - flags - vm_flags describing address space */ ENTRY(arm922_flush_user_cache_range) mov ip, #0 sub r3, r1, r0 @ calculate total size cmp r3, #CACHE_DLIMIT bhs __flush_whole_cache 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry tst r2, #VM_EXEC mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c10, 4 @ drain WB ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm922_coherent_kern_range) /* FALLTHROUGH */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start, end. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(arm922_coherent_user_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB mov r0, #0 ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * * - addr - kernel address * - size - region size */ ENTRY(arm922_flush_kern_dcache_area) add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm922_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_clean_range(start, end) * * Clean the specified virtual address range. * * - start - virtual start address * - end - virtual end address * * (same as v4wb) */ arm922_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. 
* * - start - virtual start address * - end - virtual end address */ ENTRY(arm922_dma_flush_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mcr p15, 0, r0, c7, c10, 4 @ drain WB ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm922_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq arm922_dma_clean_range bcs arm922_dma_inv_range b arm922_dma_flush_range ENDPROC(arm922_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(arm922_dma_unmap_area) ret lr ENDPROC(arm922_dma_unmap_area) .globl arm922_flush_kern_cache_louis .equ arm922_flush_kern_cache_louis, arm922_flush_kern_cache_all @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions arm922 #endif ENTRY(cpu_arm922_dcache_clean_area) #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE subs r1, r1, #CACHE_DLINESIZE bhi 1b #endif ret lr /* =============================== PageTable ============================== */ /* * cpu_arm922_switch_mm(pgd) * * Set the translation base pointer to be as described by pgd. * * pgd: new page tables */ .align 5 ENTRY(cpu_arm922_switch_mm) #ifdef CONFIG_MMU mov ip, #0 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache #else @ && 'Clean & Invalidate whole DCache' @ && Re-written to use Index Ops. @ && Uses registers r1, r3 and ip mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 4 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries 2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 5 bcs 1b @ segments 7 to 0 #endif mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs #endif ret lr /* * cpu_arm922_set_pte_ext(ptep, pte, ext) * * Set a PTE and flush it out */ .align 5 ENTRY(cpu_arm922_set_pte_ext) #ifdef CONFIG_MMU armv3_set_pte_ext mov r0, r0 mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c10, 4 @ drain WB #endif /* CONFIG_MMU */ ret lr .type __arm922_setup, #function __arm922_setup: mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 #endif adr r5, arm922_crval ldmia r5, {r5, r6} mrc p15, 0, r0, c1, c0 @ get control register v4 bic r0, r0, r5 orr r0, r0, r6 ret lr .size __arm922_setup, . 
- __arm922_setup /* * R * .RVI ZFRS BLDP WCAM * ..11 0001 ..11 0101 * */ .type arm922_crval, #object arm922_crval: crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130 __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions arm922, dabort=v4t_early_abort, pabort=legacy_pabort .section ".rodata" string cpu_arch_name, "armv4t" string cpu_elf_name, "v4" string cpu_arm922_name, "ARM922T" .align .section ".proc.info.init", #alloc .type __arm922_proc_info,#object __arm922_proc_info: .long 0x41009220 .long 0xff00fff0 .long PMD_TYPE_SECT | \ PMD_SECT_BUFFERABLE | \ PMD_SECT_CACHEABLE | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __arm922_setup, __arm922_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB .long cpu_arm922_name .long arm922_processor_functions .long v4wbi_tlb_fns .long v4wb_user_fns #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH .long arm922_cache_fns #else .long v4wt_cache_fns #endif .size __arm922_proc_info, . - __arm922_proc_info
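cpu_arm922_switch_mm and the whole-cache flush above walk the D-cache by index rather than by address: the word passed to the c7, c14, 2 operation encodes the cache entry in bits [31:26] and the segment in bits [6:5], and both loops count down until the subtraction borrows. A small C sketch that enumerates the same index values (the mcr itself cannot be issued from portable C, so the emit function below is only a stand-in):

	#include <stdint.h>
	#include <stdio.h>

	#define CACHE_DSEGMENTS 4	/* from the arm922 defines above */
	#define CACHE_DENTRIES  64

	/* Stand-in for "mcr p15, 0, val, c7, c14, 2" (clean+invalidate D index). */
	static void clean_inv_dcache_index(uint32_t val)
	{
		printf("index op 0x%08x (segment %u, entry %u)\n",
		       (unsigned)val, (unsigned)((val >> 5) & 3), (unsigned)(val >> 26));
	}

	static void clean_inv_whole_dcache(void)
	{
		for (int seg = CACHE_DSEGMENTS - 1; seg >= 0; seg--)
			for (int entry = CACHE_DENTRIES - 1; entry >= 0; entry--)
				clean_inv_dcache_index(((uint32_t)entry << 26) |
						       ((uint32_t)seg << 5));
	}

	int main(void)
	{
		clean_inv_whole_dcache();	/* 4 * 64 = 256 index operations */
		return 0;
	}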
AirFortressIlikara/LS2K0300-linux-4.19
4,467
arch/arm/mm/proc-v7-2level.S
/* * arch/arm/mm/proc-v7-2level.S * * Copyright (C) 2001 Deep Blue Solutions Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define TTB_S (1 << 1) #define TTB_RGN_NC (0 << 3) #define TTB_RGN_OC_WBWA (1 << 3) #define TTB_RGN_OC_WT (2 << 3) #define TTB_RGN_OC_WB (3 << 3) #define TTB_NOS (1 << 5) #define TTB_IRGN_NC ((0 << 0) | (0 << 6)) #define TTB_IRGN_WBWA ((0 << 0) | (1 << 6)) #define TTB_IRGN_WT ((1 << 0) | (0 << 6)) #define TTB_IRGN_WB ((1 << 0) | (1 << 6)) /* PTWs cacheable, inner WB not shareable, outer WB not shareable */ #define TTB_FLAGS_UP TTB_IRGN_WB|TTB_RGN_OC_WB #define PMD_FLAGS_UP PMD_SECT_WB /* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */ #define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA #define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S /* * cpu_v7_switch_mm(pgd_phys, tsk) * * Set the translation table base pointer to be pgd_phys * * - pgd_phys - physical address of new TTB * * It is assumed that: * - we are not using split page tables * * Note that we always need to flush BTAC/BTB if IBE is set * even on Cortex-A8 revisions not affected by 430973. * If IBE is not set, the flush BTAC/BTB won't do anything. */ ENTRY(cpu_v7_switch_mm) #ifdef CONFIG_MMU mmid r1, r1 @ get mm->context.id ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) ALT_UP(orr r0, r0, #TTB_FLAGS_UP) #ifdef CONFIG_PID_IN_CONTEXTIDR mrc p15, 0, r2, c13, c0, 1 @ read current context ID lsr r2, r2, #8 @ extract the PID bfi r1, r2, #8, #24 @ insert into new context ID #endif #ifdef CONFIG_ARM_ERRATA_754322 dsb #endif mcr p15, 0, r1, c13, c0, 1 @ set context ID isb mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 isb #endif bx lr ENDPROC(cpu_v7_switch_mm) /* * cpu_v7_set_pte_ext(ptep, pte) * * Set a level 2 translation table entry. * * - ptep - pointer to level 2 translation table entry * (hardware version is stored at +2048 bytes) * - pte - PTE value to store * - ext - value for extended PTE bits */ ENTRY(cpu_v7_set_pte_ext) #ifdef CONFIG_MMU str r1, [r0] @ linux version bic r3, r1, #0x000003f0 bic r3, r3, #PTE_TYPE_MASK orr r3, r3, r2 orr r3, r3, #PTE_EXT_AP0 | 2 tst r1, #1 << 4 orrne r3, r3, #PTE_EXT_TEX(1) eor r1, r1, #L_PTE_DIRTY tst r1, #L_PTE_RDONLY | L_PTE_DIRTY orrne r3, r3, #PTE_EXT_APX tst r1, #L_PTE_USER orrne r3, r3, #PTE_EXT_AP1 tst r1, #L_PTE_XN orrne r3, r3, #PTE_EXT_XN tst r1, #L_PTE_YOUNG tstne r1, #L_PTE_VALID eorne r1, r1, #L_PTE_NONE tstne r1, #L_PTE_NONE moveq r3, #0 ARM( str r3, [r0, #2048]! ) THUMB( add r0, r0, #2048 ) THUMB( str r3, [r0] ) ALT_SMP(W(nop)) ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte #endif bx lr ENDPROC(cpu_v7_set_pte_ext) /* * Memory region attributes with SCTLR.TRE=1 * * n = TEX[0],C,B * TR = PRRR[2n+1:2n] - memory type * IR = NMRR[2n+1:2n] - inner cacheable property * OR = NMRR[2n+17:2n+16] - outer cacheable property * * n TR IR OR * UNCACHED 000 00 * BUFFERABLE 001 10 00 00 * WRITETHROUGH 010 10 10 10 * WRITEBACK 011 10 11 11 * reserved 110 * WRITEALLOC 111 10 01 01 * DEV_SHARED 100 01 * DEV_NONSHARED 100 01 * DEV_WC 001 10 * DEV_CACHED 011 10 * * Other attributes: * * DS0 = PRRR[16] = 0 - device shareable property * DS1 = PRRR[17] = 1 - device shareable property * NS0 = PRRR[18] = 0 - normal shareable property * NS1 = PRRR[19] = 1 - normal shareable property * NOS = PRRR[24+n] = 1 - not outer shareable */ .equ PRRR, 0xff0a81a8 .equ NMRR, 0x40e040e0 /* * Macro for setting up the TTBRx and TTBCR registers. 
* - \ttb0 and \ttb1 updated with the corresponding flags. */ .macro v7_ttb_setup, zero, ttbr0l, ttbr0h, ttbr1, tmp mcr p15, 0, \zero, c2, c0, 2 @ TTB control register ALT_SMP(orr \ttbr0l, \ttbr0l, #TTB_FLAGS_SMP) ALT_UP(orr \ttbr0l, \ttbr0l, #TTB_FLAGS_UP) ALT_SMP(orr \ttbr1, \ttbr1, #TTB_FLAGS_SMP) ALT_UP(orr \ttbr1, \ttbr1, #TTB_FLAGS_UP) mcr p15, 0, \ttbr1, c2, c0, 1 @ load TTB1 .endm /* AT * TFR EV X F I D LR S * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced * 01 0 110 0011 1100 .111 1101 < we want */ .align 2 .type v7_crval, #object v7_crval: crval clear=0x2120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
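With SCTLR.TRE=1, the 3-bit index n = {TEX[0],C,B} selects 2-bit fields out of PRRR and NMRR, exactly as the attribute table above lists: the memory type comes from PRRR[2n+1:2n], inner cacheability from NMRR[2n+1:2n] and outer cacheability from NMRR[2n+17:2n+16]. A short C sketch that decodes the PRRR/NMRR constants used above into those fields (the decoder is only for illustration; field names follow the comment):

	#include <stdint.h>
	#include <stdio.h>

	/* Constants taken from the .equ PRRR / NMRR lines above. */
	#define PRRR 0xff0a81a8u
	#define NMRR 0x40e040e0u

	int main(void)
	{
		for (unsigned n = 0; n < 8; n++) {
			unsigned tr  = (PRRR >> (2 * n)) & 3;	    /* memory type     */
			unsigned ir  = (NMRR >> (2 * n)) & 3;	    /* inner cacheable */
			unsigned or_ = (NMRR >> (2 * n + 16)) & 3;  /* outer cacheable */

			printf("n=%u (TEX[0],C,B=%u%u%u)  TR=%u IR=%u OR=%u\n",
			       n, (n >> 2) & 1, (n >> 1) & 1, n & 1, tr, ir, or_);
		}
		return 0;
	}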
AirFortressIlikara/LS2K0300-linux-4.19
2,344
arch/arm/mm/proc-arm9tdmi.S
/*
 *  linux/arch/arm/mm/proc-arm9tdmi.S: utility functions for ARM9TDMI
 *
 *  Copyright (C) 2003-2006 Hyok S. Choi <hyok.choi@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>

#include "proc-macros.S"

	.text
/*
 * cpu_arm9tdmi_proc_init()
 * cpu_arm9tdmi_do_idle()
 * cpu_arm9tdmi_dcache_clean_area()
 * cpu_arm9tdmi_switch_mm()
 *
 * These are not required.
 */
ENTRY(cpu_arm9tdmi_proc_init)
ENTRY(cpu_arm9tdmi_do_idle)
ENTRY(cpu_arm9tdmi_dcache_clean_area)
ENTRY(cpu_arm9tdmi_switch_mm)
	ret	lr

/*
 * cpu_arm9tdmi_proc_fin()
 */
ENTRY(cpu_arm9tdmi_proc_fin)
	ret	lr

/*
 * Function: cpu_arm9tdmi_reset(loc)
 * Params  : loc(r0)	address to jump to
 * Purpose : Sets up everything for a reset and jump to the location for soft reset.
 */
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_arm9tdmi_reset)
	ret	r0
ENDPROC(cpu_arm9tdmi_reset)
	.popsection

	.type	__arm9tdmi_setup, #function
__arm9tdmi_setup:
	ret	lr
	.size	__arm9tdmi_setup, . - __arm9tdmi_setup

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm9tdmi, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1

	.section ".rodata"

	string	cpu_arch_name, "armv4t"
	string	cpu_elf_name, "v4"
	string	cpu_arm9tdmi_name, "ARM9TDMI"
	string	cpu_p2001_name, "P2001"

	.align

	.section ".proc.info.init", #alloc

.macro arm9tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req
	.type	__\name\()_proc_info, #object
__\name\()_proc_info:
	.long	\cpu_val
	.long	\cpu_mask
	.long	0
	.long	0
	initfn	__arm9tdmi_setup, __\name\()_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
	.long	\cpu_name
	.long	arm9tdmi_processor_functions
	.long	0
	.long	0
	.long	v4_cache_fns
	.size	__\name\()_proc_info, . - __\name\()_proc_info
.endm

	arm9tdmi_proc_info arm9tdmi, 0x41009900, 0xfff8ff00, cpu_arm9tdmi_name
	arm9tdmi_proc_info p2001, 0x41029000, 0xffffffff, cpu_p2001_name
AirFortressIlikara/LS2K0300-linux-4.19
1,320
arch/arm/mm/cache-nop.S
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>

#include "proc-macros.S"

ENTRY(nop_flush_icache_all)
	ret	lr
ENDPROC(nop_flush_icache_all)

	.globl nop_flush_kern_cache_all
	.equ nop_flush_kern_cache_all, nop_flush_icache_all

	.globl nop_flush_kern_cache_louis
	.equ nop_flush_kern_cache_louis, nop_flush_icache_all

	.globl nop_flush_user_cache_all
	.equ nop_flush_user_cache_all, nop_flush_icache_all

	.globl nop_flush_user_cache_range
	.equ nop_flush_user_cache_range, nop_flush_icache_all

	.globl nop_coherent_kern_range
	.equ nop_coherent_kern_range, nop_flush_icache_all

ENTRY(nop_coherent_user_range)
	mov	r0, 0
	ret	lr
ENDPROC(nop_coherent_user_range)

	.globl nop_flush_kern_dcache_area
	.equ nop_flush_kern_dcache_area, nop_flush_icache_all

	.globl nop_dma_flush_range
	.equ nop_dma_flush_range, nop_flush_icache_all

	.globl nop_dma_map_area
	.equ nop_dma_map_area, nop_flush_icache_all

	.globl nop_dma_unmap_area
	.equ nop_dma_unmap_area, nop_flush_icache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions nop
AirFortressIlikara/LS2K0300-linux-4.19
6,896
arch/arm/mm/abort-lv4t.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> #include <asm/assembler.h> /* * Function: v4t_late_abort * * Params : r2 = pt_regs * : r4 = aborted context pc * : r5 = aborted context psr * * Returns : r4-r5, r9-r11, r13 preserved * * Purpose : obtain information about current aborted instruction. * Note: we read user space. This means we might cause a data * abort here if the I-TLB and D-TLB aren't seeing the same * picture. Unfortunately, this does happen. We live with it. */ ENTRY(v4t_late_abort) tst r5, #PSR_T_BIT @ check for thumb mode #ifdef CONFIG_CPU_CP15_MMU mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR #else mov r0, #0 @ clear r0, r1 (no FSR/FAR) mov r1, #0 #endif bne .data_thumb_abort ldr r8, [r4] @ read arm instruction uaccess_disable ip @ disable userspace access tst r8, #1 << 20 @ L = 1 -> write? orreq r1, r1, #1 << 11 @ yes. and r7, r8, #15 << 24 add pc, pc, r7, lsr #22 @ Now branch to the relevant processing routine nop /* 0 */ b .data_arm_lateldrhpost @ ldrh rd, [rn], #m/rm /* 1 */ b .data_arm_lateldrhpre @ ldrh rd, [rn, #m/rm] /* 2 */ b .data_unknown /* 3 */ b .data_unknown /* 4 */ b .data_arm_lateldrpostconst @ ldr rd, [rn], #m /* 5 */ b .data_arm_lateldrpreconst @ ldr rd, [rn, #m] /* 6 */ b .data_arm_lateldrpostreg @ ldr rd, [rn], rm /* 7 */ b .data_arm_lateldrprereg @ ldr rd, [rn, rm] /* 8 */ b .data_arm_ldmstm @ ldm*a rn, <rlist> /* 9 */ b .data_arm_ldmstm @ ldm*b rn, <rlist> /* a */ b .data_unknown /* b */ b .data_unknown /* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m /* d */ b do_DataAbort @ ldc rd, [rn, #m] /* e */ b .data_unknown /* f */ b .data_unknown .data_unknown_r9: ldr r9, [sp], #4 .data_unknown: @ Part of jumptable mov r0, r4 mov r1, r8 b baddataabort .data_arm_ldmstm: tst r8, #1 << 21 @ check writeback bit beq do_DataAbort @ no writeback -> no fixup str r9, [sp, #-4]! mov r7, #0x11 orr r7, r7, #0x1100 and r6, r8, r7 and r9, r8, r7, lsl #1 add r6, r6, r9, lsr #1 and r9, r8, r7, lsl #2 add r6, r6, r9, lsr #2 and r9, r8, r7, lsl #3 add r6, r6, r9, lsr #3 add r6, r6, r6, lsr #8 add r6, r6, r6, lsr #4 and r6, r6, #15 @ r6 = no. of registers to transfer. and r9, r8, #15 << 16 @ Extract 'n' from instruction ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6, lsl #2 @ Undo increment addeq r7, r7, r6, lsl #2 @ Undo decrement str r7, [r2, r9, lsr #14] @ Put register 'Rn' ldr r9, [sp], #4 b do_DataAbort .data_arm_lateldrhpre: tst r8, #1 << 21 @ Check writeback bit beq do_DataAbort @ No writeback -> no fixup .data_arm_lateldrhpost: str r9, [sp, #-4]! and r9, r8, #0x00f @ get Rm / low nibble of immediate value tst r8, #1 << 22 @ if (immediate offset) andne r6, r8, #0xf00 @ { immediate high nibble orrne r6, r9, r6, lsr #4 @ combine nibbles } else ldreq r6, [r2, r9, lsl #2] @ { load Rm value } .data_arm_apply_r6_and_rn: and r9, r8, #15 << 16 @ Extract 'n' from instruction ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6 @ Undo incrmenet addeq r7, r7, r6 @ Undo decrement str r7, [r2, r9, lsr #14] @ Put register 'Rn' ldr r9, [sp], #4 b do_DataAbort .data_arm_lateldrpreconst: tst r8, #1 << 21 @ check writeback bit beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostconst: movs r6, r8, lsl #20 @ Get offset beq do_DataAbort @ zero -> no fixup str r9, [sp, #-4]! 
and r9, r8, #15 << 16 @ Extract 'n' from instruction ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6, lsr #20 @ Undo increment addeq r7, r7, r6, lsr #20 @ Undo decrement str r7, [r2, r9, lsr #14] @ Put register 'Rn' ldr r9, [sp], #4 b do_DataAbort .data_arm_lateldrprereg: tst r8, #1 << 21 @ check writeback bit beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostreg: and r7, r8, #15 @ Extract 'm' from instruction ldr r6, [r2, r7, lsl #2] @ Get register 'Rm' str r9, [sp, #-4]! mov r9, r8, lsr #7 @ get shift count ands r9, r9, #31 and r7, r8, #0x70 @ get shift type orreq r7, r7, #8 @ shift count = 0 add pc, pc, r7 nop mov r6, r6, lsl r9 @ 0: LSL #!0 b .data_arm_apply_r6_and_rn b .data_arm_apply_r6_and_rn @ 1: LSL #0 nop b .data_unknown_r9 @ 2: MUL? nop b .data_unknown_r9 @ 3: MUL? nop mov r6, r6, lsr r9 @ 4: LSR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, lsr #32 @ 5: LSR #32 b .data_arm_apply_r6_and_rn b .data_unknown_r9 @ 6: MUL? nop b .data_unknown_r9 @ 7: MUL? nop mov r6, r6, asr r9 @ 8: ASR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, asr #32 @ 9: ASR #32 b .data_arm_apply_r6_and_rn b .data_unknown_r9 @ A: MUL? nop b .data_unknown_r9 @ B: MUL? nop mov r6, r6, ror r9 @ C: ROR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, rrx @ D: RRX b .data_arm_apply_r6_and_rn b .data_unknown_r9 @ E: MUL? nop b .data_unknown_r9 @ F: MUL? .data_thumb_abort: ldrh r8, [r4] @ read instruction uaccess_disable ip @ disable userspace access tst r8, #1 << 11 @ L = 1 -> write? orreq r1, r1, #1 << 8 @ yes and r7, r8, #15 << 12 add pc, pc, r7, lsr #10 @ lookup in table nop /* 0 */ b .data_unknown /* 1 */ b .data_unknown /* 2 */ b .data_unknown /* 3 */ b .data_unknown /* 4 */ b .data_unknown /* 5 */ b .data_thumb_reg /* 6 */ b do_DataAbort /* 7 */ b do_DataAbort /* 8 */ b do_DataAbort /* 9 */ b do_DataAbort /* A */ b .data_unknown /* B */ b .data_thumb_pushpop /* C */ b .data_thumb_ldmstm /* D */ b .data_unknown /* E */ b .data_unknown /* F */ b .data_unknown .data_thumb_reg: tst r8, #1 << 9 beq do_DataAbort tst r8, #1 << 10 @ If 'S' (signed) bit is set movne r1, #0 @ it must be a load instr b do_DataAbort .data_thumb_pushpop: tst r8, #1 << 10 beq .data_unknown str r9, [sp, #-4]! and r6, r8, #0x55 @ hweight8(r8) + R bit and r9, r8, #0xaa add r6, r6, r9, lsr #1 and r9, r6, #0xcc and r6, r6, #0x33 add r6, r6, r9, lsr #2 movs r7, r8, lsr #9 @ C = r8 bit 8 (R bit) adc r6, r6, r6, lsr #4 @ high + low nibble + R bit and r6, r6, #15 @ number of regs to transfer ldr r7, [r2, #13 << 2] tst r8, #1 << 11 addeq r7, r7, r6, lsl #2 @ increment SP if PUSH subne r7, r7, r6, lsl #2 @ decrement SP if POP str r7, [r2, #13 << 2] ldr r9, [sp], #4 b do_DataAbort .data_thumb_ldmstm: str r9, [sp, #-4]! and r6, r8, #0x55 @ hweight8(r8) and r9, r8, #0xaa add r6, r6, r9, lsr #1 and r9, r6, #0xcc and r6, r6, #0x33 add r6, r6, r9, lsr #2 add r6, r6, r6, lsr #4 and r9, r8, #7 << 8 ldr r7, [r2, r9, lsr #6] and r6, r6, #15 @ number of regs to transfer sub r7, r7, r6, lsl #2 @ always decrement str r7, [r2, r9, lsr #6] ldr r9, [sp], #4 b do_DataAbort
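Both the ARM LDM/STM and the Thumb push/pop fixups above need the number of registers in the transfer list so they can undo the base register writeback, and they compute it with a mask-and-add population count (0x1111-style nibble masks for the 16-bit ARM list, 0x55/0xaa/0xcc/0x33 masks for the 8-bit Thumb list) instead of a loop. A C rendering of the 8-bit variant used by .data_thumb_pushpop, written out purely for illustration (hweight8 is the kernel's name for this operation; the extra "R bit" the real code folds in via the carry flag is left out):

	#include <stdint.h>
	#include <stdio.h>

	/* Population count of an 8-bit register list, mirroring the
	 * 0x55/0xaa/0xcc/0x33 mask-and-add sequence in .data_thumb_pushpop. */
	static unsigned hweight8(uint8_t rlist)
	{
		unsigned n = (rlist & 0x55) + ((rlist & 0xaa) >> 1); /* 2-bit sums  */
		n = (n & 0x33) + ((n & 0xcc) >> 2);		     /* 4-bit sums  */
		return (n + (n >> 4)) & 15;			     /* final count */
	}

	int main(void)
	{
		/* e.g. push {r0, r1, r4, r7} -> register list 0b10010011 */
		uint8_t rlist = 0x93;

		printf("registers transferred: %u\n", hweight8(rlist)); /* 4 */
		return 0;
	}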
AirFortressIlikara/LS2K0300-linux-4.19
8,005
arch/arm/mm/cache-v6.S
/* * linux/arch/arm/mm/cache-v6.S * * Copyright (C) 2001 Deep Blue Solutions Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This is the "shell" of the ARMv6 processor support. */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/errno.h> #include <asm/unwind.h> #include "proc-macros.S" #define HARVARD_CACHE #define CACHE_LINE_SIZE 32 #define D_CACHE_LINE_SIZE 32 #define BTB_FLUSH_SIZE 8 /* * v6_flush_icache_all() * * Flush the whole I-cache. * * ARM1136 erratum 411920 - Invalidate Instruction Cache operation can fail. * This erratum is present in 1136, 1156 and 1176. It does not affect the * MPCore. * * Registers: * r0 - set to 0 * r1 - corrupted */ ENTRY(v6_flush_icache_all) mov r0, #0 #ifdef CONFIG_ARM_ERRATA_411920 mrs r1, cpsr cpsid ifa @ disable interrupts mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache msr cpsr_cx, r1 @ restore interrupts .rept 11 @ ARM Ltd recommends at least nop @ 11 NOPs .endr #else mcr p15, 0, r0, c7, c5, 0 @ invalidate I-cache #endif ret lr ENDPROC(v6_flush_icache_all) /* * v6_flush_cache_all() * * Flush the entire cache. * * It is assumed that: */ ENTRY(v6_flush_kern_cache_all) mov r0, #0 #ifdef HARVARD_CACHE mcr p15, 0, r0, c7, c14, 0 @ D cache clean+invalidate #ifndef CONFIG_ARM_ERRATA_411920 mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate #else b v6_flush_icache_all #endif #else mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate #endif ret lr /* * v6_flush_cache_all() * * Flush all TLB entries in a particular address space * * - mm - mm_struct describing address space */ ENTRY(v6_flush_user_cache_all) /*FALLTHROUGH*/ /* * v6_flush_cache_range(start, end, flags) * * Flush a range of TLB entries in the specified address space. * * - start - start address (may not be aligned) * - end - end address (exclusive, may not be aligned) * - flags - vm_area_struct flags describing address space * * It is assumed that: * - we have a VIPT cache. */ ENTRY(v6_flush_user_cache_range) ret lr /* * v6_coherent_kern_range(start,end) * * Ensure that the I and D caches are coherent within specified * region. This is typically used when code has been written to * a memory region, and will be executed. * * - start - virtual start address of region * - end - virtual end address of region * * It is assumed that: * - the Icache does not read data from the write buffer */ ENTRY(v6_coherent_kern_range) /* FALLTHROUGH */ /* * v6_coherent_user_range(start,end) * * Ensure that the I and D caches are coherent within specified * region. This is typically used when code has been written to * a memory region, and will be executed. 
* * - start - virtual start address of region * - end - virtual end address of region * * It is assumed that: * - the Icache does not read data from the write buffer */ ENTRY(v6_coherent_user_range) UNWIND(.fnstart ) #ifdef HARVARD_CACHE bic r0, r0, #CACHE_LINE_SIZE - 1 1: USER( mcr p15, 0, r0, c7, c10, 1 ) @ clean D line add r0, r0, #CACHE_LINE_SIZE cmp r0, r1 blo 1b #endif mov r0, #0 #ifdef HARVARD_CACHE mcr p15, 0, r0, c7, c10, 4 @ drain write buffer #ifndef CONFIG_ARM_ERRATA_411920 mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate #else b v6_flush_icache_all #endif #else mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB #endif ret lr /* * Fault handling for the cache operation above. If the virtual address in r0 * isn't mapped, fail with -EFAULT. */ 9001: mov r0, #-EFAULT ret lr UNWIND(.fnend ) ENDPROC(v6_coherent_user_range) ENDPROC(v6_coherent_kern_range) /* * v6_flush_kern_dcache_area(void *addr, size_t size) * * Ensure that the data held in the page kaddr is written back * to the page in question. * * - addr - kernel address * - size - region size */ ENTRY(v6_flush_kern_dcache_area) add r1, r0, r1 bic r0, r0, #D_CACHE_LINE_SIZE - 1 1: #ifdef HARVARD_CACHE mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line #else mcr p15, 0, r0, c7, c15, 1 @ clean & invalidate unified line #endif add r0, r0, #D_CACHE_LINE_SIZE cmp r0, r1 blo 1b #ifdef HARVARD_CACHE mov r0, #0 mcr p15, 0, r0, c7, c10, 4 #endif ret lr /* * v6_dma_inv_range(start,end) * * Invalidate the data cache within the specified region; we will * be performing a DMA operation in this region and we want to * purge old data in the cache. * * - start - virtual start address of region * - end - virtual end address of region */ v6_dma_inv_range: #ifdef CONFIG_DMA_CACHE_RWFO ldrb r2, [r0] @ read for ownership strb r2, [r0] @ write for ownership #endif tst r0, #D_CACHE_LINE_SIZE - 1 bic r0, r0, #D_CACHE_LINE_SIZE - 1 #ifdef HARVARD_CACHE mcrne p15, 0, r0, c7, c10, 1 @ clean D line #else mcrne p15, 0, r0, c7, c11, 1 @ clean unified line #endif tst r1, #D_CACHE_LINE_SIZE - 1 #ifdef CONFIG_DMA_CACHE_RWFO ldrneb r2, [r1, #-1] @ read for ownership strneb r2, [r1, #-1] @ write for ownership #endif bic r1, r1, #D_CACHE_LINE_SIZE - 1 #ifdef HARVARD_CACHE mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line #else mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line #endif 1: #ifdef HARVARD_CACHE mcr p15, 0, r0, c7, c6, 1 @ invalidate D line #else mcr p15, 0, r0, c7, c7, 1 @ invalidate unified line #endif add r0, r0, #D_CACHE_LINE_SIZE cmp r0, r1 #ifdef CONFIG_DMA_CACHE_RWFO ldrlo r2, [r0] @ read for ownership strlo r2, [r0] @ write for ownership #endif blo 1b mov r0, #0 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer ret lr /* * v6_dma_clean_range(start,end) * - start - virtual start address of region * - end - virtual end address of region */ v6_dma_clean_range: bic r0, r0, #D_CACHE_LINE_SIZE - 1 1: #ifdef CONFIG_DMA_CACHE_RWFO ldr r2, [r0] @ read for ownership #endif #ifdef HARVARD_CACHE mcr p15, 0, r0, c7, c10, 1 @ clean D line #else mcr p15, 0, r0, c7, c11, 1 @ clean unified line #endif add r0, r0, #D_CACHE_LINE_SIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer ret lr /* * v6_dma_flush_range(start,end) * - start - virtual start address of region * - end - virtual end address of region */ ENTRY(v6_dma_flush_range) #ifdef CONFIG_DMA_CACHE_RWFO ldrb r2, [r0] @ read for ownership strb r2, [r0] @ write for ownership #endif bic r0, r0, #D_CACHE_LINE_SIZE - 1 1: #ifdef HARVARD_CACHE mcr p15, 0, r0, c7, 
c14, 1 @ clean & invalidate D line #else mcr p15, 0, r0, c7, c15, 1 @ clean & invalidate line #endif add r0, r0, #D_CACHE_LINE_SIZE cmp r0, r1 #ifdef CONFIG_DMA_CACHE_RWFO ldrlob r2, [r0] @ read for ownership strlob r2, [r0] @ write for ownership #endif blo 1b mov r0, #0 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(v6_dma_map_area) add r1, r1, r0 teq r2, #DMA_FROM_DEVICE beq v6_dma_inv_range #ifndef CONFIG_DMA_CACHE_RWFO b v6_dma_clean_range #else teq r2, #DMA_TO_DEVICE beq v6_dma_clean_range b v6_dma_flush_range #endif ENDPROC(v6_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(v6_dma_unmap_area) #ifndef CONFIG_DMA_CACHE_RWFO add r1, r1, r0 teq r2, #DMA_TO_DEVICE bne v6_dma_inv_range #endif ret lr ENDPROC(v6_dma_unmap_area) .globl v6_flush_kern_cache_louis .equ v6_flush_kern_cache_louis, v6_flush_kern_cache_all __INITDATA @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions v6
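v6_dma_inv_range above is careful at the edges: a start or end address that is not cache-line aligned means the boundary line also holds data outside the DMA buffer, so that line is cleaned (or cleaned and invalidated) first, and only the fully covered lines are simply discarded. A C sketch of just that alignment decision; the three maintenance functions are stand-ins for the corresponding mcr operations, not real API:

	#include <stdint.h>
	#include <stdio.h>

	#define D_CACHE_LINE_SIZE 32	/* as defined in cache-v6.S above */

	/* Stand-ins for the D-cache maintenance mcr operations. */
	static void clean_dline(uintptr_t va)     { printf("clean       %#lx\n", (unsigned long)va); }
	static void clean_inv_dline(uintptr_t va) { printf("clean+inval %#lx\n", (unsigned long)va); }
	static void inv_dline(uintptr_t va)       { printf("invalidate  %#lx\n", (unsigned long)va); }

	static void dma_inv_range(uintptr_t start, uintptr_t end)
	{
		int start_partial = start & (D_CACHE_LINE_SIZE - 1);
		int end_partial   = end   & (D_CACHE_LINE_SIZE - 1);

		start &= ~(uintptr_t)(D_CACHE_LINE_SIZE - 1);
		end   &= ~(uintptr_t)(D_CACHE_LINE_SIZE - 1);

		if (start_partial)		/* buffer shares its first line:      */
			clean_dline(start);	/* write it back before the loop      */
						/* below invalidates it               */
		if (end_partial)		/* buffer shares its last line:       */
			clean_inv_dline(end);	/* clean+invalidate it in one go      */

		for (uintptr_t va = start; va < end; va += D_CACHE_LINE_SIZE)
			inv_dline(va);		/* fully covered lines: just discard  */
	}

	int main(void)
	{
		dma_inv_range(0x1010, 0x10f8);	/* deliberately misaligned range */
		return 0;
	}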
AirFortressIlikara/LS2K0300-linux-4.19
6,266
arch/arm/mm/cache-fa.S
/* * linux/arch/arm/mm/cache-fa.S * * Copyright (C) 2005 Faraday Corp. * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt> * * Based on cache-v4wb.S: * Copyright (C) 1997-2002 Russell king * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Processors: FA520 FA526 FA626 */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/memory.h> #include <asm/page.h> #include "proc-macros.S" /* * The size of one data cache line. */ #define CACHE_DLINESIZE 16 /* * The total size of the data cache. */ #ifdef CONFIG_ARCH_GEMINI #define CACHE_DSIZE 8192 #else #define CACHE_DSIZE 16384 #endif /* FIXME: put optimal value here. Current one is just estimation */ #define CACHE_DLIMIT (CACHE_DSIZE * 2) /* * flush_icache_all() * * Unconditionally clean and invalidate the entire icache. */ ENTRY(fa_flush_icache_all) mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache ret lr ENDPROC(fa_flush_icache_all) /* * flush_user_cache_all() * * Clean and invalidate all cache entries in a particular address * space. */ ENTRY(fa_flush_user_cache_all) /* FALLTHROUGH */ /* * flush_kern_cache_all() * * Clean and invalidate the entire cache. */ ENTRY(fa_flush_kern_cache_all) mov ip, #0 mov r2, #VM_EXEC __flush_whole_cache: mcr p15, 0, ip, c7, c14, 0 @ clean/invalidate D cache tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush ret lr /* * flush_user_cache_range(start, end, flags) * * Invalidate a range of cache entries in the specified * address space. * * - start - start address (inclusive, page aligned) * - end - end address (exclusive, page aligned) * - flags - vma_area_struct flags describing address space */ ENTRY(fa_flush_user_cache_range) mov ip, #0 sub r3, r1, r0 @ calculate total size cmp r3, #CACHE_DLIMIT @ total size >= limit? bhs __flush_whole_cache @ flush whole D cache 1: tst r2, #VM_EXEC mcrne p15, 0, r0, c7, c5, 1 @ invalidate I line mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b tst r2, #VM_EXEC mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB mcrne p15, 0, ip, c7, c10, 4 @ data write barrier mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush ret lr /* * coherent_kern_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. * * - start - virtual start address * - end - virtual end address */ ENTRY(fa_coherent_kern_range) /* fall through */ /* * coherent_user_range(start, end) * * Ensure coherency between the Icache and the Dcache in the * region described by start. If you have non-snooping * Harvard caches, you need to implement this function. 
* * - start - virtual start address * - end - virtual end address */ ENTRY(fa_coherent_user_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB mcr p15, 0, r0, c7, c10, 4 @ drain write buffer mcr p15, 0, r0, c7, c5, 4 @ prefetch flush ret lr /* * flush_kern_dcache_area(void *addr, size_t size) * * Ensure that the data held in the page kaddr is written back * to the page in question. * * - addr - kernel address * - size - size of region */ ENTRY(fa_flush_kern_dcache_area) add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c10, 4 @ drain write buffer ret lr /* * dma_inv_range(start, end) * * Invalidate (discard) the specified virtual address range. * May not write back any entries. If 'start' or 'end' * are not cache line aligned, those lines must be written * back. * * - start - virtual start address * - end - virtual end address */ fa_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D entry tst r1, #CACHE_DLINESIZE - 1 bic r1, r1, #CACHE_DLINESIZE - 1 mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D entry 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer ret lr /* * dma_clean_range(start, end) * * Clean (write back) the specified virtual address range. * * - start - virtual start address * - end - virtual end address */ fa_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer ret lr /* * dma_flush_range(start,end) * - start - virtual start address of region * - end - virtual end address of region */ ENTRY(fa_dma_flush_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b mov r0, #0 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer ret lr /* * dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(fa_dma_map_area) add r1, r1, r0 cmp r2, #DMA_TO_DEVICE beq fa_dma_clean_range bcs fa_dma_inv_range b fa_dma_flush_range ENDPROC(fa_dma_map_area) /* * dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(fa_dma_unmap_area) ret lr ENDPROC(fa_dma_unmap_area) .globl fa_flush_kern_cache_louis .equ fa_flush_kern_cache_louis, fa_flush_kern_cache_all __INITDATA @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) define_cache_functions fa
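fa_dma_map_area above picks the maintenance primitive from the DMA direction with a single compare: equal to DMA_TO_DEVICE means clean only, above it (DMA_FROM_DEVICE) means invalidate, below it (DMA_BIDIRECTIONAL) means clean+invalidate. A C sketch of that dispatch, using the conventional Linux enum dma_data_direction values; the numeric values are an assumption worth checking against the kernel headers, and the range functions are stand-ins:

	#include <stdio.h>

	/* Conventional Linux values; DMA_NONE is never passed to map_area. */
	enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1,
				  DMA_FROM_DEVICE = 2, DMA_NONE = 3 };

	static void dma_clean_range(void *s, void *e) { printf("clean            %p..%p\n", s, e); }
	static void dma_inv_range(void *s, void *e)   { printf("invalidate       %p..%p\n", s, e); }
	static void dma_flush_range(void *s, void *e) { printf("clean+invalidate %p..%p\n", s, e); }

	/* Mirrors "cmp r2, #DMA_TO_DEVICE; beq clean; bcs inv; b flush". */
	static void dma_map_area(void *start, unsigned long size, int dir)
	{
		void *end = (char *)start + size;	/* add r1, r1, r0 */

		if (dir == DMA_TO_DEVICE)		/* beq: device only reads  */
			dma_clean_range(start, end);
		else if (dir > DMA_TO_DEVICE)		/* bcs: device only writes */
			dma_inv_range(start, end);
		else					/* bidirectional           */
			dma_flush_range(start, end);
	}

	int main(void)
	{
		char buf[64];

		dma_map_area(buf, sizeof(buf), DMA_FROM_DEVICE);
		return 0;
	}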
AirFortressIlikara/LS2K0300-linux-4.19
6,044
arch/arm/mm/proc-arm720.S
/* * linux/arch/arm/mm/proc-arm720.S: MMU functions for ARM720 * * Copyright (C) 2000 Steve Hill (sjhill@cotw.com) * Rob Scott (rscott@mtrob.fdns.net) * Copyright (C) 2000 ARM Limited, Deep Blue Solutions Ltd. * hacked for non-paged-MM by Hyok S. Choi, 2004. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * These are the low level assembler for performing cache and TLB * functions on the ARM720T. The ARM720T has a writethrough IDC * cache, so we don't need to clean it. * * Changelog: * 05-09-2000 SJH Created by moving 720 specific functions * out of 'proc-arm6,7.S' per RMK discussion * 07-25-2000 SJH Added idle function. * 08-25-2000 DBS Updated for integration of ARM Ltd version. * 04-20-2004 HSC modified for non-paged memory management mode. */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/pgtable.h> #include <asm/ptrace.h> #include "proc-macros.S" /* * Function: arm720_proc_init (void) * : arm720_proc_fin (void) * * Notes : This processor does not require these */ ENTRY(cpu_arm720_dcache_clean_area) ENTRY(cpu_arm720_proc_init) ret lr ENTRY(cpu_arm720_proc_fin) mrc p15, 0, r0, c1, c0, 0 bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches ret lr /* * Function: arm720_proc_do_idle(void) * Params : r0 = unused * Purpose : put the processor in proper idle mode */ ENTRY(cpu_arm720_do_idle) ret lr /* * Function: arm720_switch_mm(unsigned long pgd_phys) * Params : pgd_phys Physical address of page table * Purpose : Perform a task switch, saving the old process' state and restoring * the new. */ ENTRY(cpu_arm720_switch_mm) #ifdef CONFIG_MMU mov r1, #0 mcr p15, 0, r1, c7, c7, 0 @ invalidate cache mcr p15, 0, r0, c2, c0, 0 @ update page table ptr mcr p15, 0, r1, c8, c7, 0 @ flush TLB (v4) #endif ret lr /* * Function: arm720_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext) * Params : r0 = Address to set * : r1 = value to set * Purpose : Set a PTE and flush it out of any WB cache */ .align 5 ENTRY(cpu_arm720_set_pte_ext) #ifdef CONFIG_MMU armv3_set_pte_ext wc_disable=0 #endif ret lr /* * Function: arm720_reset * Params : r0 = address to jump to * Notes : This sets up everything for a reset */ .pushsection .idmap.text, "ax" ENTRY(cpu_arm720_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate cache #ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ flush TLB (v4) #endif mrc p15, 0, ip, c1, c0, 0 @ get ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x2100 @ ..v....s........ 
mcr p15, 0, ip, c1, c0, 0 @ ctrl register ret r0 ENDPROC(cpu_arm720_reset) .popsection .type __arm710_setup, #function __arm710_setup: mov r0, #0 mcr p15, 0, r0, c7, c7, 0 @ invalidate caches #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7, 0 @ flush TLB (v4) #endif mrc p15, 0, r0, c1, c0 @ get control register ldr r5, arm710_cr1_clear bic r0, r0, r5 ldr r5, arm710_cr1_set orr r0, r0, r5 ret lr @ __ret (head.S) .size __arm710_setup, . - __arm710_setup /* * R * .RVI ZFRS BLDP WCAM * .... 0001 ..11 1101 * */ .type arm710_cr1_clear, #object .type arm710_cr1_set, #object arm710_cr1_clear: .word 0x0f3f arm710_cr1_set: .word 0x013d .type __arm720_setup, #function __arm720_setup: mov r0, #0 mcr p15, 0, r0, c7, c7, 0 @ invalidate caches #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7, 0 @ flush TLB (v4) #endif adr r5, arm720_crval ldmia r5, {r5, r6} mrc p15, 0, r0, c1, c0 @ get control register bic r0, r0, r5 orr r0, r0, r6 ret lr @ __ret (head.S) .size __arm720_setup, . - __arm720_setup /* * R * .RVI ZFRS BLDP WCAM * ..1. 1001 ..11 1101 * */ .type arm720_crval, #object arm720_crval: crval clear=0x00002f3f, mmuset=0x0000213d, ucset=0x00000130 __INITDATA @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions arm720, dabort=v4t_late_abort, pabort=legacy_pabort .section ".rodata" string cpu_arch_name, "armv4t" string cpu_elf_name, "v4" string cpu_arm710_name, "ARM710T" string cpu_arm720_name, "ARM720T" .align /* * See <asm/procinfo.h> for a definition of this structure. */ .section ".proc.info.init", #alloc .macro arm720_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cpu_flush:req .type __\name\()_proc_info,#object __\name\()_proc_info: .long \cpu_val .long \cpu_mask .long PMD_TYPE_SECT | \ PMD_SECT_BUFFERABLE | \ PMD_SECT_CACHEABLE | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ .long PMD_TYPE_SECT | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn \cpu_flush, __\name\()_proc_info @ cpu_flush .long cpu_arch_name @ arch_name .long cpu_elf_name @ elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB @ elf_hwcap .long \cpu_name .long arm720_processor_functions .long v4_tlb_fns .long v4wt_user_fns .long v4_cache_fns .size __\name\()_proc_info, . - __\name\()_proc_info .endm arm720_proc_info arm710, 0x41807100, 0xffffff00, cpu_arm710_name, __arm710_setup arm720_proc_info arm720, 0x41807200, 0xffffff00, cpu_arm720_name, __arm720_setup
AirFortressIlikara/LS2K0300-linux-4.19
6,261
arch/arm/mm/proc-v7m.S
/* * linux/arch/arm/mm/proc-v7m.S * * Copyright (C) 2008 ARM Ltd. * Copyright (C) 2001 Deep Blue Solutions Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This is the "shell" of the ARMv7-M processor support. */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/memory.h> #include <asm/v7m.h> #include "proc-macros.S" ENTRY(cpu_v7m_proc_init) ret lr ENDPROC(cpu_v7m_proc_init) ENTRY(cpu_v7m_proc_fin) ret lr ENDPROC(cpu_v7m_proc_fin) /* * cpu_v7m_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * - loc - location to jump to for soft reset */ .align 5 ENTRY(cpu_v7m_reset) ret r0 ENDPROC(cpu_v7m_reset) /* * cpu_v7m_do_idle() * * Idle the processor (eg, wait for interrupt). * * IRQs are already disabled. */ ENTRY(cpu_v7m_do_idle) wfi ret lr ENDPROC(cpu_v7m_do_idle) ENTRY(cpu_v7m_dcache_clean_area) ret lr ENDPROC(cpu_v7m_dcache_clean_area) /* * There is no MMU, so here is nothing to do. */ ENTRY(cpu_v7m_switch_mm) ret lr ENDPROC(cpu_v7m_switch_mm) .globl cpu_v7m_suspend_size .equ cpu_v7m_suspend_size, 0 #ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_v7m_do_suspend) ret lr ENDPROC(cpu_v7m_do_suspend) ENTRY(cpu_v7m_do_resume) ret lr ENDPROC(cpu_v7m_do_resume) #endif ENTRY(cpu_cm7_dcache_clean_area) dcache_line_size r2, r3 movw r3, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_DCCMVAC movt r3, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_DCCMVAC 1: str r0, [r3] @ clean D entry add r0, r0, r2 subs r1, r1, r2 bhi 1b dsb ret lr ENDPROC(cpu_cm7_dcache_clean_area) ENTRY(cpu_cm7_proc_fin) movw r2, #:lower16:(BASEADDR_V7M_SCB + V7M_SCB_CCR) movt r2, #:upper16:(BASEADDR_V7M_SCB + V7M_SCB_CCR) ldr r0, [r2] bic r0, r0, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC) str r0, [r2] ret lr ENDPROC(cpu_cm7_proc_fin) .section ".init.text", #alloc, #execinstr __v7m_cm7_setup: mov r8, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC| V7M_SCB_CCR_BP) b __v7m_setup_cont /* * __v7m_setup * * This should be able to cover all ARMv7-M cores. */ __v7m_setup: mov r8, 0 __v7m_setup_cont: @ Configure the vector table base address ldr r0, =BASEADDR_V7M_SCB ldr r12, =vector_table str r12, [r0, V7M_SCB_VTOR] @ enable UsageFault, BusFault and MemManage fault. ldr r5, [r0, #V7M_SCB_SHCSR] orr r5, #(V7M_SCB_SHCSR_USGFAULTENA | V7M_SCB_SHCSR_BUSFAULTENA | V7M_SCB_SHCSR_MEMFAULTENA) str r5, [r0, #V7M_SCB_SHCSR] @ Lower the priority of the SVC and PendSV exceptions mov r5, #0x80000000 str r5, [r0, V7M_SCB_SHPR2] @ set SVC priority mov r5, #0x00800000 str r5, [r0, V7M_SCB_SHPR3] @ set PendSV priority @ SVC to switch to handler mode. Notice that this requires sp to @ point to writeable memory because the processor saves @ some registers to the stack. 
badr r1, 1f ldr r5, [r12, #11 * 4] @ read the SVC vector entry str r1, [r12, #11 * 4] @ write the temporary SVC vector entry dsb mov r6, lr @ save LR ldr sp, =init_thread_union + THREAD_START_SP cpsie i svc #0 1: cpsid i /* Calculate exc_ret */ orr r10, lr, #EXC_RET_THREADMODE_PROCESSSTACK ldmia sp, {r0-r3, r12} str r5, [r12, #11 * 4] @ restore the original SVC vector entry mov lr, r6 @ restore LR @ Special-purpose control register mov r1, #1 msr control, r1 @ Thread mode has unpriviledged access @ Configure caches (if implemented) teq r8, #0 stmneia sp, {r0-r6, lr} @ v7m_invalidate_l1 touches r0-r6 blne v7m_invalidate_l1 teq r8, #0 @ re-evalutae condition ldmneia sp, {r0-r6, lr} @ Configure the System Control Register to ensure 8-byte stack alignment @ Note the STKALIGN bit is either RW or RAO. ldr r0, [r0, V7M_SCB_CCR] @ system control register orr r0, #V7M_SCB_CCR_STKALIGN orr r0, r0, r8 ret lr ENDPROC(__v7m_setup) /* * Cortex-M7 processor functions */ globl_equ cpu_cm7_proc_init, cpu_v7m_proc_init globl_equ cpu_cm7_reset, cpu_v7m_reset globl_equ cpu_cm7_do_idle, cpu_v7m_do_idle globl_equ cpu_cm7_switch_mm, cpu_v7m_switch_mm define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1 define_processor_functions cm7, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1 .section ".rodata" string cpu_arch_name, "armv7m" string cpu_elf_name "v7m" string cpu_v7m_name "ARMv7-M" .section ".proc.info.init", #alloc .macro __v7m_proc name, initfunc, cache_fns = nop_cache_fns, hwcaps = 0, proc_fns = v7m_processor_functions .long 0 /* proc_info_list.__cpu_mm_mmu_flags */ .long 0 /* proc_info_list.__cpu_io_mmu_flags */ initfn \initfunc, \name .long cpu_arch_name .long cpu_elf_name .long HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \hwcaps .long cpu_v7m_name .long \proc_fns .long 0 /* proc_info_list.tlb */ .long 0 /* proc_info_list.user */ .long \cache_fns .endm /* * Match ARM Cortex-M7 processor. */ .type __v7m_cm7_proc_info, #object __v7m_cm7_proc_info: .long 0x410fc270 /* ARM Cortex-M7 0xC27 */ .long 0xff0ffff0 /* Mask off revision, patch release */ __v7m_proc __v7m_cm7_proc_info, __v7m_cm7_setup, hwcaps = HWCAP_EDSP, cache_fns = v7m_cache_fns, proc_fns = cm7_processor_functions .size __v7m_cm7_proc_info, . - __v7m_cm7_proc_info /* * Match ARM Cortex-M4 processor. */ .type __v7m_cm4_proc_info, #object __v7m_cm4_proc_info: .long 0x410fc240 /* ARM Cortex-M4 0xC24 */ .long 0xff0ffff0 /* Mask off revision, patch release */ __v7m_proc __v7m_cm4_proc_info, __v7m_setup, hwcaps = HWCAP_EDSP .size __v7m_cm4_proc_info, . - __v7m_cm4_proc_info /* * Match ARM Cortex-M3 processor. */ .type __v7m_cm3_proc_info, #object __v7m_cm3_proc_info: .long 0x410fc230 /* ARM Cortex-M3 0xC23 */ .long 0xff0ffff0 /* Mask off revision, patch release */ __v7m_proc __v7m_cm3_proc_info, __v7m_setup .size __v7m_cm3_proc_info, . - __v7m_cm3_proc_info /* * Match any ARMv7-M processor core. */ .type __v7m_proc_info, #object __v7m_proc_info: .long 0x000f0000 @ Required ID value .long 0x000f0000 @ Mask for ID __v7m_proc __v7m_proc_info, __v7m_setup .size __v7m_proc_info, . - __v7m_proc_info
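__v7m_setup above programs the ARMv7-M System Control Block: it points VTOR at the kernel's vector table, enables the UsageFault/BusFault/MemManage handlers in SHCSR, and lowers the priority of SVC (SHPR2) and PendSV (SHPR3). A bare-metal style C sketch of the same MMIO writes; the 0xE000ED00 base and the register offsets are the architectural SCB layout, but treat them as assumptions to be checked against asm/v7m.h rather than as the kernel's definitions:

	#include <stdint.h>

	/* ARMv7-M System Control Block (architecturally fixed address map). */
	#define SCB_BASE  0xE000ED00u
	#define SCB_VTOR  (*(volatile uint32_t *)(SCB_BASE + 0x08))
	#define SCB_SHPR2 (*(volatile uint32_t *)(SCB_BASE + 0x1C))
	#define SCB_SHPR3 (*(volatile uint32_t *)(SCB_BASE + 0x20))
	#define SCB_SHCSR (*(volatile uint32_t *)(SCB_BASE + 0x24))

	#define SHCSR_MEMFAULTENA (1u << 16)
	#define SHCSR_BUSFAULTENA (1u << 17)
	#define SHCSR_USGFAULTENA (1u << 18)

	extern uint32_t vector_table[];	/* provided by the platform/linker */

	static void v7m_scb_setup(void)
	{
		SCB_VTOR = (uint32_t)(uintptr_t)vector_table;	/* vector table base */

		/* Enable the three configurable fault handlers. */
		SCB_SHCSR |= SHCSR_USGFAULTENA | SHCSR_BUSFAULTENA | SHCSR_MEMFAULTENA;

		SCB_SHPR2 = 0x80000000u;	/* SVC priority    (byte 3 of SHPR2) */
		SCB_SHPR3 = 0x00800000u;	/* PendSV priority (byte 2 of SHPR3) */
	}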
AirFortressIlikara/LS2K0300-linux-4.19
7,163
arch/arm/mach-tegra/reset-handler.S
/* * Copyright (c) 2012, NVIDIA Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/init.h> #include <linux/linkage.h> #include <soc/tegra/flowctrl.h> #include <soc/tegra/fuse.h> #include <asm/asm-offsets.h> #include <asm/cache.h> #include "iomap.h" #include "reset.h" #include "sleep.h" #define PMC_SCRATCH41 0x140 #define RESET_DATA(x) ((TEGRA_RESET_##x)*4) #ifdef CONFIG_PM_SLEEP /* * tegra_resume * * CPU boot vector when restarting the a CPU following * an LP2 transition. Also branched to by LP0 and LP1 resume after * re-enabling sdram. * * r6: SoC ID * r8: CPU part number */ ENTRY(tegra_resume) check_cpu_part_num 0xc09, r8, r9 bleq v7_invalidate_l1 cpu_id r0 cmp r0, #0 @ CPU0? THUMB( it ne ) bne cpu_resume @ no tegra_get_soc_id TEGRA_APB_MISC_BASE, r6 /* Are we on Tegra20? */ cmp r6, #TEGRA20 beq 1f @ Yes /* Clear the flow controller flags for this CPU. */ cpu_to_csr_reg r3, r0 mov32 r2, TEGRA_FLOW_CTRL_BASE ldr r1, [r2, r3] /* Clear event & intr flag */ orr r1, r1, \ #FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG movw r0, #0x3FFD @ enable, cluster_switch, immed, bitmaps @ & ext flags for CPU power mgnt bic r1, r1, r0 str r1, [r2, r3] 1: mov32 r9, 0xc09 cmp r8, r9 bne end_ca9_scu_l2_resume #ifdef CONFIG_HAVE_ARM_SCU /* enable SCU */ mov32 r0, TEGRA_ARM_PERIF_BASE ldr r1, [r0] orr r1, r1, #1 str r1, [r0] #endif #ifdef CONFIG_CACHE_L2X0 /* L2 cache resume & re-enable */ bl l2c310_early_resume #endif end_ca9_scu_l2_resume: mov32 r9, 0xc0f cmp r8, r9 bleq tegra_init_l2_for_a15 b cpu_resume ENDPROC(tegra_resume) #endif .align L1_CACHE_SHIFT ENTRY(__tegra_cpu_reset_handler_start) /* * __tegra_cpu_reset_handler: * * Common handler for all CPU reset events. * * Register usage within the reset handler: * * Others: scratch * R6 = SoC ID * R7 = CPU present (to the OS) mask * R8 = CPU in LP1 state mask * R9 = CPU in LP2 state mask * R10 = CPU number * R11 = CPU mask * R12 = pointer to reset handler data * * NOTE: This code is copied to IRAM. All code and data accesses * must be position-independent. 
*/ .align L1_CACHE_SHIFT ENTRY(__tegra_cpu_reset_handler) cpsid aif, 0x13 @ SVC mode, interrupts disabled tegra_get_soc_id TEGRA_APB_MISC_BASE, r6 #ifdef CONFIG_ARCH_TEGRA_2x_SOC t20_check: cmp r6, #TEGRA20 bne after_t20_check t20_errata: # Tegra20 is a Cortex-A9 r1p1 mrc p15, 0, r0, c1, c0, 0 @ read system control register orr r0, r0, #1 << 14 @ erratum 716044 mcr p15, 0, r0, c1, c0, 0 @ write system control register mrc p15, 0, r0, c15, c0, 1 @ read diagnostic register orr r0, r0, #1 << 4 @ erratum 742230 orr r0, r0, #1 << 11 @ erratum 751472 mcr p15, 0, r0, c15, c0, 1 @ write diagnostic register b after_errata after_t20_check: #endif #ifdef CONFIG_ARCH_TEGRA_3x_SOC t30_check: cmp r6, #TEGRA30 bne after_t30_check t30_errata: # Tegra30 is a Cortex-A9 r2p9 mrc p15, 0, r0, c15, c0, 1 @ read diagnostic register orr r0, r0, #1 << 6 @ erratum 743622 orr r0, r0, #1 << 11 @ erratum 751472 mcr p15, 0, r0, c15, c0, 1 @ write diagnostic register b after_errata after_t30_check: #endif after_errata: mrc p15, 0, r10, c0, c0, 5 @ MPIDR and r10, r10, #0x3 @ R10 = CPU number mov r11, #1 mov r11, r11, lsl r10 @ R11 = CPU mask adr r12, __tegra_cpu_reset_handler_data #ifdef CONFIG_SMP /* Does the OS know about this CPU? */ ldr r7, [r12, #RESET_DATA(MASK_PRESENT)] tst r7, r11 @ if !present bleq __die @ CPU not present (to OS) #endif #ifdef CONFIG_ARCH_TEGRA_2x_SOC /* Are we on Tegra20? */ cmp r6, #TEGRA20 bne 1f /* If not CPU0, don't let CPU0 reset CPU1 now that CPU1 is coming up. */ mov32 r5, TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET mov r0, #CPU_NOT_RESETTABLE cmp r10, #0 strneb r0, [r5, #__tegra20_cpu1_resettable_status_offset] 1: #endif /* Waking up from LP1? */ ldr r8, [r12, #RESET_DATA(MASK_LP1)] tst r8, r11 @ if in_lp1 beq __is_not_lp1 cmp r10, #0 bne __die @ only CPU0 can be here ldr lr, [r12, #RESET_DATA(STARTUP_LP1)] cmp lr, #0 bleq __die @ no LP1 startup handler THUMB( add lr, lr, #1 ) @ switch to Thumb mode bx lr __is_not_lp1: /* Waking up from LP2? */ ldr r9, [r12, #RESET_DATA(MASK_LP2)] tst r9, r11 @ if in_lp2 beq __is_not_lp2 ldr lr, [r12, #RESET_DATA(STARTUP_LP2)] cmp lr, #0 bleq __die @ no LP2 startup handler bx lr __is_not_lp2: #ifdef CONFIG_SMP /* * Can only be secondary boot (initial or hotplug) * CPU0 can't be here for Tegra20/30 */ cmp r6, #TEGRA114 beq __no_cpu0_chk cmp r10, #0 bleq __die @ CPU0 cannot be here __no_cpu0_chk: ldr lr, [r12, #RESET_DATA(STARTUP_SECONDARY)] cmp lr, #0 bleq __die @ no secondary startup handler bx lr #endif /* * We don't know why the CPU reset. Just kill it. * The LR register will contain the address we died at + 4. */ __die: sub lr, lr, #4 mov32 r7, TEGRA_PMC_BASE str lr, [r7, #PMC_SCRATCH41] mov32 r7, TEGRA_CLK_RESET_BASE /* Are we on Tegra20? */ cmp r6, #TEGRA20 bne 1f #ifdef CONFIG_ARCH_TEGRA_2x_SOC mov32 r0, 0x1111 mov r1, r0, lsl r10 str r1, [r7, #0x340] @ CLK_RST_CPU_CMPLX_SET #endif 1: #ifdef CONFIG_ARCH_TEGRA_3x_SOC mov32 r6, TEGRA_FLOW_CTRL_BASE cmp r10, #0 moveq r1, #FLOW_CTRL_HALT_CPU0_EVENTS moveq r2, #FLOW_CTRL_CPU0_CSR movne r1, r10, lsl #3 addne r2, r1, #(FLOW_CTRL_CPU1_CSR-8) addne r1, r1, #(FLOW_CTRL_HALT_CPU1_EVENTS-8) /* Clear CPU "event" and "interrupt" flags and power gate it when halting but not before it is in the "WFI" state. 
*/ ldr r0, [r6, +r2] orr r0, r0, #FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG orr r0, r0, #FLOW_CTRL_CSR_ENABLE str r0, [r6, +r2] /* Unconditionally halt this CPU */ mov r0, #FLOW_CTRL_WAITEVENT str r0, [r6, +r1] ldr r0, [r6, +r1] @ memory barrier dsb isb wfi @ CPU should be power gated here /* If the CPU didn't power gate above just kill it's clock. */ mov r0, r11, lsl #8 str r0, [r7, #348] @ CLK_CPU_CMPLX_SET #endif /* If the CPU still isn't dead, just spin here. */ b . ENDPROC(__tegra_cpu_reset_handler) .align L1_CACHE_SHIFT .type __tegra_cpu_reset_handler_data, %object .globl __tegra_cpu_reset_handler_data __tegra_cpu_reset_handler_data: .rept TEGRA_RESET_DATA_SIZE .long 0 .endr .globl __tegra20_cpu1_resettable_status_offset .equ __tegra20_cpu1_resettable_status_offset, \ . - __tegra_cpu_reset_handler_start .byte 0 .align L1_CACHE_SHIFT ENTRY(__tegra_cpu_reset_handler_end)
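The banner comment of __tegra_cpu_reset_handler describes a dispatch driven by per-CPU bit masks stored in __tegra_cpu_reset_handler_data (present mask, LP1 mask, LP2 mask, plus startup vectors). A minimal C sketch of that dispatch is given below for clarity; the struct layout and field names are an assumption made for illustration, the real layout is whatever the TEGRA_RESET_* indices in reset.h define, and the "only CPU0 handles LP1" rule is taken from the assembly above.

#include <stdint.h>

/* Illustrative model of the reset handler data block (layout assumed). */
struct reset_handler_data {
	uint32_t mask_present;		/* CPUs the OS knows about         */
	uint32_t mask_lp1;		/* CPUs currently suspended in LP1 */
	uint32_t mask_lp2;		/* CPUs currently suspended in LP2 */
	void (*startup_lp1)(void);
	void (*startup_lp2)(void);
	void (*startup_secondary)(void);
};

/* Returns the vector this CPU should jump to, or NULL to "die". */
static void (*pick_startup(const struct reset_handler_data *d,
			   unsigned int cpu))(void)
{
	uint32_t mask = 1u << cpu;

	if (!(d->mask_present & mask))
		return 0;			/* CPU not present to the OS   */
	if (d->mask_lp1 & mask)
		return cpu == 0 ? d->startup_lp1 : 0;	/* only CPU0 does LP1 */
	if (d->mask_lp2 & mask)
		return d->startup_lp2;
	return d->startup_secondary;		/* initial or hotplug boot     */
}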
AirFortressIlikara/LS2K0300-linux-4.19
22,269
arch/arm/mach-tegra/sleep-tegra30.S
/* * Copyright (c) 2012, NVIDIA Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <soc/tegra/flowctrl.h> #include <soc/tegra/fuse.h> #include <asm/asm-offsets.h> #include <asm/assembler.h> #include <asm/cache.h> #include "irammap.h" #include "sleep.h" #define EMC_CFG 0xc #define EMC_ADR_CFG 0x10 #define EMC_TIMING_CONTROL 0x28 #define EMC_REFRESH 0x70 #define EMC_NOP 0xdc #define EMC_SELF_REF 0xe0 #define EMC_MRW 0xe8 #define EMC_FBIO_CFG5 0x104 #define EMC_AUTO_CAL_CONFIG 0x2a4 #define EMC_AUTO_CAL_INTERVAL 0x2a8 #define EMC_AUTO_CAL_STATUS 0x2ac #define EMC_REQ_CTRL 0x2b0 #define EMC_CFG_DIG_DLL 0x2bc #define EMC_EMC_STATUS 0x2b4 #define EMC_ZCAL_INTERVAL 0x2e0 #define EMC_ZQ_CAL 0x2ec #define EMC_XM2VTTGENPADCTRL 0x310 #define EMC_XM2VTTGENPADCTRL2 0x314 #define PMC_CTRL 0x0 #define PMC_CTRL_SIDE_EFFECT_LP0 (1 << 14) /* enter LP0 when CPU pwr gated */ #define PMC_PLLP_WB0_OVERRIDE 0xf8 #define PMC_IO_DPD_REQ 0x1b8 #define PMC_IO_DPD_STATUS 0x1bc #define CLK_RESET_CCLK_BURST 0x20 #define CLK_RESET_CCLK_DIVIDER 0x24 #define CLK_RESET_SCLK_BURST 0x28 #define CLK_RESET_SCLK_DIVIDER 0x2c #define CLK_RESET_PLLC_BASE 0x80 #define CLK_RESET_PLLC_MISC 0x8c #define CLK_RESET_PLLM_BASE 0x90 #define CLK_RESET_PLLM_MISC 0x9c #define CLK_RESET_PLLP_BASE 0xa0 #define CLK_RESET_PLLP_MISC 0xac #define CLK_RESET_PLLA_BASE 0xb0 #define CLK_RESET_PLLA_MISC 0xbc #define CLK_RESET_PLLX_BASE 0xe0 #define CLK_RESET_PLLX_MISC 0xe4 #define CLK_RESET_PLLX_MISC3 0x518 #define CLK_RESET_PLLX_MISC3_IDDQ 3 #define CLK_RESET_PLLM_MISC_IDDQ 5 #define CLK_RESET_PLLC_MISC_IDDQ 26 #define CLK_RESET_CLK_SOURCE_MSELECT 0x3b4 #define MSELECT_CLKM (0x3 << 30) #define LOCK_DELAY 50 /* safety delay after lock is detected */ #define TEGRA30_POWER_HOTPLUG_SHUTDOWN (1 << 27) /* Hotplug shutdown */ .macro emc_device_mask, rd, base ldr \rd, [\base, #EMC_ADR_CFG] tst \rd, #0x1 moveq \rd, #(0x1 << 8) @ just 1 device movne \rd, #(0x3 << 8) @ 2 devices .endm .macro emc_timing_update, rd, base mov \rd, #1 str \rd, [\base, #EMC_TIMING_CONTROL] 1001: ldr \rd, [\base, #EMC_EMC_STATUS] tst \rd, #(0x1<<23) @ wait EMC_STATUS_TIMING_UPDATE_STALLED is clear bne 1001b .endm .macro pll_enable, rd, r_car_base, pll_base, pll_misc ldr \rd, [\r_car_base, #\pll_base] tst \rd, #(1 << 30) orreq \rd, \rd, #(1 << 30) streq \rd, [\r_car_base, #\pll_base] /* Enable lock detector */ .if \pll_misc ldr \rd, [\r_car_base, #\pll_misc] bic \rd, \rd, #(1 << 18) str \rd, [\r_car_base, #\pll_misc] ldr \rd, [\r_car_base, #\pll_misc] ldr \rd, [\r_car_base, #\pll_misc] orr \rd, \rd, #(1 << 18) str \rd, [\r_car_base, #\pll_misc] .endif .endm .macro pll_locked, rd, r_car_base, pll_base 1: ldr \rd, [\r_car_base, #\pll_base] tst \rd, #(1 << 27) beq 1b .endm .macro pll_iddq_exit, rd, car, iddq, iddq_bit ldr \rd, [\car, #\iddq] bic \rd, \rd, #(1<<\iddq_bit) str \rd, [\car, #\iddq] .endm .macro pll_iddq_entry, rd, car, iddq, iddq_bit ldr \rd, [\car, #\iddq] orr \rd, \rd, 
#(1<<\iddq_bit) str \rd, [\car, #\iddq] .endm #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PM_SLEEP) /* * tegra30_hotplug_shutdown(void) * * Powergates the current CPU. * Should never return. */ ENTRY(tegra30_hotplug_shutdown) /* Powergate this CPU */ mov r0, #TEGRA30_POWER_HOTPLUG_SHUTDOWN bl tegra30_cpu_shutdown ret lr @ should never get here ENDPROC(tegra30_hotplug_shutdown) /* * tegra30_cpu_shutdown(unsigned long flags) * * Puts the current CPU in wait-for-event mode on the flow controller * and powergates it -- flags (in R0) indicate the request type. * * r10 = SoC ID * corrupts r0-r4, r10-r12 */ ENTRY(tegra30_cpu_shutdown) cpu_id r3 tegra_get_soc_id TEGRA_APB_MISC_VIRT, r10 cmp r10, #TEGRA30 bne _no_cpu0_chk @ It's not Tegra30 cmp r3, #0 reteq lr @ Must never be called for CPU 0 _no_cpu0_chk: ldr r12, =TEGRA_FLOW_CTRL_VIRT cpu_to_csr_reg r1, r3 add r1, r1, r12 @ virtual CSR address for this CPU cpu_to_halt_reg r2, r3 add r2, r2, r12 @ virtual HALT_EVENTS address for this CPU /* * Clear this CPU's "event" and "interrupt" flags and power gate * it when halting but not before it is in the "WFE" state. */ movw r12, \ FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG | \ FLOW_CTRL_CSR_ENABLE cmp r10, #TEGRA30 moveq r4, #(1 << 4) @ wfe bitmap movne r4, #(1 << 8) @ wfi bitmap ARM( orr r12, r12, r4, lsl r3 ) THUMB( lsl r4, r4, r3 ) THUMB( orr r12, r12, r4 ) str r12, [r1] /* Halt this CPU. */ mov r3, #0x400 delay_1: subs r3, r3, #1 @ delay as a part of wfe war. bge delay_1; cpsid a @ disable imprecise aborts. ldr r3, [r1] @ read CSR str r3, [r1] @ clear CSR tst r0, #TEGRA30_POWER_HOTPLUG_SHUTDOWN beq flow_ctrl_setting_for_lp2 /* flow controller set up for hotplug */ mov r3, #FLOW_CTRL_WAITEVENT @ For hotplug b flow_ctrl_done flow_ctrl_setting_for_lp2: /* flow controller set up for LP2 */ cmp r10, #TEGRA30 moveq r3, #FLOW_CTRL_WAIT_FOR_INTERRUPT @ For LP2 movne r3, #FLOW_CTRL_WAITEVENT orrne r3, r3, #FLOW_CTRL_HALT_GIC_IRQ orrne r3, r3, #FLOW_CTRL_HALT_GIC_FIQ flow_ctrl_done: cmp r10, #TEGRA30 str r3, [r2] ldr r0, [r2] b wfe_war __cpu_reset_again: dsb .align 5 wfeeq @ CPU should be power gated here wfine wfe_war: b __cpu_reset_again /* * 38 nop's, which fills rest of wfe cache line and * 4 more cachelines with nop */ .rept 38 nop .endr b . @ should never get here ENDPROC(tegra30_cpu_shutdown) #endif #ifdef CONFIG_PM_SLEEP /* * tegra30_sleep_core_finish(unsigned long v2p) * * Enters suspend in LP0 or LP1 by turning off the MMU and jumping to * tegra30_tear_down_core in IRAM */ ENTRY(tegra30_sleep_core_finish) mov r4, r0 /* Flush, disable the L1 data cache and exit SMP */ mov r0, #TEGRA_FLUSH_CACHE_ALL bl tegra_disable_clean_inv_dcache mov r0, r4 /* * Preload all the address literals that are needed for the * CPU power-gating process, to avoid loading from SDRAM which * are not supported once SDRAM is put into self-refresh. * LP0 / LP1 use physical address, since the MMU needs to be * disabled before putting SDRAM into self-refresh to avoid * memory access due to page table walks. */ mov32 r4, TEGRA_PMC_BASE mov32 r5, TEGRA_CLK_RESET_BASE mov32 r6, TEGRA_FLOW_CTRL_BASE mov32 r7, TEGRA_TMRUS_BASE mov32 r3, tegra_shut_off_mmu add r3, r3, r0 mov32 r0, tegra30_tear_down_core mov32 r1, tegra30_iram_start sub r0, r0, r1 mov32 r1, TEGRA_IRAM_LPx_RESUME_AREA add r0, r0, r1 ret r3 ENDPROC(tegra30_sleep_core_finish) /* * tegra30_sleep_cpu_secondary_finish(unsigned long v2p) * * Enters LP2 on secondary CPU by exiting coherency and powergating the CPU. 
*/ ENTRY(tegra30_sleep_cpu_secondary_finish) mov r7, lr /* Flush and disable the L1 data cache */ mov r0, #TEGRA_FLUSH_CACHE_LOUIS bl tegra_disable_clean_inv_dcache /* Powergate this CPU. */ mov r0, #0 @ power mode flags (!hotplug) bl tegra30_cpu_shutdown mov r0, #1 @ never return here ret r7 ENDPROC(tegra30_sleep_cpu_secondary_finish) /* * tegra30_tear_down_cpu * * Switches the CPU to enter sleep. */ ENTRY(tegra30_tear_down_cpu) mov32 r6, TEGRA_FLOW_CTRL_BASE b tegra30_enter_sleep ENDPROC(tegra30_tear_down_cpu) /* START OF ROUTINES COPIED TO IRAM */ .align L1_CACHE_SHIFT .globl tegra30_iram_start tegra30_iram_start: /* * tegra30_lp1_reset * * reset vector for LP1 restore; copied into IRAM during suspend. * Brings the system back up to a safe staring point (SDRAM out of * self-refresh, PLLC, PLLM and PLLP reenabled, CPU running on PLLX, * system clock running on the same PLL that it suspended at), and * jumps to tegra_resume to restore virtual addressing. * The physical address of tegra_resume expected to be stored in * PMC_SCRATCH41. * * NOTE: THIS *MUST* BE RELOCATED TO TEGRA_IRAM_LPx_RESUME_AREA. */ ENTRY(tegra30_lp1_reset) /* * The CPU and system bus are running at 32KHz and executing from * IRAM when this code is executed; immediately switch to CLKM and * enable PLLP, PLLM, PLLC, PLLA and PLLX. */ mov32 r0, TEGRA_CLK_RESET_BASE mov r1, #(1 << 28) str r1, [r0, #CLK_RESET_SCLK_BURST] str r1, [r0, #CLK_RESET_CCLK_BURST] mov r1, #0 str r1, [r0, #CLK_RESET_CCLK_DIVIDER] str r1, [r0, #CLK_RESET_SCLK_DIVIDER] tegra_get_soc_id TEGRA_APB_MISC_BASE, r10 cmp r10, #TEGRA30 beq _no_pll_iddq_exit pll_iddq_exit r1, r0, CLK_RESET_PLLM_MISC, CLK_RESET_PLLM_MISC_IDDQ pll_iddq_exit r1, r0, CLK_RESET_PLLC_MISC, CLK_RESET_PLLC_MISC_IDDQ pll_iddq_exit r1, r0, CLK_RESET_PLLX_MISC3, CLK_RESET_PLLX_MISC3_IDDQ mov32 r7, TEGRA_TMRUS_BASE ldr r1, [r7] add r1, r1, #2 wait_until r1, r7, r3 /* enable PLLM via PMC */ mov32 r2, TEGRA_PMC_BASE ldr r1, [r2, #PMC_PLLP_WB0_OVERRIDE] orr r1, r1, #(1 << 12) str r1, [r2, #PMC_PLLP_WB0_OVERRIDE] pll_enable r1, r0, CLK_RESET_PLLM_BASE, 0 pll_enable r1, r0, CLK_RESET_PLLC_BASE, 0 pll_enable r1, r0, CLK_RESET_PLLX_BASE, 0 b _pll_m_c_x_done _no_pll_iddq_exit: /* enable PLLM via PMC */ mov32 r2, TEGRA_PMC_BASE ldr r1, [r2, #PMC_PLLP_WB0_OVERRIDE] orr r1, r1, #(1 << 12) str r1, [r2, #PMC_PLLP_WB0_OVERRIDE] pll_enable r1, r0, CLK_RESET_PLLM_BASE, CLK_RESET_PLLM_MISC pll_enable r1, r0, CLK_RESET_PLLC_BASE, CLK_RESET_PLLC_MISC pll_enable r1, r0, CLK_RESET_PLLX_BASE, CLK_RESET_PLLX_MISC _pll_m_c_x_done: pll_enable r1, r0, CLK_RESET_PLLP_BASE, CLK_RESET_PLLP_MISC pll_enable r1, r0, CLK_RESET_PLLA_BASE, CLK_RESET_PLLA_MISC pll_locked r1, r0, CLK_RESET_PLLM_BASE pll_locked r1, r0, CLK_RESET_PLLP_BASE pll_locked r1, r0, CLK_RESET_PLLA_BASE pll_locked r1, r0, CLK_RESET_PLLC_BASE pll_locked r1, r0, CLK_RESET_PLLX_BASE tegra_get_soc_id TEGRA_APB_MISC_BASE, r1 cmp r1, #TEGRA30 beq 1f ldr r1, [r0, #CLK_RESET_PLLP_BASE] bic r1, r1, #(1<<31) @ disable PllP bypass str r1, [r0, #CLK_RESET_PLLP_BASE] 1: mov32 r7, TEGRA_TMRUS_BASE ldr r1, [r7] add r1, r1, #LOCK_DELAY wait_until r1, r7, r3 adr r5, tegra_sdram_pad_save ldr r4, [r5, #0x18] @ restore CLK_SOURCE_MSELECT str r4, [r0, #CLK_RESET_CLK_SOURCE_MSELECT] ldr r4, [r5, #0x1C] @ restore SCLK_BURST str r4, [r0, #CLK_RESET_SCLK_BURST] cmp r10, #TEGRA30 movweq r4, #:lower16:((1 << 28) | (0x8)) @ burst policy is PLLX movteq r4, #:upper16:((1 << 28) | (0x8)) movwne r4, #:lower16:((1 << 28) | (0xe)) movtne r4, #:upper16:((1 << 28) | (0xe)) str r4, [r0, 
#CLK_RESET_CCLK_BURST] /* Restore pad power state to normal */ ldr r1, [r5, #0x14] @ PMC_IO_DPD_STATUS mvn r1, r1 bic r1, r1, #(1 << 31) orr r1, r1, #(1 << 30) str r1, [r2, #PMC_IO_DPD_REQ] @ DPD_OFF cmp r10, #TEGRA30 movweq r0, #:lower16:TEGRA_EMC_BASE @ r0 reserved for emc base movteq r0, #:upper16:TEGRA_EMC_BASE cmp r10, #TEGRA114 movweq r0, #:lower16:TEGRA_EMC0_BASE movteq r0, #:upper16:TEGRA_EMC0_BASE cmp r10, #TEGRA124 movweq r0, #:lower16:TEGRA124_EMC_BASE movteq r0, #:upper16:TEGRA124_EMC_BASE exit_self_refresh: ldr r1, [r5, #0xC] @ restore EMC_XM2VTTGENPADCTRL str r1, [r0, #EMC_XM2VTTGENPADCTRL] ldr r1, [r5, #0x10] @ restore EMC_XM2VTTGENPADCTRL2 str r1, [r0, #EMC_XM2VTTGENPADCTRL2] ldr r1, [r5, #0x8] @ restore EMC_AUTO_CAL_INTERVAL str r1, [r0, #EMC_AUTO_CAL_INTERVAL] /* Relock DLL */ ldr r1, [r0, #EMC_CFG_DIG_DLL] orr r1, r1, #(1 << 30) @ set DLL_RESET str r1, [r0, #EMC_CFG_DIG_DLL] emc_timing_update r1, r0 cmp r10, #TEGRA114 movweq r1, #:lower16:TEGRA_EMC1_BASE movteq r1, #:upper16:TEGRA_EMC1_BASE cmpeq r0, r1 ldr r1, [r0, #EMC_AUTO_CAL_CONFIG] orr r1, r1, #(1 << 31) @ set AUTO_CAL_ACTIVE orreq r1, r1, #(1 << 27) @ set slave mode for channel 1 str r1, [r0, #EMC_AUTO_CAL_CONFIG] emc_wait_auto_cal_onetime: ldr r1, [r0, #EMC_AUTO_CAL_STATUS] tst r1, #(1 << 31) @ wait until AUTO_CAL_ACTIVE is cleared bne emc_wait_auto_cal_onetime ldr r1, [r0, #EMC_CFG] bic r1, r1, #(1 << 31) @ disable DRAM_CLK_STOP_PD str r1, [r0, #EMC_CFG] mov r1, #0 str r1, [r0, #EMC_SELF_REF] @ take DRAM out of self refresh mov r1, #1 cmp r10, #TEGRA30 streq r1, [r0, #EMC_NOP] streq r1, [r0, #EMC_NOP] streq r1, [r0, #EMC_REFRESH] emc_device_mask r1, r0 exit_selfrefresh_loop: ldr r2, [r0, #EMC_EMC_STATUS] ands r2, r2, r1 bne exit_selfrefresh_loop lsr r1, r1, #8 @ devSel, bit0:dev0, bit1:dev1 mov32 r7, TEGRA_TMRUS_BASE ldr r2, [r0, #EMC_FBIO_CFG5] and r2, r2, #3 @ check DRAM_TYPE cmp r2, #2 beq emc_lpddr2 /* Issue a ZQ_CAL for dev0 - DDR3 */ mov32 r2, 0x80000011 @ DEV_SELECTION=2, LENGTH=LONG, CMD=1 str r2, [r0, #EMC_ZQ_CAL] ldr r2, [r7] add r2, r2, #10 wait_until r2, r7, r3 tst r1, #2 beq zcal_done /* Issue a ZQ_CAL for dev1 - DDR3 */ mov32 r2, 0x40000011 @ DEV_SELECTION=1, LENGTH=LONG, CMD=1 str r2, [r0, #EMC_ZQ_CAL] ldr r2, [r7] add r2, r2, #10 wait_until r2, r7, r3 b zcal_done emc_lpddr2: /* Issue a ZQ_CAL for dev0 - LPDDR2 */ mov32 r2, 0x800A00AB @ DEV_SELECTION=2, MA=10, OP=0xAB str r2, [r0, #EMC_MRW] ldr r2, [r7] add r2, r2, #1 wait_until r2, r7, r3 tst r1, #2 beq zcal_done /* Issue a ZQ_CAL for dev0 - LPDDR2 */ mov32 r2, 0x400A00AB @ DEV_SELECTION=1, MA=10, OP=0xAB str r2, [r0, #EMC_MRW] ldr r2, [r7] add r2, r2, #1 wait_until r2, r7, r3 zcal_done: mov r1, #0 @ unstall all transactions str r1, [r0, #EMC_REQ_CTRL] ldr r1, [r5, #0x4] @ restore EMC_ZCAL_INTERVAL str r1, [r0, #EMC_ZCAL_INTERVAL] ldr r1, [r5, #0x0] @ restore EMC_CFG str r1, [r0, #EMC_CFG] /* Tegra114 had dual EMC channel, now config the other one */ cmp r10, #TEGRA114 bne __no_dual_emc_chanl mov32 r1, TEGRA_EMC1_BASE cmp r0, r1 movne r0, r1 addne r5, r5, #0x20 bne exit_self_refresh __no_dual_emc_chanl: mov32 r0, TEGRA_PMC_BASE ldr r0, [r0, #PMC_SCRATCH41] ret r0 @ jump to tegra_resume ENDPROC(tegra30_lp1_reset) .align L1_CACHE_SHIFT tegra30_sdram_pad_address: .word TEGRA_EMC_BASE + EMC_CFG @0x0 .word TEGRA_EMC_BASE + EMC_ZCAL_INTERVAL @0x4 .word TEGRA_EMC_BASE + EMC_AUTO_CAL_INTERVAL @0x8 .word TEGRA_EMC_BASE + EMC_XM2VTTGENPADCTRL @0xc .word TEGRA_EMC_BASE + EMC_XM2VTTGENPADCTRL2 @0x10 .word TEGRA_PMC_BASE + PMC_IO_DPD_STATUS @0x14 .word 
TEGRA_CLK_RESET_BASE + CLK_RESET_CLK_SOURCE_MSELECT @0x18 .word TEGRA_CLK_RESET_BASE + CLK_RESET_SCLK_BURST @0x1c tegra30_sdram_pad_address_end: tegra114_sdram_pad_address: .word TEGRA_EMC0_BASE + EMC_CFG @0x0 .word TEGRA_EMC0_BASE + EMC_ZCAL_INTERVAL @0x4 .word TEGRA_EMC0_BASE + EMC_AUTO_CAL_INTERVAL @0x8 .word TEGRA_EMC0_BASE + EMC_XM2VTTGENPADCTRL @0xc .word TEGRA_EMC0_BASE + EMC_XM2VTTGENPADCTRL2 @0x10 .word TEGRA_PMC_BASE + PMC_IO_DPD_STATUS @0x14 .word TEGRA_CLK_RESET_BASE + CLK_RESET_CLK_SOURCE_MSELECT @0x18 .word TEGRA_CLK_RESET_BASE + CLK_RESET_SCLK_BURST @0x1c .word TEGRA_EMC1_BASE + EMC_CFG @0x20 .word TEGRA_EMC1_BASE + EMC_ZCAL_INTERVAL @0x24 .word TEGRA_EMC1_BASE + EMC_AUTO_CAL_INTERVAL @0x28 .word TEGRA_EMC1_BASE + EMC_XM2VTTGENPADCTRL @0x2c .word TEGRA_EMC1_BASE + EMC_XM2VTTGENPADCTRL2 @0x30 tegra114_sdram_pad_adress_end: tegra124_sdram_pad_address: .word TEGRA124_EMC_BASE + EMC_CFG @0x0 .word TEGRA124_EMC_BASE + EMC_ZCAL_INTERVAL @0x4 .word TEGRA124_EMC_BASE + EMC_AUTO_CAL_INTERVAL @0x8 .word TEGRA124_EMC_BASE + EMC_XM2VTTGENPADCTRL @0xc .word TEGRA124_EMC_BASE + EMC_XM2VTTGENPADCTRL2 @0x10 .word TEGRA_PMC_BASE + PMC_IO_DPD_STATUS @0x14 .word TEGRA_CLK_RESET_BASE + CLK_RESET_CLK_SOURCE_MSELECT @0x18 .word TEGRA_CLK_RESET_BASE + CLK_RESET_SCLK_BURST @0x1c tegra124_sdram_pad_address_end: tegra30_sdram_pad_size: .word tegra30_sdram_pad_address_end - tegra30_sdram_pad_address tegra114_sdram_pad_size: .word tegra114_sdram_pad_adress_end - tegra114_sdram_pad_address .type tegra_sdram_pad_save, %object tegra_sdram_pad_save: .rept (tegra114_sdram_pad_adress_end - tegra114_sdram_pad_address) / 4 .long 0 .endr /* * tegra30_tear_down_core * * copied into and executed from IRAM * puts memory in self-refresh for LP0 and LP1 */ tegra30_tear_down_core: bl tegra30_sdram_self_refresh bl tegra30_switch_cpu_to_clk32k b tegra30_enter_sleep /* * tegra30_switch_cpu_to_clk32k * * In LP0 and LP1 all PLLs will be turned off. Switching the CPU and System CLK * to the 32KHz clock. * r4 = TEGRA_PMC_BASE * r5 = TEGRA_CLK_RESET_BASE * r6 = TEGRA_FLOW_CTRL_BASE * r7 = TEGRA_TMRUS_BASE * r10= SoC ID */ tegra30_switch_cpu_to_clk32k: /* * start by jumping to CLKM to safely disable PLLs, then jump to * CLKS. 
*/ mov r0, #(1 << 28) str r0, [r5, #CLK_RESET_SCLK_BURST] /* 2uS delay delay between changing SCLK and CCLK */ ldr r1, [r7] add r1, r1, #2 wait_until r1, r7, r9 str r0, [r5, #CLK_RESET_CCLK_BURST] mov r0, #0 str r0, [r5, #CLK_RESET_CCLK_DIVIDER] str r0, [r5, #CLK_RESET_SCLK_DIVIDER] /* switch the clock source of mselect to be CLK_M */ ldr r0, [r5, #CLK_RESET_CLK_SOURCE_MSELECT] orr r0, r0, #MSELECT_CLKM str r0, [r5, #CLK_RESET_CLK_SOURCE_MSELECT] /* 2uS delay delay between changing SCLK and disabling PLLs */ ldr r1, [r7] add r1, r1, #2 wait_until r1, r7, r9 /* disable PLLM via PMC in LP1 */ ldr r0, [r4, #PMC_PLLP_WB0_OVERRIDE] bic r0, r0, #(1 << 12) str r0, [r4, #PMC_PLLP_WB0_OVERRIDE] /* disable PLLP, PLLA, PLLC and PLLX */ tegra_get_soc_id TEGRA_APB_MISC_BASE, r1 cmp r1, #TEGRA30 ldr r0, [r5, #CLK_RESET_PLLP_BASE] orrne r0, r0, #(1 << 31) @ enable PllP bypass on fast cluster bic r0, r0, #(1 << 30) str r0, [r5, #CLK_RESET_PLLP_BASE] ldr r0, [r5, #CLK_RESET_PLLA_BASE] bic r0, r0, #(1 << 30) str r0, [r5, #CLK_RESET_PLLA_BASE] ldr r0, [r5, #CLK_RESET_PLLC_BASE] bic r0, r0, #(1 << 30) str r0, [r5, #CLK_RESET_PLLC_BASE] ldr r0, [r5, #CLK_RESET_PLLX_BASE] bic r0, r0, #(1 << 30) str r0, [r5, #CLK_RESET_PLLX_BASE] cmp r10, #TEGRA30 beq _no_pll_in_iddq pll_iddq_entry r1, r5, CLK_RESET_PLLX_MISC3, CLK_RESET_PLLX_MISC3_IDDQ _no_pll_in_iddq: /* switch to CLKS */ mov r0, #0 /* brust policy = 32KHz */ str r0, [r5, #CLK_RESET_SCLK_BURST] ret lr /* * tegra30_enter_sleep * * uses flow controller to enter sleep state * executes from IRAM with SDRAM in selfrefresh when target state is LP0 or LP1 * executes from SDRAM with target state is LP2 * r6 = TEGRA_FLOW_CTRL_BASE */ tegra30_enter_sleep: cpu_id r1 cpu_to_csr_reg r2, r1 ldr r0, [r6, r2] orr r0, r0, #FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG orr r0, r0, #FLOW_CTRL_CSR_ENABLE str r0, [r6, r2] tegra_get_soc_id TEGRA_APB_MISC_BASE, r10 cmp r10, #TEGRA30 mov r0, #FLOW_CTRL_WAIT_FOR_INTERRUPT orreq r0, r0, #FLOW_CTRL_HALT_CPU_IRQ | FLOW_CTRL_HALT_CPU_FIQ orrne r0, r0, #FLOW_CTRL_HALT_LIC_IRQ | FLOW_CTRL_HALT_LIC_FIQ cpu_to_halt_reg r2, r1 str r0, [r6, r2] dsb ldr r0, [r6, r2] /* memory barrier */ halted: isb dsb wfi /* CPU should be power gated here */ /* !!!FIXME!!! 
Implement halt failure handler */ b halted /* * tegra30_sdram_self_refresh * * called with MMU off and caches disabled * must be executed from IRAM * r4 = TEGRA_PMC_BASE * r5 = TEGRA_CLK_RESET_BASE * r6 = TEGRA_FLOW_CTRL_BASE * r7 = TEGRA_TMRUS_BASE * r10= SoC ID */ tegra30_sdram_self_refresh: adr r8, tegra_sdram_pad_save tegra_get_soc_id TEGRA_APB_MISC_BASE, r10 cmp r10, #TEGRA30 adreq r2, tegra30_sdram_pad_address ldreq r3, tegra30_sdram_pad_size cmp r10, #TEGRA114 adreq r2, tegra114_sdram_pad_address ldreq r3, tegra114_sdram_pad_size cmp r10, #TEGRA124 adreq r2, tegra124_sdram_pad_address ldreq r3, tegra30_sdram_pad_size mov r9, #0 padsave: ldr r0, [r2, r9] @ r0 is the addr in the pad_address ldr r1, [r0] str r1, [r8, r9] @ save the content of the addr add r9, r9, #4 cmp r3, r9 bne padsave padsave_done: dsb cmp r10, #TEGRA30 ldreq r0, =TEGRA_EMC_BASE @ r0 reserved for emc base addr cmp r10, #TEGRA114 ldreq r0, =TEGRA_EMC0_BASE cmp r10, #TEGRA124 ldreq r0, =TEGRA124_EMC_BASE enter_self_refresh: cmp r10, #TEGRA30 mov r1, #0 str r1, [r0, #EMC_ZCAL_INTERVAL] str r1, [r0, #EMC_AUTO_CAL_INTERVAL] ldr r1, [r0, #EMC_CFG] bic r1, r1, #(1 << 28) bicne r1, r1, #(1 << 29) str r1, [r0, #EMC_CFG] @ disable DYN_SELF_REF emc_timing_update r1, r0 ldr r1, [r7] add r1, r1, #5 wait_until r1, r7, r2 emc_wait_auto_cal: ldr r1, [r0, #EMC_AUTO_CAL_STATUS] tst r1, #(1 << 31) @ wait until AUTO_CAL_ACTIVE is cleared bne emc_wait_auto_cal mov r1, #3 str r1, [r0, #EMC_REQ_CTRL] @ stall incoming DRAM requests emcidle: ldr r1, [r0, #EMC_EMC_STATUS] tst r1, #4 beq emcidle mov r1, #1 str r1, [r0, #EMC_SELF_REF] emc_device_mask r1, r0 emcself: ldr r2, [r0, #EMC_EMC_STATUS] and r2, r2, r1 cmp r2, r1 bne emcself @ loop until DDR in self-refresh /* Put VTTGEN in the lowest power mode */ ldr r1, [r0, #EMC_XM2VTTGENPADCTRL] mov32 r2, 0xF8F8FFFF @ clear XM2VTTGEN_DRVUP and XM2VTTGEN_DRVDN and r1, r1, r2 str r1, [r0, #EMC_XM2VTTGENPADCTRL] ldr r1, [r0, #EMC_XM2VTTGENPADCTRL2] cmp r10, #TEGRA30 orreq r1, r1, #7 @ set E_NO_VTTGEN orrne r1, r1, #0x3f str r1, [r0, #EMC_XM2VTTGENPADCTRL2] emc_timing_update r1, r0 /* Tegra114 had dual EMC channel, now config the other one */ cmp r10, #TEGRA114 bne no_dual_emc_chanl mov32 r1, TEGRA_EMC1_BASE cmp r0, r1 movne r0, r1 bne enter_self_refresh no_dual_emc_chanl: ldr r1, [r4, #PMC_CTRL] tst r1, #PMC_CTRL_SIDE_EFFECT_LP0 bne pmc_io_dpd_skip /* * Put DDR_DATA, DISC_ADDR_CMD, DDR_ADDR_CMD, POP_ADDR_CMD, POP_CLK * and COMP in the lowest power mode when LP1. */ mov32 r1, 0x8EC00000 str r1, [r4, #PMC_IO_DPD_REQ] pmc_io_dpd_skip: dsb ret lr .ltorg /* dummy symbol for end of IRAM */ .align L1_CACHE_SHIFT .global tegra30_iram_end tegra30_iram_end: b . #endif
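The pll_enable/pll_locked macros used by tegra30_lp1_reset follow a simple pattern: set the ENABLE bit in the PLL base register, optionally arm the lock detector in the MISC register, then poll the LOCK bit. A C sketch of that pattern is shown below under stated assumptions: the bit positions mirror the assembly macros, the CAR base is assumed to be an already-mapped MMIO region, and the lock-detector handling is simplified to a single set.

#include <stdint.h>

#define PLL_BASE_ENABLE		(1u << 30)
#define PLL_BASE_LOCK		(1u << 27)
#define PLL_MISC_LOCK_ENABLE	(1u << 18)

static inline uint32_t car_rd(volatile uint32_t *car, uint32_t off)
{
	return car[off / 4];
}

static inline void car_wr(volatile uint32_t *car, uint32_t off, uint32_t v)
{
	car[off / 4] = v;
}

/* Enable a PLL and spin until it reports lock (sketch only). */
static void pll_enable_and_wait(volatile uint32_t *car,
				uint32_t base_off, uint32_t misc_off)
{
	uint32_t v = car_rd(car, base_off);

	if (!(v & PLL_BASE_ENABLE))
		car_wr(car, base_off, v | PLL_BASE_ENABLE);

	if (misc_off)		/* optional lock detector, as in the macro */
		car_wr(car, misc_off,
		       car_rd(car, misc_off) | PLL_MISC_LOCK_ENABLE);

	while (!(car_rd(car, base_off) & PLL_BASE_LOCK))
		;		/* poll until the PLL locks */
}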
AirFortressIlikara/LS2K0300-linux-4.19
14,280
arch/arm/mach-tegra/sleep-tegra20.S
/* * Copyright (c) 2010-2012, NVIDIA Corporation. All rights reserved. * Copyright (c) 2011, Google, Inc. * * Author: Colin Cross <ccross@android.com> * Gary King <gking@nvidia.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <soc/tegra/flowctrl.h> #include <asm/assembler.h> #include <asm/proc-fns.h> #include <asm/cp15.h> #include <asm/cache.h> #include "irammap.h" #include "sleep.h" #define EMC_CFG 0xc #define EMC_ADR_CFG 0x10 #define EMC_REFRESH 0x70 #define EMC_NOP 0xdc #define EMC_SELF_REF 0xe0 #define EMC_REQ_CTRL 0x2b0 #define EMC_EMC_STATUS 0x2b4 #define CLK_RESET_CCLK_BURST 0x20 #define CLK_RESET_CCLK_DIVIDER 0x24 #define CLK_RESET_SCLK_BURST 0x28 #define CLK_RESET_SCLK_DIVIDER 0x2c #define CLK_RESET_PLLC_BASE 0x80 #define CLK_RESET_PLLM_BASE 0x90 #define CLK_RESET_PLLP_BASE 0xa0 #define APB_MISC_XM2CFGCPADCTRL 0x8c8 #define APB_MISC_XM2CFGDPADCTRL 0x8cc #define APB_MISC_XM2CLKCFGPADCTRL 0x8d0 #define APB_MISC_XM2COMPPADCTRL 0x8d4 #define APB_MISC_XM2VTTGENPADCTRL 0x8d8 #define APB_MISC_XM2CFGCPADCTRL2 0x8e4 #define APB_MISC_XM2CFGDPADCTRL2 0x8e8 .macro pll_enable, rd, r_car_base, pll_base ldr \rd, [\r_car_base, #\pll_base] tst \rd, #(1 << 30) orreq \rd, \rd, #(1 << 30) streq \rd, [\r_car_base, #\pll_base] .endm .macro emc_device_mask, rd, base ldr \rd, [\base, #EMC_ADR_CFG] tst \rd, #(0x3 << 24) moveq \rd, #(0x1 << 8) @ just 1 device movne \rd, #(0x3 << 8) @ 2 devices .endm #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PM_SLEEP) /* * tegra20_hotplug_shutdown(void) * * puts the current cpu in reset * should never return */ ENTRY(tegra20_hotplug_shutdown) /* Put this CPU down */ cpu_id r0 bl tegra20_cpu_shutdown ret lr @ should never get here ENDPROC(tegra20_hotplug_shutdown) /* * tegra20_cpu_shutdown(int cpu) * * r0 is cpu to reset * * puts the specified CPU in wait-for-event mode on the flow controller * and puts the CPU in reset * can be called on the current cpu or another cpu * if called on the current cpu, does not return * MUST NOT BE CALLED FOR CPU 0. * * corrupts r0-r3, r12 */ ENTRY(tegra20_cpu_shutdown) cmp r0, #0 reteq lr @ must not be called for CPU 0 mov32 r1, TEGRA_IRAM_RESET_BASE_VIRT ldr r2, =__tegra20_cpu1_resettable_status_offset mov r12, #CPU_RESETTABLE strb r12, [r1, r2] cpu_to_halt_reg r1, r0 ldr r3, =TEGRA_FLOW_CTRL_VIRT mov r2, #FLOW_CTRL_WAITEVENT | FLOW_CTRL_JTAG_RESUME str r2, [r3, r1] @ put flow controller in wait event mode ldr r2, [r3, r1] isb dsb movw r1, 0x1011 mov r1, r1, lsl r0 ldr r3, =TEGRA_CLK_RESET_VIRT str r1, [r3, #0x340] @ put slave CPU in reset isb dsb cpu_id r3 cmp r3, r0 beq . 
ret lr ENDPROC(tegra20_cpu_shutdown) #endif #ifdef CONFIG_PM_SLEEP /* * tegra_pen_lock * * spinlock implementation with no atomic test-and-set and no coherence * using Peterson's algorithm on strongly-ordered registers * used to synchronize a cpu waking up from wfi with entering lp2 on idle * * The reference link of Peterson's algorithm: * http://en.wikipedia.org/wiki/Peterson's_algorithm * * SCRATCH37 = r1 = !turn (inverted from Peterson's algorithm) * on cpu 0: * r2 = flag[0] (in SCRATCH38) * r3 = flag[1] (in SCRATCH39) * on cpu1: * r2 = flag[1] (in SCRATCH39) * r3 = flag[0] (in SCRATCH38) * * must be called with MMU on * corrupts r0-r3, r12 */ ENTRY(tegra_pen_lock) mov32 r3, TEGRA_PMC_VIRT cpu_id r0 add r1, r3, #PMC_SCRATCH37 cmp r0, #0 addeq r2, r3, #PMC_SCRATCH38 addeq r3, r3, #PMC_SCRATCH39 addne r2, r3, #PMC_SCRATCH39 addne r3, r3, #PMC_SCRATCH38 mov r12, #1 str r12, [r2] @ flag[cpu] = 1 dsb str r12, [r1] @ !turn = cpu 1: dsb ldr r12, [r3] cmp r12, #1 @ flag[!cpu] == 1? ldreq r12, [r1] cmpeq r12, r0 @ !turn == cpu? beq 1b @ while !turn == cpu && flag[!cpu] == 1 ret lr @ locked ENDPROC(tegra_pen_lock) ENTRY(tegra_pen_unlock) dsb mov32 r3, TEGRA_PMC_VIRT cpu_id r0 cmp r0, #0 addeq r2, r3, #PMC_SCRATCH38 addne r2, r3, #PMC_SCRATCH39 mov r12, #0 str r12, [r2] ret lr ENDPROC(tegra_pen_unlock) /* * tegra20_cpu_clear_resettable(void) * * Called to clear the "resettable soon" flag in IRAM variable when * it is expected that the secondary CPU will be idle soon. */ ENTRY(tegra20_cpu_clear_resettable) mov32 r1, TEGRA_IRAM_RESET_BASE_VIRT ldr r2, =__tegra20_cpu1_resettable_status_offset mov r12, #CPU_NOT_RESETTABLE strb r12, [r1, r2] ret lr ENDPROC(tegra20_cpu_clear_resettable) /* * tegra20_cpu_set_resettable_soon(void) * * Called to set the "resettable soon" flag in IRAM variable when * it is expected that the secondary CPU will be idle soon. */ ENTRY(tegra20_cpu_set_resettable_soon) mov32 r1, TEGRA_IRAM_RESET_BASE_VIRT ldr r2, =__tegra20_cpu1_resettable_status_offset mov r12, #CPU_RESETTABLE_SOON strb r12, [r1, r2] ret lr ENDPROC(tegra20_cpu_set_resettable_soon) /* * tegra20_cpu_is_resettable_soon(void) * * Returns true if the "resettable soon" flag in IRAM variable has been * set because it is expected that the secondary CPU will be idle soon. */ ENTRY(tegra20_cpu_is_resettable_soon) mov32 r1, TEGRA_IRAM_RESET_BASE_VIRT ldr r2, =__tegra20_cpu1_resettable_status_offset ldrb r12, [r1, r2] cmp r12, #CPU_RESETTABLE_SOON moveq r0, #1 movne r0, #0 ret lr ENDPROC(tegra20_cpu_is_resettable_soon) /* * tegra20_sleep_core_finish(unsigned long v2p) * * Enters suspend in LP0 or LP1 by turning off the mmu and jumping to * tegra20_tear_down_core in IRAM */ ENTRY(tegra20_sleep_core_finish) mov r4, r0 /* Flush, disable the L1 data cache and exit SMP */ mov r0, #TEGRA_FLUSH_CACHE_ALL bl tegra_disable_clean_inv_dcache mov r0, r4 mov32 r3, tegra_shut_off_mmu add r3, r3, r0 mov32 r0, tegra20_tear_down_core mov32 r1, tegra20_iram_start sub r0, r0, r1 mov32 r1, TEGRA_IRAM_LPx_RESUME_AREA add r0, r0, r1 ret r3 ENDPROC(tegra20_sleep_core_finish) /* * tegra20_sleep_cpu_secondary_finish(unsigned long v2p) * * Enters WFI on secondary CPU by exiting coherency. 
*/ ENTRY(tegra20_sleep_cpu_secondary_finish) stmfd sp!, {r4-r11, lr} mrc p15, 0, r11, c1, c0, 1 @ save actlr before exiting coherency /* Flush and disable the L1 data cache */ mov r0, #TEGRA_FLUSH_CACHE_LOUIS bl tegra_disable_clean_inv_dcache mov32 r0, TEGRA_IRAM_RESET_BASE_VIRT ldr r4, =__tegra20_cpu1_resettable_status_offset mov r3, #CPU_RESETTABLE strb r3, [r0, r4] bl tegra_cpu_do_idle /* * cpu may be reset while in wfi, which will return through * tegra_resume to cpu_resume * or interrupt may wake wfi, which will return here * cpu state is unchanged - MMU is on, cache is on, coherency * is off, and the data cache is off * * r11 contains the original actlr */ bl tegra_pen_lock mov32 r0, TEGRA_IRAM_RESET_BASE_VIRT ldr r4, =__tegra20_cpu1_resettable_status_offset mov r3, #CPU_NOT_RESETTABLE strb r3, [r0, r4] bl tegra_pen_unlock /* Re-enable the data cache */ mrc p15, 0, r10, c1, c0, 0 orr r10, r10, #CR_C mcr p15, 0, r10, c1, c0, 0 isb mcr p15, 0, r11, c1, c0, 1 @ reenable coherency /* Invalidate the TLBs & BTAC */ mov r1, #0 mcr p15, 0, r1, c8, c3, 0 @ invalidate shared TLBs mcr p15, 0, r1, c7, c1, 6 @ invalidate shared BTAC dsb isb /* the cpu was running with coherency disabled, * caches may be out of date */ bl v7_flush_kern_cache_louis ldmfd sp!, {r4 - r11, pc} ENDPROC(tegra20_sleep_cpu_secondary_finish) /* * tegra20_tear_down_cpu * * Switches the CPU cluster to PLL-P and enters sleep. */ ENTRY(tegra20_tear_down_cpu) bl tegra_switch_cpu_to_pllp b tegra20_enter_sleep ENDPROC(tegra20_tear_down_cpu) /* START OF ROUTINES COPIED TO IRAM */ .align L1_CACHE_SHIFT .globl tegra20_iram_start tegra20_iram_start: /* * tegra20_lp1_reset * * reset vector for LP1 restore; copied into IRAM during suspend. * Brings the system back up to a safe staring point (SDRAM out of * self-refresh, PLLC, PLLM and PLLP reenabled, CPU running on PLLP, * system clock running on the same PLL that it suspended at), and * jumps to tegra_resume to restore virtual addressing and PLLX. * The physical address of tegra_resume expected to be stored in * PMC_SCRATCH41. * * NOTE: THIS *MUST* BE RELOCATED TO TEGRA_IRAM_LPx_RESUME_AREA. */ ENTRY(tegra20_lp1_reset) /* * The CPU and system bus are running at 32KHz and executing from * IRAM when this code is executed; immediately switch to CLKM and * enable PLLM, PLLP, PLLC. 
*/ mov32 r0, TEGRA_CLK_RESET_BASE mov r1, #(1 << 28) str r1, [r0, #CLK_RESET_SCLK_BURST] str r1, [r0, #CLK_RESET_CCLK_BURST] mov r1, #0 str r1, [r0, #CLK_RESET_CCLK_DIVIDER] str r1, [r0, #CLK_RESET_SCLK_DIVIDER] pll_enable r1, r0, CLK_RESET_PLLM_BASE pll_enable r1, r0, CLK_RESET_PLLP_BASE pll_enable r1, r0, CLK_RESET_PLLC_BASE adr r2, tegra20_sdram_pad_address adr r4, tegra20_sdram_pad_save mov r5, #0 ldr r6, tegra20_sdram_pad_size padload: ldr r7, [r2, r5] @ r7 is the addr in the pad_address ldr r1, [r4, r5] str r1, [r7] @ restore the value in pad_save add r5, r5, #4 cmp r6, r5 bne padload padload_done: /* 255uS delay for PLL stabilization */ mov32 r7, TEGRA_TMRUS_BASE ldr r1, [r7] add r1, r1, #0xff wait_until r1, r7, r9 adr r4, tegra20_sclk_save ldr r4, [r4] str r4, [r0, #CLK_RESET_SCLK_BURST] mov32 r4, ((1 << 28) | (4)) @ burst policy is PLLP str r4, [r0, #CLK_RESET_CCLK_BURST] mov32 r0, TEGRA_EMC_BASE ldr r1, [r0, #EMC_CFG] bic r1, r1, #(1 << 31) @ disable DRAM_CLK_STOP str r1, [r0, #EMC_CFG] mov r1, #0 str r1, [r0, #EMC_SELF_REF] @ take DRAM out of self refresh mov r1, #1 str r1, [r0, #EMC_NOP] str r1, [r0, #EMC_NOP] str r1, [r0, #EMC_REFRESH] emc_device_mask r1, r0 exit_selfrefresh_loop: ldr r2, [r0, #EMC_EMC_STATUS] ands r2, r2, r1 bne exit_selfrefresh_loop mov r1, #0 @ unstall all transactions str r1, [r0, #EMC_REQ_CTRL] mov32 r0, TEGRA_PMC_BASE ldr r0, [r0, #PMC_SCRATCH41] ret r0 @ jump to tegra_resume ENDPROC(tegra20_lp1_reset) /* * tegra20_tear_down_core * * copied into and executed from IRAM * puts memory in self-refresh for LP0 and LP1 */ tegra20_tear_down_core: bl tegra20_sdram_self_refresh bl tegra20_switch_cpu_to_clk32k b tegra20_enter_sleep /* * tegra20_switch_cpu_to_clk32k * * In LP0 and LP1 all PLLs will be turned off. Switch the CPU and system clock * to the 32KHz clock. */ tegra20_switch_cpu_to_clk32k: /* * start by switching to CLKM to safely disable PLLs, then switch to * CLKS. 
*/ mov r0, #(1 << 28) str r0, [r5, #CLK_RESET_SCLK_BURST] str r0, [r5, #CLK_RESET_CCLK_BURST] mov r0, #0 str r0, [r5, #CLK_RESET_CCLK_DIVIDER] str r0, [r5, #CLK_RESET_SCLK_DIVIDER] /* 2uS delay delay between changing SCLK and disabling PLLs */ mov32 r7, TEGRA_TMRUS_BASE ldr r1, [r7] add r1, r1, #2 wait_until r1, r7, r9 /* disable PLLM, PLLP and PLLC */ ldr r0, [r5, #CLK_RESET_PLLM_BASE] bic r0, r0, #(1 << 30) str r0, [r5, #CLK_RESET_PLLM_BASE] ldr r0, [r5, #CLK_RESET_PLLP_BASE] bic r0, r0, #(1 << 30) str r0, [r5, #CLK_RESET_PLLP_BASE] ldr r0, [r5, #CLK_RESET_PLLC_BASE] bic r0, r0, #(1 << 30) str r0, [r5, #CLK_RESET_PLLC_BASE] /* switch to CLKS */ mov r0, #0 /* brust policy = 32KHz */ str r0, [r5, #CLK_RESET_SCLK_BURST] ret lr /* * tegra20_enter_sleep * * uses flow controller to enter sleep state * executes from IRAM with SDRAM in selfrefresh when target state is LP0 or LP1 * executes from SDRAM with target state is LP2 */ tegra20_enter_sleep: mov32 r6, TEGRA_FLOW_CTRL_BASE mov r0, #FLOW_CTRL_WAIT_FOR_INTERRUPT orr r0, r0, #FLOW_CTRL_HALT_CPU_IRQ | FLOW_CTRL_HALT_CPU_FIQ cpu_id r1 cpu_to_halt_reg r1, r1 str r0, [r6, r1] dsb ldr r0, [r6, r1] /* memory barrier */ halted: dsb wfe /* CPU should be power gated here */ isb b halted /* * tegra20_sdram_self_refresh * * called with MMU off and caches disabled * puts sdram in self refresh * must be executed from IRAM */ tegra20_sdram_self_refresh: mov32 r1, TEGRA_EMC_BASE @ r1 reserved for emc base addr mov r2, #3 str r2, [r1, #EMC_REQ_CTRL] @ stall incoming DRAM requests emcidle: ldr r2, [r1, #EMC_EMC_STATUS] tst r2, #4 beq emcidle mov r2, #1 str r2, [r1, #EMC_SELF_REF] emc_device_mask r2, r1 emcself: ldr r3, [r1, #EMC_EMC_STATUS] and r3, r3, r2 cmp r3, r2 bne emcself @ loop until DDR in self-refresh adr r2, tegra20_sdram_pad_address adr r3, tegra20_sdram_pad_safe adr r4, tegra20_sdram_pad_save mov r5, #0 ldr r6, tegra20_sdram_pad_size padsave: ldr r0, [r2, r5] @ r0 is the addr in the pad_address ldr r1, [r0] str r1, [r4, r5] @ save the content of the addr ldr r1, [r3, r5] str r1, [r0] @ set the save val to the addr add r5, r5, #4 cmp r6, r5 bne padsave padsave_done: mov32 r5, TEGRA_CLK_RESET_BASE ldr r0, [r5, #CLK_RESET_SCLK_BURST] adr r2, tegra20_sclk_save str r0, [r2] dsb ret lr tegra20_sdram_pad_address: .word TEGRA_APB_MISC_BASE + APB_MISC_XM2CFGCPADCTRL .word TEGRA_APB_MISC_BASE + APB_MISC_XM2CFGDPADCTRL .word TEGRA_APB_MISC_BASE + APB_MISC_XM2CLKCFGPADCTRL .word TEGRA_APB_MISC_BASE + APB_MISC_XM2COMPPADCTRL .word TEGRA_APB_MISC_BASE + APB_MISC_XM2VTTGENPADCTRL .word TEGRA_APB_MISC_BASE + APB_MISC_XM2CFGCPADCTRL2 .word TEGRA_APB_MISC_BASE + APB_MISC_XM2CFGDPADCTRL2 tegra20_sdram_pad_size: .word tegra20_sdram_pad_size - tegra20_sdram_pad_address tegra20_sdram_pad_safe: .word 0x8 .word 0x8 .word 0x0 .word 0x8 .word 0x5500 .word 0x08080040 .word 0x0 tegra20_sclk_save: .word 0x0 tegra20_sdram_pad_save: .rept (tegra20_sdram_pad_size - tegra20_sdram_pad_address) / 4 .long 0 .endr .ltorg /* dummy symbol for end of IRAM */ .align L1_CACHE_SHIFT .globl tegra20_iram_end tegra20_iram_end: b . #endif
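tegra20_sdram_self_refresh and tegra20_lp1_reset use three parallel tables: register addresses, "safe" values to program for LP1, and a scratch area for the live values. The table-driven save/restore loops are easy to mis-read in flattened assembly, so a short C sketch follows; the array contents are illustrative and the real tables live in IRAM next to the code.

#include <stdint.h>
#include <stddef.h>

/* Save each pad register, then program its low-power ("safe") value. */
static void pads_enter_lp1(volatile uint32_t *const addr[],
			   const uint32_t safe[], uint32_t save[], size_t n)
{
	for (size_t i = 0; i < n; i++) {
		save[i] = *addr[i];	/* remember the live value   */
		*addr[i] = safe[i];	/* program the low-power one */
	}
}

/* On resume, restore exactly what was saved, in the same order. */
static void pads_exit_lp1(volatile uint32_t *const addr[],
			  const uint32_t save[], size_t n)
{
	for (size_t i = 0; i < n; i++)
		*addr[i] = save[i];
}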
AirFortressIlikara/LS2K0300-linux-4.19
3,881
arch/arm/mach-tegra/sleep.S
/* * arch/arm/mach-tegra/sleep.S * * Copyright (c) 2010-2011, NVIDIA Corporation. * Copyright (c) 2011, Google, Inc. * * Author: Colin Cross <ccross@android.com> * Gary King <gking@nvidia.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/cache.h> #include <asm/cp15.h> #include <asm/hardware/cache-l2x0.h> #include "iomap.h" #include "sleep.h" #define CLK_RESET_CCLK_BURST 0x20 #define CLK_RESET_CCLK_DIVIDER 0x24 #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PM_SLEEP) /* * tegra_disable_clean_inv_dcache * * disable, clean & invalidate the D-cache * * Corrupted registers: r1-r3, r6, r8, r9-r11 */ ENTRY(tegra_disable_clean_inv_dcache) stmfd sp!, {r0, r4-r5, r7, r9-r11, lr} dmb @ ensure ordering /* Disable the D-cache */ mrc p15, 0, r2, c1, c0, 0 bic r2, r2, #CR_C mcr p15, 0, r2, c1, c0, 0 isb /* Flush the D-cache */ cmp r0, #TEGRA_FLUSH_CACHE_ALL blne v7_flush_dcache_louis bleq v7_flush_dcache_all /* Trun off coherency */ exit_smp r4, r5 ldmfd sp!, {r0, r4-r5, r7, r9-r11, pc} ENDPROC(tegra_disable_clean_inv_dcache) #endif #ifdef CONFIG_PM_SLEEP /* * tegra_init_l2_for_a15 * * set up the correct L2 cache data RAM latency */ ENTRY(tegra_init_l2_for_a15) mrc p15, 0, r0, c0, c0, 5 ubfx r0, r0, #8, #4 tst r0, #1 @ only need for cluster 0 bne _exit_init_l2_a15 mrc p15, 0x1, r0, c9, c0, 2 and r0, r0, #7 cmp r0, #2 bicne r0, r0, #7 orrne r0, r0, #2 mcrne p15, 0x1, r0, c9, c0, 2 _exit_init_l2_a15: ret lr ENDPROC(tegra_init_l2_for_a15) /* * tegra_sleep_cpu_finish(unsigned long v2p) * * enters suspend in LP2 by turning off the mmu and jumping to * tegra?_tear_down_cpu */ ENTRY(tegra_sleep_cpu_finish) mov r4, r0 /* Flush and disable the L1 data cache */ mov r0, #TEGRA_FLUSH_CACHE_ALL bl tegra_disable_clean_inv_dcache mov r0, r4 mov32 r6, tegra_tear_down_cpu ldr r1, [r6] add r1, r1, r0 mov32 r3, tegra_shut_off_mmu add r3, r3, r0 mov r0, r1 ret r3 ENDPROC(tegra_sleep_cpu_finish) /* * tegra_shut_off_mmu * * r0 = physical address to jump to with mmu off * * called with VA=PA mapping * turns off MMU, icache, dcache and branch prediction */ .align L1_CACHE_SHIFT .pushsection .idmap.text, "ax" ENTRY(tegra_shut_off_mmu) mrc p15, 0, r3, c1, c0, 0 movw r2, #CR_I | CR_Z | CR_C | CR_M bic r3, r3, r2 dsb mcr p15, 0, r3, c1, c0, 0 isb #ifdef CONFIG_CACHE_L2X0 /* Disable L2 cache */ check_cpu_part_num 0xc09, r9, r10 movweq r2, #:lower16:(TEGRA_ARM_PERIF_BASE + 0x3000) movteq r2, #:upper16:(TEGRA_ARM_PERIF_BASE + 0x3000) moveq r3, #0 streq r3, [r2, #L2X0_CTRL] #endif ret r0 ENDPROC(tegra_shut_off_mmu) .popsection /* * tegra_switch_cpu_to_pllp * * In LP2 the normal cpu clock pllx will be turned off. 
Switch the CPU to pllp */ ENTRY(tegra_switch_cpu_to_pllp) /* in LP2 idle (SDRAM active), set the CPU burst policy to PLLP */ mov32 r5, TEGRA_CLK_RESET_BASE mov r0, #(2 << 28) @ burst policy = run mode orr r0, r0, #(4 << 4) @ use PLLP in run mode burst str r0, [r5, #CLK_RESET_CCLK_BURST] mov r0, #0 str r0, [r5, #CLK_RESET_CCLK_DIVIDER] ret lr ENDPROC(tegra_switch_cpu_to_pllp) #endif
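tegra_switch_cpu_to_pllp composes the CCLK burst-policy word from two fields: bits 31:28 select the active state (2 = run mode) and the run-mode nibble in bits 7:4 selects the clock source (4 = PLLP). A small C sketch of that write is given below; the register offsets match the #defines at the top of sleep.S, while the mapped base pointer is an assumption for illustration.

#include <stdint.h>

#define CLK_RESET_CCLK_BURST	0x20
#define CLK_RESET_CCLK_DIVIDER	0x24

static void switch_cclk_to_pllp(volatile uint8_t *car_base)
{
	volatile uint32_t *burst =
		(volatile uint32_t *)(car_base + CLK_RESET_CCLK_BURST);
	volatile uint32_t *divider =
		(volatile uint32_t *)(car_base + CLK_RESET_CCLK_DIVIDER);

	*burst = (2u << 28) | (4u << 4);	/* run mode, PLLP as run source */
	*divider = 0;				/* no post-divider              */
}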
AirFortressIlikara/LS2K0300-linux-4.19
1,795
arch/arm/mach-rockchip/sleep.S
/* * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd * Author: Tony Xie <tony.xie@rock-chips.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/memory.h> .data /* * this code will be copied from * DDR to SRAM for system resume, * so it is placed in the ".data" section. */ .align 2 ENTRY(rockchip_slp_cpu_resume) setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set svc, irqs off mrc p15, 0, r1, c0, c0, 5 and r1, r1, #0xf cmp r1, #0 /* only CPU0 can continue to run; the others halt here */ beq cpu0run secondary_loop: wfe b secondary_loop cpu0run: ldr r3, rkpm_bootdata_l2ctlr_f cmp r3, #0 beq sp_set ldr r3, rkpm_bootdata_l2ctlr mcr p15, 1, r3, c9, c0, 2 sp_set: ldr sp, rkpm_bootdata_cpusp ldr r1, rkpm_bootdata_cpu_code bx r1 ENDPROC(rockchip_slp_cpu_resume) /* Parameters filled in by the kernel */ /* Flag for whether to restore L2CTLR on resume */ .global rkpm_bootdata_l2ctlr_f rkpm_bootdata_l2ctlr_f: .long 0 /* Saved L2CTLR to restore on resume */ .global rkpm_bootdata_l2ctlr rkpm_bootdata_l2ctlr: .long 0 /* CPU resume SP addr */ .globl rkpm_bootdata_cpusp rkpm_bootdata_cpusp: .long 0 /* CPU resume function (physical address) */ .globl rkpm_bootdata_cpu_code rkpm_bootdata_cpu_code: .long 0 ENTRY(rk3288_bootram_sz) .word . - rockchip_slp_cpu_resume
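The four .long slots after rockchip_slp_cpu_resume form a parameter block that the kernel patches before copying the whole blob to SRAM. A C model of that block is sketched below; the struct name is illustrative (the actual code just uses the individual labels), but the field order follows the labels above.

#include <stdint.h>

struct rkpm_bootdata {
	uint32_t l2ctlr_f;	/* non-zero: restore L2CTLR on resume */
	uint32_t l2ctlr;	/* saved L2CTLR value                  */
	uint32_t cpusp;		/* stack pointer for the resume path   */
	uint32_t cpu_code;	/* physical address of cpu_resume      */
};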
AirFortressIlikara/LS2K0300-linux-4.19
2,245
arch/arm/mach-footbridge/include/mach/entry-macro.S
/* * arch/arm/mach-footbridge/include/mach/entry-macro.S * * Low-level IRQ helper macros for footbridge-based platforms * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <mach/hardware.h> #include <mach/irqs.h> #include <asm/hardware/dec21285.h> .equ dc21285_high, ARMCSR_BASE & 0xff000000 .equ dc21285_low, ARMCSR_BASE & 0x00ffffff .macro get_irqnr_preamble, base, tmp mov \base, #dc21285_high .if dc21285_low orr \base, \base, #dc21285_low .endif .endm .macro get_irqnr_and_base, irqnr, irqstat, base, tmp ldr \irqstat, [\base, #0x180] @ get interrupts mov \irqnr, #IRQ_SDRAMPARITY tst \irqstat, #IRQ_MASK_SDRAMPARITY bne 1001f tst \irqstat, #IRQ_MASK_UART_RX movne \irqnr, #IRQ_CONRX bne 1001f tst \irqstat, #IRQ_MASK_DMA1 movne \irqnr, #IRQ_DMA1 bne 1001f tst \irqstat, #IRQ_MASK_DMA2 movne \irqnr, #IRQ_DMA2 bne 1001f tst \irqstat, #IRQ_MASK_IN0 movne \irqnr, #IRQ_IN0 bne 1001f tst \irqstat, #IRQ_MASK_IN1 movne \irqnr, #IRQ_IN1 bne 1001f tst \irqstat, #IRQ_MASK_IN2 movne \irqnr, #IRQ_IN2 bne 1001f tst \irqstat, #IRQ_MASK_IN3 movne \irqnr, #IRQ_IN3 bne 1001f tst \irqstat, #IRQ_MASK_PCI movne \irqnr, #IRQ_PCI bne 1001f tst \irqstat, #IRQ_MASK_DOORBELLHOST movne \irqnr, #IRQ_DOORBELLHOST bne 1001f tst \irqstat, #IRQ_MASK_I2OINPOST movne \irqnr, #IRQ_I2OINPOST bne 1001f tst \irqstat, #IRQ_MASK_TIMER1 movne \irqnr, #IRQ_TIMER1 bne 1001f tst \irqstat, #IRQ_MASK_TIMER2 movne \irqnr, #IRQ_TIMER2 bne 1001f tst \irqstat, #IRQ_MASK_TIMER3 movne \irqnr, #IRQ_TIMER3 bne 1001f tst \irqstat, #IRQ_MASK_UART_TX movne \irqnr, #IRQ_CONTX bne 1001f tst \irqstat, #IRQ_MASK_PCI_ABORT movne \irqnr, #IRQ_PCI_ABORT bne 1001f tst \irqstat, #IRQ_MASK_PCI_SERR movne \irqnr, #IRQ_PCI_SERR bne 1001f tst \irqstat, #IRQ_MASK_DISCARD_TIMER movne \irqnr, #IRQ_DISCARD_TIMER bne 1001f tst \irqstat, #IRQ_MASK_PCI_DPERR movne \irqnr, #IRQ_PCI_DPERR bne 1001f tst \irqstat, #IRQ_MASK_PCI_PERR movne \irqnr, #IRQ_PCI_PERR 1001: .endm
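The footbridge get_irqnr_and_base macro is a fixed-priority decoder: it tests the IRQ status word against one mask after another and stops at the first hit. An equivalent C loop over a priority-ordered table is sketched below; the table entries stand in for the IRQ_MASK_xxx / IRQ_xxx pairs used in the macro and are not real values.

#include <stdint.h>

struct prio_entry {
	uint32_t mask;		/* bit(s) in the status register */
	int	 irq;		/* Linux IRQ number to report    */
};

/* Return the IRQ of the first (highest-priority) matching entry. */
static int decode_irq(uint32_t status, const struct prio_entry *tbl, int n)
{
	for (int i = 0; i < n; i++)
		if (status & tbl[i].mask)
			return tbl[i].irq;
	return -1;		/* nothing pending */
}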
AirFortressIlikara/LS2K0300-linux-4.19
1,058
arch/arm/mach-davinci/include/mach/entry-macro.S
/* * Low-level IRQ helper macros for TI DaVinci-based platforms * * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com> * * 2007 (c) MontaVista Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ #include <mach/irqs.h> .macro get_irqnr_preamble, base, tmp ldr \base, =davinci_intc_base ldr \base, [\base] .endm .macro get_irqnr_and_base, irqnr, irqstat, base, tmp #if defined(CONFIG_AINTC) && defined(CONFIG_CP_INTC) ldr \tmp, =davinci_intc_type ldr \tmp, [\tmp] cmp \tmp, #DAVINCI_INTC_TYPE_CP_INTC beq 1001f #endif #if defined(CONFIG_AINTC) ldr \tmp, [\base, #0x14] movs \tmp, \tmp, lsr #2 sub \irqnr, \tmp, #1 b 1002f #endif #if defined(CONFIG_CP_INTC) 1001: ldr \irqnr, [\base, #0x80] /* get irq number */ mov \tmp, \irqnr, lsr #31 and \irqnr, \irqnr, #0xff /* irq is in bits 0-9 */ and \tmp, \tmp, #0x1 cmp \tmp, #0x1 #endif 1002: .endm
AirFortressIlikara/LS2K0300-linux-4.19
1,214
arch/arm/mach-ks8695/include/mach/entry-macro.S
/* * arch/arm/mach-ks8695/include/mach/entry-macro.S * * Copyright (C) 2006 Ben Dooks <ben@simtec.co.uk> * Copyright (C) 2006 Simtec Electronics * * Low-level IRQ helper macros for KS8695 * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <mach/hardware.h> #include <mach/regs-irq.h> .macro get_irqnr_preamble, base, tmp ldr \base, =KS8695_IRQ_VA @ Base address of interrupt controller .endm .macro get_irqnr_and_base, irqnr, irqstat, base, tmp ldr \irqstat, [\base, #KS8695_INTMS] @ Mask Status register teq \irqstat, #0 beq 1001f mov \irqnr, #0 tst \irqstat, #0xff moveq \irqstat, \irqstat, lsr #8 addeq \irqnr, \irqnr, #8 tsteq \irqstat, #0xff moveq \irqstat, \irqstat, lsr #8 addeq \irqnr, \irqnr, #8 tsteq \irqstat, #0xff moveq \irqstat, \irqstat, lsr #8 addeq \irqnr, \irqnr, #8 tst \irqstat, #0x0f moveq \irqstat, \irqstat, lsr #4 addeq \irqnr, \irqnr, #4 tst \irqstat, #0x03 moveq \irqstat, \irqstat, lsr #2 addeq \irqnr, \irqnr, #2 tst \irqstat, #0x01 addeqs \irqnr, \irqnr, #1 1001: .endm
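The KS8695 macro computes the index of the lowest set bit in the masked-status word by halving: skip whole zero bytes, then narrow down by 4, 2 and 1 bits, accumulating the shift count as the IRQ number. A C rendering of the same binary search is sketched here, with the byte-skipping written as a loop rather than the macro's unrolled tests.

#include <stdint.h>

static int lowest_set_bit(uint32_t status)
{
	int irq = 0;

	if (status == 0)
		return -1;			/* nothing pending */

	/* Skip whole zero bytes, then narrow down within the last byte. */
	while (!(status & 0xff)) {
		status >>= 8;
		irq += 8;
	}
	if (!(status & 0x0f)) { status >>= 4; irq += 4; }
	if (!(status & 0x03)) { status >>= 2; irq += 2; }
	if (!(status & 0x01)) irq += 1;

	return irq;
}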
AirFortressIlikara/LS2K0300-linux-4.19
1,146
arch/arm/mach-lpc32xx/include/mach/entry-macro.S
/* * arch/arm/mach-lpc32xx/include/mach/entry-macro.S * * Author: Kevin Wells <kevin.wells@nxp.com> * * Copyright (C) 2010 NXP Semiconductors * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <mach/hardware.h> #include <mach/platform.h> #define LPC32XX_INTC_MASKED_STATUS_OFS 0x8 .macro get_irqnr_preamble, base, tmp ldr \base, =IO_ADDRESS(LPC32XX_MIC_BASE) .endm /* * Return IRQ number in irqnr. Also return processor Z flag status in CPSR * as set if an interrupt is pending. */ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp ldr \irqstat, [\base, #LPC32XX_INTC_MASKED_STATUS_OFS] clz \irqnr, \irqstat rsb \irqnr, \irqnr, #31 teq \irqstat, #0 .endm
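The LPC32xx macro turns the masked status word into an IRQ number with a single clz followed by rsb, i.e. the highest pending bit wins (irqnr = 31 - clz(status)), and the final teq leaves the Z flag set when nothing is pending. A C equivalent using the GCC/Clang builtin is sketched below.

#include <stdint.h>

static int highest_pending_irq(uint32_t status)
{
	if (status == 0)
		return -1;			/* Z-flag case: no IRQ pending   */
	return 31 - __builtin_clz(status);	/* highest set bit = IRQ number  */
}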
AirFortressIlikara/LS2K0300-linux-4.19
1,085
arch/arm/mach-ixp4xx/include/mach/entry-macro.S
/* * arch/arm/mach-ixp4xx/include/mach/entry-macro.S * * Low-level IRQ helper macros for IXP4xx-based platforms * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <mach/hardware.h> .macro get_irqnr_preamble, base, tmp .endm .macro get_irqnr_and_base, irqnr, irqstat, base, tmp ldr \irqstat, =(IXP4XX_INTC_BASE_VIRT+IXP4XX_ICIP_OFFSET) ldr \irqstat, [\irqstat] @ get interrupts cmp \irqstat, #0 beq 1001f @ upper IRQ? clz \irqnr, \irqstat mov \base, #31 sub \irqnr, \base, \irqnr b 1002f @ lower IRQ being @ handled 1001: /* * IXP465/IXP435 has an upper IRQ status register */ #if defined(CONFIG_CPU_IXP46X) || defined(CONFIG_CPU_IXP43X) ldr \irqstat, =(IXP4XX_INTC_BASE_VIRT+IXP4XX_ICIP2_OFFSET) ldr \irqstat, [\irqstat] @ get upper interrupts mov \irqnr, #63 clz \irqstat, \irqstat cmp \irqstat, #32 subne \irqnr, \irqnr, \irqstat #endif 1002: .endm
AirFortressIlikara/LS2K0300-linux-4.19
3,176
arch/arm/kvm/hyp/entry.S
/* * Copyright (C) 2016 - ARM Ltd * Author: Marc Zyngier <marc.zyngier@arm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/kvm_arm.h> #include <asm/kvm_asm.h> .arch_extension virt .text .pushsection .hyp.text, "ax" #define USR_REGS_OFFSET (CPU_CTXT_GP_REGS + GP_REGS_USR) /* int __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host) */ ENTRY(__guest_enter) @ Save host registers add r1, r1, #(USR_REGS_OFFSET + S_R4) stm r1!, {r4-r12} str lr, [r1, #4] @ Skip SP_usr (already saved) @ Restore guest registers add r0, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0) ldr lr, [r0, #S_LR] ldm r0, {r0-r12} clrex eret ENDPROC(__guest_enter) ENTRY(__guest_exit) /* * return convention: * guest r0, r1, r2 saved on the stack * r0: vcpu pointer * r1: exception code */ add r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R3) stm r2!, {r3-r12} str lr, [r2, #4] add r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0) pop {r3, r4, r5} @ r0, r1, r2 stm r2, {r3-r5} ldr r0, [r0, #VCPU_HOST_CTXT] add r0, r0, #(USR_REGS_OFFSET + S_R4) ldm r0!, {r4-r12} ldr lr, [r0, #4] mov r0, r1 mrs r1, SPSR mrs r2, ELR_hyp mrc p15, 4, r3, c5, c2, 0 @ HSR /* * Force loads and stores to complete before unmasking aborts * and forcing the delivery of the exception. This gives us a * single instruction window, which the handler will try to * match. */ dsb sy cpsie a .global abort_guest_exit_start abort_guest_exit_start: isb .global abort_guest_exit_end abort_guest_exit_end: /* * If we took an abort, r0[31] will be set, and cmp will set * the N bit in PSTATE. */ cmp r0, #0 msrmi SPSR_cxsf, r1 msrmi ELR_hyp, r2 mcrmi p15, 4, r3, c5, c2, 0 @ HSR bx lr ENDPROC(__guest_exit) /* * If VFPv3 support is not available, then we will not switch the VFP * registers; however cp10 and cp11 accesses will still trap and fallback * to the regular coprocessor emulation code, which currently will * inject an undefined exception to the guest. */ #ifdef CONFIG_VFPv3 ENTRY(__vfp_guest_restore) push {r3, r4, lr} @ NEON/VFP used. Turn on VFP access. mrc p15, 4, r1, c1, c1, 2 @ HCPTR bic r1, r1, #(HCPTR_TCP(10) | HCPTR_TCP(11)) mcr p15, 4, r1, c1, c1, 2 @ HCPTR isb @ Switch VFP/NEON hardware state to the guest's mov r4, r0 ldr r0, [r0, #VCPU_HOST_CTXT] add r0, r0, #CPU_CTXT_VFP bl __vfp_save_state add r0, r4, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP) bl __vfp_restore_state pop {r3, r4, lr} pop {r0, r1, r2} clrex eret ENDPROC(__vfp_guest_restore) #endif .popsection
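From the host's point of view, __guest_enter/__guest_exit implement a call that runs the guest and returns an exception code describing why it stopped. The sketch below shows that host-side view only; the enum values are assumptions for illustration (the real ones live in kvm_asm.h) and the dispatch is not the actual KVM run loop.

struct kvm_vcpu;
struct kvm_cpu_context;

/* Provided by the assembly above; returns an ARM_EXCEPTION_* code. */
extern int __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host);

enum { ARM_EXCEPTION_IRQ = 0, ARM_EXCEPTION_HVC = 1 };	/* values assumed */

static int run_guest_once(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host)
{
	int code = __guest_enter(vcpu, host);	/* returns via __guest_exit */

	switch (code) {
	case ARM_EXCEPTION_IRQ:
		return 1;	/* host interrupt: re-enter the guest later */
	case ARM_EXCEPTION_HVC:
	default:
		return 0;	/* guest trap: emulate, then decide         */
	}
}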
AirFortressIlikara/LS2K0300-linux-4.19
1,983
arch/arm/kvm/hyp/vfp.S
/* * Copyright (C) 2012 - Virtual Open Systems and Columbia University * Author: Christoffer Dall <c.dall@virtualopensystems.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <asm/vfpmacros.h> .text .pushsection .hyp.text, "ax" /* void __vfp_save_state(struct vfp_hard_struct *vfp); */ ENTRY(__vfp_save_state) push {r4, r5} VFPFMRX r1, FPEXC @ Make sure *really* VFP is enabled so we can touch the registers. orr r5, r1, #FPEXC_EN tst r5, #FPEXC_EX @ Check for VFP Subarchitecture bic r5, r5, #FPEXC_EX @ FPEXC_EX disable VFPFMXR FPEXC, r5 isb VFPFMRX r2, FPSCR beq 1f @ If FPEXC_EX is 0, then FPINST/FPINST2 reads are upredictable, so @ we only need to save them if FPEXC_EX is set. VFPFMRX r3, FPINST tst r5, #FPEXC_FP2V VFPFMRX r4, FPINST2, ne @ vmrsne 1: VFPFSTMIA r0, r5 @ Save VFP registers stm r0, {r1-r4} @ Save FPEXC, FPSCR, FPINST, FPINST2 pop {r4, r5} bx lr ENDPROC(__vfp_save_state) /* void __vfp_restore_state(struct vfp_hard_struct *vfp); * Assume FPEXC_EN is on and FPEXC_EX is off */ ENTRY(__vfp_restore_state) VFPFLDMIA r0, r1 @ Load VFP registers ldm r0, {r0-r3} @ Load FPEXC, FPSCR, FPINST, FPINST2 VFPFMXR FPSCR, r1 tst r0, #FPEXC_EX @ Check for VFP Subarchitecture beq 1f VFPFMXR FPINST, r2 tst r0, #FPEXC_FP2V VFPFMXR FPINST2, r3, ne 1: VFPFMXR FPEXC, r0 @ FPEXC (last, in case !EN) bx lr ENDPROC(__vfp_restore_state) .popsection
AirFortressIlikara/LS2K0300-linux-4.19
7,640
arch/arm/kvm/hyp/hyp-entry.S
/* * Copyright (C) 2012 - Virtual Open Systems and Columbia University * Author: Christoffer Dall <c.dall@virtualopensystems.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/arm-smccc.h> #include <linux/linkage.h> #include <asm/kvm_arm.h> #include <asm/kvm_asm.h> .arch_extension virt .text .pushsection .hyp.text, "ax" .macro load_vcpu reg mrc p15, 4, \reg, c13, c0, 2 @ HTPIDR .endm /******************************************************************** * Hypervisor exception vector and handlers * * * The KVM/ARM Hypervisor ABI is defined as follows: * * Entry to Hyp mode from the host kernel will happen _only_ when an HVC * instruction is issued since all traps are disabled when running the host * kernel as per the Hyp-mode initialization at boot time. * * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc * below) when the HVC instruction is called from SVC mode (i.e. a guest or the * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC * instructions are called from within Hyp-mode. * * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode): * Switching to Hyp mode is done through a simple HVC #0 instruction. The * exception vector code will check that the HVC comes from VMID==0. * - r0 contains a pointer to a HYP function * - r1, r2, and r3 contain arguments to the above function. * - The HYP function will be called with its arguments in r0, r1 and r2. * On HYP function return, we return directly to SVC. * * Note that the above is used to execute code in Hyp-mode from a host-kernel * point of view, and is a different concept from performing a world-switch and * executing guest code SVC mode (with a VMID != 0). */ .align 5 __kvm_hyp_vector: .global __kvm_hyp_vector @ Hyp-mode exception vector W(b) hyp_reset W(b) hyp_undef W(b) hyp_svc W(b) hyp_pabt W(b) hyp_dabt W(b) hyp_hvc W(b) hyp_irq W(b) hyp_fiq #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR .align 5 __kvm_hyp_vector_ic_inv: .global __kvm_hyp_vector_ic_inv /* * We encode the exception entry in the bottom 3 bits of * SP, and we have to guarantee to be 8 bytes aligned. */ W(add) sp, sp, #1 /* Reset 7 */ W(add) sp, sp, #1 /* Undef 6 */ W(add) sp, sp, #1 /* Syscall 5 */ W(add) sp, sp, #1 /* Prefetch abort 4 */ W(add) sp, sp, #1 /* Data abort 3 */ W(add) sp, sp, #1 /* HVC 2 */ W(add) sp, sp, #1 /* IRQ 1 */ W(nop) /* FIQ 0 */ mcr p15, 0, r0, c7, c5, 0 /* ICIALLU */ isb b decode_vectors .align 5 __kvm_hyp_vector_bp_inv: .global __kvm_hyp_vector_bp_inv /* * We encode the exception entry in the bottom 3 bits of * SP, and we have to guarantee to be 8 bytes aligned. 
*/ W(add) sp, sp, #1 /* Reset 7 */ W(add) sp, sp, #1 /* Undef 6 */ W(add) sp, sp, #1 /* Syscall 5 */ W(add) sp, sp, #1 /* Prefetch abort 4 */ W(add) sp, sp, #1 /* Data abort 3 */ W(add) sp, sp, #1 /* HVC 2 */ W(add) sp, sp, #1 /* IRQ 1 */ W(nop) /* FIQ 0 */ mcr p15, 0, r0, c7, c5, 6 /* BPIALL */ isb decode_vectors: #ifdef CONFIG_THUMB2_KERNEL /* * Yet another silly hack: Use VPIDR as a temp register. * Thumb2 is really a pain, as SP cannot be used with most * of the bitwise instructions. The vect_br macro ensures * things gets cleaned-up. */ mcr p15, 4, r0, c0, c0, 0 /* VPIDR */ mov r0, sp and r0, r0, #7 sub sp, sp, r0 push {r1, r2} mov r1, r0 mrc p15, 4, r0, c0, c0, 0 /* VPIDR */ mrc p15, 0, r2, c0, c0, 0 /* MIDR */ mcr p15, 4, r2, c0, c0, 0 /* VPIDR */ #endif .macro vect_br val, targ ARM( eor sp, sp, #\val ) ARM( tst sp, #7 ) ARM( eorne sp, sp, #\val ) THUMB( cmp r1, #\val ) THUMB( popeq {r1, r2} ) beq \targ .endm vect_br 0, hyp_fiq vect_br 1, hyp_irq vect_br 2, hyp_hvc vect_br 3, hyp_dabt vect_br 4, hyp_pabt vect_br 5, hyp_svc vect_br 6, hyp_undef vect_br 7, hyp_reset #endif .macro invalid_vector label, cause .align \label: mov r0, #\cause b __hyp_panic .endm invalid_vector hyp_reset ARM_EXCEPTION_RESET invalid_vector hyp_undef ARM_EXCEPTION_UNDEFINED invalid_vector hyp_svc ARM_EXCEPTION_SOFTWARE invalid_vector hyp_pabt ARM_EXCEPTION_PREF_ABORT invalid_vector hyp_fiq ARM_EXCEPTION_FIQ ENTRY(__hyp_do_panic) mrs lr, cpsr bic lr, lr, #MODE_MASK orr lr, lr, #SVC_MODE THUMB( orr lr, lr, #PSR_T_BIT ) msr spsr_cxsf, lr ldr lr, =panic msr ELR_hyp, lr ldr lr, =kvm_call_hyp clrex eret ENDPROC(__hyp_do_panic) hyp_hvc: /* * Getting here is either because of a trap from a guest, * or from executing HVC from the host kernel, which means * "do something in Hyp mode". */ push {r0, r1, r2} @ Check syndrome register mrc p15, 4, r1, c5, c2, 0 @ HSR lsr r0, r1, #HSR_EC_SHIFT cmp r0, #HSR_EC_HVC bne guest_trap @ Not HVC instr. /* * Let's check if the HVC came from VMID 0 and allow simple * switch to Hyp mode */ mrrc p15, 6, r0, r2, c2 lsr r2, r2, #16 and r2, r2, #0xff cmp r2, #0 bne guest_hvc_trap @ Guest called HVC /* * Getting here means host called HVC, we shift parameters and branch * to Hyp function. */ pop {r0, r1, r2} /* * Check if we have a kernel function, which is guaranteed to be * bigger than the maximum hyp stub hypercall */ cmp r0, #HVC_STUB_HCALL_NR bhs 1f /* * Not a kernel function, treat it as a stub hypercall. * Compute the physical address for __kvm_handle_stub_hvc * (as the code lives in the idmaped page) and branch there. * We hijack ip (r12) as a tmp register. */ push {r1} ldr r1, =kimage_voffset ldr r1, [r1] ldr ip, =__kvm_handle_stub_hvc sub ip, ip, r1 pop {r1} bx ip 1: /* * Pushing r2 here is just a way of keeping the stack aligned to * 8 bytes on any path that can trigger a HYP exception. Here, * we may well be about to jump into the guest, and the guest * exit would otherwise be badly decoded by our fancy * "decode-exception-without-a-branch" code... 
*/ push {r2, lr} mov lr, r0 mov r0, r1 mov r1, r2 mov r2, r3 THUMB( orr lr, #1) blx lr @ Call the HYP function pop {r2, lr} eret guest_hvc_trap: movw r2, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1 movt r2, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1 ldr r0, [sp] @ Guest's r0 teq r0, r2 bne guest_trap add sp, sp, #12 @ Returns: @ r0 = 0 @ r1 = HSR value (perfectly predictable) @ r2 = ARM_SMCCC_ARCH_WORKAROUND_1 mov r0, #0 eret guest_trap: load_vcpu r0 @ Load VCPU pointer to r0 #ifdef CONFIG_VFPv3 @ Check for a VFP access lsr r1, r1, #HSR_EC_SHIFT cmp r1, #HSR_EC_CP_0_13 beq __vfp_guest_restore #endif mov r1, #ARM_EXCEPTION_HVC b __guest_exit hyp_irq: push {r0, r1, r2} mov r1, #ARM_EXCEPTION_IRQ load_vcpu r0 @ Load VCPU pointer to r0 b __guest_exit hyp_dabt: push {r0, r1} mrs r0, ELR_hyp ldr r1, =abort_guest_exit_start THUMB( add r1, r1, #1) cmp r0, r1 ldrne r1, =abort_guest_exit_end THUMB( addne r1, r1, #1) cmpne r0, r1 pop {r0, r1} bne __hyp_panic orr r0, r0, #(1 << ARM_EXIT_WITH_ABORT_BIT) eret .ltorg .popsection
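The hyp_hvc path above turns on two field extractions: the exception class in HSR[31:26] decides whether the trap really was an HVC, and the VMID in VTTBR[55:48] decides whether the caller was the host (VMID 0) or a guest. A minimal C sketch of those two checks, using the field positions implied by the shifts in the assembly (HSR_EC_SHIFT = 26, HSR_EC_HVC = 0x12); the helper names are illustrative, not kernel APIs:

#include <stdbool.h>
#include <stdint.h>

#define HSR_EC_SHIFT	26	/* EC occupies HSR[31:26] */
#define HSR_EC_HVC	0x12	/* exception class for a trapped HVC */

/* "lsr r0, r1, #HSR_EC_SHIFT; cmp r0, #HSR_EC_HVC" */
static bool trap_is_hvc(uint32_t hsr)
{
	return (hsr >> HSR_EC_SHIFT) == HSR_EC_HVC;
}

/* "mrrc p15, 6, r0, r2, c2" reads the 64-bit VTTBR; the VMID sits in
 * bits [55:48], and VMID 0 means the HVC came from the host kernel. */
static bool hvc_from_host(uint64_t vttbr)
{
	return ((vttbr >> 48) & 0xff) == 0;
}

No mask is needed after the EC shift because the exception class is the top field of HSR, exactly as the plain lsr in the assembly relies on.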
AirFortressIlikara/LS2K0300-linux-4.19
1,529
arch/arm/mach-iop13xx/include/mach/entry-macro.S
/*
 * iop13xx low level irq macros
 * Copyright (c) 2005-2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */
	.macro	get_irqnr_preamble, base, tmp
	mrc	p15, 0, \tmp, c15, c1, 0
	orr	\tmp, \tmp, #(1 << 6)
	mcr	p15, 0, \tmp, c15, c1, 0	@ Enable cp6 access
	.endm

/*
 * Note: a 1-cycle window exists where iintvec will return the value
 * of iintbase, so we explicitly check for "bad zeros"
 */
	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
	mrc	p6, 0, \irqnr, c3, c2, 0	@ Read IINTVEC
	cmp	\irqnr, #0
	mrceq	p6, 0, \irqnr, c3, c2, 0	@ Re-read on potentially bad zero
	adds	\irqstat, \irqnr, #1		@ Check for 0xffffffff
	movne	\irqnr, \irqnr, lsr #2		@ Convert to irqnr
	.endm

	.macro	arch_ret_to_user, tmp1, tmp2
	mrc	p15, 0, \tmp1, c15, c1, 0
	ands	\tmp2, \tmp1, #(1 << 6)
	bicne	\tmp1, \tmp1, #(1 << 6)
	mcrne	p15, 0, \tmp1, c15, c1, 0	@ Disable cp6 access
	.endm
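For reference, the get_irqnr_and_base sequence amounts to the following C, with read_iintvec() standing in for the cp6 read of IINTVEC (a hypothetical helper, not a kernel function); the bad-zero re-read and the 0xffffffff "nothing pending" case come straight from the macro:

#include <stdint.h>

extern uint32_t read_iintvec(void);	/* stand-in for "mrc p6, 0, ..., c3, c2, 0" */

/* Returns the IRQ number, or -1 when nothing is pending (IINTVEC == 0xffffffff). */
static int iop13xx_irqnr(void)
{
	uint32_t vec = read_iintvec();

	if (vec == 0)			/* 1-cycle window where iintbase leaks through */
		vec = read_iintvec();	/* re-read the potentially bad zero */

	if (vec == 0xffffffff)		/* the "adds #1 / movne" pair */
		return -1;

	return vec >> 2;		/* 4-byte vector entries -> IRQ number */
}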
AirFortressIlikara/LS2K0300-linux-4.19
36,937
arch/arm/boot/compressed/head.S
/* * linux/arch/arm/boot/compressed/head.S * * Copyright (C) 1996-2002 Russell King * Copyright (C) 2004 Hyok S. Choi (MPU support) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/v7m.h> #include "efi-header.S" AR_CLASS( .arch armv7-a ) M_CLASS( .arch armv7-m ) /* * Debugging stuff * * Note that these macros must not contain any code which is not * 100% relocatable. Any attempt to do so will result in a crash. * Please select one of the following when turning on debugging. */ #ifdef DEBUG #if defined(CONFIG_DEBUG_ICEDCC) #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7) .macro loadsp, rb, tmp1, tmp2 .endm .macro writeb, ch, rb mcr p14, 0, \ch, c0, c5, 0 .endm #elif defined(CONFIG_CPU_XSCALE) .macro loadsp, rb, tmp1, tmp2 .endm .macro writeb, ch, rb mcr p14, 0, \ch, c8, c0, 0 .endm #else .macro loadsp, rb, tmp1, tmp2 .endm .macro writeb, ch, rb mcr p14, 0, \ch, c1, c0, 0 .endm #endif #else #include CONFIG_DEBUG_LL_INCLUDE .macro writeb, ch, rb senduart \ch, \rb .endm #if defined(CONFIG_ARCH_SA1100) .macro loadsp, rb, tmp1, tmp2 mov \rb, #0x80000000 @ physical base address #ifdef CONFIG_DEBUG_LL_SER3 add \rb, \rb, #0x00050000 @ Ser3 #else add \rb, \rb, #0x00010000 @ Ser1 #endif .endm #else .macro loadsp, rb, tmp1, tmp2 addruart \rb, \tmp1, \tmp2 .endm #endif #endif #endif .macro kputc,val mov r0, \val bl putc .endm .macro kphex,val,len mov r0, \val mov r1, #\len bl phex .endm .macro debug_reloc_start #ifdef DEBUG kputc #'\n' kphex r6, 8 /* processor id */ kputc #':' kphex r7, 8 /* architecture id */ #ifdef CONFIG_CPU_CP15 kputc #':' mrc p15, 0, r0, c1, c0 kphex r0, 8 /* control reg */ #endif kputc #'\n' kphex r5, 8 /* decompressed kernel start */ kputc #'-' kphex r9, 8 /* decompressed kernel end */ kputc #'>' kphex r4, 8 /* kernel execution address */ kputc #'\n' #endif .endm .macro debug_reloc_end #ifdef DEBUG kphex r5, 8 /* end of kernel */ kputc #'\n' mov r0, r4 bl memdump /* dump 256 bytes at start of kernel */ #endif .endm .section ".start", #alloc, #execinstr /* * sort out different calling conventions */ .align /* * Always enter in ARM state for CPUs that support the ARM ISA. * As of today (2014) that's exactly the members of the A and R * classes. */ AR_CLASS( .arm ) start: .type start,#function .rept 7 __nop .endr #ifndef CONFIG_THUMB2_KERNEL mov r0, r0 #else AR_CLASS( sub pc, pc, #3 ) @ A/R: switch to Thumb2 mode M_CLASS( nop.w ) @ M: already in Thumb2 mode .thumb #endif W(b) 1f .word _magic_sig @ Magic numbers to help the loader .word _magic_start @ absolute load/run zImage address .word _magic_end @ zImage end address .word 0x04030201 @ endianness flag .word 0x45454545 @ another magic number to indicate .word _magic_table @ additional data table __EFI_HEADER 1: ARM_BE8( setend be ) @ go BE8 if compiled for BE8 AR_CLASS( mrs r9, cpsr ) #ifdef CONFIG_ARM_VIRT_EXT bl __hyp_stub_install @ get into SVC mode, reversibly #endif mov r7, r1 @ save architecture ID mov r8, r2 @ save atags pointer #ifndef CONFIG_CPU_V7M /* * Booting from Angel - need to enter SVC mode and disable * FIQs/IRQs (numeric definitions from angel arm.h source). * We only do this if we were in user mode on entry. */ mrs r2, cpsr @ get current mode tst r2, #3 @ not user? 
bne not_angel mov r0, #0x17 @ angel_SWIreason_EnterSVC ARM( swi 0x123456 ) @ angel_SWI_ARM THUMB( svc 0xab ) @ angel_SWI_THUMB not_angel: safe_svcmode_maskall r0 msr spsr_cxsf, r9 @ Save the CPU boot mode in @ SPSR #endif /* * Note that some cache flushing and other stuff may * be needed here - is there an Angel SWI call for this? */ /* * some architecture specific code can be inserted * by the linker here, but it should preserve r7, r8, and r9. */ .text #ifdef CONFIG_AUTO_ZRELADDR /* * Find the start of physical memory. As we are executing * without the MMU on, we are in the physical address space. * We just need to get rid of any offset by aligning the * address. * * This alignment is a balance between the requirements of * different platforms - we have chosen 128MB to allow * platforms which align the start of their physical memory * to 128MB to use this feature, while allowing the zImage * to be placed within the first 128MB of memory on other * platforms. Increasing the alignment means we place * stricter alignment requirements on the start of physical * memory, but relaxing it means that we break people who * are already placing their zImage in (eg) the top 64MB * of this range. */ mov r4, pc and r4, r4, #0xf8000000 /* Determine final kernel image address. */ add r4, r4, #TEXT_OFFSET #else ldr r4, =zreladdr #endif /* * Set up a page table only if it won't overwrite ourself. * That means r4 < pc || r4 - 16k page directory > &_end. * Given that r4 > &_end is most unfrequent, we add a rough * additional 1MB of room for a possible appended DTB. */ mov r0, pc cmp r0, r4 ldrcc r0, LC0+32 addcc r0, r0, pc cmpcc r4, r0 orrcc r4, r4, #1 @ remember we skipped cache_on blcs cache_on restart: adr r0, LC0 ldmia r0, {r1, r2, r3, r6, r10, r11, r12} ldr sp, [r0, #28] /* * We might be running at a different address. We need * to fix up various pointers. */ sub r0, r0, r1 @ calculate the delta offset add r6, r6, r0 @ _edata add r10, r10, r0 @ inflated kernel size location /* * The kernel build system appends the size of the * decompressed kernel at the end of the compressed data * in little-endian form. */ ldrb r9, [r10, #0] ldrb lr, [r10, #1] orr r9, r9, lr, lsl #8 ldrb lr, [r10, #2] ldrb r10, [r10, #3] orr r9, r9, lr, lsl #16 orr r9, r9, r10, lsl #24 #ifndef CONFIG_ZBOOT_ROM /* malloc space is above the relocated stack (64k max) */ add sp, sp, r0 add r10, sp, #0x10000 #else /* * With ZBOOT_ROM the bss/stack is non relocatable, * but someone could still run this code from RAM, * in which case our reference is _edata. */ mov r10, r6 #endif mov r5, #0 @ init dtb size to 0 #ifdef CONFIG_ARM_APPENDED_DTB /* * r0 = delta * r2 = BSS start * r3 = BSS end * r4 = final kernel address (possibly with LSB set) * r5 = appended dtb size (still unknown) * r6 = _edata * r7 = architecture ID * r8 = atags/device tree pointer * r9 = size of decompressed image * r10 = end of this image, including bss/stack/malloc space if non XIP * r11 = GOT start * r12 = GOT end * sp = stack pointer * * if there are device trees (dtb) appended to zImage, advance r10 so that the * dtb data will get relocated along with the kernel if necessary. */ ldr lr, [r6, #0] #ifndef __ARMEB__ ldr r1, =0xedfe0dd0 @ sig is 0xd00dfeed big endian #else ldr r1, =0xd00dfeed #endif cmp lr, r1 bne dtb_check_done @ not found #ifdef CONFIG_ARM_ATAG_DTB_COMPAT /* * OK... Let's do some funky business here. * If we do have a DTB appended to zImage, and we do have * an ATAG list around, we want the later to be translated * and folded into the former here. 
No GOT fixup has occurred * yet, but none of the code we're about to call uses any * global variable. */ /* Get the initial DTB size */ ldr r5, [r6, #4] #ifndef __ARMEB__ /* convert to little endian */ eor r1, r5, r5, ror #16 bic r1, r1, #0x00ff0000 mov r5, r5, ror #8 eor r5, r5, r1, lsr #8 #endif /* 50% DTB growth should be good enough */ add r5, r5, r5, lsr #1 /* preserve 64-bit alignment */ add r5, r5, #7 bic r5, r5, #7 /* clamp to 32KB min and 1MB max */ cmp r5, #(1 << 15) movlo r5, #(1 << 15) cmp r5, #(1 << 20) movhi r5, #(1 << 20) /* temporarily relocate the stack past the DTB work space */ add sp, sp, r5 stmfd sp!, {r0-r3, ip, lr} mov r0, r8 mov r1, r6 mov r2, r5 bl atags_to_fdt /* * If returned value is 1, there is no ATAG at the location * pointed by r8. Try the typical 0x100 offset from start * of RAM and hope for the best. */ cmp r0, #1 sub r0, r4, #TEXT_OFFSET bic r0, r0, #1 add r0, r0, #0x100 mov r1, r6 mov r2, r5 bleq atags_to_fdt ldmfd sp!, {r0-r3, ip, lr} sub sp, sp, r5 #endif mov r8, r6 @ use the appended device tree /* * Make sure that the DTB doesn't end up in the final * kernel's .bss area. To do so, we adjust the decompressed * kernel size to compensate if that .bss size is larger * than the relocated code. */ ldr r5, =_kernel_bss_size adr r1, wont_overwrite sub r1, r6, r1 subs r1, r5, r1 addhi r9, r9, r1 /* Get the current DTB size */ ldr r5, [r6, #4] #ifndef __ARMEB__ /* convert r5 (dtb size) to little endian */ eor r1, r5, r5, ror #16 bic r1, r1, #0x00ff0000 mov r5, r5, ror #8 eor r5, r5, r1, lsr #8 #endif /* preserve 64-bit alignment */ add r5, r5, #7 bic r5, r5, #7 /* relocate some pointers past the appended dtb */ add r6, r6, r5 add r10, r10, r5 add sp, sp, r5 dtb_check_done: #endif /* * Check to see if we will overwrite ourselves. * r4 = final kernel address (possibly with LSB set) * r9 = size of decompressed image * r10 = end of this image, including bss/stack/malloc space if non XIP * We basically want: * r4 - 16k page directory >= r10 -> OK * r4 + image length <= address of wont_overwrite -> OK * Note: the possible LSB in r4 is harmless here. */ add r10, r10, #16384 cmp r4, r10 bhs wont_overwrite add r10, r4, r9 adr r9, wont_overwrite cmp r10, r9 bls wont_overwrite /* * Relocate ourselves past the end of the decompressed kernel. * r6 = _edata * r10 = end of the decompressed kernel * Because we always copy ahead, we need to do it from the end and go * backward in case the source and destination overlap. */ /* * Bump to the next 256-byte boundary with the size of * the relocation code added. This avoids overwriting * ourself when the offset is small. */ add r10, r10, #((reloc_code_end - restart + 256) & ~255) bic r10, r10, #255 /* Get start of code we want to copy and align it down. */ adr r5, restart bic r5, r5, #31 /* Relocate the hyp vector base if necessary */ #ifdef CONFIG_ARM_VIRT_EXT mrs r0, spsr and r0, r0, #MODE_MASK cmp r0, #HYP_MODE bne 1f /* * Compute the address of the hyp vectors after relocation. * This requires some arithmetic since we cannot directly * reference __hyp_stub_vectors in a PC-relative way. * Call __hyp_set_vectors with the new address so that we * can HVC again after the copy. */ 0: adr r0, 0b movw r1, #:lower16:__hyp_stub_vectors - 0b movt r1, #:upper16:__hyp_stub_vectors - 0b add r0, r0, r1 sub r0, r0, r5 add r0, r0, r10 bl __hyp_set_vectors 1: #endif sub r9, r6, r5 @ size to copy add r9, r9, #31 @ rounded up to a multiple bic r9, r9, #31 @ ... 
of 32 bytes add r6, r9, r5 add r9, r9, r10 1: ldmdb r6!, {r0 - r3, r10 - r12, lr} cmp r6, r5 stmdb r9!, {r0 - r3, r10 - r12, lr} bhi 1b /* Preserve offset to relocated code. */ sub r6, r9, r6 #ifndef CONFIG_ZBOOT_ROM /* cache_clean_flush may use the stack, so relocate it */ add sp, sp, r6 #endif bl cache_clean_flush badr r0, restart add r0, r0, r6 mov pc, r0 wont_overwrite: /* * If delta is zero, we are running at the address we were linked at. * r0 = delta * r2 = BSS start * r3 = BSS end * r4 = kernel execution address (possibly with LSB set) * r5 = appended dtb size (0 if not present) * r7 = architecture ID * r8 = atags pointer * r11 = GOT start * r12 = GOT end * sp = stack pointer */ orrs r1, r0, r5 beq not_relocated add r11, r11, r0 add r12, r12, r0 #ifndef CONFIG_ZBOOT_ROM /* * If we're running fully PIC === CONFIG_ZBOOT_ROM = n, * we need to fix up pointers into the BSS region. * Note that the stack pointer has already been fixed up. */ add r2, r2, r0 add r3, r3, r0 /* * Relocate all entries in the GOT table. * Bump bss entries to _edata + dtb size */ 1: ldr r1, [r11, #0] @ relocate entries in the GOT add r1, r1, r0 @ This fixes up C references cmp r1, r2 @ if entry >= bss_start && cmphs r3, r1 @ bss_end > entry addhi r1, r1, r5 @ entry += dtb size str r1, [r11], #4 @ next entry cmp r11, r12 blo 1b /* bump our bss pointers too */ add r2, r2, r5 add r3, r3, r5 #else /* * Relocate entries in the GOT table. We only relocate * the entries that are outside the (relocated) BSS region. */ 1: ldr r1, [r11, #0] @ relocate entries in the GOT cmp r1, r2 @ entry < bss_start || cmphs r3, r1 @ _end < entry addlo r1, r1, r0 @ table. This fixes up the str r1, [r11], #4 @ C references. cmp r11, r12 blo 1b #endif not_relocated: mov r0, #0 1: str r0, [r2], #4 @ clear bss str r0, [r2], #4 str r0, [r2], #4 str r0, [r2], #4 cmp r2, r3 blo 1b /* * Did we skip the cache setup earlier? * That is indicated by the LSB in r4. * Do it now if so. */ tst r4, #1 bic r4, r4, #1 blne cache_on /* * The C runtime environment should now be setup sufficiently. * Set up some pointers, and start decompressing. * r4 = kernel execution address * r7 = architecture ID * r8 = atags pointer */ mov r0, r4 mov r1, sp @ malloc space above stack add r2, sp, #0x10000 @ 64k max mov r3, r7 bl decompress_kernel bl cache_clean_flush bl cache_off #ifdef CONFIG_ARM_VIRT_EXT mrs r0, spsr @ Get saved CPU boot mode and r0, r0, #MODE_MASK cmp r0, #HYP_MODE @ if not booted in HYP mode... bne __enter_kernel @ boot kernel directly adr r12, .L__hyp_reentry_vectors_offset ldr r0, [r12] add r0, r0, r12 bl __hyp_set_vectors __HVC(0) @ otherwise bounce to hyp mode b . @ should never be reached .align 2 .L__hyp_reentry_vectors_offset: .long __hyp_reentry_vectors - . #else b __enter_kernel #endif .align 2 .type LC0, #object LC0: .word LC0 @ r1 .word __bss_start @ r2 .word _end @ r3 .word _edata @ r6 .word input_data_end - 4 @ r10 (inflated size location) .word _got_start @ r11 .word _got_end @ ip .word .L_user_stack_end @ sp .word _end - restart + 16384 + 1024*1024 .size LC0, . - LC0 #ifdef CONFIG_ARCH_RPC .globl params params: ldr r0, =0x10000100 @ params_phys for RPC mov pc, lr .ltorg .align #endif /* * Turn on the cache. We need to setup some page tables so that we * can have both the I and D caches on. * * We place the page tables 16k down from the kernel execution address, * and we hope that nothing else is using it. If we're using it, we * will go pop! 
* * On entry, * r4 = kernel execution address * r7 = architecture number * r8 = atags pointer * On exit, * r0, r1, r2, r3, r9, r10, r12 corrupted * This routine must preserve: * r4, r7, r8 */ .align 5 cache_on: mov r3, #8 @ cache_on function b call_cache_fn /* * Initialize the highest priority protection region, PR7 * to cover all 32bit address and cacheable and bufferable. */ __armv4_mpu_cache_on: mov r0, #0x3f @ 4G, the whole mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting mcr p15, 0, r0, c6, c7, 1 mov r0, #0x80 @ PR7 mcr p15, 0, r0, c2, c0, 0 @ D-cache on mcr p15, 0, r0, c2, c0, 1 @ I-cache on mcr p15, 0, r0, c3, c0, 0 @ write-buffer on mov r0, #0xc000 mcr p15, 0, r0, c5, c0, 1 @ I-access permission mcr p15, 0, r0, c5, c0, 0 @ D-access permission mov r0, #0 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache mrc p15, 0, r0, c1, c0, 0 @ read control reg @ ...I .... ..D. WC.M orr r0, r0, #0x002d @ .... .... ..1. 11.1 orr r0, r0, #0x1000 @ ...1 .... .... .... mcr p15, 0, r0, c1, c0, 0 @ write control reg mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache mov pc, lr __armv3_mpu_cache_on: mov r0, #0x3f @ 4G, the whole mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting mov r0, #0x80 @ PR7 mcr p15, 0, r0, c2, c0, 0 @ cache on mcr p15, 0, r0, c3, c0, 0 @ write-buffer on mov r0, #0xc000 mcr p15, 0, r0, c5, c0, 0 @ access permission mov r0, #0 mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 /* * ?? ARMv3 MMU does not allow reading the control register, * does this really work on ARMv3 MPU? */ mrc p15, 0, r0, c1, c0, 0 @ read control reg @ .... .... .... WC.M orr r0, r0, #0x000d @ .... .... .... 11.1 /* ?? this overwrites the value constructed above? */ mov r0, #0 mcr p15, 0, r0, c1, c0, 0 @ write control reg /* ?? invalidate for the second time? */ mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 mov pc, lr #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH #define CB_BITS 0x08 #else #define CB_BITS 0x0c #endif __setup_mmu: sub r3, r4, #16384 @ Page directory size bic r3, r3, #0xff @ Align the pointer bic r3, r3, #0x3f00 /* * Initialise the page tables, turning on the cacheable and bufferable * bits for the RAM area only. */ mov r0, r3 mov r9, r0, lsr #18 mov r9, r9, lsl #18 @ start of RAM add r10, r9, #0x10000000 @ a reasonable RAM size mov r1, #0x12 @ XN|U + section mapping orr r1, r1, #3 << 10 @ AP=11 add r2, r3, #16384 1: cmp r1, r9 @ if virt > start of RAM cmphs r10, r1 @ && end of RAM > virt bic r1, r1, #0x1c @ clear XN|U + C + B orrlo r1, r1, #0x10 @ Set XN|U for non-RAM orrhs r1, r1, r6 @ set RAM section settings str r1, [r0], #4 @ 1:1 mapping add r1, r1, #1048576 teq r0, r2 bne 1b /* * If ever we are running from Flash, then we surely want the cache * to be enabled also for our execution instance... We map 2MB of it * so there is no map overlap problem for up to 1 MB compressed kernel. * If the execution is in RAM then we would only be duplicating the above. 
*/ orr r1, r6, #0x04 @ ensure B is set for this orr r1, r1, #3 << 10 mov r2, pc mov r2, r2, lsr #20 orr r1, r1, r2, lsl #20 add r0, r3, r2, lsl #2 str r1, [r0], #4 add r1, r1, #1048576 str r1, [r0] mov pc, lr ENDPROC(__setup_mmu) @ Enable unaligned access on v6, to allow better code generation @ for the decompressor C code: __armv6_mmu_cache_on: mrc p15, 0, r0, c1, c0, 0 @ read SCTLR bic r0, r0, #2 @ A (no unaligned access fault) orr r0, r0, #1 << 22 @ U (v6 unaligned access model) mcr p15, 0, r0, c1, c0, 0 @ write SCTLR b __armv4_mmu_cache_on __arm926ejs_mmu_cache_on: #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mov r0, #4 @ put dcache in WT mode mcr p15, 7, r0, c15, c0, 0 #endif __armv4_mmu_cache_on: mov r12, lr #ifdef CONFIG_MMU mov r6, #CB_BITS | 0x12 @ U bl __setup_mmu mov r0, #0 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs mrc p15, 0, r0, c1, c0, 0 @ read control reg orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement orr r0, r0, #0x0030 ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables bl __common_mmu_cache_on mov r0, #0 mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs #endif mov pc, r12 __armv7_mmu_cache_on: mov r12, lr #ifdef CONFIG_MMU mrc p15, 0, r11, c0, c1, 4 @ read ID_MMFR0 tst r11, #0xf @ VMSA movne r6, #CB_BITS | 0x02 @ !XN blne __setup_mmu mov r0, #0 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer tst r11, #0xf @ VMSA mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs #endif mrc p15, 0, r0, c1, c0, 0 @ read control reg bic r0, r0, #1 << 28 @ clear SCTLR.TRE orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement orr r0, r0, #0x003c @ write buffer bic r0, r0, #2 @ A (no unaligned access fault) orr r0, r0, #1 << 22 @ U (v6 unaligned access model) @ (needed for ARM1176) #ifdef CONFIG_MMU ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables mrcne p15, 0, r6, c2, c0, 2 @ read ttb control reg orrne r0, r0, #1 @ MMU enabled movne r1, #0xfffffffd @ domain 0 = client bic r6, r6, #1 << 31 @ 32-bit translation system bic r6, r6, #(7 << 0) | (1 << 4) @ use only ttbr0 mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer mcrne p15, 0, r1, c3, c0, 0 @ load domain access control mcrne p15, 0, r6, c2, c0, 2 @ load ttb control #endif mcr p15, 0, r0, c7, c5, 4 @ ISB mcr p15, 0, r0, c1, c0, 0 @ load control register mrc p15, 0, r0, c1, c0, 0 @ and read it back mov r0, #0 mcr p15, 0, r0, c7, c5, 4 @ ISB mov pc, r12 __fa526_cache_on: mov r12, lr mov r6, #CB_BITS | 0x12 @ U bl __setup_mmu mov r0, #0 mcr p15, 0, r0, c7, c7, 0 @ Invalidate whole cache mcr p15, 0, r0, c7, c10, 4 @ drain write buffer mcr p15, 0, r0, c8, c7, 0 @ flush UTLB mrc p15, 0, r0, c1, c0, 0 @ read control reg orr r0, r0, #0x1000 @ I-cache enable bl __common_mmu_cache_on mov r0, #0 mcr p15, 0, r0, c8, c7, 0 @ flush UTLB mov pc, r12 __common_mmu_cache_on: #ifndef CONFIG_THUMB2_KERNEL #ifndef DEBUG orr r0, r0, #0x000d @ Write buffer, mmu #endif mov r1, #-1 mcr p15, 0, r3, c2, c0, 0 @ load page table pointer mcr p15, 0, r1, c3, c0, 0 @ load domain access control b 1f .align 5 @ cache line aligned 1: mcr p15, 0, r0, c1, c0, 0 @ load control register mrc p15, 0, r0, c1, c0, 0 @ and read it back to sub pc, lr, r0, lsr #32 @ properly flush pipeline #endif #define PROC_ENTRY_SIZE (4*5) /* * Here follow the relocatable cache support functions for the * various processors. This is a generic hook for locating an * entry and jumping to an instruction at the specified offset * from the start of the block. Please note this is all position * independent code. 
* * r1 = corrupted * r2 = corrupted * r3 = block offset * r9 = corrupted * r12 = corrupted */ call_cache_fn: adr r12, proc_types #ifdef CONFIG_CPU_CP15 mrc p15, 0, r9, c0, c0 @ get processor ID #elif defined(CONFIG_CPU_V7M) /* * On v7-M the processor id is located in the V7M_SCB_CPUID * register, but as cache handling is IMPLEMENTATION DEFINED on * v7-M (if existant at all) we just return early here. * If V7M_SCB_CPUID were used the cpu ID functions (i.e. * __armv7_mmu_cache_{on,off,flush}) would be selected which * use cp15 registers that are not implemented on v7-M. */ bx lr #else ldr r9, =CONFIG_PROCESSOR_ID #endif 1: ldr r1, [r12, #0] @ get value ldr r2, [r12, #4] @ get mask eor r1, r1, r9 @ (real ^ match) tst r1, r2 @ & mask ARM( addeq pc, r12, r3 ) @ call cache function THUMB( addeq r12, r3 ) THUMB( moveq pc, r12 ) @ call cache function add r12, r12, #PROC_ENTRY_SIZE b 1b /* * Table for cache operations. This is basically: * - CPU ID match * - CPU ID mask * - 'cache on' method instruction * - 'cache off' method instruction * - 'cache flush' method instruction * * We match an entry using: ((real_id ^ match) & mask) == 0 * * Writethrough caches generally only need 'on' and 'off' * methods. Writeback caches _must_ have the flush method * defined. */ .align 2 .type proc_types,#object proc_types: .word 0x41000000 @ old ARM ID .word 0xff00f000 mov pc, lr THUMB( nop ) mov pc, lr THUMB( nop ) mov pc, lr THUMB( nop ) .word 0x41007000 @ ARM7/710 .word 0xfff8fe00 mov pc, lr THUMB( nop ) mov pc, lr THUMB( nop ) mov pc, lr THUMB( nop ) .word 0x41807200 @ ARM720T (writethrough) .word 0xffffff00 W(b) __armv4_mmu_cache_on W(b) __armv4_mmu_cache_off mov pc, lr THUMB( nop ) .word 0x41007400 @ ARM74x .word 0xff00ff00 W(b) __armv3_mpu_cache_on W(b) __armv3_mpu_cache_off W(b) __armv3_mpu_cache_flush .word 0x41009400 @ ARM94x .word 0xff00ff00 W(b) __armv4_mpu_cache_on W(b) __armv4_mpu_cache_off W(b) __armv4_mpu_cache_flush .word 0x41069260 @ ARM926EJ-S (v5TEJ) .word 0xff0ffff0 W(b) __arm926ejs_mmu_cache_on W(b) __armv4_mmu_cache_off W(b) __armv5tej_mmu_cache_flush .word 0x00007000 @ ARM7 IDs .word 0x0000f000 mov pc, lr THUMB( nop ) mov pc, lr THUMB( nop ) mov pc, lr THUMB( nop ) @ Everything from here on will be the new ID system. 
.word 0x4401a100 @ sa110 / sa1100 .word 0xffffffe0 W(b) __armv4_mmu_cache_on W(b) __armv4_mmu_cache_off W(b) __armv4_mmu_cache_flush .word 0x6901b110 @ sa1110 .word 0xfffffff0 W(b) __armv4_mmu_cache_on W(b) __armv4_mmu_cache_off W(b) __armv4_mmu_cache_flush .word 0x56056900 .word 0xffffff00 @ PXA9xx W(b) __armv4_mmu_cache_on W(b) __armv4_mmu_cache_off W(b) __armv4_mmu_cache_flush .word 0x56158000 @ PXA168 .word 0xfffff000 W(b) __armv4_mmu_cache_on W(b) __armv4_mmu_cache_off W(b) __armv5tej_mmu_cache_flush .word 0x56050000 @ Feroceon .word 0xff0f0000 W(b) __armv4_mmu_cache_on W(b) __armv4_mmu_cache_off W(b) __armv5tej_mmu_cache_flush #ifdef CONFIG_CPU_FEROCEON_OLD_ID /* this conflicts with the standard ARMv5TE entry */ .long 0x41009260 @ Old Feroceon .long 0xff00fff0 b __armv4_mmu_cache_on b __armv4_mmu_cache_off b __armv5tej_mmu_cache_flush #endif .word 0x66015261 @ FA526 .word 0xff01fff1 W(b) __fa526_cache_on W(b) __armv4_mmu_cache_off W(b) __fa526_cache_flush @ These match on the architecture ID .word 0x00020000 @ ARMv4T .word 0x000f0000 W(b) __armv4_mmu_cache_on W(b) __armv4_mmu_cache_off W(b) __armv4_mmu_cache_flush .word 0x00050000 @ ARMv5TE .word 0x000f0000 W(b) __armv4_mmu_cache_on W(b) __armv4_mmu_cache_off W(b) __armv4_mmu_cache_flush .word 0x00060000 @ ARMv5TEJ .word 0x000f0000 W(b) __armv4_mmu_cache_on W(b) __armv4_mmu_cache_off W(b) __armv5tej_mmu_cache_flush .word 0x0007b000 @ ARMv6 .word 0x000ff000 W(b) __armv6_mmu_cache_on W(b) __armv4_mmu_cache_off W(b) __armv6_mmu_cache_flush .word 0x000f0000 @ new CPU Id .word 0x000f0000 W(b) __armv7_mmu_cache_on W(b) __armv7_mmu_cache_off W(b) __armv7_mmu_cache_flush .word 0 @ unrecognised type .word 0 mov pc, lr THUMB( nop ) mov pc, lr THUMB( nop ) mov pc, lr THUMB( nop ) .size proc_types, . - proc_types /* * If you get a "non-constant expression in ".if" statement" * error from the assembler on this line, check that you have * not accidentally written a "b" instruction where you should * have written W(b). */ .if (. - proc_types) % PROC_ENTRY_SIZE != 0 .error "The size of one or more proc_types entries is wrong." .endif /* * Turn off the Cache and MMU. ARMv3 does not support * reading the control register, but ARMv4 does. 
* * On exit, * r0, r1, r2, r3, r9, r12 corrupted * This routine must preserve: * r4, r7, r8 */ .align 5 cache_off: mov r3, #12 @ cache_off function b call_cache_fn __armv4_mpu_cache_off: mrc p15, 0, r0, c1, c0 bic r0, r0, #0x000d mcr p15, 0, r0, c1, c0 @ turn MPU and cache off mov r0, #0 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer mcr p15, 0, r0, c7, c6, 0 @ flush D-Cache mcr p15, 0, r0, c7, c5, 0 @ flush I-Cache mov pc, lr __armv3_mpu_cache_off: mrc p15, 0, r0, c1, c0 bic r0, r0, #0x000d mcr p15, 0, r0, c1, c0, 0 @ turn MPU and cache off mov r0, #0 mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 mov pc, lr __armv4_mmu_cache_off: #ifdef CONFIG_MMU mrc p15, 0, r0, c1, c0 bic r0, r0, #0x000d mcr p15, 0, r0, c1, c0 @ turn MMU and cache off mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate whole cache v4 mcr p15, 0, r0, c8, c7 @ invalidate whole TLB v4 #endif mov pc, lr __armv7_mmu_cache_off: mrc p15, 0, r0, c1, c0 #ifdef CONFIG_MMU bic r0, r0, #0x0005 #else bic r0, r0, #0x0004 #endif mcr p15, 0, r0, c1, c0 @ turn MMU and cache off mov r12, lr bl __armv7_mmu_cache_flush mov r0, #0 #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7, 0 @ invalidate whole TLB #endif mcr p15, 0, r0, c7, c5, 6 @ invalidate BTC mcr p15, 0, r0, c7, c10, 4 @ DSB mcr p15, 0, r0, c7, c5, 4 @ ISB mov pc, r12 /* * Clean and flush the cache to maintain consistency. * * On exit, * r1, r2, r3, r9, r10, r11, r12 corrupted * This routine must preserve: * r4, r6, r7, r8 */ .align 5 cache_clean_flush: mov r3, #16 b call_cache_fn __armv4_mpu_cache_flush: tst r4, #1 movne pc, lr mov r2, #1 mov r3, #0 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache mov r1, #7 << 5 @ 8 segments 1: orr r3, r1, #63 << 26 @ 64 entries 2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index subs r3, r3, #1 << 26 bcs 2b @ entries 63 to 0 subs r1, r1, #1 << 5 bcs 1b @ segments 7 to 0 teq r2, #0 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcr p15, 0, ip, c7, c10, 4 @ drain WB mov pc, lr __fa526_cache_flush: tst r4, #1 movne pc, lr mov r1, #0 mcr p15, 0, r1, c7, c14, 0 @ clean and invalidate D cache mcr p15, 0, r1, c7, c5, 0 @ flush I cache mcr p15, 0, r1, c7, c10, 4 @ drain WB mov pc, lr __armv6_mmu_cache_flush: mov r1, #0 tst r4, #1 mcreq p15, 0, r1, c7, c14, 0 @ clean+invalidate D mcr p15, 0, r1, c7, c5, 0 @ invalidate I+BTB mcreq p15, 0, r1, c7, c15, 0 @ clean+invalidate unified mcr p15, 0, r1, c7, c10, 4 @ drain WB mov pc, lr __armv7_mmu_cache_flush: tst r4, #1 bne iflush mrc p15, 0, r10, c0, c1, 5 @ read ID_MMFR1 tst r10, #0xf << 16 @ hierarchical cache (ARMv7) mov r10, #0 beq hierarchical mcr p15, 0, r10, c7, c14, 0 @ clean+invalidate D b iflush hierarchical: mcr p15, 0, r10, c7, c10, 5 @ DMB stmfd sp!, {r0-r7, r9-r11} mrc p15, 1, r0, c0, c0, 1 @ read clidr ands r3, r0, #0x7000000 @ extract loc from clidr mov r3, r3, lsr #23 @ left align loc bit field beq finished @ if loc is 0, then no need to clean mov r10, #0 @ start clean at cache level 0 loop1: add r2, r10, r10, lsr #1 @ work out 3x current cache level mov r1, r0, lsr r2 @ extract cache type bits from clidr and r1, r1, #7 @ mask of the bits for current cache only cmp r1, #2 @ see what cache we have at this level blt skip @ skip if no cache, or just i-cache mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr mcr p15, 0, r10, c7, c5, 4 @ isb to sych the new cssr&csidr mrc p15, 1, r1, c0, c0, 0 @ read the new csidr and r2, r1, #7 @ extract the length of the cache lines add r2, r2, #4 @ add 4 (line length offset) ldr r4, =0x3ff ands r4, r4, r1, lsr #3 @ find maximum number on the way 
size clz r5, r4 @ find bit position of way size increment ldr r7, =0x7fff ands r7, r7, r1, lsr #13 @ extract max number of the index size loop2: mov r9, r4 @ create working copy of max way size loop3: ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11 ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11 THUMB( lsl r6, r9, r5 ) THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11 THUMB( lsl r6, r7, r2 ) THUMB( orr r11, r11, r6 ) @ factor index number into r11 mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way subs r9, r9, #1 @ decrement the way bge loop3 subs r7, r7, #1 @ decrement the index bge loop2 skip: add r10, r10, #2 @ increment cache number cmp r3, r10 bgt loop1 finished: ldmfd sp!, {r0-r7, r9-r11} mov r10, #0 @ switch back to cache level 0 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr iflush: mcr p15, 0, r10, c7, c10, 4 @ DSB mcr p15, 0, r10, c7, c5, 0 @ invalidate I+BTB mcr p15, 0, r10, c7, c10, 4 @ DSB mcr p15, 0, r10, c7, c5, 4 @ ISB mov pc, lr __armv5tej_mmu_cache_flush: tst r4, #1 movne pc, lr 1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate D cache bne 1b mcr p15, 0, r0, c7, c5, 0 @ flush I cache mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr __armv4_mmu_cache_flush: tst r4, #1 movne pc, lr mov r2, #64*1024 @ default: 32K dcache size (*2) mov r11, #32 @ default: 32 byte line size mrc p15, 0, r3, c0, c0, 1 @ read cache type teq r3, r9 @ cache ID register present? beq no_cache_id mov r1, r3, lsr #18 and r1, r1, #7 mov r2, #1024 mov r2, r2, lsl r1 @ base dcache size *2 tst r3, #1 << 14 @ test M bit addne r2, r2, r2, lsr #1 @ +1/2 size if M == 1 mov r3, r3, lsr #12 and r3, r3, #3 mov r11, #8 mov r11, r11, lsl r3 @ cache line size in bytes no_cache_id: mov r1, pc bic r1, r1, #63 @ align to longest cache line add r2, r1, r2 1: ARM( ldr r3, [r1], r11 ) @ s/w flush D cache THUMB( ldr r3, [r1] ) @ s/w flush D cache THUMB( add r1, r1, r11 ) teq r1, r2 bne 1b mcr p15, 0, r1, c7, c5, 0 @ flush I cache mcr p15, 0, r1, c7, c6, 0 @ flush D cache mcr p15, 0, r1, c7, c10, 4 @ drain WB mov pc, lr __armv3_mmu_cache_flush: __armv3_mpu_cache_flush: tst r4, #1 movne pc, lr mov r1, #0 mcr p15, 0, r1, c7, c0, 0 @ invalidate whole cache v3 mov pc, lr /* * Various debugging routines for printing hex characters and * memory, which again must be relocatable. */ #ifdef DEBUG .align 2 .type phexbuf,#object phexbuf: .space 12 .size phexbuf, . - phexbuf @ phex corrupts {r0, r1, r2, r3} phex: adr r3, phexbuf mov r2, #0 strb r2, [r3, r1] 1: subs r1, r1, #1 movmi r0, r3 bmi puts and r2, r0, #15 mov r0, r0, lsr #4 cmp r2, #10 addge r2, r2, #7 add r2, r2, #'0' strb r2, [r3, r1] b 1b @ puts corrupts {r0, r1, r2, r3} puts: loadsp r3, r2, r1 1: ldrb r2, [r0], #1 teq r2, #0 moveq pc, lr 2: writeb r2, r3 mov r1, #0x00020000 3: subs r1, r1, #1 bne 3b teq r2, #'\n' moveq r2, #'\r' beq 2b teq r0, #0 bne 1b mov pc, lr @ putc corrupts {r0, r1, r2, r3} putc: mov r2, r0 loadsp r3, r1, r0 mov r0, #0 b 2b @ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr} memdump: mov r12, r0 mov r10, lr mov r11, #0 2: mov r0, r11, lsl #2 add r0, r0, r12 mov r1, #8 bl phex mov r0, #':' bl putc 1: mov r0, #' ' bl putc ldr r0, [r12, r11, lsl #2] mov r1, #8 bl phex and r0, r11, #7 teq r0, #3 moveq r0, #' ' bleq putc and r0, r11, #7 add r11, r11, #1 teq r0, #7 bne 1b mov r0, #'\n' bl putc cmp r11, #64 blt 2b mov pc, r10 #endif .ltorg #ifdef CONFIG_ARM_VIRT_EXT .align 5 __hyp_reentry_vectors: W(b) . @ reset W(b) . @ undef W(b) . @ svc W(b) . @ pabort W(b) . 
@ dabort W(b) __enter_kernel @ hyp W(b) . @ irq W(b) . @ fiq #endif /* CONFIG_ARM_VIRT_EXT */ __enter_kernel: mov r0, #0 @ must be 0 mov r1, r7 @ restore architecture number mov r2, r8 @ restore atags pointer ARM( mov pc, r4 ) @ call kernel M_CLASS( add r4, r4, #1 ) @ enter in Thumb mode for M class THUMB( bx r4 ) @ entry point is always ARM for A/R classes reloc_code_end: #ifdef CONFIG_EFI_STUB .align 2 _start: .long start - . ENTRY(efi_stub_entry) @ allocate space on stack for passing current zImage address @ and for the EFI stub to return of new entry point of @ zImage, as EFI stub may copy the kernel. Pointer address @ is passed in r2. r0 and r1 are passed through from the @ EFI firmware to efi_entry adr ip, _start ldr r3, [ip] add r3, r3, ip stmfd sp!, {r3, lr} mov r2, sp @ pass zImage address in r2 bl efi_entry @ Check for error return from EFI stub. r0 has FDT address @ or error code. cmn r0, #1 beq efi_load_fail @ Preserve return value of efi_entry() in r4 mov r4, r0 @ our cache maintenance code relies on CP15 barrier instructions @ but since we arrived here with the MMU and caches configured @ by UEFI, we must check that the CP15BEN bit is set in SCTLR. @ Note that this bit is RAO/WI on v6 and earlier, so the ISB in @ the enable path will be executed on v7+ only. mrc p15, 0, r1, c1, c0, 0 @ read SCTLR tst r1, #(1 << 5) @ CP15BEN bit set? bne 0f orr r1, r1, #(1 << 5) @ CP15 barrier instructions mcr p15, 0, r1, c1, c0, 0 @ write SCTLR ARM( .inst 0xf57ff06f @ v7+ isb ) THUMB( isb ) 0: bl cache_clean_flush bl cache_off @ Set parameters for booting zImage according to boot protocol @ put FDT address in r2, it was returned by efi_entry() @ r1 is the machine type, and r0 needs to be 0 mov r0, #0 mov r1, #0xFFFFFFFF mov r2, r4 @ Branch to (possibly) relocated zImage that is in [sp] ldr lr, [sp] ldr ip, =start_offset add lr, lr, ip mov pc, lr @ no mode switch efi_load_fail: @ Return EFI_LOAD_ERROR to EFI firmware on error. ldr r0, =0x80000001 ldmfd sp!, {ip, pc} ENDPROC(efi_stub_entry) #endif .align .section ".stack", "aw", %nobits .L_user_stack: .space 4096 .L_user_stack_end:
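One part of the appended-DTB handling in the restart path that is easy to restate is the work-space estimate: the FDT totalsize word is big-endian, so a little-endian build byte-swaps it (the eor/bic/ror sequence), grows it by 50% to leave room for the folded-in ATAGs, keeps it 64-bit aligned, and clamps it to between 32 KiB and 1 MiB. A C sketch of the same arithmetic, with __builtin_bswap32 standing in for the hand-rolled swap:

#include <stdint.h>

static uint32_t dtb_workspace_size(uint32_t be_totalsize)
{
	uint32_t size = __builtin_bswap32(be_totalsize);	/* convert to little endian */

	size += size >> 1;		/* 50% DTB growth should be good enough */
	size = (size + 7) & ~7u;	/* preserve 64-bit alignment */

	if (size < (1u << 15))		/* clamp to 32KB min ... */
		size = 1u << 15;
	if (size > (1u << 20))		/* ... and 1MB max */
		size = 1u << 20;

	return size;
}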
AirFortressIlikara/LS2K0300-linux-4.19
4,184
arch/arm/boot/compressed/efi-header.S
/* * Copyright (C) 2013-2017 Linaro Ltd * Authors: Roy Franz <roy.franz@linaro.org> * Ard Biesheuvel <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/pe.h> #include <linux/sizes.h> .macro __nop #ifdef CONFIG_EFI_STUB @ This is almost but not quite a NOP, since it does clobber the @ condition flags. But it is the best we can do for EFI, since @ PE/COFF expects the magic string "MZ" at offset 0, while the @ ARM/Linux boot protocol expects an executable instruction @ there. .inst MZ_MAGIC | (0x1310 << 16) @ tstne r0, #0x4d000 #else AR_CLASS( mov r0, r0 ) M_CLASS( nop.w ) #endif .endm .macro __EFI_HEADER #ifdef CONFIG_EFI_STUB .set start_offset, __efi_start - start .org start + 0x3c @ @ The PE header can be anywhere in the file, but for @ simplicity we keep it together with the MSDOS header @ The offset to the PE/COFF header needs to be at offset @ 0x3C in the MSDOS header. @ The only 2 fields of the MSDOS header that are used are this @ PE/COFF offset, and the "MZ" bytes at offset 0x0. @ .long pe_header - start @ Offset to the PE header. pe_header: .long PE_MAGIC coff_header: .short IMAGE_FILE_MACHINE_THUMB @ Machine .short section_count @ NumberOfSections .long 0 @ TimeDateStamp .long 0 @ PointerToSymbolTable .long 0 @ NumberOfSymbols .short section_table - optional_header @ SizeOfOptionalHeader .short IMAGE_FILE_32BIT_MACHINE | \ IMAGE_FILE_DEBUG_STRIPPED | \ IMAGE_FILE_EXECUTABLE_IMAGE | \ IMAGE_FILE_LINE_NUMS_STRIPPED @ Characteristics #define __pecoff_code_size (__pecoff_data_start - __efi_start) optional_header: .short PE_OPT_MAGIC_PE32 @ PE32 format .byte 0x02 @ MajorLinkerVersion .byte 0x14 @ MinorLinkerVersion .long __pecoff_code_size @ SizeOfCode .long __pecoff_data_size @ SizeOfInitializedData .long 0 @ SizeOfUninitializedData .long efi_stub_entry - start @ AddressOfEntryPoint .long start_offset @ BaseOfCode .long __pecoff_data_start - start @ BaseOfData extra_header_fields: .long 0 @ ImageBase .long SZ_4K @ SectionAlignment .long SZ_512 @ FileAlignment .short 0 @ MajorOsVersion .short 0 @ MinorOsVersion .short 0 @ MajorImageVersion .short 0 @ MinorImageVersion .short 0 @ MajorSubsystemVersion .short 0 @ MinorSubsystemVersion .long 0 @ Win32VersionValue .long __pecoff_end - start @ SizeOfImage .long start_offset @ SizeOfHeaders .long 0 @ CheckSum .short IMAGE_SUBSYSTEM_EFI_APPLICATION @ Subsystem .short 0 @ DllCharacteristics .long 0 @ SizeOfStackReserve .long 0 @ SizeOfStackCommit .long 0 @ SizeOfHeapReserve .long 0 @ SizeOfHeapCommit .long 0 @ LoaderFlags .long (section_table - .) 
/ 8 @ NumberOfRvaAndSizes .quad 0 @ ExportTable .quad 0 @ ImportTable .quad 0 @ ResourceTable .quad 0 @ ExceptionTable .quad 0 @ CertificationTable .quad 0 @ BaseRelocationTable section_table: .ascii ".text\0\0\0" .long __pecoff_code_size @ VirtualSize .long __efi_start @ VirtualAddress .long __pecoff_code_size @ SizeOfRawData .long __efi_start @ PointerToRawData .long 0 @ PointerToRelocations .long 0 @ PointerToLineNumbers .short 0 @ NumberOfRelocations .short 0 @ NumberOfLineNumbers .long IMAGE_SCN_CNT_CODE | \ IMAGE_SCN_MEM_READ | \ IMAGE_SCN_MEM_EXECUTE @ Characteristics .ascii ".data\0\0\0" .long __pecoff_data_size @ VirtualSize .long __pecoff_data_start - start @ VirtualAddress .long __pecoff_data_rawsize @ SizeOfRawData .long __pecoff_data_start - start @ PointerToRawData .long 0 @ PointerToRelocations .long 0 @ PointerToLineNumbers .short 0 @ NumberOfRelocations .short 0 @ NumberOfLineNumbers .long IMAGE_SCN_CNT_INITIALIZED_DATA | \ IMAGE_SCN_MEM_READ | \ IMAGE_SCN_MEM_WRITE @ Characteristics .set section_count, (. - section_table) / 40 .align 12 __efi_start: #endif .endm
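Because the MSDOS stub above only populates the "MZ" magic at offset 0 and the PE/COFF offset at 0x3c, the lookup a firmware loader performs on this image is tiny. A consumer-side sketch, assuming a little-endian reader; it is not code from the EFI stub itself:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static const uint8_t *find_pe_header(const uint8_t *image, size_t len)
{
	uint32_t pe_off;

	if (len < 0x40 || image[0] != 'M' || image[1] != 'Z')
		return NULL;

	memcpy(&pe_off, image + 0x3c, sizeof(pe_off));	/* ".long pe_header - start" */
	if (pe_off + 4 > len || memcmp(image + pe_off, "PE\0\0", 4) != 0)
		return NULL;

	return image + pe_off;		/* points at PE_MAGIC / coff_header */
}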
AirFortressIlikara/LS2K0300-linux-4.19
2,868
arch/arm/boot/compressed/ll_char_wr.S
/* * linux/arch/arm/lib/ll_char_wr.S * * Copyright (C) 1995, 1996 Russell King. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Speedups & 1bpp code (C) 1996 Philip Blundell & Russell King. * * 10-04-96 RMK Various cleanups & reduced register usage. * 08-04-98 RMK Shifts re-ordered */ @ Regs: [] = corruptible @ {} = used @ () = do not use #include <linux/linkage.h> #include <asm/assembler.h> .text LC0: .word LC0 .word bytes_per_char_h .word video_size_row .word acorndata_8x8 .word con_charconvtable /* * r0 = ptr * r1 = char * r2 = white */ ENTRY(ll_write_char) stmfd sp!, {r4 - r7, lr} @ @ Smashable regs: {r0 - r3}, [r4 - r7], (r8 - fp), [ip], (sp), [lr], (pc) @ /* * calculate offset into character table */ mov r1, r1, lsl #3 /* * calculate offset required for each row. */ adr ip, LC0 ldmia ip, {r3, r4, r5, r6, lr} sub ip, ip, r3 add r6, r6, ip add lr, lr, ip ldr r4, [r4, ip] ldr r5, [r5, ip] /* * Go to resolution-dependent routine... */ cmp r4, #4 blt Lrow1bpp add r0, r0, r5, lsl #3 @ Move to bottom of character orr r1, r1, #7 ldrb r7, [r6, r1] teq r4, #8 beq Lrow8bpplp @ @ Smashable regs: {r0 - r3}, [r4], {r5 - r7}, (r8 - fp), [ip], (sp), {lr}, (pc) @ Lrow4bpplp: ldr r7, [lr, r7, lsl #2] mul r7, r2, r7 sub r1, r1, #1 @ avoid using r7 directly after str r7, [r0, -r5]! ldrb r7, [r6, r1] ldr r7, [lr, r7, lsl #2] mul r7, r2, r7 tst r1, #7 @ avoid using r7 directly after str r7, [r0, -r5]! subne r1, r1, #1 ldrneb r7, [r6, r1] bne Lrow4bpplp ldmfd sp!, {r4 - r7, pc} @ @ Smashable regs: {r0 - r3}, [r4], {r5 - r7}, (r8 - fp), [ip], (sp), {lr}, (pc) @ Lrow8bpplp: mov ip, r7, lsr #4 ldr ip, [lr, ip, lsl #2] mul r4, r2, ip and ip, r7, #15 @ avoid r4 ldr ip, [lr, ip, lsl #2] @ avoid r4 mul ip, r2, ip @ avoid r4 sub r1, r1, #1 @ avoid ip sub r0, r0, r5 @ avoid ip stmia r0, {r4, ip} ldrb r7, [r6, r1] mov ip, r7, lsr #4 ldr ip, [lr, ip, lsl #2] mul r4, r2, ip and ip, r7, #15 @ avoid r4 ldr ip, [lr, ip, lsl #2] @ avoid r4 mul ip, r2, ip @ avoid r4 tst r1, #7 @ avoid ip sub r0, r0, r5 @ avoid ip stmia r0, {r4, ip} subne r1, r1, #1 ldrneb r7, [r6, r1] bne Lrow8bpplp ldmfd sp!, {r4 - r7, pc} @ @ Smashable regs: {r0 - r3}, [r4], {r5, r6}, [r7], (r8 - fp), [ip], (sp), [lr], (pc) @ Lrow1bpp: add r6, r6, r1 ldmia r6, {r4, r7} strb r4, [r0], r5 mov r4, r4, lsr #8 strb r4, [r0], r5 mov r4, r4, lsr #8 strb r4, [r0], r5 mov r4, r4, lsr #8 strb r4, [r0], r5 strb r7, [r0], r5 mov r7, r7, lsr #8 strb r7, [r0], r5 mov r7, r7, lsr #8 strb r7, [r0], r5 mov r7, r7, lsr #8 strb r7, [r0], r5 ldmfd sp!, {r4 - r7, pc} .bss ENTRY(con_charconvtable) .space 1024
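The 4bpp and 8bpp loops above lean on con_charconvtable: each 4-bit slice of the font row indexes a word in which every set bit has been widened to a 0x01 byte, so one multiply by the colour paints four pixels at once. A C sketch of the trick; the bit-to-byte ordering shown is illustrative, since the real table is filled in elsewhere by the console driver:

#include <stdint.h>

static uint32_t charconvtable[16];

static void build_charconvtable(void)
{
	for (int n = 0; n < 16; n++) {
		uint32_t expand = 0;

		for (int bit = 0; bit < 4; bit++)
			if (n & (1 << bit))
				expand |= 0x01u << (8 * bit);
		charconvtable[n] = expand;
	}
}

/* Four 8bpp pixels for one font nibble, as in
 * "ldr ip, [lr, ip, lsl #2]; mul ip, r2, ip". */
static uint32_t pixels_for_nibble(unsigned int nibble, uint32_t colour)
{
	return charconvtable[nibble & 15] * colour;
}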
AirFortressIlikara/LS2K0300-linux-4.19
1,291
arch/arm/boot/compressed/head-sa1100.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/arch/arm/boot/compressed/head-sa1100.S
 *
 * Copyright (C) 1999 Nicolas Pitre <nico@fluxnic.net>
 *
 * SA1100 specific tweaks. This is merged into head.S by the linker.
 *
 */

#include <linux/linkage.h>
#include <asm/mach-types.h>

		.section	".start", "ax"
		.arch	armv4

__SA1100_start:

		@ Preserve r8/r7 i.e. kernel entry values
#ifdef CONFIG_SA1100_COLLIE
		mov	r7, #MACH_TYPE_COLLIE
#endif
#ifdef CONFIG_SA1100_SIMPAD
		@ UNTIL we've something like an open bootldr
		mov	r7, #MACH_TYPE_SIMPAD	@should be 87
#endif
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		ands	r0, r0, #0x0d
		beq	99f

		@ Data cache might be active.
		@ Be sure to flush kernel binary out of the cache,
		@ whatever state it is, before it is turned off.
		@ This is done by fetching through currently executed
		@ memory to be sure we hit the same cache.
		bic	r2, pc, #0x1f
		add	r3, r2, #0x4000		@ 16 kb is quite enough...
1:		ldr	r0, [r2], #32
		teq	r2, r3
		bne	1b
		mcr	p15, 0, r0, c7, c10, 4	@ drain WB
		mcr	p15, 0, r0, c7, c7, 0	@ flush I & D caches

		@ disabling MMU and caches
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		bic	r0, r0, #0x0d		@ clear WB, DC, MMU
		bic	r0, r0, #0x1000		@ clear Icache
		mcr	p15, 0, r0, c1, c0, 0
99:
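The flush loop above works by displacement: touching 16 KiB of the currently executing image at one-cache-line strides pushes any stale dirty lines out of the data cache before it is switched off. A C restatement of the addressing only, since the real job has to be done by the in-place assembly:

#include <stdint.h>

static void flush_dcache_by_reading(uintptr_t pc_now)
{
	volatile const uint32_t *p   = (volatile const uint32_t *)(pc_now & ~0x1fUL);	/* bic r2, pc, #0x1f */
	volatile const uint32_t *end = p + 0x4000 / sizeof(*p);	/* 16 kb is quite enough */

	while (p < end) {
		(void)*p;			/* ldr r0, [r2], #32 */
		p += 32 / sizeof(*p);		/* step one cache line */
	}
}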
AirFortressIlikara/LS2K0300-linux-4.19
3,499
arch/arm/boot/compressed/vmlinux.lds.S
/* * Copyright (C) 2000 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifdef CONFIG_CPU_ENDIAN_BE8 #define ZIMAGE_MAGIC(x) ( (((x) >> 24) & 0x000000ff) | \ (((x) >> 8) & 0x0000ff00) | \ (((x) << 8) & 0x00ff0000) | \ (((x) << 24) & 0xff000000) ) #else #define ZIMAGE_MAGIC(x) (x) #endif OUTPUT_ARCH(arm) ENTRY(_start) SECTIONS { /DISCARD/ : { *(.ARM.exidx*) *(.ARM.extab*) /* * Discard any r/w data - this produces a link error if we have any, * which is required for PIC decompression. Local data generates * GOTOFF relocations, which prevents it being relocated independently * of the text/got segments. */ *(.data) } . = TEXT_START; _text = .; .text : { _start = .; *(.start) *(.text) *(.text.*) *(.fixup) *(.gnu.warning) *(.glue_7t) *(.glue_7) } .table : ALIGN(4) { _table_start = .; LONG(ZIMAGE_MAGIC(4)) LONG(ZIMAGE_MAGIC(0x5a534c4b)) LONG(ZIMAGE_MAGIC(__piggy_size_addr - _start)) LONG(ZIMAGE_MAGIC(_kernel_bss_size)) LONG(0) _table_end = .; } .rodata : { *(.rodata) *(.rodata.*) *(.data.rel.ro) } .piggydata : { *(.piggydata) __piggy_size_addr = . - 4; } . = ALIGN(4); _etext = .; .got.plt : { *(.got.plt) } _got_start = .; .got : { *(.got) } _got_end = .; /* ensure the zImage file size is always a multiple of 64 bits */ /* (without a dummy byte, ld just ignores the empty section) */ .pad : { BYTE(0); . = ALIGN(8); } #ifdef CONFIG_EFI_STUB .data : ALIGN(4096) { __pecoff_data_start = .; /* * The EFI stub always executes from RAM, and runs strictly before the * decompressor, so we can make an exception for its r/w data, and keep it */ *(.data.efistub) __pecoff_data_end = .; /* * PE/COFF mandates a file size which is a multiple of 512 bytes if the * section size equals or exceeds 4 KB */ . = ALIGN(512); } __pecoff_data_rawsize = . - ADDR(.data); #endif _edata = .; /* * The image_end section appears after any additional loadable sections * that the linker may decide to insert in the binary image. Having * this symbol allows further debug in the near future. */ .image_end (NOLOAD) : { /* * EFI requires that the image is aligned to 512 bytes, and appended * DTB requires that we know where the end of the image is. Ensure * that both are satisfied by ensuring that there are no additional * sections emitted into the decompressor image. */ _edata_real = .; } _magic_sig = ZIMAGE_MAGIC(0x016f2818); _magic_start = ZIMAGE_MAGIC(_start); _magic_end = ZIMAGE_MAGIC(_edata); _magic_table = ZIMAGE_MAGIC(_table_start - _start); . = BSS_START; __bss_start = .; .bss : { *(.bss) } _end = .; . = ALIGN(8); /* the stack must be 64-bit aligned */ .stack : { *(.stack) } PROVIDE(__pecoff_data_size = ALIGN(512) - ADDR(.data)); PROVIDE(__pecoff_end = ALIGN(512)); .stab 0 : { *(.stab) } .stabstr 0 : { *(.stabstr) } .stab.excl 0 : { *(.stab.excl) } .stab.exclstr 0 : { *(.stab.exclstr) } .stab.index 0 : { *(.stab.index) } .stab.indexstr 0 : { *(.stab.indexstr) } .comment 0 : { *(.comment) } } ASSERT(_edata_real == _edata, "error: zImage file size is incorrect");
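ZIMAGE_MAGIC() exists so that the magic, start and end words are stored little-endian in the file even when a BE8 kernel is linked big-endian, which lets any boot loader check them the same way. A small C sketch of the swap and of the consumer-side check; the 0x24 offset of the magic word is an assumption taken from the header words emitted in head.S, not something this script states:

#include <stdint.h>
#include <string.h>

/* Equivalent of ZIMAGE_MAGIC(): swap only for a big-endian (BE8) build. */
static inline uint32_t zimage_word(uint32_t x, int be8_build)
{
	return be8_build ? __builtin_bswap32(x) : x;
}

/* Loader-side check of _magic_sig, assuming a little-endian reader. */
static int looks_like_zimage(const unsigned char *img, size_t len)
{
	uint32_t sig;

	if (len < 0x28)
		return 0;
	memcpy(&sig, img + 0x24, sizeof(sig));	/* _magic_sig */
	return sig == 0x016f2818u;
}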
AirFortressIlikara/LS2K0300-linux-4.19
3,603
arch/arm/boot/compressed/head-sharpsl.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/arch/arm/boot/compressed/head-sharpsl.S * * Copyright (C) 2004-2005 Richard Purdie <rpurdie@rpsys.net> * * Sharp's bootloader doesn't pass any kind of machine ID * so we have to figure out the machine for ourselves... * * Support for Poodle, Corgi (SL-C700), Shepherd (SL-C750) * Husky (SL-C760), Tosa (SL-C6000), Spitz (SL-C3000), * Akita (SL-C1000) and Borzoi (SL-C3100). * */ #include <linux/linkage.h> #include <asm/mach-types.h> #ifndef CONFIG_PXA_SHARPSL #error What am I doing here... #endif .section ".start", "ax" __SharpSL_start: /* Check for TC6393 - if found we have a Tosa */ ldr r7, .TOSAID mov r1, #0x10000000 @ Base address of TC6393 chip mov r6, #0x03 ldrh r3, [r1, #8] @ Load TC6393XB Revison: This is 0x0003 cmp r6, r3 beq .SHARPEND @ Success -> tosa /* Check for pxa270 - if found, branch */ mrc p15, 0, r4, c0, c0 @ Get Processor ID and r4, r4, #0xffffff00 ldr r3, .PXA270ID cmp r4, r3 beq .PXA270 /* Check for w100 - if not found we have a Poodle */ ldr r1, .W100ADDR @ Base address of w100 chip + regs offset mov r6, #0x31 @ Load Magic Init value str r6, [r1, #0x280] @ to SCRATCH_UMSK mov r5, #0x3000 .W100LOOP: subs r5, r5, #1 bne .W100LOOP mov r6, #0x30 @ Load 2nd Magic Init value str r6, [r1, #0x280] @ to SCRATCH_UMSK ldr r6, [r1, #0] @ Load Chip ID ldr r3, .W100ID ldr r7, .POODLEID cmp r6, r3 bne .SHARPEND @ We have no w100 - Poodle /* Check for pxa250 - if found we have a Corgi */ ldr r7, .CORGIID ldr r3, .PXA255ID cmp r4, r3 blo .SHARPEND @ We have a PXA250 - Corgi /* Check for 64MiB flash - if found we have a Shepherd */ bl get_flash_ids ldr r7, .SHEPHERDID cmp r3, #0x76 @ 64MiB flash beq .SHARPEND @ We have Shepherd /* Must be a Husky */ ldr r7, .HUSKYID @ Must be Husky b .SHARPEND .PXA270: /* Check for 16MiB flash - if found we have Spitz */ bl get_flash_ids ldr r7, .SPITZID cmp r3, #0x73 @ 16MiB flash beq .SHARPEND @ We have Spitz /* Check for a second SCOOP chip - if found we have Borzoi */ ldr r1, .SCOOP2ADDR ldr r7, .BORZOIID mov r6, #0x0140 strh r6, [r1] ldrh r6, [r1] cmp r6, #0x0140 beq .SHARPEND @ We have Borzoi /* Must be Akita */ ldr r7, .AKITAID b .SHARPEND @ We have Borzoi .PXA255ID: .word 0x69052d00 @ PXA255 Processor ID .PXA270ID: .word 0x69054100 @ PXA270 Processor ID .W100ID: .word 0x57411002 @ w100 Chip ID .W100ADDR: .word 0x08010000 @ w100 Chip ID Reg Address .SCOOP2ADDR: .word 0x08800040 .POODLEID: .word MACH_TYPE_POODLE .CORGIID: .word MACH_TYPE_CORGI .SHEPHERDID: .word MACH_TYPE_SHEPHERD .HUSKYID: .word MACH_TYPE_HUSKY .TOSAID: .word MACH_TYPE_TOSA .SPITZID: .word MACH_TYPE_SPITZ .AKITAID: .word MACH_TYPE_AKITA .BORZOIID: .word MACH_TYPE_BORZOI /* * Return: r2 - NAND Manufacturer ID * r3 - NAND Chip ID * Corrupts: r1 */ get_flash_ids: mov r1, #0x0c000000 @ Base address of NAND chip ldrb r3, [r1, #24] @ Load FLASHCTL bic r3, r3, #0x11 @ SET NCE orr r3, r3, #0x0a @ SET CLR + FLWP strb r3, [r1, #24] @ Save to FLASHCTL mov r2, #0x90 @ Command "readid" strb r2, [r1, #20] @ Save to FLASHIO bic r3, r3, #2 @ CLR CLE orr r3, r3, #4 @ SET ALE strb r3, [r1, #24] @ Save to FLASHCTL mov r2, #0 @ Address 0x00 strb r2, [r1, #20] @ Save to FLASHIO bic r3, r3, #4 @ CLR ALE strb r3, [r1, #24] @ Save to FLASHCTL .fids1: ldrb r3, [r1, #24] @ Load FLASHCTL tst r3, #32 @ Is chip ready? beq .fids1 ldrb r2, [r1, #20] @ NAND Manufacturer ID ldrb r3, [r1, #20] @ NAND Chip ID mov pc, lr .SHARPEND:
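Stripped of the register pokes, the probe order above is a short decision tree. Here it is in C, with tc6393_revision(), read_cpuid(), nand_chip_id(), w100_present() and scoop2_present() as hypothetical stand-ins for the MMIO and CP15 accesses (they are not real kernel functions); the ID constants come from the .word table at the end of the file:

extern unsigned int tc6393_revision(void);	/* halfword at 0x10000008 */
extern unsigned int read_cpuid(void);		/* CP15 c0 processor ID */
extern unsigned int nand_chip_id(void);		/* get_flash_ids, chip ID byte */
extern int w100_present(void);			/* chip ID at 0x08010000 == 0x57411002 */
extern int scoop2_present(void);		/* write/read-back test at 0x08800040 */

enum sharpsl_machine { POODLE, CORGI, SHEPHERD, HUSKY, TOSA, SPITZ, AKITA, BORZOI };

static enum sharpsl_machine sharpsl_detect(void)
{
	if (tc6393_revision() == 0x0003)			/* TC6393XB found */
		return TOSA;

	if ((read_cpuid() & 0xffffff00) == 0x69054100) {	/* PXA270 */
		if (nand_chip_id() == 0x73)			/* 16MiB flash */
			return SPITZ;
		return scoop2_present() ? BORZOI : AKITA;
	}

	if (!w100_present())					/* no w100 -> Poodle */
		return POODLE;
	if ((read_cpuid() & 0xffffff00) < 0x69052d00)		/* below PXA255 */
		return CORGI;
	return nand_chip_id() == 0x76 ? SHEPHERD : HUSKY;	/* 64MiB -> Shepherd */
}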
AIS-Bonn/humanoid_op_ros
9,423
src/nimbro/hardware/cm730/firmware/stm32f10x_lib/src/cortexm3_macro.s
/*;******************** (C) COPYRIGHT 2007 STMicroelectronics ****************** ;* File Name : cortexm3_macro.s ;* Author : MCD Application Team ;* Version : V1.0 ;* Date : 10/08/2007 ;* Description : Instruction wrappers for special Cortex-M3 instructions. ;******************************************************************************* ; THE PRESENT SOFTWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS ; WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE TIME. ; AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY DIRECT, ; INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING FROM THE ; CONTENT OF SUCH SOFTWARE AND/OR THE USE MADE BY CUSTOMERS OF THE CODING ; INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS. ;******************************************************************************/ .cpu cortex-m3 .fpu softvfp .syntax unified .thumb .text /*; Exported functions*/ .globl __WFI .globl __WFE .globl __SEV .globl __ISB .globl __DSB .globl __DMB .globl __SVC .globl __MRS_CONTROL .globl __MSR_CONTROL .globl __MRS_PSP .globl __MSR_PSP .globl __MRS_MSP .globl __MSR_MSP .globl __SETPRIMASK .globl __RESETPRIMASK .globl __SETFAULTMASK .globl __RESETFAULTMASK .globl __BASEPRICONFIG .globl __GetBASEPRI .globl __REV_HalfWord .globl __REV_Word /*;***************************************************************************** ; Function Name : __WFI ; Description : Assembler function for the WFI instruction. ; Input : None ; Return : None ;******************************************************************************/ .thumb_func __WFI: WFI BX r14 /*;***************************************************************************** ; Function Name : __WFE ; Description : Assembler function for the WFE instruction. ; Input : None ; Return : None ;******************************************************************************/ .thumb_func __WFE: WFE BX r14 /*;***************************************************************************** ; Function Name : __SEV ; Description : Assembler function for the SEV instruction. ; Input : None ; Return : None ;******************************************************************************/ .thumb_func __SEV: SEV BX r14 /*;***************************************************************************** ; Function Name : __ISB ; Description : Assembler function for the ISB instruction. ; Input : None ; Return : None ;******************************************************************************/ .thumb_func __ISB: ISB BX r14 /*;***************************************************************************** ; Function Name : __DSB ; Description : Assembler function for the DSB instruction. ; Input : None ; Return : None ;******************************************************************************/ .thumb_func __DSB: DSB BX r14 /*;***************************************************************************** ; Function Name : __DMB ; Description : Assembler function for the DMB instruction. ; Input : None ; Return : None ;******************************************************************************/ .thumb_func __DMB: DMB BX r14 /*;***************************************************************************** ; Function Name : __SVC ; Description : Assembler function for the SVC instruction. 
; Input : None ; Return : None ;******************************************************************************/ .thumb_func __SVC: SVC 0x01 BX r14 /*;***************************************************************************** ; Function Name : __MRS_CONTROL ; Description : Assembler function for the MRS instruction. ; Input : None ; Return : - r4 : Cortex-M3 CONTROL register value. ;******************************************************************************/ .thumb_func __MRS_CONTROL: MRS r0,control BX r14 /*;***************************************************************************** ; Function Name : __MSR_CONTROL ; Description : Assembler function for the MSR instruction. ; Input : - R0 : Cortex-M3 CONTROL register new value. ; Return : None ;******************************************************************************/ .thumb_func __MSR_CONTROL: MSR control, r0 ISB BX r14 /*;***************************************************************************** ; Function Name : __MRS_PSP ; Description : Assembler function for the MRS instruction. ; Input : None ; Return : - r0 : Process Stack value. ;******************************************************************************/ .thumb_func __MRS_PSP: MRS r0, psp BX r14 /*;***************************************************************************** ; Function Name : __MSR_PSP ; Description : Assembler function for the MSR instruction. ; Input : - r0 : Process Stack new value. ; Return : None ;******************************************************************************/ .thumb_func __MSR_PSP: MSR psp, r0 /* set Process Stack value*/ BX r14 /*;***************************************************************************** ; Function Name : __MRS_MSP ; Description : Assembler function for the MRS instruction. ; Input : None ; Return : - r0 : Main Stack value. ;******************************************************************************/ .thumb_func __MRS_MSP: MRS r0, msp BX r14 /*;***************************************************************************** ; Function Name : __MSR_MSP ; Description : Assembler function for the MSR instruction. ; Input : - r0 : Main Stack new value. ; Return : None ;******************************************************************************/ .thumb_func __MSR_MSP: MSR msp, r0 /*; set Main Stack value*/ BX r14 /*;***************************************************************************** ; Function Name : __SETPRIMASK ; Description : Assembler function to set the PRIMASK. ; Input : None ; Return : None ;******************************************************************************/ .thumb_func __SETPRIMASK: CPSID i BX r14 /*;***************************************************************************** ; Function Name : __RESETPRIMASK ; Description : Assembler function to reset the PRIMASK. ; Input : None ; Return : None ;******************************************************************************/ .thumb_func __RESETPRIMASK: CPSIE i BX r14 /*;***************************************************************************** ; Function Name : __SETFAULTMASK ; Description : Assembler function to set the FAULTMASK. ; Input : None ; Return : None ;******************************************************************************/ .thumb_func __SETFAULTMASK: CPSID f BX r14 /*;***************************************************************************** ; Function Name : __RESETFAULTMASK ; Description : Assembler function to reset the FAULTMASK. 
; Input : None ; Return : None ;******************************************************************************/ .thumb_func __RESETFAULTMASK: CPSIE f BX r14 /*;***************************************************************************** ; Function Name : __BASEPRICONFIG ; Description : Assembler function to set the Base Priority. ; Input : - r0 : Base Priority new value ; Return : None ;******************************************************************************/ .thumb_func __BASEPRICONFIG: MSR basepri, r0 BX r14 /*;***************************************************************************** ; Function Name : __GetBASEPRI ; Description : Assembler function to get the Base Priority value. ; Input : None ; Return : - r0 : Base Priority value ;******************************************************************************/ .thumb_func __GetBASEPRI: MRS r0, basepri_max BX r14 /*;***************************************************************************** ; Function Name : __REV_HalfWord ; Description : Reverses the byte order in HalfWord(16-bit) input variable. ; Input : - r0 : specifies the input variable ; Return : - r0 : holds the variable value after byte reversing. ;******************************************************************************/ .thumb_func __REV_HalfWord: REV16 r0, r0 BX r14 /*;***************************************************************************** ; Function Name : __REV_Word ; Description : Reverses the byte order in Word(32-bit) input variable. ; Input : - r0 : specifies the input variable ; Return : - r0 : holds the variable value after byte reversing. ;******************************************************************************/ .thumb_func __REV_Word: REV r0, r0 BX r14 .end /*;*************** (C) COPYRIGHT 2007 STMicroelectronics *****END OF FILE******/
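The wrappers above exist so that C code can reach instructions the compiler does not emit directly. A minimal sketch of how a few of them might be declared and used from C follows; the prototypes and the example routine are assumptions derived from the AAPCS register usage visible in the assembly (argument and result in r0), not copied from an ST header.

extern void __WFI(void);                                /* wait for interrupt           */
extern void __SETPRIMASK(void);                         /* CPSID i: mask interrupts     */
extern void __RESETPRIMASK(void);                       /* CPSIE i: unmask interrupts   */
extern void __MSR_PSP(unsigned long top_of_stack);      /* load the process stack ptr   */
extern unsigned long __MRS_PSP(void);                   /* read the process stack ptr   */
extern unsigned short __REV_HalfWord(unsigned short v); /* swap the two bytes of v      */

static void example_critical_section_then_sleep(void)
{
    __SETPRIMASK();     /* hypothetical usage: mask interrupts around shared state */
    /* ... touch data shared with interrupt handlers ... */
    __RESETPRIMASK();   /* unmask again */
    __WFI();            /* sleep until the next interrupt */
}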
AirFortressIlikara/LS2K0300-linux-4.19
2,635
arch/arm/boot/bootp/init.S
/* * linux/arch/arm/boot/bootp/init.S * * Copyright (C) 2000-2003 Russell King. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * "Header" file for splitting kernel + initrd. Note that we pass * r0 through to r3 straight through. * * This demonstrates how to append code to the start of the kernel * zImage, and boot the kernel without copying it around. This * example would be simpler; if we didn't have an object of unknown * size immediately following the kernel, we could build this into * a binary blob, and concatenate the zImage using the cat command. */ .section .start,#alloc,#execinstr .type _start, #function .globl _start _start: add lr, pc, #-0x8 @ lr = current load addr adr r13, data ldmia r13!, {r4-r6} @ r5 = dest, r6 = length add r4, r4, lr @ r4 = initrd_start + load addr bl move @ move the initrd /* * Setup the initrd parameters to pass to the kernel. This can only be * passed in via the tagged list. */ ldmia r13, {r5-r9} @ get size and addr of initrd @ r5 = ATAG_CORE @ r6 = ATAG_INITRD2 @ r7 = initrd start @ r8 = initrd end @ r9 = param_struct address ldr r10, [r9, #4] @ get first tag teq r10, r5 @ is it ATAG_CORE? /* * If we didn't find a valid tag list, create a dummy ATAG_CORE entry. */ movne r10, #0 @ terminator movne r4, #2 @ Size of this entry (2 words) stmneia r9, {r4, r5, r10} @ Size, ATAG_CORE, terminator /* * find the end of the tag list, and then add an INITRD tag on the end. * If there is already an INITRD tag, then we ignore it; the last INITRD * tag takes precedence. */ taglist: ldr r10, [r9, #0] @ tag length teq r10, #0 @ last tag (zero length)? addne r9, r9, r10, lsl #2 bne taglist mov r5, #4 @ Size of initrd tag (4 words) stmia r9, {r5, r6, r7, r8, r10} b kernel_start @ call kernel /* * Move the block of memory length r6 from address r4 to address r5 */ move: ldmia r4!, {r7 - r10} @ move 32-bytes at a time stmia r5!, {r7 - r10} ldmia r4!, {r7 - r10} stmia r5!, {r7 - r10} subs r6, r6, #8 * 4 bcs move mov pc, lr .size _start, . - _start .align .type data,#object data: .word initrd_start @ source initrd address .word initrd_phys @ destination initrd address .word initrd_size @ initrd size .word 0x54410001 @ r5 = ATAG_CORE .word 0x54420005 @ r6 = ATAG_INITRD2 .word initrd_phys @ r7 .word initrd_size @ r8 .word params_phys @ r9 .size data, . - data
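The taglist walk above is easier to follow next to a C model of the ATAG layout it manipulates: every tag begins with a size-in-words and a tag id, the list ends with a zero-size entry, and the INITRD2 tag carries the physical start and byte size of the initrd. The sketch below only models the append step and uses illustrative struct and function names, not the kernel's asm/setup.h definitions.

#include <stdint.h>

#define ATAG_CORE    0x54410001    /* same constants as the data block above */
#define ATAG_INITRD2 0x54420005

struct tag_header {
    uint32_t size;   /* tag length in 32-bit words, header included */
    uint32_t tag;    /* tag identifier                              */
};

/* Walk to the zero-size terminator and append an INITRD2 tag there,
 * mirroring the five-word stmia in the assembly. */
static void append_initrd2(uint32_t *params, uint32_t start, uint32_t bytes)
{
    struct tag_header *t = (struct tag_header *)params;

    while (t->size != 0)                                    /* find terminator */
        t = (struct tag_header *)((uint32_t *)t + t->size);

    uint32_t *p = (uint32_t *)t;
    p[0] = 4;               /* tag length in words           */
    p[1] = ATAG_INITRD2;
    p[2] = start;           /* physical start of the initrd  */
    p[3] = bytes;           /* initrd size in bytes          */
    p[4] = 0;               /* new zero-size list terminator */
}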
AirFortressIlikara/LS2K0300-linux-4.19
1,415
arch/arm/include/debug/msm.S
/* * * Copyright (C) 2007 Google, Inc. * Copyright (c) 2011, Code Aurora Forum. All rights reserved. * Author: Brian Swetland <swetland@google.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ .macro addruart, rp, rv, tmp ldr \rp, =CONFIG_DEBUG_UART_PHYS ldr \rv, =CONFIG_DEBUG_UART_VIRT .endm .macro senduart, rd, rx ARM_BE8(rev \rd, \rd ) @ Write the 1 character to UARTDM_TF str \rd, [\rx, #0x70] .endm .macro waituart, rd, rx @ check for TX_EMT in UARTDM_SR ldr \rd, [\rx, #0x08] ARM_BE8(rev \rd, \rd ) tst \rd, #0x08 bne 1002f @ wait for TXREADY in UARTDM_ISR 1001: ldr \rd, [\rx, #0x14] ARM_BE8(rev \rd, \rd ) tst \rd, #0x80 beq 1001b 1002: @ Clear TX_READY by writing to the UARTDM_CR register mov \rd, #0x300 ARM_BE8(rev \rd, \rd ) str \rd, [\rx, #0x10] @ Write 0x1 to NCF register mov \rd, #0x1 ARM_BE8(rev \rd, \rd ) str \rd, [\rx, #0x40] @ UARTDM reg. Read to induce delay ldr \rd, [\rx, #0x08] .endm .macro busyuart, rd, rx .endm
AirFortressIlikara/LS2K0300-linux-4.19
1,789
arch/arm/include/debug/sa1100.S
/* arch/arm/include/debug/sa1100.S * * Debugging macro include header * * Copyright (C) 1994-1999 Russell King * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #define UTCR3 0x0c #define UTDR 0x14 #define UTSR1 0x20 #define UTCR3_TXE 0x00000002 /* Transmit Enable */ #define UTSR1_TBY 0x00000001 /* Transmitter BusY (read) */ #define UTSR1_TNF 0x00000004 /* Transmit FIFO Not Full (read) */ .macro addruart, rp, rv, tmp mrc p15, 0, \rp, c1, c0 tst \rp, #1 @ MMU enabled? moveq \rp, #0x80000000 @ physical base address movne \rp, #0xf8000000 @ virtual address @ We probe for the active serial port here, coherently with @ the comment in arch/arm/mach-sa1100/include/mach/uncompress.h. @ We assume r1 can be clobbered. @ see if Ser3 is active add \rp, \rp, #0x00050000 ldr \rv, [\rp, #UTCR3] tst \rv, #UTCR3_TXE @ if Ser3 is inactive, then try Ser1 addeq \rp, \rp, #(0x00010000 - 0x00050000) ldreq \rv, [\rp, #UTCR3] tsteq \rv, #UTCR3_TXE @ if Ser1 is inactive, then try Ser2 addeq \rp, \rp, #(0x00030000 - 0x00010000) ldreq \rv, [\rp, #UTCR3] tsteq \rv, #UTCR3_TXE @ clear top bits, and generate both phys and virt addresses lsl \rp, \rp, #8 lsr \rp, \rp, #8 orr \rv, \rp, #0xf8000000 @ virtual orr \rp, \rp, #0x80000000 @ physical .endm .macro senduart,rd,rx str \rd, [\rx, #UTDR] .endm .macro waituart,rd,rx 1001: ldr \rd, [\rx, #UTSR1] tst \rd, #UTSR1_TNF beq 1001b .endm .macro busyuart,rd,rx 1001: ldr \rd, [\rx, #UTSR1] tst \rd, #UTSR1_TBY bne 1001b .endm
AirFortressIlikara/LS2K0300-linux-4.19
1,504
arch/arm/include/debug/icedcc.S
/* * arch/arm/include/debug/icedcc.S * * Copyright (C) 1994-1999 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ @@ debug using ARM EmbeddedICE DCC channel .macro addruart, rp, rv, tmp .endm #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7) .macro senduart, rd, rx mcr p14, 0, \rd, c0, c5, 0 .endm .macro busyuart, rd, rx 1001: mrc p14, 0, \rx, c0, c1, 0 tst \rx, #0x20000000 beq 1001b .endm .macro waituart, rd, rx mov \rd, #0x2000000 1001: subs \rd, \rd, #1 bmi 1002f mrc p14, 0, \rx, c0, c1, 0 tst \rx, #0x20000000 bne 1001b 1002: .endm #elif defined(CONFIG_CPU_XSCALE) .macro senduart, rd, rx mcr p14, 0, \rd, c8, c0, 0 .endm .macro busyuart, rd, rx 1001: mrc p14, 0, \rx, c14, c0, 0 tst \rx, #0x10000000 beq 1001b .endm .macro waituart, rd, rx mov \rd, #0x10000000 1001: subs \rd, \rd, #1 bmi 1002f mrc p14, 0, \rx, c14, c0, 0 tst \rx, #0x10000000 bne 1001b 1002: .endm #else .macro senduart, rd, rx mcr p14, 0, \rd, c1, c0, 0 .endm .macro busyuart, rd, rx 1001: mrc p14, 0, \rx, c0, c0, 0 tst \rx, #2 beq 1001b .endm .macro waituart, rd, rx mov \rd, #0x2000000 1001: subs \rd, \rd, #1 bmi 1002f mrc p14, 0, \rx, c0, c0, 0 tst \rx, #2 bne 1001b 1002: .endm #endif /* CONFIG_CPU_V6 */
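For the ARMv6/v7 case the macros above poll the DCC status through cp14 c0,c1,0 and push the character out through cp14 c0,c5,0. A rough equivalent in C with GCC inline assembly is sketched below; it mirrors the macro behaviour (spin while bit 29, the TX-full flag, is set) but omits the bounded retry count used by waituart, and it is an illustration rather than a kernel API.

static void dcc_putc(char c)
{
    unsigned int dscr;

    do {
        /* mrc p14, 0, <rd>, c0, c1, 0 : read the debug status register */
        asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (dscr));
    } while (dscr & 0x20000000);    /* spin while the DCC TX register is full */

    /* mcr p14, 0, <rd>, c0, c5, 0 : write one character to the TX register */
    asm volatile("mcr p14, 0, %0, c0, c5, 0" : : "r" (c));
}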
AirFortressIlikara/LS2K0300-linux-4.19
1,216
arch/arm/include/debug/s3c24xx.S
/* arch/arm/mach-s3c2410/include/mach/debug-macro.S * * Debugging macro include header * * Copyright (C) 1994-1999 Russell King * Copyright (C) 2005 Simtec Electronics * * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/serial_s3c.h> #define S3C2410_UART1_OFF (0x4000) .macro addruart, rp, rv, tmp ldr \rp, = CONFIG_DEBUG_UART_PHYS ldr \rv, = CONFIG_DEBUG_UART_VIRT .endm .macro fifo_full_s3c2410 rd, rx ldr \rd, [\rx, # S3C2410_UFSTAT] tst \rd, #S3C2410_UFSTAT_TXFULL .endm .macro fifo_level_s3c2410 rd, rx ldr \rd, [\rx, # S3C2410_UFSTAT] and \rd, \rd, #S3C2410_UFSTAT_TXMASK .endm /* Select the correct implementation depending on the configuration. The * S3C2440 will get selected by default, as these are the most widely * used variants of these */ #if defined(CONFIG_DEBUG_S3C2410_UART) #define fifo_full fifo_full_s3c2410 #define fifo_level fifo_level_s3c2410 #endif /* include the rest of the code which will do the work */ #include <debug/samsung.S>
AirFortressIlikara/LS2K0300-linux-4.19
1,650
arch/arm/include/debug/zynq.S
/* * Debugging macro include header * * Copyright (C) 2011 Xilinx * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define UART_CR_OFFSET 0x00 /* Control Register [8:0] */ #define UART_SR_OFFSET 0x2C /* Channel Status [11:0] */ #define UART_FIFO_OFFSET 0x30 /* FIFO [15:0] or [7:0] */ #define UART_SR_TXFULL 0x00000010 /* TX FIFO full */ #define UART_SR_TXEMPTY 0x00000008 /* TX FIFO empty */ #define UART0_PHYS 0xE0000000 #define UART0_VIRT 0xF0800000 #define UART1_PHYS 0xE0001000 #define UART1_VIRT 0xF0801000 #if IS_ENABLED(CONFIG_DEBUG_ZYNQ_UART1) # define LL_UART_PADDR UART1_PHYS # define LL_UART_VADDR UART1_VIRT #else # define LL_UART_PADDR UART0_PHYS # define LL_UART_VADDR UART0_VIRT #endif .macro addruart, rp, rv, tmp ldr \rp, =LL_UART_PADDR @ physical ldr \rv, =LL_UART_VADDR @ virtual .endm .macro senduart,rd,rx strb \rd, [\rx, #UART_FIFO_OFFSET] @ TXDATA .endm .macro waituart,rd,rx 1001: ldr \rd, [\rx, #UART_SR_OFFSET] ARM_BE8( rev \rd, \rd ) tst \rd, #UART_SR_TXEMPTY beq 1001b .endm .macro busyuart,rd,rx 1002: ldr \rd, [\rx, #UART_SR_OFFSET] @ get status register ARM_BE8( rev \rd, \rd ) tst \rd, #UART_SR_TXFULL @ bne 1002b @ wait if FIFO is full .endm
AirFortressIlikara/LS2K0300-linux-4.19
1,864
arch/arm/include/debug/samsung.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright 2005, 2007 Simtec Electronics * http://armlinux.simtec.co.uk/ * Ben Dooks <ben@simtec.co.uk> */ #include <linux/serial_s3c.h> /* The S5PV210/S5PC110 implementations are as below. */ .macro fifo_level_s5pv210 rd, rx ldr \rd, [\rx, # S3C2410_UFSTAT] ARM_BE8(rev \rd, \rd) and \rd, \rd, #S5PV210_UFSTAT_TXMASK .endm .macro fifo_full_s5pv210 rd, rx ldr \rd, [\rx, # S3C2410_UFSTAT] ARM_BE8(rev \rd, \rd) tst \rd, #S5PV210_UFSTAT_TXFULL .endm /* The S3C2440 implementations are used by default as they are the * most widely re-used */ .macro fifo_level_s3c2440 rd, rx ldr \rd, [\rx, # S3C2410_UFSTAT] ARM_BE8(rev \rd, \rd) and \rd, \rd, #S3C2440_UFSTAT_TXMASK .endm #ifndef fifo_level #define fifo_level fifo_level_s3c2440 #endif .macro fifo_full_s3c2440 rd, rx ldr \rd, [\rx, # S3C2410_UFSTAT] ARM_BE8(rev \rd, \rd) tst \rd, #S3C2440_UFSTAT_TXFULL .endm #ifndef fifo_full #define fifo_full fifo_full_s3c2440 #endif .macro senduart,rd,rx strb \rd, [\rx, # S3C2410_UTXH] .endm .macro busyuart, rd, rx ldr \rd, [\rx, # S3C2410_UFCON] ARM_BE8(rev \rd, \rd) tst \rd, #S3C2410_UFCON_FIFOMODE @ fifo enabled? beq 1001f @ @ FIFO enabled... 1003: fifo_full \rd, \rx bne 1003b b 1002f 1001: @ busy waiting for non fifo ldr \rd, [\rx, # S3C2410_UTRSTAT] ARM_BE8(rev \rd, \rd) tst \rd, #S3C2410_UTRSTAT_TXFE beq 1001b 1002: @ exit busyuart .endm .macro waituart,rd,rx ldr \rd, [\rx, # S3C2410_UFCON] ARM_BE8(rev \rd, \rd) tst \rd, #S3C2410_UFCON_FIFOMODE @ fifo enabled? beq 1001f @ @ FIFO enabled... 1003: fifo_level \rd, \rx teq \rd, #0 bne 1003b b 1002f 1001: @ idle waiting for non fifo ldr \rd, [\rx, # S3C2410_UTRSTAT] ARM_BE8(rev \rd, \rd) tst \rd, #S3C2410_UTRSTAT_TXFE beq 1001b 1002: @ exit waituart .endm
AirFortressIlikara/LS2K0300-linux-4.19
1,033
arch/arm/include/debug/vf.S
/* * Copyright 2013 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #define VF_UART0_BASE_ADDR 0x40027000 #define VF_UART1_BASE_ADDR 0x40028000 #define VF_UART2_BASE_ADDR 0x40029000 #define VF_UART3_BASE_ADDR 0x4002a000 #define VF_UART_BASE_ADDR(n) VF_UART##n##_BASE_ADDR #define VF_UART_BASE(n) VF_UART_BASE_ADDR(n) #define VF_UART_PHYSICAL_BASE VF_UART_BASE(CONFIG_DEBUG_VF_UART_PORT) #define VF_UART_VIRTUAL_BASE 0xfe000000 .macro addruart, rp, rv, tmp ldr \rp, =VF_UART_PHYSICAL_BASE @ physical and \rv, \rp, #0xffffff @ offset within 16MB section add \rv, \rv, #VF_UART_VIRTUAL_BASE .endm .macro senduart, rd, rx strb \rd, [\rx, #0x7] @ Data Register .endm .macro busyuart, rd, rx 1001: ldrb \rd, [\rx, #0x4] @ Status Register 1 tst \rd, #1 << 6 @ TC beq 1001b @ wait until transmit done .endm .macro waituart,rd,rx .endm
AirFortressIlikara/LS2K0300-linux-4.19
1,223
arch/arm/include/debug/8250.S
/* * arch/arm/include/debug/8250.S * * Copyright (C) 1994-2013 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/serial_reg.h> .macro addruart, rp, rv, tmp ldr \rp, =CONFIG_DEBUG_UART_PHYS ldr \rv, =CONFIG_DEBUG_UART_VIRT .endm #ifdef CONFIG_DEBUG_UART_8250_WORD .macro store, rd, rx:vararg ARM_BE8(rev \rd, \rd) str \rd, \rx ARM_BE8(rev \rd, \rd) .endm .macro load, rd, rx:vararg ldr \rd, \rx ARM_BE8(rev \rd, \rd) .endm #else .macro store, rd, rx:vararg strb \rd, \rx .endm .macro load, rd, rx:vararg ldrb \rd, \rx .endm #endif #define UART_SHIFT CONFIG_DEBUG_UART_8250_SHIFT .macro senduart,rd,rx store \rd, [\rx, #UART_TX << UART_SHIFT] .endm .macro busyuart,rd,rx 1002: load \rd, [\rx, #UART_LSR << UART_SHIFT] and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE teq \rd, #UART_LSR_TEMT | UART_LSR_THRE bne 1002b .endm .macro waituart,rd,rx #ifdef CONFIG_DEBUG_UART_8250_FLOW_CONTROL 1001: load \rd, [\rx, #UART_MSR << UART_SHIFT] tst \rd, #UART_MSR_CTS beq 1001b #endif .endm
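Expanded for a memory-mapped 8250 in byte mode, the senduart/busyuart pair above amounts to the short C routine below. The base address is a made-up placeholder (the real one comes from CONFIG_DEBUG_UART_VIRT); the register offsets and LSR bits mirror linux/serial_reg.h, and UART_SHIFT spaces the registers out the same way CONFIG_DEBUG_UART_8250_SHIFT does.

#include <stdint.h>

#define UART_BASE     ((volatile uint8_t *)0xf0000000u)  /* assumed virtual base */
#define UART_SHIFT    2
#define UART_TX       0x00
#define UART_LSR      0x05
#define UART_LSR_THRE 0x20
#define UART_LSR_TEMT 0x40

static void debug_putc(char c)
{
    /* senduart: store the character into the TX holding register */
    UART_BASE[UART_TX << UART_SHIFT] = (uint8_t)c;

    /* busyuart: wait until both the holding register and the shifter are
     * empty, i.e. LSR reports THRE and TEMT at the same time */
    while ((UART_BASE[UART_LSR << UART_SHIFT] &
            (UART_LSR_THRE | UART_LSR_TEMT)) !=
           (UART_LSR_THRE | UART_LSR_TEMT))
        ;
}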
AirFortressIlikara/LS2K0300-linux-4.19
1,077
arch/arm/include/debug/efm32.S
/* * Copyright (C) 2013 Pengutronix * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define UARTn_CMD 0x000c #define UARTn_CMD_TXEN 0x0004 #define UARTn_STATUS 0x0010 #define UARTn_STATUS_TXC 0x0020 #define UARTn_STATUS_TXBL 0x0040 #define UARTn_TXDATA 0x0034 .macro addruart, rx, tmp, tmp2 ldr \rx, =(CONFIG_DEBUG_UART_PHYS) /* * enable TX. The driver might disable it to save energy. We * don't care about disabling at the end as during debug power * consumption isn't that important. */ ldr \tmp, =(UARTn_CMD_TXEN) str \tmp, [\rx, #UARTn_CMD] .endm .macro senduart,rd,rx strb \rd, [\rx, #UARTn_TXDATA] .endm .macro waituart,rd,rx 1001: ldr \rd, [\rx, #UARTn_STATUS] tst \rd, #UARTn_STATUS_TXBL beq 1001b .endm .macro busyuart,rd,rx 1001: ldr \rd, [\rx, UARTn_STATUS] tst \rd, #UARTn_STATUS_TXC bne 1001b .endm
AirFortressIlikara/LS2K0300-linux-4.19
5,540
arch/arm/include/debug/brcmstb.S
/* * Copyright (C) 2016 Broadcom * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/serial_reg.h> #include <asm/cputype.h> /* Physical register offset and virtual register offset */ #define REG_PHYS_BASE 0xf0000000 #define REG_PHYS_BASE_V7 0x08000000 #define REG_VIRT_BASE 0xfc000000 #define REG_PHYS_ADDR(x) ((x) + REG_PHYS_BASE) #define REG_PHYS_ADDR_V7(x) ((x) + REG_PHYS_BASE_V7) /* Product id can be read from here */ #define SUN_TOP_CTRL_BASE REG_PHYS_ADDR(0x404000) #define SUN_TOP_CTRL_BASE_V7 REG_PHYS_ADDR_V7(0x404000) #define UARTA_3390 REG_PHYS_ADDR(0x40a900) #define UARTA_7250 REG_PHYS_ADDR(0x40b400) #define UARTA_7260 REG_PHYS_ADDR(0x40c000) #define UARTA_7268 UARTA_7260 #define UARTA_7271 UARTA_7268 #define UARTA_7278 REG_PHYS_ADDR_V7(0x40c000) #define UARTA_7364 REG_PHYS_ADDR(0x40b000) #define UARTA_7366 UARTA_7364 #define UARTA_74371 REG_PHYS_ADDR(0x406b00) #define UARTA_7439 REG_PHYS_ADDR(0x40a900) #define UARTA_7445 REG_PHYS_ADDR(0x40ab00) #define UART_SHIFT 2 #define checkuart(rp, rv, family_id, family) \ /* Load family id */ \ ldr rp, =family_id ; \ /* Compare SUN_TOP_CTRL value against it */ \ cmp rp, rv ; \ /* Passed test, load address */ \ ldreq rp, =UARTA_##family ; \ /* Jump to save UART address */ \ beq 91f .macro addruart, rp, rv, tmp adr \rp, 99f @ actual addr of 99f ldr \rv, [\rp] @ linked addr is stored there sub \rv, \rv, \rp @ offset between the two ldr \rp, [\rp, #4] @ linked brcmstb_uart_config sub \tmp, \rp, \rv @ actual brcmstb_uart_config ldr \rp, [\tmp] @ Load brcmstb_uart_config cmp \rp, #1 @ needs initialization? bne 100f @ no; go load the addresses mov \rv, #0 @ yes; record init is done str \rv, [\tmp] /* Check for V7 memory map if B53 */ mrc p15, 0, \rv, c0, c0, 0 @ get Main ID register ldr \rp, =ARM_CPU_PART_MASK and \rv, \rv, \rp ldr \rp, =ARM_CPU_PART_BRAHMA_B53 @ check for B53 CPU cmp \rv, \rp bne 10f /* if PERIPHBASE doesn't overlap REG_PHYS_BASE use V7 map */ mrc p15, 1, \rv, c15, c3, 0 @ get PERIPHBASE from CBAR ands \rv, \rv, #REG_PHYS_BASE ldreq \rp, =SUN_TOP_CTRL_BASE_V7 /* Check SUN_TOP_CTRL base */ 10: ldrne \rp, =SUN_TOP_CTRL_BASE @ load SUN_TOP_CTRL PA ldr \rv, [\rp, #0] @ get register contents ARM_BE8( rev \rv, \rv ) and \rv, \rv, #0xffffff00 @ strip revision bits [7:0] /* Chip specific detection starts here */ 20: checkuart(\rp, \rv, 0x33900000, 3390) 21: checkuart(\rp, \rv, 0x72500000, 7250) 22: checkuart(\rp, \rv, 0x72600000, 7260) 23: checkuart(\rp, \rv, 0x72680000, 7268) 24: checkuart(\rp, \rv, 0x72710000, 7271) 25: checkuart(\rp, \rv, 0x73640000, 7364) 26: checkuart(\rp, \rv, 0x73660000, 7366) 27: checkuart(\rp, \rv, 0x07437100, 74371) 28: checkuart(\rp, \rv, 0x74390000, 7439) 29: checkuart(\rp, \rv, 0x74450000, 7445) 30: checkuart(\rp, \rv, 0x72780000, 7278) /* No valid UART found */ 90: mov \rp, #0 /* fall through */ /* Record whichever UART we chose */ 91: str \rp, [\tmp, #4] @ Store in brcmstb_uart_phys cmp \rp, #0 @ Valid UART address? 
bne 92f @ Yes, go process it str \rp, [\tmp, #8] @ Store 0 in brcmstb_uart_virt b 100f @ Done 92: and \rv, \rp, #0xffffff @ offset within 16MB section add \rv, \rv, #REG_VIRT_BASE str \rv, [\tmp, #8] @ Store in brcmstb_uart_virt b 100f .align 99: .word . .word brcmstb_uart_config .ltorg /* Load previously selected UART address */ 100: ldr \rp, [\tmp, #4] @ Load brcmstb_uart_phys ldr \rv, [\tmp, #8] @ Load brcmstb_uart_virt .endm .macro store, rd, rx:vararg ARM_BE8( rev \rd, \rd ) str \rd, \rx .endm .macro load, rd, rx:vararg ldr \rd, \rx ARM_BE8( rev \rd, \rd ) .endm .macro senduart,rd,rx store \rd, [\rx, #UART_TX << UART_SHIFT] .endm .macro busyuart,rd,rx 1002: load \rd, [\rx, #UART_LSR << UART_SHIFT] and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE teq \rd, #UART_LSR_TEMT | UART_LSR_THRE bne 1002b .endm .macro waituart,rd,rx .endm /* * Storage for the state maintained by the macros above. * * In the kernel proper, this data is located in arch/arm/mach-bcm/brcmstb.c. * That's because this header is included from multiple files, and we only * want a single copy of the data. In particular, the UART probing code above * assumes it's running using physical addresses. This is true when this file * is included from head.o, but not when included from debug.o. So we need * to share the probe results between the two copies, rather than having * to re-run the probing again later. * * In the decompressor, we put the symbol/storage right here, since common.c * isn't included in the decompressor build. This symbol gets put in .text * even though it's really data, since .data is discarded from the * decompressor. Luckily, .text is writeable in the decompressor, unless * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug. */ #if defined(ZIMAGE) brcmstb_uart_config: /* Debug UART initialization required */ .word 1 /* Debug UART physical address */ .word 0 /* Debug UART virtual address */ .word 0 #endif
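The three .word slots described in the comment above act as a small record shared between the head.o and debug.o expansions of addruart. Viewed from C it is roughly the struct below; the names are illustrative only and do not come from the kernel sources.

struct brcmstb_uart_state {      /* illustrative names, not the kernel's */
    unsigned int needs_init;     /* 1 until the first addruart probe     */
    unsigned int phys;           /* probed UART physical address, or 0   */
    unsigned int virt;           /* matching fixed virtual address, or 0 */
};

/* Later addruart expansions take the cached path ("100:" above). */
static void addruart_cached(const struct brcmstb_uart_state *st,
                            unsigned int *rp, unsigned int *rv)
{
    *rp = st->phys;
    *rv = st->virt;
}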
AirFortressIlikara/LS2K0300-linux-4.19
1,104
arch/arm/include/debug/ks8695.S
/* * arch/arm/include/debug/ks8695.S * * Copyright (C) 2006 Ben Dooks <ben@simtec.co.uk> * Copyright (C) 2006 Simtec Electronics * * KS8695 - Debug macros * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define KS8695_UART_PA 0x03ffe000 #define KS8695_UART_VA 0xf00fe000 #define KS8695_URTH (0x04) #define KS8695_URLS (0x14) #define URLS_URTE (1 << 6) #define URLS_URTHRE (1 << 5) .macro addruart, rp, rv, tmp ldr \rp, =KS8695_UART_PA @ physical base address ldr \rv, =KS8695_UART_VA @ virtual base address .endm .macro senduart, rd, rx str \rd, [\rx, #KS8695_URTH] @ Write to Transmit Holding Register .endm .macro busyuart, rd, rx 1001: ldr \rd, [\rx, #KS8695_URLS] @ Read Line Status Register tst \rd, #URLS_URTE @ Holding & Shift registers empty? beq 1001b .endm .macro waituart, rd, rx 1001: ldr \rd, [\rx, #KS8695_URLS] @ Read Line Status Register tst \rd, #URLS_URTHRE @ Holding Register empty? beq 1001b .endm
AirFortressIlikara/LS2K0300-linux-4.19
1,314
arch/arm/include/debug/imx.S
/* arch/arm/mach-imx/include/mach/debug-macro.S * * Debugging macro include header * * Copyright (C) 1994-1999 Russell King * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <asm/assembler.h> #include "imx-uart.h" /* * FIXME: This is a copy of IMX_IO_P2V in hardware.h, and needs to * stay sync with that. It's hard to maintain, and should be fixed * globally for multi-platform build to use a fixed virtual address * for low-level debug uart port across platforms. */ #define IMX_IO_P2V(x) ( \ (((x) & 0x80000000) >> 7) | \ (0xf4000000 + \ (((x) & 0x50000000) >> 6) + \ (((x) & 0x0b000000) >> 4) + \ (((x) & 0x000fffff)))) #define UART_VADDR IMX_IO_P2V(UART_PADDR) .macro addruart, rp, rv, tmp ldr \rp, =UART_PADDR @ physical ldr \rv, =UART_VADDR @ virtual .endm .macro senduart,rd,rx ARM_BE8(rev \rd, \rd) str \rd, [\rx, #0x40] @ TXDATA .endm .macro waituart,rd,rx .endm .macro busyuart,rd,rx 1002: ldr \rd, [\rx, #0x98] @ SR2 ARM_BE8(rev \rd, \rd) tst \rd, #1 << 3 @ TXDC beq 1002b @ wait until transmit done .endm
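The IMX_IO_P2V() mapping above is easiest to check with a concrete number. The sketch below reuses the macro verbatim and plugs in 0x43f90000 as a purely illustrative physical address; the real UART_PADDR comes from imx-uart.h and depends on the SoC.

#include <assert.h>
#include <stdint.h>

#define IMX_IO_P2V(x) ( \
    (((x) & 0x80000000) >> 7) | \
    (0xf4000000 + \
    (((x) & 0x50000000) >> 6) + \
    (((x) & 0x0b000000) >> 4) + \
    (((x) & 0x000fffff))))

int main(void)
{
    uint32_t pa = 0x43f90000u;   /* hypothetical UART physical base */
    /* 0x40000000 >> 6 = 0x01000000, 0x03000000 >> 4 = 0x00300000, and the
     * low 20 bits are kept, so the virtual address lands at 0xf5390000. */
    assert(IMX_IO_P2V(pa) == 0xf5390000u);
    return 0;
}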
AirFortressIlikara/LS2K0300-linux-4.19
2,200
arch/arm/include/debug/omap2plus.S
/* * Debugging macro include header * * Copyright (C) 1994-1999 Russell King * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/serial_reg.h> /* External port on Zoom2/3 */ #define ZOOM_UART_BASE 0x10000000 #define ZOOM_UART_VIRT 0xfa400000 #define OMAP_PORT_SHIFT 2 #define ZOOM_PORT_SHIFT 1 #define UART_OFFSET(addr) ((addr) & 0x00ffffff) .pushsection .data .align 2 omap_uart_phys: .word 0 omap_uart_virt: .word 0 omap_uart_lsr: .word 0 .popsection .macro addruart, rp, rv, tmp /* Use omap_uart_phys/virt if already configured */ 10: adr \rp, 99f @ get effective addr of 99f ldr \rv, [\rp] @ get absolute addr of 99f sub \rv, \rv, \rp @ offset between the two ldr \rp, [\rp, #4] @ abs addr of omap_uart_phys sub \tmp, \rp, \rv @ make it effective ldr \rp, [\tmp, #0] @ omap_uart_phys ldr \rv, [\tmp, #4] @ omap_uart_virt cmp \rp, #0 @ is port configured? cmpne \rv, #0 bne 100f @ already configured /* Configure the UART offset from the phys/virt base */ #ifdef CONFIG_DEBUG_ZOOM_UART ldr \rp, =ZOOM_UART_BASE str \rp, [\tmp, #0] @ omap_uart_phys ldr \rp, =ZOOM_UART_VIRT str \rp, [\tmp, #4] @ omap_uart_virt mov \rp, #(UART_LSR << ZOOM_PORT_SHIFT) str \rp, [\tmp, #8] @ omap_uart_lsr #endif b 10b .align 99: .word . .word omap_uart_phys .ltorg 100: /* Pass the UART_LSR reg address */ ldr \tmp, [\tmp, #8] @ omap_uart_lsr add \rp, \rp, \tmp add \rv, \rv, \tmp .endm .macro senduart,rd,rx orr \rd, \rd, \rx, lsl #24 @ preserve LSR reg offset bic \rx, \rx, #0xff @ get base (THR) reg address strb \rd, [\rx] @ send lower byte of rd orr \rx, \rx, \rd, lsr #24 @ restore original rx (LSR) bic \rd, \rd, #(0xff << 24) @ restore original rd .endm .macro busyuart,rd,rx 1001: ldrb \rd, [\rx] @ rx contains UART_LSR address and \rd, \rd, #(UART_LSR_TEMT | UART_LSR_THRE) teq \rd, #(UART_LSR_TEMT | UART_LSR_THRE) bne 1001b .endm .macro waituart,rd,rx .endm
AirFortressIlikara/LS2K0300-linux-4.19
1,151
arch/arm/include/debug/at91.S
/* * Copyright (C) 2003-2005 SAN People * * Debugging macro include header * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #define AT91_DBGU_SR (0x14) /* Status Register */ #define AT91_DBGU_THR (0x1c) /* Transmitter Holding Register */ #define AT91_DBGU_TXRDY (1 << 1) /* Transmitter Ready */ #define AT91_DBGU_TXEMPTY (1 << 9) /* Transmitter Empty */ .macro addruart, rp, rv, tmp ldr \rp, =CONFIG_DEBUG_UART_PHYS @ System peripherals (phys address) ldr \rv, =CONFIG_DEBUG_UART_VIRT @ System peripherals (virt address) .endm .macro senduart,rd,rx strb \rd, [\rx, #(AT91_DBGU_THR)] @ Write to Transmitter Holding Register .endm .macro waituart,rd,rx 1001: ldr \rd, [\rx, #(AT91_DBGU_SR)] @ Read Status Register tst \rd, #AT91_DBGU_TXRDY @ DBGU_TXRDY = 1 when ready to transmit beq 1001b .endm .macro busyuart,rd,rx 1001: ldr \rd, [\rx, #(AT91_DBGU_SR)] @ Read Status Register tst \rd, #AT91_DBGU_TXEMPTY @ DBGU_TXEMPTY = 1 when transmission complete beq 1001b .endm
AirFortressIlikara/LS2K0300-linux-4.19
1,769
arch/arm/include/debug/sti.S
/* * arch/arm/include/debug/sti.S * * Debugging macro include header * Copyright (C) 2013 STMicroelectronics (R&D) Limited. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define STIH41X_COMMS_BASE 0xfed00000 #define STIH41X_ASC2_BASE (STIH41X_COMMS_BASE+0x32000) #define STIH41X_SBC_LPM_BASE 0xfe400000 #define STIH41X_SBC_COMMS_BASE (STIH41X_SBC_LPM_BASE + 0x100000) #define STIH41X_SBC_ASC1_BASE (STIH41X_SBC_COMMS_BASE + 0x31000) #define VIRT_ADDRESS(x) (x - 0x1000000) #if IS_ENABLED(CONFIG_STIH41X_DEBUG_ASC2) #define DEBUG_LL_UART_BASE STIH41X_ASC2_BASE #endif #if IS_ENABLED(CONFIG_STIH41X_DEBUG_SBC_ASC1) #define DEBUG_LL_UART_BASE STIH41X_SBC_ASC1_BASE #endif #ifndef DEBUG_LL_UART_BASE #error "DEBUG UART is not Configured" #endif #define ASC_TX_BUF_OFF 0x04 #define ASC_CTRL_OFF 0x0c #define ASC_STA_OFF 0x14 #define ASC_STA_TX_FULL (1<<9) #define ASC_STA_TX_EMPTY (1<<1) .macro addruart, rp, rv, tmp ldr \rp, =DEBUG_LL_UART_BASE @ physical base ldr \rv, =VIRT_ADDRESS(DEBUG_LL_UART_BASE) @ virt base .endm .macro senduart,rd,rx strb \rd, [\rx, #ASC_TX_BUF_OFF] .endm .macro waituart,rd,rx 1001: ldr \rd, [\rx, #ASC_STA_OFF] tst \rd, #ASC_STA_TX_FULL bne 1001b .endm .macro busyuart,rd,rx 1001: ldr \rd, [\rx, #ASC_STA_OFF] tst \rd, #ASC_STA_TX_EMPTY beq 1001b .endm
AirFortressIlikara/LS2K0300-linux-4.19
1,472
arch/arm/include/debug/ux500.S
/* * Debugging macro include header * * Copyright (C) 2009 ST-Ericsson * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #if CONFIG_UX500_DEBUG_UART > 2 #error Invalid Ux500 debug UART #endif /* * DEBUG_LL only works if only one SOC is built in. We don't use #else below * in order to get "__UX500_UART redefined" warnings if more than one SOC is * built, so that there's some hint during the build that something is wrong. */ #ifdef CONFIG_UX500_SOC_DB8500 #define U8500_UART0_PHYS_BASE (0x80120000) #define U8500_UART1_PHYS_BASE (0x80121000) #define U8500_UART2_PHYS_BASE (0x80007000) #define U8500_UART0_VIRT_BASE (0xf8120000) #define U8500_UART1_VIRT_BASE (0xf8121000) #define U8500_UART2_VIRT_BASE (0xf8007000) #define __UX500_PHYS_UART(n) U8500_UART##n##_PHYS_BASE #define __UX500_VIRT_UART(n) U8500_UART##n##_VIRT_BASE #endif #if !defined(__UX500_PHYS_UART) || !defined(__UX500_VIRT_UART) #error Unknown SOC #endif #define UX500_PHYS_UART(n) __UX500_PHYS_UART(n) #define UX500_VIRT_UART(n) __UX500_VIRT_UART(n) #define UART_PHYS_BASE UX500_PHYS_UART(CONFIG_UX500_DEBUG_UART) #define UART_VIRT_BASE UX500_VIRT_UART(CONFIG_UX500_DEBUG_UART) .macro addruart, rp, rv, tmp ldr \rp, =UART_PHYS_BASE @ no, physical address ldr \rv, =UART_VIRT_BASE @ yes, virtual address .endm #include <debug/pl01x.S>
AirFortressIlikara/LS2K0300-linux-4.19
1,037
arch/arm/include/debug/s5pv210.S
/* * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. * http://www.samsung.com * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* pull in the relevant register and map files. */ #define S3C_ADDR_BASE 0xF6000000 #define S3C_VA_UART S3C_ADDR_BASE + 0x01000000 #define S5PV210_PA_UART 0xe2900000 /* note, for the boot process to work we have to keep the UART * virtual address aligned to an 1MiB boundary for the L1 * mapping the head code makes. We keep the UART virtual address * aligned and add in the offset when we load the value here. */ .macro addruart, rp, rv, tmp ldr \rp, =S5PV210_PA_UART ldr \rv, =S3C_VA_UART #if CONFIG_DEBUG_S3C_UART != 0 add \rp, \rp, #(0x400 * CONFIG_DEBUG_S3C_UART) add \rv, \rv, #(0x400 * CONFIG_DEBUG_S3C_UART) #endif .endm #define fifo_full fifo_full_s5pv210 #define fifo_level fifo_level_s5pv210 #include <debug/samsung.S>
AirFortressIlikara/LS2K0300-linux-4.19
1,166
arch/arm/include/debug/exynos.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. * http://www.samsung.com */ /* pull in the relevant register and map files. */ #define S3C_ADDR_BASE 0xF6000000 #define S3C_VA_UART S3C_ADDR_BASE + 0x01000000 #define EXYNOS4_PA_UART 0x13800000 #define EXYNOS5_PA_UART 0x12C00000 /* note, for the boot process to work we have to keep the UART * virtual address aligned to an 1MiB boundary for the L1 * mapping the head code makes. We keep the UART virtual address * aligned and add in the offset when we load the value here. */ .macro addruart, rp, rv, tmp mrc p15, 0, \tmp, c0, c0, 0 and \tmp, \tmp, #0xf0 teq \tmp, #0xf0 @@ A15 beq 100f mrc p15, 0, \tmp, c0, c0, 5 and \tmp, \tmp, #0xf00 teq \tmp, #0x100 @@ A15 + A7 but boot to A7 100: ldreq \rp, =EXYNOS5_PA_UART movne \rp, #EXYNOS4_PA_UART @@ EXYNOS4 ldr \rv, =S3C_VA_UART #if CONFIG_DEBUG_S3C_UART != 0 add \rp, \rp, #(0x10000 * CONFIG_DEBUG_S3C_UART) add \rv, \rv, #(0x10000 * CONFIG_DEBUG_S3C_UART) #endif .endm #define fifo_full fifo_full_s5pv210 #define fifo_level fifo_level_s5pv210 #include <debug/samsung.S>
AirFortressIlikara/LS2K0300-linux-4.19
6,905
arch/arm/include/debug/tegra.S
/* * Copyright (C) 2010,2011 Google, Inc. * Copyright (C) 2011-2012 NVIDIA CORPORATION. All Rights Reserved. * * Author: * Colin Cross <ccross@google.com> * Erik Gilling <konkers@google.com> * Doug Anderson <dianders@chromium.org> * Stephen Warren <swarren@nvidia.com> * * Portions based on mach-omap2's debug-macro.S * Copyright (C) 1994-1999 Russell King * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/serial_reg.h> #define UART_SHIFT 2 /* Physical addresses */ #define TEGRA_CLK_RESET_BASE 0x60006000 #define TEGRA_APB_MISC_BASE 0x70000000 #define TEGRA_UARTA_BASE 0x70006000 #define TEGRA_UARTB_BASE 0x70006040 #define TEGRA_UARTC_BASE 0x70006200 #define TEGRA_UARTD_BASE 0x70006300 #define TEGRA_UARTE_BASE 0x70006400 #define TEGRA_PMC_BASE 0x7000e400 #define TEGRA_CLK_RST_DEVICES_L (TEGRA_CLK_RESET_BASE + 0x04) #define TEGRA_CLK_RST_DEVICES_H (TEGRA_CLK_RESET_BASE + 0x08) #define TEGRA_CLK_RST_DEVICES_U (TEGRA_CLK_RESET_BASE + 0x0c) #define TEGRA_CLK_OUT_ENB_L (TEGRA_CLK_RESET_BASE + 0x10) #define TEGRA_CLK_OUT_ENB_H (TEGRA_CLK_RESET_BASE + 0x14) #define TEGRA_CLK_OUT_ENB_U (TEGRA_CLK_RESET_BASE + 0x18) #define TEGRA_PMC_SCRATCH20 (TEGRA_PMC_BASE + 0xa0) #define TEGRA_APB_MISC_GP_HIDREV (TEGRA_APB_MISC_BASE + 0x804) /* * Must be section-aligned since a section mapping is used early on. * Must not overlap with regions in mach-tegra/io.c:tegra_io_desc[]. */ #define UART_VIRTUAL_BASE 0xfe800000 #define checkuart(rp, rv, lhu, bit, uart) \ /* Load address of CLK_RST register */ \ ldr rp, =TEGRA_CLK_RST_DEVICES_##lhu ; \ /* Load value from CLK_RST register */ \ ldr rp, [rp, #0] ; \ /* Test UART's reset bit */ \ tst rp, #(1 << bit) ; \ /* If set, can't use UART; jump to save no UART */ \ bne 90f ; \ /* Load address of CLK_OUT_ENB register */ \ ldr rp, =TEGRA_CLK_OUT_ENB_##lhu ; \ /* Load value from CLK_OUT_ENB register */ \ ldr rp, [rp, #0] ; \ /* Test UART's clock enable bit */ \ tst rp, #(1 << bit) ; \ /* If clear, can't use UART; jump to save no UART */ \ beq 90f ; \ /* Passed all tests, load address of UART registers */ \ ldr rp, =TEGRA_UART##uart##_BASE ; \ /* Jump to save UART address */ \ b 91f .macro addruart, rp, rv, tmp adr \rp, 99f @ actual addr of 99f ldr \rv, [\rp] @ linked addr is stored there sub \rv, \rv, \rp @ offset between the two ldr \rp, [\rp, #4] @ linked tegra_uart_config sub \tmp, \rp, \rv @ actual tegra_uart_config ldr \rp, [\tmp] @ Load tegra_uart_config cmp \rp, #1 @ needs initialization? bne 100f @ no; go load the addresses mov \rv, #0 @ yes; record init is done str \rv, [\tmp] #ifdef CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA /* Check ODMDATA */ 10: ldr \rp, =TEGRA_PMC_SCRATCH20 ldr \rp, [\rp, #0] @ Load PMC_SCRATCH20 lsr \rv, \rp, #18 @ 19:18 are console type and \rv, \rv, #3 cmp \rv, #2 @ 2 and 3 mean DCC, UART beq 11f @ some boards swap the meaning cmp \rv, #3 @ so accept either bne 90f 11: lsr \rv, \rp, #15 @ 17:15 are UART ID and \rv, #7 cmp \rv, #0 @ UART 0? beq 20f cmp \rv, #1 @ UART 1? beq 21f cmp \rv, #2 @ UART 2? beq 22f cmp \rv, #3 @ UART 3? beq 23f cmp \rv, #4 @ UART 4? 
beq 24f b 90f @ invalid #endif #if defined(CONFIG_TEGRA_DEBUG_UARTA) || \ defined(CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA) /* Check UART A validity */ 20: checkuart(\rp, \rv, L, 6, A) #endif #if defined(CONFIG_TEGRA_DEBUG_UARTB) || \ defined(CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA) /* Check UART B validity */ 21: checkuart(\rp, \rv, L, 7, B) #endif #if defined(CONFIG_TEGRA_DEBUG_UARTC) || \ defined(CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA) /* Check UART C validity */ 22: checkuart(\rp, \rv, H, 23, C) #endif #if defined(CONFIG_TEGRA_DEBUG_UARTD) || \ defined(CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA) /* Check UART D validity */ 23: checkuart(\rp, \rv, U, 1, D) #endif #if defined(CONFIG_TEGRA_DEBUG_UARTE) || \ defined(CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA) /* Check UART E validity */ 24: checkuart(\rp, \rv, U, 2, E) #endif /* No valid UART found */ 90: mov \rp, #0 /* fall through */ /* Record whichever UART we chose */ 91: str \rp, [\tmp, #4] @ Store in tegra_uart_phys cmp \rp, #0 @ Valid UART address? bne 92f @ Yes, go process it str \rp, [\tmp, #8] @ Store 0 in tegra_uart_virt b 100f @ Done 92: and \rv, \rp, #0xffffff @ offset within 1MB section add \rv, \rv, #UART_VIRTUAL_BASE str \rv, [\tmp, #8] @ Store in tegra_uart_virt b 100f .align 99: .word . .word tegra_uart_config .ltorg /* Load previously selected UART address */ 100: ldr \rp, [\tmp, #4] @ Load tegra_uart_phys ldr \rv, [\tmp, #8] @ Load tegra_uart_virt .endm /* * Code below is swiped from <asm/hardware/debug-8250.S>, but add an extra * check to make sure that the UART address is actually valid. */ .macro senduart, rd, rx cmp \rx, #0 strneb \rd, [\rx, #UART_TX << UART_SHIFT] 1001: .endm .macro busyuart, rd, rx cmp \rx, #0 beq 1002f 1001: ldrb \rd, [\rx, #UART_LSR << UART_SHIFT] and \rd, \rd, #UART_LSR_THRE teq \rd, #UART_LSR_THRE bne 1001b 1002: .endm .macro waituart, rd, rx #ifdef FLOW_CONTROL cmp \rx, #0 beq 1002f 1001: ldrb \rd, [\rx, #UART_MSR << UART_SHIFT] tst \rd, #UART_MSR_CTS beq 1001b 1002: #endif .endm /* * Storage for the state maintained by the macros above. * * In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c. * That's because this header is included from multiple files, and we only * want a single copy of the data. In particular, the UART probing code above * assumes it's running using physical addresses. This is true when this file * is included from head.o, but not when included from debug.o. So we need * to share the probe results between the two copies, rather than having * to re-run the probing again later. * * In the decompressor, we put the symbol/storage right here, since common.c * isn't included in the decompressor build. This symbol gets put in .text * even though it's really data, since .data is discarded from the * decompressor. Luckily, .text is writeable in the decompressor, unless * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug. */ #if defined(ZIMAGE) tegra_uart_config: /* Debug UART initialization required */ .word 1 /* Debug UART physical address */ .word 0 /* Debug UART virtual address */ .word 0 #endif
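The ODMDATA decode performed above when CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA is set can be restated in C as below; the bit positions are taken straight from the assembly (bits 19:18 select the console type, bits 17:15 the UART index), and the helper is only an illustration.

/* Returns the UART index 0..4 (A..E), or -1 if ODMDATA does not name a UART. */
static int tegra_odmdata_uart(unsigned int scratch20)
{
    unsigned int console = (scratch20 >> 18) & 0x3;  /* 2 and 3 mean DCC/UART; */
    unsigned int uart    = (scratch20 >> 15) & 0x7;  /* some boards swap them  */

    if (console != 2 && console != 3)
        return -1;                       /* console not routed to a UART */
    if (uart > 4)
        return -1;                       /* only UART A..E exist         */
    return (int)uart;                    /* 0 = UARTA ... 4 = UARTE      */
}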
AirFortressIlikara/LS2K0300-linux-4.19
1,362
arch/arm/include/debug/vexpress.S
/* arch/arm/mach-realview/include/mach/debug-macro.S * * Debugging macro include header * * Copyright (C) 1994-1999 Russell King * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define DEBUG_LL_PHYS_BASE 0x10000000 #define DEBUG_LL_UART_OFFSET 0x00009000 #define DEBUG_LL_PHYS_BASE_RS1 0x1c000000 #define DEBUG_LL_UART_OFFSET_RS1 0x00090000 #define DEBUG_LL_UART_PHYS_CRX 0xb0090000 #define DEBUG_LL_VIRT_BASE 0xf8000000 #if defined(CONFIG_DEBUG_VEXPRESS_UART0_DETECT) .macro addruart,rp,rv,tmp .arch armv7-a @ Make an educated guess regarding the memory map: @ - the original A9 core tile (based on ARM Cortex-A9 r0p1) @ should use UART at 0x10009000 @ - all other (RS1 compliant) tiles use UART mapped @ at 0x1c090000 mrc p15, 0, \rp, c0, c0, 0 movw \rv, #0xc091 movt \rv, #0x410f cmp \rp, \rv @ Original memory map moveq \rp, #DEBUG_LL_UART_OFFSET orreq \rv, \rp, #DEBUG_LL_VIRT_BASE orreq \rp, \rp, #DEBUG_LL_PHYS_BASE @ RS1 memory map movne \rp, #DEBUG_LL_UART_OFFSET_RS1 orrne \rv, \rp, #DEBUG_LL_VIRT_BASE orrne \rp, \rp, #DEBUG_LL_PHYS_BASE_RS1 .endm #include <debug/pl01x.S> #endif
AirFortressIlikara/LS2K0300-linux-4.19
5,927
arch/arm/include/asm/hardware/entry-macro-iomd.S
/* * arch/arm/include/asm/hardware/entry-macro-iomd.S * * Low-level IRQ helper macros for IOC/IOMD based platforms * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ /* IOC / IOMD based hardware */ #include <asm/hardware/iomd.h> .macro get_irqnr_and_base, irqnr, irqstat, base, tmp ldrb \irqstat, [\base, #IOMD_IRQREQB] @ get high priority first ldr \tmp, =irq_prio_h teq \irqstat, #0 #ifdef IOMD_BASE ldreqb \irqstat, [\base, #IOMD_DMAREQ] @ get dma addeq \tmp, \tmp, #256 @ irq_prio_h table size teqeq \irqstat, #0 bne 2406f #endif ldreqb \irqstat, [\base, #IOMD_IRQREQA] @ get low priority addeq \tmp, \tmp, #256 @ irq_prio_d table size teqeq \irqstat, #0 #ifdef IOMD_IRQREQC ldreqb \irqstat, [\base, #IOMD_IRQREQC] addeq \tmp, \tmp, #256 @ irq_prio_l table size teqeq \irqstat, #0 #endif #ifdef IOMD_IRQREQD ldreqb \irqstat, [\base, #IOMD_IRQREQD] addeq \tmp, \tmp, #256 @ irq_prio_lc table size teqeq \irqstat, #0 #endif 2406: ldrneb \irqnr, [\tmp, \irqstat] @ get IRQ number .endm /* * Interrupt table (incorporates priority). Please note that we * rely on the order of these tables (see above code). */ .align 5 irq_prio_h: .byte 0, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10 .byte 12, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10 .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10 .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10 .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 #ifdef IOMD_BASE irq_prio_d: .byte 0,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 .byte 20,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 .byte 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 .byte 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 #endif irq_prio_l: .byte 0, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 .byte 4, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3 .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3 .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 
7, 7, 7, 7, 7, 7, 7 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 #ifdef IOMD_IRQREQC irq_prio_lc: .byte 24,24,25,24,26,26,26,26,27,27,27,27,27,27,27,27 .byte 28,24,25,24,26,26,26,26,27,27,27,27,27,27,27,27 .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29 .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29 .byte 30,30,30,30,30,30,30,30,27,27,27,27,27,27,27,27 .byte 30,30,30,30,30,30,30,30,27,27,27,27,27,27,27,27 .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29 .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29 .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31 .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31 .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31 .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31 .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31 .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31 .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31 .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31 #endif #ifdef IOMD_IRQREQD irq_prio_ld: .byte 40,40,41,40,42,42,42,42,43,43,43,43,43,43,43,43 .byte 44,40,41,40,42,42,42,42,43,43,43,43,43,43,43,43 .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45 .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45 .byte 46,46,46,46,46,46,46,46,43,43,43,43,43,43,43,43 .byte 46,46,46,46,46,46,46,46,43,43,43,43,43,43,43,43 .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45 .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45 .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47 .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47 .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47 .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47 .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47 .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47 .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47 .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47 #endif
AirFortressIlikara/LS2K0300-linux-4.19
3,671
arch/arm64/xen/hypercall.S
/****************************************************************************** * hypercall.S * * Xen hypercall wrappers * * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012 * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ /* * The Xen hypercall calling convention is very similar to the procedure * call standard for the ARM 64-bit architecture: the first parameter is * passed in x0, the second in x1, the third in x2, the fourth in x3 and * the fifth in x4. * * The hypercall number is passed in x16. * * The return value is in x0. * * The hvc ISS is required to be 0xEA1, that is the Xen specific ARM * hypercall tag. * * Parameter structs passed to hypercalls are laid out according to * the ARM 64-bit EABI standard. */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/asm-uaccess.h> #include <xen/interface/xen.h> #define XEN_IMM 0xEA1 #define HYPERCALL_SIMPLE(hypercall) \ ENTRY(HYPERVISOR_##hypercall) \ mov x16, #__HYPERVISOR_##hypercall; \ hvc XEN_IMM; \ ret; \ ENDPROC(HYPERVISOR_##hypercall) #define HYPERCALL0 HYPERCALL_SIMPLE #define HYPERCALL1 HYPERCALL_SIMPLE #define HYPERCALL2 HYPERCALL_SIMPLE #define HYPERCALL3 HYPERCALL_SIMPLE #define HYPERCALL4 HYPERCALL_SIMPLE #define HYPERCALL5 HYPERCALL_SIMPLE .text HYPERCALL2(xen_version); HYPERCALL3(console_io); HYPERCALL3(grant_table_op); HYPERCALL2(sched_op); HYPERCALL2(event_channel_op); HYPERCALL2(hvm_op); HYPERCALL2(memory_op); HYPERCALL2(physdev_op); HYPERCALL3(vcpu_op); HYPERCALL1(tmem_op); HYPERCALL1(platform_op_raw); HYPERCALL2(multicall); HYPERCALL2(vm_assist); HYPERCALL3(dm_op); ENTRY(privcmd_call) mov x16, x0 mov x0, x1 mov x1, x2 mov x2, x3 mov x3, x4 mov x4, x5 /* * Privcmd calls are issued by the userspace. The kernel needs to * enable access to TTBR0_EL1 as the hypervisor would issue stage 1 * translations to user memory via AT instructions. Since AT * instructions are not affected by the PAN bit (ARMv8.1), we only * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation * is enabled (it implies that hardware UAO and PAN disabled). */ uaccess_ttbr0_enable x6, x7, x8 hvc XEN_IMM /* * Disable userspace access from kernel once the hyp call completed. 
*/ uaccess_ttbr0_disable x6, x7 ret ENDPROC(privcmd_call);
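The calling convention documented at the top of this file (arguments in x0..x4, hypercall number in x16, hvc with immediate 0xEA1, result in x0) can be illustrated with a standalone two-argument wrapper in C inline assembly. This is only a sketch of the convention, not the kernel's HYPERVISOR_* or privcmd plumbing, and it declares only a memory clobber.

static inline long xen_hypercall2(unsigned long nr,
                                  unsigned long a0, unsigned long a1)
{
    register unsigned long x16 asm("x16") = nr;   /* hypercall number */
    register unsigned long x0  asm("x0")  = a0;   /* first argument   */
    register unsigned long x1  asm("x1")  = a1;   /* second argument  */

    asm volatile("hvc #0xEA1"                     /* Xen-specific hvc tag */
                 : "+r" (x0)
                 : "r" (x1), "r" (x16)
                 : "memory");

    return (long)x0;                              /* return value in x0 */
}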
AirFortressIlikara/LS2K0300-linux-4.19
24,736
arch/arm64/kernel/head.S
/* * Low-level CPU initialisation * Based on arch/arm/kernel/head.S * * Copyright (C) 1994-2002 Russell King * Copyright (C) 2003-2012 ARM Ltd. * Authors: Catalin Marinas <catalin.marinas@arm.com> * Will Deacon <will.deacon@arm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <linux/init.h> #include <linux/irqchip/arm-gic-v3.h> #include <asm/assembler.h> #include <asm/boot.h> #include <asm/ptrace.h> #include <asm/asm-offsets.h> #include <asm/cache.h> #include <asm/cputype.h> #include <asm/elf.h> #include <asm/kernel-pgtable.h> #include <asm/kvm_arm.h> #include <asm/memory.h> #include <asm/pgtable-hwdef.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/smp.h> #include <asm/sysreg.h> #include <asm/thread_info.h> #include <asm/virt.h> #include "efi-header.S" #define __PHYS_OFFSET (KERNEL_START - TEXT_OFFSET) #if (TEXT_OFFSET & 0xfff) != 0 #error TEXT_OFFSET must be at least 4KB aligned #elif (PAGE_OFFSET & 0x1fffff) != 0 #error PAGE_OFFSET must be at least 2MB aligned #elif TEXT_OFFSET > 0x1fffff #error TEXT_OFFSET must be less than 2MB #endif /* * Kernel startup entry point. * --------------------------- * * The requirements are: * MMU = off, D-cache = off, I-cache = on or off, * x0 = physical address to the FDT blob. * * This code is mostly position independent so you call this at * __pa(PAGE_OFFSET + TEXT_OFFSET). * * Note that the callee-saved registers are used for storing variables * that are useful before the MMU is enabled. The allocations are described * in the entry routines. */ __HEAD _head: /* * DO NOT MODIFY. Image header expected by Linux boot-loaders. */ #ifdef CONFIG_EFI /* * This add instruction has no meaningful effect except that * its opcode forms the magic "MZ" signature required by UEFI. */ add x13, x18, #0x16 b stext #else b stext // branch to kernel start, magic .long 0 // reserved #endif le64sym _kernel_offset_le // Image load offset from start of RAM, little-endian le64sym _kernel_size_le // Effective size of kernel image, little-endian le64sym _kernel_flags_le // Informative flags, little-endian .quad 0 // reserved .quad 0 // reserved .quad 0 // reserved .ascii "ARM\x64" // Magic number #ifdef CONFIG_EFI .long pe_header - _head // Offset to the PE header. pe_header: __EFI_PE_HEADER #else .long 0 // reserved #endif __INIT /* * The following callee saved general purpose registers are used on the * primary lowlevel boot path: * * Register Scope Purpose * x21 stext() .. start_kernel() FDT pointer passed at boot in x0 * x23 stext() .. start_kernel() physical misalignment/KASLR offset * x28 __create_page_tables() callee preserved temp register * x19/x20 __primary_switch() callee preserved temp registers */ ENTRY(stext) bl preserve_boot_args bl el2_setup // Drop to EL1, w0=cpu_boot_mode adrp x23, __PHYS_OFFSET and x23, x23, MIN_KIMG_ALIGN - 1 // KASLR offset, defaults to 0 bl set_cpu_boot_mode_flag bl __create_page_tables /* * The following calls CPU setup code, see arch/arm64/mm/proc.S for * details. 
* On return, the CPU will be ready for the MMU to be turned on and * the TCR will have been set. */ bl __cpu_setup // initialise processor b __primary_switch ENDPROC(stext) /* * Preserve the arguments passed by the bootloader in x0 .. x3 */ preserve_boot_args: mov x21, x0 // x21=FDT adr_l x0, boot_args // record the contents of stp x21, x1, [x0] // x0 .. x3 at kernel entry stp x2, x3, [x0, #16] dmb sy // needed before dc ivac with // MMU off mov x1, #0x20 // 4 x 8 bytes b __inval_dcache_area // tail call ENDPROC(preserve_boot_args) /* * Macro to create a table entry to the next page. * * tbl: page table address * virt: virtual address * shift: #imm page table shift * ptrs: #imm pointers per table page * * Preserves: virt * Corrupts: ptrs, tmp1, tmp2 * Returns: tbl -> next level table page address */ .macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2 add \tmp1, \tbl, #PAGE_SIZE phys_to_pte \tmp2, \tmp1 orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type lsr \tmp1, \virt, #\shift sub \ptrs, \ptrs, #1 and \tmp1, \tmp1, \ptrs // table index str \tmp2, [\tbl, \tmp1, lsl #3] add \tbl, \tbl, #PAGE_SIZE // next level table page .endm /* * Macro to populate page table entries, these entries can be pointers to the next level * or last level entries pointing to physical memory. * * tbl: page table address * rtbl: pointer to page table or physical memory * index: start index to write * eindex: end index to write - [index, eindex] written to * flags: flags for pagetable entry to or in * inc: increment to rtbl between each entry * tmp1: temporary variable * * Preserves: tbl, eindex, flags, inc * Corrupts: index, tmp1 * Returns: rtbl */ .macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1 .Lpe\@: phys_to_pte \tmp1, \rtbl orr \tmp1, \tmp1, \flags // tmp1 = table entry str \tmp1, [\tbl, \index, lsl #3] add \rtbl, \rtbl, \inc // rtbl = pa next level add \index, \index, #1 cmp \index, \eindex b.ls .Lpe\@ .endm /* * Compute indices of table entries from virtual address range. If multiple entries * were needed in the previous page table level then the next page table level is assumed * to be composed of multiple pages. (This effectively scales the end index). * * vstart: virtual address of start of range * vend: virtual address of end of range * shift: shift used to transform virtual address into index * ptrs: number of entries in page table * istart: index in table corresponding to vstart * iend: index in table corresponding to vend * count: On entry: how many extra entries were required in previous level, scales * our end index. * On exit: returns how many extra entries required for next page table level * * Preserves: vstart, vend, shift, ptrs * Returns: istart, iend, count */ .macro compute_indices, vstart, vend, shift, ptrs, istart, iend, count lsr \iend, \vend, \shift mov \istart, \ptrs sub \istart, \istart, #1 and \iend, \iend, \istart // iend = (vend >> shift) & (ptrs - 1) mov \istart, \ptrs mul \istart, \istart, \count add \iend, \iend, \istart // iend += (count - 1) * ptrs // our entries span multiple tables lsr \istart, \vstart, \shift mov \count, \ptrs sub \count, \count, #1 and \istart, \istart, \count sub \count, \iend, \istart .endm /* * Map memory for specified virtual address range. Each level of page table needed supports * multiple entries. If a level requires n entries the next page table level is assumed to be * formed from n pages. 
* * tbl: location of page table * rtbl: address to be used for first level page table entry (typically tbl + PAGE_SIZE) * vstart: start address to map * vend: end address to map - we map [vstart, vend] * flags: flags to use to map last level entries * phys: physical address corresponding to vstart - physical memory is contiguous * pgds: the number of pgd entries * * Temporaries: istart, iend, tmp, count, sv - these need to be different registers * Preserves: vstart, vend, flags * Corrupts: tbl, rtbl, istart, iend, tmp, count, sv */ .macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv add \rtbl, \tbl, #PAGE_SIZE mov \sv, \rtbl mov \count, #0 compute_indices \vstart, \vend, #PGDIR_SHIFT, \pgds, \istart, \iend, \count populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp mov \tbl, \sv mov \sv, \rtbl #if SWAPPER_PGTABLE_LEVELS > 3 compute_indices \vstart, \vend, #PUD_SHIFT, #PTRS_PER_PUD, \istart, \iend, \count populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp mov \tbl, \sv mov \sv, \rtbl #endif #if SWAPPER_PGTABLE_LEVELS > 2 compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #PTRS_PER_PMD, \istart, \iend, \count populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp mov \tbl, \sv #endif compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #PTRS_PER_PTE, \istart, \iend, \count bic \count, \phys, #SWAPPER_BLOCK_SIZE - 1 populate_entries \tbl, \count, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp .endm /* * Setup the initial page tables. We only setup the barest amount which is * required to get the kernel running. The following sections are required: * - identity mapping to enable the MMU (low address, TTBR0) * - first few MB of the kernel linear mapping to jump to once the MMU has * been enabled */ __create_page_tables: mov x28, lr /* * Invalidate the idmap and swapper page tables to avoid potential * dirty cache lines being evicted. */ adrp x0, idmap_pg_dir adrp x1, swapper_pg_end sub x1, x1, x0 bl __inval_dcache_area /* * Clear the idmap and swapper page tables. */ adrp x0, idmap_pg_dir adrp x1, swapper_pg_end sub x1, x1, x0 1: stp xzr, xzr, [x0], #16 stp xzr, xzr, [x0], #16 stp xzr, xzr, [x0], #16 stp xzr, xzr, [x0], #16 subs x1, x1, #64 b.ne 1b mov x7, SWAPPER_MM_MMUFLAGS /* * Create the identity mapping. */ adrp x0, idmap_pg_dir adrp x3, __idmap_text_start // __pa(__idmap_text_start) /* * VA_BITS may be too small to allow for an ID mapping to be created * that covers system RAM if that is located sufficiently high in the * physical address space. So for the ID map, use an extended virtual * range in that case, and configure an additional translation level * if needed. * * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the * entire ID map region can be mapped. As T0SZ == (64 - #bits used), * this number conveniently equals the number of leading zeroes in * the physical address of __idmap_text_end. */ adrp x5, __idmap_text_end clz x5, x5 cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough? b.ge 1f // .. then skip VA range extension adr_l x6, idmap_t0sz str x5, [x6] dmb sy dc ivac, x6 // Invalidate potentially stale cache line #if (VA_BITS < 48) #define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3) #define EXTRA_PTRS (1 << (PHYS_MASK_SHIFT - EXTRA_SHIFT)) /* * If VA_BITS < 48, we have to configure an additional table level. 
* First, we have to verify our assumption that the current value of * VA_BITS was chosen such that all translation levels are fully * utilised, and that lowering T0SZ will always result in an additional * translation level to be configured. */ #if VA_BITS != EXTRA_SHIFT #error "Mismatch between VA_BITS and page size/number of translation levels" #endif mov x4, EXTRA_PTRS create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6 #else /* * If VA_BITS == 48, we don't have to configure an additional * translation level, but the top-level table has more entries. */ mov x4, #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT) str_l x4, idmap_ptrs_per_pgd, x5 #endif 1: ldr_l x4, idmap_ptrs_per_pgd mov x5, x3 // __pa(__idmap_text_start) adr_l x6, __idmap_text_end // __pa(__idmap_text_end) map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14 /* * Map the kernel image (starting with PHYS_OFFSET). */ adrp x0, swapper_pg_dir mov_q x5, KIMAGE_VADDR + TEXT_OFFSET // compile time __va(_text) add x5, x5, x23 // add KASLR displacement mov x4, PTRS_PER_PGD adrp x6, _end // runtime __pa(_end) adrp x3, _text // runtime __pa(_text) sub x6, x6, x3 // _end - _text add x6, x6, x5 // runtime __va(_end) map_memory x0, x1, x5, x6, x7, x3, x4, x10, x11, x12, x13, x14 /* * Since the page tables have been populated with non-cacheable * accesses (MMU disabled), invalidate the idmap and swapper page * tables again to remove any speculatively loaded cache lines. */ adrp x0, idmap_pg_dir adrp x1, swapper_pg_end sub x1, x1, x0 dmb sy bl __inval_dcache_area ret x28 ENDPROC(__create_page_tables) .ltorg /* * The following fragment of code is executed with the MMU enabled. * * x0 = __PHYS_OFFSET */ __primary_switched: adrp x4, init_thread_union add sp, x4, #THREAD_SIZE adr_l x5, init_task msr sp_el0, x5 // Save thread_info adr_l x8, vectors // load VBAR_EL1 with virtual msr vbar_el1, x8 // vector table address isb stp xzr, x30, [sp, #-16]! mov x29, sp str_l x21, __fdt_pointer, x5 // Save FDT pointer ldr_l x4, kimage_vaddr // Save the offset between sub x4, x4, x0 // the kernel virtual and str_l x4, kimage_voffset, x5 // physical mappings // Clear BSS adr_l x0, __bss_start mov x1, xzr adr_l x2, __bss_stop sub x2, x2, x0 bl __pi_memset dsb ishst // Make zero page visible to PTW #ifdef CONFIG_KASAN bl kasan_early_init #endif #ifdef CONFIG_RANDOMIZE_BASE tst x23, ~(MIN_KIMG_ALIGN - 1) // already running randomized? b.ne 0f mov x0, x21 // pass FDT address in x0 bl kaslr_early_init // parse FDT for KASLR options cbz x0, 0f // KASLR disabled? just proceed orr x23, x23, x0 // record KASLR offset ldp x29, x30, [sp], #16 // we must enable KASLR, return ret // to __primary_switch() 0: #endif add sp, sp, #16 mov x29, #0 mov x30, #0 b start_kernel ENDPROC(__primary_switched) /* * end early head section, begin head code that is also used for * hotplug and needs to have the same protections as the text region */ .section ".idmap.text","awx" ENTRY(kimage_vaddr) .quad _text - TEXT_OFFSET /* * If we're fortunate enough to boot at EL2, ensure that the world is * sane before dropping to EL1. * * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if * booted in EL1 or EL2 respectively. */ ENTRY(el2_setup) msr SPsel, #1 // We want to use SP_EL{1,2} mrs x0, CurrentEL cmp x0, #CurrentEL_EL2 b.eq 1f mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1) msr sctlr_el1, x0 mov w0, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1 isb ret 1: mov_q x0, (SCTLR_EL2_RES1 | ENDIAN_SET_EL2) msr sctlr_el2, x0 #ifdef CONFIG_ARM64_VHE /* * Check for VHE being present. 
For the rest of the EL2 setup, * x2 being non-zero indicates that we do have VHE, and that the * kernel is intended to run at EL2. */ mrs x2, id_aa64mmfr1_el1 ubfx x2, x2, #8, #4 #else mov x2, xzr #endif /* Hyp configuration. */ mov_q x0, HCR_HOST_NVHE_FLAGS cbz x2, set_hcr mov_q x0, HCR_HOST_VHE_FLAGS set_hcr: msr hcr_el2, x0 isb /* * Allow Non-secure EL1 and EL0 to access physical timer and counter. * This is not necessary for VHE, since the host kernel runs in EL2, * and EL0 accesses are configured in the later stage of boot process. * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1 * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in * EL2. */ cbnz x2, 1f mrs x0, cnthctl_el2 orr x0, x0, #3 // Enable EL1 physical timers msr cnthctl_el2, x0 1: msr cntvoff_el2, xzr // Clear virtual offset #ifdef CONFIG_ARM_GIC_V3 /* GICv3 system register access */ mrs x0, id_aa64pfr0_el1 ubfx x0, x0, #24, #4 cbz x0, 3f mrs_s x0, SYS_ICC_SRE_EL2 orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1 orr x0, x0, #ICC_SRE_EL2_ENABLE // Set ICC_SRE_EL2.Enable==1 msr_s SYS_ICC_SRE_EL2, x0 isb // Make sure SRE is now set mrs_s x0, SYS_ICC_SRE_EL2 // Read SRE back, tbz x0, #0, 3f // and check that it sticks msr_s SYS_ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults 3: #endif /* Populate ID registers. */ mrs x0, midr_el1 mrs x1, mpidr_el1 msr vpidr_el2, x0 msr vmpidr_el2, x1 #ifdef CONFIG_COMPAT msr hstr_el2, xzr // Disable CP15 traps to EL2 #endif /* EL2 debug */ mrs x1, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer sbfx x0, x1, #8, #4 cmp x0, #1 b.lt 4f // Skip if no PMU present mrs x0, pmcr_el0 // Disable debug access traps ubfx x0, x0, #11, #5 // to EL2 and allow access to 4: csel x3, xzr, x0, lt // all PMU counters from EL1 /* Statistical profiling */ ubfx x0, x1, #32, #4 // Check ID_AA64DFR0_EL1 PMSVer cbz x0, 7f // Skip if SPE not present cbnz x2, 6f // VHE? mrs_s x4, SYS_PMBIDR_EL1 // If SPE available at EL2, and x4, x4, #(1 << SYS_PMBIDR_EL1_P_SHIFT) cbnz x4, 5f // then permit sampling of physical mov x4, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \ 1 << SYS_PMSCR_EL2_PA_SHIFT) msr_s SYS_PMSCR_EL2, x4 // addresses and physical counter 5: mov x1, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT) orr x3, x3, x1 // If we don't have VHE, then b 7f // use EL1&0 translation. 6: // For VHE, use EL2 translation orr x3, x3, #MDCR_EL2_TPMS // and disable access from EL1 7: msr mdcr_el2, x3 // Configure debug traps /* LORegions */ mrs x1, id_aa64mmfr1_el1 ubfx x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4 cbz x0, 1f msr_s SYS_LORC_EL1, xzr 1: /* Stage-2 translation */ msr vttbr_el2, xzr cbz x2, install_el2_stub mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2 isb ret install_el2_stub: /* * When VHE is not in use, early init of EL2 and EL1 needs to be * done here. * When VHE _is_ in use, EL1 will not be used in the host and * requires no configuration, and all non-hyp-specific EL2 setup * will be done via the _EL1 system register aliases in __cpu_setup. */ mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1) msr sctlr_el1, x0 /* Coprocessor traps. */ mov x0, #0x33ff msr cptr_el2, x0 // Disable copro. traps to EL2 /* SVE register access */ mrs x1, id_aa64pfr0_el1 ubfx x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4 cbz x1, 7f bic x0, x0, #CPTR_EL2_TZ // Also disable SVE traps msr cptr_el2, x0 // Disable copro. 
traps to EL2 isb mov x1, #ZCR_ELx_LEN_MASK // SVE: Enable full vector msr_s SYS_ZCR_EL2, x1 // length for EL1. /* Hypervisor stub */ 7: adr_l x0, __hyp_stub_vectors msr vbar_el2, x0 /* spsr */ mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ PSR_MODE_EL1h) msr spsr_el2, x0 msr elr_el2, lr mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2 eret ENDPROC(el2_setup) /* * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed * in w0. See arch/arm64/include/asm/virt.h for more info. */ set_cpu_boot_mode_flag: adr_l x1, __boot_cpu_mode cmp w0, #BOOT_CPU_MODE_EL2 b.ne 1f add x1, x1, #4 1: str w0, [x1] // This CPU has booted in EL1 dmb sy dc ivac, x1 // Invalidate potentially stale cache line ret ENDPROC(set_cpu_boot_mode_flag) /* * These values are written with the MMU off, but read with the MMU on. * Writers will invalidate the corresponding address, discarding up to a * 'Cache Writeback Granule' (CWG) worth of data. The linker script ensures * sufficient alignment that the CWG doesn't overlap another section. */ .pushsection ".mmuoff.data.write", "aw" /* * We need to find out the CPU boot mode long after boot, so we need to * store it in a writable variable. * * This is not in .bss, because we set it sufficiently early that the boot-time * zeroing of .bss would clobber it. */ ENTRY(__boot_cpu_mode) .long BOOT_CPU_MODE_EL2 .long BOOT_CPU_MODE_EL1 /* * The booting CPU updates the failed status @__early_cpu_boot_status, * with MMU turned off. */ ENTRY(__early_cpu_boot_status) .quad 0 .popsection /* * This provides a "holding pen" for platforms to hold all secondary * cores are held until we're ready for them to initialise. */ ENTRY(secondary_holding_pen) bl el2_setup // Drop to EL1, w0=cpu_boot_mode bl set_cpu_boot_mode_flag mrs x0, mpidr_el1 mov_q x1, MPIDR_HWID_BITMASK and x0, x0, x1 adr_l x3, secondary_holding_pen_release pen: ldr x4, [x3] cmp x4, x0 b.eq secondary_startup wfe b pen ENDPROC(secondary_holding_pen) /* * Secondary entry point that jumps straight into the kernel. Only to * be used where CPUs are brought online dynamically by the kernel. */ ENTRY(secondary_entry) bl el2_setup // Drop to EL1 bl set_cpu_boot_mode_flag b secondary_startup ENDPROC(secondary_entry) secondary_startup: /* * Common entry point for secondary CPUs. */ bl __cpu_secondary_check52bitva bl __cpu_setup // initialise processor bl __enable_mmu ldr x8, =__secondary_switched br x8 ENDPROC(secondary_startup) __secondary_switched: adr_l x5, vectors msr vbar_el1, x5 isb adr_l x0, secondary_data ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack mov sp, x1 ldr x2, [x0, #CPU_BOOT_TASK] msr sp_el0, x2 mov x29, #0 mov x30, #0 b secondary_start_kernel ENDPROC(__secondary_switched) /* * The booting CPU updates the failed status @__early_cpu_boot_status, * with MMU turned off. * * update_early_cpu_boot_status tmp, status * - Corrupts tmp1, tmp2 * - Writes 'status' to __early_cpu_boot_status and makes sure * it is committed to memory. */ .macro update_early_cpu_boot_status status, tmp1, tmp2 mov \tmp2, #\status adr_l \tmp1, __early_cpu_boot_status str \tmp2, [\tmp1] dmb sy dc ivac, \tmp1 // Invalidate potentially stale cache line .endm /* * Enable the MMU. * * x0 = SCTLR_EL1 value for turning on the MMU. * * Returns to the caller via x30/lr. This requires the caller to be covered * by the .idmap.text section. * * Checks if the selected granule size is supported by the CPU. 
* If it isn't, park the CPU */ ENTRY(__enable_mmu) mrs x1, ID_AA64MMFR0_EL1 ubfx x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4 cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED b.ne __no_granule_support update_early_cpu_boot_status 0, x1, x2 adrp x1, idmap_pg_dir adrp x2, swapper_pg_dir phys_to_ttbr x3, x1 phys_to_ttbr x4, x2 msr ttbr0_el1, x3 // load TTBR0 msr ttbr1_el1, x4 // load TTBR1 isb msr sctlr_el1, x0 isb /* * Invalidate the local I-cache so that any instructions fetched * speculatively from the PoC are discarded, since they may have * been dynamically patched at the PoU. */ ic iallu dsb nsh isb ret ENDPROC(__enable_mmu) ENTRY(__cpu_secondary_check52bitva) #ifdef CONFIG_ARM64_52BIT_VA ldr_l x0, vabits_user cmp x0, #52 b.ne 2f mrs_s x0, SYS_ID_AA64MMFR2_EL1 and x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT) cbnz x0, 2f adr_l x0, va52mismatch mov w1, #1 strb w1, [x0] dmb sy dc ivac, x0 // Invalidate potentially stale cache line update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x0, x1 1: wfe wfi b 1b #endif 2: ret ENDPROC(__cpu_secondary_check52bitva) __no_granule_support: /* Indicate that this CPU can't boot and is stuck in the kernel */ update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x1, x2 1: wfe wfi b 1b ENDPROC(__no_granule_support) #ifdef CONFIG_RELOCATABLE __relocate_kernel: /* * Iterate over each entry in the relocation table, and apply the * relocations in place. */ ldr w9, =__rela_offset // offset to reloc table ldr w10, =__rela_size // size of reloc table mov_q x11, KIMAGE_VADDR // default virtual offset add x11, x11, x23 // actual virtual offset add x9, x9, x11 // __va(.rela) add x10, x9, x10 // __va(.rela) + sizeof(.rela) 0: cmp x9, x10 b.hs 1f ldp x11, x12, [x9], #24 ldr x13, [x9, #-8] cmp w12, #R_AARCH64_RELATIVE b.ne 0b add x13, x13, x23 // relocate str x13, [x11, x23] b 0b 1: ret ENDPROC(__relocate_kernel) #endif __primary_switch: #ifdef CONFIG_RANDOMIZE_BASE mov x19, x0 // preserve new SCTLR_EL1 value mrs x20, sctlr_el1 // preserve old SCTLR_EL1 value #endif bl __enable_mmu #ifdef CONFIG_RELOCATABLE bl __relocate_kernel #ifdef CONFIG_RANDOMIZE_BASE ldr x8, =__primary_switched adrp x0, __PHYS_OFFSET blr x8 /* * If we return here, we have a KASLR displacement in x23 which we need * to take into account by discarding the current kernel mapping and * creating a new one. */ pre_disable_mmu_workaround msr sctlr_el1, x20 // disable the MMU isb bl __create_page_tables // recreate kernel mapping tlbi vmalle1 // Remove any stale TLB entries dsb nsh isb msr sctlr_el1, x19 // re-enable the MMU isb ic iallu // flush instructions fetched dsb nsh // via old mapping isb bl __relocate_kernel #endif #endif ldr x8, =__primary_switched adrp x0, __PHYS_OFFSET br x8 ENDPROC(__primary_switch)
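The compute_indices/map_memory macros above work out, for each translation level, which span of table entries covers [vstart, vend]. Below is a minimal userspace C sketch of that index arithmetic, assuming a 4KB granule with 512 entries per table and made-up sample addresses; the extra scaling that compute_indices applies when a level spans multiple table pages is omitted.

#include <stdio.h>
#include <stdint.h>

#define PTRS_PER_TABLE 512UL                       /* 4KB granule: 512 8-byte entries */

/* Index of a virtual address within the table at a given level shift. */
static unsigned long tbl_index(uint64_t va, unsigned int shift)
{
	return (va >> shift) & (PTRS_PER_TABLE - 1);
}

int main(void)
{
	uint64_t vstart = 0xffff000010080000ULL;   /* hypothetical mapping range */
	uint64_t vend   = 0xffff000011ffffffULL;
	unsigned int shift[] = { 39, 30, 21, 12 }; /* PGD, PUD, PMD, PTE shifts  */

	for (int i = 0; i < 4; i++)
		printf("shift %2u: entries %lu..%lu\n", shift[i],
		       tbl_index(vstart, shift[i]), tbl_index(vend, shift[i]));
	return 0;
}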
AirFortressIlikara/LS2K0300-linux-4.19
4,905
arch/arm64/kernel/efi-header.S
/* * Copyright (C) 2013 - 2017 Linaro, Ltd. * Copyright (C) 2013, 2014 Red Hat, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/pe.h> #include <linux/sizes.h> .macro __EFI_PE_HEADER .long PE_MAGIC coff_header: .short IMAGE_FILE_MACHINE_ARM64 // Machine .short section_count // NumberOfSections .long 0 // TimeDateStamp .long 0 // PointerToSymbolTable .long 0 // NumberOfSymbols .short section_table - optional_header // SizeOfOptionalHeader .short IMAGE_FILE_DEBUG_STRIPPED | \ IMAGE_FILE_EXECUTABLE_IMAGE | \ IMAGE_FILE_LINE_NUMS_STRIPPED // Characteristics optional_header: .short PE_OPT_MAGIC_PE32PLUS // PE32+ format .byte 0x02 // MajorLinkerVersion .byte 0x14 // MinorLinkerVersion .long __initdata_begin - efi_header_end // SizeOfCode .long __pecoff_data_size // SizeOfInitializedData .long 0 // SizeOfUninitializedData .long __efistub_entry - _head // AddressOfEntryPoint .long efi_header_end - _head // BaseOfCode extra_header_fields: .quad 0 // ImageBase .long SZ_4K // SectionAlignment .long PECOFF_FILE_ALIGNMENT // FileAlignment .short 0 // MajorOperatingSystemVersion .short 0 // MinorOperatingSystemVersion .short 0 // MajorImageVersion .short 0 // MinorImageVersion .short 0 // MajorSubsystemVersion .short 0 // MinorSubsystemVersion .long 0 // Win32VersionValue .long _end - _head // SizeOfImage // Everything before the kernel image is considered part of the header .long efi_header_end - _head // SizeOfHeaders .long 0 // CheckSum .short IMAGE_SUBSYSTEM_EFI_APPLICATION // Subsystem .short 0 // DllCharacteristics .quad 0 // SizeOfStackReserve .quad 0 // SizeOfStackCommit .quad 0 // SizeOfHeapReserve .quad 0 // SizeOfHeapCommit .long 0 // LoaderFlags .long (section_table - .) / 8 // NumberOfRvaAndSizes .quad 0 // ExportTable .quad 0 // ImportTable .quad 0 // ResourceTable .quad 0 // ExceptionTable .quad 0 // CertificationTable .quad 0 // BaseRelocationTable #ifdef CONFIG_DEBUG_EFI .long efi_debug_table - _head // DebugTable .long efi_debug_table_size #endif // Section table section_table: .ascii ".text\0\0\0" .long __initdata_begin - efi_header_end // VirtualSize .long efi_header_end - _head // VirtualAddress .long __initdata_begin - efi_header_end // SizeOfRawData .long efi_header_end - _head // PointerToRawData .long 0 // PointerToRelocations .long 0 // PointerToLineNumbers .short 0 // NumberOfRelocations .short 0 // NumberOfLineNumbers .long IMAGE_SCN_CNT_CODE | \ IMAGE_SCN_MEM_READ | \ IMAGE_SCN_MEM_EXECUTE // Characteristics .ascii ".data\0\0\0" .long __pecoff_data_size // VirtualSize .long __initdata_begin - _head // VirtualAddress .long __pecoff_data_rawsize // SizeOfRawData .long __initdata_begin - _head // PointerToRawData .long 0 // PointerToRelocations .long 0 // PointerToLineNumbers .short 0 // NumberOfRelocations .short 0 // NumberOfLineNumbers .long IMAGE_SCN_CNT_INITIALIZED_DATA | \ IMAGE_SCN_MEM_READ | \ IMAGE_SCN_MEM_WRITE // Characteristics .set section_count, (. - section_table) / 40 #ifdef CONFIG_DEBUG_EFI /* * The debug table is referenced via its Relative Virtual Address (RVA), * which is only defined for those parts of the image that are covered * by a section declaration. Since this header is not covered by any * section, the debug table must be emitted elsewhere. So stick it in * the .init.rodata section instead. 
* * Note that the EFI debug entry itself may legally have a zero RVA, * which means we can simply put it right after the section headers. */ __INITRODATA .align 2 efi_debug_table: // EFI_IMAGE_DEBUG_DIRECTORY_ENTRY .long 0 // Characteristics .long 0 // TimeDateStamp .short 0 // MajorVersion .short 0 // MinorVersion .long IMAGE_DEBUG_TYPE_CODEVIEW // Type .long efi_debug_entry_size // SizeOfData .long 0 // RVA .long efi_debug_entry - _head // FileOffset .set efi_debug_table_size, . - efi_debug_table .previous efi_debug_entry: // EFI_IMAGE_DEBUG_CODEVIEW_NB10_ENTRY .ascii "NB10" // Signature .long 0 // Unknown .long 0 // Unknown2 .long 0 // Unknown3 .asciz VMLINUX_PATH .set efi_debug_entry_size, . - efi_debug_entry #endif /* * EFI will load .text onwards at the 4k section alignment * described in the PE/COFF header. To ensure that instruction * sequences using an adrp and a :lo12: immediate will function * correctly at this alignment, we must ensure that .text is * placed at a 4k boundary in the Image to begin with. */ .align 12 efi_header_end: .endm
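The __EFI_PE_HEADER macro above emits two PE/COFF section headers (.text and .data) and then sets section_count to (. - section_table) / 40; the divisor is simply the size of one section header. A small C check of that size, written as an illustration rather than taken from the kernel:

#include <assert.h>
#include <stdint.h>

/* Layout of a PE/COFF section table entry (IMAGE_SECTION_HEADER). */
struct coff_section_header {
	char     name[8];
	uint32_t virtual_size;
	uint32_t virtual_address;
	uint32_t size_of_raw_data;
	uint32_t pointer_to_raw_data;
	uint32_t pointer_to_relocations;
	uint32_t pointer_to_line_numbers;
	uint16_t number_of_relocations;
	uint16_t number_of_line_numbers;
	uint32_t characteristics;
};

int main(void)
{
	/* Matches the divisor in ".set section_count, (. - section_table) / 40". */
	assert(sizeof(struct coff_section_header) == 40);
	return 0;
}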
AirFortressIlikara/LS2K0300-linux-4.19
29,246
arch/arm64/kernel/entry.S
/* * Low-level exception handling code * * Copyright (C) 2012 ARM Ltd. * Authors: Catalin Marinas <catalin.marinas@arm.com> * Will Deacon <will.deacon@arm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/arm-smccc.h> #include <linux/init.h> #include <linux/linkage.h> #include <asm/alternative.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/cpufeature.h> #include <asm/errno.h> #include <asm/esr.h> #include <asm/irq.h> #include <asm/memory.h> #include <asm/mmu.h> #include <asm/processor.h> #include <asm/ptrace.h> #include <asm/thread_info.h> #include <asm/asm-uaccess.h> #include <asm/unistd.h> /* * Context tracking subsystem. Used to instrument transitions * between user and kernel mode. */ .macro ct_user_exit #ifdef CONFIG_CONTEXT_TRACKING bl context_tracking_user_exit #endif .endm .macro ct_user_enter #ifdef CONFIG_CONTEXT_TRACKING bl context_tracking_user_enter #endif .endm .macro clear_gp_regs .irp n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29 mov x\n, xzr .endr .endm /* * Bad Abort numbers *----------------- */ #define BAD_SYNC 0 #define BAD_IRQ 1 #define BAD_FIQ 2 #define BAD_ERROR 3 .macro kernel_ventry, el, label, regsize = 64 .align 7 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 alternative_if ARM64_UNMAP_KERNEL_AT_EL0 .if \el == 0 .if \regsize == 64 mrs x30, tpidrro_el0 msr tpidrro_el0, xzr .else mov x30, xzr .endif .endif alternative_else_nop_endif #endif sub sp, sp, #S_FRAME_SIZE #ifdef CONFIG_VMAP_STACK /* * Test whether the SP has overflowed, without corrupting a GPR. * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT). */ add sp, sp, x0 // sp' = sp + x0 sub x0, sp, x0 // x0' = sp' - x0 = (sp + x0) - x0 = sp tbnz x0, #THREAD_SHIFT, 0f sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0 sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp b el\()\el\()_\label 0: /* * Either we've just detected an overflow, or we've taken an exception * while on the overflow stack. Either way, we won't return to * userspace, and can clobber EL0 registers to free up GPRs. */ /* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */ msr tpidr_el0, x0 /* Recover the original x0 value and stash it in tpidrro_el0 */ sub x0, sp, x0 msr tpidrro_el0, x0 /* Switch to the overflow stack */ adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0 /* * Check whether we were already on the overflow stack. This may happen * after panic() re-enables interrupts. */ mrs x0, tpidr_el0 // sp of interrupted context sub x0, sp, x0 // delta with top of overflow stack tst x0, #~(OVERFLOW_STACK_SIZE - 1) // within range? b.ne __bad_stack // no? -> bad stack pointer /* We were already on the overflow stack. Restore sp/x0 and carry on. */ sub sp, sp, x0 mrs x0, tpidrro_el0 #endif b el\()\el\()_\label .endm .macro tramp_alias, dst, sym mov_q \dst, TRAMP_VALIAS add \dst, \dst, #(\sym - .entry.tramp.text) .endm // This macro corrupts x0-x3. It is the caller's duty // to save/restore them if required. 
.macro apply_ssbd, state, tmp1, tmp2 #ifdef CONFIG_ARM64_SSBD alternative_cb arm64_enable_wa2_handling b .L__asm_ssbd_skip\@ alternative_cb_end ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1 cbz \tmp2, .L__asm_ssbd_skip\@ ldr \tmp2, [tsk, #TSK_TI_FLAGS] tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2 mov w1, #\state alternative_cb arm64_update_smccc_conduit nop // Patched to SMC/HVC #0 alternative_cb_end .L__asm_ssbd_skip\@: #endif .endm .macro kernel_entry, el, regsize = 64 .if \regsize == 32 mov w0, w0 // zero upper 32 bits of x0 .endif stp x0, x1, [sp, #16 * 0] stp x2, x3, [sp, #16 * 1] stp x4, x5, [sp, #16 * 2] stp x6, x7, [sp, #16 * 3] stp x8, x9, [sp, #16 * 4] stp x10, x11, [sp, #16 * 5] stp x12, x13, [sp, #16 * 6] stp x14, x15, [sp, #16 * 7] stp x16, x17, [sp, #16 * 8] stp x18, x19, [sp, #16 * 9] stp x20, x21, [sp, #16 * 10] stp x22, x23, [sp, #16 * 11] stp x24, x25, [sp, #16 * 12] stp x26, x27, [sp, #16 * 13] stp x28, x29, [sp, #16 * 14] .if \el == 0 clear_gp_regs mrs x21, sp_el0 ldr_this_cpu tsk, __entry_task, x20 // Ensure MDSCR_EL1.SS is clear, ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug disable_step_tsk x19, x20 // exceptions when scheduling. apply_ssbd 1, x22, x23 .else add x21, sp, #S_FRAME_SIZE get_thread_info tsk /* Save the task's original addr_limit and set USER_DS */ ldr x20, [tsk, #TSK_TI_ADDR_LIMIT] str x20, [sp, #S_ORIG_ADDR_LIMIT] mov x20, #USER_DS str x20, [tsk, #TSK_TI_ADDR_LIMIT] /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */ .endif /* \el == 0 */ mrs x22, elr_el1 mrs x23, spsr_el1 stp lr, x21, [sp, #S_LR] /* * In order to be able to dump the contents of struct pt_regs at the * time the exception was taken (in case we attempt to walk the call * stack later), chain it together with the stack frames. */ .if \el == 0 stp xzr, xzr, [sp, #S_STACKFRAME] .else stp x29, x22, [sp, #S_STACKFRAME] .endif add x29, sp, #S_STACKFRAME #ifdef CONFIG_ARM64_SW_TTBR0_PAN /* * Set the TTBR0 PAN bit in SPSR. When the exception is taken from * EL0, there is no need to check the state of TTBR0_EL1 since * accesses are always enabled. * Note that the meaning of this bit differs from the ARMv8.1 PAN * feature as all TTBR0_EL1 accesses are disabled, not just those to * user mappings. */ alternative_if ARM64_HAS_PAN b 1f // skip TTBR0 PAN alternative_else_nop_endif .if \el != 0 mrs x21, ttbr0_el1 tst x21, #TTBR_ASID_MASK // Check for the reserved ASID orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR b.eq 1f // TTBR0 access already disabled and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR .endif __uaccess_ttbr0_disable x21 1: #endif stp x22, x23, [sp, #S_PC] /* Not in a syscall by default (el0_svc overwrites for real syscall) */ .if \el == 0 mov w21, #NO_SYSCALL str w21, [sp, #S_SYSCALLNO] .endif /* * Set sp_el0 to current thread_info. */ .if \el == 0 msr sp_el0, tsk .endif /* * Registers that may be useful after this macro is invoked: * * x21 - aborted SP * x22 - aborted PC * x23 - aborted PSTATE */ .endm .macro kernel_exit, el .if \el != 0 disable_daif /* Restore the task's original addr_limit. */ ldr x20, [sp, #S_ORIG_ADDR_LIMIT] str x20, [tsk, #TSK_TI_ADDR_LIMIT] /* No need to restore UAO, it will be restored from SPSR_EL1 */ .endif ldp x21, x22, [sp, #S_PC] // load ELR, SPSR .if \el == 0 ct_user_enter .endif #ifdef CONFIG_ARM64_SW_TTBR0_PAN /* * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR * PAN bit checking. 
*/ alternative_if ARM64_HAS_PAN b 2f // skip TTBR0 PAN alternative_else_nop_endif .if \el != 0 tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set .endif __uaccess_ttbr0_enable x0, x1 .if \el == 0 /* * Enable errata workarounds only if returning to user. The only * workaround currently required for TTBR0_EL1 changes are for the * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache * corruption). */ bl post_ttbr_update_workaround .endif 1: .if \el != 0 and x22, x22, #~PSR_PAN_BIT // ARMv8.0 CPUs do not understand this bit .endif 2: #endif .if \el == 0 ldr x23, [sp, #S_SP] // load return stack pointer msr sp_el0, x23 tst x22, #PSR_MODE32_BIT // native task? b.eq 3f #ifdef CONFIG_ARM64_ERRATUM_845719 alternative_if ARM64_WORKAROUND_845719 #ifdef CONFIG_PID_IN_CONTEXTIDR mrs x29, contextidr_el1 msr contextidr_el1, x29 #else msr contextidr_el1, xzr #endif alternative_else_nop_endif #endif 3: apply_ssbd 0, x0, x1 .endif msr elr_el1, x21 // set up the return data msr spsr_el1, x22 ldp x0, x1, [sp, #16 * 0] ldp x2, x3, [sp, #16 * 1] ldp x4, x5, [sp, #16 * 2] ldp x6, x7, [sp, #16 * 3] ldp x8, x9, [sp, #16 * 4] ldp x10, x11, [sp, #16 * 5] ldp x12, x13, [sp, #16 * 6] ldp x14, x15, [sp, #16 * 7] ldp x16, x17, [sp, #16 * 8] ldp x18, x19, [sp, #16 * 9] ldp x20, x21, [sp, #16 * 10] ldp x22, x23, [sp, #16 * 11] ldp x24, x25, [sp, #16 * 12] ldp x26, x27, [sp, #16 * 13] ldp x28, x29, [sp, #16 * 14] ldr lr, [sp, #S_LR] add sp, sp, #S_FRAME_SIZE // restore sp /* * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on eret context synchronization * when returning from IPI handler, and when returning to user-space. */ .if \el == 0 alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 bne 4f msr far_el1, x30 tramp_alias x30, tramp_exit_native br x30 4: tramp_alias x30, tramp_exit_compat br x30 #endif .else eret .endif .endm .macro irq_stack_entry mov x19, sp // preserve the original sp /* * Compare sp with the base of the task stack. * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack, * and should switch to the irq stack. */ ldr x25, [tsk, TSK_STACK] eor x25, x25, x19 and x25, x25, #~(THREAD_SIZE - 1) cbnz x25, 9998f ldr_this_cpu x25, irq_stack_ptr, x26 mov x26, #IRQ_STACK_SIZE add x26, x25, x26 /* switch to the irq stack */ mov sp, x26 9998: .endm /* * x19 should be preserved between irq_stack_entry and * irq_stack_exit. */ .macro irq_stack_exit mov sp, x19 .endm /* * These are the registers used in the syscall handler, and allow us to * have in theory up to 7 arguments to a function - x0 to x6. * * x7 is reserved for the system call number in 32-bit mode. */ wsc_nr .req w25 // number of system calls xsc_nr .req x25 // number of system calls (zero-extended) wscno .req w26 // syscall number xscno .req x26 // syscall number (zero-extended) stbl .req x27 // syscall table pointer tsk .req x28 // current thread_info /* * Interrupt handling. */ .macro irq_handler ldr_l x1, handle_arch_irq mov x0, sp irq_stack_entry blr x1 irq_stack_exit .endm .text /* * Exception vectors. 
*/ .pushsection ".entry.text", "ax" .align 11 ENTRY(vectors) kernel_ventry 1, sync_invalid // Synchronous EL1t kernel_ventry 1, irq_invalid // IRQ EL1t kernel_ventry 1, fiq_invalid // FIQ EL1t kernel_ventry 1, error_invalid // Error EL1t kernel_ventry 1, sync // Synchronous EL1h kernel_ventry 1, irq // IRQ EL1h kernel_ventry 1, fiq_invalid // FIQ EL1h kernel_ventry 1, error // Error EL1h kernel_ventry 0, sync // Synchronous 64-bit EL0 kernel_ventry 0, irq // IRQ 64-bit EL0 kernel_ventry 0, fiq_invalid // FIQ 64-bit EL0 kernel_ventry 0, error // Error 64-bit EL0 #ifdef CONFIG_COMPAT kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0 kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0 kernel_ventry 0, fiq_invalid_compat, 32 // FIQ 32-bit EL0 kernel_ventry 0, error_compat, 32 // Error 32-bit EL0 #else kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0 kernel_ventry 0, irq_invalid, 32 // IRQ 32-bit EL0 kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0 kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0 #endif END(vectors) #ifdef CONFIG_VMAP_STACK /* * We detected an overflow in kernel_ventry, which switched to the * overflow stack. Stash the exception regs, and head to our overflow * handler. */ __bad_stack: /* Restore the original x0 value */ mrs x0, tpidrro_el0 /* * Store the original GPRs to the new stack. The orginal SP (minus * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry. */ sub sp, sp, #S_FRAME_SIZE kernel_entry 1 mrs x0, tpidr_el0 add x0, x0, #S_FRAME_SIZE str x0, [sp, #S_SP] /* Stash the regs for handle_bad_stack */ mov x0, sp /* Time to die */ bl handle_bad_stack ASM_BUG() #endif /* CONFIG_VMAP_STACK */ /* * Invalid mode handlers */ .macro inv_entry, el, reason, regsize = 64 kernel_entry \el, \regsize mov x0, sp mov x1, #\reason mrs x2, esr_el1 bl bad_mode ASM_BUG() .endm el0_sync_invalid: inv_entry 0, BAD_SYNC ENDPROC(el0_sync_invalid) el0_irq_invalid: inv_entry 0, BAD_IRQ ENDPROC(el0_irq_invalid) el0_fiq_invalid: inv_entry 0, BAD_FIQ ENDPROC(el0_fiq_invalid) el0_error_invalid: inv_entry 0, BAD_ERROR ENDPROC(el0_error_invalid) #ifdef CONFIG_COMPAT el0_fiq_invalid_compat: inv_entry 0, BAD_FIQ, 32 ENDPROC(el0_fiq_invalid_compat) #endif el1_sync_invalid: inv_entry 1, BAD_SYNC ENDPROC(el1_sync_invalid) el1_irq_invalid: inv_entry 1, BAD_IRQ ENDPROC(el1_irq_invalid) el1_fiq_invalid: inv_entry 1, BAD_FIQ ENDPROC(el1_fiq_invalid) el1_error_invalid: inv_entry 1, BAD_ERROR ENDPROC(el1_error_invalid) /* * EL1 mode handlers. 
*/ .align 6 el1_sync: kernel_entry 1 mrs x1, esr_el1 // read the syndrome register lsr x24, x1, #ESR_ELx_EC_SHIFT // exception class cmp x24, #ESR_ELx_EC_DABT_CUR // data abort in EL1 b.eq el1_da cmp x24, #ESR_ELx_EC_IABT_CUR // instruction abort in EL1 b.eq el1_ia cmp x24, #ESR_ELx_EC_SYS64 // configurable trap b.eq el1_undef cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception b.eq el1_sp_pc cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception b.eq el1_sp_pc cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1 b.eq el1_undef cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1 b.ge el1_dbg b el1_inv el1_ia: /* * Fall through to the Data abort case */ el1_da: /* * Data abort handling */ mrs x3, far_el1 inherit_daif pstate=x23, tmp=x2 clear_address_tag x0, x3 mov x2, sp // struct pt_regs bl do_mem_abort kernel_exit 1 el1_sp_pc: /* * Stack or PC alignment exception handling */ mrs x0, far_el1 inherit_daif pstate=x23, tmp=x2 mov x2, sp bl do_sp_pc_abort ASM_BUG() el1_undef: /* * Undefined instruction */ inherit_daif pstate=x23, tmp=x2 mov x0, sp bl do_undefinstr kernel_exit 1 el1_dbg: /* * Debug exception handling */ cmp x24, #ESR_ELx_EC_BRK64 // if BRK64 cinc x24, x24, eq // set bit '0' tbz x24, #0, el1_inv // EL1 only mrs x0, far_el1 mov x2, sp // struct pt_regs bl do_debug_exception kernel_exit 1 el1_inv: // TODO: add support for undefined instructions in kernel mode inherit_daif pstate=x23, tmp=x2 mov x0, sp mov x2, x1 mov x1, #BAD_SYNC bl bad_mode ASM_BUG() ENDPROC(el1_sync) .align 6 el1_irq: kernel_entry 1 enable_da_f #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off #endif irq_handler #ifdef CONFIG_PREEMPT ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count cbnz w24, 1f // preempt count != 0 ldr x0, [tsk, #TSK_TI_FLAGS] // get flags tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? bl el1_preempt 1: #endif #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_on #endif kernel_exit 1 ENDPROC(el1_irq) #ifdef CONFIG_PREEMPT el1_preempt: mov x24, lr 1: bl preempt_schedule_irq // irq en/disable is done inside ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling? ret x24 #endif /* * EL0 mode handlers. 
*/ .align 6 el0_sync: kernel_entry 0 mrs x25, esr_el1 // read the syndrome register lsr x24, x25, #ESR_ELx_EC_SHIFT // exception class cmp x24, #ESR_ELx_EC_SVC64 // SVC in 64-bit state b.eq el0_svc cmp x24, #ESR_ELx_EC_DABT_LOW // data abort in EL0 b.eq el0_da cmp x24, #ESR_ELx_EC_IABT_LOW // instruction abort in EL0 b.eq el0_ia cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access b.eq el0_fpsimd_acc cmp x24, #ESR_ELx_EC_SVE // SVE access b.eq el0_sve_acc cmp x24, #ESR_ELx_EC_FP_EXC64 // FP/ASIMD exception b.eq el0_fpsimd_exc cmp x24, #ESR_ELx_EC_SYS64 // configurable trap b.eq el0_sys cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception b.eq el0_sp_pc cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception b.eq el0_sp_pc cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0 b.eq el0_undef cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0 b.ge el0_dbg b el0_inv #ifdef CONFIG_COMPAT .align 6 el0_sync_compat: kernel_entry 0, 32 mrs x25, esr_el1 // read the syndrome register lsr x24, x25, #ESR_ELx_EC_SHIFT // exception class cmp x24, #ESR_ELx_EC_SVC32 // SVC in 32-bit state b.eq el0_svc_compat cmp x24, #ESR_ELx_EC_DABT_LOW // data abort in EL0 b.eq el0_da cmp x24, #ESR_ELx_EC_IABT_LOW // instruction abort in EL0 b.eq el0_ia cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access b.eq el0_fpsimd_acc cmp x24, #ESR_ELx_EC_FP_EXC32 // FP/ASIMD exception b.eq el0_fpsimd_exc cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception b.eq el0_sp_pc cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0 b.eq el0_undef cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap b.eq el0_undef cmp x24, #ESR_ELx_EC_CP15_64 // CP15 MRRC/MCRR trap b.eq el0_undef cmp x24, #ESR_ELx_EC_CP14_MR // CP14 MRC/MCR trap b.eq el0_undef cmp x24, #ESR_ELx_EC_CP14_LS // CP14 LDC/STC trap b.eq el0_undef cmp x24, #ESR_ELx_EC_CP14_64 // CP14 MRRC/MCRR trap b.eq el0_undef cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0 b.ge el0_dbg b el0_inv el0_svc_compat: mov x0, sp bl el0_svc_compat_handler b ret_to_user .align 6 el0_irq_compat: kernel_entry 0, 32 b el0_irq_naked el0_error_compat: kernel_entry 0, 32 b el0_error_naked #endif el0_da: /* * Data abort handling */ mrs x26, far_el1 enable_daif ct_user_exit clear_address_tag x0, x26 mov x1, x25 mov x2, sp bl do_mem_abort b ret_to_user el0_ia: /* * Instruction abort handling */ mrs x26, far_el1 enable_da_f #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off #endif ct_user_exit mov x0, x26 mov x1, x25 mov x2, sp bl do_el0_ia_bp_hardening b ret_to_user el0_fpsimd_acc: /* * Floating Point or Advanced SIMD access */ enable_daif ct_user_exit mov x0, x25 mov x1, sp bl do_fpsimd_acc b ret_to_user el0_sve_acc: /* * Scalable Vector Extension access */ enable_daif ct_user_exit mov x0, x25 mov x1, sp bl do_sve_acc b ret_to_user el0_fpsimd_exc: /* * Floating Point, Advanced SIMD or SVE exception */ enable_daif ct_user_exit mov x0, x25 mov x1, sp bl do_fpsimd_exc b ret_to_user el0_sp_pc: /* * Stack or PC alignment exception handling */ mrs x26, far_el1 enable_da_f #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off #endif ct_user_exit mov x0, x26 mov x1, x25 mov x2, sp bl do_sp_pc_abort b ret_to_user el0_undef: /* * Undefined instruction */ enable_daif ct_user_exit mov x0, sp bl do_undefinstr b ret_to_user el0_sys: /* * System instructions, for trapped cache maintenance instructions */ enable_daif ct_user_exit mov x0, x25 mov x1, sp bl do_sysinstr b ret_to_user el0_dbg: /* * Debug exception handling */ tbnz x24, #0, el0_inv // EL0 only mrs x0, far_el1 mov x1, x25 mov x2, sp bl 
do_debug_exception enable_da_f ct_user_exit b ret_to_user el0_inv: enable_daif ct_user_exit mov x0, sp mov x1, #BAD_SYNC mov x2, x25 bl bad_el0_sync b ret_to_user ENDPROC(el0_sync) .align 6 el0_irq: kernel_entry 0 el0_irq_naked: enable_da_f #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off #endif ct_user_exit #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR tbz x22, #55, 1f bl do_el0_irq_bp_hardening 1: #endif irq_handler #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_on #endif b ret_to_user ENDPROC(el0_irq) el1_error: kernel_entry 1 mrs x1, esr_el1 enable_dbg mov x0, sp bl do_serror kernel_exit 1 ENDPROC(el1_error) el0_error: kernel_entry 0 el0_error_naked: mrs x1, esr_el1 enable_dbg mov x0, sp bl do_serror enable_da_f ct_user_exit b ret_to_user ENDPROC(el0_error) /* * Ok, we need to do extra processing, enter the slow path. */ work_pending: mov x0, sp // 'regs' bl do_notify_resume #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_on // enabled while in userspace #endif ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for single-step b finish_ret_to_user /* * "slow" syscall return path. */ ret_to_user: disable_daif ldr x1, [tsk, #TSK_TI_FLAGS] and x2, x1, #_TIF_WORK_MASK cbnz x2, work_pending finish_ret_to_user: enable_step_tsk x1, x2 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK bl stackleak_erase #endif kernel_exit 0 ENDPROC(ret_to_user) /* * SVC handler. */ .align 6 el0_svc: mov x0, sp bl el0_svc_handler b ret_to_user ENDPROC(el0_svc) .popsection // .entry.text #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 /* * Exception vectors trampoline. */ .pushsection ".entry.tramp.text", "ax" .macro tramp_map_kernel, tmp mrs \tmp, ttbr1_el1 add \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE) bic \tmp, \tmp, #USER_ASID_FLAG msr ttbr1_el1, \tmp #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003 /* ASID already in \tmp[63:48] */ movk \tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12) movk \tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12) /* 2MB boundary containing the vectors, so we nobble the walk cache */ movk \tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12) isb tlbi vae1, \tmp dsb nsh alternative_else_nop_endif #endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */ .endm .macro tramp_unmap_kernel, tmp mrs \tmp, ttbr1_el1 sub \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE) orr \tmp, \tmp, #USER_ASID_FLAG msr ttbr1_el1, \tmp /* * We avoid running the post_ttbr_update_workaround here because * it's only needed by Cavium ThunderX, which requires KPTI to be * disabled. */ .endm .macro tramp_ventry, regsize = 64 .align 7 1: .if \regsize == 64 msr tpidrro_el0, x30 // Restored in kernel_ventry .endif /* * Defend against branch aliasing attacks by pushing a dummy * entry onto the return stack and using a RET instruction to * enter the full-fat kernel vectors. */ bl 2f b . 
2: tramp_map_kernel x30 #ifdef CONFIG_RANDOMIZE_BASE adr x30, tramp_vectors + PAGE_SIZE alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003 ldr x30, [x30] #else ldr x30, =vectors #endif prfm plil1strm, [x30, #(1b - tramp_vectors)] msr vbar_el1, x30 add x30, x30, #(1b - tramp_vectors) isb ret .endm .macro tramp_exit, regsize = 64 adr x30, tramp_vectors msr vbar_el1, x30 tramp_unmap_kernel x30 .if \regsize == 64 mrs x30, far_el1 .endif eret .endm .align 11 ENTRY(tramp_vectors) .space 0x400 tramp_ventry tramp_ventry tramp_ventry tramp_ventry tramp_ventry 32 tramp_ventry 32 tramp_ventry 32 tramp_ventry 32 END(tramp_vectors) ENTRY(tramp_exit_native) tramp_exit END(tramp_exit_native) ENTRY(tramp_exit_compat) tramp_exit 32 END(tramp_exit_compat) .ltorg .popsection // .entry.tramp.text #ifdef CONFIG_RANDOMIZE_BASE .pushsection ".rodata", "a" .align PAGE_SHIFT .globl __entry_tramp_data_start __entry_tramp_data_start: .quad vectors .popsection // .rodata #endif /* CONFIG_RANDOMIZE_BASE */ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ /* * Register switch for AArch64. The callee-saved registers need to be saved * and restored. On entry: * x0 = previous task_struct (must be preserved across the switch) * x1 = next task_struct * Previous and next are guaranteed not to be the same. * */ ENTRY(cpu_switch_to) mov x10, #THREAD_CPU_CONTEXT add x8, x0, x10 mov x9, sp stp x19, x20, [x8], #16 // store callee-saved registers stp x21, x22, [x8], #16 stp x23, x24, [x8], #16 stp x25, x26, [x8], #16 stp x27, x28, [x8], #16 stp x29, x9, [x8], #16 str lr, [x8] add x8, x1, x10 ldp x19, x20, [x8], #16 // restore callee-saved registers ldp x21, x22, [x8], #16 ldp x23, x24, [x8], #16 ldp x25, x26, [x8], #16 ldp x27, x28, [x8], #16 ldp x29, x9, [x8], #16 ldr lr, [x8] mov sp, x9 msr sp_el0, x1 ret ENDPROC(cpu_switch_to) NOKPROBE(cpu_switch_to) /* * This is how we return from a fork. */ ENTRY(ret_from_fork) bl schedule_tail cbz x19, 1f // not a kernel thread mov x0, x20 blr x19 1: get_thread_info tsk b ret_to_user ENDPROC(ret_from_fork) NOKPROBE(ret_from_fork) #ifdef CONFIG_ARM_SDE_INTERFACE #include <asm/sdei.h> #include <uapi/linux/arm_sdei.h> .macro sdei_handler_exit exit_mode /* On success, this call never returns... */ cmp \exit_mode, #SDEI_EXIT_SMC b.ne 99f smc #0 b . 99: hvc #0 b . .endm #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 /* * The regular SDEI entry point may have been unmapped along with the rest of * the kernel. This trampoline restores the kernel mapping to make the x1 memory * argument accessible. * * This clobbers x4, __sdei_handler() will restore this from firmware's * copy. */ .ltorg .pushsection ".entry.tramp.text", "ax" ENTRY(__sdei_asm_entry_trampoline) mrs x4, ttbr1_el1 tbz x4, #USER_ASID_BIT, 1f tramp_map_kernel tmp=x4 isb mov x4, xzr /* * Use reg->interrupted_regs.addr_limit to remember whether to unmap * the kernel on exit. */ 1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)] #ifdef CONFIG_RANDOMIZE_BASE adr x4, tramp_vectors + PAGE_SIZE add x4, x4, #:lo12:__sdei_asm_trampoline_next_handler ldr x4, [x4] #else ldr x4, =__sdei_asm_handler #endif br x4 ENDPROC(__sdei_asm_entry_trampoline) NOKPROBE(__sdei_asm_entry_trampoline) /* * Make the exit call and restore the original ttbr1_el1 * * x0 & x1: setup for the exit API call * x2: exit_mode * x4: struct sdei_registered_event argument from registration time. 
*/ ENTRY(__sdei_asm_exit_trampoline) ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)] cbnz x4, 1f tramp_unmap_kernel tmp=x4 1: sdei_handler_exit exit_mode=x2 ENDPROC(__sdei_asm_exit_trampoline) NOKPROBE(__sdei_asm_exit_trampoline) .ltorg .popsection // .entry.tramp.text #ifdef CONFIG_RANDOMIZE_BASE .pushsection ".rodata", "a" __sdei_asm_trampoline_next_handler: .quad __sdei_asm_handler .popsection // .rodata #endif /* CONFIG_RANDOMIZE_BASE */ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ /* * Software Delegated Exception entry point. * * x0: Event number * x1: struct sdei_registered_event argument from registration time. * x2: interrupted PC * x3: interrupted PSTATE * x4: maybe clobbered by the trampoline * * Firmware has preserved x0->x17 for us, we must save/restore the rest to * follow SMC-CC. We save (or retrieve) all the registers as the handler may * want them. */ ENTRY(__sdei_asm_handler) stp x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC] stp x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2] stp x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3] stp x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4] stp x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5] stp x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6] stp x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7] stp x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8] stp x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9] stp x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10] stp x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11] stp x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12] stp x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13] stp x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14] mov x4, sp stp lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR] mov x19, x1 #ifdef CONFIG_VMAP_STACK /* * entry.S may have been using sp as a scratch register, find whether * this is a normal or critical event and switch to the appropriate * stack for this CPU. */ ldrb w4, [x19, #SDEI_EVENT_PRIORITY] cbnz w4, 1f ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6 b 2f 1: ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6 2: mov x6, #SDEI_STACK_SIZE add x5, x5, x6 mov sp, x5 #endif /* * We may have interrupted userspace, or a guest, or exit-from or * return-to either of these. We can't trust sp_el0, restore it. */ mrs x28, sp_el0 ldr_this_cpu dst=x0, sym=__entry_task, tmp=x1 msr sp_el0, x0 /* If we interrupted the kernel point to the previous stack/frame. */ and x0, x3, #0xc mrs x1, CurrentEL cmp x0, x1 csel x29, x29, xzr, eq // fp, or zero csel x4, x2, xzr, eq // elr, or zero stp x29, x4, [sp, #-16]! mov x29, sp add x0, x19, #SDEI_EVENT_INTREGS mov x1, x19 bl __sdei_handler msr sp_el0, x28 /* restore regs >x17 that we clobbered */ mov x4, x19 // keep x4 for __sdei_asm_exit_trampoline ldp x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14] ldp x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9] ldp lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR] mov sp, x1 mov x1, x0 // address to complete_and_resume /* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */ cmp x0, #1 mov_q x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE mov_q x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME csel x0, x2, x3, ls ldr_l x2, sdei_exit_mode alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0 sdei_handler_exit exit_mode=x2 alternative_else_nop_endif #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline br x5 #endif ENDPROC(__sdei_asm_handler) NOKPROBE(__sdei_asm_handler) #endif /* CONFIG_ARM_SDE_INTERFACE */
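The VMAP_STACK overflow test in kernel_ventry (near the top of this file) checks whether SP has left its stack without using a scratch register, by temporarily folding x0 into SP and recovering both with subtractions. A standalone C sketch of just that add/sub juggling, with arbitrary example values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t sp = 0xffff00000a0f0000ULL;   /* arbitrary example values */
	uint64_t x0 = 0x1122334455667788ULL;
	const uint64_t orig_sp = sp, orig_x0 = x0;

	sp = sp + x0;          /* add sp, sp, x0 : sp' = sp + x0          */
	x0 = sp - x0;          /* sub x0, sp, x0 : x0' = original sp      */
	assert(x0 == orig_sp); /* this is the value the tbnz tests        */

	x0 = sp - x0;          /* sub x0, sp, x0 : x0'' = original x0     */
	sp = sp - x0;          /* sub sp, sp, x0 : sp'' = original sp     */
	assert(x0 == orig_x0 && sp == orig_sp);
	return 0;
}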
AirFortressIlikara/LS2K0300-linux-4.19
1,373
arch/arm64/kernel/entry-fpsimd.S
/* * FP/SIMD state saving and restoring * * Copyright (C) 2012 ARM Ltd. * Author: Catalin Marinas <catalin.marinas@arm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/fpsimdmacros.h> /* * Save the FP registers. * * x0 - pointer to struct fpsimd_state */ ENTRY(fpsimd_save_state) fpsimd_save x0, 8 ret ENDPROC(fpsimd_save_state) /* * Load the FP registers. * * x0 - pointer to struct fpsimd_state */ ENTRY(fpsimd_load_state) fpsimd_restore x0, 8 ret ENDPROC(fpsimd_load_state) #ifdef CONFIG_ARM64_SVE ENTRY(sve_save_state) sve_save 0, x1, 2 ret ENDPROC(sve_save_state) ENTRY(sve_load_state) sve_load 0, x1, x2, 3, x4 ret ENDPROC(sve_load_state) ENTRY(sve_get_vl) _sve_rdvl 0, 1 ret ENDPROC(sve_get_vl) #endif /* CONFIG_ARM64_SVE */
AirFortressIlikara/LS2K0300-linux-4.19
6,129
arch/arm64/kernel/entry-ftrace.S
/* * arch/arm64/kernel/entry-ftrace.S * * Copyright (C) 2013 Linaro Limited * Author: AKASHI Takahiro <takahiro.akashi@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/ftrace.h> #include <asm/insn.h> /* * Gcc with -pg will put the following code in the beginning of each function: * mov x0, x30 * bl _mcount * [function's body ...] * "bl _mcount" may be replaced to "bl ftrace_caller" or NOP if dynamic * ftrace is enabled. * * Please note that x0 as an argument will not be used here because we can * get lr(x30) of instrumented function at any time by winding up call stack * as long as the kernel is compiled without -fomit-frame-pointer. * (or CONFIG_FRAME_POINTER, this is forced on arm64) * * stack layout after mcount_enter in _mcount(): * * current sp/fp => 0:+-----+ * in _mcount() | x29 | -> instrumented function's fp * +-----+ * | x30 | -> _mcount()'s lr (= instrumented function's pc) * old sp => +16:+-----+ * when instrumented | | * function calls | ... | * _mcount() | | * | | * instrumented => +xx:+-----+ * function's fp | x29 | -> parent's fp * +-----+ * | x30 | -> instrumented function's lr (= parent's pc) * +-----+ * | ... | */ .macro mcount_enter stp x29, x30, [sp, #-16]! mov x29, sp .endm .macro mcount_exit ldp x29, x30, [sp], #16 ret .endm .macro mcount_adjust_addr rd, rn sub \rd, \rn, #AARCH64_INSN_SIZE .endm /* for instrumented function's parent */ .macro mcount_get_parent_fp reg ldr \reg, [x29] ldr \reg, [\reg] .endm /* for instrumented function */ .macro mcount_get_pc0 reg mcount_adjust_addr \reg, x30 .endm .macro mcount_get_pc reg ldr \reg, [x29, #8] mcount_adjust_addr \reg, \reg .endm .macro mcount_get_lr reg ldr \reg, [x29] ldr \reg, [\reg, #8] .endm .macro mcount_get_lr_addr reg ldr \reg, [x29] add \reg, \reg, #8 .endm #ifndef CONFIG_DYNAMIC_FTRACE /* * void _mcount(unsigned long return_address) * @return_address: return address to instrumented function * * This function makes calls, if enabled, to: * - tracer function to probe instrumented function's entry, * - ftrace_graph_caller to set up an exit hook */ ENTRY(_mcount) mcount_enter ldr_l x2, ftrace_trace_function adr x0, ftrace_stub cmp x0, x2 // if (ftrace_trace_function b.eq skip_ftrace_call // != ftrace_stub) { mcount_get_pc x0 // function's pc mcount_get_lr x1 // function's lr (= parent's pc) blr x2 // (*ftrace_trace_function)(pc, lr); skip_ftrace_call: // } #ifdef CONFIG_FUNCTION_GRAPH_TRACER ldr_l x2, ftrace_graph_return cmp x0, x2 // if ((ftrace_graph_return b.ne ftrace_graph_caller // != ftrace_stub) ldr_l x2, ftrace_graph_entry // || (ftrace_graph_entry adr_l x0, ftrace_graph_entry_stub // != ftrace_graph_entry_stub)) cmp x0, x2 b.ne ftrace_graph_caller // ftrace_graph_caller(); #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ mcount_exit ENDPROC(_mcount) #else /* CONFIG_DYNAMIC_FTRACE */ /* * _mcount() is used to build the kernel with -pg option, but all the branch * instructions to _mcount() are replaced to NOP initially at kernel start up, * and later on, NOP to branch to ftrace_caller() when enabled or branch to * NOP when disabled per-function base. 
*/ ENTRY(_mcount) ret ENDPROC(_mcount) /* * void ftrace_caller(unsigned long return_address) * @return_address: return address to instrumented function * * This function is a counterpart of _mcount() in 'static' ftrace, and * makes calls to: * - tracer function to probe instrumented function's entry, * - ftrace_graph_caller to set up an exit hook */ ENTRY(ftrace_caller) mcount_enter mcount_get_pc0 x0 // function's pc mcount_get_lr x1 // function's lr .global ftrace_call ftrace_call: // tracer(pc, lr); nop // This will be replaced with "bl xxx" // where xxx can be any kind of tracer. #ifdef CONFIG_FUNCTION_GRAPH_TRACER .global ftrace_graph_call ftrace_graph_call: // ftrace_graph_caller(); nop // If enabled, this will be replaced with // "b ftrace_graph_caller" #endif mcount_exit ENDPROC(ftrace_caller) #endif /* CONFIG_DYNAMIC_FTRACE */ ENTRY(ftrace_stub) ret ENDPROC(ftrace_stub) #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* save return value regs */ .macro save_return_regs sub sp, sp, #64 stp x0, x1, [sp] stp x2, x3, [sp, #16] stp x4, x5, [sp, #32] stp x6, x7, [sp, #48] .endm /* restore return value regs */ .macro restore_return_regs ldp x0, x1, [sp] ldp x2, x3, [sp, #16] ldp x4, x5, [sp, #32] ldp x6, x7, [sp, #48] add sp, sp, #64 .endm /* * void ftrace_graph_caller(void) * * Called from _mcount() or ftrace_caller() when the function_graph tracer is * selected. * This function, together with prepare_ftrace_return(), fakes the link register's value on * the call stack in order to intercept the instrumented function's return path * and run return_to_handler() later on its exit. */ ENTRY(ftrace_graph_caller) mcount_get_lr_addr x0 // pointer to function's saved lr mcount_get_pc x1 // function's pc mcount_get_parent_fp x2 // parent's fp bl prepare_ftrace_return // prepare_ftrace_return(&lr, pc, fp) mcount_exit ENDPROC(ftrace_graph_caller) /* * void return_to_handler(void) * * Run ftrace_return_to_handler() before going back to parent. * @fp is checked against the value passed by ftrace_graph_caller() * only when HAVE_FUNCTION_GRAPH_FP_TEST is enabled. */ ENTRY(return_to_handler) save_return_regs mov x0, x29 // parent's fp bl ftrace_return_to_handler // addr = ftrace_return_to_handler(fp); mov x30, x0 // restore the original return address restore_return_regs ret END(return_to_handler) #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
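The stack-layout comment at the top of this file describes how _mcount() reaches the instrumented function's PC and its parent's PC through the chained {fp, lr} frame records. A rough C model of that walk, assuming 4-byte instructions (AARCH64_INSN_SIZE) and made-up addresses:

#include <stdio.h>
#include <stdint.h>

struct frame_record {                  /* what "stp x29, x30, [sp, #-16]!" pushes */
	struct frame_record *fp;       /* caller's frame record                   */
	uint64_t lr;                   /* return address into the caller          */
};

int main(void)
{
	/* Fake frames: parent -> instrumented function -> _mcount. */
	struct frame_record parent = { 0, 0 };
	struct frame_record instrumented = { &parent, 0x400800 }; /* lr = parent's pc   */
	struct frame_record mcount = { &instrumented, 0x400a04 }; /* lr = insn after bl */

	uint64_t pc = mcount.lr - 4;         /* mcount_get_pc: back up one instruction */
	uint64_t parent_pc = mcount.fp->lr;  /* mcount_get_lr: one frame up            */

	printf("instrumented pc=0x%llx, parent pc=0x%llx\n",
	       (unsigned long long)pc, (unsigned long long)parent_pc);
	return 0;
}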
AirFortressIlikara/LS2K0300-linux-4.19
3,000
arch/arm64/kernel/relocate_kernel.S
/* * kexec for arm64 * * Copyright (C) Linaro. * Copyright (C) Huawei Futurewei Technologies. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kexec.h> #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/kexec.h> #include <asm/page.h> #include <asm/sysreg.h> /* * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it. * * The memory that the old kernel occupies may be overwritten when copying the * new image to its final location. To assure that the * arm64_relocate_new_kernel routine which does that copy is not overwritten, * all code and data needed by arm64_relocate_new_kernel must be between the * symbols arm64_relocate_new_kernel and arm64_relocate_new_kernel_end. The * machine_kexec() routine will copy arm64_relocate_new_kernel to the kexec * control_code_page, a special page which has been set up to be preserved * during the copy operation. */ ENTRY(arm64_relocate_new_kernel) /* Setup the list loop variables. */ mov x17, x1 /* x17 = kimage_start */ mov x16, x0 /* x16 = kimage_head */ raw_dcache_line_size x15, x0 /* x15 = dcache line size */ mov x14, xzr /* x14 = entry ptr */ mov x13, xzr /* x13 = copy dest */ /* Clear the sctlr_el2 flags. */ mrs x0, CurrentEL cmp x0, #CurrentEL_EL2 b.ne 1f mrs x0, sctlr_el2 ldr x1, =SCTLR_ELx_FLAGS bic x0, x0, x1 pre_disable_mmu_workaround msr sctlr_el2, x0 isb 1: /* Check if the new image needs relocation. */ tbnz x16, IND_DONE_BIT, .Ldone .Lloop: and x12, x16, PAGE_MASK /* x12 = addr */ /* Test the entry flags. */ .Ltest_source: tbz x16, IND_SOURCE_BIT, .Ltest_indirection /* Invalidate dest page to PoC. */ mov x0, x13 add x20, x0, #PAGE_SIZE sub x1, x15, #1 bic x0, x0, x1 2: dc ivac, x0 add x0, x0, x15 cmp x0, x20 b.lo 2b dsb sy mov x20, x13 mov x21, x12 copy_page x20, x21, x0, x1, x2, x3, x4, x5, x6, x7 /* dest += PAGE_SIZE */ add x13, x13, PAGE_SIZE b .Lnext .Ltest_indirection: tbz x16, IND_INDIRECTION_BIT, .Ltest_destination /* ptr = addr */ mov x14, x12 b .Lnext .Ltest_destination: tbz x16, IND_DESTINATION_BIT, .Lnext /* dest = addr */ mov x13, x12 .Lnext: /* entry = *ptr++ */ ldr x16, [x14], #8 /* while (!(entry & DONE)) */ tbz x16, IND_DONE_BIT, .Lloop .Ldone: /* wait for writes from copy_page to finish */ dsb nsh ic iallu dsb nsh isb /* Start new image. */ mov x0, xzr mov x1, xzr mov x2, xzr mov x3, xzr br x17 ENDPROC(arm64_relocate_new_kernel) .ltorg .align 3 /* To keep the 64-bit values below naturally aligned. */ .Lcopy_end: .org KEXEC_CONTROL_PAGE_SIZE /* * arm64_relocate_new_kernel_size - Number of bytes to copy to the * control_code_page. */ .globl arm64_relocate_new_kernel_size arm64_relocate_new_kernel_size: .quad .Lcopy_end - arm64_relocate_new_kernel
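arm64_relocate_new_kernel above walks the kimage entry list, where low-order flag bits mark each entry as a destination, an indirection page, a source page, or the end of the list. A hedged C sketch of the same walk (flag values follow the IND_* definitions in linux/kexec.h; the tiny demo page size and the dropped cache maintenance are simplifications):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define IND_DESTINATION (1UL << 0)
#define IND_INDIRECTION (1UL << 1)
#define IND_DONE        (1UL << 2)
#define IND_SOURCE      (1UL << 3)
#define DEMO_PAGE_SIZE  64UL                   /* tiny "pages" so the demo fits in memory */
#define DEMO_PAGE_MASK  (~(DEMO_PAGE_SIZE - 1))

/* Same control flow as the assembly: process the head entry, then *ptr++ until DONE. */
static void relocate(uint64_t entry, uint64_t *ptr)
{
	uint8_t *dest = NULL;

	while (!(entry & IND_DONE)) {
		uint64_t addr = entry & DEMO_PAGE_MASK;

		if (entry & IND_SOURCE) {              /* copy one source page       */
			memcpy(dest, (void *)(uintptr_t)addr, DEMO_PAGE_SIZE);
			dest += DEMO_PAGE_SIZE;
		} else if (entry & IND_INDIRECTION) {  /* switch to a new entry page */
			ptr = (uint64_t *)(uintptr_t)addr;
		} else if (entry & IND_DESTINATION) {  /* set the copy destination   */
			dest = (uint8_t *)(uintptr_t)addr;
		}
		entry = *ptr++;                        /* entry = *ptr++             */
	}
}

int main(void)
{
	static uint8_t src[DEMO_PAGE_SIZE] __attribute__((aligned(64))) = "new kernel page";
	static uint8_t dst[DEMO_PAGE_SIZE] __attribute__((aligned(64)));
	static uint64_t list[3] __attribute__((aligned(64)));

	list[0] = (uintptr_t)dst | IND_DESTINATION;
	list[1] = (uintptr_t)src | IND_SOURCE;
	list[2] = IND_DONE;

	relocate((uintptr_t)list | IND_INDIRECTION, NULL);
	printf("%s\n", (const char *)dst);
	return 0;
}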
AirFortressIlikara/LS2K0300-linux-4.19
3,671
arch/arm64/kernel/kuser32.S
/* * Low-level user helpers placed in the vectors page for AArch32. * Based on the kuser helpers in arch/arm/kernel/entry-armv.S. * * Copyright (C) 2005-2011 Nicolas Pitre <nico@fluxnic.net> * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * * AArch32 user helpers. * * Each segment is 32-byte aligned and will be moved to the top of the high * vector page. New segments (if ever needed) must be added in front of * existing ones. This mechanism should be used only for things that are * really small and justified, and not be abused freely. * * See Documentation/arm/kernel_user_helpers.txt for formal definitions. */ #include <asm/unistd.h> .align 5 .globl __kuser_helper_start __kuser_helper_start: __kuser_cmpxchg64: // 0xffff0f60 .inst 0xe92d00f0 // push {r4, r5, r6, r7} .inst 0xe1c040d0 // ldrd r4, r5, [r0] .inst 0xe1c160d0 // ldrd r6, r7, [r1] .inst 0xe1b20f9f // 1: ldrexd r0, r1, [r2] .inst 0xe0303004 // eors r3, r0, r4 .inst 0x00313005 // eoreqs r3, r1, r5 .inst 0x01a23e96 // stlexdeq r3, r6, [r2] .inst 0x03330001 // teqeq r3, #1 .inst 0x0afffff9 // beq 1b .inst 0xf57ff05b // dmb ish .inst 0xe2730000 // rsbs r0, r3, #0 .inst 0xe8bd00f0 // pop {r4, r5, r6, r7} .inst 0xe12fff1e // bx lr .align 5 __kuser_memory_barrier: // 0xffff0fa0 .inst 0xf57ff05b // dmb ish .inst 0xe12fff1e // bx lr .align 5 __kuser_cmpxchg: // 0xffff0fc0 .inst 0xe1923f9f // 1: ldrex r3, [r2] .inst 0xe0533000 // subs r3, r3, r0 .inst 0x01823e91 // stlexeq r3, r1, [r2] .inst 0x03330001 // teqeq r3, #1 .inst 0x0afffffa // beq 1b .inst 0xf57ff05b // dmb ish .inst 0xe2730000 // rsbs r0, r3, #0 .inst 0xe12fff1e // bx lr .align 5 __kuser_get_tls: // 0xffff0fe0 .inst 0xee1d0f70 // mrc p15, 0, r0, c13, c0, 3 .inst 0xe12fff1e // bx lr .rep 5 .word 0 .endr __kuser_helper_version: // 0xffff0ffc .word ((__kuser_helper_end - __kuser_helper_start) >> 5) .globl __kuser_helper_end __kuser_helper_end: /* * AArch32 sigreturn code * * For ARM syscalls, the syscall number has to be loaded into r7. * We do not support an OABI userspace. * * For Thumb syscalls, we also pass the syscall number via r7. We therefore * need two 16-bit instructions. */ .globl __aarch32_sigret_code_start __aarch32_sigret_code_start: /* * ARM Code */ .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn /* * Thumb code */ .byte __NR_compat_sigreturn, 0x27 // svc #__NR_compat_sigreturn .byte __NR_compat_sigreturn, 0xdf // mov r7, #__NR_compat_sigreturn /* * ARM code */ .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn /* * Thumb code */ .byte __NR_compat_rt_sigreturn, 0x27 // svc #__NR_compat_rt_sigreturn .byte __NR_compat_rt_sigreturn, 0xdf // mov r7, #__NR_compat_rt_sigreturn .globl __aarch32_sigret_code_end __aarch32_sigret_code_end:
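As Documentation/arm/kernel_user_helpers.txt explains, 32-bit user code reaches these helpers through fixed addresses near the top of the vector page. A hedged example of using __kuser_cmpxchg (0xffff0fc0) from AArch32 C code follows; the typedef and function names are chosen for the example, and the call is only valid in a 32-bit process on a kernel that provides the helpers.

/* Returns 0 (and has stored newval) if *ptr still held oldval, non-zero
 * otherwise.  Includes the required memory barriers. */
typedef int (kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define kuser_cmpxchg (*(kuser_cmpxchg_t *)0xffff0fc0)

static int atomic_add_one(volatile int *counter)
{
    int old, val;

    do {
        old = *counter;
        val = old + 1;
    } while (kuser_cmpxchg(old, val, counter) != 0);

    return val;
}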
AirFortressIlikara/LS2K0300-linux-4.19
1,415
arch/arm64/kernel/cpu-reset.S
/*
 * CPU reset routines
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Huawei Futurewei Technologies.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

.text
.pushsection    .idmap.text, "awx"

/*
 * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for
 * cpu_soft_restart.
 *
 * @el2_switch: Flag to indicate a switch to EL2 is needed.
 * @entry: Location to jump to for soft reset.
 * arg0: First argument passed to @entry.
 * arg1: Second argument passed to @entry.
 * arg2: Third argument passed to @entry.
 *
 * Put the CPU into the same state as it would be if it had been reset, and
 * branch to what would be the reset vector. It must be executed with the
 * flat identity mapping.
 */
ENTRY(__cpu_soft_restart)
	/* Clear sctlr_el1 flags. */
	mrs	x12, sctlr_el1
	ldr	x13, =SCTLR_ELx_FLAGS
	bic	x12, x12, x13
	pre_disable_mmu_workaround
	msr	sctlr_el1, x12
	isb

	cbz	x0, 1f				// el2_switch?
	mov	x0, #HVC_SOFT_RESTART
	hvc	#0				// no return

1:	mov	x18, x1				// entry
	mov	x0, x2				// arg0
	mov	x1, x3				// arg1
	mov	x2, x4				// arg2
	br	x18
ENDPROC(__cpu_soft_restart)

.popsection
AirFortressIlikara/LS2K0300-linux-4.19
1,543
arch/arm64/kernel/smccc-call.S
/*
 * Copyright (c) 2015, Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License Version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/linkage.h>
#include <linux/arm-smccc.h>
#include <asm/asm-offsets.h>

	.macro SMCCC instr
	.cfi_startproc
	\instr	#0
	ldr	x4, [sp]
	stp	x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
	stp	x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS]
	ldr	x4, [sp, #8]
	cbz	x4, 1f /* no quirk structure */
	ldr	x9, [x4, #ARM_SMCCC_QUIRK_ID_OFFS]
	cmp	x9, #ARM_SMCCC_QUIRK_QCOM_A6
	b.ne	1f
	str	x6, [x4, ARM_SMCCC_QUIRK_STATE_OFFS]
1:	ret
	.cfi_endproc
	.endm

/*
 * void arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
 *		  unsigned long a3, unsigned long a4, unsigned long a5,
 *		  unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
 *		  struct arm_smccc_quirk *quirk)
 */
ENTRY(__arm_smccc_smc)
	SMCCC	smc
ENDPROC(__arm_smccc_smc)

/*
 * void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
 *		  unsigned long a3, unsigned long a4, unsigned long a5,
 *		  unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
 *		  struct arm_smccc_quirk *quirk)
 */
ENTRY(__arm_smccc_hvc)
	SMCCC	hvc
ENDPROC(__arm_smccc_hvc)
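A hedged caller-side sketch: in kernel code, arm_smccc_smc() (a wrapper around __arm_smccc_smc() declared in include/linux/arm-smccc.h) marshals up to eight arguments and returns x0-x3 in struct arm_smccc_res. The PSCI_VERSION function ID (0x84000000) is quoted from the PSCI specification purely for illustration.

#include <linux/arm-smccc.h>

/* PSCI_VERSION: SMC32 fast call, function number 0 */
#define PSCI_VERSION_FN_ID	0x84000000UL

static unsigned long query_psci_version(void)
{
	struct arm_smccc_res res;

	/* a0 = function ID, a1..a7 unused, no quirk structure */
	arm_smccc_smc(PSCI_VERSION_FN_ID, 0, 0, 0, 0, 0, 0, 0, &res);

	return res.a0;	/* major version in bits [31:16], minor in [15:0] */
}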
AirFortressIlikara/LS2K0300-linux-4.19
2,926
arch/arm64/kernel/efi-entry.S
/* * EFI entry point. * * Copyright (C) 2013, 2014 Red Hat, Inc. * Author: Mark Salter <msalter@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #define EFI_LOAD_ERROR 0x8000000000000001 __INIT /* * We arrive here from the EFI boot manager with: * * * CPU in little-endian mode * * MMU on with identity-mapped RAM * * Icache and Dcache on * * We will most likely be running from some place other than where * we want to be. The kernel image wants to be placed at TEXT_OFFSET * from start of RAM. */ ENTRY(entry) /* * Create a stack frame to save FP/LR with extra space * for image_addr variable passed to efi_entry(). */ stp x29, x30, [sp, #-32]! mov x29, sp /* * Call efi_entry to do the real work. * x0 and x1 are already set up by firmware. Current runtime * address of image is calculated and passed via *image_addr. * * unsigned long efi_entry(void *handle, * efi_system_table_t *sys_table, * unsigned long *image_addr) ; */ adr_l x8, _text add x2, sp, 16 str x8, [x2] bl efi_entry cmn x0, #1 b.eq efi_load_fail /* * efi_entry() will have copied the kernel image if necessary and we * return here with device tree address in x0 and the kernel entry * point stored at *image_addr. Save those values in registers which * are callee preserved. */ mov x20, x0 // DTB address ldr x0, [sp, #16] // relocated _text address ldr w21, =stext_offset add x21, x0, x21 /* * Calculate size of the kernel Image (same for original and copy). */ adr_l x1, _text adr_l x2, _edata sub x1, x2, x1 /* * Flush the copied Image to the PoC, and ensure it is not shadowed by * stale icache entries from before relocation. */ bl __flush_dcache_area ic ialluis /* * Ensure that the rest of this function (in the original Image) is * visible when the caches are disabled. The I-cache can't have stale * entries for the VA range of the current image, so no maintenance is * necessary. */ adr x0, entry adr x1, entry_end sub x1, x1, x0 bl __flush_dcache_area /* Turn off Dcache and MMU */ mrs x0, CurrentEL cmp x0, #CurrentEL_EL2 b.ne 1f mrs x0, sctlr_el2 bic x0, x0, #1 << 0 // clear SCTLR.M bic x0, x0, #1 << 2 // clear SCTLR.C pre_disable_mmu_workaround msr sctlr_el2, x0 isb b 2f 1: mrs x0, sctlr_el1 bic x0, x0, #1 << 0 // clear SCTLR.M bic x0, x0, #1 << 2 // clear SCTLR.C pre_disable_mmu_workaround msr sctlr_el1, x0 isb 2: /* Jump to kernel entry point */ mov x0, x20 mov x1, xzr mov x2, xzr mov x3, xzr br x21 efi_load_fail: mov x0, #EFI_LOAD_ERROR ldp x29, x30, [sp], #32 ret entry_end: ENDPROC(entry)
AirFortressIlikara/LS2K0300-linux-4.19
1,577
arch/arm64/kernel/reloc_test_syms.S
/* * Copyright (C) 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/linkage.h> ENTRY(absolute_data64) ldr x0, 0f ret 0: .quad sym64_abs ENDPROC(absolute_data64) ENTRY(absolute_data32) ldr w0, 0f ret 0: .long sym32_abs ENDPROC(absolute_data32) ENTRY(absolute_data16) adr x0, 0f ldrh w0, [x0] ret 0: .short sym16_abs, 0 ENDPROC(absolute_data16) ENTRY(signed_movw) movz x0, #:abs_g2_s:sym64_abs movk x0, #:abs_g1_nc:sym64_abs movk x0, #:abs_g0_nc:sym64_abs ret ENDPROC(signed_movw) ENTRY(unsigned_movw) movz x0, #:abs_g3:sym64_abs movk x0, #:abs_g2_nc:sym64_abs movk x0, #:abs_g1_nc:sym64_abs movk x0, #:abs_g0_nc:sym64_abs ret ENDPROC(unsigned_movw) .align 12 .space 0xff8 ENTRY(relative_adrp) adrp x0, sym64_rel add x0, x0, #:lo12:sym64_rel ret ENDPROC(relative_adrp) .align 12 .space 0xffc ENTRY(relative_adrp_far) adrp x0, memstart_addr add x0, x0, #:lo12:memstart_addr ret ENDPROC(relative_adrp_far) ENTRY(relative_adr) adr x0, sym64_rel ret ENDPROC(relative_adr) ENTRY(relative_data64) adr x1, 0f ldr x0, [x1] add x0, x0, x1 ret 0: .quad sym64_rel - . ENDPROC(relative_data64) ENTRY(relative_data32) adr x1, 0f ldr w0, [x1] add x0, x0, x1 ret 0: .long sym64_rel - . ENDPROC(relative_data32) ENTRY(relative_data16) adr x1, 0f ldrsh w0, [x1] add x0, x0, x1 ret 0: .short sym64_rel - ., 0 ENDPROC(relative_data16)
AirFortressIlikara/LS2K0300-linux-4.19
5,360
arch/arm64/kernel/hibernate-asm.S
/* * Hibernate low-level support * * Copyright (C) 2016 ARM Ltd. * Author: James Morse <james.morse@arm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <linux/errno.h> #include <asm/asm-offsets.h> #include <asm/assembler.h> #include <asm/cputype.h> #include <asm/memory.h> #include <asm/page.h> #include <asm/virt.h> /* * To prevent the possibility of old and new partial table walks being visible * in the tlb, switch the ttbr to a zero page when we invalidate the old * records. D4.7.1 'General TLB maintenance requirements' in ARM DDI 0487A.i * Even switching to our copied tables will cause a changed output address at * each stage of the walk. */ .macro break_before_make_ttbr_switch zero_page, page_table, tmp phys_to_ttbr \tmp, \zero_page msr ttbr1_el1, \tmp isb tlbi vmalle1 dsb nsh phys_to_ttbr \tmp, \page_table msr ttbr1_el1, \tmp isb .endm /* * Resume from hibernate * * Loads temporary page tables then restores the memory image. * Finally branches to cpu_resume() to restore the state saved by * swsusp_arch_suspend(). * * Because this code has to be copied to a 'safe' page, it can't call out to * other functions by PC-relative address. Also remember that it may be * mid-way through over-writing other functions. For this reason it contains * code from flush_icache_range() and uses the copy_page() macro. * * This 'safe' page is mapped via ttbr0, and executed from there. This function * switches to a copy of the linear map in ttbr1, performs the restore, then * switches ttbr1 to the original kernel's swapper_pg_dir. * * All of memory gets written to, including code. We need to clean the kernel * text to the Point of Coherence (PoC) before secondary cores can be booted. * Because the kernel modules and executable pages mapped to user space are * also written as data, we clean all pages we touch to the Point of * Unification (PoU). 
* * x0: physical address of temporary page tables * x1: physical address of swapper page tables * x2: address of cpu_resume * x3: linear map address of restore_pblist in the current kernel * x4: physical address of __hyp_stub_vectors, or 0 * x5: physical address of a zero page that remains zero after resume */ .pushsection ".hibernate_exit.text", "ax" ENTRY(swsusp_arch_suspend_exit) /* * We execute from ttbr0, change ttbr1 to our copied linear map tables * with a break-before-make via the zero page */ break_before_make_ttbr_switch x5, x0, x6 mov x21, x1 mov x30, x2 mov x24, x4 mov x25, x5 /* walk the restore_pblist and use copy_page() to over-write memory */ mov x19, x3 1: ldr x10, [x19, #HIBERN_PBE_ORIG] mov x0, x10 ldr x1, [x19, #HIBERN_PBE_ADDR] copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9 add x1, x10, #PAGE_SIZE /* Clean the copied page to PoU - based on flush_icache_range() */ raw_dcache_line_size x2, x3 sub x3, x2, #1 bic x4, x10, x3 2: dc cvau, x4 /* clean D line / unified line */ add x4, x4, x2 cmp x4, x1 b.lo 2b ldr x19, [x19, #HIBERN_PBE_NEXT] cbnz x19, 1b dsb ish /* wait for PoU cleaning to finish */ /* switch to the restored kernels page tables */ break_before_make_ttbr_switch x25, x21, x6 ic ialluis dsb ish isb cbz x24, 3f /* Do we need to re-initialise EL2? */ hvc #0 3: ret .ltorg ENDPROC(swsusp_arch_suspend_exit) /* * Restore the hyp stub. * This must be done before the hibernate page is unmapped by _cpu_resume(), * but happens before any of the hyp-stub's code is cleaned to PoC. * * x24: The physical address of __hyp_stub_vectors */ el1_sync: msr vbar_el2, x24 eret ENDPROC(el1_sync) .macro invalid_vector label \label: b \label ENDPROC(\label) .endm invalid_vector el2_sync_invalid invalid_vector el2_irq_invalid invalid_vector el2_fiq_invalid invalid_vector el2_error_invalid invalid_vector el1_sync_invalid invalid_vector el1_irq_invalid invalid_vector el1_fiq_invalid invalid_vector el1_error_invalid /* el2 vectors - switch el2 here while we restore the memory image. */ .align 11 ENTRY(hibernate_el2_vectors) ventry el2_sync_invalid // Synchronous EL2t ventry el2_irq_invalid // IRQ EL2t ventry el2_fiq_invalid // FIQ EL2t ventry el2_error_invalid // Error EL2t ventry el2_sync_invalid // Synchronous EL2h ventry el2_irq_invalid // IRQ EL2h ventry el2_fiq_invalid // FIQ EL2h ventry el2_error_invalid // Error EL2h ventry el1_sync // Synchronous 64-bit EL1 ventry el1_irq_invalid // IRQ 64-bit EL1 ventry el1_fiq_invalid // FIQ 64-bit EL1 ventry el1_error_invalid // Error 64-bit EL1 ventry el1_sync_invalid // Synchronous 32-bit EL1 ventry el1_irq_invalid // IRQ 32-bit EL1 ventry el1_fiq_invalid // FIQ 32-bit EL1 ventry el1_error_invalid // Error 32-bit EL1 END(hibernate_el2_vectors) .popsection
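The restore loop above iterates the same list that the generic hibernation core builds out of struct pbe (include/linux/suspend.h). A hedged C equivalent of the walk, with copy_and_clean_page() standing in for the copy_page macro plus the per-line 'dc cvau' cleaning the assembly performs:

#include <string.h>

#define PAGE_SZ 4096

struct pbe {
    void *address;          /* address of the saved copy (source)     */
    void *orig_address;     /* address the page must be restored to   */
    struct pbe *next;
};

/* placeholder for copy_page + D-cache cleaning to PoU in the assembly */
static void copy_and_clean_page(void *dst, const void *src)
{
    memcpy(dst, src, PAGE_SZ);    /* cache maintenance omitted in sketch */
}

static void restore_image_sketch(struct pbe *restore_pblist)
{
    struct pbe *p;

    for (p = restore_pblist; p; p = p->next)
        copy_and_clean_page(p->orig_address, p->address);

    /* the assembly then switches ttbr1 back to the restored kernel's
     * swapper_pg_dir and invalidates the I-cache before returning */
}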
AirFortressIlikara/LS2K0300-linux-4.19
6,236
arch/arm64/kernel/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * ld script to make ARM Linux kernel * taken from the i386 version by Russell King * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz> */ #include <asm-generic/vmlinux.lds.h> #include <asm/cache.h> #include <asm/kernel-pgtable.h> #include <asm/thread_info.h> #include <asm/memory.h> #include <asm/page.h> #include <asm/pgtable.h> #include "image.h" /* .exit.text needed in case of alternative patching */ #define ARM_EXIT_KEEP(x) x #define ARM_EXIT_DISCARD(x) OUTPUT_ARCH(aarch64) ENTRY(_text) jiffies = jiffies_64; #define HYPERVISOR_EXTABLE \ . = ALIGN(SZ_8); \ __start___kvm_ex_table = .; \ *(__kvm_ex_table) \ __stop___kvm_ex_table = .; #define HYPERVISOR_TEXT \ /* \ * Align to 4 KB so that \ * a) the HYP vector table is at its minimum \ * alignment of 2048 bytes \ * b) the HYP init code will not cross a page \ * boundary if its size does not exceed \ * 4 KB (see related ASSERT() below) \ */ \ . = ALIGN(SZ_4K); \ __hyp_idmap_text_start = .; \ *(.hyp.idmap.text) \ __hyp_idmap_text_end = .; \ __hyp_text_start = .; \ *(.hyp.text) \ HYPERVISOR_EXTABLE \ __hyp_text_end = .; #define IDMAP_TEXT \ . = ALIGN(SZ_4K); \ __idmap_text_start = .; \ *(.idmap.text) \ __idmap_text_end = .; #ifdef CONFIG_HIBERNATION #define HIBERNATE_TEXT \ . = ALIGN(SZ_4K); \ __hibernate_exit_text_start = .; \ *(.hibernate_exit.text) \ __hibernate_exit_text_end = .; #else #define HIBERNATE_TEXT #endif #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 #define TRAMP_TEXT \ . = ALIGN(PAGE_SIZE); \ __entry_tramp_text_start = .; \ *(.entry.tramp.text) \ . = ALIGN(PAGE_SIZE); \ __entry_tramp_text_end = .; #else #define TRAMP_TEXT #endif /* * The size of the PE/COFF section that covers the kernel image, which * runs from stext to _edata, must be a round multiple of the PE/COFF * FileAlignment, which we set to its minimum value of 0x200. 'stext' * itself is 4 KB aligned, so padding out _edata to a 0x200 aligned * boundary should be sufficient. */ PECOFF_FILE_ALIGNMENT = 0x200; #ifdef CONFIG_EFI #define PECOFF_EDATA_PADDING \ .pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); } #else #define PECOFF_EDATA_PADDING #endif SECTIONS { /* * XXX: The linker does not define how output sections are * assigned to input sections when there are multiple statements * matching the same input section name. There is no documented * order of matching. */ /DISCARD/ : { ARM_EXIT_DISCARD(EXIT_TEXT) ARM_EXIT_DISCARD(EXIT_DATA) EXIT_CALL *(.discard) *(.discard.*) *(.interp .dynamic) *(.dynsym .dynstr .hash .gnu.hash) *(.eh_frame) } . = KIMAGE_VADDR + TEXT_OFFSET; .head.text : { _text = .; HEAD_TEXT } .text : { /* Real text segment */ _stext = .; /* Text and read-only data */ __exception_text_start = .; *(.exception.text) __exception_text_end = .; IRQENTRY_TEXT SOFTIRQENTRY_TEXT ENTRY_TEXT TEXT_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT HYPERVISOR_TEXT IDMAP_TEXT HIBERNATE_TEXT TRAMP_TEXT *(.fixup) *(.gnu.warning) . = ALIGN(16); *(.got) /* Global offset table */ } . = ALIGN(SEGMENT_ALIGN); _etext = .; /* End of text section */ RO_DATA(PAGE_SIZE) /* everything from this point to */ EXCEPTION_TABLE(8) /* __init_begin will be marked RO NX */ NOTES . = ALIGN(SEGMENT_ALIGN); __init_begin = .; __inittext_begin = .; INIT_TEXT_SECTION(8) .exit.text : { ARM_EXIT_KEEP(EXIT_TEXT) } . = ALIGN(4); .altinstructions : { __alt_instructions = .; *(.altinstructions) __alt_instructions_end = .; } . 
= ALIGN(PAGE_SIZE); __inittext_end = .; __initdata_begin = .; .init.data : { INIT_DATA INIT_SETUP(16) INIT_CALLS CON_INITCALL SECURITY_INITCALL INIT_RAM_FS *(.init.rodata.* .init.bss) /* from the EFI stub */ } .exit.data : { ARM_EXIT_KEEP(EXIT_DATA) } PERCPU_SECTION(L1_CACHE_BYTES) .rela.dyn : ALIGN(8) { *(.rela .rela*) } __rela_offset = ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR); __rela_size = SIZEOF(.rela.dyn); . = ALIGN(SEGMENT_ALIGN); __initdata_end = .; __init_end = .; _data = .; _sdata = .; RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN) /* * Data written with the MMU off but read with the MMU on requires * cache lines to be invalidated, discarding up to a Cache Writeback * Granule (CWG) of data from the cache. Keep the section that * requires this type of maintenance to be in its own Cache Writeback * Granule (CWG) area so the cache maintenance operations don't * interfere with adjacent data. */ .mmuoff.data.write : ALIGN(SZ_2K) { __mmuoff_data_start = .; *(.mmuoff.data.write) } . = ALIGN(SZ_2K); .mmuoff.data.read : { *(.mmuoff.data.read) __mmuoff_data_end = .; } PECOFF_EDATA_PADDING __pecoff_data_rawsize = ABSOLUTE(. - __initdata_begin); _edata = .; BSS_SECTION(0, 0, 0) . = ALIGN(PAGE_SIZE); idmap_pg_dir = .; . += IDMAP_DIR_SIZE; #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 tramp_pg_dir = .; . += PAGE_SIZE; #endif #ifdef CONFIG_ARM64_SW_TTBR0_PAN reserved_ttbr0 = .; . += RESERVED_TTBR0_SIZE; #endif swapper_pg_dir = .; . += SWAPPER_DIR_SIZE; swapper_pg_end = .; __pecoff_data_size = ABSOLUTE(. - __initdata_begin); _end = .; STABS_DEBUG HEAD_SYMBOLS } /* * The HYP init code and ID map text can't be longer than a page each, * and should not cross a page boundary. */ ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, "HYP init code too big or misaligned") ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, "ID map text too big or misaligned") #ifdef CONFIG_HIBERNATION ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1)) <= SZ_4K, "Hibernate exit text too big or misaligned") #endif #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE, "Entry trampoline text too big") #endif /* * If padding is applied before .head.text, virt<->phys conversions will fail. */ ASSERT(_text == (KIMAGE_VADDR + TEXT_OFFSET), "HEAD is misaligned")
AirFortressIlikara/LS2K0300-linux-4.19
4,476
arch/arm64/kernel/sleep.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/errno.h> #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/assembler.h> .text /* * Implementation of MPIDR_EL1 hash algorithm through shifting * and OR'ing. * * @dst: register containing hash result * @rs0: register containing affinity level 0 bit shift * @rs1: register containing affinity level 1 bit shift * @rs2: register containing affinity level 2 bit shift * @rs3: register containing affinity level 3 bit shift * @mpidr: register containing MPIDR_EL1 value * @mask: register containing MPIDR mask * * Pseudo C-code: * *u32 dst; * *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 rs3, u64 mpidr, u64 mask) { * u32 aff0, aff1, aff2, aff3; * u64 mpidr_masked = mpidr & mask; * aff0 = mpidr_masked & 0xff; * aff1 = mpidr_masked & 0xff00; * aff2 = mpidr_masked & 0xff0000; * aff2 = mpidr_masked & 0xff00000000; * dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2 | aff3 >> rs3); *} * Input registers: rs0, rs1, rs2, rs3, mpidr, mask * Output register: dst * Note: input and output registers must be disjoint register sets (eg: a macro instance with mpidr = x1 and dst = x1 is invalid) */ .macro compute_mpidr_hash dst, rs0, rs1, rs2, rs3, mpidr, mask and \mpidr, \mpidr, \mask // mask out MPIDR bits and \dst, \mpidr, #0xff // mask=aff0 lsr \dst ,\dst, \rs0 // dst=aff0>>rs0 and \mask, \mpidr, #0xff00 // mask = aff1 lsr \mask ,\mask, \rs1 orr \dst, \dst, \mask // dst|=(aff1>>rs1) and \mask, \mpidr, #0xff0000 // mask = aff2 lsr \mask ,\mask, \rs2 orr \dst, \dst, \mask // dst|=(aff2>>rs2) and \mask, \mpidr, #0xff00000000 // mask = aff3 lsr \mask ,\mask, \rs3 orr \dst, \dst, \mask // dst|=(aff3>>rs3) .endm /* * Save CPU state in the provided sleep_stack_data area, and publish its * location for cpu_resume()'s use in sleep_save_stash. * * cpu_resume() will restore this saved state, and return. Because the * link-register is saved and restored, it will appear to return from this * function. So that the caller can tell the suspend/resume paths apart, * __cpu_suspend_enter() will always return a non-zero value, whereas the * path through cpu_resume() will return 0. * * x0 = struct sleep_stack_data area */ ENTRY(__cpu_suspend_enter) stp x29, lr, [x0, #SLEEP_STACK_DATA_CALLEE_REGS] stp x19, x20, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+16] stp x21, x22, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+32] stp x23, x24, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+48] stp x25, x26, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+64] stp x27, x28, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+80] /* save the sp in cpu_suspend_ctx */ mov x2, sp str x2, [x0, #SLEEP_STACK_DATA_SYSTEM_REGS + CPU_CTX_SP] /* find the mpidr_hash */ ldr_l x1, sleep_save_stash mrs x7, mpidr_el1 adr_l x9, mpidr_hash ldr x10, [x9, #MPIDR_HASH_MASK] /* * Following code relies on the struct mpidr_hash * members size. */ ldp w3, w4, [x9, #MPIDR_HASH_SHIFTS] ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)] compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10 add x1, x1, x8, lsl #3 str x0, [x1] add x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS stp x29, lr, [sp, #-16]! 
bl cpu_do_suspend ldp x29, lr, [sp], #16 mov x0, #1 ret ENDPROC(__cpu_suspend_enter) .pushsection ".idmap.text", "awx" ENTRY(cpu_resume) bl el2_setup // if in EL2 drop to EL1 cleanly bl __cpu_setup /* enable the MMU early - so we can access sleep_save_stash by va */ bl __enable_mmu ldr x8, =_cpu_resume br x8 ENDPROC(cpu_resume) .ltorg .popsection ENTRY(_cpu_resume) mrs x1, mpidr_el1 adr_l x8, mpidr_hash // x8 = struct mpidr_hash virt address /* retrieve mpidr_hash members to compute the hash */ ldr x2, [x8, #MPIDR_HASH_MASK] ldp w3, w4, [x8, #MPIDR_HASH_SHIFTS] ldp w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)] compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2 /* x7 contains hash index, let's use it to grab context pointer */ ldr_l x0, sleep_save_stash ldr x0, [x0, x7, lsl #3] add x29, x0, #SLEEP_STACK_DATA_CALLEE_REGS add x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS /* load sp from context */ ldr x2, [x0, #CPU_CTX_SP] mov sp, x2 /* * cpu_do_resume expects x0 to contain context address pointer */ bl cpu_do_resume #ifdef CONFIG_KASAN mov x0, sp bl kasan_unpoison_task_stack_below #endif ldp x19, x20, [x29, #16] ldp x21, x22, [x29, #32] ldp x23, x24, [x29, #48] ldp x25, x26, [x29, #64] ldp x27, x28, [x29, #80] ldp x29, lr, [x29] mov x0, #0 ret ENDPROC(_cpu_resume)
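The compute_mpidr_hash macro is easier to check against a scalar C rendering. Note that the pseudo C-code in the file's comment assigns aff2 twice; judging from the macro body, the fourth mask line is meant to produce aff3. A hedged equivalent (the function name is chosen for the example):

#include <stdint.h>

static uint32_t mpidr_hash_sketch(uint32_t rs0, uint32_t rs1, uint32_t rs2,
                                  uint32_t rs3, uint64_t mpidr, uint64_t mask)
{
    uint64_t m = mpidr & mask;
    uint64_t aff0 = m & 0xffULL;
    uint64_t aff1 = m & 0xff00ULL;
    uint64_t aff2 = m & 0xff0000ULL;
    uint64_t aff3 = m & 0xff00000000ULL;    /* Aff3 lives in bits [39:32] */

    return (uint32_t)((aff0 >> rs0) | (aff1 >> rs1) |
                      (aff2 >> rs2) | (aff3 >> rs3));
}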
AirFortressIlikara/LS2K0300-linux-4.19
3,490
arch/arm64/kernel/hyp-stub.S
/* * Hypervisor stub * * Copyright (C) 2012 ARM Ltd. * Author: Marc Zyngier <marc.zyngier@arm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/init.h> #include <linux/linkage.h> #include <linux/irqchip/arm-gic-v3.h> #include <asm/assembler.h> #include <asm/kvm_arm.h> #include <asm/kvm_asm.h> #include <asm/ptrace.h> #include <asm/virt.h> .text .pushsection .hyp.text, "ax" .align 11 ENTRY(__hyp_stub_vectors) ventry el2_sync_invalid // Synchronous EL2t ventry el2_irq_invalid // IRQ EL2t ventry el2_fiq_invalid // FIQ EL2t ventry el2_error_invalid // Error EL2t ventry el2_sync_invalid // Synchronous EL2h ventry el2_irq_invalid // IRQ EL2h ventry el2_fiq_invalid // FIQ EL2h ventry el2_error_invalid // Error EL2h ventry el1_sync // Synchronous 64-bit EL1 ventry el1_irq_invalid // IRQ 64-bit EL1 ventry el1_fiq_invalid // FIQ 64-bit EL1 ventry el1_error_invalid // Error 64-bit EL1 ventry el1_sync_invalid // Synchronous 32-bit EL1 ventry el1_irq_invalid // IRQ 32-bit EL1 ventry el1_fiq_invalid // FIQ 32-bit EL1 ventry el1_error_invalid // Error 32-bit EL1 ENDPROC(__hyp_stub_vectors) .align 11 el1_sync: cmp x0, #HVC_SET_VECTORS b.ne 2f msr vbar_el2, x1 b 9f 2: cmp x0, #HVC_SOFT_RESTART b.ne 3f mov x0, x2 mov x2, x4 mov x4, x1 mov x1, x3 br x4 // no return 3: cmp x0, #HVC_RESET_VECTORS beq 9f // Nothing to reset! /* Someone called kvm_call_hyp() against the hyp-stub... */ ldr x0, =HVC_STUB_ERR eret 9: mov x0, xzr eret ENDPROC(el1_sync) .macro invalid_vector label \label: b \label ENDPROC(\label) .endm invalid_vector el2_sync_invalid invalid_vector el2_irq_invalid invalid_vector el2_fiq_invalid invalid_vector el2_error_invalid invalid_vector el1_sync_invalid invalid_vector el1_irq_invalid invalid_vector el1_fiq_invalid invalid_vector el1_error_invalid /* * __hyp_set_vectors: Call this after boot to set the initial hypervisor * vectors as part of hypervisor installation. On an SMP system, this should * be called on each CPU. * * x0 must be the physical address of the new vector table, and must be * 2KB aligned. * * Before calling this, you must check that the stub hypervisor is installed * everywhere, by waiting for any secondary CPUs to be brought up and then * checking that is_hyp_mode_available() is true. * * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or * something else went wrong... in such cases, trying to install a new * hypervisor is unlikely to work as desired. * * When you call into your shiny new hypervisor, sp_el2 will contain junk, * so you will need to set that to something sensible at the new hypervisor's * initialisation entry point. */ ENTRY(__hyp_set_vectors) mov x1, x0 mov x0, #HVC_SET_VECTORS hvc #0 ret ENDPROC(__hyp_set_vectors) ENTRY(__hyp_reset_vectors) mov x0, #HVC_RESET_VECTORS hvc #0 ret ENDPROC(__hyp_reset_vectors)
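A hedged caller-side sketch of the protocol described above: installing a new EL2 vector table is one HVC with x0 = HVC_SET_VECTORS and x1 = the physical address of the table, which is exactly what __hyp_set_vectors() wraps. The vector-table symbol below is hypothetical.

#include <asm/virt.h>
#include <asm/memory.h>

/* hypothetical 2KB-aligned EL2 vector table provided elsewhere */
extern char my_hyp_vectors[];

static void install_hyp_vectors_sketch(void)
{
	/* only meaningful while the stub hypervisor is still installed */
	if (!is_hyp_mode_available())
		return;

	__hyp_set_vectors(__pa_symbol(my_hyp_vectors));
}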
AirFortressIlikara/LS2K0300-linux-4.19
6,764
arch/arm64/crypto/sha512-ce-core.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * sha512-ce-core.S - core SHA-384/SHA-512 transform using v8 Crypto Extensions * * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/assembler.h> .irp b,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19 .set .Lq\b, \b .set .Lv\b\().2d, \b .endr .macro sha512h, rd, rn, rm .inst 0xce608000 | .L\rd | (.L\rn << 5) | (.L\rm << 16) .endm .macro sha512h2, rd, rn, rm .inst 0xce608400 | .L\rd | (.L\rn << 5) | (.L\rm << 16) .endm .macro sha512su0, rd, rn .inst 0xcec08000 | .L\rd | (.L\rn << 5) .endm .macro sha512su1, rd, rn, rm .inst 0xce608800 | .L\rd | (.L\rn << 5) | (.L\rm << 16) .endm /* * The SHA-512 round constants */ .section ".rodata", "a" .align 4 .Lsha512_rcon: .quad 0x428a2f98d728ae22, 0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f, 0xe9b5dba58189dbbc .quad 0x3956c25bf348b538, 0x59f111f1b605d019 .quad 0x923f82a4af194f9b, 0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242, 0x12835b0145706fbe .quad 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f, 0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235, 0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2, 0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275, 0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5 .quad 0x983e5152ee66dfab, 0xa831c66d2db43210 .quad 0xb00327c898fb213f, 0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2, 0xd5a79147930aa725 .quad 0x06ca6351e003826f, 0x142929670a0e6e70 .quad 0x27b70a8546d22ffc, 0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df .quad 0x650a73548baf63de, 0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6, 0x92722c851482353b .quad 0xa2bfe8a14cf10364, 0xa81a664bbc423001 .quad 0xc24b8b70d0f89791, 0xc76c51a30654be30 .quad 0xd192e819d6ef5218, 0xd69906245565a910 .quad 0xf40e35855771202a, 0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8, 0x1e376c085141ab53 .quad 0x2748774cdf8eeb99, 0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc, 0x78a5636f43172f60 .quad 0x84c87814a1f0ab72, 0x8cc702081a6439ec .quad 0x90befffa23631e28, 0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915, 0xc67178f2e372532b .quad 0xca273eceea26619c, 0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba, 0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae, 0x1b710b35131c471b .quad 0x28db77f523047d84, 0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc, 0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6, 0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec, 0x6c44198c4a475817 .macro dround, i0, i1, i2, i3, i4, rc0, rc1, in0, in1, in2, in3, in4 .ifnb \rc1 ld1 {v\rc1\().2d}, [x4], #16 .endif add v5.2d, v\rc0\().2d, v\in0\().2d ext v6.16b, v\i2\().16b, v\i3\().16b, #8 ext v5.16b, v5.16b, v5.16b, #8 ext v7.16b, v\i1\().16b, v\i2\().16b, #8 add v\i3\().2d, v\i3\().2d, v5.2d .ifnb \in1 ext v5.16b, v\in3\().16b, v\in4\().16b, #8 sha512su0 v\in0\().2d, v\in1\().2d .endif sha512h q\i3, q6, v7.2d .ifnb \in1 sha512su1 v\in0\().2d, v\in2\().2d, v5.2d .endif add v\i4\().2d, v\i1\().2d, v\i3\().2d sha512h2 q\i3, q\i1, v\i0\().2d .endm /* * void sha512_ce_transform(struct sha512_state *sst, u8 const *src, * int blocks) */ .text ENTRY(sha512_ce_transform) frame_push 3 mov x19, x0 mov x20, x1 mov x21, x2 /* load state */ 0: ld1 {v8.2d-v11.2d}, [x19] /* load first 4 round constants 
*/ adr_l x3, .Lsha512_rcon ld1 {v20.2d-v23.2d}, [x3], #64 /* load input */ 1: ld1 {v12.2d-v15.2d}, [x20], #64 ld1 {v16.2d-v19.2d}, [x20], #64 sub w21, w21, #1 CPU_LE( rev64 v12.16b, v12.16b ) CPU_LE( rev64 v13.16b, v13.16b ) CPU_LE( rev64 v14.16b, v14.16b ) CPU_LE( rev64 v15.16b, v15.16b ) CPU_LE( rev64 v16.16b, v16.16b ) CPU_LE( rev64 v17.16b, v17.16b ) CPU_LE( rev64 v18.16b, v18.16b ) CPU_LE( rev64 v19.16b, v19.16b ) mov x4, x3 // rc pointer mov v0.16b, v8.16b mov v1.16b, v9.16b mov v2.16b, v10.16b mov v3.16b, v11.16b // v0 ab cd -- ef gh ab // v1 cd -- ef gh ab cd // v2 ef gh ab cd -- ef // v3 gh ab cd -- ef gh // v4 -- ef gh ab cd -- dround 0, 1, 2, 3, 4, 20, 24, 12, 13, 19, 16, 17 dround 3, 0, 4, 2, 1, 21, 25, 13, 14, 12, 17, 18 dround 2, 3, 1, 4, 0, 22, 26, 14, 15, 13, 18, 19 dround 4, 2, 0, 1, 3, 23, 27, 15, 16, 14, 19, 12 dround 1, 4, 3, 0, 2, 24, 28, 16, 17, 15, 12, 13 dround 0, 1, 2, 3, 4, 25, 29, 17, 18, 16, 13, 14 dround 3, 0, 4, 2, 1, 26, 30, 18, 19, 17, 14, 15 dround 2, 3, 1, 4, 0, 27, 31, 19, 12, 18, 15, 16 dround 4, 2, 0, 1, 3, 28, 24, 12, 13, 19, 16, 17 dround 1, 4, 3, 0, 2, 29, 25, 13, 14, 12, 17, 18 dround 0, 1, 2, 3, 4, 30, 26, 14, 15, 13, 18, 19 dround 3, 0, 4, 2, 1, 31, 27, 15, 16, 14, 19, 12 dround 2, 3, 1, 4, 0, 24, 28, 16, 17, 15, 12, 13 dround 4, 2, 0, 1, 3, 25, 29, 17, 18, 16, 13, 14 dround 1, 4, 3, 0, 2, 26, 30, 18, 19, 17, 14, 15 dround 0, 1, 2, 3, 4, 27, 31, 19, 12, 18, 15, 16 dround 3, 0, 4, 2, 1, 28, 24, 12, 13, 19, 16, 17 dround 2, 3, 1, 4, 0, 29, 25, 13, 14, 12, 17, 18 dround 4, 2, 0, 1, 3, 30, 26, 14, 15, 13, 18, 19 dround 1, 4, 3, 0, 2, 31, 27, 15, 16, 14, 19, 12 dround 0, 1, 2, 3, 4, 24, 28, 16, 17, 15, 12, 13 dround 3, 0, 4, 2, 1, 25, 29, 17, 18, 16, 13, 14 dround 2, 3, 1, 4, 0, 26, 30, 18, 19, 17, 14, 15 dround 4, 2, 0, 1, 3, 27, 31, 19, 12, 18, 15, 16 dround 1, 4, 3, 0, 2, 28, 24, 12, 13, 19, 16, 17 dround 0, 1, 2, 3, 4, 29, 25, 13, 14, 12, 17, 18 dround 3, 0, 4, 2, 1, 30, 26, 14, 15, 13, 18, 19 dround 2, 3, 1, 4, 0, 31, 27, 15, 16, 14, 19, 12 dround 4, 2, 0, 1, 3, 24, 28, 16, 17, 15, 12, 13 dround 1, 4, 3, 0, 2, 25, 29, 17, 18, 16, 13, 14 dround 0, 1, 2, 3, 4, 26, 30, 18, 19, 17, 14, 15 dround 3, 0, 4, 2, 1, 27, 31, 19, 12, 18, 15, 16 dround 2, 3, 1, 4, 0, 28, 24, 12 dround 4, 2, 0, 1, 3, 29, 25, 13 dround 1, 4, 3, 0, 2, 30, 26, 14 dround 0, 1, 2, 3, 4, 31, 27, 15 dround 3, 0, 4, 2, 1, 24, , 16 dround 2, 3, 1, 4, 0, 25, , 17 dround 4, 2, 0, 1, 3, 26, , 18 dround 1, 4, 3, 0, 2, 27, , 19 /* update state */ add v8.2d, v8.2d, v0.2d add v9.2d, v9.2d, v1.2d add v10.2d, v10.2d, v2.2d add v11.2d, v11.2d, v3.2d /* handled all input blocks? */ cbz w21, 3f if_will_cond_yield_neon st1 {v8.2d-v11.2d}, [x19] do_cond_yield_neon b 0b endif_yield_neon b 1b /* store new state */ 3: st1 {v8.2d-v11.2d}, [x19] frame_pop ret ENDPROC(sha512_ce_transform)
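A hedged sketch of how glue code would drive sha512_ce_transform(): whole 128-byte blocks go to the transform, and any tail is kept for finalization. The real driver goes through the sha512_base helpers and brackets the call with kernel_neon_begin()/kernel_neon_end(); the buffering below is simplified for illustration.

#include <linux/types.h>
#include <linux/linkage.h>

struct sha512_state;	/* hash state defined in crypto/sha.h */
asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
				    int blocks);

#define SHA512_BLOCK_BYTES	128

/* simplified update step: hand only whole blocks to the transform */
static void sha512_update_sketch(struct sha512_state *sst,
				 const u8 *data, unsigned int len)
{
	unsigned int blocks = len / SHA512_BLOCK_BYTES;

	if (blocks)
		sha512_ce_transform(sst, data, blocks);
	/* len % SHA512_BLOCK_BYTES bytes remain buffered for finalization */
}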
AirFortressIlikara/LS2K0300-linux-4.19
3,485
arch/arm64/crypto/sha1-ce-core.S
/* * sha1-ce-core.S - SHA-1 secure hash using ARMv8 Crypto Extensions * * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/assembler.h> .text .arch armv8-a+crypto k0 .req v0 k1 .req v1 k2 .req v2 k3 .req v3 t0 .req v4 t1 .req v5 dga .req q6 dgav .req v6 dgb .req s7 dgbv .req v7 dg0q .req q12 dg0s .req s12 dg0v .req v12 dg1s .req s13 dg1v .req v13 dg2s .req s14 .macro add_only, op, ev, rc, s0, dg1 .ifc \ev, ev add t1.4s, v\s0\().4s, \rc\().4s sha1h dg2s, dg0s .ifnb \dg1 sha1\op dg0q, \dg1, t0.4s .else sha1\op dg0q, dg1s, t0.4s .endif .else .ifnb \s0 add t0.4s, v\s0\().4s, \rc\().4s .endif sha1h dg1s, dg0s sha1\op dg0q, dg2s, t1.4s .endif .endm .macro add_update, op, ev, rc, s0, s1, s2, s3, dg1 sha1su0 v\s0\().4s, v\s1\().4s, v\s2\().4s add_only \op, \ev, \rc, \s1, \dg1 sha1su1 v\s0\().4s, v\s3\().4s .endm .macro loadrc, k, val, tmp movz \tmp, :abs_g0_nc:\val movk \tmp, :abs_g1:\val dup \k, \tmp .endm /* * void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src, * int blocks) */ ENTRY(sha1_ce_transform) frame_push 3 mov x19, x0 mov x20, x1 mov x21, x2 /* load round constants */ 0: loadrc k0.4s, 0x5a827999, w6 loadrc k1.4s, 0x6ed9eba1, w6 loadrc k2.4s, 0x8f1bbcdc, w6 loadrc k3.4s, 0xca62c1d6, w6 /* load state */ ld1 {dgav.4s}, [x19] ldr dgb, [x19, #16] /* load sha1_ce_state::finalize */ ldr_l w4, sha1_ce_offsetof_finalize, x4 ldr w4, [x19, x4] /* load input */ 1: ld1 {v8.4s-v11.4s}, [x20], #64 sub w21, w21, #1 CPU_LE( rev32 v8.16b, v8.16b ) CPU_LE( rev32 v9.16b, v9.16b ) CPU_LE( rev32 v10.16b, v10.16b ) CPU_LE( rev32 v11.16b, v11.16b ) 2: add t0.4s, v8.4s, k0.4s mov dg0v.16b, dgav.16b add_update c, ev, k0, 8, 9, 10, 11, dgb add_update c, od, k0, 9, 10, 11, 8 add_update c, ev, k0, 10, 11, 8, 9 add_update c, od, k0, 11, 8, 9, 10 add_update c, ev, k1, 8, 9, 10, 11 add_update p, od, k1, 9, 10, 11, 8 add_update p, ev, k1, 10, 11, 8, 9 add_update p, od, k1, 11, 8, 9, 10 add_update p, ev, k1, 8, 9, 10, 11 add_update p, od, k2, 9, 10, 11, 8 add_update m, ev, k2, 10, 11, 8, 9 add_update m, od, k2, 11, 8, 9, 10 add_update m, ev, k2, 8, 9, 10, 11 add_update m, od, k2, 9, 10, 11, 8 add_update m, ev, k3, 10, 11, 8, 9 add_update p, od, k3, 11, 8, 9, 10 add_only p, ev, k3, 9 add_only p, od, k3, 10 add_only p, ev, k3, 11 add_only p, od /* update state */ add dgbv.2s, dgbv.2s, dg1v.2s add dgav.4s, dgav.4s, dg0v.4s cbz w21, 3f if_will_cond_yield_neon st1 {dgav.4s}, [x19] str dgb, [x19, #16] do_cond_yield_neon b 0b endif_yield_neon b 1b /* * Final block: add padding and total bit count. * Skip if the input size was not a round multiple of the block size, * the padding is handled by the C code in that case. */ 3: cbz x4, 4f ldr_l w4, sha1_ce_offsetof_count, x4 ldr x4, [x19, x4] movi v9.2d, #0 mov x8, #0x80000000 movi v10.2d, #0 ror x7, x4, #29 // ror(lsl(x4, 3), 32) fmov d8, x8 mov x4, #0 mov v11.d[0], xzr mov v11.d[1], x7 b 2b /* store new state */ 4: st1 {dgav.4s}, [x19] str dgb, [x19, #16] frame_pop ret ENDPROC(sha1_ce_transform)
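The finalization path at label 3 above only runs when the input length is an exact multiple of the 64-byte block size; it then builds the padding block directly in registers: a 0x80 byte, zeroes, and the message length in bits as a 64-bit big-endian value. A hedged C picture of that padding block (generic SHA-1 padding, not the kernel's code):

#include <stdint.h>
#include <string.h>

#define SHA1_BLOCK 64

/*
 * Build the single padding block used when the message length is an exact
 * multiple of 64 bytes: 0x80, 55 zero bytes, then the length in bits as a
 * 64-bit big-endian integer.
 */
static void sha1_final_block_sketch(uint8_t block[SHA1_BLOCK],
                                    uint64_t total_bytes)
{
    uint64_t bits = total_bytes << 3;
    int i;

    memset(block, 0, SHA1_BLOCK);
    block[0] = 0x80;
    for (i = 0; i < 8; i++)
        block[SHA1_BLOCK - 1 - i] = (uint8_t)(bits >> (8 * i));
}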
AirFortressIlikara/LS2K0300-linux-4.19
11,496
arch/arm64/crypto/chacha20-neon-core.S
/* * ChaCha20 256-bit cipher algorithm, RFC7539, arm64 NEON functions * * Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Based on: * ChaCha20 256-bit cipher algorithm, RFC7539, x64 SSSE3 functions * * Copyright (C) 2015 Martin Willi * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/linkage.h> .text .align 6 ENTRY(chacha20_block_xor_neon) // x0: Input state matrix, s // x1: 1 data block output, o // x2: 1 data block input, i // // This function encrypts one ChaCha20 block by loading the state matrix // in four NEON registers. It performs matrix operation on four words in // parallel, but requires shuffling to rearrange the words after each // round. // // x0..3 = s0..3 adr x3, ROT8 ld1 {v0.4s-v3.4s}, [x0] ld1 {v8.4s-v11.4s}, [x0] ld1 {v12.4s}, [x3] mov x3, #10 .Ldoubleround: // x0 += x1, x3 = rotl32(x3 ^ x0, 16) add v0.4s, v0.4s, v1.4s eor v3.16b, v3.16b, v0.16b rev32 v3.8h, v3.8h // x2 += x3, x1 = rotl32(x1 ^ x2, 12) add v2.4s, v2.4s, v3.4s eor v4.16b, v1.16b, v2.16b shl v1.4s, v4.4s, #12 sri v1.4s, v4.4s, #20 // x0 += x1, x3 = rotl32(x3 ^ x0, 8) add v0.4s, v0.4s, v1.4s eor v3.16b, v3.16b, v0.16b tbl v3.16b, {v3.16b}, v12.16b // x2 += x3, x1 = rotl32(x1 ^ x2, 7) add v2.4s, v2.4s, v3.4s eor v4.16b, v1.16b, v2.16b shl v1.4s, v4.4s, #7 sri v1.4s, v4.4s, #25 // x1 = shuffle32(x1, MASK(0, 3, 2, 1)) ext v1.16b, v1.16b, v1.16b, #4 // x2 = shuffle32(x2, MASK(1, 0, 3, 2)) ext v2.16b, v2.16b, v2.16b, #8 // x3 = shuffle32(x3, MASK(2, 1, 0, 3)) ext v3.16b, v3.16b, v3.16b, #12 // x0 += x1, x3 = rotl32(x3 ^ x0, 16) add v0.4s, v0.4s, v1.4s eor v3.16b, v3.16b, v0.16b rev32 v3.8h, v3.8h // x2 += x3, x1 = rotl32(x1 ^ x2, 12) add v2.4s, v2.4s, v3.4s eor v4.16b, v1.16b, v2.16b shl v1.4s, v4.4s, #12 sri v1.4s, v4.4s, #20 // x0 += x1, x3 = rotl32(x3 ^ x0, 8) add v0.4s, v0.4s, v1.4s eor v3.16b, v3.16b, v0.16b tbl v3.16b, {v3.16b}, v12.16b // x2 += x3, x1 = rotl32(x1 ^ x2, 7) add v2.4s, v2.4s, v3.4s eor v4.16b, v1.16b, v2.16b shl v1.4s, v4.4s, #7 sri v1.4s, v4.4s, #25 // x1 = shuffle32(x1, MASK(2, 1, 0, 3)) ext v1.16b, v1.16b, v1.16b, #12 // x2 = shuffle32(x2, MASK(1, 0, 3, 2)) ext v2.16b, v2.16b, v2.16b, #8 // x3 = shuffle32(x3, MASK(0, 3, 2, 1)) ext v3.16b, v3.16b, v3.16b, #4 subs x3, x3, #1 b.ne .Ldoubleround ld1 {v4.16b-v7.16b}, [x2] // o0 = i0 ^ (x0 + s0) add v0.4s, v0.4s, v8.4s eor v0.16b, v0.16b, v4.16b // o1 = i1 ^ (x1 + s1) add v1.4s, v1.4s, v9.4s eor v1.16b, v1.16b, v5.16b // o2 = i2 ^ (x2 + s2) add v2.4s, v2.4s, v10.4s eor v2.16b, v2.16b, v6.16b // o3 = i3 ^ (x3 + s3) add v3.4s, v3.4s, v11.4s eor v3.16b, v3.16b, v7.16b st1 {v0.16b-v3.16b}, [x1] ret ENDPROC(chacha20_block_xor_neon) .align 6 ENTRY(chacha20_4block_xor_neon) // x0: Input state matrix, s // x1: 4 data blocks output, o // x2: 4 data blocks input, i // // This function encrypts four consecutive ChaCha20 blocks by loading // the state matrix in NEON registers four times. The algorithm performs // each operation on the corresponding word of each state matrix, hence // requires no word shuffling. 
For final XORing step we transpose the // matrix by interleaving 32- and then 64-bit words, which allows us to // do XOR in NEON registers. // adr x3, CTRINC // ... and ROT8 ld1 {v30.4s-v31.4s}, [x3] // x0..15[0-3] = s0..3[0..3] mov x4, x0 ld4r { v0.4s- v3.4s}, [x4], #16 ld4r { v4.4s- v7.4s}, [x4], #16 ld4r { v8.4s-v11.4s}, [x4], #16 ld4r {v12.4s-v15.4s}, [x4] // x12 += counter values 0-3 add v12.4s, v12.4s, v30.4s mov x3, #10 .Ldoubleround4: // x0 += x4, x12 = rotl32(x12 ^ x0, 16) // x1 += x5, x13 = rotl32(x13 ^ x1, 16) // x2 += x6, x14 = rotl32(x14 ^ x2, 16) // x3 += x7, x15 = rotl32(x15 ^ x3, 16) add v0.4s, v0.4s, v4.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s add v3.4s, v3.4s, v7.4s eor v12.16b, v12.16b, v0.16b eor v13.16b, v13.16b, v1.16b eor v14.16b, v14.16b, v2.16b eor v15.16b, v15.16b, v3.16b rev32 v12.8h, v12.8h rev32 v13.8h, v13.8h rev32 v14.8h, v14.8h rev32 v15.8h, v15.8h // x8 += x12, x4 = rotl32(x4 ^ x8, 12) // x9 += x13, x5 = rotl32(x5 ^ x9, 12) // x10 += x14, x6 = rotl32(x6 ^ x10, 12) // x11 += x15, x7 = rotl32(x7 ^ x11, 12) add v8.4s, v8.4s, v12.4s add v9.4s, v9.4s, v13.4s add v10.4s, v10.4s, v14.4s add v11.4s, v11.4s, v15.4s eor v16.16b, v4.16b, v8.16b eor v17.16b, v5.16b, v9.16b eor v18.16b, v6.16b, v10.16b eor v19.16b, v7.16b, v11.16b shl v4.4s, v16.4s, #12 shl v5.4s, v17.4s, #12 shl v6.4s, v18.4s, #12 shl v7.4s, v19.4s, #12 sri v4.4s, v16.4s, #20 sri v5.4s, v17.4s, #20 sri v6.4s, v18.4s, #20 sri v7.4s, v19.4s, #20 // x0 += x4, x12 = rotl32(x12 ^ x0, 8) // x1 += x5, x13 = rotl32(x13 ^ x1, 8) // x2 += x6, x14 = rotl32(x14 ^ x2, 8) // x3 += x7, x15 = rotl32(x15 ^ x3, 8) add v0.4s, v0.4s, v4.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s add v3.4s, v3.4s, v7.4s eor v12.16b, v12.16b, v0.16b eor v13.16b, v13.16b, v1.16b eor v14.16b, v14.16b, v2.16b eor v15.16b, v15.16b, v3.16b tbl v12.16b, {v12.16b}, v31.16b tbl v13.16b, {v13.16b}, v31.16b tbl v14.16b, {v14.16b}, v31.16b tbl v15.16b, {v15.16b}, v31.16b // x8 += x12, x4 = rotl32(x4 ^ x8, 7) // x9 += x13, x5 = rotl32(x5 ^ x9, 7) // x10 += x14, x6 = rotl32(x6 ^ x10, 7) // x11 += x15, x7 = rotl32(x7 ^ x11, 7) add v8.4s, v8.4s, v12.4s add v9.4s, v9.4s, v13.4s add v10.4s, v10.4s, v14.4s add v11.4s, v11.4s, v15.4s eor v16.16b, v4.16b, v8.16b eor v17.16b, v5.16b, v9.16b eor v18.16b, v6.16b, v10.16b eor v19.16b, v7.16b, v11.16b shl v4.4s, v16.4s, #7 shl v5.4s, v17.4s, #7 shl v6.4s, v18.4s, #7 shl v7.4s, v19.4s, #7 sri v4.4s, v16.4s, #25 sri v5.4s, v17.4s, #25 sri v6.4s, v18.4s, #25 sri v7.4s, v19.4s, #25 // x0 += x5, x15 = rotl32(x15 ^ x0, 16) // x1 += x6, x12 = rotl32(x12 ^ x1, 16) // x2 += x7, x13 = rotl32(x13 ^ x2, 16) // x3 += x4, x14 = rotl32(x14 ^ x3, 16) add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v4.4s eor v15.16b, v15.16b, v0.16b eor v12.16b, v12.16b, v1.16b eor v13.16b, v13.16b, v2.16b eor v14.16b, v14.16b, v3.16b rev32 v15.8h, v15.8h rev32 v12.8h, v12.8h rev32 v13.8h, v13.8h rev32 v14.8h, v14.8h // x10 += x15, x5 = rotl32(x5 ^ x10, 12) // x11 += x12, x6 = rotl32(x6 ^ x11, 12) // x8 += x13, x7 = rotl32(x7 ^ x8, 12) // x9 += x14, x4 = rotl32(x4 ^ x9, 12) add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v12.4s add v8.4s, v8.4s, v13.4s add v9.4s, v9.4s, v14.4s eor v16.16b, v5.16b, v10.16b eor v17.16b, v6.16b, v11.16b eor v18.16b, v7.16b, v8.16b eor v19.16b, v4.16b, v9.16b shl v5.4s, v16.4s, #12 shl v6.4s, v17.4s, #12 shl v7.4s, v18.4s, #12 shl v4.4s, v19.4s, #12 sri v5.4s, v16.4s, #20 sri v6.4s, v17.4s, #20 sri v7.4s, v18.4s, #20 sri v4.4s, v19.4s, #20 // x0 += x5, x15 = 
rotl32(x15 ^ x0, 8) // x1 += x6, x12 = rotl32(x12 ^ x1, 8) // x2 += x7, x13 = rotl32(x13 ^ x2, 8) // x3 += x4, x14 = rotl32(x14 ^ x3, 8) add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v4.4s eor v15.16b, v15.16b, v0.16b eor v12.16b, v12.16b, v1.16b eor v13.16b, v13.16b, v2.16b eor v14.16b, v14.16b, v3.16b tbl v15.16b, {v15.16b}, v31.16b tbl v12.16b, {v12.16b}, v31.16b tbl v13.16b, {v13.16b}, v31.16b tbl v14.16b, {v14.16b}, v31.16b // x10 += x15, x5 = rotl32(x5 ^ x10, 7) // x11 += x12, x6 = rotl32(x6 ^ x11, 7) // x8 += x13, x7 = rotl32(x7 ^ x8, 7) // x9 += x14, x4 = rotl32(x4 ^ x9, 7) add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v12.4s add v8.4s, v8.4s, v13.4s add v9.4s, v9.4s, v14.4s eor v16.16b, v5.16b, v10.16b eor v17.16b, v6.16b, v11.16b eor v18.16b, v7.16b, v8.16b eor v19.16b, v4.16b, v9.16b shl v5.4s, v16.4s, #7 shl v6.4s, v17.4s, #7 shl v7.4s, v18.4s, #7 shl v4.4s, v19.4s, #7 sri v5.4s, v16.4s, #25 sri v6.4s, v17.4s, #25 sri v7.4s, v18.4s, #25 sri v4.4s, v19.4s, #25 subs x3, x3, #1 b.ne .Ldoubleround4 ld4r {v16.4s-v19.4s}, [x0], #16 ld4r {v20.4s-v23.4s}, [x0], #16 // x12 += counter values 0-3 add v12.4s, v12.4s, v30.4s // x0[0-3] += s0[0] // x1[0-3] += s0[1] // x2[0-3] += s0[2] // x3[0-3] += s0[3] add v0.4s, v0.4s, v16.4s add v1.4s, v1.4s, v17.4s add v2.4s, v2.4s, v18.4s add v3.4s, v3.4s, v19.4s ld4r {v24.4s-v27.4s}, [x0], #16 ld4r {v28.4s-v31.4s}, [x0] // x4[0-3] += s1[0] // x5[0-3] += s1[1] // x6[0-3] += s1[2] // x7[0-3] += s1[3] add v4.4s, v4.4s, v20.4s add v5.4s, v5.4s, v21.4s add v6.4s, v6.4s, v22.4s add v7.4s, v7.4s, v23.4s // x8[0-3] += s2[0] // x9[0-3] += s2[1] // x10[0-3] += s2[2] // x11[0-3] += s2[3] add v8.4s, v8.4s, v24.4s add v9.4s, v9.4s, v25.4s add v10.4s, v10.4s, v26.4s add v11.4s, v11.4s, v27.4s // x12[0-3] += s3[0] // x13[0-3] += s3[1] // x14[0-3] += s3[2] // x15[0-3] += s3[3] add v12.4s, v12.4s, v28.4s add v13.4s, v13.4s, v29.4s add v14.4s, v14.4s, v30.4s add v15.4s, v15.4s, v31.4s // interleave 32-bit words in state n, n+1 zip1 v16.4s, v0.4s, v1.4s zip2 v17.4s, v0.4s, v1.4s zip1 v18.4s, v2.4s, v3.4s zip2 v19.4s, v2.4s, v3.4s zip1 v20.4s, v4.4s, v5.4s zip2 v21.4s, v4.4s, v5.4s zip1 v22.4s, v6.4s, v7.4s zip2 v23.4s, v6.4s, v7.4s zip1 v24.4s, v8.4s, v9.4s zip2 v25.4s, v8.4s, v9.4s zip1 v26.4s, v10.4s, v11.4s zip2 v27.4s, v10.4s, v11.4s zip1 v28.4s, v12.4s, v13.4s zip2 v29.4s, v12.4s, v13.4s zip1 v30.4s, v14.4s, v15.4s zip2 v31.4s, v14.4s, v15.4s // interleave 64-bit words in state n, n+2 zip1 v0.2d, v16.2d, v18.2d zip2 v4.2d, v16.2d, v18.2d zip1 v8.2d, v17.2d, v19.2d zip2 v12.2d, v17.2d, v19.2d ld1 {v16.16b-v19.16b}, [x2], #64 zip1 v1.2d, v20.2d, v22.2d zip2 v5.2d, v20.2d, v22.2d zip1 v9.2d, v21.2d, v23.2d zip2 v13.2d, v21.2d, v23.2d ld1 {v20.16b-v23.16b}, [x2], #64 zip1 v2.2d, v24.2d, v26.2d zip2 v6.2d, v24.2d, v26.2d zip1 v10.2d, v25.2d, v27.2d zip2 v14.2d, v25.2d, v27.2d ld1 {v24.16b-v27.16b}, [x2], #64 zip1 v3.2d, v28.2d, v30.2d zip2 v7.2d, v28.2d, v30.2d zip1 v11.2d, v29.2d, v31.2d zip2 v15.2d, v29.2d, v31.2d ld1 {v28.16b-v31.16b}, [x2] // xor with corresponding input, write to output eor v16.16b, v16.16b, v0.16b eor v17.16b, v17.16b, v1.16b eor v18.16b, v18.16b, v2.16b eor v19.16b, v19.16b, v3.16b eor v20.16b, v20.16b, v4.16b eor v21.16b, v21.16b, v5.16b st1 {v16.16b-v19.16b}, [x1], #64 eor v22.16b, v22.16b, v6.16b eor v23.16b, v23.16b, v7.16b eor v24.16b, v24.16b, v8.16b eor v25.16b, v25.16b, v9.16b st1 {v20.16b-v23.16b}, [x1], #64 eor v26.16b, v26.16b, v10.16b eor v27.16b, v27.16b, v11.16b eor v28.16b, v28.16b, 
v12.16b st1 {v24.16b-v27.16b}, [x1], #64 eor v29.16b, v29.16b, v13.16b eor v30.16b, v30.16b, v14.16b eor v31.16b, v31.16b, v15.16b st1 {v28.16b-v31.16b}, [x1] ret ENDPROC(chacha20_4block_xor_neon) CTRINC: .word 0, 1, 2, 3 ROT8: .word 0x02010003, 0x06050407, 0x0a09080b, 0x0e0d0c0f
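The NEON double round above maps directly onto the scalar quarter round from RFC 7539. A compact C reference of the quarter round and the column/diagonal pattern it is applied in (a reference sketch, not the kernel's generic implementation):

#include <stdint.h>

#define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

/* RFC 7539 quarter round on four words of the 16-word state */
static void qround(uint32_t x[16], int a, int b, int c, int d)
{
    x[a] += x[b]; x[d] = ROTL32(x[d] ^ x[a], 16);
    x[c] += x[d]; x[b] = ROTL32(x[b] ^ x[c], 12);
    x[a] += x[b]; x[d] = ROTL32(x[d] ^ x[a],  8);
    x[c] += x[d]; x[b] = ROTL32(x[b] ^ x[c],  7);
}

/* one double round: four column rounds, then four diagonal rounds */
static void double_round(uint32_t x[16])
{
    qround(x, 0, 4,  8, 12);
    qround(x, 1, 5,  9, 13);
    qround(x, 2, 6, 10, 14);
    qround(x, 3, 7, 11, 15);
    qround(x, 0, 5, 10, 15);
    qround(x, 1, 6, 11, 12);
    qround(x, 2, 7,  8, 13);
    qround(x, 3, 4,  9, 14);
}

chacha20_block_xor_neon runs ten such double rounds (the mov x3, #10 loop) and then adds the original state back in before XORing the keystream with the input block.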
AirFortressIlikara/LS2K0300-linux-4.19
6,581
arch/arm64/crypto/sha3-ce-core.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * sha3-ce-core.S - core SHA-3 transform using v8.2 Crypto Extensions * * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/assembler.h> .irp b,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 .set .Lv\b\().2d, \b .set .Lv\b\().16b, \b .endr /* * ARMv8.2 Crypto Extensions instructions */ .macro eor3, rd, rn, rm, ra .inst 0xce000000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16) .endm .macro rax1, rd, rn, rm .inst 0xce608c00 | .L\rd | (.L\rn << 5) | (.L\rm << 16) .endm .macro bcax, rd, rn, rm, ra .inst 0xce200000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16) .endm .macro xar, rd, rn, rm, imm6 .inst 0xce800000 | .L\rd | (.L\rn << 5) | ((\imm6) << 10) | (.L\rm << 16) .endm /* * sha3_ce_transform(u64 *st, const u8 *data, int blocks, int dg_size) */ .text ENTRY(sha3_ce_transform) frame_push 4 mov x19, x0 mov x20, x1 mov x21, x2 mov x22, x3 0: /* load state */ add x8, x19, #32 ld1 { v0.1d- v3.1d}, [x19] ld1 { v4.1d- v7.1d}, [x8], #32 ld1 { v8.1d-v11.1d}, [x8], #32 ld1 {v12.1d-v15.1d}, [x8], #32 ld1 {v16.1d-v19.1d}, [x8], #32 ld1 {v20.1d-v23.1d}, [x8], #32 ld1 {v24.1d}, [x8] 1: sub w21, w21, #1 mov w8, #24 adr_l x9, .Lsha3_rcon /* load input */ ld1 {v25.8b-v28.8b}, [x20], #32 ld1 {v29.8b-v31.8b}, [x20], #24 eor v0.8b, v0.8b, v25.8b eor v1.8b, v1.8b, v26.8b eor v2.8b, v2.8b, v27.8b eor v3.8b, v3.8b, v28.8b eor v4.8b, v4.8b, v29.8b eor v5.8b, v5.8b, v30.8b eor v6.8b, v6.8b, v31.8b tbnz x22, #6, 3f // SHA3-512 ld1 {v25.8b-v28.8b}, [x20], #32 ld1 {v29.8b-v30.8b}, [x20], #16 eor v7.8b, v7.8b, v25.8b eor v8.8b, v8.8b, v26.8b eor v9.8b, v9.8b, v27.8b eor v10.8b, v10.8b, v28.8b eor v11.8b, v11.8b, v29.8b eor v12.8b, v12.8b, v30.8b tbnz x22, #4, 2f // SHA3-384 or SHA3-224 // SHA3-256 ld1 {v25.8b-v28.8b}, [x20], #32 eor v13.8b, v13.8b, v25.8b eor v14.8b, v14.8b, v26.8b eor v15.8b, v15.8b, v27.8b eor v16.8b, v16.8b, v28.8b b 4f 2: tbz x22, #2, 4f // bit 2 cleared? 
SHA-384 // SHA3-224 ld1 {v25.8b-v28.8b}, [x20], #32 ld1 {v29.8b}, [x20], #8 eor v13.8b, v13.8b, v25.8b eor v14.8b, v14.8b, v26.8b eor v15.8b, v15.8b, v27.8b eor v16.8b, v16.8b, v28.8b eor v17.8b, v17.8b, v29.8b b 4f // SHA3-512 3: ld1 {v25.8b-v26.8b}, [x20], #16 eor v7.8b, v7.8b, v25.8b eor v8.8b, v8.8b, v26.8b 4: sub w8, w8, #1 eor3 v29.16b, v4.16b, v9.16b, v14.16b eor3 v26.16b, v1.16b, v6.16b, v11.16b eor3 v28.16b, v3.16b, v8.16b, v13.16b eor3 v25.16b, v0.16b, v5.16b, v10.16b eor3 v27.16b, v2.16b, v7.16b, v12.16b eor3 v29.16b, v29.16b, v19.16b, v24.16b eor3 v26.16b, v26.16b, v16.16b, v21.16b eor3 v28.16b, v28.16b, v18.16b, v23.16b eor3 v25.16b, v25.16b, v15.16b, v20.16b eor3 v27.16b, v27.16b, v17.16b, v22.16b rax1 v30.2d, v29.2d, v26.2d // bc[0] rax1 v26.2d, v26.2d, v28.2d // bc[2] rax1 v28.2d, v28.2d, v25.2d // bc[4] rax1 v25.2d, v25.2d, v27.2d // bc[1] rax1 v27.2d, v27.2d, v29.2d // bc[3] eor v0.16b, v0.16b, v30.16b xar v29.2d, v1.2d, v25.2d, (64 - 1) xar v1.2d, v6.2d, v25.2d, (64 - 44) xar v6.2d, v9.2d, v28.2d, (64 - 20) xar v9.2d, v22.2d, v26.2d, (64 - 61) xar v22.2d, v14.2d, v28.2d, (64 - 39) xar v14.2d, v20.2d, v30.2d, (64 - 18) xar v31.2d, v2.2d, v26.2d, (64 - 62) xar v2.2d, v12.2d, v26.2d, (64 - 43) xar v12.2d, v13.2d, v27.2d, (64 - 25) xar v13.2d, v19.2d, v28.2d, (64 - 8) xar v19.2d, v23.2d, v27.2d, (64 - 56) xar v23.2d, v15.2d, v30.2d, (64 - 41) xar v15.2d, v4.2d, v28.2d, (64 - 27) xar v28.2d, v24.2d, v28.2d, (64 - 14) xar v24.2d, v21.2d, v25.2d, (64 - 2) xar v8.2d, v8.2d, v27.2d, (64 - 55) xar v4.2d, v16.2d, v25.2d, (64 - 45) xar v16.2d, v5.2d, v30.2d, (64 - 36) xar v5.2d, v3.2d, v27.2d, (64 - 28) xar v27.2d, v18.2d, v27.2d, (64 - 21) xar v3.2d, v17.2d, v26.2d, (64 - 15) xar v25.2d, v11.2d, v25.2d, (64 - 10) xar v26.2d, v7.2d, v26.2d, (64 - 6) xar v30.2d, v10.2d, v30.2d, (64 - 3) bcax v20.16b, v31.16b, v22.16b, v8.16b bcax v21.16b, v8.16b, v23.16b, v22.16b bcax v22.16b, v22.16b, v24.16b, v23.16b bcax v23.16b, v23.16b, v31.16b, v24.16b bcax v24.16b, v24.16b, v8.16b, v31.16b ld1r {v31.2d}, [x9], #8 bcax v17.16b, v25.16b, v19.16b, v3.16b bcax v18.16b, v3.16b, v15.16b, v19.16b bcax v19.16b, v19.16b, v16.16b, v15.16b bcax v15.16b, v15.16b, v25.16b, v16.16b bcax v16.16b, v16.16b, v3.16b, v25.16b bcax v10.16b, v29.16b, v12.16b, v26.16b bcax v11.16b, v26.16b, v13.16b, v12.16b bcax v12.16b, v12.16b, v14.16b, v13.16b bcax v13.16b, v13.16b, v29.16b, v14.16b bcax v14.16b, v14.16b, v26.16b, v29.16b bcax v7.16b, v30.16b, v9.16b, v4.16b bcax v8.16b, v4.16b, v5.16b, v9.16b bcax v9.16b, v9.16b, v6.16b, v5.16b bcax v5.16b, v5.16b, v30.16b, v6.16b bcax v6.16b, v6.16b, v4.16b, v30.16b bcax v3.16b, v27.16b, v0.16b, v28.16b bcax v4.16b, v28.16b, v1.16b, v0.16b bcax v0.16b, v0.16b, v2.16b, v1.16b bcax v1.16b, v1.16b, v27.16b, v2.16b bcax v2.16b, v2.16b, v28.16b, v27.16b eor v0.16b, v0.16b, v31.16b cbnz w8, 4b cbz w21, 5f if_will_cond_yield_neon add x8, x19, #32 st1 { v0.1d- v3.1d}, [x19] st1 { v4.1d- v7.1d}, [x8], #32 st1 { v8.1d-v11.1d}, [x8], #32 st1 {v12.1d-v15.1d}, [x8], #32 st1 {v16.1d-v19.1d}, [x8], #32 st1 {v20.1d-v23.1d}, [x8], #32 st1 {v24.1d}, [x8] do_cond_yield_neon b 0b endif_yield_neon b 1b /* save state */ 5: st1 { v0.1d- v3.1d}, [x19], #32 st1 { v4.1d- v7.1d}, [x19], #32 st1 { v8.1d-v11.1d}, [x19], #32 st1 {v12.1d-v15.1d}, [x19], #32 st1 {v16.1d-v19.1d}, [x19], #32 st1 {v20.1d-v23.1d}, [x19], #32 st1 {v24.1d}, [x19] frame_pop ret ENDPROC(sha3_ce_transform) .section ".rodata", "a" .align 8 .Lsha3_rcon: .quad 0x0000000000000001, 0x0000000000008082, 0x800000000000808a .quad 
0x8000000080008000, 0x000000000000808b, 0x0000000080000001 .quad 0x8000000080008081, 0x8000000000008009, 0x000000000000008a .quad 0x0000000000000088, 0x0000000080008009, 0x000000008000000a .quad 0x000000008000808b, 0x800000000000008b, 0x8000000000008089 .quad 0x8000000000008003, 0x8000000000008002, 0x8000000000000080 .quad 0x000000000000800a, 0x800000008000000a, 0x8000000080008081 .quad 0x8000000000008080, 0x0000000080000001, 0x8000000080008008
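The tbnz/tbz tests on x22 (dg_size) above select how many 8-byte lanes are absorbed per block, i.e. the Keccak rate. A hedged C expression of the same relationship (rate = 200 - 2 * digest size in bytes; the helper name is chosen for the example):

#include <stddef.h>

/* Keccak-p[1600] sponge: 25 lanes of 8 bytes = 200-byte state */
#define KECCAK_STATE_BYTES 200

/* bytes of input absorbed per block for SHA3 with the given digest size */
static size_t sha3_rate_bytes(size_t digest_bytes)
{
    return KECCAK_STATE_BYTES - 2 * digest_bytes;
}

/* e.g. sha3_rate_bytes(28) == 144, (32) == 136, (48) == 104, (64) == 72,
 * matching the 18/17/13/9-lane loads selected by the dg_size bit tests. */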
AirFortressIlikara/LS2K0300-linux-4.19
6,095
arch/arm64/crypto/aes-ce-ccm-core.S
/* * aesce-ccm-core.S - AES-CCM transform for ARMv8 with Crypto Extensions * * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/assembler.h> .text .arch armv8-a+crypto /* * void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes, * u32 *macp, u8 const rk[], u32 rounds); */ ENTRY(ce_aes_ccm_auth_data) ldr w8, [x3] /* leftover from prev round? */ ld1 {v0.16b}, [x0] /* load mac */ cbz w8, 1f sub w8, w8, #16 eor v1.16b, v1.16b, v1.16b 0: ldrb w7, [x1], #1 /* get 1 byte of input */ subs w2, w2, #1 add w8, w8, #1 ins v1.b[0], w7 ext v1.16b, v1.16b, v1.16b, #1 /* rotate in the input bytes */ beq 8f /* out of input? */ cbnz w8, 0b eor v0.16b, v0.16b, v1.16b 1: ld1 {v3.4s}, [x4] /* load first round key */ prfm pldl1strm, [x1] cmp w5, #12 /* which key size? */ add x6, x4, #16 sub w7, w5, #2 /* modified # of rounds */ bmi 2f bne 5f mov v5.16b, v3.16b b 4f 2: mov v4.16b, v3.16b ld1 {v5.4s}, [x6], #16 /* load 2nd round key */ 3: aese v0.16b, v4.16b aesmc v0.16b, v0.16b 4: ld1 {v3.4s}, [x6], #16 /* load next round key */ aese v0.16b, v5.16b aesmc v0.16b, v0.16b 5: ld1 {v4.4s}, [x6], #16 /* load next round key */ subs w7, w7, #3 aese v0.16b, v3.16b aesmc v0.16b, v0.16b ld1 {v5.4s}, [x6], #16 /* load next round key */ bpl 3b aese v0.16b, v4.16b subs w2, w2, #16 /* last data? */ eor v0.16b, v0.16b, v5.16b /* final round */ bmi 6f ld1 {v1.16b}, [x1], #16 /* load next input block */ eor v0.16b, v0.16b, v1.16b /* xor with mac */ bne 1b 6: st1 {v0.16b}, [x0] /* store mac */ beq 10f adds w2, w2, #16 beq 10f mov w8, w2 7: ldrb w7, [x1], #1 umov w6, v0.b[0] eor w6, w6, w7 strb w6, [x0], #1 subs w2, w2, #1 beq 10f ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */ b 7b 8: cbz w8, 91f mov w7, w8 add w8, w8, #16 9: ext v1.16b, v1.16b, v1.16b, #1 adds w7, w7, #1 bne 9b 91: eor v0.16b, v0.16b, v1.16b st1 {v0.16b}, [x0] 10: str w8, [x3] ret ENDPROC(ce_aes_ccm_auth_data) /* * void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u8 const rk[], * u32 rounds); */ ENTRY(ce_aes_ccm_final) ld1 {v3.4s}, [x2], #16 /* load first round key */ ld1 {v0.16b}, [x0] /* load mac */ cmp w3, #12 /* which key size? */ sub w3, w3, #2 /* modified # of rounds */ ld1 {v1.16b}, [x1] /* load 1st ctriv */ bmi 0f bne 3f mov v5.16b, v3.16b b 2f 0: mov v4.16b, v3.16b 1: ld1 {v5.4s}, [x2], #16 /* load next round key */ aese v0.16b, v4.16b aesmc v0.16b, v0.16b aese v1.16b, v4.16b aesmc v1.16b, v1.16b 2: ld1 {v3.4s}, [x2], #16 /* load next round key */ aese v0.16b, v5.16b aesmc v0.16b, v0.16b aese v1.16b, v5.16b aesmc v1.16b, v1.16b 3: ld1 {v4.4s}, [x2], #16 /* load next round key */ subs w3, w3, #3 aese v0.16b, v3.16b aesmc v0.16b, v0.16b aese v1.16b, v3.16b aesmc v1.16b, v1.16b bpl 1b aese v0.16b, v4.16b aese v1.16b, v4.16b /* final round key cancels out */ eor v0.16b, v0.16b, v1.16b /* en-/decrypt the mac */ st1 {v0.16b}, [x0] /* store result */ ret ENDPROC(ce_aes_ccm_final) .macro aes_ccm_do_crypt,enc ldr x8, [x6, #8] /* load lower ctr */ ld1 {v0.16b}, [x5] /* load mac */ CPU_LE( rev x8, x8 ) /* keep swabbed ctr in reg */ 0: /* outer loop */ ld1 {v1.8b}, [x6] /* load upper ctr */ prfm pldl1strm, [x1] add x8, x8, #1 rev x9, x8 cmp w4, #12 /* which key size? 
*/ sub w7, w4, #2 /* get modified # of rounds */ ins v1.d[1], x9 /* no carry in lower ctr */ ld1 {v3.4s}, [x3] /* load first round key */ add x10, x3, #16 bmi 1f bne 4f mov v5.16b, v3.16b b 3f 1: mov v4.16b, v3.16b ld1 {v5.4s}, [x10], #16 /* load 2nd round key */ 2: /* inner loop: 3 rounds, 2x interleaved */ aese v0.16b, v4.16b aesmc v0.16b, v0.16b aese v1.16b, v4.16b aesmc v1.16b, v1.16b 3: ld1 {v3.4s}, [x10], #16 /* load next round key */ aese v0.16b, v5.16b aesmc v0.16b, v0.16b aese v1.16b, v5.16b aesmc v1.16b, v1.16b 4: ld1 {v4.4s}, [x10], #16 /* load next round key */ subs w7, w7, #3 aese v0.16b, v3.16b aesmc v0.16b, v0.16b aese v1.16b, v3.16b aesmc v1.16b, v1.16b ld1 {v5.4s}, [x10], #16 /* load next round key */ bpl 2b aese v0.16b, v4.16b aese v1.16b, v4.16b subs w2, w2, #16 bmi 6f /* partial block? */ ld1 {v2.16b}, [x1], #16 /* load next input block */ .if \enc == 1 eor v2.16b, v2.16b, v5.16b /* final round enc+mac */ eor v1.16b, v1.16b, v2.16b /* xor with crypted ctr */ .else eor v2.16b, v2.16b, v1.16b /* xor with crypted ctr */ eor v1.16b, v2.16b, v5.16b /* final round enc */ .endif eor v0.16b, v0.16b, v2.16b /* xor mac with pt ^ rk[last] */ st1 {v1.16b}, [x0], #16 /* write output block */ bne 0b CPU_LE( rev x8, x8 ) st1 {v0.16b}, [x5] /* store mac */ str x8, [x6, #8] /* store lsb end of ctr (BE) */ 5: ret 6: eor v0.16b, v0.16b, v5.16b /* final round mac */ eor v1.16b, v1.16b, v5.16b /* final round enc */ st1 {v0.16b}, [x5] /* store mac */ add w2, w2, #16 /* process partial tail block */ 7: ldrb w9, [x1], #1 /* get 1 byte of input */ umov w6, v1.b[0] /* get top crypted ctr byte */ umov w7, v0.b[0] /* get top mac byte */ .if \enc == 1 eor w7, w7, w9 eor w9, w9, w6 .else eor w9, w9, w6 eor w7, w7, w9 .endif strb w9, [x0], #1 /* store out byte */ strb w7, [x5], #1 /* store mac byte */ subs w2, w2, #1 beq 5b ext v0.16b, v0.16b, v0.16b, #1 /* shift out mac byte */ ext v1.16b, v1.16b, v1.16b, #1 /* shift out ctr byte */ b 7b .endm /* * void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes, * u8 const rk[], u32 rounds, u8 mac[], * u8 ctr[]); * void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes, * u8 const rk[], u32 rounds, u8 mac[], * u8 ctr[]); */ ENTRY(ce_aes_ccm_encrypt) aes_ccm_do_crypt 1 ENDPROC(ce_aes_ccm_encrypt) ENTRY(ce_aes_ccm_decrypt) aes_ccm_do_crypt 0 ENDPROC(ce_aes_ccm_decrypt)
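
The CCM routines above expect the caller to have formatted the B_0 authentication block and the initial counter block before handing mac/ctr to ce_aes_ccm_auth_data() and ce_aes_ccm_encrypt(). A minimal C sketch of that formatting, following RFC 3610, is shown below; the helper name and signature are illustrative only and are not the kernel glue-code API, which derives these blocks from the request nonce and tag length.

#include <stdint.h>
#include <string.h>

/*
 * Illustrative helper (hypothetical name): build the CCM B_0 block and the
 * initial counter block A_0 per RFC 3610 for a tag of 'tlen' bytes and an
 * 'L'-byte length field (L in 2..8, tlen even, 4..16).
 */
static void ccm_format_b0_a0(uint8_t b0[16], uint8_t a0[16],
                             const uint8_t *nonce, unsigned int tlen,
                             unsigned int L, uint64_t msglen, int have_aad)
{
    memset(b0, 0, 16);
    /* flags: [Adata | M' = (tlen - 2) / 2 | L' = L - 1] */
    b0[0] = (uint8_t)((have_aad ? 0x40 : 0) | (((tlen - 2) / 2) << 3) | (L - 1));
    memcpy(&b0[1], nonce, 15 - L);
    for (unsigned int i = 0; i < L; i++)        /* big-endian message length */
        b0[15 - i] = (uint8_t)(msglen >> (8 * i));

    memset(a0, 0, 16);
    a0[0] = (uint8_t)(L - 1);                   /* counter block flags */
    memcpy(&a0[1], nonce, 15 - L);              /* counter field starts at 0 */
}
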
AirFortressIlikara/LS2K0300-linux-4.19
11,404
arch/arm64/crypto/aes-modes.S
/* * linux/arch/arm64/crypto/aes-modes.S - chaining mode wrappers for AES * * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* included by aes-ce.S and aes-neon.S */ .text .align 4 aes_encrypt_block4x: encrypt_block4x v0, v1, v2, v3, w22, x21, x8, w7 ret ENDPROC(aes_encrypt_block4x) aes_decrypt_block4x: decrypt_block4x v0, v1, v2, v3, w22, x21, x8, w7 ret ENDPROC(aes_decrypt_block4x) /* * aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int blocks) * aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int blocks) */ AES_ENTRY(aes_ecb_encrypt) frame_push 5 mov x19, x0 mov x20, x1 mov x21, x2 mov x22, x3 mov x23, x4 .Lecbencrestart: enc_prepare w22, x21, x5 .LecbencloopNx: subs w23, w23, #4 bmi .Lecbenc1x ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 pt blocks */ bl aes_encrypt_block4x st1 {v0.16b-v3.16b}, [x19], #64 cond_yield_neon .Lecbencrestart b .LecbencloopNx .Lecbenc1x: adds w23, w23, #4 beq .Lecbencout .Lecbencloop: ld1 {v0.16b}, [x20], #16 /* get next pt block */ encrypt_block v0, w22, x21, x5, w6 st1 {v0.16b}, [x19], #16 subs w23, w23, #1 bne .Lecbencloop .Lecbencout: frame_pop ret AES_ENDPROC(aes_ecb_encrypt) AES_ENTRY(aes_ecb_decrypt) frame_push 5 mov x19, x0 mov x20, x1 mov x21, x2 mov x22, x3 mov x23, x4 .Lecbdecrestart: dec_prepare w22, x21, x5 .LecbdecloopNx: subs w23, w23, #4 bmi .Lecbdec1x ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 ct blocks */ bl aes_decrypt_block4x st1 {v0.16b-v3.16b}, [x19], #64 cond_yield_neon .Lecbdecrestart b .LecbdecloopNx .Lecbdec1x: adds w23, w23, #4 beq .Lecbdecout .Lecbdecloop: ld1 {v0.16b}, [x20], #16 /* get next ct block */ decrypt_block v0, w22, x21, x5, w6 st1 {v0.16b}, [x19], #16 subs w23, w23, #1 bne .Lecbdecloop .Lecbdecout: frame_pop ret AES_ENDPROC(aes_ecb_decrypt) /* * aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int blocks, u8 iv[]) * aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int blocks, u8 iv[]) */ AES_ENTRY(aes_cbc_encrypt) frame_push 6 mov x19, x0 mov x20, x1 mov x21, x2 mov x22, x3 mov x23, x4 mov x24, x5 .Lcbcencrestart: ld1 {v4.16b}, [x24] /* get iv */ enc_prepare w22, x21, x6 .Lcbcencloop4x: subs w23, w23, #4 bmi .Lcbcenc1x ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 pt blocks */ eor v0.16b, v0.16b, v4.16b /* ..and xor with iv */ encrypt_block v0, w22, x21, x6, w7 eor v1.16b, v1.16b, v0.16b encrypt_block v1, w22, x21, x6, w7 eor v2.16b, v2.16b, v1.16b encrypt_block v2, w22, x21, x6, w7 eor v3.16b, v3.16b, v2.16b encrypt_block v3, w22, x21, x6, w7 st1 {v0.16b-v3.16b}, [x19], #64 mov v4.16b, v3.16b st1 {v4.16b}, [x24] /* return iv */ cond_yield_neon .Lcbcencrestart b .Lcbcencloop4x .Lcbcenc1x: adds w23, w23, #4 beq .Lcbcencout .Lcbcencloop: ld1 {v0.16b}, [x20], #16 /* get next pt block */ eor v4.16b, v4.16b, v0.16b /* ..and xor with iv */ encrypt_block v4, w22, x21, x6, w7 st1 {v4.16b}, [x19], #16 subs w23, w23, #1 bne .Lcbcencloop .Lcbcencout: st1 {v4.16b}, [x24] /* return iv */ frame_pop ret AES_ENDPROC(aes_cbc_encrypt) AES_ENTRY(aes_cbc_decrypt) frame_push 6 mov x19, x0 mov x20, x1 mov x21, x2 mov x22, x3 mov x23, x4 mov x24, x5 .Lcbcdecrestart: ld1 {v7.16b}, [x24] /* get iv */ dec_prepare w22, x21, x6 .LcbcdecloopNx: subs w23, w23, #4 bmi .Lcbcdec1x ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 ct blocks */ mov v4.16b, v0.16b mov v5.16b, v1.16b 
mov v6.16b, v2.16b bl aes_decrypt_block4x sub x20, x20, #16 eor v0.16b, v0.16b, v7.16b eor v1.16b, v1.16b, v4.16b ld1 {v7.16b}, [x20], #16 /* reload 1 ct block */ eor v2.16b, v2.16b, v5.16b eor v3.16b, v3.16b, v6.16b st1 {v0.16b-v3.16b}, [x19], #64 st1 {v7.16b}, [x24] /* return iv */ cond_yield_neon .Lcbcdecrestart b .LcbcdecloopNx .Lcbcdec1x: adds w23, w23, #4 beq .Lcbcdecout .Lcbcdecloop: ld1 {v1.16b}, [x20], #16 /* get next ct block */ mov v0.16b, v1.16b /* ...and copy to v0 */ decrypt_block v0, w22, x21, x6, w7 eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */ mov v7.16b, v1.16b /* ct is next iv */ st1 {v0.16b}, [x19], #16 subs w23, w23, #1 bne .Lcbcdecloop .Lcbcdecout: st1 {v7.16b}, [x24] /* return iv */ frame_pop ret AES_ENDPROC(aes_cbc_decrypt) /* * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int blocks, u8 ctr[]) */ AES_ENTRY(aes_ctr_encrypt) frame_push 6 mov x19, x0 mov x20, x1 mov x21, x2 mov x22, x3 mov x23, x4 mov x24, x5 .Lctrrestart: enc_prepare w22, x21, x6 ld1 {v4.16b}, [x24] umov x6, v4.d[1] /* keep swabbed ctr in reg */ rev x6, x6 .LctrloopNx: subs w23, w23, #4 bmi .Lctr1x cmn w6, #4 /* 32 bit overflow? */ bcs .Lctr1x add w7, w6, #1 mov v0.16b, v4.16b add w8, w6, #2 mov v1.16b, v4.16b add w9, w6, #3 mov v2.16b, v4.16b rev w7, w7 mov v3.16b, v4.16b rev w8, w8 mov v1.s[3], w7 rev w9, w9 mov v2.s[3], w8 mov v3.s[3], w9 ld1 {v5.16b-v7.16b}, [x20], #48 /* get 3 input blocks */ bl aes_encrypt_block4x eor v0.16b, v5.16b, v0.16b ld1 {v5.16b}, [x20], #16 /* get 1 input block */ eor v1.16b, v6.16b, v1.16b eor v2.16b, v7.16b, v2.16b eor v3.16b, v5.16b, v3.16b st1 {v0.16b-v3.16b}, [x19], #64 add x6, x6, #4 rev x7, x6 ins v4.d[1], x7 cbz w23, .Lctrout st1 {v4.16b}, [x24] /* return next CTR value */ cond_yield_neon .Lctrrestart b .LctrloopNx .Lctr1x: adds w23, w23, #4 beq .Lctrout .Lctrloop: mov v0.16b, v4.16b encrypt_block v0, w22, x21, x8, w7 adds x6, x6, #1 /* increment BE ctr */ rev x7, x6 ins v4.d[1], x7 bcs .Lctrcarry /* overflow? */ .Lctrcarrydone: subs w23, w23, #1 bmi .Lctrtailblock /* blocks <0 means tail block */ ld1 {v3.16b}, [x20], #16 eor v3.16b, v0.16b, v3.16b st1 {v3.16b}, [x19], #16 bne .Lctrloop .Lctrout: st1 {v4.16b}, [x24] /* return next CTR value */ .Lctrret: frame_pop ret .Lctrtailblock: st1 {v0.16b}, [x19] b .Lctrret .Lctrcarry: umov x7, v4.d[0] /* load upper word of ctr */ rev x7, x7 /* ... 
to handle the carry */ add x7, x7, #1 rev x7, x7 ins v4.d[0], x7 b .Lctrcarrydone AES_ENDPROC(aes_ctr_encrypt) .ltorg /* * aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds, * int blocks, u8 const rk2[], u8 iv[], int first) * aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds, * int blocks, u8 const rk2[], u8 iv[], int first) */ .macro next_tweak, out, in, const, tmp sshr \tmp\().2d, \in\().2d, #63 and \tmp\().16b, \tmp\().16b, \const\().16b add \out\().2d, \in\().2d, \in\().2d ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8 eor \out\().16b, \out\().16b, \tmp\().16b .endm .Lxts_mul_x: CPU_LE( .quad 1, 0x87 ) CPU_BE( .quad 0x87, 1 ) AES_ENTRY(aes_xts_encrypt) frame_push 6 mov x19, x0 mov x20, x1 mov x21, x2 mov x22, x3 mov x23, x4 mov x24, x6 ld1 {v4.16b}, [x24] cbz w7, .Lxtsencnotfirst enc_prepare w3, x5, x8 encrypt_block v4, w3, x5, x8, w7 /* first tweak */ enc_switch_key w3, x2, x8 ldr q7, .Lxts_mul_x b .LxtsencNx .Lxtsencrestart: ld1 {v4.16b}, [x24] .Lxtsencnotfirst: enc_prepare w22, x21, x8 .LxtsencloopNx: ldr q7, .Lxts_mul_x next_tweak v4, v4, v7, v8 .LxtsencNx: subs w23, w23, #4 bmi .Lxtsenc1x ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 pt blocks */ next_tweak v5, v4, v7, v8 eor v0.16b, v0.16b, v4.16b next_tweak v6, v5, v7, v8 eor v1.16b, v1.16b, v5.16b eor v2.16b, v2.16b, v6.16b next_tweak v7, v6, v7, v8 eor v3.16b, v3.16b, v7.16b bl aes_encrypt_block4x eor v3.16b, v3.16b, v7.16b eor v0.16b, v0.16b, v4.16b eor v1.16b, v1.16b, v5.16b eor v2.16b, v2.16b, v6.16b st1 {v0.16b-v3.16b}, [x19], #64 mov v4.16b, v7.16b cbz w23, .Lxtsencout st1 {v4.16b}, [x24] cond_yield_neon .Lxtsencrestart b .LxtsencloopNx .Lxtsenc1x: adds w23, w23, #4 beq .Lxtsencout .Lxtsencloop: ld1 {v1.16b}, [x20], #16 eor v0.16b, v1.16b, v4.16b encrypt_block v0, w22, x21, x8, w7 eor v0.16b, v0.16b, v4.16b st1 {v0.16b}, [x19], #16 subs w23, w23, #1 beq .Lxtsencout next_tweak v4, v4, v7, v8 b .Lxtsencloop .Lxtsencout: st1 {v4.16b}, [x24] frame_pop ret AES_ENDPROC(aes_xts_encrypt) AES_ENTRY(aes_xts_decrypt) frame_push 6 mov x19, x0 mov x20, x1 mov x21, x2 mov x22, x3 mov x23, x4 mov x24, x6 ld1 {v4.16b}, [x24] cbz w7, .Lxtsdecnotfirst enc_prepare w3, x5, x8 encrypt_block v4, w3, x5, x8, w7 /* first tweak */ dec_prepare w3, x2, x8 ldr q7, .Lxts_mul_x b .LxtsdecNx .Lxtsdecrestart: ld1 {v4.16b}, [x24] .Lxtsdecnotfirst: dec_prepare w22, x21, x8 .LxtsdecloopNx: ldr q7, .Lxts_mul_x next_tweak v4, v4, v7, v8 .LxtsdecNx: subs w23, w23, #4 bmi .Lxtsdec1x ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 ct blocks */ next_tweak v5, v4, v7, v8 eor v0.16b, v0.16b, v4.16b next_tweak v6, v5, v7, v8 eor v1.16b, v1.16b, v5.16b eor v2.16b, v2.16b, v6.16b next_tweak v7, v6, v7, v8 eor v3.16b, v3.16b, v7.16b bl aes_decrypt_block4x eor v3.16b, v3.16b, v7.16b eor v0.16b, v0.16b, v4.16b eor v1.16b, v1.16b, v5.16b eor v2.16b, v2.16b, v6.16b st1 {v0.16b-v3.16b}, [x19], #64 mov v4.16b, v7.16b cbz w23, .Lxtsdecout st1 {v4.16b}, [x24] cond_yield_neon .Lxtsdecrestart b .LxtsdecloopNx .Lxtsdec1x: adds w23, w23, #4 beq .Lxtsdecout .Lxtsdecloop: ld1 {v1.16b}, [x20], #16 eor v0.16b, v1.16b, v4.16b decrypt_block v0, w22, x21, x8, w7 eor v0.16b, v0.16b, v4.16b st1 {v0.16b}, [x19], #16 subs w23, w23, #1 beq .Lxtsdecout next_tweak v4, v4, v7, v8 b .Lxtsdecloop .Lxtsdecout: st1 {v4.16b}, [x24] frame_pop ret AES_ENDPROC(aes_xts_decrypt) /* * aes_mac_update(u8 const in[], u32 const rk[], int rounds, * int blocks, u8 dg[], int enc_before, int enc_after) */ AES_ENTRY(aes_mac_update) frame_push 6 mov x19, x0 mov x20, x1 mov x21, x2 mov x22, x3 
mov x23, x4 mov x24, x6 ld1 {v0.16b}, [x23] /* get dg */ enc_prepare w2, x1, x7 cbz w5, .Lmacloop4x encrypt_block v0, w2, x1, x7, w8 .Lmacloop4x: subs w22, w22, #4 bmi .Lmac1x ld1 {v1.16b-v4.16b}, [x19], #64 /* get next pt block */ eor v0.16b, v0.16b, v1.16b /* ..and xor with dg */ encrypt_block v0, w21, x20, x7, w8 eor v0.16b, v0.16b, v2.16b encrypt_block v0, w21, x20, x7, w8 eor v0.16b, v0.16b, v3.16b encrypt_block v0, w21, x20, x7, w8 eor v0.16b, v0.16b, v4.16b cmp w22, wzr csinv x5, x24, xzr, eq cbz w5, .Lmacout encrypt_block v0, w21, x20, x7, w8 st1 {v0.16b}, [x23] /* return dg */ cond_yield_neon .Lmacrestart b .Lmacloop4x .Lmac1x: add w22, w22, #4 .Lmacloop: cbz w22, .Lmacout ld1 {v1.16b}, [x19], #16 /* get next pt block */ eor v0.16b, v0.16b, v1.16b /* ..and xor with dg */ subs w22, w22, #1 csinv x5, x24, xzr, eq cbz w5, .Lmacout .Lmacenc: encrypt_block v0, w21, x20, x7, w8 b .Lmacloop .Lmacout: st1 {v0.16b}, [x23] /* return dg */ frame_pop ret .Lmacrestart: ld1 {v0.16b}, [x23] /* get dg */ enc_prepare w21, x20, x0 b .Lmacloop4x AES_ENDPROC(aes_mac_update)
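
aes_ctr_encrypt above keeps the low 64 bits of the big-endian counter byte-swapped in x6 and takes the .Lctrcarry path only when incrementing them overflows into the upper half. A portable C model of that counter behaviour, treating the IV as one 128-bit big-endian integer, might look like this sketch (reference model, not kernel code):

#include <stdint.h>

/* Increment a 128-bit big-endian counter, propagating the carry upwards. */
static void ctr128_inc(uint8_t ctr[16])
{
    for (int i = 15; i >= 0; i--)
        if (++ctr[i] != 0)      /* stop as soon as a byte does not wrap */
            break;
}
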
AirFortressIlikara/LS2K0300-linux-4.19
3,473
arch/arm64/crypto/aes-ce.S
/* * linux/arch/arm64/crypto/aes-ce.S - AES cipher for ARMv8 with * Crypto Extensions * * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/assembler.h> #define AES_ENTRY(func) ENTRY(ce_ ## func) #define AES_ENDPROC(func) ENDPROC(ce_ ## func) .arch armv8-a+crypto /* preload all round keys */ .macro load_round_keys, rounds, rk cmp \rounds, #12 blo 2222f /* 128 bits */ beq 1111f /* 192 bits */ ld1 {v17.4s-v18.4s}, [\rk], #32 1111: ld1 {v19.4s-v20.4s}, [\rk], #32 2222: ld1 {v21.4s-v24.4s}, [\rk], #64 ld1 {v25.4s-v28.4s}, [\rk], #64 ld1 {v29.4s-v31.4s}, [\rk] .endm /* prepare for encryption with key in rk[] */ .macro enc_prepare, rounds, rk, temp mov \temp, \rk load_round_keys \rounds, \temp .endm /* prepare for encryption (again) but with new key in rk[] */ .macro enc_switch_key, rounds, rk, temp mov \temp, \rk load_round_keys \rounds, \temp .endm /* prepare for decryption with key in rk[] */ .macro dec_prepare, rounds, rk, temp mov \temp, \rk load_round_keys \rounds, \temp .endm .macro do_enc_Nx, de, mc, k, i0, i1, i2, i3 aes\de \i0\().16b, \k\().16b aes\mc \i0\().16b, \i0\().16b .ifnb \i1 aes\de \i1\().16b, \k\().16b aes\mc \i1\().16b, \i1\().16b .ifnb \i3 aes\de \i2\().16b, \k\().16b aes\mc \i2\().16b, \i2\().16b aes\de \i3\().16b, \k\().16b aes\mc \i3\().16b, \i3\().16b .endif .endif .endm /* up to 4 interleaved encryption rounds with the same round key */ .macro round_Nx, enc, k, i0, i1, i2, i3 .ifc \enc, e do_enc_Nx e, mc, \k, \i0, \i1, \i2, \i3 .else do_enc_Nx d, imc, \k, \i0, \i1, \i2, \i3 .endif .endm /* up to 4 interleaved final rounds */ .macro fin_round_Nx, de, k, k2, i0, i1, i2, i3 aes\de \i0\().16b, \k\().16b .ifnb \i1 aes\de \i1\().16b, \k\().16b .ifnb \i3 aes\de \i2\().16b, \k\().16b aes\de \i3\().16b, \k\().16b .endif .endif eor \i0\().16b, \i0\().16b, \k2\().16b .ifnb \i1 eor \i1\().16b, \i1\().16b, \k2\().16b .ifnb \i3 eor \i2\().16b, \i2\().16b, \k2\().16b eor \i3\().16b, \i3\().16b, \k2\().16b .endif .endif .endm /* up to 4 interleaved blocks */ .macro do_block_Nx, enc, rounds, i0, i1, i2, i3 cmp \rounds, #12 blo 2222f /* 128 bits */ beq 1111f /* 192 bits */ round_Nx \enc, v17, \i0, \i1, \i2, \i3 round_Nx \enc, v18, \i0, \i1, \i2, \i3 1111: round_Nx \enc, v19, \i0, \i1, \i2, \i3 round_Nx \enc, v20, \i0, \i1, \i2, \i3 2222: .irp key, v21, v22, v23, v24, v25, v26, v27, v28, v29 round_Nx \enc, \key, \i0, \i1, \i2, \i3 .endr fin_round_Nx \enc, v30, v31, \i0, \i1, \i2, \i3 .endm .macro encrypt_block, in, rounds, t0, t1, t2 do_block_Nx e, \rounds, \in .endm .macro encrypt_block2x, i0, i1, rounds, t0, t1, t2 do_block_Nx e, \rounds, \i0, \i1 .endm .macro encrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2 do_block_Nx e, \rounds, \i0, \i1, \i2, \i3 .endm .macro decrypt_block, in, rounds, t0, t1, t2 do_block_Nx d, \rounds, \in .endm .macro decrypt_block2x, i0, i1, rounds, t0, t1, t2 do_block_Nx d, \rounds, \i0, \i1 .endm .macro decrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2 do_block_Nx d, \rounds, \i0, \i1, \i2, \i3 .endm #include "aes-modes.S"
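
load_round_keys and do_block_Nx branch on the round count rather than on the key size directly: only AES-256 (14 rounds) needs v17/v18, only AES-192 and above (12+ rounds) need v19/v20, and v21-v31 are common to all key sizes. The `cmp \rounds, #12` checks rely on the usual AES relationship between key length and round count, e.g. (illustrative helper, not kernel code):

/* Round count from key length in bytes: 16 -> 10, 24 -> 12, 32 -> 14. */
static inline int aes_nrounds(unsigned int key_len_bytes)
{
    return 6 + key_len_bytes / 4;
}
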
AirFortressIlikara/LS2K0300-linux-4.19
12,675
arch/arm64/crypto/ghash-ce-core.S
/* * Accelerated GHASH implementation with ARMv8 PMULL instructions. * * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/assembler.h> SHASH .req v0 SHASH2 .req v1 T1 .req v2 T2 .req v3 MASK .req v4 XL .req v5 XM .req v6 XH .req v7 IN1 .req v7 k00_16 .req v8 k32_48 .req v9 t3 .req v10 t4 .req v11 t5 .req v12 t6 .req v13 t7 .req v14 t8 .req v15 t9 .req v16 perm1 .req v17 perm2 .req v18 perm3 .req v19 sh1 .req v20 sh2 .req v21 sh3 .req v22 sh4 .req v23 ss1 .req v24 ss2 .req v25 ss3 .req v26 ss4 .req v27 XL2 .req v8 XM2 .req v9 XH2 .req v10 XL3 .req v11 XM3 .req v12 XH3 .req v13 TT3 .req v14 TT4 .req v15 HH .req v16 HH3 .req v17 HH4 .req v18 HH34 .req v19 .text .arch armv8-a+crypto .macro __pmull_p64, rd, rn, rm pmull \rd\().1q, \rn\().1d, \rm\().1d .endm .macro __pmull2_p64, rd, rn, rm pmull2 \rd\().1q, \rn\().2d, \rm\().2d .endm .macro __pmull_p8, rq, ad, bd ext t3.8b, \ad\().8b, \ad\().8b, #1 // A1 ext t5.8b, \ad\().8b, \ad\().8b, #2 // A2 ext t7.8b, \ad\().8b, \ad\().8b, #3 // A3 __pmull_p8_\bd \rq, \ad .endm .macro __pmull2_p8, rq, ad, bd tbl t3.16b, {\ad\().16b}, perm1.16b // A1 tbl t5.16b, {\ad\().16b}, perm2.16b // A2 tbl t7.16b, {\ad\().16b}, perm3.16b // A3 __pmull2_p8_\bd \rq, \ad .endm .macro __pmull_p8_SHASH, rq, ad __pmull_p8_tail \rq, \ad\().8b, SHASH.8b, 8b,, sh1, sh2, sh3, sh4 .endm .macro __pmull_p8_SHASH2, rq, ad __pmull_p8_tail \rq, \ad\().8b, SHASH2.8b, 8b,, ss1, ss2, ss3, ss4 .endm .macro __pmull2_p8_SHASH, rq, ad __pmull_p8_tail \rq, \ad\().16b, SHASH.16b, 16b, 2, sh1, sh2, sh3, sh4 .endm .macro __pmull_p8_tail, rq, ad, bd, nb, t, b1, b2, b3, b4 pmull\t t3.8h, t3.\nb, \bd // F = A1*B pmull\t t4.8h, \ad, \b1\().\nb // E = A*B1 pmull\t t5.8h, t5.\nb, \bd // H = A2*B pmull\t t6.8h, \ad, \b2\().\nb // G = A*B2 pmull\t t7.8h, t7.\nb, \bd // J = A3*B pmull\t t8.8h, \ad, \b3\().\nb // I = A*B3 pmull\t t9.8h, \ad, \b4\().\nb // K = A*B4 pmull\t \rq\().8h, \ad, \bd // D = A*B eor t3.16b, t3.16b, t4.16b // L = E + F eor t5.16b, t5.16b, t6.16b // M = G + H eor t7.16b, t7.16b, t8.16b // N = I + J uzp1 t4.2d, t3.2d, t5.2d uzp2 t3.2d, t3.2d, t5.2d uzp1 t6.2d, t7.2d, t9.2d uzp2 t7.2d, t7.2d, t9.2d // t3 = (L) (P0 + P1) << 8 // t5 = (M) (P2 + P3) << 16 eor t4.16b, t4.16b, t3.16b and t3.16b, t3.16b, k32_48.16b // t7 = (N) (P4 + P5) << 24 // t9 = (K) (P6 + P7) << 32 eor t6.16b, t6.16b, t7.16b and t7.16b, t7.16b, k00_16.16b eor t4.16b, t4.16b, t3.16b eor t6.16b, t6.16b, t7.16b zip2 t5.2d, t4.2d, t3.2d zip1 t3.2d, t4.2d, t3.2d zip2 t9.2d, t6.2d, t7.2d zip1 t7.2d, t6.2d, t7.2d ext t3.16b, t3.16b, t3.16b, #15 ext t5.16b, t5.16b, t5.16b, #14 ext t7.16b, t7.16b, t7.16b, #13 ext t9.16b, t9.16b, t9.16b, #12 eor t3.16b, t3.16b, t5.16b eor t7.16b, t7.16b, t9.16b eor \rq\().16b, \rq\().16b, t3.16b eor \rq\().16b, \rq\().16b, t7.16b .endm .macro __pmull_pre_p64 add x8, x3, #16 ld1 {HH.2d-HH4.2d}, [x8] trn1 SHASH2.2d, SHASH.2d, HH.2d trn2 T1.2d, SHASH.2d, HH.2d eor SHASH2.16b, SHASH2.16b, T1.16b trn1 HH34.2d, HH3.2d, HH4.2d trn2 T1.2d, HH3.2d, HH4.2d eor HH34.16b, HH34.16b, T1.16b movi MASK.16b, #0xe1 shl MASK.2d, MASK.2d, #57 .endm .macro __pmull_pre_p8 ext SHASH2.16b, SHASH.16b, SHASH.16b, #8 eor SHASH2.16b, SHASH2.16b, SHASH.16b // k00_16 := 0x0000000000000000_000000000000ffff // k32_48 := 0x00000000ffffffff_0000ffffffffffff movi k32_48.2d, #0xffffffff 
mov k32_48.h[2], k32_48.h[0] ushr k00_16.2d, k32_48.2d, #32 // prepare the permutation vectors mov_q x5, 0x080f0e0d0c0b0a09 movi T1.8b, #8 dup perm1.2d, x5 eor perm1.16b, perm1.16b, T1.16b ushr perm2.2d, perm1.2d, #8 ushr perm3.2d, perm1.2d, #16 ushr T1.2d, perm1.2d, #24 sli perm2.2d, perm1.2d, #56 sli perm3.2d, perm1.2d, #48 sli T1.2d, perm1.2d, #40 // precompute loop invariants tbl sh1.16b, {SHASH.16b}, perm1.16b tbl sh2.16b, {SHASH.16b}, perm2.16b tbl sh3.16b, {SHASH.16b}, perm3.16b tbl sh4.16b, {SHASH.16b}, T1.16b ext ss1.8b, SHASH2.8b, SHASH2.8b, #1 ext ss2.8b, SHASH2.8b, SHASH2.8b, #2 ext ss3.8b, SHASH2.8b, SHASH2.8b, #3 ext ss4.8b, SHASH2.8b, SHASH2.8b, #4 .endm // // PMULL (64x64->128) based reduction for CPUs that can do // it in a single instruction. // .macro __pmull_reduce_p64 pmull T2.1q, XL.1d, MASK.1d eor XM.16b, XM.16b, T1.16b mov XH.d[0], XM.d[1] mov XM.d[1], XL.d[0] eor XL.16b, XM.16b, T2.16b ext T2.16b, XL.16b, XL.16b, #8 pmull XL.1q, XL.1d, MASK.1d .endm // // Alternative reduction for CPUs that lack support for the // 64x64->128 PMULL instruction // .macro __pmull_reduce_p8 eor XM.16b, XM.16b, T1.16b mov XL.d[1], XM.d[0] mov XH.d[0], XM.d[1] shl T1.2d, XL.2d, #57 shl T2.2d, XL.2d, #62 eor T2.16b, T2.16b, T1.16b shl T1.2d, XL.2d, #63 eor T2.16b, T2.16b, T1.16b ext T1.16b, XL.16b, XH.16b, #8 eor T2.16b, T2.16b, T1.16b mov XL.d[1], T2.d[0] mov XH.d[0], T2.d[1] ushr T2.2d, XL.2d, #1 eor XH.16b, XH.16b, XL.16b eor XL.16b, XL.16b, T2.16b ushr T2.2d, T2.2d, #6 ushr XL.2d, XL.2d, #1 .endm .macro __pmull_ghash, pn ld1 {SHASH.2d}, [x3] ld1 {XL.2d}, [x1] __pmull_pre_\pn /* do the head block first, if supplied */ cbz x4, 0f ld1 {T1.2d}, [x4] mov x4, xzr b 3f 0: .ifc \pn, p64 tbnz w0, #0, 2f // skip until #blocks is a tbnz w0, #1, 2f // round multiple of 4 1: ld1 {XM3.16b-TT4.16b}, [x2], #64 sub w0, w0, #4 rev64 T1.16b, XM3.16b rev64 T2.16b, XH3.16b rev64 TT4.16b, TT4.16b rev64 TT3.16b, TT3.16b ext IN1.16b, TT4.16b, TT4.16b, #8 ext XL3.16b, TT3.16b, TT3.16b, #8 eor TT4.16b, TT4.16b, IN1.16b pmull2 XH2.1q, SHASH.2d, IN1.2d // a1 * b1 pmull XL2.1q, SHASH.1d, IN1.1d // a0 * b0 pmull XM2.1q, SHASH2.1d, TT4.1d // (a1 + a0)(b1 + b0) eor TT3.16b, TT3.16b, XL3.16b pmull2 XH3.1q, HH.2d, XL3.2d // a1 * b1 pmull XL3.1q, HH.1d, XL3.1d // a0 * b0 pmull2 XM3.1q, SHASH2.2d, TT3.2d // (a1 + a0)(b1 + b0) ext IN1.16b, T2.16b, T2.16b, #8 eor XL2.16b, XL2.16b, XL3.16b eor XH2.16b, XH2.16b, XH3.16b eor XM2.16b, XM2.16b, XM3.16b eor T2.16b, T2.16b, IN1.16b pmull2 XH3.1q, HH3.2d, IN1.2d // a1 * b1 pmull XL3.1q, HH3.1d, IN1.1d // a0 * b0 pmull XM3.1q, HH34.1d, T2.1d // (a1 + a0)(b1 + b0) eor XL2.16b, XL2.16b, XL3.16b eor XH2.16b, XH2.16b, XH3.16b eor XM2.16b, XM2.16b, XM3.16b ext IN1.16b, T1.16b, T1.16b, #8 ext TT3.16b, XL.16b, XL.16b, #8 eor XL.16b, XL.16b, IN1.16b eor T1.16b, T1.16b, TT3.16b pmull2 XH.1q, HH4.2d, XL.2d // a1 * b1 eor T1.16b, T1.16b, XL.16b pmull XL.1q, HH4.1d, XL.1d // a0 * b0 pmull2 XM.1q, HH34.2d, T1.2d // (a1 + a0)(b1 + b0) eor XL.16b, XL.16b, XL2.16b eor XH.16b, XH.16b, XH2.16b eor XM.16b, XM.16b, XM2.16b eor T2.16b, XL.16b, XH.16b ext T1.16b, XL.16b, XH.16b, #8 eor XM.16b, XM.16b, T2.16b __pmull_reduce_p64 eor T2.16b, T2.16b, XH.16b eor XL.16b, XL.16b, T2.16b cbz w0, 5f b 1b .endif 2: ld1 {T1.2d}, [x2], #16 sub w0, w0, #1 3: /* multiply XL by SHASH in GF(2^128) */ CPU_LE( rev64 T1.16b, T1.16b ) ext T2.16b, XL.16b, XL.16b, #8 ext IN1.16b, T1.16b, T1.16b, #8 eor T1.16b, T1.16b, T2.16b eor XL.16b, XL.16b, IN1.16b __pmull2_\pn XH, XL, SHASH // a1 * b1 eor T1.16b, T1.16b, XL.16b 
__pmull_\pn XL, XL, SHASH // a0 * b0 __pmull_\pn XM, T1, SHASH2 // (a1 + a0)(b1 + b0) 4: eor T2.16b, XL.16b, XH.16b ext T1.16b, XL.16b, XH.16b, #8 eor XM.16b, XM.16b, T2.16b __pmull_reduce_\pn eor T2.16b, T2.16b, XH.16b eor XL.16b, XL.16b, T2.16b cbnz w0, 0b 5: st1 {XL.2d}, [x1] ret .endm /* * void pmull_ghash_update(int blocks, u64 dg[], const char *src, * struct ghash_key const *k, const char *head) */ ENTRY(pmull_ghash_update_p64) __pmull_ghash p64 ENDPROC(pmull_ghash_update_p64) ENTRY(pmull_ghash_update_p8) __pmull_ghash p8 ENDPROC(pmull_ghash_update_p8) KS0 .req v12 KS1 .req v13 INP0 .req v14 INP1 .req v15 .macro load_round_keys, rounds, rk cmp \rounds, #12 blo 2222f /* 128 bits */ beq 1111f /* 192 bits */ ld1 {v17.4s-v18.4s}, [\rk], #32 1111: ld1 {v19.4s-v20.4s}, [\rk], #32 2222: ld1 {v21.4s-v24.4s}, [\rk], #64 ld1 {v25.4s-v28.4s}, [\rk], #64 ld1 {v29.4s-v31.4s}, [\rk] .endm .macro enc_round, state, key aese \state\().16b, \key\().16b aesmc \state\().16b, \state\().16b .endm .macro enc_block, state, rounds cmp \rounds, #12 b.lo 2222f /* 128 bits */ b.eq 1111f /* 192 bits */ enc_round \state, v17 enc_round \state, v18 1111: enc_round \state, v19 enc_round \state, v20 2222: .irp key, v21, v22, v23, v24, v25, v26, v27, v28, v29 enc_round \state, \key .endr aese \state\().16b, v30.16b eor \state\().16b, \state\().16b, v31.16b .endm .macro pmull_gcm_do_crypt, enc ld1 {SHASH.2d}, [x4], #16 ld1 {HH.2d}, [x4] ld1 {XL.2d}, [x1] ldr x8, [x5, #8] // load lower counter movi MASK.16b, #0xe1 trn1 SHASH2.2d, SHASH.2d, HH.2d trn2 T1.2d, SHASH.2d, HH.2d CPU_LE( rev x8, x8 ) shl MASK.2d, MASK.2d, #57 eor SHASH2.16b, SHASH2.16b, T1.16b .if \enc == 1 ldr x10, [sp] ld1 {KS0.16b-KS1.16b}, [x10] .endif cbnz x6, 4f 0: ld1 {INP0.16b-INP1.16b}, [x3], #32 rev x9, x8 add x11, x8, #1 add x8, x8, #2 .if \enc == 1 eor INP0.16b, INP0.16b, KS0.16b // encrypt input eor INP1.16b, INP1.16b, KS1.16b .endif ld1 {KS0.8b}, [x5] // load upper counter rev x11, x11 sub w0, w0, #2 mov KS1.8b, KS0.8b ins KS0.d[1], x9 // set lower counter ins KS1.d[1], x11 rev64 T1.16b, INP1.16b cmp w7, #12 b.ge 2f // AES-192/256? 
1: enc_round KS0, v21 ext IN1.16b, T1.16b, T1.16b, #8 enc_round KS1, v21 pmull2 XH2.1q, SHASH.2d, IN1.2d // a1 * b1 enc_round KS0, v22 eor T1.16b, T1.16b, IN1.16b enc_round KS1, v22 pmull XL2.1q, SHASH.1d, IN1.1d // a0 * b0 enc_round KS0, v23 pmull XM2.1q, SHASH2.1d, T1.1d // (a1 + a0)(b1 + b0) enc_round KS1, v23 rev64 T1.16b, INP0.16b ext T2.16b, XL.16b, XL.16b, #8 enc_round KS0, v24 ext IN1.16b, T1.16b, T1.16b, #8 eor T1.16b, T1.16b, T2.16b enc_round KS1, v24 eor XL.16b, XL.16b, IN1.16b enc_round KS0, v25 eor T1.16b, T1.16b, XL.16b enc_round KS1, v25 pmull2 XH.1q, HH.2d, XL.2d // a1 * b1 enc_round KS0, v26 pmull XL.1q, HH.1d, XL.1d // a0 * b0 enc_round KS1, v26 pmull2 XM.1q, SHASH2.2d, T1.2d // (a1 + a0)(b1 + b0) enc_round KS0, v27 eor XL.16b, XL.16b, XL2.16b eor XH.16b, XH.16b, XH2.16b enc_round KS1, v27 eor XM.16b, XM.16b, XM2.16b ext T1.16b, XL.16b, XH.16b, #8 enc_round KS0, v28 eor T2.16b, XL.16b, XH.16b eor XM.16b, XM.16b, T1.16b enc_round KS1, v28 eor XM.16b, XM.16b, T2.16b enc_round KS0, v29 pmull T2.1q, XL.1d, MASK.1d enc_round KS1, v29 mov XH.d[0], XM.d[1] mov XM.d[1], XL.d[0] aese KS0.16b, v30.16b eor XL.16b, XM.16b, T2.16b aese KS1.16b, v30.16b ext T2.16b, XL.16b, XL.16b, #8 eor KS0.16b, KS0.16b, v31.16b pmull XL.1q, XL.1d, MASK.1d eor T2.16b, T2.16b, XH.16b eor KS1.16b, KS1.16b, v31.16b eor XL.16b, XL.16b, T2.16b .if \enc == 0 eor INP0.16b, INP0.16b, KS0.16b eor INP1.16b, INP1.16b, KS1.16b .endif st1 {INP0.16b-INP1.16b}, [x2], #32 cbnz w0, 0b CPU_LE( rev x8, x8 ) st1 {XL.2d}, [x1] str x8, [x5, #8] // store lower counter .if \enc == 1 st1 {KS0.16b-KS1.16b}, [x10] .endif ret 2: b.eq 3f // AES-192? enc_round KS0, v17 enc_round KS1, v17 enc_round KS0, v18 enc_round KS1, v18 3: enc_round KS0, v19 enc_round KS1, v19 enc_round KS0, v20 enc_round KS1, v20 b 1b 4: load_round_keys w7, x6 b 0b .endm /* * void pmull_gcm_encrypt(int blocks, u64 dg[], u8 dst[], const u8 src[], * struct ghash_key const *k, u8 ctr[], * int rounds, u8 ks[]) */ ENTRY(pmull_gcm_encrypt) pmull_gcm_do_crypt 1 ENDPROC(pmull_gcm_encrypt) /* * void pmull_gcm_decrypt(int blocks, u64 dg[], u8 dst[], const u8 src[], * struct ghash_key const *k, u8 ctr[], * int rounds) */ ENTRY(pmull_gcm_decrypt) pmull_gcm_do_crypt 0 ENDPROC(pmull_gcm_decrypt) /* * void pmull_gcm_encrypt_block(u8 dst[], u8 src[], u8 rk[], int rounds) */ ENTRY(pmull_gcm_encrypt_block) cbz x2, 0f load_round_keys w3, x2 0: ld1 {v0.16b}, [x1] enc_block v0, w3 st1 {v0.16b}, [x0] ret ENDPROC(pmull_gcm_encrypt_block)
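
All of the PMULL paths above compute the GF(2^128) product that defines GHASH (NIST SP 800-38D), with the reduction polynomial x^128 + x^7 + x^2 + x + 1 appearing as the 0xe1 MASK constant. For reference, a bit-serial C model of that multiplication, useful for testing against the accelerated versions, could be written as the sketch below; the function name is illustrative only.

#include <stdint.h>
#include <string.h>

/*
 * Reference GF(2^128) multiply for GHASH (SP 800-38D, Algorithm 1).
 * x, y and z are 16-byte GHASH field elements, most significant bit first.
 */
static void gf128_mul_ref(uint8_t z[16], const uint8_t x[16], const uint8_t y[16])
{
    uint8_t v[16], zz[16] = { 0 };

    memcpy(v, y, 16);
    for (int i = 0; i < 128; i++) {
        /* if bit i of x is set, accumulate the current v into the product */
        if (x[i / 8] & (0x80 >> (i % 8)))
            for (int j = 0; j < 16; j++)
                zz[j] ^= v[j];

        /* v >>= 1; if a bit fell off the end, xor in the 0xe1 reduction */
        int lsb = v[15] & 1;
        for (int j = 15; j > 0; j--)
            v[j] = (uint8_t)((v[j] >> 1) | (v[j - 1] << 7));
        v[0] >>= 1;
        if (lsb)
            v[0] ^= 0xe1;
    }
    memcpy(z, zz, 16);
}
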
AirFortressIlikara/LS2K0300-linux-4.19
24,697
arch/arm64/crypto/aes-neonbs-core.S
/* * Bit sliced AES using NEON instructions * * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* * The algorithm implemented here is described in detail by the paper * 'Faster and Timing-Attack Resistant AES-GCM' by Emilia Kaesper and * Peter Schwabe (https://eprint.iacr.org/2009/129.pdf) * * This implementation is based primarily on the OpenSSL implementation * for 32-bit ARM written by Andy Polyakov <appro@openssl.org> */ #include <linux/linkage.h> #include <asm/assembler.h> .text rounds .req x11 bskey .req x12 .macro in_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7 eor \b2, \b2, \b1 eor \b5, \b5, \b6 eor \b3, \b3, \b0 eor \b6, \b6, \b2 eor \b5, \b5, \b0 eor \b6, \b6, \b3 eor \b3, \b3, \b7 eor \b7, \b7, \b5 eor \b3, \b3, \b4 eor \b4, \b4, \b5 eor \b2, \b2, \b7 eor \b3, \b3, \b1 eor \b1, \b1, \b5 .endm .macro out_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7 eor \b0, \b0, \b6 eor \b1, \b1, \b4 eor \b4, \b4, \b6 eor \b2, \b2, \b0 eor \b6, \b6, \b1 eor \b1, \b1, \b5 eor \b5, \b5, \b3 eor \b3, \b3, \b7 eor \b7, \b7, \b5 eor \b2, \b2, \b5 eor \b4, \b4, \b7 .endm .macro inv_in_bs_ch, b6, b1, b2, b4, b7, b0, b3, b5 eor \b1, \b1, \b7 eor \b4, \b4, \b7 eor \b7, \b7, \b5 eor \b1, \b1, \b3 eor \b2, \b2, \b5 eor \b3, \b3, \b7 eor \b6, \b6, \b1 eor \b2, \b2, \b0 eor \b5, \b5, \b3 eor \b4, \b4, \b6 eor \b0, \b0, \b6 eor \b1, \b1, \b4 .endm .macro inv_out_bs_ch, b6, b5, b0, b3, b7, b1, b4, b2 eor \b1, \b1, \b5 eor \b2, \b2, \b7 eor \b3, \b3, \b1 eor \b4, \b4, \b5 eor \b7, \b7, \b5 eor \b3, \b3, \b4 eor \b5, \b5, \b0 eor \b3, \b3, \b7 eor \b6, \b6, \b2 eor \b2, \b2, \b1 eor \b6, \b6, \b3 eor \b3, \b3, \b0 eor \b5, \b5, \b6 .endm .macro mul_gf4, x0, x1, y0, y1, t0, t1 eor \t0, \y0, \y1 and \t0, \t0, \x0 eor \x0, \x0, \x1 and \t1, \x1, \y0 and \x0, \x0, \y1 eor \x1, \t1, \t0 eor \x0, \x0, \t1 .endm .macro mul_gf4_n_gf4, x0, x1, y0, y1, t0, x2, x3, y2, y3, t1 eor \t0, \y0, \y1 eor \t1, \y2, \y3 and \t0, \t0, \x0 and \t1, \t1, \x2 eor \x0, \x0, \x1 eor \x2, \x2, \x3 and \x1, \x1, \y0 and \x3, \x3, \y2 and \x0, \x0, \y1 and \x2, \x2, \y3 eor \x1, \x1, \x0 eor \x2, \x2, \x3 eor \x0, \x0, \t0 eor \x3, \x3, \t1 .endm .macro mul_gf16_2, x0, x1, x2, x3, x4, x5, x6, x7, \ y0, y1, y2, y3, t0, t1, t2, t3 eor \t0, \x0, \x2 eor \t1, \x1, \x3 mul_gf4 \x0, \x1, \y0, \y1, \t2, \t3 eor \y0, \y0, \y2 eor \y1, \y1, \y3 mul_gf4_n_gf4 \t0, \t1, \y0, \y1, \t3, \x2, \x3, \y2, \y3, \t2 eor \x0, \x0, \t0 eor \x2, \x2, \t0 eor \x1, \x1, \t1 eor \x3, \x3, \t1 eor \t0, \x4, \x6 eor \t1, \x5, \x7 mul_gf4_n_gf4 \t0, \t1, \y0, \y1, \t3, \x6, \x7, \y2, \y3, \t2 eor \y0, \y0, \y2 eor \y1, \y1, \y3 mul_gf4 \x4, \x5, \y0, \y1, \t2, \t3 eor \x4, \x4, \t0 eor \x6, \x6, \t0 eor \x5, \x5, \t1 eor \x7, \x7, \t1 .endm .macro inv_gf256, x0, x1, x2, x3, x4, x5, x6, x7, \ t0, t1, t2, t3, s0, s1, s2, s3 eor \t3, \x4, \x6 eor \t0, \x5, \x7 eor \t1, \x1, \x3 eor \s1, \x7, \x6 eor \s0, \x0, \x2 eor \s3, \t3, \t0 orr \t2, \t0, \t1 and \s2, \t3, \s0 orr \t3, \t3, \s0 eor \s0, \s0, \t1 and \t0, \t0, \t1 eor \t1, \x3, \x2 and \s3, \s3, \s0 and \s1, \s1, \t1 eor \t1, \x4, \x5 eor \s0, \x1, \x0 eor \t3, \t3, \s1 eor \t2, \t2, \s1 and \s1, \t1, \s0 orr \t1, \t1, \s0 eor \t3, \t3, \s3 eor \t0, \t0, \s1 eor \t2, \t2, \s2 eor \t1, \t1, \s3 eor \t0, \t0, \s2 and \s0, \x7, \x3 eor \t1, \t1, \s2 and \s1, \x6, \x2 and \s2, \x5, \x1 orr \s3, \x4, \x0 eor \t3, \t3, \s0 eor \t1, \t1, \s2 
eor \s0, \t0, \s3 eor \t2, \t2, \s1 and \s2, \t3, \t1 eor \s1, \t2, \s2 eor \s3, \s0, \s2 bsl \s1, \t1, \s0 not \t0, \s0 bsl \s0, \s1, \s3 bsl \t0, \s1, \s3 bsl \s3, \t3, \t2 eor \t3, \t3, \t2 and \s2, \s0, \s3 eor \t1, \t1, \t0 eor \s2, \s2, \t3 mul_gf16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \ \s3, \s2, \s1, \t1, \s0, \t0, \t2, \t3 .endm .macro sbox, b0, b1, b2, b3, b4, b5, b6, b7, \ t0, t1, t2, t3, s0, s1, s2, s3 in_bs_ch \b0\().16b, \b1\().16b, \b2\().16b, \b3\().16b, \ \b4\().16b, \b5\().16b, \b6\().16b, \b7\().16b inv_gf256 \b6\().16b, \b5\().16b, \b0\().16b, \b3\().16b, \ \b7\().16b, \b1\().16b, \b4\().16b, \b2\().16b, \ \t0\().16b, \t1\().16b, \t2\().16b, \t3\().16b, \ \s0\().16b, \s1\().16b, \s2\().16b, \s3\().16b out_bs_ch \b7\().16b, \b1\().16b, \b4\().16b, \b2\().16b, \ \b6\().16b, \b5\().16b, \b0\().16b, \b3\().16b .endm .macro inv_sbox, b0, b1, b2, b3, b4, b5, b6, b7, \ t0, t1, t2, t3, s0, s1, s2, s3 inv_in_bs_ch \b0\().16b, \b1\().16b, \b2\().16b, \b3\().16b, \ \b4\().16b, \b5\().16b, \b6\().16b, \b7\().16b inv_gf256 \b5\().16b, \b1\().16b, \b2\().16b, \b6\().16b, \ \b3\().16b, \b7\().16b, \b0\().16b, \b4\().16b, \ \t0\().16b, \t1\().16b, \t2\().16b, \t3\().16b, \ \s0\().16b, \s1\().16b, \s2\().16b, \s3\().16b inv_out_bs_ch \b3\().16b, \b7\().16b, \b0\().16b, \b4\().16b, \ \b5\().16b, \b1\().16b, \b2\().16b, \b6\().16b .endm .macro enc_next_rk ldp q16, q17, [bskey], #128 ldp q18, q19, [bskey, #-96] ldp q20, q21, [bskey, #-64] ldp q22, q23, [bskey, #-32] .endm .macro dec_next_rk ldp q16, q17, [bskey, #-128]! ldp q18, q19, [bskey, #32] ldp q20, q21, [bskey, #64] ldp q22, q23, [bskey, #96] .endm .macro add_round_key, x0, x1, x2, x3, x4, x5, x6, x7 eor \x0\().16b, \x0\().16b, v16.16b eor \x1\().16b, \x1\().16b, v17.16b eor \x2\().16b, \x2\().16b, v18.16b eor \x3\().16b, \x3\().16b, v19.16b eor \x4\().16b, \x4\().16b, v20.16b eor \x5\().16b, \x5\().16b, v21.16b eor \x6\().16b, \x6\().16b, v22.16b eor \x7\().16b, \x7\().16b, v23.16b .endm .macro shift_rows, x0, x1, x2, x3, x4, x5, x6, x7, mask tbl \x0\().16b, {\x0\().16b}, \mask\().16b tbl \x1\().16b, {\x1\().16b}, \mask\().16b tbl \x2\().16b, {\x2\().16b}, \mask\().16b tbl \x3\().16b, {\x3\().16b}, \mask\().16b tbl \x4\().16b, {\x4\().16b}, \mask\().16b tbl \x5\().16b, {\x5\().16b}, \mask\().16b tbl \x6\().16b, {\x6\().16b}, \mask\().16b tbl \x7\().16b, {\x7\().16b}, \mask\().16b .endm .macro mix_cols, x0, x1, x2, x3, x4, x5, x6, x7, \ t0, t1, t2, t3, t4, t5, t6, t7, inv ext \t0\().16b, \x0\().16b, \x0\().16b, #12 ext \t1\().16b, \x1\().16b, \x1\().16b, #12 eor \x0\().16b, \x0\().16b, \t0\().16b ext \t2\().16b, \x2\().16b, \x2\().16b, #12 eor \x1\().16b, \x1\().16b, \t1\().16b ext \t3\().16b, \x3\().16b, \x3\().16b, #12 eor \x2\().16b, \x2\().16b, \t2\().16b ext \t4\().16b, \x4\().16b, \x4\().16b, #12 eor \x3\().16b, \x3\().16b, \t3\().16b ext \t5\().16b, \x5\().16b, \x5\().16b, #12 eor \x4\().16b, \x4\().16b, \t4\().16b ext \t6\().16b, \x6\().16b, \x6\().16b, #12 eor \x5\().16b, \x5\().16b, \t5\().16b ext \t7\().16b, \x7\().16b, \x7\().16b, #12 eor \x6\().16b, \x6\().16b, \t6\().16b eor \t1\().16b, \t1\().16b, \x0\().16b eor \x7\().16b, \x7\().16b, \t7\().16b ext \x0\().16b, \x0\().16b, \x0\().16b, #8 eor \t2\().16b, \t2\().16b, \x1\().16b eor \t0\().16b, \t0\().16b, \x7\().16b eor \t1\().16b, \t1\().16b, \x7\().16b ext \x1\().16b, \x1\().16b, \x1\().16b, #8 eor \t5\().16b, \t5\().16b, \x4\().16b eor \x0\().16b, \x0\().16b, \t0\().16b eor \t6\().16b, \t6\().16b, \x5\().16b eor \x1\().16b, \x1\().16b, \t1\().16b ext 
\t0\().16b, \x4\().16b, \x4\().16b, #8 eor \t4\().16b, \t4\().16b, \x3\().16b ext \t1\().16b, \x5\().16b, \x5\().16b, #8 eor \t7\().16b, \t7\().16b, \x6\().16b ext \x4\().16b, \x3\().16b, \x3\().16b, #8 eor \t3\().16b, \t3\().16b, \x2\().16b ext \x5\().16b, \x7\().16b, \x7\().16b, #8 eor \t4\().16b, \t4\().16b, \x7\().16b ext \x3\().16b, \x6\().16b, \x6\().16b, #8 eor \t3\().16b, \t3\().16b, \x7\().16b ext \x6\().16b, \x2\().16b, \x2\().16b, #8 eor \x7\().16b, \t1\().16b, \t5\().16b .ifb \inv eor \x2\().16b, \t0\().16b, \t4\().16b eor \x4\().16b, \x4\().16b, \t3\().16b eor \x5\().16b, \x5\().16b, \t7\().16b eor \x3\().16b, \x3\().16b, \t6\().16b eor \x6\().16b, \x6\().16b, \t2\().16b .else eor \t3\().16b, \t3\().16b, \x4\().16b eor \x5\().16b, \x5\().16b, \t7\().16b eor \x2\().16b, \x3\().16b, \t6\().16b eor \x3\().16b, \t0\().16b, \t4\().16b eor \x4\().16b, \x6\().16b, \t2\().16b mov \x6\().16b, \t3\().16b .endif .endm .macro inv_mix_cols, x0, x1, x2, x3, x4, x5, x6, x7, \ t0, t1, t2, t3, t4, t5, t6, t7 ext \t0\().16b, \x0\().16b, \x0\().16b, #8 ext \t6\().16b, \x6\().16b, \x6\().16b, #8 ext \t7\().16b, \x7\().16b, \x7\().16b, #8 eor \t0\().16b, \t0\().16b, \x0\().16b ext \t1\().16b, \x1\().16b, \x1\().16b, #8 eor \t6\().16b, \t6\().16b, \x6\().16b ext \t2\().16b, \x2\().16b, \x2\().16b, #8 eor \t7\().16b, \t7\().16b, \x7\().16b ext \t3\().16b, \x3\().16b, \x3\().16b, #8 eor \t1\().16b, \t1\().16b, \x1\().16b ext \t4\().16b, \x4\().16b, \x4\().16b, #8 eor \t2\().16b, \t2\().16b, \x2\().16b ext \t5\().16b, \x5\().16b, \x5\().16b, #8 eor \t3\().16b, \t3\().16b, \x3\().16b eor \t4\().16b, \t4\().16b, \x4\().16b eor \t5\().16b, \t5\().16b, \x5\().16b eor \x0\().16b, \x0\().16b, \t6\().16b eor \x1\().16b, \x1\().16b, \t6\().16b eor \x2\().16b, \x2\().16b, \t0\().16b eor \x4\().16b, \x4\().16b, \t2\().16b eor \x3\().16b, \x3\().16b, \t1\().16b eor \x1\().16b, \x1\().16b, \t7\().16b eor \x2\().16b, \x2\().16b, \t7\().16b eor \x4\().16b, \x4\().16b, \t6\().16b eor \x5\().16b, \x5\().16b, \t3\().16b eor \x3\().16b, \x3\().16b, \t6\().16b eor \x6\().16b, \x6\().16b, \t4\().16b eor \x4\().16b, \x4\().16b, \t7\().16b eor \x5\().16b, \x5\().16b, \t7\().16b eor \x7\().16b, \x7\().16b, \t5\().16b mix_cols \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \ \t0, \t1, \t2, \t3, \t4, \t5, \t6, \t7, 1 .endm .macro swapmove_2x, a0, b0, a1, b1, n, mask, t0, t1 ushr \t0\().2d, \b0\().2d, #\n ushr \t1\().2d, \b1\().2d, #\n eor \t0\().16b, \t0\().16b, \a0\().16b eor \t1\().16b, \t1\().16b, \a1\().16b and \t0\().16b, \t0\().16b, \mask\().16b and \t1\().16b, \t1\().16b, \mask\().16b eor \a0\().16b, \a0\().16b, \t0\().16b shl \t0\().2d, \t0\().2d, #\n eor \a1\().16b, \a1\().16b, \t1\().16b shl \t1\().2d, \t1\().2d, #\n eor \b0\().16b, \b0\().16b, \t0\().16b eor \b1\().16b, \b1\().16b, \t1\().16b .endm .macro bitslice, x7, x6, x5, x4, x3, x2, x1, x0, t0, t1, t2, t3 movi \t0\().16b, #0x55 movi \t1\().16b, #0x33 swapmove_2x \x0, \x1, \x2, \x3, 1, \t0, \t2, \t3 swapmove_2x \x4, \x5, \x6, \x7, 1, \t0, \t2, \t3 movi \t0\().16b, #0x0f swapmove_2x \x0, \x2, \x1, \x3, 2, \t1, \t2, \t3 swapmove_2x \x4, \x6, \x5, \x7, 2, \t1, \t2, \t3 swapmove_2x \x0, \x4, \x1, \x5, 4, \t0, \t2, \t3 swapmove_2x \x2, \x6, \x3, \x7, 4, \t0, \t2, \t3 .endm .align 6 M0: .octa 0x0004080c0105090d02060a0e03070b0f M0SR: .octa 0x0004080c05090d010a0e02060f03070b SR: .octa 0x0f0e0d0c0a09080b0504070600030201 SRM0: .octa 0x01060b0c0207080d0304090e00050a0f M0ISR: .octa 0x0004080c0d0105090a0e0206070b0f03 ISR: .octa 0x0f0e0d0c080b0a090504070602010003 ISRM0: .octa 
0x0306090c00070a0d01040b0e0205080f /* * void aesbs_convert_key(u8 out[], u32 const rk[], int rounds) */ ENTRY(aesbs_convert_key) ld1 {v7.4s}, [x1], #16 // load round 0 key ld1 {v17.4s}, [x1], #16 // load round 1 key movi v8.16b, #0x01 // bit masks movi v9.16b, #0x02 movi v10.16b, #0x04 movi v11.16b, #0x08 movi v12.16b, #0x10 movi v13.16b, #0x20 movi v14.16b, #0x40 movi v15.16b, #0x80 ldr q16, M0 sub x2, x2, #1 str q7, [x0], #16 // save round 0 key .Lkey_loop: tbl v7.16b ,{v17.16b}, v16.16b ld1 {v17.4s}, [x1], #16 // load next round key cmtst v0.16b, v7.16b, v8.16b cmtst v1.16b, v7.16b, v9.16b cmtst v2.16b, v7.16b, v10.16b cmtst v3.16b, v7.16b, v11.16b cmtst v4.16b, v7.16b, v12.16b cmtst v5.16b, v7.16b, v13.16b cmtst v6.16b, v7.16b, v14.16b cmtst v7.16b, v7.16b, v15.16b not v0.16b, v0.16b not v1.16b, v1.16b not v5.16b, v5.16b not v6.16b, v6.16b subs x2, x2, #1 stp q0, q1, [x0], #128 stp q2, q3, [x0, #-96] stp q4, q5, [x0, #-64] stp q6, q7, [x0, #-32] b.ne .Lkey_loop movi v7.16b, #0x63 // compose .L63 eor v17.16b, v17.16b, v7.16b str q17, [x0] ret ENDPROC(aesbs_convert_key) .align 4 aesbs_encrypt8: ldr q9, [bskey], #16 // round 0 key ldr q8, M0SR ldr q24, SR eor v10.16b, v0.16b, v9.16b // xor with round0 key eor v11.16b, v1.16b, v9.16b tbl v0.16b, {v10.16b}, v8.16b eor v12.16b, v2.16b, v9.16b tbl v1.16b, {v11.16b}, v8.16b eor v13.16b, v3.16b, v9.16b tbl v2.16b, {v12.16b}, v8.16b eor v14.16b, v4.16b, v9.16b tbl v3.16b, {v13.16b}, v8.16b eor v15.16b, v5.16b, v9.16b tbl v4.16b, {v14.16b}, v8.16b eor v10.16b, v6.16b, v9.16b tbl v5.16b, {v15.16b}, v8.16b eor v11.16b, v7.16b, v9.16b tbl v6.16b, {v10.16b}, v8.16b tbl v7.16b, {v11.16b}, v8.16b bitslice v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11 sub rounds, rounds, #1 b .Lenc_sbox .Lenc_loop: shift_rows v0, v1, v2, v3, v4, v5, v6, v7, v24 .Lenc_sbox: sbox v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, \ v13, v14, v15 subs rounds, rounds, #1 b.cc .Lenc_done enc_next_rk mix_cols v0, v1, v4, v6, v3, v7, v2, v5, v8, v9, v10, v11, v12, \ v13, v14, v15 add_round_key v0, v1, v2, v3, v4, v5, v6, v7 b.ne .Lenc_loop ldr q24, SRM0 b .Lenc_loop .Lenc_done: ldr q12, [bskey] // last round key bitslice v0, v1, v4, v6, v3, v7, v2, v5, v8, v9, v10, v11 eor v0.16b, v0.16b, v12.16b eor v1.16b, v1.16b, v12.16b eor v4.16b, v4.16b, v12.16b eor v6.16b, v6.16b, v12.16b eor v3.16b, v3.16b, v12.16b eor v7.16b, v7.16b, v12.16b eor v2.16b, v2.16b, v12.16b eor v5.16b, v5.16b, v12.16b ret ENDPROC(aesbs_encrypt8) .align 4 aesbs_decrypt8: lsl x9, rounds, #7 add bskey, bskey, x9 ldr q9, [bskey, #-112]! 
// round 0 key ldr q8, M0ISR ldr q24, ISR eor v10.16b, v0.16b, v9.16b // xor with round0 key eor v11.16b, v1.16b, v9.16b tbl v0.16b, {v10.16b}, v8.16b eor v12.16b, v2.16b, v9.16b tbl v1.16b, {v11.16b}, v8.16b eor v13.16b, v3.16b, v9.16b tbl v2.16b, {v12.16b}, v8.16b eor v14.16b, v4.16b, v9.16b tbl v3.16b, {v13.16b}, v8.16b eor v15.16b, v5.16b, v9.16b tbl v4.16b, {v14.16b}, v8.16b eor v10.16b, v6.16b, v9.16b tbl v5.16b, {v15.16b}, v8.16b eor v11.16b, v7.16b, v9.16b tbl v6.16b, {v10.16b}, v8.16b tbl v7.16b, {v11.16b}, v8.16b bitslice v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11 sub rounds, rounds, #1 b .Ldec_sbox .Ldec_loop: shift_rows v0, v1, v2, v3, v4, v5, v6, v7, v24 .Ldec_sbox: inv_sbox v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, \ v13, v14, v15 subs rounds, rounds, #1 b.cc .Ldec_done dec_next_rk add_round_key v0, v1, v6, v4, v2, v7, v3, v5 inv_mix_cols v0, v1, v6, v4, v2, v7, v3, v5, v8, v9, v10, v11, v12, \ v13, v14, v15 b.ne .Ldec_loop ldr q24, ISRM0 b .Ldec_loop .Ldec_done: ldr q12, [bskey, #-16] // last round key bitslice v0, v1, v6, v4, v2, v7, v3, v5, v8, v9, v10, v11 eor v0.16b, v0.16b, v12.16b eor v1.16b, v1.16b, v12.16b eor v6.16b, v6.16b, v12.16b eor v4.16b, v4.16b, v12.16b eor v2.16b, v2.16b, v12.16b eor v7.16b, v7.16b, v12.16b eor v3.16b, v3.16b, v12.16b eor v5.16b, v5.16b, v12.16b ret ENDPROC(aesbs_decrypt8) /* * aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int blocks) * aesbs_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int blocks) */ .macro __ecb_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7 frame_push 5 mov x19, x0 mov x20, x1 mov x21, x2 mov x22, x3 mov x23, x4 99: mov x5, #1 lsl x5, x5, x23 subs w23, w23, #8 csel x23, x23, xzr, pl csel x5, x5, xzr, mi ld1 {v0.16b}, [x20], #16 tbnz x5, #1, 0f ld1 {v1.16b}, [x20], #16 tbnz x5, #2, 0f ld1 {v2.16b}, [x20], #16 tbnz x5, #3, 0f ld1 {v3.16b}, [x20], #16 tbnz x5, #4, 0f ld1 {v4.16b}, [x20], #16 tbnz x5, #5, 0f ld1 {v5.16b}, [x20], #16 tbnz x5, #6, 0f ld1 {v6.16b}, [x20], #16 tbnz x5, #7, 0f ld1 {v7.16b}, [x20], #16 0: mov bskey, x21 mov rounds, x22 bl \do8 st1 {\o0\().16b}, [x19], #16 tbnz x5, #1, 1f st1 {\o1\().16b}, [x19], #16 tbnz x5, #2, 1f st1 {\o2\().16b}, [x19], #16 tbnz x5, #3, 1f st1 {\o3\().16b}, [x19], #16 tbnz x5, #4, 1f st1 {\o4\().16b}, [x19], #16 tbnz x5, #5, 1f st1 {\o5\().16b}, [x19], #16 tbnz x5, #6, 1f st1 {\o6\().16b}, [x19], #16 tbnz x5, #7, 1f st1 {\o7\().16b}, [x19], #16 cbz x23, 1f cond_yield_neon b 99b 1: frame_pop ret .endm .align 4 ENTRY(aesbs_ecb_encrypt) __ecb_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5 ENDPROC(aesbs_ecb_encrypt) .align 4 ENTRY(aesbs_ecb_decrypt) __ecb_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5 ENDPROC(aesbs_ecb_decrypt) /* * aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int blocks, u8 iv[]) */ .align 4 ENTRY(aesbs_cbc_decrypt) frame_push 6 mov x19, x0 mov x20, x1 mov x21, x2 mov x22, x3 mov x23, x4 mov x24, x5 99: mov x6, #1 lsl x6, x6, x23 subs w23, w23, #8 csel x23, x23, xzr, pl csel x6, x6, xzr, mi ld1 {v0.16b}, [x20], #16 mov v25.16b, v0.16b tbnz x6, #1, 0f ld1 {v1.16b}, [x20], #16 mov v26.16b, v1.16b tbnz x6, #2, 0f ld1 {v2.16b}, [x20], #16 mov v27.16b, v2.16b tbnz x6, #3, 0f ld1 {v3.16b}, [x20], #16 mov v28.16b, v3.16b tbnz x6, #4, 0f ld1 {v4.16b}, [x20], #16 mov v29.16b, v4.16b tbnz x6, #5, 0f ld1 {v5.16b}, [x20], #16 mov v30.16b, v5.16b tbnz x6, #6, 0f ld1 {v6.16b}, [x20], #16 mov v31.16b, v6.16b tbnz x6, #7, 0f ld1 {v7.16b}, [x20] 0: mov bskey, x21 mov rounds, x22 bl 
aesbs_decrypt8 ld1 {v24.16b}, [x24] // load IV eor v1.16b, v1.16b, v25.16b eor v6.16b, v6.16b, v26.16b eor v4.16b, v4.16b, v27.16b eor v2.16b, v2.16b, v28.16b eor v7.16b, v7.16b, v29.16b eor v0.16b, v0.16b, v24.16b eor v3.16b, v3.16b, v30.16b eor v5.16b, v5.16b, v31.16b st1 {v0.16b}, [x19], #16 mov v24.16b, v25.16b tbnz x6, #1, 1f st1 {v1.16b}, [x19], #16 mov v24.16b, v26.16b tbnz x6, #2, 1f st1 {v6.16b}, [x19], #16 mov v24.16b, v27.16b tbnz x6, #3, 1f st1 {v4.16b}, [x19], #16 mov v24.16b, v28.16b tbnz x6, #4, 1f st1 {v2.16b}, [x19], #16 mov v24.16b, v29.16b tbnz x6, #5, 1f st1 {v7.16b}, [x19], #16 mov v24.16b, v30.16b tbnz x6, #6, 1f st1 {v3.16b}, [x19], #16 mov v24.16b, v31.16b tbnz x6, #7, 1f ld1 {v24.16b}, [x20], #16 st1 {v5.16b}, [x19], #16 1: st1 {v24.16b}, [x24] // store IV cbz x23, 2f cond_yield_neon b 99b 2: frame_pop ret ENDPROC(aesbs_cbc_decrypt) .macro next_tweak, out, in, const, tmp sshr \tmp\().2d, \in\().2d, #63 and \tmp\().16b, \tmp\().16b, \const\().16b add \out\().2d, \in\().2d, \in\().2d ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8 eor \out\().16b, \out\().16b, \tmp\().16b .endm .align 4 .Lxts_mul_x: CPU_LE( .quad 1, 0x87 ) CPU_BE( .quad 0x87, 1 ) /* * aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int blocks, u8 iv[]) * aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int blocks, u8 iv[]) */ __xts_crypt8: mov x6, #1 lsl x6, x6, x23 subs w23, w23, #8 csel x23, x23, xzr, pl csel x6, x6, xzr, mi ld1 {v0.16b}, [x20], #16 next_tweak v26, v25, v30, v31 eor v0.16b, v0.16b, v25.16b tbnz x6, #1, 0f ld1 {v1.16b}, [x20], #16 next_tweak v27, v26, v30, v31 eor v1.16b, v1.16b, v26.16b tbnz x6, #2, 0f ld1 {v2.16b}, [x20], #16 next_tweak v28, v27, v30, v31 eor v2.16b, v2.16b, v27.16b tbnz x6, #3, 0f ld1 {v3.16b}, [x20], #16 next_tweak v29, v28, v30, v31 eor v3.16b, v3.16b, v28.16b tbnz x6, #4, 0f ld1 {v4.16b}, [x20], #16 str q29, [sp, #.Lframe_local_offset] eor v4.16b, v4.16b, v29.16b next_tweak v29, v29, v30, v31 tbnz x6, #5, 0f ld1 {v5.16b}, [x20], #16 str q29, [sp, #.Lframe_local_offset + 16] eor v5.16b, v5.16b, v29.16b next_tweak v29, v29, v30, v31 tbnz x6, #6, 0f ld1 {v6.16b}, [x20], #16 str q29, [sp, #.Lframe_local_offset + 32] eor v6.16b, v6.16b, v29.16b next_tweak v29, v29, v30, v31 tbnz x6, #7, 0f ld1 {v7.16b}, [x20], #16 str q29, [sp, #.Lframe_local_offset + 48] eor v7.16b, v7.16b, v29.16b next_tweak v29, v29, v30, v31 0: mov bskey, x21 mov rounds, x22 br x7 ENDPROC(__xts_crypt8) .macro __xts_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7 frame_push 6, 64 mov x19, x0 mov x20, x1 mov x21, x2 mov x22, x3 mov x23, x4 mov x24, x5 0: ldr q30, .Lxts_mul_x ld1 {v25.16b}, [x24] 99: adr x7, \do8 bl __xts_crypt8 ldp q16, q17, [sp, #.Lframe_local_offset] ldp q18, q19, [sp, #.Lframe_local_offset + 32] eor \o0\().16b, \o0\().16b, v25.16b eor \o1\().16b, \o1\().16b, v26.16b eor \o2\().16b, \o2\().16b, v27.16b eor \o3\().16b, \o3\().16b, v28.16b st1 {\o0\().16b}, [x19], #16 mov v25.16b, v26.16b tbnz x6, #1, 1f st1 {\o1\().16b}, [x19], #16 mov v25.16b, v27.16b tbnz x6, #2, 1f st1 {\o2\().16b}, [x19], #16 mov v25.16b, v28.16b tbnz x6, #3, 1f st1 {\o3\().16b}, [x19], #16 mov v25.16b, v29.16b tbnz x6, #4, 1f eor \o4\().16b, \o4\().16b, v16.16b eor \o5\().16b, \o5\().16b, v17.16b eor \o6\().16b, \o6\().16b, v18.16b eor \o7\().16b, \o7\().16b, v19.16b st1 {\o4\().16b}, [x19], #16 tbnz x6, #5, 1f st1 {\o5\().16b}, [x19], #16 tbnz x6, #6, 1f st1 {\o6\().16b}, [x19], #16 tbnz x6, #7, 1f st1 {\o7\().16b}, [x19], #16 cbz x23, 1f st1 {v25.16b}, [x24] 
cond_yield_neon 0b b 99b 1: st1 {v25.16b}, [x24] frame_pop ret .endm ENTRY(aesbs_xts_encrypt) __xts_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5 ENDPROC(aesbs_xts_encrypt) ENTRY(aesbs_xts_decrypt) __xts_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5 ENDPROC(aesbs_xts_decrypt) .macro next_ctr, v mov \v\().d[1], x8 adds x8, x8, #1 mov \v\().d[0], x7 adc x7, x7, xzr rev64 \v\().16b, \v\().16b .endm /* * aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], * int rounds, int blocks, u8 iv[], u8 final[]) */ ENTRY(aesbs_ctr_encrypt) frame_push 8 mov x19, x0 mov x20, x1 mov x21, x2 mov x22, x3 mov x23, x4 mov x24, x5 mov x25, x6 cmp x25, #0 cset x26, ne add x23, x23, x26 // do one extra block if final 98: ldp x7, x8, [x24] ld1 {v0.16b}, [x24] CPU_LE( rev x7, x7 ) CPU_LE( rev x8, x8 ) adds x8, x8, #1 adc x7, x7, xzr 99: mov x9, #1 lsl x9, x9, x23 subs w23, w23, #8 csel x23, x23, xzr, pl csel x9, x9, xzr, le tbnz x9, #1, 0f next_ctr v1 tbnz x9, #2, 0f next_ctr v2 tbnz x9, #3, 0f next_ctr v3 tbnz x9, #4, 0f next_ctr v4 tbnz x9, #5, 0f next_ctr v5 tbnz x9, #6, 0f next_ctr v6 tbnz x9, #7, 0f next_ctr v7 0: mov bskey, x21 mov rounds, x22 bl aesbs_encrypt8 lsr x9, x9, x26 // disregard the extra block tbnz x9, #0, 0f ld1 {v8.16b}, [x20], #16 eor v0.16b, v0.16b, v8.16b st1 {v0.16b}, [x19], #16 tbnz x9, #1, 1f ld1 {v9.16b}, [x20], #16 eor v1.16b, v1.16b, v9.16b st1 {v1.16b}, [x19], #16 tbnz x9, #2, 2f ld1 {v10.16b}, [x20], #16 eor v4.16b, v4.16b, v10.16b st1 {v4.16b}, [x19], #16 tbnz x9, #3, 3f ld1 {v11.16b}, [x20], #16 eor v6.16b, v6.16b, v11.16b st1 {v6.16b}, [x19], #16 tbnz x9, #4, 4f ld1 {v12.16b}, [x20], #16 eor v3.16b, v3.16b, v12.16b st1 {v3.16b}, [x19], #16 tbnz x9, #5, 5f ld1 {v13.16b}, [x20], #16 eor v7.16b, v7.16b, v13.16b st1 {v7.16b}, [x19], #16 tbnz x9, #6, 6f ld1 {v14.16b}, [x20], #16 eor v2.16b, v2.16b, v14.16b st1 {v2.16b}, [x19], #16 tbnz x9, #7, 7f ld1 {v15.16b}, [x20], #16 eor v5.16b, v5.16b, v15.16b st1 {v5.16b}, [x19], #16 8: next_ctr v0 st1 {v0.16b}, [x24] cbz x23, .Lctr_done cond_yield_neon 98b b 99b .Lctr_done: frame_pop ret /* * If we are handling the tail of the input (x6 != NULL), return the * final keystream block back to the caller. */ 0: cbz x25, 8b st1 {v0.16b}, [x25] b 8b 1: cbz x25, 8b st1 {v1.16b}, [x25] b 8b 2: cbz x25, 8b st1 {v4.16b}, [x25] b 8b 3: cbz x25, 8b st1 {v6.16b}, [x25] b 8b 4: cbz x25, 8b st1 {v3.16b}, [x25] b 8b 5: cbz x25, 8b st1 {v7.16b}, [x25] b 8b 6: cbz x25, 8b st1 {v2.16b}, [x25] b 8b 7: cbz x25, 8b st1 {v5.16b}, [x25] b 8b ENDPROC(aesbs_ctr_encrypt)
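
The next_tweak macro used by the bit-sliced XTS paths multiplies the running tweak by x in GF(2^128), reducing with the 0x87 constant loaded from .Lxts_mul_x. A small C model of the same update, assuming the tweak is kept as two little-endian 64-bit words the way the NEON code holds it in a q register, is sketched here (reference model, not kernel code):

#include <stdint.h>

/* t[0] is the low 64 bits of the tweak, t[1] the high 64 bits. */
static void xts_next_tweak(uint64_t t[2])
{
    uint64_t carry = t[1] >> 63;            /* bit shifted out of the top */

    t[1] = (t[1] << 1) | (t[0] >> 63);      /* carry low half into high half */
    t[0] = (t[0] << 1) ^ (carry ? 0x87 : 0);/* reduce with x^128+x^7+x^2+x+1 */
}
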