repo_id
stringlengths
5
115
size
int64
590
5.01M
file_path
stringlengths
4
212
content
stringlengths
590
5.01M
4ms/stm32mp1-baremetal
5,306
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/lib/memcpy.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/arch/arm/lib/memcpy.S * * Author: Nicolas Pitre * Created: Sep 28, 2005 * Copyright: MontaVista Software, Inc. */ #include <linux/linkage.h> #include <asm/assembler.h> #define LDR1W_SHIFT 0 #define STR1W_SHIFT 0 .macro ldr1w ptr reg abort W(ldr) \reg, [\ptr], #4 .endm .macro ldr4w ptr reg1 reg2 reg3 reg4 abort ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4} .endm .macro ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8} .endm .macro ldr1b ptr reg cond=al abort ldrb\cond\() \reg, [\ptr], #1 .endm .macro str1w ptr reg abort W(str) \reg, [\ptr], #4 .endm .macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort stmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8} .endm .macro str1b ptr reg cond=al abort strb\cond\() \reg, [\ptr], #1 .endm .macro enter reg1 reg2 stmdb sp!, {r0, \reg1, \reg2} .endm .macro exit reg1 reg2 ldmfd sp!, {r0, \reg1, \reg2} .endm .text /* Prototype: void *memcpy(void *dest, const void *src, size_t n); */ .syntax unified #if CONFIG_IS_ENABLED(SYS_THUMB_BUILD) && !defined(MEMCPY_NO_THUMB_BUILD) .thumb .thumb_func #endif ENTRY(memcpy) cmp r0, r1 bxeq lr enter r4, lr subs r2, r2, #4 blt 8f ands ip, r0, #3 PLD( pld [r1, #0] ) bne 9f ands ip, r1, #3 bne 10f 1: subs r2, r2, #(28) stmfd sp!, {r5 - r8} blt 5f CALGN( ands ip, r0, #31 ) CALGN( rsb r3, ip, #32 ) CALGN( sbcsne r4, r3, r2 ) @ C is always set here CALGN( bcs 2f ) CALGN( adr r4, 6f ) CALGN( subs r2, r2, r3 ) @ C gets set CALGN( add pc, r4, ip ) PLD( pld [r1, #0] ) 2: PLD( subs r2, r2, #96 ) PLD( pld [r1, #28] ) PLD( blt 4f ) PLD( pld [r1, #60] ) PLD( pld [r1, #92] ) 3: PLD( pld [r1, #124] ) 4: ldr8w r1, r3, r4, r5, r6, r7, r8, ip, lr, abort=20f subs r2, r2, #32 str8w r0, r3, r4, r5, r6, r7, r8, ip, lr, abort=20f bge 3b PLD( cmn r2, #96 ) PLD( bge 4b ) 5: ands ip, r2, #28 rsb ip, ip, #32 #if LDR1W_SHIFT > 0 lsl ip, ip, #LDR1W_SHIFT #endif addne pc, pc, ip 
@ C is always clear here b 7f 6: .rept (1 << LDR1W_SHIFT) W(nop) .endr ldr1w r1, r3, abort=20f ldr1w r1, r4, abort=20f ldr1w r1, r5, abort=20f ldr1w r1, r6, abort=20f ldr1w r1, r7, abort=20f ldr1w r1, r8, abort=20f ldr1w r1, lr, abort=20f #if LDR1W_SHIFT < STR1W_SHIFT lsl ip, ip, #STR1W_SHIFT - LDR1W_SHIFT #elif LDR1W_SHIFT > STR1W_SHIFT lsr ip, ip, #LDR1W_SHIFT - STR1W_SHIFT #endif add pc, pc, ip nop .rept (1 << STR1W_SHIFT) W(nop) .endr str1w r0, r3, abort=20f str1w r0, r4, abort=20f str1w r0, r5, abort=20f str1w r0, r6, abort=20f str1w r0, r7, abort=20f str1w r0, r8, abort=20f str1w r0, lr, abort=20f CALGN( bcs 2b ) 7: ldmfd sp!, {r5 - r8} 8: movs r2, r2, lsl #31 ldr1b r1, r3, ne, abort=21f ldr1b r1, r4, cs, abort=21f ldr1b r1, ip, cs, abort=21f str1b r0, r3, ne, abort=21f str1b r0, r4, cs, abort=21f str1b r0, ip, cs, abort=21f exit r4, lr bx lr 9: rsb ip, ip, #4 cmp ip, #2 ldr1b r1, r3, gt, abort=21f ldr1b r1, r4, ge, abort=21f ldr1b r1, lr, abort=21f str1b r0, r3, gt, abort=21f str1b r0, r4, ge, abort=21f subs r2, r2, ip str1b r0, lr, abort=21f blt 8b ands ip, r1, #3 beq 1b 10: bic r1, r1, #3 cmp ip, #2 ldr1w r1, lr, abort=21f beq 17f bgt 18f .macro forward_copy_shift pull push subs r2, r2, #28 blt 14f CALGN( ands ip, r0, #31 ) CALGN( rsb ip, ip, #32 ) CALGN( sbcsne r4, ip, r2 ) @ C is always set here CALGN( subcc r2, r2, ip ) CALGN( bcc 15f ) 11: stmfd sp!, {r5 - r9} PLD( pld [r1, #0] ) PLD( subs r2, r2, #96 ) PLD( pld [r1, #28] ) PLD( blt 13f ) PLD( pld [r1, #60] ) PLD( pld [r1, #92] ) 12: PLD( pld [r1, #124] ) 13: ldr4w r1, r4, r5, r6, r7, abort=19f mov r3, lr, lspull #\pull subs r2, r2, #32 ldr4w r1, r8, r9, ip, lr, abort=19f orr r3, r3, r4, lspush #\push mov r4, r4, lspull #\pull orr r4, r4, r5, lspush #\push mov r5, r5, lspull #\pull orr r5, r5, r6, lspush #\push mov r6, r6, lspull #\pull orr r6, r6, r7, lspush #\push mov r7, r7, lspull #\pull orr r7, r7, r8, lspush #\push mov r8, r8, lspull #\pull orr r8, r8, r9, lspush #\push mov r9, r9, lspull #\pull 
orr r9, r9, ip, lspush #\push mov ip, ip, lspull #\pull orr ip, ip, lr, lspush #\push str8w r0, r3, r4, r5, r6, r7, r8, r9, ip, , abort=19f bge 12b PLD( cmn r2, #96 ) PLD( bge 13b ) ldmfd sp!, {r5 - r9} 14: ands ip, r2, #28 beq 16f 15: mov r3, lr, lspull #\pull ldr1w r1, lr, abort=21f subs ip, ip, #4 orr r3, r3, lr, lspush #\push str1w r0, r3, abort=21f bgt 15b CALGN( cmp r2, #0 ) CALGN( bge 11b ) 16: sub r1, r1, #(\push / 8) b 8b .endm forward_copy_shift pull=8 push=24 17: forward_copy_shift pull=16 push=16 18: forward_copy_shift pull=24 push=8 /* * Abort preamble and completion macros. * If a fixup handler is required then those macros must surround it. * It is assumed that the fixup code will handle the private part of * the exit macro. */ .macro copy_abort_preamble 19: ldmfd sp!, {r5 - r9} b 21f 20: ldmfd sp!, {r5 - r8} 21: .endm .macro copy_abort_end ldmfd sp!, {r4, lr} bx lr .endm ENDPROC(memcpy)
4ms/stm32mp1-baremetal
4,596
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/lib/uldivmod.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright 2010, Google Inc. * * Brought in from coreboot uldivmod.S */ #include <linux/linkage.h> #include <asm/assembler.h> /* * A, Q = r0 + (r1 << 32) * B, R = r2 + (r3 << 32) * A / B = Q ... R */ A_0 .req r0 A_1 .req r1 B_0 .req r2 B_1 .req r3 C_0 .req r4 C_1 .req r5 D_0 .req r6 D_1 .req r7 Q_0 .req r0 Q_1 .req r1 R_0 .req r2 R_1 .req r3 THUMB( TMP .req r8 ) .pushsection .text.__aeabi_uldivmod, "ax" ENTRY(__aeabi_uldivmod) stmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) lr} @ Test if B == 0 orrs ip, B_0, B_1 @ Z set -> B == 0 beq L_div_by_0 @ Test if B is power of 2: (B & (B - 1)) == 0 subs C_0, B_0, #1 sbc C_1, B_1, #0 tst C_0, B_0 tsteq B_1, C_1 beq L_pow2 @ Test if A_1 == B_1 == 0 orrs ip, A_1, B_1 beq L_div_32_32 L_div_64_64: /* CLZ only exists in ARM architecture version 5 and above. */ #ifdef HAVE_CLZ mov C_0, #1 mov C_1, #0 @ D_0 = clz A teq A_1, #0 clz D_0, A_1 clzeq ip, A_0 addeq D_0, D_0, ip @ D_1 = clz B teq B_1, #0 clz D_1, B_1 clzeq ip, B_0 addeq D_1, D_1, ip @ if clz B - clz A > 0 subs D_0, D_1, D_0 bls L_done_shift @ B <<= (clz B - clz A) subs D_1, D_0, #32 rsb ip, D_0, #32 movmi B_1, B_1, lsl D_0 ARM( orrmi B_1, B_1, B_0, lsr ip ) THUMB( lsrmi TMP, B_0, ip ) THUMB( orrmi B_1, B_1, TMP ) movpl B_1, B_0, lsl D_1 mov B_0, B_0, lsl D_0 @ C = 1 << (clz B - clz A) movmi C_1, C_1, lsl D_0 ARM( orrmi C_1, C_1, C_0, lsr ip ) THUMB( lsrmi TMP, C_0, ip ) THUMB( orrmi C_1, C_1, TMP ) movpl C_1, C_0, lsl D_1 mov C_0, C_0, lsl D_0 L_done_shift: mov D_0, #0 mov D_1, #0 @ C: current bit; D: result #else @ C: current bit; D: result mov C_0, #1 mov C_1, #0 mov D_0, #0 mov D_1, #0 L_lsl_4: cmp B_1, #0x10000000 cmpcc B_1, A_1 cmpeq B_0, A_0 bcs L_lsl_1 @ B <<= 4 mov B_1, B_1, lsl #4 orr B_1, B_1, B_0, lsr #28 mov B_0, B_0, lsl #4 @ C <<= 4 mov C_1, C_1, lsl #4 orr C_1, C_1, C_0, lsr #28 mov C_0, C_0, lsl #4 b L_lsl_4 L_lsl_1: cmp B_1, #0x80000000 cmpcc B_1, A_1 cmpeq B_0, A_0 bcs L_subtract @ B <<= 1 mov B_1, B_1, lsl #1 orr B_1, 
B_1, B_0, lsr #31 mov B_0, B_0, lsl #1 @ C <<= 1 mov C_1, C_1, lsl #1 orr C_1, C_1, C_0, lsr #31 mov C_0, C_0, lsl #1 b L_lsl_1 #endif L_subtract: @ if A >= B cmp A_1, B_1 cmpeq A_0, B_0 bcc L_update @ A -= B subs A_0, A_0, B_0 sbc A_1, A_1, B_1 @ D |= C orr D_0, D_0, C_0 orr D_1, D_1, C_1 L_update: @ if A == 0: break orrs ip, A_1, A_0 beq L_exit @ C >>= 1 movs C_1, C_1, lsr #1 movs C_0, C_0, rrx @ if C == 0: break orrs ip, C_1, C_0 beq L_exit @ B >>= 1 movs B_1, B_1, lsr #1 mov B_0, B_0, rrx b L_subtract L_exit: @ Note: A, B & Q, R are aliases mov R_0, A_0 mov R_1, A_1 mov Q_0, D_0 mov Q_1, D_1 ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc} L_div_32_32: @ Note: A_0 & r0 are aliases @ Q_1 r1 mov r1, B_0 bl __aeabi_uidivmod mov R_0, r1 mov R_1, #0 mov Q_1, #0 ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc} L_pow2: #ifdef HAVE_CLZ @ Note: A, B and Q, R are aliases @ R = A & (B - 1) and C_0, A_0, C_0 and C_1, A_1, C_1 @ Q = A >> log2(B) @ Note: B must not be 0 here! clz D_0, B_0 add D_1, D_0, #1 rsbs D_0, D_0, #31 bpl L_1 clz D_0, B_1 rsb D_0, D_0, #31 mov A_0, A_1, lsr D_0 add D_0, D_0, #32 L_1: movpl A_0, A_0, lsr D_0 ARM( orrpl A_0, A_0, A_1, lsl D_1 ) THUMB( lslpl TMP, A_1, D_1 ) THUMB( orrpl A_0, A_0, TMP ) mov A_1, A_1, lsr D_0 @ Mov back C to R mov R_0, C_0 mov R_1, C_1 ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc} #else @ Note: A, B and Q, R are aliases @ R = A & (B - 1) and C_0, A_0, C_0 and C_1, A_1, C_1 @ Q = A >> log2(B) @ Note: B must not be 0 here! @ Count the leading zeroes in B. mov D_0, #0 orrs B_0, B_0, B_0 @ If B is greater than 1 << 31, divide A and B by 1 << 32. moveq A_0, A_1 moveq A_1, #0 moveq B_0, B_1 @ Count the remaining leading zeroes in B. movs B_1, B_0, lsl #16 addeq D_0, #16 moveq B_0, B_0, lsr #16 tst B_0, #0xff addeq D_0, #8 moveq B_0, B_0, lsr #8 tst B_0, #0xf addeq D_0, #4 moveq B_0, B_0, lsr #4 tst B_0, #0x3 addeq D_0, #2 moveq B_0, B_0, lsr #2 tst B_0, #0x1 addeq D_0, #1 @ Shift A to the right by the appropriate amount. 
rsb D_1, D_0, #32 mov Q_0, A_0, lsr D_0 ARM( orr Q_0, Q_0, A_1, lsl D_1 ) THUMB( lsl A_1, D_1 ) THUMB( orr Q_0, A_1 ) mov Q_1, A_1, lsr D_0 @ Move C to R mov R_0, C_0 mov R_1, C_1 ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc} #endif L_div_by_0: bl __div0 @ As wrong as it could be mov Q_0, #0 mov Q_1, #0 mov R_0, #0 mov R_1, #0 ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc} ENDPROC(__aeabi_uldivmod) .popsection
4ms/stm32mp1-baremetal
3,947
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/lib/crt0_arm_efi.S
/* SPDX-License-Identifier: GPL-2.0+ OR BSD-2-Clause */ /* * crt0-efi-arm.S - PE/COFF header for ARM EFI applications * * Copright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org> * * This file is taken and modified from the gnu-efi project. */ #include <asm-generic/pe.h> .section .text.head /* * Magic "MZ" signature for PE/COFF */ .globl image_base image_base: .short IMAGE_DOS_SIGNATURE /* 'MZ' */ .skip 58 /* 'MZ' + pad + offset == 64 */ .long pe_header - image_base /* Offset to the PE header */ pe_header: .long IMAGE_NT_SIGNATURE /* 'PE' */ coff_header: .short IMAGE_FILE_MACHINE_THUMB /* Mixed ARM/Thumb */ .short 2 /* nr_sections */ .long 0 /* TimeDateStamp */ .long 0 /* PointerToSymbolTable */ .long 0 /* NumberOfSymbols */ .short section_table - optional_header /* SizeOfOptionalHeader */ /* Characteristics */ .short (IMAGE_FILE_EXECUTABLE_IMAGE | \ IMAGE_FILE_LINE_NUMS_STRIPPED | \ IMAGE_FILE_LOCAL_SYMS_STRIPPED | \ IMAGE_FILE_32BIT_MACHINE | \ IMAGE_FILE_DEBUG_STRIPPED) optional_header: .short IMAGE_NT_OPTIONAL_HDR32_MAGIC /* PE32 format */ .byte 0x02 /* MajorLinkerVersion */ .byte 0x14 /* MinorLinkerVersion */ .long _edata - _start /* SizeOfCode */ .long 0 /* SizeOfInitializedData */ .long 0 /* SizeOfUninitializedData */ .long _start - image_base /* AddressOfEntryPoint */ .long _start - image_base /* BaseOfCode */ .long 0 /* BaseOfData */ extra_header_fields: .long 0 /* image_base */ .long 0x20 /* SectionAlignment */ .long 0x8 /* FileAlignment */ .short 0 /* MajorOperatingSystemVersion */ .short 0 /* MinorOperatingSystemVersion */ .short 0 /* MajorImageVersion */ .short 0 /* MinorImageVersion */ .short 0 /* MajorSubsystemVersion */ .short 0 /* MinorSubsystemVersion */ .long 0 /* Win32VersionValue */ .long _edata - image_base /* SizeOfImage */ /* * Everything before the kernel image is considered part of the header */ .long _start - image_base /* SizeOfHeaders */ .long 0 /* CheckSum */ .short IMAGE_SUBSYSTEM_EFI_APPLICATION /* Subsystem */ .short 0 /* 
DllCharacteristics */ .long 0 /* SizeOfStackReserve */ .long 0 /* SizeOfStackCommit */ .long 0 /* SizeOfHeapReserve */ .long 0 /* SizeOfHeapCommit */ .long 0 /* LoaderFlags */ .long 0x6 /* NumberOfRvaAndSizes */ .quad 0 /* ExportTable */ .quad 0 /* ImportTable */ .quad 0 /* ResourceTable */ .quad 0 /* ExceptionTable */ .quad 0 /* CertificationTable */ .quad 0 /* BaseRelocationTable */ section_table: /* * The EFI application loader requires a relocation section * because EFI applications must be relocatable. This is a * dummy section as far as we are concerned. */ .ascii ".reloc" .byte 0 .byte 0 /* end of 0 padding of section name */ .long 0 .long 0 .long 0 /* SizeOfRawData */ .long 0 /* PointerToRawData */ .long 0 /* PointerToRelocations */ .long 0 /* PointerToLineNumbers */ .short 0 /* NumberOfRelocations */ .short 0 /* NumberOfLineNumbers */ .long 0x42100040 /* Characteristics (section flags) */ .ascii ".text" .byte 0 .byte 0 .byte 0 /* end of 0 padding of section name */ .long _edata - _start /* VirtualSize */ .long _start - image_base /* VirtualAddress */ .long _edata - _start /* SizeOfRawData */ .long _start - image_base /* PointerToRawData */ .long 0 /* PointerToRelocations (0 for executables) */ .long 0 /* PointerToLineNumbers (0 for executables) */ .short 0 /* NumberOfRelocations (0 for executables) */ .short 0 /* NumberOfLineNumbers (0 for executables) */ .long 0xe0500020 /* Characteristics (section flags) */ _start: stmfd sp!, {r0-r2, lr} adr r1, .L_DYNAMIC ldr r0, [r1] add r1, r0, r1 adr r0, image_base bl _relocate teq r0, #0 bne 0f ldmfd sp, {r0-r1} bl efi_main 0: add sp, sp, #12 ldr pc, [sp], #4 .L_DYNAMIC: .word _DYNAMIC - .
4ms/stm32mp1-baremetal
6,637
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/lib/vectors.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * vectors - Generic ARM exception table code * * Copyright (c) 1998 Dan Malek <dmalek@jlc.net> * Copyright (c) 1999 Magnus Damm <kieraypc01.p.y.kie.era.ericsson.se> * Copyright (c) 2000 Wolfgang Denk <wd@denx.de> * Copyright (c) 2001 Alex Züpke <azu@sysgo.de> * Copyright (c) 2001 Marius Gröger <mag@sysgo.de> * Copyright (c) 2002 Alex Züpke <azu@sysgo.de> * Copyright (c) 2002 Gary Jennejohn <garyj@denx.de> * Copyright (c) 2002 Kyle Harris <kharris@nexus-tech.net> */ #include <config.h> /* * A macro to allow insertion of an ARM exception vector either * for the non-boot0 case or by a boot0-header. */ .macro ARM_VECTORS #ifdef CONFIG_ARCH_K3 ldr pc, _reset #else b reset #endif ldr pc, _undefined_instruction ldr pc, _software_interrupt ldr pc, _prefetch_abort ldr pc, _data_abort ldr pc, _not_used ldr pc, _irq ldr pc, _fiq .endm /* ************************************************************************* * * Symbol _start is referenced elsewhere, so make it global * ************************************************************************* */ .globl _start /* ************************************************************************* * * Vectors have their own section so linker script can map them easily * ************************************************************************* */ .section ".vectors", "ax" #if defined(CONFIG_ENABLE_ARM_SOC_BOOT0_HOOK) /* * Various SoCs need something special and SoC-specific up front in * order to boot, allow them to set that in their boot0.h file and then * use it here. * * To allow a boot0 hook to insert a 'special' sequence after the vector * table (e.g. for the socfpga), the presence of a boot0 hook supresses * the below vector table and assumes that the vector table is filled in * by the boot0 hook. 
The requirements for a boot0 hook thus are: * (1) defines '_start:' as appropriate * (2) inserts the vector table using ARM_VECTORS as appropriate */ #include <asm/arch/boot0.h> #else /* ************************************************************************* * * Exception vectors as described in ARM reference manuals * * Uses indirect branch to allow reaching handlers anywhere in memory. * ************************************************************************* */ _start: #ifdef CONFIG_SYS_DV_NOR_BOOT_CFG .word CONFIG_SYS_DV_NOR_BOOT_CFG #endif ARM_VECTORS #endif /* !defined(CONFIG_ENABLE_ARM_SOC_BOOT0_HOOK) */ /* ************************************************************************* * * Indirect vectors table * * Symbols referenced here must be defined somewhere else * ************************************************************************* */ .globl _reset .globl _undefined_instruction .globl _software_interrupt .globl _prefetch_abort .globl _data_abort .globl _not_used .globl _irq .globl _fiq #ifdef CONFIG_ARCH_K3 _reset: .word reset #endif _undefined_instruction: .word undefined_instruction _software_interrupt: .word software_interrupt _prefetch_abort: .word prefetch_abort _data_abort: .word data_abort _not_used: .word not_used _irq: .word irq _fiq: .word fiq .balignl 16,0xdeadbeef /* ************************************************************************* * * Interrupt handling * ************************************************************************* */ /* SPL interrupt handling: just hang */ #ifdef CONFIG_SPL_BUILD .align 5 undefined_instruction: software_interrupt: prefetch_abort: data_abort: not_used: irq: fiq: 1: b 1b /* hang and never return */ #else /* !CONFIG_SPL_BUILD */ /* IRQ stack memory (calculated at run-time) + 8 bytes */ .globl IRQ_STACK_START_IN IRQ_STACK_START_IN: #ifdef IRAM_BASE_ADDR .word IRAM_BASE_ADDR + 0x20 #else .word 0x0badc0de #endif @ @ IRQ stack frame. 
@ #define S_FRAME_SIZE 72 #define S_OLD_R0 68 #define S_PSR 64 #define S_PC 60 #define S_LR 56 #define S_SP 52 #define S_IP 48 #define S_FP 44 #define S_R10 40 #define S_R9 36 #define S_R8 32 #define S_R7 28 #define S_R6 24 #define S_R5 20 #define S_R4 16 #define S_R3 12 #define S_R2 8 #define S_R1 4 #define S_R0 0 #define MODE_SVC 0x13 #define I_BIT 0x80 /* * use bad_save_user_regs for abort/prefetch/undef/swi ... * use irq_save_user_regs / irq_restore_user_regs for IRQ/FIQ handling */ .macro bad_save_user_regs @ carve out a frame on current user stack sub sp, sp, #S_FRAME_SIZE stmia sp, {r0 - r12} @ Save user registers (now in svc mode) r0-r12 ldr r2, IRQ_STACK_START_IN @ get values for "aborted" pc and cpsr (into parm regs) ldmia r2, {r2 - r3} add r0, sp, #S_FRAME_SIZE @ grab pointer to old stack add r5, sp, #S_SP mov r1, lr stmia r5, {r0 - r3} @ save sp_SVC, lr_SVC, pc, cpsr mov r0, sp @ save current stack into r0 (param register) .endm .macro irq_save_user_regs sub sp, sp, #S_FRAME_SIZE stmia sp, {r0 - r12} @ Calling r0-r12 @ !!!! R8 NEEDS to be saved !!!! a reserved stack spot would be good. add r8, sp, #S_PC stmdb r8, {sp, lr}^ @ Calling SP, LR str lr, [r8, #0] @ Save calling PC mrs r6, spsr str r6, [r8, #4] @ Save CPSR str r0, [r8, #8] @ Save OLD_R0 mov r0, sp .endm .macro irq_restore_user_regs ldmia sp, {r0 - lr}^ @ Calling r0 - lr mov r0, r0 ldr lr, [sp, #S_PC] @ Get PC add sp, sp, #S_FRAME_SIZE subs pc, lr, #4 @ return & move spsr_svc into cpsr .endm .macro get_bad_stack ldr r13, IRQ_STACK_START_IN @ setup our mode stack str lr, [r13] @ save caller lr in position 0 of saved stack mrs lr, spsr @ get the spsr str lr, [r13, #4] @ save spsr in position 1 of saved stack mov r13, #MODE_SVC @ prepare SVC-Mode @ msr spsr_c, r13 msr spsr, r13 @ switch modes, make sure moves will execute mov lr, pc @ capture return pc movs pc, lr @ jump to next instruction & switch modes. 
.endm .macro get_irq_stack @ setup IRQ stack ldr sp, IRQ_STACK_START .endm .macro get_fiq_stack @ setup FIQ stack ldr sp, FIQ_STACK_START .endm /* * exception handlers */ .align 5 undefined_instruction: get_bad_stack bad_save_user_regs bl do_undefined_instruction .align 5 software_interrupt: get_bad_stack bad_save_user_regs bl do_software_interrupt .align 5 prefetch_abort: get_bad_stack bad_save_user_regs bl do_prefetch_abort .align 5 data_abort: get_bad_stack bad_save_user_regs bl do_data_abort .align 5 not_used: get_bad_stack bad_save_user_regs bl do_not_used .align 5 irq: get_bad_stack bad_save_user_regs bl do_irq .align 5 fiq: get_bad_stack bad_save_user_regs bl do_fiq #endif /* CONFIG_SPL_BUILD */
4ms/stm32mp1-baremetal
5,110
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/lib/crt0_64.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * crt0 - C-runtime startup Code for AArch64 U-Boot * * (C) Copyright 2013 * David Feng <fenghua@phytium.com.cn> * * (C) Copyright 2012 * Albert ARIBAUD <albert.u.boot@aribaud.net> */ #include <config.h> #include <asm-offsets.h> #include <asm/macro.h> #include <linux/linkage.h> /* * This file handles the target-independent stages of the U-Boot * start-up where a C runtime environment is needed. Its entry point * is _main and is branched into from the target's start.S file. * * _main execution sequence is: * * 1. Set up initial environment for calling board_init_f(). * This environment only provides a stack and a place to store * the GD ('global data') structure, both located in some readily * available RAM (SRAM, locked cache...). In this context, VARIABLE * global data, initialized or not (BSS), are UNAVAILABLE; only * CONSTANT initialized data are available. GD should be zeroed * before board_init_f() is called. * * 2. Call board_init_f(). This function prepares the hardware for * execution from system RAM (DRAM, DDR...) As system RAM may not * be available yet, , board_init_f() must use the current GD to * store any data which must be passed on to later stages. These * data include the relocation destination, the future stack, and * the future GD location. * * 3. Set up intermediate environment where the stack and GD are the * ones allocated by board_init_f() in system RAM, but BSS and * initialized non-const data are still not available. * * 4a.For U-Boot proper (not SPL), call relocate_code(). This function * relocates U-Boot from its current location into the relocation * destination computed by board_init_f(). * * 4b.For SPL, board_init_f() just returns (to crt0). There is no * code relocation in SPL. * * 5. Set up final environment for calling board_init_r(). 
This * environment has BSS (initialized to 0), initialized non-const * data (initialized to their intended value), and stack in system * RAM (for SPL moving the stack and GD into RAM is optional - see * CONFIG_SPL_STACK_R). GD has retained values set by board_init_f(). * * TODO: For SPL, implement stack relocation on AArch64. * * 6. For U-Boot proper (not SPL), some CPUs have some work left to do * at this point regarding memory, so call c_runtime_cpu_setup. * * 7. Branch to board_init_r(). * * For more information see 'Board Initialisation Flow in README. */ ENTRY(_main) /* * Set up initial C runtime environment and call board_init_f(0). */ #if defined(CONFIG_TPL_BUILD) && defined(CONFIG_TPL_NEEDS_SEPARATE_STACK) ldr x0, =(CONFIG_TPL_STACK) #elif defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_STACK) ldr x0, =(CONFIG_SPL_STACK) #elif defined(CONFIG_INIT_SP_RELATIVE) adr x0, __bss_start add x0, x0, #CONFIG_SYS_INIT_SP_BSS_OFFSET #else ldr x0, =(CONFIG_SYS_INIT_SP_ADDR) #endif bic sp, x0, #0xf /* 16-byte alignment for ABI compliance */ mov x0, sp bl board_init_f_alloc_reserve mov sp, x0 /* set up gd here, outside any C code */ mov x18, x0 bl board_init_f_init_reserve mov x0, #0 bl board_init_f #if !defined(CONFIG_SPL_BUILD) /* * Set up intermediate environment (new sp and gd) and call * relocate_code(addr_moni). Trick here is that we'll return * 'here' but relocated. 
*/ ldr x0, [x18, #GD_START_ADDR_SP] /* x0 <- gd->start_addr_sp */ bic sp, x0, #0xf /* 16-byte alignment for ABI compliance */ ldr x18, [x18, #GD_NEW_GD] /* x18 <- gd->new_gd */ adr lr, relocation_return #if CONFIG_POSITION_INDEPENDENT /* Add in link-vs-runtime offset */ adr x0, _start /* x0 <- Runtime value of _start */ ldr x9, _TEXT_BASE /* x9 <- Linked value of _start */ sub x9, x9, x0 /* x9 <- Run-vs-link offset */ add lr, lr, x9 #endif /* Add in link-vs-relocation offset */ ldr x9, [x18, #GD_RELOC_OFF] /* x9 <- gd->reloc_off */ add lr, lr, x9 /* new return address after relocation */ ldr x0, [x18, #GD_RELOCADDR] /* x0 <- gd->relocaddr */ b relocate_code relocation_return: /* * Set up final (full) environment */ bl c_runtime_cpu_setup /* still call old routine */ #endif /* !CONFIG_SPL_BUILD */ #if !defined(CONFIG_SPL_BUILD) || CONFIG_IS_ENABLED(FRAMEWORK) #if defined(CONFIG_SPL_BUILD) bl spl_relocate_stack_gd /* may return NULL */ /* set up gd here, outside any C code, if new stack is returned */ cmp x0, #0 csel x18, x0, x18, ne /* * Perform 'sp = (x0 != NULL) ? x0 : sp' while working * around the constraint that conditional moves can not * have 'sp' as an operand */ mov x1, sp cmp x0, #0 csel x0, x0, x1, ne mov sp, x0 #endif /* * Clear BSS section */ ldr x0, =__bss_start /* this is auto-relocated! */ ldr x1, =__bss_end /* this is auto-relocated! */ clear_loop: str xzr, [x0], #8 cmp x0, x1 b.lo clear_loop /* call board_init_r(gd_t *id, ulong dest_addr) */ mov x0, x18 /* gd_t */ ldr x1, [x18, #GD_RELOCADDR] /* dest_addr */ b board_init_r /* PC relative jump */ /* NOTREACHED - board_init_r() does not return */ #endif ENDPROC(_main)
4ms/stm32mp1-baremetal
2,928
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/lib/relocate_64.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * relocate - common relocation function for AArch64 U-Boot * * (C) Copyright 2013 * Albert ARIBAUD <albert.u.boot@aribaud.net> * David Feng <fenghua@phytium.com.cn> */ #include <asm-offsets.h> #include <config.h> #include <elf.h> #include <linux/linkage.h> #include <asm/macro.h> /* * void relocate_code (addr_moni) * * This function relocates the monitor code. * x0 holds the destination address. */ ENTRY(relocate_code) stp x29, x30, [sp, #-32]! /* create a stack frame */ mov x29, sp str x0, [sp, #16] /* * Copy u-boot from flash to RAM */ adrp x1, __image_copy_start /* x1 <- address bits [31:12] */ add x1, x1, :lo12:__image_copy_start/* x1 <- address bits [11:00] */ subs x9, x0, x1 /* x9 <- Run to copy offset */ b.eq relocate_done /* skip relocation */ /* * Don't ldr x1, __image_copy_start here, since if the code is already * running at an address other than it was linked to, that instruction * will load the relocated value of __image_copy_start. To * correctly apply relocations, we need to know the linked value. * * Linked &__image_copy_start, which we know was at * CONFIG_SYS_TEXT_BASE, which is stored in _TEXT_BASE, as a non- * relocated value, since it isn't a symbol reference. 
*/ ldr x1, _TEXT_BASE /* x1 <- Linked &__image_copy_start */ subs x9, x0, x1 /* x9 <- Link to copy offset */ adrp x1, __image_copy_start /* x1 <- address bits [31:12] */ add x1, x1, :lo12:__image_copy_start/* x1 <- address bits [11:00] */ adrp x2, __image_copy_end /* x2 <- address bits [31:12] */ add x2, x2, :lo12:__image_copy_end /* x2 <- address bits [11:00] */ copy_loop: ldp x10, x11, [x1], #16 /* copy from source address [x1] */ stp x10, x11, [x0], #16 /* copy to target address [x0] */ cmp x1, x2 /* until source end address [x2] */ b.lo copy_loop str x0, [sp, #24] /* * Fix .rela.dyn relocations */ adrp x2, __rel_dyn_start /* x2 <- address bits [31:12] */ add x2, x2, :lo12:__rel_dyn_start /* x2 <- address bits [11:00] */ adrp x3, __rel_dyn_end /* x3 <- address bits [31:12] */ add x3, x3, :lo12:__rel_dyn_end /* x3 <- address bits [11:00] */ fixloop: ldp x0, x1, [x2], #16 /* (x0,x1) <- (SRC location, fixup) */ ldr x4, [x2], #8 /* x4 <- addend */ and x1, x1, #0xffffffff cmp x1, #R_AARCH64_RELATIVE bne fixnext /* relative fix: store addend plus offset at dest location */ add x0, x0, x9 add x4, x4, x9 str x4, [x0] fixnext: cmp x2, x3 b.lo fixloop relocate_done: switch_el x1, 3f, 2f, 1f bl hang 3: mrs x0, sctlr_el3 b 0f 2: mrs x0, sctlr_el2 b 0f 1: mrs x0, sctlr_el1 0: tbz w0, #2, 5f /* skip flushing cache if disabled */ tbz w0, #12, 4f /* skip invalidating i-cache if disabled */ ic iallu /* i-cache invalidate all */ isb sy 4: ldp x0, x1, [sp, #16] bl __asm_flush_dcache_range bl __asm_flush_l3_dcache 5: ldp x29, x30, [sp],#32 ret ENDPROC(relocate_code)
4ms/stm32mp1-baremetal
1,579
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/lib/vectors_m.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * (C) Copyright 2015 * Kamil Lulko, <kamil.lulko@gmail.com> */ #include <config.h> #include <asm/assembler.h> #include <linux/linkage.h> .type __hard_fault_entry, %function __hard_fault_entry: mov r0, sp @ pass auto-saved registers as argument b do_hard_fault .type __mm_fault_entry, %function __mm_fault_entry: mov r0, sp @ pass auto-saved registers as argument b do_mm_fault .type __bus_fault_entry, %function __bus_fault_entry: mov r0, sp @ pass auto-saved registers as argument b do_bus_fault .type __usage_fault_entry, %function __usage_fault_entry: mov r0, sp @ pass auto-saved registers as argument b do_usage_fault .type __invalid_entry, %function __invalid_entry: mov r0, sp @ pass auto-saved registers as argument b do_invalid_entry .section .vectors ENTRY(_start) .long CONFIG_SYS_INIT_SP_ADDR @ 0 - Reset stack pointer .long reset @ 1 - Reset .long __invalid_entry @ 2 - NMI .long __hard_fault_entry @ 3 - HardFault .long __mm_fault_entry @ 4 - MemManage .long __bus_fault_entry @ 5 - BusFault .long __usage_fault_entry @ 6 - UsageFault .long __invalid_entry @ 7 - Reserved .long __invalid_entry @ 8 - Reserved .long __invalid_entry @ 9 - Reserved .long __invalid_entry @ 10 - Reserved .long __invalid_entry @ 11 - SVCall .long __invalid_entry @ 12 - Debug Monitor .long __invalid_entry @ 13 - Reserved .long __invalid_entry @ 14 - PendSV .long __invalid_entry @ 15 - SysTick .rept 255 - 16 .long __invalid_entry @ 16..255 - External Interrupts .endr
4ms/stm32mp1-baremetal
1,836
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/lib/ccn504.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * (C) Copyright 2015 Freescale Semiconductor * * Extracted from gic_64.S */ #include <config.h> #include <linux/linkage.h> #include <asm/macro.h> /************************************************************************* * * void ccn504_add_masters_to_dvm(CCI_MN_BASE, CCI_MN_RNF_NODEID_LIST, * CCI_MN_DVM_DOMAIN_CTL_SET); * * Add fully-coherent masters to DVM domain * *************************************************************************/ ENTRY(ccn504_add_masters_to_dvm) /* * x0: CCI_MN_BASE * x1: CCI_MN_RNF_NODEID_LIST * x2: CCI_MN_DVM_DOMAIN_CTL_SET */ /* Add fully-coherent masters to DVM domain */ ldr x9, [x0, x1] str x9, [x0, x2] 1: ldr x10, [x0, x2] mvn x11, x10 tst x11, x10 /* Wait for domain addition to complete */ b.ne 1b ret ENDPROC(ccn504_add_masters_to_dvm) /************************************************************************* * * void ccn504_set_qos(CCI_Sx_QOS_CONTROL_BASE, QoS Value); * * Initialize QoS settings for AR/AW override. * Right now, this function sets the same QoS value for all RN-I ports * *************************************************************************/ ENTRY(ccn504_set_qos) /* * x0: CCI_Sx_QOS_CONTROL_BASE * x1: QoS Value */ /* Set all RN-I ports to QoS value denoted by x1 */ ldr x9, [x0] mov x10, x1 orr x9, x9, x10 str x9, [x0] ret ENDPROC(ccn504_set_qos) /************************************************************************* * * void ccn504_set_aux(CCI_AUX_CONTROL_BASE, Value); * * Initialize AUX control settings * *************************************************************************/ ENTRY(ccn504_set_aux) /* * x0: CCI_AUX_CONTROL_BASE * x1: Value */ ldr x9, [x0] mov x10, x1 orr x9, x9, x10 str x9, [x0] ret ENDPROC(ccn504_set_aux)
4ms/stm32mp1-baremetal
2,354
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/lib/memset.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/arch/arm/lib/memset.S * * Copyright (C) 1995-2000 Russell King * * ASM optimised string functions */ #include <linux/linkage.h> #include <asm/assembler.h> .text .align 5 .syntax unified #if CONFIG_IS_ENABLED(SYS_THUMB_BUILD) && !defined(MEMSET_NO_THUMB_BUILD) .thumb .thumb_func #endif ENTRY(memset) ands r3, r0, #3 @ 1 unaligned? mov ip, r0 @ preserve r0 as return value bne 6f @ 1 /* * we know that the pointer in ip is aligned to a word boundary. */ 1: orr r1, r1, r1, lsl #8 orr r1, r1, r1, lsl #16 mov r3, r1 cmp r2, #16 blt 4f #if ! CALGN(1)+0 /* * We need 2 extra registers for this loop - use r8 and the LR */ stmfd sp!, {r8, lr} mov r8, r1 mov lr, r1 2: subs r2, r2, #64 stmiage ip!, {r1, r3, r8, lr} @ 64 bytes at a time. stmiage ip!, {r1, r3, r8, lr} stmiage ip!, {r1, r3, r8, lr} stmiage ip!, {r1, r3, r8, lr} bgt 2b ldmfdeq sp!, {r8, pc} @ Now <64 bytes to go. /* * No need to correct the count; we're only testing bits from now on */ tst r2, #32 stmiane ip!, {r1, r3, r8, lr} stmiane ip!, {r1, r3, r8, lr} tst r2, #16 stmiane ip!, {r1, r3, r8, lr} ldmfd sp!, {r8, lr} #else /* * This version aligns the destination pointer in order to write * whole cache lines at once. */ stmfd sp!, {r4-r8, lr} mov r4, r1 mov r5, r1 mov r6, r1 mov r7, r1 mov r8, r1 mov lr, r1 cmp r2, #96 tstgt ip, #31 ble 3f and r8, ip, #31 rsb r8, r8, #32 sub r2, r2, r8 movs r8, r8, lsl #(32 - 4) stmiacs ip!, {r4, r5, r6, r7} stmiami ip!, {r4, r5} tst r8, #(1 << 30) mov r8, r1 strne r1, [ip], #4 3: subs r2, r2, #64 stmiage ip!, {r1, r3-r8, lr} stmiage ip!, {r1, r3-r8, lr} bgt 3b ldmfdeq sp!, {r4-r8, pc} tst r2, #32 stmiane ip!, {r1, r3-r8, lr} tst r2, #16 stmiane ip!, {r4-r7} ldmfd sp!, {r4-r8, lr} #endif 4: tst r2, #8 stmiane ip!, {r1, r3} tst r2, #4 strne r1, [ip], #4 /* * When we get here, we've got less than 4 bytes to zero. We * may have an unaligned pointer as well. 
*/ 5: tst r2, #2 strbne r1, [ip], #1 strbne r1, [ip], #1 tst r2, #1 strbne r1, [ip], #1 ret lr 6: subs r2, r2, #4 @ 1 do we have enough blt 5b @ 1 bytes to align with? cmp r3, #2 @ 1 strblt r1, [ip], #1 @ 1 strble r1, [ip], #1 @ 1 strb r1, [ip], #1 @ 1 add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3)) b 1b ENDPROC(memset)
4ms/stm32mp1-baremetal
5,062
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/lib/gic_64.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * GIC Initialization Routines. * * (C) Copyright 2013 * David Feng <fenghua@phytium.com.cn> */ #include <asm-offsets.h> #include <config.h> #include <linux/linkage.h> #include <asm/gic.h> #include <asm/macro.h> /************************************************************************* * * void gic_init_secure(DistributorBase); * * Initialize secure copy of GIC at EL3. * *************************************************************************/ ENTRY(gic_init_secure) /* * Initialize Distributor * x0: Distributor Base */ #if defined(CONFIG_GICV3) mov w9, #0x37 /* EnableGrp0 | EnableGrp1NS */ /* EnableGrp1S | ARE_S | ARE_NS */ str w9, [x0, GICD_CTLR] /* Secure GICD_CTLR */ ldr w9, [x0, GICD_TYPER] and w10, w9, #0x1f /* ITLinesNumber */ cbz w10, 1f /* No SPIs */ add x11, x0, (GICD_IGROUPRn + 4) add x12, x0, (GICD_IGROUPMODRn + 4) mov w9, #~0 0: str w9, [x11], #0x4 str wzr, [x12], #0x4 /* Config SPIs as Group1NS */ sub w10, w10, #0x1 cbnz w10, 0b #elif defined(CONFIG_GICV2) mov w9, #0x3 /* EnableGrp0 | EnableGrp1 */ str w9, [x0, GICD_CTLR] /* Secure GICD_CTLR */ ldr w9, [x0, GICD_TYPER] and w10, w9, #0x1f /* ITLinesNumber */ cbz w10, 1f /* No SPIs */ add x11, x0, GICD_IGROUPRn mov w9, #~0 /* Config SPIs as Grp1 */ str w9, [x11], #0x4 0: str w9, [x11], #0x4 sub w10, w10, #0x1 cbnz w10, 0b ldr x1, =GICC_BASE /* GICC_CTLR */ mov w0, #3 /* EnableGrp0 | EnableGrp1 */ str w0, [x1] mov w0, #1 << 7 /* allow NS access to GICC_PMR */ str w0, [x1, #4] /* GICC_PMR */ #endif 1: ret ENDPROC(gic_init_secure) /************************************************************************* * For Gicv2: * void gic_init_secure_percpu(DistributorBase, CpuInterfaceBase); * For Gicv3: * void gic_init_secure_percpu(ReDistributorBase); * * Initialize secure copy of GIC at EL3. 
* *************************************************************************/ ENTRY(gic_init_secure_percpu) #if defined(CONFIG_GICV3) /* * Initialize ReDistributor * x0: ReDistributor Base */ mrs x10, mpidr_el1 lsr x9, x10, #32 bfi x10, x9, #24, #8 /* w10 is aff3:aff2:aff1:aff0 */ mov x9, x0 1: ldr x11, [x9, GICR_TYPER] lsr x11, x11, #32 /* w11 is aff3:aff2:aff1:aff0 */ cmp w10, w11 b.eq 2f add x9, x9, #(2 << 16) b 1b /* x9: ReDistributor Base Address of Current CPU */ 2: mov w10, #~0x2 ldr w11, [x9, GICR_WAKER] and w11, w11, w10 /* Clear ProcessorSleep */ str w11, [x9, GICR_WAKER] dsb st isb 3: ldr w10, [x9, GICR_WAKER] tbnz w10, #2, 3b /* Wait Children be Alive */ add x10, x9, #(1 << 16) /* SGI_Base */ mov w11, #~0 str w11, [x10, GICR_IGROUPRn] str wzr, [x10, GICR_IGROUPMODRn] /* SGIs|PPIs Group1NS */ mov w11, #0x1 /* Enable SGI 0 */ str w11, [x10, GICR_ISENABLERn] switch_el x10, 3f, 2f, 1f 3: /* Initialize Cpu Interface */ mrs x10, ICC_SRE_EL3 orr x10, x10, #0xf /* SRE & Disable IRQ/FIQ Bypass & */ /* Allow EL2 access to ICC_SRE_EL2 */ msr ICC_SRE_EL3, x10 isb mov x10, #0x3 /* EnableGrp1NS | EnableGrp1S */ msr ICC_IGRPEN1_EL3, x10 isb msr ICC_CTLR_EL3, xzr isb 2: mrs x10, ICC_SRE_EL2 orr x10, x10, #0xf /* SRE & Disable IRQ/FIQ Bypass & */ /* Allow EL1 access to ICC_SRE_EL1 */ msr ICC_SRE_EL2, x10 isb 1: msr ICC_CTLR_EL1, xzr /* NonSecure ICC_CTLR_EL1 */ isb mov x10, #0x1 << 7 /* Non-Secure access to ICC_PMR_EL1 */ msr ICC_PMR_EL1, x10 isb #elif defined(CONFIG_GICV2) /* * Initialize SGIs and PPIs * x0: Distributor Base * x1: Cpu Interface Base */ mov w9, #~0 /* Config SGIs and PPIs as Grp1 */ str w9, [x0, GICD_IGROUPRn] /* GICD_IGROUPR0 */ mov w9, #0x1 /* Enable SGI 0 */ str w9, [x0, GICD_ISENABLERn] /* Initialize Cpu Interface */ mov w9, #0x1e7 /* Disable IRQ/FIQ Bypass & */ /* Enable Ack Group1 Interrupt & */ /* EnableGrp0 & EnableGrp1 */ str w9, [x1, GICC_CTLR] /* Secure GICC_CTLR */ mov w9, #0x1 << 7 /* Non-Secure access to GICC_PMR */ str w9, [x1, GICC_PMR] 
#endif ret ENDPROC(gic_init_secure_percpu) /************************************************************************* * For Gicv2: * void gic_kick_secondary_cpus(DistributorBase); * For Gicv3: * void gic_kick_secondary_cpus(void); * *************************************************************************/ ENTRY(gic_kick_secondary_cpus) #if defined(CONFIG_GICV3) mov x9, #(1 << 40) msr ICC_ASGI1R_EL1, x9 isb #elif defined(CONFIG_GICV2) mov w9, #0x8000 movk w9, #0x100, lsl #16 str w9, [x0, GICD_SGIR] #endif ret ENDPROC(gic_kick_secondary_cpus) /************************************************************************* * For Gicv2: * void gic_wait_for_interrupt(CpuInterfaceBase); * For Gicv3: * void gic_wait_for_interrupt(void); * * Wait for SGI 0 from master. * *************************************************************************/ ENTRY(gic_wait_for_interrupt) #if defined(CONFIG_GICV3) gic_wait_for_interrupt_m x9 #elif defined(CONFIG_GICV2) gic_wait_for_interrupt_m x0, w9 #endif ret ENDPROC(gic_wait_for_interrupt)
4ms/stm32mp1-baremetal
5,407
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/lib/crt0.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * crt0 - C-runtime startup Code for ARM U-Boot * * Copyright (c) 2012 Albert ARIBAUD <albert.u.boot@aribaud.net> */ #include <config.h> #include <asm-offsets.h> #include <linux/linkage.h> #include <asm/assembler.h> /* * This file handles the target-independent stages of the U-Boot * start-up where a C runtime environment is needed. Its entry point * is _main and is branched into from the target's start.S file. * * _main execution sequence is: * * 1. Set up initial environment for calling board_init_f(). * This environment only provides a stack and a place to store * the GD ('global data') structure, both located in some readily * available RAM (SRAM, locked cache...). In this context, VARIABLE * global data, initialized or not (BSS), are UNAVAILABLE; only * CONSTANT initialized data are available. GD should be zeroed * before board_init_f() is called. * * 2. Call board_init_f(). This function prepares the hardware for * execution from system RAM (DRAM, DDR...) As system RAM may not * be available yet, , board_init_f() must use the current GD to * store any data which must be passed on to later stages. These * data include the relocation destination, the future stack, and * the future GD location. * * 3. Set up intermediate environment where the stack and GD are the * ones allocated by board_init_f() in system RAM, but BSS and * initialized non-const data are still not available. * * 4a.For U-Boot proper (not SPL), call relocate_code(). This function * relocates U-Boot from its current location into the relocation * destination computed by board_init_f(). * * 4b.For SPL, board_init_f() just returns (to crt0). There is no * code relocation in SPL. * * 5. Set up final environment for calling board_init_r(). 
This * environment has BSS (initialized to 0), initialized non-const * data (initialized to their intended value), and stack in system * RAM (for SPL moving the stack and GD into RAM is optional - see * CONFIG_SPL_STACK_R). GD has retained values set by board_init_f(). * * 6. For U-Boot proper (not SPL), some CPUs have some work left to do * at this point regarding memory, so call c_runtime_cpu_setup. * * 7. Branch to board_init_r(). * * For more information see 'Board Initialisation Flow in README. */ /* * Macro for clearing BSS during SPL execution. Usually called during the * relocation process for most boards before entering board_init_r(), but * can also be done early before entering board_init_f() on plaforms that * can afford it due to sufficient memory being available early. */ .macro SPL_CLEAR_BSS ldr r0, =__bss_start /* this is auto-relocated! */ #ifdef CONFIG_USE_ARCH_MEMSET ldr r3, =__bss_end /* this is auto-relocated! */ mov r1, #0x00000000 /* prepare zero to clear BSS */ subs r2, r3, r0 /* r2 = memset len */ bl memset #else ldr r1, =__bss_end /* this is auto-relocated! */ mov r2, #0x00000000 /* prepare zero to clear BSS */ clbss_l:cmp r0, r1 /* while not at end of BSS */ strlo r2, [r0] /* clear 32-bit BSS word */ addlo r0, r0, #4 /* move to next */ blo clbss_l #endif .endm /* * entry point of crt0 sequence */ ENTRY(_main) /* * Set up initial C runtime environment and call board_init_f(0). */ #if defined(CONFIG_TPL_BUILD) && defined(CONFIG_TPL_NEEDS_SEPARATE_STACK) ldr r0, =(CONFIG_TPL_STACK) #elif defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_STACK) ldr r0, =(CONFIG_SPL_STACK) #else ldr r0, =(CONFIG_SYS_INIT_SP_ADDR) #endif bic r0, r0, #7 /* 8-byte alignment for ABI compliance */ mov sp, r0 bl board_init_f_alloc_reserve mov sp, r0 /* set up gd here, outside any C code */ mov r9, r0 bl board_init_f_init_reserve #if defined(CONFIG_SPL_EARLY_BSS) SPL_CLEAR_BSS #endif mov r0, #0 bl board_init_f #if ! 
defined(CONFIG_SPL_BUILD) /* * Set up intermediate environment (new sp and gd) and call * relocate_code(addr_moni). Trick here is that we'll return * 'here' but relocated. */ ldr r0, [r9, #GD_START_ADDR_SP] /* sp = gd->start_addr_sp */ bic r0, r0, #7 /* 8-byte alignment for ABI compliance */ mov sp, r0 ldr r9, [r9, #GD_NEW_GD] /* r9 <- gd->new_gd */ adr lr, here ldr r0, [r9, #GD_RELOC_OFF] /* r0 = gd->reloc_off */ add lr, lr, r0 #if defined(CONFIG_CPU_V7M) orr lr, #1 /* As required by Thumb-only */ #endif ldr r0, [r9, #GD_RELOCADDR] /* r0 = gd->relocaddr */ b relocate_code here: /* * now relocate vectors */ bl relocate_vectors /* Set up final (full) environment */ bl c_runtime_cpu_setup /* we still call old routine here */ #endif #if !defined(CONFIG_SPL_BUILD) || CONFIG_IS_ENABLED(FRAMEWORK) #if !defined(CONFIG_SPL_EARLY_BSS) SPL_CLEAR_BSS #endif # ifdef CONFIG_SPL_BUILD /* Use a DRAM stack for the rest of SPL, if requested */ bl spl_relocate_stack_gd cmp r0, #0 movne sp, r0 movne r9, r0 # endif #if ! defined(CONFIG_SPL_BUILD) bl coloured_LED_init bl red_led_on #endif /* call board_init_r(gd_t *id, ulong dest_addr) */ mov r0, r9 /* gd_t */ ldr r1, [r9, #GD_RELOCADDR] /* dest_addr */ /* call board_init_r */ #if CONFIG_IS_ENABLED(SYS_THUMB_BUILD) ldr lr, =board_init_r /* this is auto-relocated! */ bx lr #else ldr pc, =board_init_r /* this is auto-relocated! */ #endif /* we should not return here. */ #endif ENDPROC(_main)
4ms/stm32mp1-baremetal
3,895
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/lib/crt0_aarch64_efi.S
/* SPDX-License-Identifier: GPL-2.0+ OR BSD-2-Clause */ /* * crt0-efi-aarch64.S - PE/COFF header for aarch64 EFI applications * * Copright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org> * * * This file is taken and modified from the gnu-efi project. */ #include <asm-generic/pe.h> .section .text.head /* * Magic "MZ" signature for PE/COFF */ .globl ImageBase ImageBase: .short IMAGE_DOS_SIGNATURE /* 'MZ' */ .skip 58 /* 'MZ' + pad + offset == 64 */ .long pe_header - ImageBase /* Offset to the PE header */ pe_header: .long IMAGE_NT_SIGNATURE /* 'PE' */ coff_header: .short IMAGE_FILE_MACHINE_ARM64 /* AArch64 */ .short 2 /* nr_sections */ .long 0 /* TimeDateStamp */ .long 0 /* PointerToSymbolTable */ .long 0 /* NumberOfSymbols */ .short section_table - optional_header /* SizeOfOptionalHeader */ /* Characteristics */ .short (IMAGE_FILE_EXECUTABLE_IMAGE | \ IMAGE_FILE_LINE_NUMS_STRIPPED | \ IMAGE_FILE_LOCAL_SYMS_STRIPPED | \ IMAGE_FILE_DEBUG_STRIPPED) optional_header: .short IMAGE_NT_OPTIONAL_HDR64_MAGIC /* PE32+ format */ .byte 0x02 /* MajorLinkerVersion */ .byte 0x14 /* MinorLinkerVersion */ .long _edata - _start /* SizeOfCode */ .long 0 /* SizeOfInitializedData */ .long 0 /* SizeOfUninitializedData */ .long _start - ImageBase /* AddressOfEntryPoint */ .long _start - ImageBase /* BaseOfCode */ extra_header_fields: .quad 0 /* ImageBase */ .long 0x20 /* SectionAlignment */ .long 0x8 /* FileAlignment */ .short 0 /* MajorOperatingSystemVersion */ .short 0 /* MinorOperatingSystemVersion */ .short 0 /* MajorImageVersion */ .short 0 /* MinorImageVersion */ .short 0 /* MajorSubsystemVersion */ .short 0 /* MinorSubsystemVersion */ .long 0 /* Win32VersionValue */ .long _edata - ImageBase /* SizeOfImage */ /* * Everything before the kernel image is considered part of the header */ .long _start - ImageBase /* SizeOfHeaders */ .long 0 /* CheckSum */ .short IMAGE_SUBSYSTEM_EFI_APPLICATION /* Subsystem */ .short 0 /* DllCharacteristics */ .quad 0 /* SizeOfStackReserve */ .quad 0 /* 
SizeOfStackCommit */ .quad 0 /* SizeOfHeapReserve */ .quad 0 /* SizeOfHeapCommit */ .long 0 /* LoaderFlags */ .long 0x6 /* NumberOfRvaAndSizes */ .quad 0 /* ExportTable */ .quad 0 /* ImportTable */ .quad 0 /* ResourceTable */ .quad 0 /* ExceptionTable */ .quad 0 /* CertificationTable */ .quad 0 /* BaseRelocationTable */ /* Section table */ section_table: /* * The EFI application loader requires a relocation section * because EFI applications must be relocatable. This is a * dummy section as far as we are concerned. */ .ascii ".reloc" .byte 0 .byte 0 /* end of 0 padding of section name */ .long 0 .long 0 .long 0 /* SizeOfRawData */ .long 0 /* PointerToRawData */ .long 0 /* PointerToRelocations */ .long 0 /* PointerToLineNumbers */ .short 0 /* NumberOfRelocations */ .short 0 /* NumberOfLineNumbers */ .long 0x42100040 /* Characteristics (section flags) */ .ascii ".text" .byte 0 .byte 0 .byte 0 /* end of 0 padding of section name */ .long _edata - _start /* VirtualSize */ .long _start - ImageBase /* VirtualAddress */ .long _edata - _start /* SizeOfRawData */ .long _start - ImageBase /* PointerToRawData */ .long 0 /* PointerToRelocations (0 for executables) */ .long 0 /* PointerToLineNumbers (0 for executables) */ .short 0 /* NumberOfRelocations (0 for executables) */ .short 0 /* NumberOfLineNumbers (0 for executables) */ .long 0xe0500020 /* Characteristics (section flags) */ _start: stp x29, x30, [sp, #-32]! mov x29, sp stp x0, x1, [sp, #16] adr x0, ImageBase adrp x1, _DYNAMIC add x1, x1, #:lo12:_DYNAMIC bl _relocate cbnz x0, 0f ldp x0, x1, [sp, #16] bl efi_main 0: ldp x29, x30, [sp], #32 ret
4ms/stm32mp1-baremetal
8,771
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/lib/lib1funcs.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * linux/arch/arm/lib/lib1funcs.S: Optimized ARM division routines * * Author: Nicolas Pitre <nico@fluxnic.net> * - contributed to gcc-3.4 on Sep 30, 2003 * - adapted for the Linux kernel on Oct 2, 2003 */ /* * Copyright 1995, 1996, 1998, 1999, 2000, 2003 Free Software Foundation, Inc. */ #include <linux/linkage.h> #include <asm/assembler.h> /* * U-Boot compatibility bit, define empty UNWIND() macro as, since we * do not support stack unwinding and define CONFIG_AEABI to make all * of the functions available without diverging from Linux code. */ #ifdef __UBOOT__ #define UNWIND(x...) #define CONFIG_AEABI #endif .macro ARM_DIV_BODY dividend, divisor, result, curbit #if __LINUX_ARM_ARCH__ >= 5 clz \curbit, \divisor clz \result, \dividend sub \result, \curbit, \result mov \curbit, #1 mov \divisor, \divisor, lsl \result mov \curbit, \curbit, lsl \result mov \result, #0 #else @ Initially shift the divisor left 3 bits if possible, @ set curbit accordingly. This allows for curbit to be located @ at the left end of each 4 bit nibbles in the division loop @ to save one loop in most cases. tst \divisor, #0xe0000000 moveq \divisor, \divisor, lsl #3 moveq \curbit, #8 movne \curbit, #1 @ Unless the divisor is very big, shift it up in multiples of @ four bits, since this is the amount of unwinding in the main @ division loop. Continue shifting until the divisor is @ larger than the dividend. 1: cmp \divisor, #0x10000000 cmplo \divisor, \dividend movlo \divisor, \divisor, lsl #4 movlo \curbit, \curbit, lsl #4 blo 1b @ For very big divisors, we must shift it a bit at a time, or @ we will be in danger of overflowing. 
1: cmp \divisor, #0x80000000 cmplo \divisor, \dividend movlo \divisor, \divisor, lsl #1 movlo \curbit, \curbit, lsl #1 blo 1b mov \result, #0 #endif @ Division loop 1: cmp \dividend, \divisor subhs \dividend, \dividend, \divisor orrhs \result, \result, \curbit cmp \dividend, \divisor, lsr #1 subhs \dividend, \dividend, \divisor, lsr #1 orrhs \result, \result, \curbit, lsr #1 cmp \dividend, \divisor, lsr #2 subhs \dividend, \dividend, \divisor, lsr #2 orrhs \result, \result, \curbit, lsr #2 cmp \dividend, \divisor, lsr #3 subhs \dividend, \dividend, \divisor, lsr #3 orrhs \result, \result, \curbit, lsr #3 cmp \dividend, #0 @ Early termination? movsne \curbit, \curbit, lsr #4 @ No, any more bits to do? movne \divisor, \divisor, lsr #4 bne 1b .endm .macro ARM_DIV2_ORDER divisor, order #if __LINUX_ARM_ARCH__ >= 5 clz \order, \divisor rsb \order, \order, #31 #else cmp \divisor, #(1 << 16) movhs \divisor, \divisor, lsr #16 movhs \order, #16 movlo \order, #0 cmp \divisor, #(1 << 8) movhs \divisor, \divisor, lsr #8 addhs \order, \order, #8 cmp \divisor, #(1 << 4) movhs \divisor, \divisor, lsr #4 addhs \order, \order, #4 cmp \divisor, #(1 << 2) addhi \order, \order, #3 addls \order, \order, \divisor, lsr #1 #endif .endm .macro ARM_MOD_BODY dividend, divisor, order, spare #if __LINUX_ARM_ARCH__ >= 5 clz \order, \divisor clz \spare, \dividend sub \order, \order, \spare mov \divisor, \divisor, lsl \order #else mov \order, #0 @ Unless the divisor is very big, shift it up in multiples of @ four bits, since this is the amount of unwinding in the main @ division loop. Continue shifting until the divisor is @ larger than the dividend. 1: cmp \divisor, #0x10000000 cmplo \divisor, \dividend movlo \divisor, \divisor, lsl #4 addlo \order, \order, #4 blo 1b @ For very big divisors, we must shift it a bit at a time, or @ we will be in danger of overflowing. 
1: cmp \divisor, #0x80000000 cmplo \divisor, \dividend movlo \divisor, \divisor, lsl #1 addlo \order, \order, #1 blo 1b #endif @ Perform all needed subtractions to keep only the reminder. @ Do comparisons in batch of 4 first. subs \order, \order, #3 @ yes, 3 is intended here blt 2f 1: cmp \dividend, \divisor subhs \dividend, \dividend, \divisor cmp \dividend, \divisor, lsr #1 subhs \dividend, \dividend, \divisor, lsr #1 cmp \dividend, \divisor, lsr #2 subhs \dividend, \dividend, \divisor, lsr #2 cmp \dividend, \divisor, lsr #3 subhs \dividend, \dividend, \divisor, lsr #3 cmp \dividend, #1 mov \divisor, \divisor, lsr #4 subsge \order, \order, #4 bge 1b tst \order, #3 teqne \dividend, #0 beq 5f @ Either 1, 2 or 3 comparison/subtractions are left. 2: cmn \order, #2 blt 4f beq 3f cmp \dividend, \divisor subhs \dividend, \dividend, \divisor mov \divisor, \divisor, lsr #1 3: cmp \dividend, \divisor subhs \dividend, \dividend, \divisor mov \divisor, \divisor, lsr #1 4: cmp \dividend, \divisor subhs \dividend, \dividend, \divisor 5: .endm .pushsection .text.__udivsi3, "ax" ENTRY(__udivsi3) ENTRY(__aeabi_uidiv) UNWIND(.fnstart) subs r2, r1, #1 reteq lr bcc Ldiv0 cmp r0, r1 bls 11f tst r1, r2 beq 12f ARM_DIV_BODY r0, r1, r2, r3 mov r0, r2 ret lr 11: moveq r0, #1 movne r0, #0 ret lr 12: ARM_DIV2_ORDER r1, r2 mov r0, r0, lsr r2 ret lr UNWIND(.fnend) ENDPROC(__udivsi3) ENDPROC(__aeabi_uidiv) .popsection .pushsection .text.__umodsi3, "ax" ENTRY(__umodsi3) UNWIND(.fnstart) subs r2, r1, #1 @ compare divisor with 1 bcc Ldiv0 cmpne r0, r1 @ compare dividend with divisor moveq r0, #0 tsthi r1, r2 @ see if divisor is power of 2 andeq r0, r0, r2 retls lr ARM_MOD_BODY r0, r1, r2, r3 ret lr UNWIND(.fnend) ENDPROC(__umodsi3) .popsection .pushsection .text.__divsi3, "ax" ENTRY(__divsi3) ENTRY(__aeabi_idiv) UNWIND(.fnstart) cmp r1, #0 eor ip, r0, r1 @ save the sign of the result. beq Ldiv0 rsbmi r1, r1, #0 @ loops below use unsigned. subs r2, r1, #1 @ division by 1 or -1 ? 
beq 10f movs r3, r0 rsbmi r3, r0, #0 @ positive dividend value cmp r3, r1 bls 11f tst r1, r2 @ divisor is power of 2 ? beq 12f ARM_DIV_BODY r3, r1, r0, r2 cmp ip, #0 rsbmi r0, r0, #0 ret lr 10: teq ip, r0 @ same sign ? rsbmi r0, r0, #0 ret lr 11: movlo r0, #0 moveq r0, ip, asr #31 orreq r0, r0, #1 ret lr 12: ARM_DIV2_ORDER r1, r2 cmp ip, #0 mov r0, r3, lsr r2 rsbmi r0, r0, #0 ret lr UNWIND(.fnend) ENDPROC(__divsi3) ENDPROC(__aeabi_idiv) .popsection .pushsection .text.__modsi3, "ax" ENTRY(__modsi3) UNWIND(.fnstart) cmp r1, #0 beq Ldiv0 rsbmi r1, r1, #0 @ loops below use unsigned. movs ip, r0 @ preserve sign of dividend rsbmi r0, r0, #0 @ if negative make positive subs r2, r1, #1 @ compare divisor with 1 cmpne r0, r1 @ compare dividend with divisor moveq r0, #0 tsthi r1, r2 @ see if divisor is power of 2 andeq r0, r0, r2 bls 10f ARM_MOD_BODY r0, r1, r2, r3 10: cmp ip, #0 rsbmi r0, r0, #0 ret lr UNWIND(.fnend) ENDPROC(__modsi3) .popsection #ifdef CONFIG_AEABI .pushsection .text.__aeabi_uidivmod, "ax" ENTRY(__aeabi_uidivmod) UNWIND(.fnstart) UNWIND(.save {r0, r1, ip, lr} ) stmfd sp!, {r0, r1, ip, lr} bl __aeabi_uidiv ldmfd sp!, {r1, r2, ip, lr} mul r3, r0, r2 sub r1, r1, r3 ret lr UNWIND(.fnend) ENDPROC(__aeabi_uidivmod) .popsection .pushsection .text.__aeabi_uidivmod, "ax" ENTRY(__aeabi_idivmod) UNWIND(.fnstart) UNWIND(.save {r0, r1, ip, lr} ) stmfd sp!, {r0, r1, ip, lr} bl __aeabi_idiv ldmfd sp!, {r1, r2, ip, lr} mul r3, r0, r2 sub r1, r1, r3 ret lr UNWIND(.fnend) ENDPROC(__aeabi_idivmod) .popsection #endif .pushsection .text.Ldiv0, "ax" Ldiv0: UNWIND(.fnstart) UNWIND(.pad #4) UNWIND(.save {lr}) str lr, [sp, #-8]! bl __div0 mov r0, #0 @ About as wrong as it could be. 
ldr pc, [sp], #8 UNWIND(.fnend) ENDPROC(Ldiv0) .popsection /* Thumb-1 specialities */ #if CONFIG_IS_ENABLED(SYS_THUMB_BUILD) && !defined(CONFIG_HAS_THUMB2) .pushsection .text.__gnu_thumb1_case_sqi, "ax" ENTRY(__gnu_thumb1_case_sqi) push {r1} mov r1, lr lsrs r1, r1, #1 lsls r1, r1, #1 ldrsb r1, [r1, r0] lsls r1, r1, #1 add lr, lr, r1 pop {r1} bx lr ENDPROC(__gnu_thumb1_case_sqi) .popsection .pushsection .text.__gnu_thumb1_case_uqi, "ax" ENTRY(__gnu_thumb1_case_uqi) push {r1} mov r1, lr lsrs r1, r1, #1 lsls r1, r1, #1 ldrb r1, [r1, r0] lsls r1, r1, #1 add lr, lr, r1 pop {r1} bx lr ENDPROC(__gnu_thumb1_case_uqi) .popsection .pushsection .text.__gnu_thumb1_case_shi, "ax" ENTRY(__gnu_thumb1_case_shi) push {r0, r1} mov r1, lr lsrs r1, r1, #1 lsls r0, r0, #1 lsls r1, r1, #1 ldrsh r1, [r1, r0] lsls r1, r1, #1 add lr, lr, r1 pop {r0, r1} bx lr ENDPROC(__gnu_thumb1_case_shi) .popsection .pushsection .text.__gnu_thumb1_case_uhi, "ax" ENTRY(__gnu_thumb1_case_uhi) push {r0, r1} mov r1, lr lsrs r1, r1, #1 lsls r0, r0, #1 lsls r1, r1, #1 ldrh r1, [r1, r0] lsls r1, r1, #1 add lr, lr, r1 pop {r0, r1} bx lr ENDPROC(__gnu_thumb1_case_uhi) .popsection #endif
4ms/stm32mp1-baremetal
2,196
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/lib/debug.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * linux/arch/arm/kernel/debug.S * * Copyright (C) 1994-1999 Russell King * * 32-bit debugging code */ #include <linux/linkage.h> #include <asm/assembler.h> .text /* * Some debugging routines (useful if you've got MM problems and * printk isn't working). For DEBUGGING ONLY!!! Do not leave * references to these in a production kernel! */ #if !defined(CONFIG_DEBUG_SEMIHOSTING) #include CONFIG_DEBUG_LL_INCLUDE #endif #ifdef CONFIG_MMU .macro addruart_current, rx, tmp1, tmp2 addruart \tmp1, \tmp2, \rx mrc p15, 0, \rx, c1, c0 tst \rx, #1 moveq \rx, \tmp1 movne \rx, \tmp2 .endm #else /* !CONFIG_MMU */ .macro addruart_current, rx, tmp1, tmp2 addruart \rx, \tmp1, \tmp2 .endm #endif /* CONFIG_MMU */ /* * Useful debugging routines */ ENTRY(printhex8) mov r1, #8 b printhex ENDPROC(printhex8) ENTRY(printhex4) mov r1, #4 b printhex ENDPROC(printhex4) ENTRY(printhex2) mov r1, #2 printhex: adr r2, hexbuf add r3, r2, r1 mov r1, #0 strb r1, [r3] 1: and r1, r0, #15 mov r0, r0, lsr #4 cmp r1, #10 addlt r1, r1, #'0' addge r1, r1, #'a' - 10 strb r1, [r3, #-1]! 
teq r3, r2 bne 1b mov r0, r2 b printascii ENDPROC(printhex2) hexbuf: .space 16 .ltorg #ifndef CONFIG_DEBUG_SEMIHOSTING ENTRY(printascii) addruart_current r3, r1, r2 b 2f 1: waituart r2, r3 senduart r1, r3 busyuart r2, r3 teq r1, #'\n' moveq r1, #'\r' beq 1b 2: teq r0, #0 ldrneb r1, [r0], #1 teqne r1, #0 bne 1b mov pc, lr ENDPROC(printascii) ENTRY(printch) addruart_current r3, r1, r2 mov r1, r0 mov r0, #0 b 1b ENDPROC(printch) #ifdef CONFIG_MMU ENTRY(debug_ll_addr) addruart r2, r3, ip str r2, [r0] str r3, [r1] mov pc, lr ENDPROC(debug_ll_addr) #endif #else ENTRY(printascii) mov r1, r0 mov r0, #0x04 @ SYS_WRITE0 ARM( svc #0x123456 ) THUMB( svc #0xab ) mov pc, lr ENDPROC(printascii) ENTRY(printch) adr r1, hexbuf strb r0, [r1] mov r0, #0x03 @ SYS_WRITEC ARM( svc #0x123456 ) THUMB( svc #0xab ) mov pc, lr ENDPROC(printch) ENTRY(debug_ll_addr) mov r2, #0 str r2, [r0] str r2, [r1] mov pc, lr ENDPROC(debug_ll_addr) #endif
4ms/stm32mp1-baremetal
3,677
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/mach-exynos/sec_boot.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2013 Samsung Electronics * Akshay Saraswat <akshay.s@samsung.com> */ #include <config.h> #include <asm/arch/cpu.h> .globl relocate_wait_code relocate_wait_code: adr r0, code_base @ r0: source address (start) adr r1, code_end @ r1: source address (end) ldr r2, =0x02073000 @ r2: target address 1: ldmia r0!, {r3-r6} stmia r2!, {r3-r6} cmp r0, r1 blt 1b b code_end .ltorg /* * Secondary core waits here until Primary wake it up. * Below code is copied to CONFIG_EXYNOS_RELOCATE_CODE_BASE. * This is a workaround code which is supposed to act as a * substitute/supplement to the iROM code. * * This workaround code is relocated to the address 0x02073000 * because that comes out to be the last 4KB of the iRAM * (Base Address - 0x02020000, Limit Address - 0x020740000). * * U-Boot and kernel are aware of this code and flags by the simple * fact that we are implementing a workaround in the last 4KB * of the iRAM and we have already defined these flag and address * values in both kernel and U-Boot for our use. */ code_base: b 1f /* * These addresses are being used as flags in u-boot and kernel. * * Jump address for resume and flag to check for resume/reset: * Resume address - 0x2073008 * Resume flag - 0x207300C * * Jump address for cluster switching: * Switch address - 0x2073018 * * Jump address for core hotplug: * Hotplug address - 0x207301C * * Jump address for C2 state (Reserved for future not being used right now): * C2 address - 0x2073024 * * Managed per core status for the active cluster: * CPU0 state - 0x2073028 * CPU1 state - 0x207302C * CPU2 state - 0x2073030 * CPU3 state - 0x2073034 * * Managed per core GIC status for the active cluster: * CPU0 gic state - 0x2073038 * CPU1 gic state - 0x207303C * CPU2 gic state - 0x2073040 * CPU3 gic state - 0x2073044 * * Logic of the code: * Step-1: Read current CPU status. * Step-2: If it's a resume then continue, else jump to step 4. 
* Step-3: Clear inform1 PMU register and jump to inform0 value. * Step-4: If it's a switch, C2 or reset, get the hotplug address. * Step-5: If address is not available, enter WFE. * Step-6: If address is available, jump to that address. */ nop @ for backward compatibility .word 0x0 @ REG0: RESUME_ADDR .word 0x0 @ REG1: RESUME_FLAG .word 0x0 @ REG2 .word 0x0 @ REG3 _switch_addr: .word 0x0 @ REG4: SWITCH_ADDR _hotplug_addr: .word 0x0 @ REG5: CPU1_BOOT_REG .word 0x0 @ REG6 _c2_addr: .word 0x0 @ REG7: REG_C2_ADDR _cpu_state: .word 0x1 @ CPU0_STATE : RESET .word 0x2 @ CPU1_STATE : SECONDARY RESET .word 0x2 @ CPU2_STATE : SECONDARY RESET .word 0x2 @ CPU3_STATE : SECONDARY RESET _gic_state: .word 0x0 @ CPU0 - GICD_IGROUPR0 .word 0x0 @ CPU1 - GICD_IGROUPR0 .word 0x0 @ CPU2 - GICD_IGROUPR0 .word 0x0 @ CPU3 - GICD_IGROUPR0 1: adr r0, _cpu_state mrc p15, 0, r7, c0, c0, 5 @ read MPIDR and r7, r7, #0xf @ r7 = cpu id /* Read the current cpu state */ ldr r10, [r0, r7, lsl #2] svc_entry: tst r10, #(1 << 4) adrne r0, _switch_addr bne wait_for_addr /* Clear INFORM1 */ ldr r0, =(0x10040000 + 0x804) ldr r1, [r0] cmp r1, #0x0 movne r1, #0x0 strne r1, [r0] /* Get INFORM0 */ ldrne r1, =(0x10040000 + 0x800) ldrne pc, [r1] tst r10, #(1 << 0) ldrne pc, =0x23e00000 adr r0, _hotplug_addr wait_for_addr: ldr r1, [r0] cmp r1, #0x0 bxne r1 wfe b wait_for_addr .ltorg code_end: mov pc, lr
4ms/stm32mp1-baremetal
1,392
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/mach-mvebu/lowlevel_spl.S
/* SPDX-License-Identifier: GPL-2.0+ */ #include <config.h> #include <linux/linkage.h> ENTRY(save_boot_params) stmfd sp!, {r0 - r12, lr} /* @ save registers on stack */ ldr r12, =CONFIG_SPL_BOOTROM_SAVE str sp, [r12] b save_boot_params_ret ENDPROC(save_boot_params) ENTRY(return_to_bootrom) ldr r12, =CONFIG_SPL_BOOTROM_SAVE ldr sp, [r12] mov r0, #0x0 /* @ return value: 0x0 NO_ERR */ ldmfd sp!, {r0 - r12, pc} /* @ restore regs and return */ ENDPROC(return_to_bootrom) /* * cache_inv - invalidate Cache line * r0 - dest */ .global cache_inv .type cache_inv, %function cache_inv: stmfd sp!, {r1-r12} mcr p15, 0, r0, c7, c6, 1 ldmfd sp!, {r1-r12} bx lr /* * flush_l1_v6 - l1 cache clean invalidate * r0 - dest */ .global flush_l1_v6 .type flush_l1_v6, %function flush_l1_v6: stmfd sp!, {r1-r12} mcr p15, 0, r0, c7, c10, 5 /* @ data memory barrier */ mcr p15, 0, r0, c7, c14, 1 /* @ clean & invalidate D line */ mcr p15, 0, r0, c7, c10, 4 /* @ data sync barrier */ ldmfd sp!, {r1-r12} bx lr /* * flush_l1_v7 - l1 cache clean invalidate * r0 - dest */ .global flush_l1_v7 .type flush_l1_v7, %function flush_l1_v7: stmfd sp!, {r1-r12} dmb /* @data memory barrier */ mcr p15, 0, r0, c7, c14, 1 /* @ clean & invalidate D line */ dsb /* @data sync barrier */ ldmfd sp!, {r1-r12} bx lr
4ms/stm32mp1-baremetal
2,202
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/mach-tegra/psci.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2014, NVIDIA * Copyright (C) 2015, Siemens AG * * Authors: * Thierry Reding <treding@nvidia.com> * Jan Kiszka <jan.kiszka@siemens.com> */ #include <linux/linkage.h> #include <asm/macro.h> #include <asm/psci.h> .pushsection ._secure.text, "ax" .arch_extension sec #define TEGRA_SB_CSR_0 0x6000c200 #define NS_RST_VEC_WR_DIS (1 << 1) #define TEGRA_RESET_EXCEPTION_VECTOR 0x6000f100 #define TEGRA_FLOW_CTRL_BASE 0x60007000 #define FLOW_CTRL_CPU_CSR 0x08 #define CSR_ENABLE (1 << 0) #define CSR_IMMEDIATE_WAKE (1 << 3) #define CSR_WAIT_WFI_SHIFT 8 #define FLOW_CTRL_CPU1_CSR 0x18 @ converts CPU ID into FLOW_CTRL_CPUn_CSR offset .macro get_csr_reg cpu, ofs, tmp cmp \cpu, #0 @ CPU0? lsl \tmp, \cpu, #3 @ multiple by 8 (register offset CPU1-3) moveq \ofs, #FLOW_CTRL_CPU_CSR addne \ofs, \tmp, #FLOW_CTRL_CPU1_CSR - 8 .endm ENTRY(psci_arch_init) mov r6, lr mrc p15, 0, r5, c1, c1, 0 @ Read SCR bic r5, r5, #1 @ Secure mode mcr p15, 0, r5, c1, c1, 0 @ Write SCR isb @ lock reset vector for non-secure ldr r4, =TEGRA_SB_CSR_0 ldr r5, [r4] orr r5, r5, #NS_RST_VEC_WR_DIS str r5, [r4] bl psci_get_cpu_id @ CPU ID => r0 adr r5, _sys_clock_freq cmp r0, #0 mrceq p15, 0, r7, c14, c0, 0 @ read CNTFRQ from CPU0 streq r7, [r5] ldrne r7, [r5] mcrne p15, 0, r7, c14, c0, 0 @ write CNTFRQ to CPU1..3 bx r6 ENDPROC(psci_arch_init) _sys_clock_freq: .word 0 ENTRY(psci_cpu_off) bl psci_cpu_off_common bl psci_get_cpu_id @ CPU ID => r0 get_csr_reg r0, r2, r3 ldr r6, =TEGRA_FLOW_CTRL_BASE mov r5, #(CSR_ENABLE) mov r4, #(1 << CSR_WAIT_WFI_SHIFT) add r5, r4, lsl r0 str r5, [r6, r2] _loop: wfi b _loop ENDPROC(psci_cpu_off) ENTRY(psci_cpu_on) push {r4, r5, r6, lr} mov r4, r1 mov r0, r1 mov r1, r2 mov r2, r3 bl psci_save @ store target PC and context id mov r1, r4 ldr r6, =TEGRA_RESET_EXCEPTION_VECTOR ldr r5, =psci_cpu_entry str r5, [r6] get_csr_reg r1, r2, r3 ldr r6, =TEGRA_FLOW_CTRL_BASE mov r5, #(CSR_IMMEDIATE_WAKE | CSR_ENABLE) str r5, [r6, r2] mov r0, 
#ARM_PSCI_RET_SUCCESS @ Return PSCI_RET_SUCCESS pop {r4, r5, r6, pc} ENDPROC(psci_cpu_on) .popsection
4ms/stm32mp1-baremetal
1,951
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/mach-mediatek/mt7629/lowlevel_init.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2018 MediaTek Inc. */ #include <linux/linkage.h> #include <asm/proc-armv/ptrace.h> #define WAIT_CODE_SRAM_BASE 0x0010ff00 #define SLAVE_JUMP_REG 0x10202034 #define SLAVE1_MAGIC_REG 0x10202038 #define SLAVE1_MAGIC_NUM 0x534c4131 #define GIC_CPU_BASE 0x10320000 ENTRY(lowlevel_init) #ifndef CONFIG_SPL_BUILD /* Return to U-Boot via saved link register */ mov pc, lr #else /* * Arch timer : * set CNTFRQ = 20Mhz, set CNTVOFF = 0 */ movw r0, #0x2d00 movt r0, #0x131 mcr p15, 0, r0, c14, c0, 0 cps #MON_MODE mrc p15, 0, r1, c1, c1, 0 @ Get Secure Config orr r0, r1, #1 mcr p15, 0, r0, c1, c1, 0 @ Set Non Secure bit isb mov r0, #0 mcrr p15, 4, r0, r0, c14 @ CNTVOFF = 0 isb mcr p15, 0, r1, c1, c1, 0 @ Set Secure bit isb cps #SVC_MODE /* enable SMP bit */ mrc p15, 0, r0, c1, c0, 1 orr r0, r0, #0x40 mcr p15, 0, r0, c1, c0, 1 /* if MP core, handle secondary cores */ mrc p15, 0, r0, c0, c0, 5 ands r1, r0, #0x40000000 bne go @ Go if UP /* read slave CPU number */ ands r0, r0, #0x0f beq go @ Go if core0 on primary core tile b secondary go: /* master CPU */ mov pc, lr secondary: /* enable GIC as cores will be waken up by IPI */ ldr r2, =GIC_CPU_BASE mov r1, #0xf0 str r1, [r2, #4] mov r1, #1 str r1, [r2, #0] ldr r1, [r2] orr r1, #1 str r1, [r2] /* copy wait code into SRAM */ ldr r0, =slave_cpu_wait ldm r0, {r1 - r8} @ slave_cpu_wait has eight insns ldr r0, =WAIT_CODE_SRAM_BASE stm r0, {r1 - r8} /* pass args to slave_cpu_wait */ ldr r0, =SLAVE1_MAGIC_REG ldr r1, =SLAVE1_MAGIC_NUM /* jump to wait code in SRAM */ ldr pc, =WAIT_CODE_SRAM_BASE #endif ENDPROC(lowlevel_init) /* This function will be copied into SRAM */ ENTRY(slave_cpu_wait) wfi ldr r2, [r0] cmp r2, r1 bne slave_cpu_wait movw r0, #:lower16:SLAVE_JUMP_REG movt r0, #:upper16:SLAVE_JUMP_REG ldr r1, [r0] mov pc, r1 ENDPROC(slave_cpu_wait)
4ms/stm32mp1-baremetal
12,571
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/mach-omap2/omap3/lowlevel_init.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Board specific setup info * * (C) Copyright 2008 * Texas Instruments, <www.ti.com> * * Initial Code by: * Richard Woodruff <r-woodruff2@ti.com> * Syed Mohammed Khasim <khasim@ti.com> */ #include <config.h> #include <asm/arch/mem.h> #include <asm/arch/clocks_omap3.h> #include <linux/linkage.h> /* * Funtion for making PPA HAL API calls in secure devices * Input: * R0 - Service ID * R1 - paramer list */ /* TODO: Re-evaluate the comment at the end regarding armv5 vs armv7 */ ENTRY(do_omap3_emu_romcode_call) PUSH {r4-r12, lr} @ Save all registers from ROM code! MOV r12, r0 @ Copy the Secure Service ID in R12 MOV r3, r1 @ Copy the pointer to va_list in R3 MOV r1, #0 @ Process ID - 0 MOV r2, #OMAP3_EMU_HAL_START_HAL_CRITICAL @ Copy the pointer @ to va_list in R3 MOV r6, #0xFF @ Indicate new Task call mcr p15, 0, r0, c7, c10, 4 @ DSB mcr p15, 0, r0, c7, c10, 5 @ DMB .word 0xe1600071 @ SMC #1 to call PPA service - hand assembled @ because we use -march=armv5 POP {r4-r12, pc} ENDPROC(do_omap3_emu_romcode_call) #if !defined(CONFIG_SYS_NAND_BOOT) && !defined(CONFIG_SYS_NAND_BOOT) /************************************************************************** * cpy_clk_code: relocates clock code into SRAM where its safer to execute * R1 = SRAM destination address. *************************************************************************/ ENTRY(cpy_clk_code) /* Copy DPLL code into SRAM */ adr r0, go_to_speed /* copy from start of go_to_speed... */ adr r2, lowlevel_init /* ... up to start of low_level_init */ next2: ldmia r0!, {r3 - r10} /* copy from source address [r0] */ stmia r1!, {r3 - r10} /* copy to target address [r1] */ cmp r0, r2 /* until source end address [r2] */ blo next2 mov pc, lr /* back to caller */ ENDPROC(cpy_clk_code) /* *************************************************************************** * go_to_speed: -Moves to bypass, -Commits clock dividers, -puts dpll at speed * -executed from SRAM. 
* R0 = CM_CLKEN_PLL-bypass value * R1 = CM_CLKSEL1_PLL-m, n, and divider values * R2 = CM_CLKSEL_CORE-divider values * R3 = CM_IDLEST_CKGEN - addr dpll lock wait * * Note: If core unlocks/relocks and SDRAM is running fast already it gets * confused. A reset of the controller gets it back. Taking away its * L3 when its not in self refresh seems bad for it. Normally, this * code runs from flash before SDR is init so that should be ok. ****************************************************************************/ ENTRY(go_to_speed) stmfd sp!, {r4 - r6} /* move into fast relock bypass */ ldr r4, pll_ctl_add str r0, [r4] wait1: ldr r5, [r3] /* get status */ and r5, r5, #0x1 /* isolate core status */ cmp r5, #0x1 /* still locked? */ beq wait1 /* if lock, loop */ /* set new dpll dividers _after_ in bypass */ ldr r5, pll_div_add1 str r1, [r5] /* set m, n, m2 */ ldr r5, pll_div_add2 str r2, [r5] /* set l3/l4/.. dividers*/ ldr r5, pll_div_add3 /* wkup */ ldr r2, pll_div_val3 /* rsm val */ str r2, [r5] ldr r5, pll_div_add4 /* gfx */ ldr r2, pll_div_val4 str r2, [r5] ldr r5, pll_div_add5 /* emu */ ldr r2, pll_div_val5 str r2, [r5] /* now prepare GPMC (flash) for new dpll speed */ /* flash needs to be stable when we jump back to it */ ldr r5, flash_cfg3_addr ldr r2, flash_cfg3_val str r2, [r5] ldr r5, flash_cfg4_addr ldr r2, flash_cfg4_val str r2, [r5] ldr r5, flash_cfg5_addr ldr r2, flash_cfg5_val str r2, [r5] ldr r5, flash_cfg1_addr ldr r2, [r5] orr r2, r2, #0x3 /* up gpmc divider */ str r2, [r5] /* lock DPLL3 and wait a bit */ orr r0, r0, #0x7 /* set up for lock mode */ str r0, [r4] /* lock */ nop /* ARM slow at this point working at sys_clk */ nop nop nop wait2: ldr r5, [r3] /* get status */ and r5, r5, #0x1 /* isolate core status */ cmp r5, #0x1 /* still locked? 
*/ bne wait2 /* if lock, loop */ nop nop nop nop ldmfd sp!, {r4 - r6} mov pc, lr /* back to caller, locked */ ENDPROC(go_to_speed) _go_to_speed: .word go_to_speed /* these constants need to be close for PIC code */ /* The Nor has to be in the Flash Base CS0 for this condition to happen */ flash_cfg1_addr: .word (GPMC_CONFIG_CS0_BASE + GPMC_CONFIG1) flash_cfg3_addr: .word (GPMC_CONFIG_CS0_BASE + GPMC_CONFIG3) flash_cfg3_val: .word STNOR_GPMC_CONFIG3 flash_cfg4_addr: .word (GPMC_CONFIG_CS0_BASE + GPMC_CONFIG4) flash_cfg4_val: .word STNOR_GPMC_CONFIG4 flash_cfg5_val: .word STNOR_GPMC_CONFIG5 flash_cfg5_addr: .word (GPMC_CONFIG_CS0_BASE + GPMC_CONFIG5) pll_ctl_add: .word CM_CLKEN_PLL pll_div_add1: .word CM_CLKSEL1_PLL pll_div_add2: .word CM_CLKSEL_CORE pll_div_add3: .word CM_CLKSEL_WKUP pll_div_val3: .word (WKUP_RSM << 1) pll_div_add4: .word CM_CLKSEL_GFX pll_div_val4: .word (GFX_DIV << 0) pll_div_add5: .word CM_CLKSEL1_EMU pll_div_val5: .word CLSEL1_EMU_VAL #endif ENTRY(lowlevel_init) ldr sp, SRAM_STACK str ip, [sp] /* stash ip register */ mov ip, lr /* save link reg across call */ #if !defined(CONFIG_SYS_NAND_BOOT) && !defined(CONFIG_SYS_ONENAND_BOOT) /* * No need to copy/exec the clock code - DPLL adjust already done * in NAND/oneNAND Boot. */ ldr r1, =SRAM_CLK_CODE bl cpy_clk_code #endif /* NAND Boot */ mov lr, ip /* restore link reg */ ldr ip, [sp] /* restore save ip */ /* tail-call s_init to setup pll, mux, memory */ b s_init ENDPROC(lowlevel_init) /* the literal pools origin */ .ltorg REG_CONTROL_STATUS: .word CONTROL_STATUS SRAM_STACK: .word LOW_LEVEL_SRAM_STACK /* DPLL(1-4) PARAM TABLES */ /* * Each of the tables has M, N, FREQSEL, M2 values defined for nominal * OPP (1.2V). The fields are defined according to dpll_param struct (clock.c). * The values are defined for all possible sysclk and for ES1 and ES2. 
*/ mpu_dpll_param: /* 12MHz */ /* ES1 */ .word MPU_M_12_ES1, MPU_N_12_ES1, MPU_FSEL_12_ES1, MPU_M2_12_ES1 /* ES2 */ .word MPU_M_12_ES2, MPU_N_12_ES2, MPU_FSEL_12_ES2, MPU_M2_ES2 /* 3410 */ .word MPU_M_12, MPU_N_12, MPU_FSEL_12, MPU_M2_12 /* 13MHz */ /* ES1 */ .word MPU_M_13_ES1, MPU_N_13_ES1, MPU_FSEL_13_ES1, MPU_M2_13_ES1 /* ES2 */ .word MPU_M_13_ES2, MPU_N_13_ES2, MPU_FSEL_13_ES2, MPU_M2_13_ES2 /* 3410 */ .word MPU_M_13, MPU_N_13, MPU_FSEL_13, MPU_M2_13 /* 19.2MHz */ /* ES1 */ .word MPU_M_19P2_ES1, MPU_N_19P2_ES1, MPU_FSEL_19P2_ES1, MPU_M2_19P2_ES1 /* ES2 */ .word MPU_M_19P2_ES2, MPU_N_19P2_ES2, MPU_FSEL_19P2_ES2, MPU_M2_19P2_ES2 /* 3410 */ .word MPU_M_19P2, MPU_N_19P2, MPU_FSEL_19P2, MPU_M2_19P2 /* 26MHz */ /* ES1 */ .word MPU_M_26_ES1, MPU_N_26_ES1, MPU_FSEL_26_ES1, MPU_M2_26_ES1 /* ES2 */ .word MPU_M_26_ES2, MPU_N_26_ES2, MPU_FSEL_26_ES2, MPU_M2_26_ES2 /* 3410 */ .word MPU_M_26, MPU_N_26, MPU_FSEL_26, MPU_M2_26 /* 38.4MHz */ /* ES1 */ .word MPU_M_38P4_ES1, MPU_N_38P4_ES1, MPU_FSEL_38P4_ES1, MPU_M2_38P4_ES1 /* ES2 */ .word MPU_M_38P4_ES2, MPU_N_38P4_ES2, MPU_FSEL_38P4_ES2, MPU_M2_38P4_ES2 /* 3410 */ .word MPU_M_38P4, MPU_N_38P4, MPU_FSEL_38P4, MPU_M2_38P4 .globl get_mpu_dpll_param get_mpu_dpll_param: adr r0, mpu_dpll_param mov pc, lr iva_dpll_param: /* 12MHz */ /* ES1 */ .word IVA_M_12_ES1, IVA_N_12_ES1, IVA_FSEL_12_ES1, IVA_M2_12_ES1 /* ES2 */ .word IVA_M_12_ES2, IVA_N_12_ES2, IVA_FSEL_12_ES2, IVA_M2_12_ES2 /* 3410 */ .word IVA_M_12, IVA_N_12, IVA_FSEL_12, IVA_M2_12 /* 13MHz */ /* ES1 */ .word IVA_M_13_ES1, IVA_N_13_ES1, IVA_FSEL_13_ES1, IVA_M2_13_ES1 /* ES2 */ .word IVA_M_13_ES2, IVA_N_13_ES2, IVA_FSEL_13_ES2, IVA_M2_13_ES2 /* 3410 */ .word IVA_M_13, IVA_N_13, IVA_FSEL_13, IVA_M2_13 /* 19.2MHz */ /* ES1 */ .word IVA_M_19P2_ES1, IVA_N_19P2_ES1, IVA_FSEL_19P2_ES1, IVA_M2_19P2_ES1 /* ES2 */ .word IVA_M_19P2_ES2, IVA_N_19P2_ES2, IVA_FSEL_19P2_ES2, IVA_M2_19P2_ES2 /* 3410 */ .word IVA_M_19P2, IVA_N_19P2, IVA_FSEL_19P2, IVA_M2_19P2 /* 26MHz */ /* ES1 */ .word 
IVA_M_26_ES1, IVA_N_26_ES1, IVA_FSEL_26_ES1, IVA_M2_26_ES1 /* ES2 */ .word IVA_M_26_ES2, IVA_N_26_ES2, IVA_FSEL_26_ES2, IVA_M2_26_ES2 /* 3410 */ .word IVA_M_26, IVA_N_26, IVA_FSEL_26, IVA_M2_26 /* 38.4MHz */ /* ES1 */ .word IVA_M_38P4_ES1, IVA_N_38P4_ES1, IVA_FSEL_38P4_ES1, IVA_M2_38P4_ES1 /* ES2 */ .word IVA_M_38P4_ES2, IVA_N_38P4_ES2, IVA_FSEL_38P4_ES2, IVA_M2_38P4_ES2 /* 3410 */ .word IVA_M_38P4, IVA_N_38P4, IVA_FSEL_38P4, IVA_M2_38P4 .globl get_iva_dpll_param get_iva_dpll_param: adr r0, iva_dpll_param mov pc, lr /* Core DPLL targets for L3 at 166 & L133 */ core_dpll_param: /* 12MHz */ /* ES1 */ .word CORE_M_12_ES1, CORE_N_12_ES1, CORE_FSL_12_ES1, CORE_M2_12_ES1 /* ES2 */ .word CORE_M_12, CORE_N_12, CORE_FSEL_12, CORE_M2_12 /* 3410 */ .word CORE_M_12, CORE_N_12, CORE_FSEL_12, CORE_M2_12 /* 13MHz */ /* ES1 */ .word CORE_M_13_ES1, CORE_N_13_ES1, CORE_FSL_13_ES1, CORE_M2_13_ES1 /* ES2 */ .word CORE_M_13, CORE_N_13, CORE_FSEL_13, CORE_M2_13 /* 3410 */ .word CORE_M_13, CORE_N_13, CORE_FSEL_13, CORE_M2_13 /* 19.2MHz */ /* ES1 */ .word CORE_M_19P2_ES1, CORE_N_19P2_ES1, CORE_FSL_19P2_ES1, CORE_M2_19P2_ES1 /* ES2 */ .word CORE_M_19P2, CORE_N_19P2, CORE_FSEL_19P2, CORE_M2_19P2 /* 3410 */ .word CORE_M_19P2, CORE_N_19P2, CORE_FSEL_19P2, CORE_M2_19P2 /* 26MHz */ /* ES1 */ .word CORE_M_26_ES1, CORE_N_26_ES1, CORE_FSL_26_ES1, CORE_M2_26_ES1 /* ES2 */ .word CORE_M_26, CORE_N_26, CORE_FSEL_26, CORE_M2_26 /* 3410 */ .word CORE_M_26, CORE_N_26, CORE_FSEL_26, CORE_M2_26 /* 38.4MHz */ /* ES1 */ .word CORE_M_38P4_ES1, CORE_N_38P4_ES1, CORE_FSL_38P4_ES1, CORE_M2_38P4_ES1 /* ES2 */ .word CORE_M_38P4, CORE_N_38P4, CORE_FSEL_38P4, CORE_M2_38P4 /* 3410 */ .word CORE_M_38P4, CORE_N_38P4, CORE_FSEL_38P4, CORE_M2_38P4 .globl get_core_dpll_param get_core_dpll_param: adr r0, core_dpll_param mov pc, lr /* PER DPLL values are same for both ES1 and ES2 */ per_dpll_param: /* 12MHz */ .word PER_M_12, PER_N_12, PER_FSEL_12, PER_M2_12 /* 13MHz */ .word PER_M_13, PER_N_13, PER_FSEL_13, PER_M2_13 /* 
19.2MHz */ .word PER_M_19P2, PER_N_19P2, PER_FSEL_19P2, PER_M2_19P2 /* 26MHz */ .word PER_M_26, PER_N_26, PER_FSEL_26, PER_M2_26 /* 38.4MHz */ .word PER_M_38P4, PER_N_38P4, PER_FSEL_38P4, PER_M2_38P4 .globl get_per_dpll_param get_per_dpll_param: adr r0, per_dpll_param mov pc, lr /* PER2 DPLL values */ per2_dpll_param: /* 12MHz */ .word PER2_M_12, PER2_N_12, PER2_FSEL_12, PER2_M2_12 /* 13MHz */ .word PER2_M_13, PER2_N_13, PER2_FSEL_13, PER2_M2_13 /* 19.2MHz */ .word PER2_M_19P2, PER2_N_19P2, PER2_FSEL_19P2, PER2_M2_19P2 /* 26MHz */ .word PER2_M_26, PER2_N_26, PER2_FSEL_26, PER2_M2_26 /* 38.4MHz */ .word PER2_M_38P4, PER2_N_38P4, PER2_FSEL_38P4, PER2_M2_38P4 .globl get_per2_dpll_param get_per2_dpll_param: adr r0, per2_dpll_param mov pc, lr /* * Tables for 36XX/37XX devices * */ mpu_36x_dpll_param: /* 12MHz */ .word 50, 0, 0, 1 /* 13MHz */ .word 600, 12, 0, 1 /* 19.2MHz */ .word 125, 3, 0, 1 /* 26MHz */ .word 300, 12, 0, 1 /* 38.4MHz */ .word 125, 7, 0, 1 iva_36x_dpll_param: /* 12MHz */ .word 130, 2, 0, 1 /* 13MHz */ .word 20, 0, 0, 1 /* 19.2MHz */ .word 325, 11, 0, 1 /* 26MHz */ .word 10, 0, 0, 1 /* 38.4MHz */ .word 325, 23, 0, 1 core_36x_dpll_param: /* 12MHz */ .word 100, 2, 0, 1 /* 13MHz */ .word 400, 12, 0, 1 /* 19.2MHz */ .word 375, 17, 0, 1 /* 26MHz */ .word 200, 12, 0, 1 /* 38.4MHz */ .word 375, 35, 0, 1 per_36x_dpll_param: /* SYSCLK M N M2 M3 M4 M5 M6 m2DIV */ .word 12000, 360, 4, 9, 16, 5, 4, 3, 1 .word 13000, 864, 12, 9, 16, 9, 4, 3, 1 .word 19200, 360, 7, 9, 16, 5, 4, 3, 1 .word 26000, 432, 12, 9, 16, 9, 4, 3, 1 .word 38400, 360, 15, 9, 16, 5, 4, 3, 1 per2_36x_dpll_param: /* 12MHz */ .word PER2_36XX_M_12, PER2_36XX_N_12, 0, PER2_36XX_M2_12 /* 13MHz */ .word PER2_36XX_M_13, PER2_36XX_N_13, 0, PER2_36XX_M2_13 /* 19.2MHz */ .word PER2_36XX_M_19P2, PER2_36XX_N_19P2, 0, PER2_36XX_M2_19P2 /* 26MHz */ .word PER2_36XX_M_26, PER2_36XX_N_26, 0, PER2_36XX_M2_26 /* 38.4MHz */ .word PER2_36XX_M_38P4, PER2_36XX_N_38P4, 0, PER2_36XX_M2_38P4 ENTRY(get_36x_mpu_dpll_param) 
adr r0, mpu_36x_dpll_param mov pc, lr ENDPROC(get_36x_mpu_dpll_param) ENTRY(get_36x_iva_dpll_param) adr r0, iva_36x_dpll_param mov pc, lr ENDPROC(get_36x_iva_dpll_param) ENTRY(get_36x_core_dpll_param) adr r0, core_36x_dpll_param mov pc, lr ENDPROC(get_36x_core_dpll_param) ENTRY(get_36x_per_dpll_param) adr r0, per_36x_dpll_param mov pc, lr ENDPROC(get_36x_per_dpll_param) ENTRY(get_36x_per2_dpll_param) adr r0, per2_36x_dpll_param mov pc, lr ENDPROC(get_36x_per2_dpll_param)
4ms/stm32mp1-baremetal
2,918
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/mach-omap2/omap5/sec_entry_cpu1.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Secure entry function for CPU Core #1 * * (C) Copyright 2016 * Texas Instruments, <www.ti.com> * * Author : * Harinarayan Bhatta <harinarayan@ti.com> */ #include <config.h> #include <asm/arch/omap.h> #include <asm/omap_common.h> #include <linux/linkage.h> .arch_extension sec #if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) .global flush_dcache_range #endif #define AUX_CORE_BOOT_0 0x48281800 #define AUX_CORE_BOOT_1 0x48281804 #ifdef CONFIG_DRA7XX /* DRA7xx ROM code function "startup_BootSlave". This function is where CPU1 * waits on WFE, polling on AUX_CORE_BOOT_x registers. * This address is same for J6 and J6 Eco. */ #define ROM_FXN_STARTUP_BOOTSLAVE 0x00038a64 #endif /* Assembly core where CPU1 is woken up into * No need to save-restore registers, does not use stack. */ LENTRY(cpu1_entry) ldr r4, =omap_smc_sec_cpu1_args ldm r4, {r0,r1,r2,r3} @ Retrieve args mov r6, #0xFF @ Indicate new Task call mov r12, #0x00 @ Secure Service ID in R12 dsb dmb smc 0 @ SMC #0 to enter monitor mode b .Lend @ exit at end of the service execution nop @ In case of IRQ happening in Secure, then ARM will branch here. @ At that moment, IRQ will be pending and ARM will jump to Non Secure @ IRQ handler mov r12, #0xFE dsb dmb smc 0 @ SMC #0 to enter monitor mode .Lend: ldr r4, =omap_smc_sec_cpu1_args str r0, [r4, #0x10] @ save return value ldr r4, =AUX_CORE_BOOT_0 mov r5, #0x0 str r5, [r4] ldr r4, =ROM_FXN_STARTUP_BOOTSLAVE sev @ Tell CPU0 we are done bx r4 @ Jump back to ROM END(cpu1_entry) /* * u32 omap_smc_sec_cpu1(u32 service, u32 proc_id, u32 flag, u32 *params); * * Makes a secure ROM/PPA call on CPU Core #1 on supported platforms. * Assumes that CPU #1 is waiting in ROM code and not yet woken up or used by * u-boot. 
*/ ENTRY(omap_smc_sec_cpu1) push {r4, r5, lr} ldr r4, =omap_smc_sec_cpu1_args stm r4, {r0,r1,r2,r3} @ Save args to memory #if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) mov r0, r4 mov r1, #CONFIG_SYS_CACHELINE_SIZE add r1, r0, r1 @ dcache is not enabled on CPU1, so blx flush_dcache_range @ flush the cache on args buffer #endif ldr r4, =AUX_CORE_BOOT_1 ldr r5, =cpu1_entry str r5, [r4] @ Setup CPU1 entry function ldr r4, =AUX_CORE_BOOT_0 mov r5, #0x10 str r5, [r4] @ Tell ROM to exit while loop sev @ Wake up CPU1 .Lwait: wfe @ Wait for CPU1 to finish nop ldr r5, [r4] @ Check if CPU1 is done cmp r5, #0 bne .Lwait ldr r4, =omap_smc_sec_cpu1_args ldr r0, [r4, #0x10] @ Retrieve return value pop {r4, r5, pc} ENDPROC(omap_smc_sec_cpu1) /* * Buffer to save function arguments and return value for omap_smc_sec_cpu1 */ .section .data omap_smc_sec_cpu1_args: #if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) .balign CONFIG_SYS_CACHELINE_SIZE .rept CONFIG_SYS_CACHELINE_SIZE/4 .word 0 .endr #else .rept 5 .word 0 .endr #endif END(omap_smc_sec_cpu1_args)
4ms/stm32mp1-baremetal
1,199
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/mach-imx/mx7/psci-suspend.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright 2018 NXP */ #include <config.h> #include <linux/linkage.h> #include <asm/armv7.h> #include <asm/psci.h> .pushsection ._secure.text, "ax" .arch_extension sec .globl v7_invalidate_l1 v7_invalidate_l1: mov r0, #0 mcr p15, 2, r0, c0, c0, 0 mrc p15, 1, r0, c0, c0, 0 movw r1, #0x7fff and r2, r1, r0, lsr #13 movw r1, #0x3ff and r3, r1, r0, lsr #3 @ NumWays - 1 add r2, r2, #1 @ NumSets and r0, r0, #0x7 add r0, r0, #4 @ SetShift clz r1, r3 @ WayShift add r4, r3, #1 @ NumWays 1: sub r2, r2, #1 @ NumSets-- mov r3, r4 @ Temp = NumWays 2: subs r3, r3, #1 @ Temp-- mov r5, r3, lsl r1 mov r6, r2, lsl r0 orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift) mcr p15, 0, r5, c7, c6, 2 bgt 2b cmp r2, #0 bgt 1b dsb st isb mov pc, lr .globl psci_system_resume psci_system_resume: mov sp, r0 /* invalidate L1 I-cache first */ mov r6, #0x0 mcr p15, 0, r6, c7, c5, 0 mcr p15, 0, r6, c7, c5, 6 /* enable the Icache and branch prediction */ mov r6, #0x1800 mcr p15, 0, r6, c1, c0, 0 isb bl v7_invalidate_l1 b imx_system_resume .popsection
4ms/stm32mp1-baremetal
1,255
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/mach-imx/imx8m/lowlevel_init.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright 2017 NXP */ #include <config.h> .align 8 .global rom_pointer rom_pointer: .space 256 /* * Routine: save_boot_params (called after reset from start.S) */ .global save_boot_params save_boot_params: /* The firmware provided ATAG/FDT address can be found in r2/x0 */ adr x0, rom_pointer stp x1, x2, [x0], #16 stp x3, x4, [x0], #16 stp x5, x6, [x0], #16 stp x7, x8, [x0], #16 stp x9, x10, [x0], #16 stp x11, x12, [x0], #16 stp x13, x14, [x0], #16 stp x15, x16, [x0], #16 stp x17, x18, [x0], #16 stp x19, x20, [x0], #16 stp x21, x22, [x0], #16 stp x23, x24, [x0], #16 stp x25, x26, [x0], #16 stp x27, x28, [x0], #16 stp x29, x30, [x0], #16 mov x30, sp str x30, [x0], #8 /* Returns */ b save_boot_params_ret .global restore_boot_params restore_boot_params: adr x0, rom_pointer ldp x1, x2, [x0], #16 ldp x3, x4, [x0], #16 ldp x5, x6, [x0], #16 ldp x7, x8, [x0], #16 ldp x9, x10, [x0], #16 ldp x11, x12, [x0], #16 ldp x13, x14, [x0], #16 ldp x15, x16, [x0], #16 ldp x17, x18, [x0], #16 ldp x19, x20, [x0], #16 ldp x21, x22, [x0], #16 ldp x23, x24, [x0], #16 ldp x25, x26, [x0], #16 ldp x27, x28, [x0], #16 ldp x29, x30, [x0], #16 ldr x0, [x0] mov sp, x0 ret
4ms/stm32mp1-baremetal
9,285
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/mach-imx/mx5/lowlevel_init.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2007, Guennadi Liakhovetski <lg@denx.de> * * (C) Copyright 2009 Freescale Semiconductor, Inc. */ #include <config.h> #include <asm/arch/imx-regs.h> #include <generated/asm-offsets.h> #include <linux/linkage.h> .section ".text.init", "x" .macro init_arm_erratum /* ARM erratum ID #468414 */ mrc 15, 0, r1, c1, c0, 1 orr r1, r1, #(1 << 5) /* enable L1NEON bit */ mcr 15, 0, r1, c1, c0, 1 .endm /* * L2CC Cache setup/invalidation/disable */ .macro init_l2cc /* explicitly disable L2 cache */ mrc 15, 0, r0, c1, c0, 1 bic r0, r0, #0x2 mcr 15, 0, r0, c1, c0, 1 /* reconfigure L2 cache aux control reg */ ldr r0, =0xC0 | /* tag RAM */ \ 0x4 | /* data RAM */ \ 1 << 24 | /* disable write allocate delay */ \ 1 << 23 | /* disable write allocate combine */ \ 1 << 22 /* disable write allocate */ #if defined(CONFIG_MX51) ldr r3, [r4, #ROM_SI_REV] cmp r3, #0x10 /* disable write combine for TO 2 and lower revs */ orrls r0, r0, #1 << 25 #endif mcr 15, 1, r0, c9, c0, 2 /* enable L2 cache */ mrc 15, 0, r0, c1, c0, 1 orr r0, r0, #2 mcr 15, 0, r0, c1, c0, 1 .endm /* init_l2cc */ /* AIPS setup - Only setup MPROTx registers. * The PACR default values are good.*/ .macro init_aips /* * Set all MPROTx to be non-bufferable, trusted for R/W, * not forced to user-mode. */ ldr r0, =AIPS1_BASE_ADDR ldr r1, =0x77777777 str r1, [r0, #0x0] str r1, [r0, #0x4] ldr r0, =AIPS2_BASE_ADDR str r1, [r0, #0x0] str r1, [r0, #0x4] /* * Clear the on and off peripheral modules Supervisor Protect bit * for SDMA to access them. 
Did not change the AIPS control registers * (offset 0x20) access type */ .endm /* init_aips */ /* M4IF setup */ .macro init_m4if #ifdef CONFIG_MX51 /* VPU and IPU given higher priority (0x4) * IPU accesses with ID=0x1 given highest priority (=0xA) */ ldr r0, =M4IF_BASE_ADDR ldr r1, =0x00000203 str r1, [r0, #0x40] str r4, [r0, #0x44] ldr r1, =0x00120125 str r1, [r0, #0x9C] ldr r1, =0x001901A3 str r1, [r0, #0x48] #endif .endm /* init_m4if */ .macro setup_pll pll, freq ldr r0, =\pll adr r2, W_DP_\freq bl setup_pll_func .endm #define W_DP_OP 0 #define W_DP_MFD 4 #define W_DP_MFN 8 setup_pll_func: ldr r1, =0x00001232 str r1, [r0, #PLL_DP_CTL] /* Set DPLL ON (set UPEN bit): BRMO=1 */ mov r1, #0x2 str r1, [r0, #PLL_DP_CONFIG] /* Enable auto-restart AREN bit */ ldr r1, [r2, #W_DP_OP] str r1, [r0, #PLL_DP_OP] str r1, [r0, #PLL_DP_HFS_OP] ldr r1, [r2, #W_DP_MFD] str r1, [r0, #PLL_DP_MFD] str r1, [r0, #PLL_DP_HFS_MFD] ldr r1, [r2, #W_DP_MFN] str r1, [r0, #PLL_DP_MFN] str r1, [r0, #PLL_DP_HFS_MFN] ldr r1, =0x00001232 str r1, [r0, #PLL_DP_CTL] 1: ldr r1, [r0, #PLL_DP_CTL] ands r1, r1, #0x1 beq 1b /* r10 saved upper lr */ mov pc, lr .macro setup_pll_errata pll, freq ldr r2, =\pll str r4, [r2, #PLL_DP_CONFIG] /* Disable auto-restart AREN bit */ ldr r1, =0x00001236 str r1, [r2, #PLL_DP_CTL] /* Restart PLL with PLM=1 */ 1: ldr r1, [r2, #PLL_DP_CTL] /* Wait for lock */ ands r1, r1, #0x1 beq 1b ldr r5, \freq str r5, [r2, #PLL_DP_MFN] /* Modify MFN value */ str r5, [r2, #PLL_DP_HFS_MFN] mov r1, #0x1 str r1, [r2, #PLL_DP_CONFIG] /* Reload MFN value */ 2: ldr r1, [r2, #PLL_DP_CONFIG] tst r1, #1 bne 2b ldr r1, =100 /* Wait at least 4 us */ 3: subs r1, r1, #1 bge 3b mov r1, #0x2 str r1, [r2, #PLL_DP_CONFIG] /* Enable auto-restart AREN bit */ .endm .macro init_clock #if defined (CONFIG_MX51) ldr r0, =CCM_BASE_ADDR /* Gate of clocks to the peripherals first */ ldr r1, =0x3FFFFFFF str r1, [r0, #CLKCTL_CCGR0] str r4, [r0, #CLKCTL_CCGR1] str r4, [r0, #CLKCTL_CCGR2] str r4, [r0, #CLKCTL_CCGR3] 
ldr r1, =0x00030000 str r1, [r0, #CLKCTL_CCGR4] ldr r1, =0x00FFF030 str r1, [r0, #CLKCTL_CCGR5] ldr r1, =0x00000300 str r1, [r0, #CLKCTL_CCGR6] /* Disable IPU and HSC dividers */ mov r1, #0x60000 str r1, [r0, #CLKCTL_CCDR] /* Make sure to switch the DDR away from PLL 1 */ ldr r1, =0x19239145 str r1, [r0, #CLKCTL_CBCDR] /* make sure divider effective */ 1: ldr r1, [r0, #CLKCTL_CDHIPR] cmp r1, #0x0 bne 1b /* Switch ARM to step clock */ mov r1, #0x4 str r1, [r0, #CLKCTL_CCSR] #if defined(CONFIG_MX51_PLL_ERRATA) setup_pll PLL1_BASE_ADDR, 864 setup_pll_errata PLL1_BASE_ADDR, W_DP_MFN_800_DIT #else setup_pll PLL1_BASE_ADDR, 800 #endif setup_pll PLL3_BASE_ADDR, 665 /* Switch peripheral to PLL 3 */ ldr r0, =CCM_BASE_ADDR ldr r1, =0x000010C0 | CONFIG_SYS_DDR_CLKSEL str r1, [r0, #CLKCTL_CBCMR] ldr r1, =0x13239145 str r1, [r0, #CLKCTL_CBCDR] setup_pll PLL2_BASE_ADDR, 665 /* Switch peripheral to PLL2 */ ldr r0, =CCM_BASE_ADDR ldr r1, =0x19239145 str r1, [r0, #CLKCTL_CBCDR] ldr r1, =0x000020C0 | CONFIG_SYS_DDR_CLKSEL str r1, [r0, #CLKCTL_CBCMR] setup_pll PLL3_BASE_ADDR, 216 /* Set the platform clock dividers */ ldr r0, =ARM_BASE_ADDR ldr r1, =0x00000725 str r1, [r0, #0x14] ldr r0, =CCM_BASE_ADDR /* Run 3.0 at Full speed, for other TO's wait till we increase VDDGP */ ldr r3, [r4, #ROM_SI_REV] cmp r3, #0x10 movls r1, #0x1 movhi r1, #0 str r1, [r0, #CLKCTL_CACRR] /* Switch ARM back to PLL 1 */ str r4, [r0, #CLKCTL_CCSR] /* setup the rest */ /* Use lp_apm (24MHz) source for perclk */ ldr r1, =0x000020C2 | CONFIG_SYS_DDR_CLKSEL str r1, [r0, #CLKCTL_CBCMR] /* ddr clock from PLL 1, all perclk dividers are 1 since using 24MHz */ ldr r1, =CONFIG_SYS_CLKTL_CBCDR str r1, [r0, #CLKCTL_CBCDR] /* Restore the default values in the Gate registers */ ldr r1, =0xFFFFFFFF str r1, [r0, #CLKCTL_CCGR0] str r1, [r0, #CLKCTL_CCGR1] str r1, [r0, #CLKCTL_CCGR2] str r1, [r0, #CLKCTL_CCGR3] str r1, [r0, #CLKCTL_CCGR4] str r1, [r0, #CLKCTL_CCGR5] str r1, [r0, #CLKCTL_CCGR6] /* Use PLL 2 for UART's, get 
66.5MHz from it */ ldr r1, =0xA5A2A020 str r1, [r0, #CLKCTL_CSCMR1] ldr r1, =0x00C30321 str r1, [r0, #CLKCTL_CSCDR1] /* make sure divider effective */ 1: ldr r1, [r0, #CLKCTL_CDHIPR] cmp r1, #0x0 bne 1b str r4, [r0, #CLKCTL_CCDR] /* for cko - for ARM div by 8 */ mov r1, #0x000A0000 add r1, r1, #0x00000F0 str r1, [r0, #CLKCTL_CCOSR] #else /* CONFIG_MX53 */ ldr r0, =CCM_BASE_ADDR /* Gate of clocks to the peripherals first */ ldr r1, =0x3FFFFFFF str r1, [r0, #CLKCTL_CCGR0] str r4, [r0, #CLKCTL_CCGR1] str r4, [r0, #CLKCTL_CCGR2] str r4, [r0, #CLKCTL_CCGR3] str r4, [r0, #CLKCTL_CCGR7] ldr r1, =0x00030000 str r1, [r0, #CLKCTL_CCGR4] ldr r1, =0x00FFF030 str r1, [r0, #CLKCTL_CCGR5] ldr r1, =0x0F00030F str r1, [r0, #CLKCTL_CCGR6] /* Switch ARM to step clock */ mov r1, #0x4 str r1, [r0, #CLKCTL_CCSR] setup_pll PLL1_BASE_ADDR, 800 setup_pll PLL3_BASE_ADDR, 400 /* Switch peripheral to PLL3 */ ldr r0, =CCM_BASE_ADDR ldr r1, =0x00015154 str r1, [r0, #CLKCTL_CBCMR] ldr r1, =0x02898945 str r1, [r0, #CLKCTL_CBCDR] /* make sure change is effective */ 1: ldr r1, [r0, #CLKCTL_CDHIPR] cmp r1, #0x0 bne 1b setup_pll PLL2_BASE_ADDR, 400 /* Switch peripheral to PLL2 */ ldr r0, =CCM_BASE_ADDR ldr r1, =0x00888945 str r1, [r0, #CLKCTL_CBCDR] ldr r1, =0x00016154 str r1, [r0, #CLKCTL_CBCMR] /*change uart clk parent to pll2*/ ldr r1, [r0, #CLKCTL_CSCMR1] and r1, r1, #0xfcffffff orr r1, r1, #0x01000000 str r1, [r0, #CLKCTL_CSCMR1] /* make sure change is effective */ 1: ldr r1, [r0, #CLKCTL_CDHIPR] cmp r1, #0x0 bne 1b setup_pll PLL3_BASE_ADDR, 216 setup_pll PLL4_BASE_ADDR, 455 /* Set the platform clock dividers */ ldr r0, =ARM_BASE_ADDR ldr r1, =0x00000124 str r1, [r0, #0x14] ldr r0, =CCM_BASE_ADDR mov r1, #0 str r1, [r0, #CLKCTL_CACRR] /* Switch ARM back to PLL 1. 
*/ mov r1, #0x0 str r1, [r0, #CLKCTL_CCSR] /* make uart div=6 */ ldr r1, [r0, #CLKCTL_CSCDR1] and r1, r1, #0xffffffc0 orr r1, r1, #0x0a str r1, [r0, #CLKCTL_CSCDR1] /* Restore the default values in the Gate registers */ ldr r1, =0xFFFFFFFF str r1, [r0, #CLKCTL_CCGR0] str r1, [r0, #CLKCTL_CCGR1] str r1, [r0, #CLKCTL_CCGR2] str r1, [r0, #CLKCTL_CCGR3] str r1, [r0, #CLKCTL_CCGR4] str r1, [r0, #CLKCTL_CCGR5] str r1, [r0, #CLKCTL_CCGR6] str r1, [r0, #CLKCTL_CCGR7] mov r1, #0x00000 str r1, [r0, #CLKCTL_CCDR] /* for cko - for ARM div by 8 */ mov r1, #0x000A0000 add r1, r1, #0x00000F0 str r1, [r0, #CLKCTL_CCOSR] #endif /* CONFIG_MX53 */ .endm ENTRY(lowlevel_init) mov r10, lr mov r4, #0 /* Fix R4 to 0 */ #if defined(CONFIG_SYS_MAIN_PWR_ON) ldr r0, =GPIO1_BASE_ADDR ldr r1, [r0, #0x0] orr r1, r1, #1 << 23 str r1, [r0, #0x0] ldr r1, [r0, #0x4] orr r1, r1, #1 << 23 str r1, [r0, #0x4] #endif init_arm_erratum init_l2cc init_aips init_m4if init_clock mov pc, r10 ENDPROC(lowlevel_init) /* Board level setting value */ #if defined(CONFIG_MX51_PLL_ERRATA) W_DP_864: .word DP_OP_864 .word DP_MFD_864 .word DP_MFN_864 W_DP_MFN_800_DIT: .word DP_MFN_800_DIT #else W_DP_800: .word DP_OP_800 .word DP_MFD_800 .word DP_MFN_800 #endif #if defined(CONFIG_MX51) W_DP_665: .word DP_OP_665 .word DP_MFD_665 .word DP_MFN_665 #endif W_DP_216: .word DP_OP_216 .word DP_MFD_216 .word DP_MFN_216 W_DP_400: .word DP_OP_400 .word DP_MFD_400 .word DP_MFN_400 W_DP_455: .word DP_OP_455 .word DP_MFD_455 .word DP_MFN_455
4ms/stm32mp1-baremetal
5,149
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/armv7/cache_v7_asm.S
/* SPDX-License-Identifier: GPL-2.0+ */ #include <config.h> #include <linux/linkage.h> #include <linux/sizes.h> #include <asm/system.h> #if CONFIG_IS_ENABLED(SYS_THUMB_BUILD) #define ARM(x...) #define THUMB(x...) x #else #define ARM(x...) x #define THUMB(x...) #endif /* * v7_flush_dcache_all() * * Flush the whole D-cache. * * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode) * * Note: copied from arch/arm/mm/cache-v7.S of Linux 4.4 */ ENTRY(__v7_flush_dcache_all) dmb @ ensure ordering with previous memory accesses mrc p15, 1, r0, c0, c0, 1 @ read clidr mov r3, r0, lsr #23 @ move LoC into position ands r3, r3, #7 << 1 @ extract LoC*2 from clidr beq finished @ if loc is 0, then no need to clean start_flush_levels: mov r10, #0 @ start clean at cache level 0 flush_levels: add r2, r10, r10, lsr #1 @ work out 3x current cache level mov r1, r0, lsr r2 @ extract cache type bits from clidr and r1, r1, #7 @ mask of the bits for current cache only cmp r1, #2 @ see what cache we have at this level blt skip @ skip if no cache, or just i-cache mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr isb @ isb to sych the new cssr&csidr mrc p15, 1, r1, c0, c0, 0 @ read the new csidr and r2, r1, #7 @ extract the length of the cache lines add r2, r2, #4 @ add 4 (line length offset) movw r4, #0x3ff ands r4, r4, r1, lsr #3 @ find maximum number on the way size clz r5, r4 @ find bit position of way size increment movw r7, #0x7fff ands r7, r7, r1, lsr #13 @ extract max number of the index size loop1: mov r9, r7 @ create working copy of max index loop2: ARM( orr r11, r10, r4, lsl r5 ) @ factor way and cache number into r11 THUMB( lsl r6, r4, r5 ) THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11 ARM( orr r11, r11, r9, lsl r2 ) @ factor index number into r11 THUMB( lsl r6, r9, r2 ) THUMB( orr r11, r11, r6 ) @ factor index number into r11 mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way subs r9, r9, #1 @ decrement the index bge loop2 subs r4, r4, 
#1 @ decrement the way bge loop1 skip: add r10, r10, #2 @ increment cache number cmp r3, r10 bgt flush_levels finished: mov r10, #0 @ swith back to cache level 0 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr dsb st isb bx lr ENDPROC(__v7_flush_dcache_all) ENTRY(v7_flush_dcache_all) ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} ) THUMB( stmfd sp!, {r4-r7, r9-r11, lr} ) bl __v7_flush_dcache_all ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} ) THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} ) bx lr ENDPROC(v7_flush_dcache_all) /* * v7_invalidate_dcache_all() * * Invalidate the whole D-cache. * * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode) * * Note: copied from __v7_flush_dcache_all above with * mcr p15, 0, r11, c7, c14, 2 * Replaced with: * mcr p15, 0, r11, c7, c6, 2 */ ENTRY(__v7_invalidate_dcache_all) dmb @ ensure ordering with previous memory accesses mrc p15, 1, r0, c0, c0, 1 @ read clidr mov r3, r0, lsr #23 @ move LoC into position ands r3, r3, #7 << 1 @ extract LoC*2 from clidr beq inval_finished @ if loc is 0, then no need to clean mov r10, #0 @ start clean at cache level 0 inval_levels: add r2, r10, r10, lsr #1 @ work out 3x current cache level mov r1, r0, lsr r2 @ extract cache type bits from clidr and r1, r1, #7 @ mask of the bits for current cache only cmp r1, #2 @ see what cache we have at this level blt inval_skip @ skip if no cache, or just i-cache mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr isb @ isb to sych the new cssr&csidr mrc p15, 1, r1, c0, c0, 0 @ read the new csidr and r2, r1, #7 @ extract the length of the cache lines add r2, r2, #4 @ add 4 (line length offset) movw r4, #0x3ff ands r4, r4, r1, lsr #3 @ find maximum number on the way size clz r5, r4 @ find bit position of way size increment movw r7, #0x7fff ands r7, r7, r1, lsr #13 @ extract max number of the index size inval_loop1: mov r9, r7 @ create working copy of max index inval_loop2: ARM( orr r11, r10, r4, lsl r5 ) @ factor way and cache number into r11 
THUMB( lsl r6, r4, r5 ) THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11 ARM( orr r11, r11, r9, lsl r2 ) @ factor index number into r11 THUMB( lsl r6, r9, r2 ) THUMB( orr r11, r11, r6 ) @ factor index number into r11 mcr p15, 0, r11, c7, c6, 2 @ invalidate by set/way subs r9, r9, #1 @ decrement the index bge inval_loop2 subs r4, r4, #1 @ decrement the way bge inval_loop1 inval_skip: add r10, r10, #2 @ increment cache number cmp r3, r10 bgt inval_levels inval_finished: mov r10, #0 @ swith back to cache level 0 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr dsb st isb bx lr ENDPROC(__v7_invalidate_dcache_all) ENTRY(v7_invalidate_dcache_all) ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} ) THUMB( stmfd sp!, {r4-r7, r9-r11, lr} ) bl __v7_invalidate_dcache_all ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} ) THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} ) bx lr ENDPROC(v7_invalidate_dcache_all)
4ms/stm32mp1-baremetal
9,096
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/armv7/psci.S
/* * Copyright (C) 2013,2014 - ARM Ltd * Author: Marc Zyngier <marc.zyngier@arm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <config.h> #include <linux/linkage.h> #include <asm/macro.h> #include <asm/psci.h> .pushsection ._secure.text, "ax" .arch_extension sec .align 5 .globl _psci_vectors _psci_vectors: b default_psci_vector @ reset b default_psci_vector @ undef b _smc_psci @ smc b default_psci_vector @ pabort b default_psci_vector @ dabort b default_psci_vector @ hyp b default_psci_vector @ irq b psci_fiq_enter @ fiq ENTRY(psci_fiq_enter) movs pc, lr ENDPROC(psci_fiq_enter) .weak psci_fiq_enter ENTRY(default_psci_vector) movs pc, lr ENDPROC(default_psci_vector) .weak default_psci_vector ENTRY(psci_version) ENTRY(psci_cpu_suspend) ENTRY(psci_cpu_off) ENTRY(psci_cpu_on) ENTRY(psci_affinity_info) ENTRY(psci_migrate) ENTRY(psci_migrate_info_type) ENTRY(psci_migrate_info_up_cpu) ENTRY(psci_system_off) ENTRY(psci_system_reset) ENTRY(psci_features) ENTRY(psci_cpu_freeze) ENTRY(psci_cpu_default_suspend) ENTRY(psci_node_hw_state) ENTRY(psci_system_suspend) ENTRY(psci_set_suspend_mode) ENTRY(psi_stat_residency) ENTRY(psci_stat_count) mov r0, #ARM_PSCI_RET_NI @ Return -1 (Not Implemented) mov pc, lr ENDPROC(psci_stat_count) ENDPROC(psi_stat_residency) ENDPROC(psci_set_suspend_mode) ENDPROC(psci_system_suspend) ENDPROC(psci_node_hw_state) ENDPROC(psci_cpu_default_suspend) ENDPROC(psci_cpu_freeze) ENDPROC(psci_features) 
ENDPROC(psci_system_reset) ENDPROC(psci_system_off) ENDPROC(psci_migrate_info_up_cpu) ENDPROC(psci_migrate_info_type) ENDPROC(psci_migrate) ENDPROC(psci_affinity_info) ENDPROC(psci_cpu_on) ENDPROC(psci_cpu_off) ENDPROC(psci_cpu_suspend) ENDPROC(psci_version) .weak psci_version .weak psci_cpu_suspend .weak psci_cpu_off .weak psci_cpu_on .weak psci_affinity_info .weak psci_migrate .weak psci_migrate_info_type .weak psci_migrate_info_up_cpu .weak psci_system_off .weak psci_system_reset .weak psci_features .weak psci_cpu_freeze .weak psci_cpu_default_suspend .weak psci_node_hw_state .weak psci_system_suspend .weak psci_set_suspend_mode .weak psi_stat_residency .weak psci_stat_count _psci_table: .word ARM_PSCI_FN_CPU_SUSPEND .word psci_cpu_suspend .word ARM_PSCI_FN_CPU_OFF .word psci_cpu_off .word ARM_PSCI_FN_CPU_ON .word psci_cpu_on .word ARM_PSCI_FN_MIGRATE .word psci_migrate .word ARM_PSCI_0_2_FN_PSCI_VERSION .word psci_version .word ARM_PSCI_0_2_FN_CPU_SUSPEND .word psci_cpu_suspend .word ARM_PSCI_0_2_FN_CPU_OFF .word psci_cpu_off .word ARM_PSCI_0_2_FN_CPU_ON .word psci_cpu_on .word ARM_PSCI_0_2_FN_AFFINITY_INFO .word psci_affinity_info .word ARM_PSCI_0_2_FN_MIGRATE .word psci_migrate .word ARM_PSCI_0_2_FN_MIGRATE_INFO_TYPE .word psci_migrate_info_type .word ARM_PSCI_0_2_FN_MIGRATE_INFO_UP_CPU .word psci_migrate_info_up_cpu .word ARM_PSCI_0_2_FN_SYSTEM_OFF .word psci_system_off .word ARM_PSCI_0_2_FN_SYSTEM_RESET .word psci_system_reset .word ARM_PSCI_1_0_FN_PSCI_FEATURES .word psci_features .word ARM_PSCI_1_0_FN_CPU_FREEZE .word psci_cpu_freeze .word ARM_PSCI_1_0_FN_CPU_DEFAULT_SUSPEND .word psci_cpu_default_suspend .word ARM_PSCI_1_0_FN_NODE_HW_STATE .word psci_node_hw_state .word ARM_PSCI_1_0_FN_SYSTEM_SUSPEND .word psci_system_suspend .word ARM_PSCI_1_0_FN_SET_SUSPEND_MODE .word psci_set_suspend_mode .word ARM_PSCI_1_0_FN_STAT_RESIDENCY .word psi_stat_residency .word ARM_PSCI_1_0_FN_STAT_COUNT .word psci_stat_count .word 0 .word 0 _smc_psci: push {r4-r7,lr} @ 
Switch to secure mrc p15, 0, r7, c1, c1, 0 bic r4, r7, #1 mcr p15, 0, r4, c1, c1, 0 isb adr r4, _psci_table 1: ldr r5, [r4] @ Load PSCI function ID ldr r6, [r4, #4] @ Load target PC cmp r5, #0 @ If reach the end, bail out moveq r0, #ARM_PSCI_RET_INVAL @ Return -2 (Invalid) beq 2f cmp r0, r5 @ If not matching, try next entry addne r4, r4, #8 bne 1b blx r6 @ Execute PSCI function @ Switch back to non-secure 2: mcr p15, 0, r7, c1, c1, 0 pop {r4-r7, lr} movs pc, lr @ Return to the kernel @ Requires dense and single-cluster CPU ID space ENTRY(psci_get_cpu_id) mrc p15, 0, r0, c0, c0, 5 /* read MPIDR */ and r0, r0, #0xff /* return CPU ID in cluster */ bx lr ENDPROC(psci_get_cpu_id) .weak psci_get_cpu_id /* Imported from Linux kernel */ ENTRY(psci_v7_flush_dcache_all) stmfd sp!, {r4-r5, r7, r9-r11, lr} dmb @ ensure ordering with previous memory accesses mrc p15, 1, r0, c0, c0, 1 @ read clidr ands r3, r0, #0x7000000 @ extract loc from clidr mov r3, r3, lsr #23 @ left align loc bit field beq finished @ if loc is 0, then no need to clean mov r10, #0 @ start clean at cache level 0 flush_levels: add r2, r10, r10, lsr #1 @ work out 3x current cache level mov r1, r0, lsr r2 @ extract cache type bits from clidr and r1, r1, #7 @ mask of the bits for current cache only cmp r1, #2 @ see what cache we have at this level blt skip @ skip if no cache, or just i-cache mrs r9, cpsr @ make cssr&csidr read atomic mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr isb @ isb to sych the new cssr&csidr mrc p15, 1, r1, c0, c0, 0 @ read the new csidr msr cpsr_c, r9 and r2, r1, #7 @ extract the length of the cache lines add r2, r2, #4 @ add 4 (line length offset) ldr r4, =0x3ff ands r4, r4, r1, lsr #3 @ find maximum number on the way size clz r5, r4 @ find bit position of way size increment ldr r7, =0x7fff ands r7, r7, r1, lsr #13 @ extract max number of the index size loop1: mov r9, r7 @ create working copy of max index loop2: orr r11, r10, r4, lsl r5 @ factor way and cache number 
into r11 orr r11, r11, r9, lsl r2 @ factor index number into r11 mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way subs r9, r9, #1 @ decrement the index bge loop2 subs r4, r4, #1 @ decrement the way bge loop1 skip: add r10, r10, #2 @ increment cache number cmp r3, r10 bgt flush_levels finished: mov r10, #0 @ swith back to cache level 0 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr dsb st isb ldmfd sp!, {r4-r5, r7, r9-r11, lr} bx lr ENDPROC(psci_v7_flush_dcache_all) ENTRY(psci_disable_smp) mrc p15, 0, r0, c1, c0, 1 @ ACTLR bic r0, r0, #(1 << 6) @ Clear SMP bit mcr p15, 0, r0, c1, c0, 1 @ ACTLR isb dsb bx lr ENDPROC(psci_disable_smp) .weak psci_disable_smp ENTRY(psci_enable_smp) mrc p15, 0, r0, c1, c0, 1 @ ACTLR orr r0, r0, #(1 << 6) @ Set SMP bit mcr p15, 0, r0, c1, c0, 1 @ ACTLR isb bx lr ENDPROC(psci_enable_smp) .weak psci_enable_smp ENTRY(psci_cpu_off_common) push {lr} bl psci_v7_flush_dcache_all clrex @ Why??? mrc p15, 0, r0, c1, c0, 0 @ SCTLR bic r0, r0, #(1 << 2) @ Clear C bit mcr p15, 0, r0, c1, c0, 0 @ SCTLR isb dsb bl psci_v7_flush_dcache_all clrex @ Why??? bl psci_disable_smp pop {lr} bx lr ENDPROC(psci_cpu_off_common) @ The stacks are allocated in reverse order, i.e. @ the stack for CPU0 has the highest memory address. @ @ -------------------- __secure_stack_end @ | CPU0 target PC | @ |------------------| @ | | @ | CPU0 stack | @ | | @ |------------------| __secure_stack_end - 1KB @ | . | @ | . | @ | . | @ | . | @ -------------------- __secure_stack_start @ @ This expects CPU ID in r0 and returns stack top in r0 LENTRY(psci_get_cpu_stack_top) @ stack top = __secure_stack_end - (cpuid << ARM_PSCI_STACK_SHIFT) ldr r3, =__secure_stack_end sub r0, r3, r0, LSL #ARM_PSCI_STACK_SHIFT sub r0, r0, #4 @ Save space for target PC bx lr ENDPROC(psci_get_cpu_stack_top) @ {r0, r1, r2, ip} from _do_nonsec_entry(kernel_entry, 0, machid, r2) in @ arch/arm/lib/bootm.c:boot_jump_linux() must remain unchanged across @ this function. 
ENTRY(psci_stack_setup) mov r6, lr mov r7, r0 bl psci_get_cpu_id @ CPU ID => r0 bl psci_get_cpu_stack_top @ stack top => r0 mov sp, r0 mov r0, r7 bx r6 ENDPROC(psci_stack_setup) ENTRY(psci_arch_init) mov pc, lr ENDPROC(psci_arch_init) .weak psci_arch_init ENTRY(psci_arch_cpu_entry) mov pc, lr ENDPROC(psci_arch_cpu_entry) .weak psci_arch_cpu_entry ENTRY(psci_cpu_entry) bl psci_enable_smp bl _nonsec_init bl psci_stack_setup bl psci_arch_cpu_entry bl psci_get_cpu_id @ CPU ID => r0 mov r2, r0 @ CPU ID => r2 bl psci_get_context_id @ context id => r0 mov r1, r0 @ context id => r1 mov r0, r2 @ CPU ID => r0 bl psci_get_target_pc @ target PC => r0 b _do_nonsec_entry ENDPROC(psci_cpu_entry) .popsection
4ms/stm32mp1-baremetal
10,454
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/armv7/start.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * armboot - Startup Code for OMAP3530/ARM Cortex CPU-core * * Copyright (c) 2004 Texas Instruments <r-woodruff2@ti.com> * * Copyright (c) 2001 Marius Gröger <mag@sysgo.de> * Copyright (c) 2002 Alex Züpke <azu@sysgo.de> * Copyright (c) 2002 Gary Jennejohn <garyj@denx.de> * Copyright (c) 2003 Richard Woodruff <r-woodruff2@ti.com> * Copyright (c) 2003 Kshitij <kshitij@ti.com> * Copyright (c) 2006-2008 Syed Mohammed Khasim <x0khasim@ti.com> */ #include <asm-offsets.h> #include <config.h> #include <asm/system.h> #include <linux/linkage.h> #include <asm/armv7.h> /************************************************************************* * * Startup Code (reset vector) * * Do important init only if we don't start from memory! * Setup memory and board specific bits prior to relocation. * Relocate armboot to ram. Setup stack. * *************************************************************************/ .globl reset .globl save_boot_params_ret .type save_boot_params_ret,%function #ifdef CONFIG_ARMV7_LPAE .global switch_to_hypervisor_ret #endif reset: /* Allow the board to save important registers */ b save_boot_params save_boot_params_ret: #ifdef CONFIG_ARMV7_LPAE /* * check for Hypervisor support */ mrc p15, 0, r0, c0, c1, 1 @ read ID_PFR1 and r0, r0, #CPUID_ARM_VIRT_MASK @ mask virtualization bits cmp r0, #(1 << CPUID_ARM_VIRT_SHIFT) beq switch_to_hypervisor switch_to_hypervisor_ret: #endif /* * disable interrupts (FIQ and IRQ), also set the cpu to SVC32 mode, * except if in HYP mode already */ mrs r0, cpsr and r1, r0, #0x1f @ mask mode bits teq r1, #0x1a @ test for HYP mode bicne r0, r0, #0x1f @ clear all mode bits orrne r0, r0, #0x13 @ set SVC mode orr r0, r0, #0xc0 @ disable FIQ and IRQ msr cpsr,r0 /* * Setup vector: * (OMAP4 spl TEXT_BASE is not 32 byte aligned. 
* Continue to use ROM code vector only in OMAP4 spl) */ #if !(defined(CONFIG_OMAP44XX) && defined(CONFIG_SPL_BUILD)) /* Set V=0 in CP15 SCTLR register - for VBAR to point to vector */ mrc p15, 0, r0, c1, c0, 0 @ Read CP15 SCTLR Register bic r0, #CR_V @ V = 0 mcr p15, 0, r0, c1, c0, 0 @ Write CP15 SCTLR Register #ifdef CONFIG_HAS_VBAR /* Set vector address in CP15 VBAR register */ ldr r0, =_start mcr p15, 0, r0, c12, c0, 0 @Set VBAR #endif #endif /* the mask ROM code should have PLL and others stable */ #ifndef CONFIG_SKIP_LOWLEVEL_INIT #ifdef CONFIG_CPU_V7A bl cpu_init_cp15 #endif #ifndef CONFIG_SKIP_LOWLEVEL_INIT_ONLY bl cpu_init_crit #endif #endif bl _main /*------------------------------------------------------------------------------*/ ENTRY(c_runtime_cpu_setup) /* * If I-cache is enabled invalidate it */ #if !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) mcr p15, 0, r0, c7, c5, 0 @ invalidate icache mcr p15, 0, r0, c7, c10, 4 @ DSB mcr p15, 0, r0, c7, c5, 4 @ ISB #endif bx lr ENDPROC(c_runtime_cpu_setup) /************************************************************************* * * void save_boot_params(u32 r0, u32 r1, u32 r2, u32 r3) * __attribute__((weak)); * * Stack pointer is not yet initialized at this moment * Don't save anything to stack even if compiled with -O0 * *************************************************************************/ ENTRY(save_boot_params) b save_boot_params_ret @ back to my caller ENDPROC(save_boot_params) .weak save_boot_params #ifdef CONFIG_ARMV7_LPAE ENTRY(switch_to_hypervisor) b switch_to_hypervisor_ret ENDPROC(switch_to_hypervisor) .weak switch_to_hypervisor #endif /************************************************************************* * * cpu_init_cp15 * * Setup CP15 registers (cache, MMU, TLBs). The I-cache is turned on unless * CONFIG_SYS_ICACHE_OFF is defined. 
* *************************************************************************/ ENTRY(cpu_init_cp15) /* * Invalidate L1 I/D */ mov r0, #0 @ set up for MCR mcr p15, 0, r0, c8, c7, 0 @ invalidate TLBs mcr p15, 0, r0, c7, c5, 0 @ invalidate icache mcr p15, 0, r0, c7, c5, 6 @ invalidate BP array mcr p15, 0, r0, c7, c10, 4 @ DSB mcr p15, 0, r0, c7, c5, 4 @ ISB /* * disable MMU stuff and caches */ mrc p15, 0, r0, c1, c0, 0 bic r0, r0, #0x00002000 @ clear bits 13 (--V-) bic r0, r0, #0x00000007 @ clear bits 2:0 (-CAM) orr r0, r0, #0x00000002 @ set bit 1 (--A-) Align orr r0, r0, #0x00000800 @ set bit 11 (Z---) BTB #if CONFIG_IS_ENABLED(SYS_ICACHE_OFF) bic r0, r0, #0x00001000 @ clear bit 12 (I) I-cache #else orr r0, r0, #0x00001000 @ set bit 12 (I) I-cache #endif mcr p15, 0, r0, c1, c0, 0 #ifdef CONFIG_ARM_ERRATA_716044 mrc p15, 0, r0, c1, c0, 0 @ read system control register orr r0, r0, #1 << 11 @ set bit #11 mcr p15, 0, r0, c1, c0, 0 @ write system control register #endif #if (defined(CONFIG_ARM_ERRATA_742230) || defined(CONFIG_ARM_ERRATA_794072)) mrc p15, 0, r0, c15, c0, 1 @ read diagnostic register orr r0, r0, #1 << 4 @ set bit #4 mcr p15, 0, r0, c15, c0, 1 @ write diagnostic register #endif #ifdef CONFIG_ARM_ERRATA_743622 mrc p15, 0, r0, c15, c0, 1 @ read diagnostic register orr r0, r0, #1 << 6 @ set bit #6 mcr p15, 0, r0, c15, c0, 1 @ write diagnostic register #endif #ifdef CONFIG_ARM_ERRATA_751472 mrc p15, 0, r0, c15, c0, 1 @ read diagnostic register orr r0, r0, #1 << 11 @ set bit #11 mcr p15, 0, r0, c15, c0, 1 @ write diagnostic register #endif #ifdef CONFIG_ARM_ERRATA_761320 mrc p15, 0, r0, c15, c0, 1 @ read diagnostic register orr r0, r0, #1 << 21 @ set bit #21 mcr p15, 0, r0, c15, c0, 1 @ write diagnostic register #endif #ifdef CONFIG_ARM_ERRATA_845369 mrc p15, 0, r0, c15, c0, 1 @ read diagnostic register orr r0, r0, #1 << 22 @ set bit #22 mcr p15, 0, r0, c15, c0, 1 @ write diagnostic register #endif mov r5, lr @ Store my Caller mrc p15, 0, r1, c0, c0, 0 @ r1 has 
Read Main ID Register (MIDR) mov r3, r1, lsr #20 @ get variant field and r3, r3, #0xf @ r3 has CPU variant and r4, r1, #0xf @ r4 has CPU revision mov r2, r3, lsl #4 @ shift variant field for combined value orr r2, r4, r2 @ r2 has combined CPU variant + revision /* Early stack for ERRATA that needs into call C code */ #if defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_STACK) ldr r0, =(CONFIG_SPL_STACK) #else ldr r0, =(CONFIG_SYS_INIT_SP_ADDR) #endif bic r0, r0, #7 /* 8-byte alignment for ABI compliance */ mov sp, r0 #ifdef CONFIG_ARM_ERRATA_798870 cmp r2, #0x30 @ Applies to lower than R3p0 bge skip_errata_798870 @ skip if not affected rev cmp r2, #0x20 @ Applies to including and above R2p0 blt skip_errata_798870 @ skip if not affected rev mrc p15, 1, r0, c15, c0, 0 @ read l2 aux ctrl reg orr r0, r0, #1 << 7 @ Enable hazard-detect timeout push {r1-r5} @ Save the cpu info registers bl v7_arch_cp15_set_l2aux_ctrl isb @ Recommended ISB after l2actlr update pop {r1-r5} @ Restore the cpu info - fall through skip_errata_798870: #endif #ifdef CONFIG_ARM_ERRATA_801819 cmp r2, #0x24 @ Applies to lt including R2p4 bgt skip_errata_801819 @ skip if not affected rev cmp r2, #0x20 @ Applies to including and above R2p0 blt skip_errata_801819 @ skip if not affected rev mrc p15, 0, r0, c0, c0, 6 @ pick up REVIDR reg and r0, r0, #1 << 3 @ check REVIDR[3] cmp r0, #1 << 3 beq skip_errata_801819 @ skip erratum if REVIDR[3] is set mrc p15, 0, r0, c1, c0, 1 @ read auxilary control register orr r0, r0, #3 << 27 @ Disables streaming. All write-allocate @ lines allocate in the L1 or L2 cache. orr r0, r0, #3 << 25 @ Disables streaming. All write-allocate @ lines allocate in the L1 cache. 
push {r1-r5} @ Save the cpu info registers bl v7_arch_cp15_set_acr pop {r1-r5} @ Restore the cpu info - fall through skip_errata_801819: #endif #ifdef CONFIG_ARM_CORTEX_A15_CVE_2017_5715 mrc p15, 0, r0, c1, c0, 1 @ read auxilary control register orr r0, r0, #1 << 0 @ Enable invalidates of BTB push {r1-r5} @ Save the cpu info registers bl v7_arch_cp15_set_acr pop {r1-r5} @ Restore the cpu info - fall through #endif #ifdef CONFIG_ARM_ERRATA_454179 mrc p15, 0, r0, c1, c0, 1 @ Read ACR cmp r2, #0x21 @ Only on < r2p1 orrlt r0, r0, #(0x3 << 6) @ Set DBSM(BIT7) and IBE(BIT6) bits push {r1-r5} @ Save the cpu info registers bl v7_arch_cp15_set_acr pop {r1-r5} @ Restore the cpu info - fall through #endif #if defined(CONFIG_ARM_ERRATA_430973) || defined (CONFIG_ARM_CORTEX_A8_CVE_2017_5715) mrc p15, 0, r0, c1, c0, 1 @ Read ACR #ifdef CONFIG_ARM_CORTEX_A8_CVE_2017_5715 orr r0, r0, #(0x1 << 6) @ Set IBE bit always to enable OS WA #else cmp r2, #0x21 @ Only on < r2p1 orrlt r0, r0, #(0x1 << 6) @ Set IBE bit #endif push {r1-r5} @ Save the cpu info registers bl v7_arch_cp15_set_acr pop {r1-r5} @ Restore the cpu info - fall through #endif #ifdef CONFIG_ARM_ERRATA_621766 mrc p15, 0, r0, c1, c0, 1 @ Read ACR cmp r2, #0x21 @ Only on < r2p1 orrlt r0, r0, #(0x1 << 5) @ Set L1NEON bit push {r1-r5} @ Save the cpu info registers bl v7_arch_cp15_set_acr pop {r1-r5} @ Restore the cpu info - fall through #endif #ifdef CONFIG_ARM_ERRATA_725233 mrc p15, 1, r0, c9, c0, 2 @ Read L2ACR cmp r2, #0x21 @ Only on < r2p1 (Cortex A8) orrlt r0, r0, #(0x1 << 27) @ L2 PLD data forwarding disable push {r1-r5} @ Save the cpu info registers bl v7_arch_cp15_set_l2aux_ctrl pop {r1-r5} @ Restore the cpu info - fall through #endif #ifdef CONFIG_ARM_ERRATA_852421 mrc p15, 0, r0, c15, c0, 1 @ read diagnostic register orr r0, r0, #1 << 24 @ set bit #24 mcr p15, 0, r0, c15, c0, 1 @ write diagnostic register #endif #ifdef CONFIG_ARM_ERRATA_852423 mrc p15, 0, r0, c15, c0, 1 @ read diagnostic register orr r0, r0, #1 << 12 
@ set bit #12 mcr p15, 0, r0, c15, c0, 1 @ write diagnostic register #endif mov pc, r5 @ back to my caller ENDPROC(cpu_init_cp15) #if !defined(CONFIG_SKIP_LOWLEVEL_INIT) && \ !defined(CONFIG_SKIP_LOWLEVEL_INIT_ONLY) /************************************************************************* * * CPU_init_critical registers * * setup important registers * setup memory timing * *************************************************************************/ ENTRY(cpu_init_crit) /* * Jump to board specific initialization... * The Mask ROM will have already initialized * basic memory. Go here to bump up clock rate and handle * wake up conditions. */ b lowlevel_init @ go setup pll,mux,memory ENDPROC(cpu_init_crit) #endif
4ms/stm32mp1-baremetal
5,656
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/armv7/nonsec_virt.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * code for switching cores into non-secure state and into HYP mode * * Copyright (c) 2013 Andre Przywara <andre.przywara@linaro.org> */ #include <config.h> #include <linux/linkage.h> #include <asm/gic.h> #include <asm/armv7.h> #include <asm/proc-armv/ptrace.h> .arch_extension sec .arch_extension virt .pushsection ._secure.text, "ax" .align 5 /* the vector table for secure state and HYP mode */ _monitor_vectors: .word 0 /* reset */ .word 0 /* undef */ adr pc, _secure_monitor .word 0 .word 0 .word 0 .word 0 .word 0 .macro is_cpu_virt_capable tmp mrc p15, 0, \tmp, c0, c1, 1 @ read ID_PFR1 and \tmp, \tmp, #CPUID_ARM_VIRT_MASK @ mask virtualization bits cmp \tmp, #(1 << CPUID_ARM_VIRT_SHIFT) .endm /* * secure monitor handler * U-Boot calls this "software interrupt" in start.S * This is executed on a "smc" instruction, we use a "smc #0" to switch * to non-secure state. * r0, r1, r2: passed to the callee * ip: target PC */ _secure_monitor: #ifdef CONFIG_ARMV7_PSCI ldr r5, =_psci_vectors @ Switch to the next monitor mcr p15, 0, r5, c12, c0, 1 isb @ Obtain a secure stack bl psci_stack_setup @ Configure the PSCI backend push {r0, r1, r2, ip} bl psci_arch_init pop {r0, r1, r2, ip} #endif #ifdef CONFIG_ARM_ERRATA_773022 mrc p15, 0, r5, c1, c0, 1 orr r5, r5, #(1 << 1) mcr p15, 0, r5, c1, c0, 1 isb #endif #ifdef CONFIG_ARM_ERRATA_774769 mrc p15, 0, r5, c1, c0, 1 orr r5, r5, #(1 << 25) mcr p15, 0, r5, c1, c0, 1 isb #endif mrc p15, 0, r5, c1, c1, 0 @ read SCR bic r5, r5, #0x4a @ clear IRQ, EA, nET bits orr r5, r5, #0x31 @ enable NS, AW, FW bits @ FIQ preserved for secure mode mov r6, #SVC_MODE @ default mode is SVC is_cpu_virt_capable r4 #ifdef CONFIG_ARMV7_VIRT orreq r5, r5, #0x100 @ allow HVC instruction moveq r6, #HYP_MODE @ Enter the kernel as HYP mrseq r3, sp_svc msreq sp_hyp, r3 @ migrate SP #endif mcr p15, 0, r5, c1, c1, 0 @ write SCR (with NS bit set) isb bne 1f @ Reset CNTVOFF to 0 before leaving monitor mode mrc p15, 0, r4, c0, 
c1, 1 @ read ID_PFR1 ands r4, r4, #CPUID_ARM_GENTIMER_MASK @ test arch timer bits movne r4, #0 mcrrne p15, 4, r4, r4, c14 @ Reset CNTVOFF to zero 1: mov lr, ip mov ip, #(F_BIT | I_BIT | A_BIT) @ Set A, I and F tst lr, #1 @ Check for Thumb PC orrne ip, ip, #T_BIT @ Set T if Thumb orr ip, ip, r6 @ Slot target mode in msr spsr_cxfs, ip @ Set full SPSR movs pc, lr @ ERET to non-secure ENTRY(_do_nonsec_entry) mov ip, r0 mov r0, r1 mov r1, r2 mov r2, r3 smc #0 ENDPROC(_do_nonsec_entry) .macro get_cbar_addr addr #ifdef CONFIG_ARM_GIC_BASE_ADDRESS ldr \addr, =CONFIG_ARM_GIC_BASE_ADDRESS #else mrc p15, 4, \addr, c15, c0, 0 @ read CBAR bfc \addr, #0, #15 @ clear reserved bits #endif .endm .macro get_gicd_addr addr get_cbar_addr \addr add \addr, \addr, #GIC_DIST_OFFSET @ GIC dist i/f offset .endm .macro get_gicc_addr addr, tmp get_cbar_addr \addr is_cpu_virt_capable \tmp movne \tmp, #GIC_CPU_OFFSET_A9 @ GIC CPU offset for A9 moveq \tmp, #GIC_CPU_OFFSET_A15 @ GIC CPU offset for A15/A7 add \addr, \addr, \tmp .endm #ifndef CONFIG_ARMV7_PSCI /* * Secondary CPUs start here and call the code for the core specific parts * of the non-secure and HYP mode transition. The GIC distributor specific * code has already been executed by a C function before. * Then they go back to wfi and wait to be woken up by the kernel again. */ ENTRY(_smp_pen) cpsid i cpsid f bl _nonsec_init adr r0, _smp_pen @ do not use this address again b smp_waitloop @ wait for IPIs, board specific ENDPROC(_smp_pen) #endif /* * Switch a core to non-secure state. * * 1. initialize the GIC per-core interface * 2. allow coprocessor access in non-secure modes * * Called from smp_pen by secondary cores and directly by the BSP. * Do not assume that the stack is available and only use registers * r0-r3 and r12. * * PERIPHBASE is used to get the GIC address. This could be 40 bits long, * though, but we check this in C before calling this function. 
*/ ENTRY(_nonsec_init) get_gicd_addr r3 mvn r1, #0 @ all bits to 1 str r1, [r3, #GICD_IGROUPRn] @ allow private interrupts get_gicc_addr r3, r1 mov r1, #3 @ Enable both groups str r1, [r3, #GICC_CTLR] @ and clear all other bits mov r1, #0xff str r1, [r3, #GICC_PMR] @ set priority mask register mrc p15, 0, r0, c1, c1, 2 movw r1, #0x3fff movt r1, #0x0004 orr r0, r0, r1 mcr p15, 0, r0, c1, c1, 2 @ NSACR = all copros to non-sec /* The CNTFRQ register of the generic timer needs to be * programmed in secure state. Some primary bootloaders / firmware * omit this, so if the frequency is provided in the configuration, * we do this here instead. * But first check if we have the generic timer. */ #ifdef COUNTER_FREQUENCY mrc p15, 0, r0, c0, c1, 1 @ read ID_PFR1 and r0, r0, #CPUID_ARM_GENTIMER_MASK @ mask arch timer bits cmp r0, #(1 << CPUID_ARM_GENTIMER_SHIFT) ldreq r1, =COUNTER_FREQUENCY mcreq p15, 0, r1, c14, c0, 0 @ write CNTFRQ #endif adr r1, _monitor_vectors mcr p15, 0, r1, c12, c0, 1 @ set MVBAR to secure vectors isb mov r0, r3 @ return GICC address bx lr ENDPROC(_nonsec_init) #ifdef CONFIG_SMP_PEN_ADDR /* void __weak smp_waitloop(unsigned previous_address); */ ENTRY(smp_waitloop) wfi ldr r1, =CONFIG_SMP_PEN_ADDR @ load start address ldr r1, [r1] #ifdef CONFIG_PEN_ADDR_BIG_ENDIAN rev r1, r1 #endif cmp r0, r1 @ make sure we dont execute this code beq smp_waitloop @ again (due to a spurious wakeup) mov r0, r1 b _do_nonsec_entry ENDPROC(smp_waitloop) .weak smp_waitloop #endif .popsection
4ms/stm32mp1-baremetal
1,257
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/armv7/smccc-call.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2015, Linaro Limited */ #include <linux/linkage.h> #include <asm/opcodes-sec.h> #include <asm/opcodes-virt.h> #ifdef CONFIG_EFI_LOADER .section .text.efi_runtime #endif #define UNWIND(x...) /* * Wrap c macros in asm macros to delay expansion until after the * SMCCC asm macro is expanded. */ .macro SMCCC_SMC __SMC(0) .endm .macro SMCCC_HVC __HVC(0) .endm .macro SMCCC instr UNWIND( .fnstart) mov r12, sp push {r4-r7} UNWIND( .save {r4-r7}) ldm r12, {r4-r7} \instr pop {r4-r7} ldr r12, [sp, #(4 * 4)] stm r12, {r0-r3} bx lr UNWIND( .fnend) .endm /* * void smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2, * unsigned long a3, unsigned long a4, unsigned long a5, * unsigned long a6, unsigned long a7, struct arm_smccc_res *res, * struct arm_smccc_quirk *quirk) */ ENTRY(__arm_smccc_smc) SMCCC SMCCC_SMC ENDPROC(__arm_smccc_smc) /* * void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2, * unsigned long a3, unsigned long a4, unsigned long a5, * unsigned long a6, unsigned long a7, struct arm_smccc_res *res, * struct arm_smccc_quirk *quirk) */ ENTRY(__arm_smccc_hvc) SMCCC SMCCC_HVC ENDPROC(__arm_smccc_hvc)
4ms/stm32mp1-baremetal
1,501
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/armv7/lowlevel_init.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * A lowlevel_init function that sets up the stack to call a C function to * perform further init. * * (C) Copyright 2010 * Texas Instruments, <www.ti.com> * * Author : * Aneesh V <aneesh@ti.com> */ #include <asm-offsets.h> #include <config.h> #include <linux/linkage.h> .pushsection .text.s_init, "ax" WEAK(s_init) bx lr ENDPROC(s_init) .popsection .pushsection .text.lowlevel_init, "ax" WEAK(lowlevel_init) /* * Setup a temporary stack. Global data is not available yet. */ #if defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_STACK) ldr sp, =CONFIG_SPL_STACK #else ldr sp, =CONFIG_SYS_INIT_SP_ADDR #endif bic sp, sp, #7 /* 8-byte alignment for ABI compliance */ #ifdef CONFIG_SPL_DM mov r9, #0 #else /* * Set up global data for boards that still need it. This will be * removed soon. */ #ifdef CONFIG_SPL_BUILD ldr r9, =gdata #else sub sp, sp, #GD_SIZE bic sp, sp, #7 mov r9, sp #endif #endif /* * Save the old lr(passed in ip) and the current lr to stack */ push {ip, lr} /* * Call the very early init function. This should do only the * absolute bare minimum to get started. It should not: * * - set up DRAM * - use global_data * - clear BSS * - try to start a console * * For boards with SPL this should be empty since SPL can do all of * this init in the SPL board_init_f() function which is called * immediately after this. */ bl s_init pop {ip, pc} ENDPROC(lowlevel_init) .popsection
4ms/stm32mp1-baremetal
2,810
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/arm926ejs/start.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * armboot - Startup Code for ARM926EJS CPU-core * * Copyright (c) 2003 Texas Instruments * * ----- Adapted for OMAP1610 OMAP730 from ARM925t code ------ * * Copyright (c) 2001 Marius Gröger <mag@sysgo.de> * Copyright (c) 2002 Alex Züpke <azu@sysgo.de> * Copyright (c) 2002 Gary Jennejohn <garyj@denx.de> * Copyright (c) 2003 Richard Woodruff <r-woodruff2@ti.com> * Copyright (c) 2003 Kshitij <kshitij@ti.com> * Copyright (c) 2010 Albert Aribaud <albert.u.boot@aribaud.net> */ #include <asm-offsets.h> #include <config.h> #include <common.h> /* ************************************************************************* * * Startup Code (reset vector) * * do important init only if we don't start from memory! * setup Memory and board specific bits prior to relocation. * relocate armboot to ram * setup stack * ************************************************************************* */ .globl reset reset: /* * set the cpu to SVC32 mode */ mrs r0,cpsr bic r0,r0,#0x1f orr r0,r0,#0xd3 msr cpsr,r0 /* * we do sys-critical inits only at reboot, * not when booting from ram! 
*/ #ifndef CONFIG_SKIP_LOWLEVEL_INIT bl cpu_init_crit #endif bl _main /*------------------------------------------------------------------------------*/ .globl c_runtime_cpu_setup c_runtime_cpu_setup: bx lr /* ************************************************************************* * * CPU_init_critical registers * * setup important registers * setup memory timing * ************************************************************************* */ #ifndef CONFIG_SKIP_LOWLEVEL_INIT cpu_init_crit: /* * flush D cache before disabling it */ mov r0, #0 flush_dcache: mrc p15, 0, r15, c7, c10, 3 bne flush_dcache mcr p15, 0, r0, c8, c7, 0 /* invalidate TLB */ mcr p15, 0, r0, c7, c5, 0 /* invalidate I Cache */ /* * disable MMU and D cache * enable I cache if SYS_ICACHE_OFF is not defined */ mrc p15, 0, r0, c1, c0, 0 bic r0, r0, #0x00000300 /* clear bits 9:8 (---- --RS) */ bic r0, r0, #0x00000087 /* clear bits 7, 2:0 (B--- -CAM) */ #ifdef CONFIG_SYS_EXCEPTION_VECTORS_HIGH orr r0, r0, #0x00002000 /* set bit 13 (--V- ----) */ #else bic r0, r0, #0x00002000 /* clear bit 13 (--V- ----) */ #endif orr r0, r0, #0x00000002 /* set bit 1 (A) Align */ #if !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) orr r0, r0, #0x00001000 /* set bit 12 (I) I-Cache */ #endif mcr p15, 0, r0, c1, c0, 0 #ifndef CONFIG_SKIP_LOWLEVEL_INIT_ONLY /* * Go setup Memory and board specific bits prior to relocation. */ mov r4, lr /* perserve link reg across call */ bl lowlevel_init /* go setup pll,mux,memory */ mov lr, r4 /* restore link */ #endif mov pc, lr /* back to my caller */ #endif /* CONFIG_SKIP_LOWLEVEL_INIT */
4ms/stm32mp1-baremetal
2,428
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/arm946es/start.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * armboot - Startup Code for ARM926EJS CPU-core * * Copyright (c) 2003 Texas Instruments * * ----- Adapted for OMAP1610 OMAP730 from ARM925t code ------ * * Copyright (c) 2001 Marius Gröger <mag@sysgo.de> * Copyright (c) 2002 Alex Züpke <azu@sysgo.de> * Copyright (c) 2002 Gary Jennejohn <garyj@denx.de> * Copyright (c) 2003 Richard Woodruff <r-woodruff2@ti.com> * Copyright (c) 2003 Kshitij <kshitij@ti.com> * Copyright (c) 2010 Albert Aribaud <albert.u.boot@aribaud.net> */ #include <asm-offsets.h> #include <config.h> /* ************************************************************************* * * Startup Code (reset vector) * * do important init only if we don't start from memory! * setup Memory and board specific bits prior to relocation. * relocate armboot to ram * setup stack * ************************************************************************* */ .globl reset reset: /* * set the cpu to SVC32 mode */ mrs r0,cpsr bic r0,r0,#0x1f orr r0,r0,#0xd3 msr cpsr,r0 /* * we do sys-critical inits only at reboot, * not when booting from ram! 
*/ #ifndef CONFIG_SKIP_LOWLEVEL_INIT bl cpu_init_crit #endif bl _main /*------------------------------------------------------------------------------*/ .globl c_runtime_cpu_setup c_runtime_cpu_setup: mov pc, lr /* ************************************************************************* * * CPU_init_critical registers * * setup important registers * setup memory timing * ************************************************************************* */ #ifndef CONFIG_SKIP_LOWLEVEL_INIT cpu_init_crit: /* * flush v4 I/D caches */ mov r0, #0 mcr p15, 0, r0, c7, c5, 0 /* flush v4 I-cache */ mcr p15, 0, r0, c7, c6, 0 /* flush v4 D-cache */ /* * disable MMU stuff and caches */ mrc p15, 0, r0, c1, c0, 0 bic r0, r0, #0x00002300 /* clear bits 13, 9:8 (--V- --RS) */ bic r0, r0, #0x00000087 /* clear bits 7, 2:0 (B--- -CAM) */ orr r0, r0, #0x00000002 /* set bit 1 (A) Align */ orr r0, r0, #0x00001000 /* set bit 12 (I) I-Cache */ mcr p15, 0, r0, c1, c0, 0 #ifndef CONFIG_SKIP_LOWLEVEL_INIT_ONLY /* * Go setup Memory and board specific bits prior to relocation. */ mov ip, lr /* perserve link reg across call */ bl lowlevel_init /* go setup memory */ mov lr, ip /* restore link */ #endif mov pc, lr /* back to my caller */ #endif
4ms/stm32mp1-baremetal
1,657
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/arm720t/start.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * armboot - Startup Code for ARM720 CPU-core * * Copyright (c) 2001 Marius Gröger <mag@sysgo.de> * Copyright (c) 2002 Alex Züpke <azu@sysgo.de> */ #include <asm-offsets.h> #include <config.h> /* ************************************************************************* * * Startup Code (reset vector) * * do important init only if we don't start from RAM! * relocate armboot to ram * setup stack * jump to second stage * ************************************************************************* */ .globl reset reset: /* * set the cpu to SVC32 mode */ mrs r0,cpsr bic r0,r0,#0x1f orr r0,r0,#0xd3 msr cpsr,r0 /* * we do sys-critical inits only at reboot, * not when booting from ram! */ #if !defined(CONFIG_SKIP_LOWLEVEL_INIT) && \ !defined(CONFIG_SKIP_LOWLEVEL_INIT_ONLY) bl cpu_init_crit #endif bl _main /*------------------------------------------------------------------------------*/ .globl c_runtime_cpu_setup c_runtime_cpu_setup: mov pc, lr /* ************************************************************************* * * CPU_init_critical registers * * setup important registers * setup memory timing * ************************************************************************* */ #if !defined(CONFIG_SKIP_LOWLEVEL_INIT) && \ !defined(CONFIG_SKIP_LOWLEVEL_INIT_ONLY) cpu_init_crit: mov ip, lr /* * before relocating, we have to setup RAM timing * because memory timing is board-dependent, you will * find a lowlevel_init.S in your board directory. */ bl lowlevel_init mov lr, ip mov pc, lr #endif /* CONFIG_SKIP_LOWLEVEL_INIT */
4ms/stm32mp1-baremetal
2,783
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/arm1176/start.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * armboot - Startup Code for ARM1176 CPU-core * * Copyright (c) 2007 Samsung Electronics * * Copyright (C) 2008 * Guennadi Liakhovetki, DENX Software Engineering, <lg@denx.de> * * 2007-09-21 - Restructured codes by jsgood (jsgood.yang@samsung.com) * 2007-09-21 - Added MoviNAND and OneNAND boot codes by * jsgood (jsgood.yang@samsung.com) * Base codes by scsuh (sc.suh) */ #include <asm-offsets.h> #include <config.h> #include <linux/linkage.h> #ifndef CONFIG_SYS_PHY_UBOOT_BASE #define CONFIG_SYS_PHY_UBOOT_BASE CONFIG_SYS_UBOOT_BASE #endif /* ************************************************************************* * * Startup Code (reset vector) * * do important init only if we don't start from memory! * setup Memory and board specific bits prior to relocation. * relocate armboot to ram * setup stack * ************************************************************************* */ .globl reset reset: /* Allow the board to save important registers */ b save_boot_params .globl save_boot_params_ret save_boot_params_ret: /* * set the cpu to SVC32 mode */ mrs r0, cpsr bic r0, r0, #0x3f orr r0, r0, #0xd3 msr cpsr, r0 /* ************************************************************************* * * CPU_init_critical registers * * setup important registers * setup memory timing * ************************************************************************* */ /* * we do sys-critical inits only at reboot, * not when booting from ram! 
*/ cpu_init_crit: /* * When booting from NAND - it has definitely been a reset, so, no need * to flush caches and disable the MMU */ #ifndef CONFIG_SPL_BUILD /* * flush v4 I/D caches */ mov r0, #0 mcr p15, 0, r0, c7, c7, 0 /* flush v3/v4 cache */ mcr p15, 0, r0, c8, c7, 0 /* flush v4 TLB */ /* * disable MMU stuff and caches */ mrc p15, 0, r0, c1, c0, 0 bic r0, r0, #0x00002300 @ clear bits 13, 9:8 (--V- --RS) bic r0, r0, #0x00000087 @ clear bits 7, 2:0 (B--- -CAM) orr r0, r0, #0x00000002 @ set bit 1 (A) Align orr r0, r0, #0x00001000 @ set bit 12 (I) I-Cache /* Prepare to disable the MMU */ adr r2, mmu_disable_phys sub r2, r2, #(CONFIG_SYS_PHY_UBOOT_BASE - CONFIG_SYS_TEXT_BASE) b mmu_disable .align 5 /* Run in a single cache-line */ mmu_disable: mcr p15, 0, r0, c1, c0, 0 nop nop mov pc, r2 mmu_disable_phys: #endif /* * Go setup Memory and board specific bits prior to relocation. */ bl lowlevel_init /* go setup pll,mux,memory */ bl _main /*------------------------------------------------------------------------------*/ .globl c_runtime_cpu_setup c_runtime_cpu_setup: mov pc, lr WEAK(save_boot_params) b save_boot_params_ret /* back to my caller */ ENDPROC(save_boot_params)
4ms/stm32mp1-baremetal
2,432
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/arm1136/start.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * armboot - Startup Code for OMP2420/ARM1136 CPU-core * * Copyright (c) 2004 Texas Instruments <r-woodruff2@ti.com> * * Copyright (c) 2001 Marius Gröger <mag@sysgo.de> * Copyright (c) 2002 Alex Züpke <azu@sysgo.de> * Copyright (c) 2002 Gary Jennejohn <garyj@denx.de> * Copyright (c) 2003 Richard Woodruff <r-woodruff2@ti.com> * Copyright (c) 2003 Kshitij <kshitij@ti.com> */ #include <asm-offsets.h> #include <config.h> /* ************************************************************************* * * Startup Code (reset vector) * * do important init only if we don't start from memory! * setup Memory and board specific bits prior to relocation. * relocate armboot to ram * setup stack * ************************************************************************* */ .globl reset reset: /* * set the cpu to SVC32 mode */ mrs r0,cpsr bic r0,r0,#0x1f orr r0,r0,#0xd3 msr cpsr,r0 /* the mask ROM code should have PLL and others stable */ #ifndef CONFIG_SKIP_LOWLEVEL_INIT bl cpu_init_crit #endif bl _main /*------------------------------------------------------------------------------*/ .globl c_runtime_cpu_setup c_runtime_cpu_setup: bx lr /* ************************************************************************* * * CPU_init_critical registers * * setup important registers * setup memory timing * ************************************************************************* */ #ifndef CONFIG_SKIP_LOWLEVEL_INIT cpu_init_crit: /* * flush v4 I/D caches */ mov r0, #0 mcr p15, 0, r0, c7, c7, 0 /* Invalidate I+D+BTB caches */ mcr p15, 0, r0, c8, c7, 0 /* Invalidate Unified TLB */ /* * disable MMU stuff and caches */ mrc p15, 0, r0, c1, c0, 0 bic r0, r0, #0x00002300 @ clear bits 13, 9:8 (--V- --RS) bic r0, r0, #0x00000087 @ clear bits 7, 2:0 (B--- -CAM) orr r0, r0, #0x00000002 @ set bit 1 (A) Align orr r0, r0, #0x00001000 @ set bit 12 (I) I-Cache mcr p15, 0, r0, c1, c0, 0 #ifndef CONFIG_SKIP_LOWLEVEL_INIT_ONLY /* * Jump to board specific 
initialization... The Mask ROM will have already initialized * basic memory. Go here to bump up clock rate and handle wake up conditions. */ mov ip, lr /* persevere link reg across call */ bl lowlevel_init /* go setup pll,mux,memory */ mov lr, ip /* restore link */ #endif mov pc, lr /* back to my caller */ #endif /* CONFIG_SKIP_LOWLEVEL_INIT */
4ms/stm32mp1-baremetal
2,525
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/sa1100/start.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * armboot - Startup Code for SA1100 CPU * * Copyright (C) 1998 Dan Malek <dmalek@jlc.net> * Copyright (C) 1999 Magnus Damm <kieraypc01.p.y.kie.era.ericsson.se> * Copyright (C) 2000 Wolfgang Denk <wd@denx.de> * Copyright (c) 2001 Alex Züpke <azu@sysgo.de> */ #include <asm-offsets.h> #include <config.h> /* ************************************************************************* * * Startup Code (reset vector) * * do important init only if we don't start from memory! * relocate armboot to ram * setup stack * jump to second stage * ************************************************************************* */ .globl reset reset: /* * set the cpu to SVC32 mode */ mrs r0,cpsr bic r0,r0,#0x1f orr r0,r0,#0xd3 msr cpsr,r0 /* * we do sys-critical inits only at reboot, * not when booting from ram! */ #ifndef CONFIG_SKIP_LOWLEVEL_INIT bl cpu_init_crit #endif bl _main /*------------------------------------------------------------------------------*/ .globl c_runtime_cpu_setup c_runtime_cpu_setup: mov pc, lr /* ************************************************************************* * * CPU_init_critical registers * * setup important registers * setup memory timing * ************************************************************************* */ /* Interrupt-Controller base address */ IC_BASE: .word 0x90050000 #define ICMR 0x04 /* Reset-Controller */ RST_BASE: .word 0x90030000 #define RSRR 0x00 #define RCSR 0x04 /* PWR */ PWR_BASE: .word 0x90020000 #define PSPR 0x08 #define PPCR 0x14 cpuspeed: .word CONFIG_SYS_CPUSPEED cpu_init_crit: /* * mask all IRQs */ ldr r0, IC_BASE mov r1, #0x00 str r1, [r0, #ICMR] /* set clock speed */ ldr r0, PWR_BASE ldr r1, cpuspeed str r1, [r0, #PPCR] #ifndef CONFIG_SKIP_LOWLEVEL_INIT_ONLY /* * before relocating, we have to setup RAM timing * because memory timing is board-dependend, you will * find a lowlevel_init.S in your board directory. 
*/ mov ip, lr bl lowlevel_init mov lr, ip #endif /* * disable MMU stuff and enable I-cache */ mrc p15,0,r0,c1,c0 bic r0, r0, #0x00002000 @ clear bit 13 (X) bic r0, r0, #0x0000000f @ clear bits 3-0 (WCAM) orr r0, r0, #0x00001000 @ set bit 12 (I) Icache orr r0, r0, #0x00000002 @ set bit 1 (A) Align mcr p15,0,r0,c1,c0 /* * flush v4 I/D caches */ mov r0, #0 mcr p15, 0, r0, c7, c7, 0 /* flush v3/v4 cache */ mcr p15, 0, r0, c8, c7, 0 /* flush v4 TLB */ mov pc, lr
4ms/stm32mp1-baremetal
6,046
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/armv8/cache.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * (C) Copyright 2013 * David Feng <fenghua@phytium.com.cn> * * This file is based on sample code from ARMv8 ARM. */ #include <asm-offsets.h> #include <config.h> #include <asm/macro.h> #include <asm/system.h> #include <linux/linkage.h> /* * void __asm_dcache_level(level) * * flush or invalidate one level cache. * * x0: cache level * x1: 0 clean & invalidate, 1 invalidate only * x2~x9: clobbered */ .pushsection .text.__asm_dcache_level, "ax" ENTRY(__asm_dcache_level) lsl x12, x0, #1 msr csselr_el1, x12 /* select cache level */ isb /* sync change of cssidr_el1 */ mrs x6, ccsidr_el1 /* read the new cssidr_el1 */ and x2, x6, #7 /* x2 <- log2(cache line size)-4 */ add x2, x2, #4 /* x2 <- log2(cache line size) */ mov x3, #0x3ff and x3, x3, x6, lsr #3 /* x3 <- max number of #ways */ clz w5, w3 /* bit position of #ways */ mov x4, #0x7fff and x4, x4, x6, lsr #13 /* x4 <- max number of #sets */ /* x12 <- cache level << 1 */ /* x2 <- line length offset */ /* x3 <- number of cache ways - 1 */ /* x4 <- number of cache sets - 1 */ /* x5 <- bit position of #ways */ loop_set: mov x6, x3 /* x6 <- working copy of #ways */ loop_way: lsl x7, x6, x5 orr x9, x12, x7 /* map way and level to cisw value */ lsl x7, x4, x2 orr x9, x9, x7 /* map set number to cisw value */ tbz w1, #0, 1f dc isw, x9 b 2f 1: dc cisw, x9 /* clean & invalidate by set/way */ 2: subs x6, x6, #1 /* decrement the way */ b.ge loop_way subs x4, x4, #1 /* decrement the set */ b.ge loop_set ret ENDPROC(__asm_dcache_level) .popsection /* * void __asm_flush_dcache_all(int invalidate_only) * * x0: 0 clean & invalidate, 1 invalidate only * * flush or invalidate all data cache by SET/WAY. 
*/ .pushsection .text.__asm_dcache_all, "ax" ENTRY(__asm_dcache_all) mov x1, x0 dsb sy mrs x10, clidr_el1 /* read clidr_el1 */ lsr x11, x10, #24 and x11, x11, #0x7 /* x11 <- loc */ cbz x11, finished /* if loc is 0, exit */ mov x15, lr mov x0, #0 /* start flush at cache level 0 */ /* x0 <- cache level */ /* x10 <- clidr_el1 */ /* x11 <- loc */ /* x15 <- return address */ loop_level: lsl x12, x0, #1 add x12, x12, x0 /* x0 <- tripled cache level */ lsr x12, x10, x12 and x12, x12, #7 /* x12 <- cache type */ cmp x12, #2 b.lt skip /* skip if no cache or icache */ bl __asm_dcache_level /* x1 = 0 flush, 1 invalidate */ skip: add x0, x0, #1 /* increment cache level */ cmp x11, x0 b.gt loop_level mov x0, #0 msr csselr_el1, x0 /* restore csselr_el1 */ dsb sy isb mov lr, x15 finished: ret ENDPROC(__asm_dcache_all) .popsection .pushsection .text.__asm_flush_dcache_all, "ax" ENTRY(__asm_flush_dcache_all) mov x0, #0 b __asm_dcache_all ENDPROC(__asm_flush_dcache_all) .popsection .pushsection .text.__asm_invalidate_dcache_all, "ax" ENTRY(__asm_invalidate_dcache_all) mov x0, #0x1 b __asm_dcache_all ENDPROC(__asm_invalidate_dcache_all) .popsection /* * void __asm_flush_dcache_range(start, end) * * clean & invalidate data cache in the range * * x0: start address * x1: end address */ .pushsection .text.__asm_flush_dcache_range, "ax" ENTRY(__asm_flush_dcache_range) mrs x3, ctr_el0 lsr x3, x3, #16 and x3, x3, #0xf mov x2, #4 lsl x2, x2, x3 /* cache line size */ /* x2 <- minimal cache line size in cache system */ sub x3, x2, #1 bic x0, x0, x3 1: dc civac, x0 /* clean & invalidate data or unified cache */ add x0, x0, x2 cmp x0, x1 b.lo 1b dsb sy ret ENDPROC(__asm_flush_dcache_range) .popsection /* * void __asm_invalidate_dcache_range(start, end) * * invalidate data cache in the range * * x0: start address * x1: end address */ .pushsection .text.__asm_invalidate_dcache_range, "ax" ENTRY(__asm_invalidate_dcache_range) mrs x3, ctr_el0 ubfm x3, x3, #16, #19 mov x2, #4 lsl x2, x2, x3 /* cache 
line size */ /* x2 <- minimal cache line size in cache system */ sub x3, x2, #1 bic x0, x0, x3 1: dc ivac, x0 /* invalidate data or unified cache */ add x0, x0, x2 cmp x0, x1 b.lo 1b dsb sy ret ENDPROC(__asm_invalidate_dcache_range) .popsection /* * void __asm_invalidate_icache_all(void) * * invalidate all tlb entries. */ .pushsection .text.__asm_invalidate_icache_all, "ax" ENTRY(__asm_invalidate_icache_all) ic ialluis isb sy ret ENDPROC(__asm_invalidate_icache_all) .popsection .pushsection .text.__asm_invalidate_l3_dcache, "ax" ENTRY(__asm_invalidate_l3_dcache) mov x0, #0 /* return status as success */ ret ENDPROC(__asm_invalidate_l3_dcache) .weak __asm_invalidate_l3_dcache .popsection .pushsection .text.__asm_flush_l3_dcache, "ax" ENTRY(__asm_flush_l3_dcache) mov x0, #0 /* return status as success */ ret ENDPROC(__asm_flush_l3_dcache) .weak __asm_flush_l3_dcache .popsection .pushsection .text.__asm_invalidate_l3_icache, "ax" ENTRY(__asm_invalidate_l3_icache) mov x0, #0 /* return status as success */ ret ENDPROC(__asm_invalidate_l3_icache) .weak __asm_invalidate_l3_icache .popsection /* * void __asm_switch_ttbr(ulong new_ttbr) * * Safely switches to a new page table. 
*/ .pushsection .text.__asm_switch_ttbr, "ax" ENTRY(__asm_switch_ttbr) /* x2 = SCTLR (alive throghout the function) */ switch_el x4, 3f, 2f, 1f 3: mrs x2, sctlr_el3 b 0f 2: mrs x2, sctlr_el2 b 0f 1: mrs x2, sctlr_el1 0: /* Unset CR_M | CR_C | CR_I from SCTLR to disable all caches */ movn x1, #(CR_M | CR_C | CR_I) and x1, x2, x1 switch_el x4, 3f, 2f, 1f 3: msr sctlr_el3, x1 b 0f 2: msr sctlr_el2, x1 b 0f 1: msr sctlr_el1, x1 0: isb /* This call only clobbers x30 (lr) and x9 (unused) */ mov x3, x30 bl __asm_invalidate_tlb_all /* From here on we're running safely with caches disabled */ /* Set TTBR to our first argument */ switch_el x4, 3f, 2f, 1f 3: msr ttbr0_el3, x0 b 0f 2: msr ttbr0_el2, x0 b 0f 1: msr ttbr0_el1, x0 0: isb /* Restore original SCTLR and thus enable caches again */ switch_el x4, 3f, 2f, 1f 3: msr sctlr_el3, x2 b 0f 2: msr sctlr_el2, x2 b 0f 1: msr sctlr_el1, x2 0: isb ret x3 ENDPROC(__asm_switch_ttbr) .popsection
4ms/stm32mp1-baremetal
4,382
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/armv8/exceptions.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * (C) Copyright 2013 * David Feng <fenghua@phytium.com.cn> */ #include <asm-offsets.h> #include <config.h> #include <asm/ptrace.h> #include <asm/macro.h> #include <linux/linkage.h> /* * AArch64 exception vectors: * We have four types of exceptions: * - synchronous: traps, data aborts, undefined instructions, ... * - IRQ: group 1 (normal) interrupts * - FIQ: group 0 or secure interrupts * - SError: fatal system errors * There are entries for all four of those for different contexts: * - from same exception level, when using the SP_EL0 stack pointer * - from same exception level, when using the SP_ELx stack pointer * - from lower exception level, when this is AArch64 * - from lower exception level, when this is AArch32 * Each of those 16 entries have space for 32 instructions, each entry must * be 128 byte aligned, the whole table must be 2K aligned. * The 32 instructions are not enough to save and restore all registers and * to branch to the actual handler, so we split this up: * Each entry saves the LR, branches to the save routine, then to the actual * handler, then to the restore routine. The save and restore routines are * each split in half and stuffed in the unused gap between the entries. * Also as we do not run anything in a lower exception level, we just provide * the first 8 entries for exceptions from the same EL. */ .align 11 .globl vectors vectors: .align 7 /* Current EL Synchronous Thread */ stp x29, x30, [sp, #-16]! bl _exception_entry bl do_bad_sync b exception_exit /* * Save (most of) the GP registers to the stack frame. * This is the first part of the shared routine called into from all entries. */ _exception_entry: stp x27, x28, [sp, #-16]! stp x25, x26, [sp, #-16]! stp x23, x24, [sp, #-16]! stp x21, x22, [sp, #-16]! stp x19, x20, [sp, #-16]! stp x17, x18, [sp, #-16]! stp x15, x16, [sp, #-16]! stp x13, x14, [sp, #-16]! stp x11, x12, [sp, #-16]! stp x9, x10, [sp, #-16]! stp x7, x8, [sp, #-16]! 
stp x5, x6, [sp, #-16]! stp x3, x4, [sp, #-16]! stp x1, x2, [sp, #-16]! b _save_el_regs /* jump to the second part */ .align 7 /* Current EL IRQ Thread */ stp x29, x30, [sp, #-16]! bl _exception_entry bl do_bad_irq b exception_exit /* * Save exception specific context: ESR and ELR, for all exception levels. * This is the second part of the shared routine called into from all entries. */ _save_el_regs: /* Could be running at EL3/EL2/EL1 */ switch_el x11, 3f, 2f, 1f 3: mrs x1, esr_el3 mrs x2, elr_el3 b 0f 2: mrs x1, esr_el2 mrs x2, elr_el2 b 0f 1: mrs x1, esr_el1 mrs x2, elr_el1 0: stp x2, x0, [sp, #-16]! mov x0, sp ret .align 7 /* Current EL FIQ Thread */ stp x29, x30, [sp, #-16]! bl _exception_entry bl do_bad_fiq /* falling through to _exception_exit */ /* * Restore the exception return address, for all exception levels. * This is the first part of the shared routine called into from all entries. */ exception_exit: ldp x2, x0, [sp],#16 switch_el x11, 3f, 2f, 1f 3: msr elr_el3, x2 b _restore_regs 2: msr elr_el2, x2 b _restore_regs 1: msr elr_el1, x2 b _restore_regs /* jump to the second part */ .align 7 /* Current EL Error Thread */ stp x29, x30, [sp, #-16]! bl _exception_entry bl do_bad_error b exception_exit /* * Restore the general purpose registers from the exception stack, then return. * This is the second part of the shared routine called into from all entries. */ _restore_regs: ldp x1, x2, [sp],#16 ldp x3, x4, [sp],#16 ldp x5, x6, [sp],#16 ldp x7, x8, [sp],#16 ldp x9, x10, [sp],#16 ldp x11, x12, [sp],#16 ldp x13, x14, [sp],#16 ldp x15, x16, [sp],#16 ldp x17, x18, [sp],#16 ldp x19, x20, [sp],#16 ldp x21, x22, [sp],#16 ldp x23, x24, [sp],#16 ldp x25, x26, [sp],#16 ldp x27, x28, [sp],#16 ldp x29, x30, [sp],#16 eret .align 7 /* Current EL (SP_ELx) Synchronous Handler */ stp x29, x30, [sp, #-16]! bl _exception_entry bl do_sync b exception_exit .align 7 /* Current EL (SP_ELx) IRQ Handler */ stp x29, x30, [sp, #-16]! 
bl _exception_entry bl do_irq b exception_exit .align 7 /* Current EL (SP_ELx) FIQ Handler */ stp x29, x30, [sp, #-16]! bl _exception_entry bl do_fiq b exception_exit .align 7 /* Current EL (SP_ELx) Error Handler */ stp x29, x30, [sp, #-16]! bl _exception_entry bl do_error b exception_exit
4ms/stm32mp1-baremetal
1,131
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/armv8/transition.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * (C) Copyright 2013 * David Feng <fenghua@phytium.com.cn> */ #include <asm-offsets.h> #include <config.h> #include <linux/linkage.h> #include <asm/macro.h> .pushsection .text.armv8_switch_to_el2, "ax" ENTRY(armv8_switch_to_el2) switch_el x6, 1f, 0f, 0f 0: cmp x5, #ES_TO_AARCH64 b.eq 2f /* * When loading 32-bit kernel, it will jump * to secure firmware again, and never return. */ bl armv8_el2_to_aarch32 2: /* * x4 is kernel entry point or switch_to_el1 * if CONFIG_ARMV8_SWITCH_TO_EL1 is defined. * When running in EL2 now, jump to the * address saved in x4. */ br x4 1: armv8_switch_to_el2_m x4, x5, x6 ENDPROC(armv8_switch_to_el2) .popsection .pushsection .text.armv8_switch_to_el1, "ax" ENTRY(armv8_switch_to_el1) switch_el x6, 0f, 1f, 0f 0: /* x4 is kernel entry point. When running in EL1 * now, jump to the address saved in x4. */ br x4 1: armv8_switch_to_el1_m x4, x5, x6 ENDPROC(armv8_switch_to_el1) .popsection .pushsection .text.armv8_el2_to_aarch32, "ax" WEAK(armv8_el2_to_aarch32) ret ENDPROC(armv8_el2_to_aarch32) .popsection
4ms/stm32mp1-baremetal
9,297
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/armv8/psci.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright 2016 Freescale Semiconductor, Inc. * Author: Hongbo Zhang <hongbo.zhang@nxp.com> * This file implements LS102X platform PSCI SYSTEM-SUSPEND function */ #include <config.h> #include <linux/linkage.h> #include <asm/psci.h> #include <asm/secure.h> /* Default PSCI function, return -1, Not Implemented */ #define PSCI_DEFAULT(__fn) \ ENTRY(__fn); \ mov w0, #ARM_PSCI_RET_NI; \ ret; \ ENDPROC(__fn); \ .weak __fn /* PSCI function and ID table definition*/ #define PSCI_TABLE(__id, __fn) \ .quad __id; \ .quad __fn .pushsection ._secure.text, "ax" /* 32 bits PSCI default functions */ PSCI_DEFAULT(psci_version) PSCI_DEFAULT(psci_cpu_suspend) PSCI_DEFAULT(psci_cpu_off) PSCI_DEFAULT(psci_cpu_on) PSCI_DEFAULT(psci_affinity_info) PSCI_DEFAULT(psci_migrate) PSCI_DEFAULT(psci_migrate_info_type) PSCI_DEFAULT(psci_migrate_info_up_cpu) PSCI_DEFAULT(psci_system_off) PSCI_DEFAULT(psci_system_reset) PSCI_DEFAULT(psci_features) PSCI_DEFAULT(psci_cpu_freeze) PSCI_DEFAULT(psci_cpu_default_suspend) PSCI_DEFAULT(psci_node_hw_state) PSCI_DEFAULT(psci_system_suspend) PSCI_DEFAULT(psci_set_suspend_mode) PSCI_DEFAULT(psi_stat_residency) PSCI_DEFAULT(psci_stat_count) .align 3 _psci_32_table: PSCI_TABLE(ARM_PSCI_FN_CPU_SUSPEND, psci_cpu_suspend) PSCI_TABLE(ARM_PSCI_FN_CPU_OFF, psci_cpu_off) PSCI_TABLE(ARM_PSCI_FN_CPU_ON, psci_cpu_on) PSCI_TABLE(ARM_PSCI_FN_MIGRATE, psci_migrate) PSCI_TABLE(ARM_PSCI_0_2_FN_PSCI_VERSION, psci_version) PSCI_TABLE(ARM_PSCI_0_2_FN_CPU_SUSPEND, psci_cpu_suspend) PSCI_TABLE(ARM_PSCI_0_2_FN_CPU_OFF, psci_cpu_off) PSCI_TABLE(ARM_PSCI_0_2_FN_CPU_ON, psci_cpu_on) PSCI_TABLE(ARM_PSCI_0_2_FN_AFFINITY_INFO, psci_affinity_info) PSCI_TABLE(ARM_PSCI_0_2_FN_MIGRATE, psci_migrate) PSCI_TABLE(ARM_PSCI_0_2_FN_MIGRATE_INFO_TYPE, psci_migrate_info_type) PSCI_TABLE(ARM_PSCI_0_2_FN_MIGRATE_INFO_UP_CPU, psci_migrate_info_up_cpu) PSCI_TABLE(ARM_PSCI_0_2_FN_SYSTEM_OFF, psci_system_off) PSCI_TABLE(ARM_PSCI_0_2_FN_SYSTEM_RESET, 
psci_system_reset) PSCI_TABLE(ARM_PSCI_1_0_FN_PSCI_FEATURES, psci_features) PSCI_TABLE(ARM_PSCI_1_0_FN_CPU_FREEZE, psci_cpu_freeze) PSCI_TABLE(ARM_PSCI_1_0_FN_CPU_DEFAULT_SUSPEND, psci_cpu_default_suspend) PSCI_TABLE(ARM_PSCI_1_0_FN_NODE_HW_STATE, psci_node_hw_state) PSCI_TABLE(ARM_PSCI_1_0_FN_SYSTEM_SUSPEND, psci_system_suspend) PSCI_TABLE(ARM_PSCI_1_0_FN_SET_SUSPEND_MODE, psci_set_suspend_mode) PSCI_TABLE(ARM_PSCI_1_0_FN_STAT_RESIDENCY, psi_stat_residency) PSCI_TABLE(ARM_PSCI_1_0_FN_STAT_COUNT, psci_stat_count) PSCI_TABLE(0, 0) /* 64 bits PSCI default functions */ PSCI_DEFAULT(psci_cpu_suspend_64) PSCI_DEFAULT(psci_cpu_on_64) PSCI_DEFAULT(psci_affinity_info_64) PSCI_DEFAULT(psci_migrate_64) PSCI_DEFAULT(psci_migrate_info_up_cpu_64) PSCI_DEFAULT(psci_cpu_default_suspend_64) PSCI_DEFAULT(psci_node_hw_state_64) PSCI_DEFAULT(psci_system_suspend_64) PSCI_DEFAULT(psci_stat_residency_64) PSCI_DEFAULT(psci_stat_count_64) .align 3 _psci_64_table: PSCI_TABLE(ARM_PSCI_0_2_FN64_CPU_SUSPEND, psci_cpu_suspend_64) PSCI_TABLE(ARM_PSCI_0_2_FN64_CPU_ON, psci_cpu_on_64) PSCI_TABLE(ARM_PSCI_0_2_FN64_AFFINITY_INFO, psci_affinity_info_64) PSCI_TABLE(ARM_PSCI_0_2_FN64_MIGRATE, psci_migrate_64) PSCI_TABLE(ARM_PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU, psci_migrate_info_up_cpu_64) PSCI_TABLE(ARM_PSCI_1_0_FN64_CPU_DEFAULT_SUSPEND, psci_cpu_default_suspend_64) PSCI_TABLE(ARM_PSCI_1_0_FN64_NODE_HW_STATE, psci_node_hw_state_64) PSCI_TABLE(ARM_PSCI_1_0_FN64_SYSTEM_SUSPEND, psci_system_suspend_64) PSCI_TABLE(ARM_PSCI_1_0_FN64_STAT_RESIDENCY, psci_stat_residency_64) PSCI_TABLE(ARM_PSCI_1_0_FN64_STAT_COUNT, psci_stat_count_64) PSCI_TABLE(0, 0) .macro psci_enter /* PSCI call is Fast Call(atomic), so mask DAIF */ mrs x15, DAIF stp x15, xzr, [sp, #-16]! ldr x15, =0x3C0 msr DAIF, x15 /* SMC convention, x18 ~ x30 should be saved by callee */ stp x29, x30, [sp, #-16]! stp x27, x28, [sp, #-16]! stp x25, x26, [sp, #-16]! stp x23, x24, [sp, #-16]! stp x21, x22, [sp, #-16]! stp x19, x20, [sp, #-16]! 
mrs x15, elr_el3 stp x18, x15, [sp, #-16]! .endm .macro psci_return /* restore registers */ ldp x18, x15, [sp], #16 msr elr_el3, x15 ldp x19, x20, [sp], #16 ldp x21, x22, [sp], #16 ldp x23, x24, [sp], #16 ldp x25, x26, [sp], #16 ldp x27, x28, [sp], #16 ldp x29, x30, [sp], #16 /* restore DAIF */ ldp x15, xzr, [sp], #16 msr DAIF, x15 eret .endm /* Caller must put PSCI function-ID table base in x9 */ handle_psci: psci_enter 1: ldr x10, [x9] /* Load PSCI function table */ cbz x10, 3f /* If reach the end, bail out */ cmp x10, x0 b.eq 2f /* PSCI function found */ add x9, x9, #16 /* If not match, try next entry */ b 1b 2: ldr x11, [x9, #8] /* Load PSCI function */ blr x11 /* Call PSCI function */ psci_return 3: mov x0, #ARM_PSCI_RET_NI psci_return /* * Handle SiP service functions defined in SiP service function table. * Use DECLARE_SECURE_SVC(_name, _id, _fn) to add platform specific SiP * service function into the SiP service function table. * SiP service function table is located in '._secure_svc_tbl_entries' section, * which is next to '._secure.text' section. 
*/ handle_svc: adr x9, __secure_svc_tbl_start adr x10, __secure_svc_tbl_end subs x12, x10, x9 /* Get number of entries in table */ b.eq 2f /* Make sure SiP function table is not empty */ psci_enter 1: ldr x10, [x9] /* Load SiP function table */ ldr x11, [x9, #8] cmp w10, w0 b.eq 2b /* SiP service function found */ add x9, x9, #SECURE_SVC_TBL_OFFSET /* Move to next entry */ subs x12, x12, #SECURE_SVC_TBL_OFFSET b.eq 3b /* If reach the end, bail out */ b 1b 2: ldr x0, =0xFFFFFFFF eret handle_smc32: /* SMC function ID 0x84000000-0x8400001F: 32 bits PSCI */ ldr w9, =0x8400001F cmp w0, w9 b.gt handle_svc ldr w9, =0x84000000 cmp w0, w9 b.lt handle_svc adr x9, _psci_32_table b handle_psci handle_smc64: /* check SMC32 or SMC64 calls */ ubfx x9, x0, #30, #1 cbz x9, handle_smc32 /* SMC function ID 0xC4000000-0xC400001F: 64 bits PSCI */ ldr x9, =0xC400001F cmp x0, x9 b.gt handle_svc ldr x9, =0xC4000000 cmp x0, x9 b.lt handle_svc adr x9, _psci_64_table b handle_psci /* * Get CPU ID from MPIDR, suppose every cluster has same number of CPU cores, * Platform with asymmetric clusters should implement their own interface. * In case this function being called by other platform's C code, the ARM * Architecture Procedure Call Standard is considered, e.g. register X0 is * used for the return value, while in this PSCI environment, X0 usually holds * the SMC function identifier, so X0 should be saved by caller function. 
*/ ENTRY(psci_get_cpu_id) #ifdef CONFIG_ARMV8_PSCI_CPUS_PER_CLUSTER mrs x9, MPIDR_EL1 ubfx x9, x9, #8, #8 ldr x10, =CONFIG_ARMV8_PSCI_CPUS_PER_CLUSTER mul x9, x10, x9 #else mov x9, xzr #endif mrs x10, MPIDR_EL1 ubfx x10, x10, #0, #8 add x0, x10, x9 ret ENDPROC(psci_get_cpu_id) .weak psci_get_cpu_id /* CPU ID input in x0, stack top output in x0*/ LENTRY(psci_get_cpu_stack_top) adr x9, __secure_stack_end lsl x0, x0, #ARM_PSCI_STACK_SHIFT sub x0, x9, x0 ret ENDPROC(psci_get_cpu_stack_top) unhandled_exception: b unhandled_exception /* simply dead loop */ handle_sync: mov x15, x30 mov x14, x0 bl psci_get_cpu_id bl psci_get_cpu_stack_top mov x9, #1 msr spsel, x9 mov sp, x0 mov x0, x14 mov x30, x15 mrs x9, esr_el3 ubfx x9, x9, #26, #6 cmp x9, #0x13 b.eq handle_smc32 cmp x9, #0x17 b.eq handle_smc64 b unhandled_exception #ifdef CONFIG_ARMV8_EA_EL3_FIRST /* * Override this function if custom error handling is * needed for asynchronous aborts */ ENTRY(plat_error_handler) ret ENDPROC(plat_error_handler) .weak plat_error_handler handle_error: bl psci_get_cpu_id bl psci_get_cpu_stack_top mov x9, #1 msr spsel, x9 mov sp, x0 bl plat_error_handler /* Platform specific error handling */ deadloop: b deadloop /* Never return */ #endif .align 11 .globl el3_exception_vectors el3_exception_vectors: b unhandled_exception /* Sync, Current EL using SP0 */ .align 7 b unhandled_exception /* IRQ, Current EL using SP0 */ .align 7 b unhandled_exception /* FIQ, Current EL using SP0 */ .align 7 b unhandled_exception /* SError, Current EL using SP0 */ .align 7 b unhandled_exception /* Sync, Current EL using SPx */ .align 7 b unhandled_exception /* IRQ, Current EL using SPx */ .align 7 b unhandled_exception /* FIQ, Current EL using SPx */ .align 7 b unhandled_exception /* SError, Current EL using SPx */ .align 7 b handle_sync /* Sync, Lower EL using AArch64 */ .align 7 b unhandled_exception /* IRQ, Lower EL using AArch64 */ .align 7 b unhandled_exception /* FIQ, Lower EL using AArch64 */ .align 7 
#ifdef CONFIG_ARMV8_EA_EL3_FIRST b handle_error /* SError, Lower EL using AArch64 */ #else b unhandled_exception /* SError, Lower EL using AArch64 */ #endif .align 7 b unhandled_exception /* Sync, Lower EL using AArch32 */ .align 7 b unhandled_exception /* IRQ, Lower EL using AArch32 */ .align 7 b unhandled_exception /* FIQ, Lower EL using AArch32 */ .align 7 b unhandled_exception /* SError, Lower EL using AArch32 */ ENTRY(psci_setup_vectors) adr x0, el3_exception_vectors msr vbar_el3, x0 ret ENDPROC(psci_setup_vectors) ENTRY(psci_arch_init) ret ENDPROC(psci_arch_init) .weak psci_arch_init .popsection
4ms/stm32mp1-baremetal
8,322
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/armv8/start.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * (C) Copyright 2013 * David Feng <fenghua@phytium.com.cn> */ #include <asm-offsets.h> #include <config.h> #include <linux/linkage.h> #include <asm/macro.h> #include <asm/armv8/mmu.h> /************************************************************************* * * Startup Code (reset vector) * *************************************************************************/ .globl _start _start: #if defined(CONFIG_LINUX_KERNEL_IMAGE_HEADER) #include <asm/boot0-linux-kernel-header.h> #elif defined(CONFIG_ENABLE_ARM_SOC_BOOT0_HOOK) /* * Various SoCs need something special and SoC-specific up front in * order to boot, allow them to set that in their boot0.h file and then * use it here. */ #include <asm/arch/boot0.h> #else b reset #endif .align 3 .globl _TEXT_BASE _TEXT_BASE: .quad CONFIG_SYS_TEXT_BASE /* * These are defined in the linker script. */ .globl _end_ofs _end_ofs: .quad _end - _start .globl _bss_start_ofs _bss_start_ofs: .quad __bss_start - _start .globl _bss_end_ofs _bss_end_ofs: .quad __bss_end - _start reset: /* Allow the board to save important registers */ b save_boot_params .globl save_boot_params_ret save_boot_params_ret: #if CONFIG_POSITION_INDEPENDENT /* * Fix .rela.dyn relocations. This allows U-Boot to be loaded to and * executed at a different address than it was linked at. */ pie_fixup: adr x0, _start /* x0 <- Runtime value of _start */ ldr x1, _TEXT_BASE /* x1 <- Linked value of _start */ sub x9, x0, x1 /* x9 <- Run-vs-link offset */ adr x2, __rel_dyn_start /* x2 <- Runtime &__rel_dyn_start */ adr x3, __rel_dyn_end /* x3 <- Runtime &__rel_dyn_end */ pie_fix_loop: ldp x0, x1, [x2], #16 /* (x0, x1) <- (Link location, fixup) */ ldr x4, [x2], #8 /* x4 <- addend */ cmp w1, #1027 /* relative fixup? 
*/ bne pie_skip_reloc /* relative fix: store addend plus offset at dest location */ add x0, x0, x9 add x4, x4, x9 str x4, [x0] pie_skip_reloc: cmp x2, x3 b.lo pie_fix_loop pie_fixup_done: #endif #ifdef CONFIG_SYS_RESET_SCTRL bl reset_sctrl #endif #if defined(CONFIG_ARMV8_SPL_EXCEPTION_VECTORS) || !defined(CONFIG_SPL_BUILD) .macro set_vbar, regname, reg msr \regname, \reg .endm adr x0, vectors #else .macro set_vbar, regname, reg .endm #endif /* * Could be EL3/EL2/EL1, Initial State: * Little Endian, MMU Disabled, i/dCache Disabled */ switch_el x1, 3f, 2f, 1f 3: set_vbar vbar_el3, x0 mrs x0, scr_el3 orr x0, x0, #0xf /* SCR_EL3.NS|IRQ|FIQ|EA */ msr scr_el3, x0 msr cptr_el3, xzr /* Enable FP/SIMD */ #ifdef COUNTER_FREQUENCY ldr x0, =COUNTER_FREQUENCY msr cntfrq_el0, x0 /* Initialize CNTFRQ */ #endif b 0f 2: set_vbar vbar_el2, x0 mov x0, #0x33ff msr cptr_el2, x0 /* Enable FP/SIMD */ b 0f 1: set_vbar vbar_el1, x0 mov x0, #3 << 20 msr cpacr_el1, x0 /* Enable FP/SIMD */ 0: /* * Enable SMPEN bit for coherency. * This register is not architectural but at the moment * this bit should be set for A53/A57/A72. 
*/ #ifdef CONFIG_ARMV8_SET_SMPEN switch_el x1, 3f, 1f, 1f 3: mrs x0, S3_1_c15_c2_1 /* cpuectlr_el1 */ orr x0, x0, #0x40 msr S3_1_c15_c2_1, x0 1: #endif /* Apply ARM core specific erratas */ bl apply_core_errata /* * Cache/BPB/TLB Invalidate * i-cache is invalidated before enabled in icache_enable() * tlb is invalidated before mmu is enabled in dcache_enable() * d-cache is invalidated before enabled in dcache_enable() */ /* Processor specific initialization */ bl lowlevel_init #if defined(CONFIG_ARMV8_SPIN_TABLE) && !defined(CONFIG_SPL_BUILD) branch_if_master x0, x1, master_cpu b spin_table_secondary_jump /* never return */ #elif defined(CONFIG_ARMV8_MULTIENTRY) branch_if_master x0, x1, master_cpu /* * Slave CPUs */ slave_cpu: wfe ldr x1, =CPU_RELEASE_ADDR ldr x0, [x1] cbz x0, slave_cpu br x0 /* branch to the given address */ #endif /* CONFIG_ARMV8_MULTIENTRY */ master_cpu: bl _main #ifdef CONFIG_SYS_RESET_SCTRL reset_sctrl: switch_el x1, 3f, 2f, 1f 3: mrs x0, sctlr_el3 b 0f 2: mrs x0, sctlr_el2 b 0f 1: mrs x0, sctlr_el1 0: ldr x1, =0xfdfffffa and x0, x0, x1 switch_el x1, 6f, 5f, 4f 6: msr sctlr_el3, x0 b 7f 5: msr sctlr_el2, x0 b 7f 4: msr sctlr_el1, x0 7: dsb sy isb b __asm_invalidate_tlb_all ret #endif /*-----------------------------------------------------------------------*/ WEAK(apply_core_errata) mov x29, lr /* Save LR */ /* For now, we support Cortex-A53, Cortex-A57 specific errata */ /* Check if we are running on a Cortex-A53 core */ branch_if_a53_core x0, apply_a53_core_errata /* Check if we are running on a Cortex-A57 core */ branch_if_a57_core x0, apply_a57_core_errata 0: mov lr, x29 /* Restore LR */ ret apply_a53_core_errata: #ifdef CONFIG_ARM_ERRATA_855873 mrs x0, midr_el1 tst x0, #(0xf << 20) b.ne 0b mrs x0, midr_el1 and x0, x0, #0xf cmp x0, #3 b.lt 0b mrs x0, S3_1_c15_c2_0 /* cpuactlr_el1 */ /* Enable data cache clean as data cache clean/invalidate */ orr x0, x0, #1 << 44 msr S3_1_c15_c2_0, x0 /* cpuactlr_el1 */ #endif b 0b apply_a57_core_errata: 
#ifdef CONFIG_ARM_ERRATA_828024 mrs x0, S3_1_c15_c2_0 /* cpuactlr_el1 */ /* Disable non-allocate hint of w-b-n-a memory type */ orr x0, x0, #1 << 49 /* Disable write streaming no L1-allocate threshold */ orr x0, x0, #3 << 25 /* Disable write streaming no-allocate threshold */ orr x0, x0, #3 << 27 msr S3_1_c15_c2_0, x0 /* cpuactlr_el1 */ #endif #ifdef CONFIG_ARM_ERRATA_826974 mrs x0, S3_1_c15_c2_0 /* cpuactlr_el1 */ /* Disable speculative load execution ahead of a DMB */ orr x0, x0, #1 << 59 msr S3_1_c15_c2_0, x0 /* cpuactlr_el1 */ #endif #ifdef CONFIG_ARM_ERRATA_833471 mrs x0, S3_1_c15_c2_0 /* cpuactlr_el1 */ /* FPSCR write flush. * Note that in some cases where a flush is unnecessary this could impact performance. */ orr x0, x0, #1 << 38 msr S3_1_c15_c2_0, x0 /* cpuactlr_el1 */ #endif #ifdef CONFIG_ARM_ERRATA_829520 mrs x0, S3_1_c15_c2_0 /* cpuactlr_el1 */ /* Disable Indirect Predictor bit will prevent this erratum from occurring * Note that in some cases where a flush is unnecessary this could impact performance. */ orr x0, x0, #1 << 4 msr S3_1_c15_c2_0, x0 /* cpuactlr_el1 */ #endif #ifdef CONFIG_ARM_ERRATA_833069 mrs x0, S3_1_c15_c2_0 /* cpuactlr_el1 */ /* Disable Enable Invalidates of BTB bit */ and x0, x0, #0xE msr S3_1_c15_c2_0, x0 /* cpuactlr_el1 */ #endif b 0b ENDPROC(apply_core_errata) /*-----------------------------------------------------------------------*/ WEAK(lowlevel_init) mov x29, lr /* Save LR */ #if defined(CONFIG_GICV2) || defined(CONFIG_GICV3) branch_if_slave x0, 1f ldr x0, =GICD_BASE bl gic_init_secure 1: #if defined(CONFIG_GICV3) ldr x0, =GICR_BASE bl gic_init_secure_percpu #elif defined(CONFIG_GICV2) ldr x0, =GICD_BASE ldr x1, =GICC_BASE bl gic_init_secure_percpu #endif #endif #ifdef CONFIG_ARMV8_MULTIENTRY branch_if_master x0, x1, 2f /* * Slave should wait for master clearing spin table. * This sync prevent salves observing incorrect * value of spin table and jumping to wrong place. 
*/ #if defined(CONFIG_GICV2) || defined(CONFIG_GICV3) #ifdef CONFIG_GICV2 ldr x0, =GICC_BASE #endif bl gic_wait_for_interrupt #endif /* * All slaves will enter EL2 and optionally EL1. */ adr x4, lowlevel_in_el2 ldr x5, =ES_TO_AARCH64 bl armv8_switch_to_el2 lowlevel_in_el2: #ifdef CONFIG_ARMV8_SWITCH_TO_EL1 adr x4, lowlevel_in_el1 ldr x5, =ES_TO_AARCH64 bl armv8_switch_to_el1 lowlevel_in_el1: #endif #endif /* CONFIG_ARMV8_MULTIENTRY */ 2: mov lr, x29 /* Restore LR */ ret ENDPROC(lowlevel_init) WEAK(smp_kick_all_cpus) /* Kick secondary cpus up by SGI 0 interrupt */ #if defined(CONFIG_GICV2) || defined(CONFIG_GICV3) ldr x0, =GICD_BASE b gic_kick_secondary_cpus #endif ret ENDPROC(smp_kick_all_cpus) /*-----------------------------------------------------------------------*/ ENTRY(c_runtime_cpu_setup) #if defined(CONFIG_ARMV8_SPL_EXCEPTION_VECTORS) || !defined(CONFIG_SPL_BUILD) /* Relocate vBAR */ adr x0, vectors switch_el x1, 3f, 2f, 1f 3: msr vbar_el3, x0 b 0f 2: msr vbar_el2, x0 b 0f 1: msr vbar_el1, x0 0: #endif ret ENDPROC(c_runtime_cpu_setup) WEAK(save_boot_params) b save_boot_params_ret /* back to my caller */ ENDPROC(save_boot_params)
4ms/stm32mp1-baremetal
1,208
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/armv8/smccc-call.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2015, Linaro Limited */ #include <linux/linkage.h> #include <linux/arm-smccc.h> #include <generated/asm-offsets.h> #ifdef CONFIG_EFI_LOADER .section .text.efi_runtime #endif .macro SMCCC instr .cfi_startproc \instr #0 ldr x4, [sp] stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS] stp x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS] ldr x4, [sp, #8] cbz x4, 1f /* no quirk structure */ ldr x9, [x4, #ARM_SMCCC_QUIRK_ID_OFFS] cmp x9, #ARM_SMCCC_QUIRK_QCOM_A6 b.ne 1f str x6, [x4, ARM_SMCCC_QUIRK_STATE_OFFS] 1: ret .cfi_endproc .endm /* * void arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2, * unsigned long a3, unsigned long a4, unsigned long a5, * unsigned long a6, unsigned long a7, struct arm_smccc_res *res, * struct arm_smccc_quirk *quirk) */ ENTRY(__arm_smccc_smc) SMCCC smc ENDPROC(__arm_smccc_smc) /* * void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2, * unsigned long a3, unsigned long a4, unsigned long a5, * unsigned long a6, unsigned long a7, struct arm_smccc_res *res, * struct arm_smccc_quirk *quirk) */ ENTRY(__arm_smccc_hvc) SMCCC hvc ENDPROC(__arm_smccc_hvc)
4ms/stm32mp1-baremetal
1,052
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/armv8/lowlevel_init.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * A lowlevel_init function that sets up the stack to call a C function to * perform further init. */ #include <asm-offsets.h> #include <config.h> #include <linux/linkage.h> ENTRY(lowlevel_init) /* * Setup a temporary stack. Global data is not available yet. */ #if defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_STACK) ldr w0, =CONFIG_SPL_STACK #else ldr w0, =CONFIG_SYS_INIT_SP_ADDR #endif bic sp, x0, #0xf /* 16-byte alignment for ABI compliance */ /* * Save the old LR(passed in x29) and the current LR to stack */ stp x29, x30, [sp, #-16]! /* * Call the very early init function. This should do only the * absolute bare minimum to get started. It should not: * * - set up DRAM * - use global_data * - clear BSS * - try to start a console * * For boards with SPL this should be empty since SPL can do all of * this init in the SPL board_init_f() function which is called * immediately after this. */ bl s_init ldp x29, x30, [sp] ret ENDPROC(lowlevel_init)
4ms/stm32mp1-baremetal
1,430
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/armv8/sec_firmware_asm.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright 2016 NXP Semiconductor, Inc. */ #include <config.h> #include <linux/linkage.h> #include <asm/system.h> #include <asm/macro.h> WEAK(_sec_firmware_entry) /* * x0: Secure Firmware entry point * x1: Exception return address Low * x2: Exception return address High */ /* Save stack pointer for EL2 */ mov x3, sp msr sp_el2, x3 /* Set exception return address hold pointer */ adr x4, 1f mov x3, x4 #ifdef CONFIG_ARMV8_SEC_FIRMWARE_ERET_ADDR_REVERT rev w3, w3 #endif str w3, [x1] lsr x3, x4, #32 #ifdef CONFIG_ARMV8_SEC_FIRMWARE_ERET_ADDR_REVERT rev w3, w3 #endif str w3, [x2] /* Call SEC monitor */ br x0 1: mov x0, #0 ret ENDPROC(_sec_firmware_entry) #ifdef CONFIG_SEC_FIRMWARE_ARMV8_PSCI ENTRY(_sec_firmware_support_psci_version) mov x0, 0x84000000 mov x1, 0x0 mov x2, 0x0 mov x3, 0x0 smc #0 ret ENDPROC(_sec_firmware_support_psci_version) /* * Switch from AArch64 EL2 to AArch32 EL2 * @param inputs: * x0: argument, zero * x1: machine nr * x2: fdt address * x3: input argument * x4: kernel entry point * @param outputs for secure firmware: * x0: function id * x1: kernel entry point * x2: machine nr * x3: fdt address */ ENTRY(armv8_el2_to_aarch32) mov x3, x2 mov x2, x1 mov x1, x4 ldr x0, =0xc200ff17 smc #0 ret ENDPROC(armv8_el2_to_aarch32) #endif
4ms/stm32mp1-baremetal
2,388
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/arm920t/start.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * armboot - Startup Code for ARM920 CPU-core * * Copyright (c) 2001 Marius Gröger <mag@sysgo.de> * Copyright (c) 2002 Alex Züpke <azu@sysgo.de> * Copyright (c) 2002 Gary Jennejohn <garyj@denx.de> */ #include <asm-offsets.h> #include <common.h> #include <config.h> /* ************************************************************************* * * Startup Code (called from the ARM reset exception vector) * * do important init only if we don't start from memory! * relocate armboot to ram * setup stack * jump to second stage * ************************************************************************* */ .globl reset reset: /* * set the cpu to SVC32 mode */ mrs r0, cpsr bic r0, r0, #0x1f orr r0, r0, #0xd3 msr cpsr, r0 #if defined(CONFIG_AT91RM9200DK) || defined(CONFIG_AT91RM9200EK) /* * relocate exception table */ ldr r0, =_start ldr r1, =0x0 mov r2, #16 copyex: subs r2, r2, #1 ldr r3, [r0], #4 str r3, [r1], #4 bne copyex #endif /* * we do sys-critical inits only at reboot, * not when booting from ram! 
*/ #ifndef CONFIG_SKIP_LOWLEVEL_INIT bl cpu_init_crit #endif bl _main /*------------------------------------------------------------------------------*/ .globl c_runtime_cpu_setup c_runtime_cpu_setup: mov pc, lr /* ************************************************************************* * * CPU_init_critical registers * * setup important registers * setup memory timing * ************************************************************************* */ #ifndef CONFIG_SKIP_LOWLEVEL_INIT cpu_init_crit: /* * flush v4 I/D caches */ mov r0, #0 mcr p15, 0, r0, c7, c7, 0 /* flush v3/v4 cache */ mcr p15, 0, r0, c8, c7, 0 /* flush v4 TLB */ /* * disable MMU stuff and caches */ mrc p15, 0, r0, c1, c0, 0 bic r0, r0, #0x00002300 @ clear bits 13, 9:8 (--V- --RS) bic r0, r0, #0x00000087 @ clear bits 7, 2:0 (B--- -CAM) orr r0, r0, #0x00000002 @ set bit 1 (A) Align orr r0, r0, #0x00001000 @ set bit 12 (I) I-Cache mcr p15, 0, r0, c1, c0, 0 #ifndef CONFIG_SKIP_LOWLEVEL_INIT_ONLY /* * before relocating, we have to setup RAM timing * because memory timing is board-dependend, you will * find a lowlevel_init.S in your board directory. */ mov ip, lr bl lowlevel_init mov lr, ip #endif mov pc, lr #endif /* CONFIG_SKIP_LOWLEVEL_INIT */
4ms/stm32mp1-baremetal
4,898
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/pxa/start.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * armboot - Startup Code for XScale CPU-core * * Copyright (C) 1998 Dan Malek <dmalek@jlc.net> * Copyright (C) 1999 Magnus Damm <kieraypc01.p.y.kie.era.ericsson.se> * Copyright (C) 2000 Wolfgang Denk <wd@denx.de> * Copyright (C) 2001 Alex Zuepke <azu@sysgo.de> * Copyright (C) 2001 Marius Groger <mag@sysgo.de> * Copyright (C) 2002 Alex Zupke <azu@sysgo.de> * Copyright (C) 2002 Gary Jennejohn <garyj@denx.de> * Copyright (C) 2002 Kyle Harris <kharris@nexus-tech.net> * Copyright (C) 2003 Kai-Uwe Bloem <kai-uwe.bloem@auerswald.de> * Copyright (C) 2003 Kshitij <kshitij@ti.com> * Copyright (C) 2003 Richard Woodruff <r-woodruff2@ti.com> * Copyright (C) 2003 Robert Schwebel <r.schwebel@pengutronix.de> * Copyright (C) 2004 Texas Instruments <r-woodruff2@ti.com> * Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com> */ #include <asm-offsets.h> #include <config.h> /* ************************************************************************* * * Startup Code (reset vector) * * do important init only if we don't start from memory! * setup Memory and board specific bits prior to relocation. * relocate armboot to ram * setup stack * ************************************************************************* */ .globl reset reset: /* * set the cpu to SVC32 mode */ mrs r0,cpsr bic r0,r0,#0x1f orr r0,r0,#0xd3 msr cpsr,r0 #ifndef CONFIG_SKIP_LOWLEVEL_INIT bl cpu_init_crit #endif #ifdef CONFIG_CPU_PXA25X bl lock_cache_for_stack #endif #ifdef CONFIG_CPU_PXA27X /* * enable clock for SRAM */ ldr r0,=CKEN ldr r1,[r0] orr r1,r1,#(1 << 20) str r1,[r0] #endif bl _main /*------------------------------------------------------------------------------*/ .globl c_runtime_cpu_setup c_runtime_cpu_setup: #ifdef CONFIG_CPU_PXA25X /* * Unlock (actually, disable) the cache now that board_init_f * is done. We could do this earlier but we would need to add * a new C runtime hook, whereas c_runtime_cpu_setup already * exists. 
* As this routine is just a call to cpu_init_crit, let us * tail-optimize and do a simple branch here. */ b cpu_init_crit #else bx lr #endif /* ************************************************************************* * * CPU_init_critical registers * * setup important registers * setup memory timing * ************************************************************************* */ #if !defined(CONFIG_SKIP_LOWLEVEL_INIT) || defined(CONFIG_CPU_PXA25X) cpu_init_crit: /* * flush v4 I/D caches */ mov r0, #0 mcr p15, 0, r0, c7, c7, 0 /* Invalidate I+D+BTB caches */ mcr p15, 0, r0, c8, c7, 0 /* Invalidate Unified TLB */ /* * disable MMU stuff and caches */ mrc p15, 0, r0, c1, c0, 0 bic r0, r0, #0x00003300 @ clear bits 13:12, 9:8 (--VI --RS) bic r0, r0, #0x00000087 @ clear bits 7, 2:0 (B--- -CAM) orr r0, r0, #0x00000002 @ set bit 1 (A) Align mcr p15, 0, r0, c1, c0, 0 mov pc, lr /* back to my caller */ #endif /* !CONFIG_SKIP_LOWLEVEL_INIT || CONFIG_CPU_PXA25X */ /* * Enable MMU to use DCache as DRAM. * * This is useful on PXA25x and PXA26x in early bootstages, where there is no * other possible memory available to hold stack. 
*/ #ifdef CONFIG_CPU_PXA25X .macro CPWAIT reg mrc p15, 0, \reg, c2, c0, 0 mov \reg, \reg sub pc, pc, #4 .endm lock_cache_for_stack: /* Domain access -- enable for all CPs */ ldr r0, =0x0000ffff mcr p15, 0, r0, c3, c0, 0 /* Point TTBR to MMU table */ ldr r0, =mmutable mcr p15, 0, r0, c2, c0, 0 /* Kick in MMU, ICache, DCache, BTB */ mrc p15, 0, r0, c1, c0, 0 bic r0, #0x1b00 bic r0, #0x0087 orr r0, #0x1800 orr r0, #0x0005 mcr p15, 0, r0, c1, c0, 0 CPWAIT r0 /* Unlock Icache, Dcache */ mcr p15, 0, r0, c9, c1, 1 mcr p15, 0, r0, c9, c2, 1 /* Flush Icache, Dcache, BTB */ mcr p15, 0, r0, c7, c7, 0 /* Unlock I-TLB, D-TLB */ mcr p15, 0, r0, c10, c4, 1 mcr p15, 0, r0, c10, c8, 1 /* Flush TLB */ mcr p15, 0, r0, c8, c7, 0 /* Allocate 4096 bytes of Dcache as RAM */ /* Drain pending loads and stores */ mcr p15, 0, r0, c7, c10, 4 mov r4, #0x00 mov r5, #0x00 mov r2, #0x01 mcr p15, 0, r0, c9, c2, 0 CPWAIT r0 /* 128 lines reserved (128 x 32bytes = 4096 bytes total) */ mov r0, #128 ldr r1, =0xfffff000 alloc: mcr p15, 0, r1, c7, c2, 5 /* Drain pending loads and stores */ mcr p15, 0, r0, c7, c10, 4 strd r4, [r1], #8 strd r4, [r1], #8 strd r4, [r1], #8 strd r4, [r1], #8 subs r0, #0x01 bne alloc /* Drain pending loads and stores */ mcr p15, 0, r0, c7, c10, 4 mov r2, #0x00 mcr p15, 0, r2, c9, c2, 0 CPWAIT r0 mov pc, lr .section .mmutable, "a" mmutable: .align 14 /* 0x00000000 - 0xffe00000 : 1:1, uncached mapping */ .set __base, 0 .rept 0xfff .word (__base << 20) | 0xc12 .set __base, __base + 1 .endr /* 0xfff00000 : 1:1, cached mapping */ .word (0xfff << 20) | 0x1c1e #endif /* CONFIG_CPU_PXA25X */
4ms/stm32mp1-baremetal
5,253
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/armv7/ls102xa/psci.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright 2015 Freescale Semiconductor, Inc. * Author: Wang Dongsheng <dongsheng.wang@freescale.com> */ #include <config.h> #include <linux/linkage.h> #include <asm/armv7.h> #include <asm/arch-armv7/generictimer.h> #include <asm/psci.h> #define RCPM_TWAITSR 0x04C #define SCFG_CORE0_SFT_RST 0x130 #define SCFG_CORESRENCR 0x204 #define DCFG_CCSR_RSTCR 0x0B0 #define DCFG_CCSR_RSTCR_RESET_REQ 0x2 #define DCFG_CCSR_BRR 0x0E4 #define DCFG_CCSR_SCRATCHRW1 0x200 #define PSCI_FN_PSCI_VERSION_FEATURE_MASK 0x0 #define PSCI_FN_CPU_SUSPEND_FEATURE_MASK 0x0 #define PSCI_FN_CPU_OFF_FEATURE_MASK 0x0 #define PSCI_FN_CPU_ON_FEATURE_MASK 0x0 #define PSCI_FN_AFFINITY_INFO_FEATURE_MASK 0x0 #define PSCI_FN_SYSTEM_OFF_FEATURE_MASK 0x0 #define PSCI_FN_SYSTEM_RESET_FEATURE_MASK 0x0 #define PSCI_FN_SYSTEM_SUSPEND_FEATURE_MASK 0x0 .pushsection ._secure.text, "ax" .arch_extension sec .align 5 #define ONE_MS (COUNTER_FREQUENCY / 1000) #define RESET_WAIT (30 * ONE_MS) .globl psci_version psci_version: movw r0, #0 movt r0, #1 bx lr _ls102x_psci_supported_table: .word ARM_PSCI_0_2_FN_PSCI_VERSION .word PSCI_FN_PSCI_VERSION_FEATURE_MASK .word ARM_PSCI_0_2_FN_CPU_SUSPEND .word PSCI_FN_CPU_SUSPEND_FEATURE_MASK .word ARM_PSCI_0_2_FN_CPU_OFF .word PSCI_FN_CPU_OFF_FEATURE_MASK .word ARM_PSCI_0_2_FN_CPU_ON .word PSCI_FN_CPU_ON_FEATURE_MASK .word ARM_PSCI_0_2_FN_AFFINITY_INFO .word PSCI_FN_AFFINITY_INFO_FEATURE_MASK .word ARM_PSCI_0_2_FN_SYSTEM_OFF .word PSCI_FN_SYSTEM_OFF_FEATURE_MASK .word ARM_PSCI_0_2_FN_SYSTEM_RESET .word PSCI_FN_SYSTEM_RESET_FEATURE_MASK .word ARM_PSCI_1_0_FN_SYSTEM_SUSPEND .word PSCI_FN_SYSTEM_SUSPEND_FEATURE_MASK .word 0 .word ARM_PSCI_RET_NI .globl psci_features psci_features: adr r2, _ls102x_psci_supported_table 1: ldr r3, [r2] cmp r3, #0 beq out_psci_features cmp r1, r3 addne r2, r2, #8 bne 1b out_psci_features: ldr r0, [r2, #4] bx lr @ r0: return value ARM_PSCI_RET_SUCCESS or ARM_PSCI_RET_INVAL @ r1: input target CPU ID in MPIDR 
format, original value in r1 may be dropped @ r4: output validated CPU ID if ARM_PSCI_RET_SUCCESS returns, meaningless for @ ARM_PSCI_RET_INVAL,suppose caller saves r4 before calling LENTRY(psci_check_target_cpu_id) @ Get the real CPU number and r4, r1, #0xff mov r0, #ARM_PSCI_RET_INVAL @ Bit[31:24], bits must be zero. tst r1, #0xff000000 bxne lr @ Affinity level 2 - Cluster: only one cluster in LS1021xa. tst r1, #0xff0000 bxne lr @ Affinity level 1 - Processors: should be in 0xf00 format. lsr r1, r1, #8 teq r1, #0xf bxne lr @ Affinity level 0 - CPU: only 0, 1 are valid in LS1021xa. cmp r4, #2 bxge lr mov r0, #ARM_PSCI_RET_SUCCESS bx lr ENDPROC(psci_check_target_cpu_id) @ r1 = target CPU @ r2 = target PC .globl psci_cpu_on psci_cpu_on: push {r4, r5, r6, lr} @ Clear and Get the correct CPU number @ r1 = 0xf01 bl psci_check_target_cpu_id cmp r0, #ARM_PSCI_RET_INVAL beq out_psci_cpu_on mov r0, r4 mov r1, r2 mov r2, r3 bl psci_save mov r1, r4 @ Get DCFG base address movw r4, #(CONFIG_SYS_FSL_GUTS_ADDR & 0xffff) movt r4, #(CONFIG_SYS_FSL_GUTS_ADDR >> 16) @ Detect target CPU state ldr r2, [r4, #DCFG_CCSR_BRR] rev r2, r2 lsr r2, r2, r1 ands r2, r2, #1 beq holdoff_release @ Reset target CPU @ Get SCFG base address movw r0, #(CONFIG_SYS_FSL_SCFG_ADDR & 0xffff) movt r0, #(CONFIG_SYS_FSL_SCFG_ADDR >> 16) @ Enable CORE Soft Reset movw r5, #0 movt r5, #(1 << 15) rev r5, r5 str r5, [r0, #SCFG_CORESRENCR] @ Get CPUx offset register mov r6, #0x4 mul r6, r6, r1 add r2, r0, r6 @ Do reset on target CPU movw r5, #0 movt r5, #(1 << 15) rev r5, r5 str r5, [r2, #SCFG_CORE0_SFT_RST] @ Wait target CPU up timer_wait r2, RESET_WAIT @ Disable CORE soft reset mov r5, #0 str r5, [r0, #SCFG_CORESRENCR] holdoff_release: @ Release on target CPU ldr r2, [r4, #DCFG_CCSR_BRR] mov r6, #1 lsl r6, r6, r1 @ 32 bytes per CPU rev r6, r6 orr r2, r2, r6 str r2, [r4, #DCFG_CCSR_BRR] @ Set secondary boot entry ldr r6, =psci_cpu_entry rev r6, r6 str r6, [r4, #DCFG_CCSR_SCRATCHRW1] isb dsb @ Return mov r0, 
#ARM_PSCI_RET_SUCCESS out_psci_cpu_on: pop {r4, r5, r6, lr} bx lr .globl psci_cpu_off psci_cpu_off: bl psci_cpu_off_common 1: wfi b 1b .globl psci_affinity_info psci_affinity_info: push {lr} mov r0, #ARM_PSCI_RET_INVAL @ Verify Affinity level cmp r2, #0 bne out_affinity_info bl psci_check_target_cpu_id cmp r0, #ARM_PSCI_RET_INVAL beq out_affinity_info mov r1, r4 @ Get RCPM base address movw r4, #(CONFIG_SYS_FSL_RCPM_ADDR & 0xffff) movt r4, #(CONFIG_SYS_FSL_RCPM_ADDR >> 16) mov r0, #PSCI_AFFINITY_LEVEL_ON @ Detect target CPU state ldr r2, [r4, #RCPM_TWAITSR] rev r2, r2 lsr r2, r2, r1 ands r2, r2, #1 beq out_affinity_info mov r0, #PSCI_AFFINITY_LEVEL_OFF out_affinity_info: pop {pc} .globl psci_system_reset psci_system_reset: @ Get DCFG base address movw r1, #(CONFIG_SYS_FSL_GUTS_ADDR & 0xffff) movt r1, #(CONFIG_SYS_FSL_GUTS_ADDR >> 16) mov r2, #DCFG_CCSR_RSTCR_RESET_REQ rev r2, r2 str r2, [r1, #DCFG_CCSR_RSTCR] 1: wfi b 1b .globl psci_system_suspend psci_system_suspend: push {lr} bl ls1_system_suspend pop {pc} .popsection
4ms/stm32mp1-baremetal
2,810
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/arm926ejs/mxs/start.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * armboot - Startup Code for ARM926EJS CPU-core * * Copyright (c) 2003 Texas Instruments * * ----- Adapted for OMAP1610 OMAP730 from ARM925t code ------ * * Copyright (c) 2001 Marius Groger <mag@sysgo.de> * Copyright (c) 2002 Alex Zupke <azu@sysgo.de> * Copyright (c) 2002 Gary Jennejohn <garyj@denx.de> * Copyright (c) 2003 Richard Woodruff <r-woodruff2@ti.com> * Copyright (c) 2003 Kshitij <kshitij@ti.com> * Copyright (c) 2010 Albert Aribaud <albert.u.boot@aribaud.net> * * Change to support call back into iMX28 bootrom * Copyright (c) 2011 Marek Vasut <marek.vasut@gmail.com> * on behalf of DENX Software Engineering GmbH */ #include <asm-offsets.h> #include <config.h> #include <common.h> /* ************************************************************************* * * Startup Code (reset vector) * * do important init only if we don't start from memory! * setup Memory and board specific bits prior to relocation. * relocate armboot to ram * setup stack * ************************************************************************* */ .globl reset reset: /* * If the CPU is configured in "Wait JTAG connection mode", the stack * pointer is not configured and is zero. This will cause crash when * trying to push data onto stack right below here. Load the SP and make * it point to the end of OCRAM if the SP is zero. */ cmp sp, #0x00000000 ldreq sp, =CONFIG_SYS_INIT_SP_ADDR /* * Store all registers on old stack pointer, this will allow us later to * return to the BootROM and let the BootROM load U-Boot into RAM. * * WARNING: Register r0 and r1 are used by the BootROM to pass data * to the called code. Register r0 will contain arbitrary * data that are set in the BootStream. In case this code * was started with CALL instruction, register r1 will contain * pointer to the return value this function can then set. * The code below MUST NOT CHANGE register r0 and r1 ! 
*/ push {r0-r12,r14} /* Save control register c1 */ mrc p15, 0, r2, c1, c0, 0 push {r2} /* Set the cpu to SVC32 mode and store old CPSR register content. */ mrs r2, cpsr push {r2} bic r2, r2, #0x1f orr r2, r2, #0xd3 msr cpsr, r2 bl board_init_ll /* Restore BootROM's CPU mode (especially FIQ). */ pop {r2} msr cpsr,r2 /* * Restore c1 register. Especially set exception vector location * back to BootROM space which is required by bootrom for USB boot. */ pop {r2} mcr p15, 0, r2, c1, c0, 0 pop {r0-r12,r14} /* * In case this code was started by the CALL instruction, the register * r0 is examined by the BootROM after this code returns. The value in * r0 must be set to 0 to indicate successful return. */ mov r0, #0 bx lr
4ms/stm32mp1-baremetal
1,694
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/arm926ejs/spear/start.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * armboot - Startup Code for ARM926EJS CPU-core * * Copyright (c) 2003 Texas Instruments * * ----- Adapted for OMAP1610 OMAP730 from ARM925t code ------ * * Copyright (c) 2001 Marius Gröger <mag@sysgo.de> * Copyright (c) 2002 Alex Züpke <azu@sysgo.de> * Copyright (c) 2002 Gary Jennejohn <garyj@denx.de> * Copyright (c) 2003 Richard Woodruff <r-woodruff2@ti.com> * Copyright (c) 2003 Kshitij <kshitij@ti.com> */ #include <config.h> /* ************************************************************************* * * Startup Code (reset vector) * * The BootROM already initialized its own stack in the [0-0xb00] reserved * range of the SRAM. The SPL (in _main) will update the stack pointer to * its own SRAM area (right before the gd section). * ************************************************************************* */ .globl reset .globl back_to_bootrom reset: /* * SPL has to return back to BootROM in a few cases (eg. Ethernet boot, * UART boot, USB boot): save registers in BootROM's stack and then the * BootROM's stack pointer in the SPL's data section. */ push {r0-r12,lr} ldr r0, =bootrom_stash_sp str sp, [r0] /* * Flush v4 I/D caches */ mov r0, #0 mcr p15, 0, r0, c7, c7, 0 /* Flush v3/v4 cache */ mcr p15, 0, r0, c8, c7, 0 /* Flush v4 TLB */ /* * Enable instruction cache */ mrc p15, 0, r0, c1, c0, 0 orr r0, r0, #0x00001000 /* set bit 12 (I) I-Cache */ mcr p15, 0, r0, c1, c0, 0 /* * Go setup Memory and board specific bits prior to relocation. * This call is not supposed to return. */ b _main /* _main will call board_init_f */ back_to_bootrom: pop {r0-r12,pc}
4ms/stm32mp1-baremetal
2,744
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/arm926ejs/spear/spr_lowlevel_init.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * (C) Copyright 2006 * Vipin Kumar, ST Micoelectronics, vipin.kumar@st.com. */ #include <config.h> /* * platform specific initializations are already done in Xloader * Initializations already done include * DDR, PLLs, IP's clock enable and reset release etc */ .globl lowlevel_init lowlevel_init: mov pc, lr /* void setfreq(unsigned int device, unsigned int frequency) */ .global setfreq setfreq: stmfd sp!,{r14} stmfd sp!,{r0-r12} mov r8,sp ldr sp,SRAM_STACK_V /* Saving the function arguements for later use */ mov r4,r0 mov r5,r1 /* Putting DDR into self refresh */ ldr r0,DDR_07_V ldr r1,[r0] ldr r2,DDR_ACTIVE_V bic r1, r1, r2 str r1,[r0] ldr r0,DDR_57_V ldr r1,[r0] ldr r2,CYCLES_MASK_V bic r1, r1, r2 ldr r2,REFRESH_CYCLES_V orr r1, r1, r2, lsl #16 str r1,[r0] ldr r0,DDR_07_V ldr r1,[r0] ldr r2,SREFRESH_MASK_V orr r1, r1, r2 str r1,[r0] /* flush pipeline */ b flush .align 5 flush: /* Delay to ensure self refresh mode */ ldr r0,SREFRESH_DELAY_V delay: sub r0,r0,#1 cmp r0,#0 bne delay /* Putting system in slow mode */ ldr r0,SCCTRL_V mov r1,#2 str r1,[r0] /* Changing PLL(1/2) frequency */ mov r0,r4 mov r1,r5 cmp r4,#0 beq pll1_freq /* Change PLL2 (DDR frequency) */ ldr r6,PLL2_FREQ_V ldr r7,PLL2_CNTL_V b pll2_freq pll1_freq: /* Change PLL1 (CPU frequency) */ ldr r6,PLL1_FREQ_V ldr r7,PLL1_CNTL_V pll2_freq: mov r0,r6 ldr r1,[r0] ldr r2,PLLFREQ_MASK_V bic r1,r1,r2 mov r2,r5,lsr#1 orr r1,r1,r2,lsl#24 str r1,[r0] mov r0,r7 ldr r1,P1C0A_V str r1,[r0] ldr r1,P1C0E_V str r1,[r0] ldr r1,P1C06_V str r1,[r0] ldr r1,P1C0E_V str r1,[r0] lock: ldr r1,[r0] and r1,r1,#1 cmp r1,#0 beq lock /* Putting system back to normal mode */ ldr r0,SCCTRL_V mov r1,#4 str r1,[r0] /* Putting DDR back to normal */ ldr r0,DDR_07_V ldr r1,[R0] ldr r2,SREFRESH_MASK_V bic r1, r1, r2 str r1,[r0] ldr r2,DDR_ACTIVE_V orr r1, r1, r2 str r1,[r0] /* Delay to ensure self refresh mode */ ldr r0,SREFRESH_DELAY_V 1: sub r0,r0,#1 cmp r0,#0 bne 1b mov sp,r8 /* Resuming back 
to code */ ldmia sp!,{r0-r12} ldmia sp!,{pc} SCCTRL_V: .word 0xfca00000 PLL1_FREQ_V: .word 0xfca8000C PLL1_CNTL_V: .word 0xfca80008 PLL2_FREQ_V: .word 0xfca80018 PLL2_CNTL_V: .word 0xfca80014 PLLFREQ_MASK_V: .word 0xff000000 P1C0A_V: .word 0x1C0A P1C0E_V: .word 0x1C0E P1C06_V: .word 0x1C06 SREFRESH_DELAY_V: .word 0x9999 SRAM_STACK_V: .word 0xD2800600 DDR_07_V: .word 0xfc60001c DDR_ACTIVE_V: .word 0x01000000 DDR_57_V: .word 0xfc6000e4 CYCLES_MASK_V: .word 0xffff0000 REFRESH_CYCLES_V: .word 0xf0f0 SREFRESH_MASK_V: .word 0x00010000 .global setfreq_sz setfreq_sz: .word setfreq_sz - setfreq
4ms/stm32mp1-baremetal
1,892
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/arm926ejs/mx27/relocate.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * relocate - i.MX27-specific vector relocation * * Copyright (c) 2013 Albert ARIBAUD <albert.u.boot@aribaud.net> */ #include <asm-offsets.h> #include <config.h> #include <linux/linkage.h> /* * The i.MX27 SoC is very specific with respect to exceptions: it * does not provide RAM at the high vectors address (0xFFFF0000), * thus only the low address (0x00000000) is useable; but that is * in ROM. Therefore, vectors cannot be changed at all. * * However, these ROM-based vectors actually just perform indirect * calls through pointers located in RAM at SoC-specific addresses, * as follows: * * Offset Exception Use by ROM code * 0x00000000 reset indirect branch to [0x00000014] * 0x00000004 undefined instruction indirect branch to [0xfffffef0] * 0x00000008 software interrupt indirect branch to [0xfffffef4] * 0x0000000c prefetch abort indirect branch to [0xfffffef8] * 0x00000010 data abort indirect branch to [0xfffffefc] * 0x00000014 (reserved in ARMv5) vector to ROM reset: 0xc0000000 * 0x00000018 IRQ indirect branch to [0xffffff00] * 0x0000001c FIQ indirect branch to [0xffffff04] * * In order to initialize exceptions on i.MX27, we must copy U-Boot's * indirect (not exception!) vector table into 0xfffffef0..0xffffff04 * taking care not to copy vectors number 5 (reserved exception). */ .section .text.relocate_vectors,"ax",%progbits ENTRY(relocate_vectors) ldr r0, [r9, #GD_RELOCADDR] /* r0 = gd->relocaddr */ ldr r1, =32 /* size of vector table */ add r0, r0, r1 /* skip to indirect table */ ldr r1, =0xFFFFFEF0 /* i.MX27 indirect table */ ldmia r0!, {r2-r8} /* load indirect vectors 1..7 */ stmia r1!, {r2-r5, r7,r8} /* write all but vector 5 */ bx lr ENDPROC(relocate_vectors)
4ms/stm32mp1-baremetal
12,413
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * (C) Copyright 2014-2015 Freescale Semiconductor * Copyright 2019 NXP * * Extracted from armv8/start.S */ #include <config.h> #include <linux/linkage.h> #include <asm/gic.h> #include <asm/macro.h> #include <asm/arch-fsl-layerscape/soc.h> #ifdef CONFIG_MP #include <asm/arch/mp.h> #endif #ifdef CONFIG_FSL_LSCH3 #include <asm/arch-fsl-layerscape/immap_lsch3.h> #endif #include <asm/u-boot.h> /* Get GIC offset * For LS1043a rev1.0, GIC base address align with 4k. * For LS1043a rev1.1, if DCFG_GIC400_ALIGN[GIC_ADDR_BIT] * is set, GIC base address align with 4K, or else align * with 64k. * output: * x0: the base address of GICD * x1: the base address of GICC */ ENTRY(get_gic_offset) ldr x0, =GICD_BASE #ifdef CONFIG_GICV2 ldr x1, =GICC_BASE #endif #ifdef CONFIG_HAS_FEATURE_GIC64K_ALIGN ldr x2, =DCFG_CCSR_SVR ldr w2, [x2] rev w2, w2 lsr w3, w2, #16 ldr w4, =SVR_DEV(SVR_LS1043A) cmp w3, w4 b.ne 1f ands w2, w2, #0xff cmp w2, #REV1_0 b.eq 1f ldr x2, =SCFG_GIC400_ALIGN ldr w2, [x2] rev w2, w2 tbnz w2, #GIC_ADDR_BIT, 1f ldr x0, =GICD_BASE_64K #ifdef CONFIG_GICV2 ldr x1, =GICC_BASE_64K #endif 1: #endif ret ENDPROC(get_gic_offset) ENTRY(smp_kick_all_cpus) /* Kick secondary cpus up by SGI 0 interrupt */ #if defined(CONFIG_GICV2) || defined(CONFIG_GICV3) mov x29, lr /* Save LR */ bl get_gic_offset bl gic_kick_secondary_cpus mov lr, x29 /* Restore LR */ #endif ret ENDPROC(smp_kick_all_cpus) ENTRY(lowlevel_init) mov x29, lr /* Save LR */ /* unmask SError and abort */ msr daifclr, #4 /* Set HCR_EL2[AMO] so SError @EL2 is taken */ mrs x0, hcr_el2 orr x0, x0, #0x20 /* AMO */ msr hcr_el2, x0 isb switch_el x1, 1f, 100f, 100f /* skip if not in EL3 */ 1: #if defined (CONFIG_SYS_FSL_HAS_CCN504) /* Set Wuo bit for RN-I 20 */ #ifdef CONFIG_ARCH_LS2080A ldr x0, =CCI_AUX_CONTROL_BASE(20) ldr x1, =0x00000010 bl ccn504_set_aux /* * Set forced-order mode in RNI-6, RNI-20 * This is required for performance optimization on LS2088A * LS2080A family does not 
support setting forced-order mode, * so skip this operation for LS2080A family */ bl get_svr lsr w0, w0, #16 ldr w1, =SVR_DEV(SVR_LS2080A) cmp w0, w1 b.eq 1f ldr x0, =CCI_AUX_CONTROL_BASE(6) ldr x1, =0x00000020 bl ccn504_set_aux ldr x0, =CCI_AUX_CONTROL_BASE(20) ldr x1, =0x00000020 bl ccn504_set_aux 1: #endif /* Add fully-coherent masters to DVM domain */ ldr x0, =CCI_MN_BASE ldr x1, =CCI_MN_RNF_NODEID_LIST ldr x2, =CCI_MN_DVM_DOMAIN_CTL_SET bl ccn504_add_masters_to_dvm /* Set all RN-I ports to QoS of 15 */ ldr x0, =CCI_S0_QOS_CONTROL_BASE(0) ldr x1, =0x00FF000C bl ccn504_set_qos ldr x0, =CCI_S1_QOS_CONTROL_BASE(0) ldr x1, =0x00FF000C bl ccn504_set_qos ldr x0, =CCI_S2_QOS_CONTROL_BASE(0) ldr x1, =0x00FF000C bl ccn504_set_qos ldr x0, =CCI_S0_QOS_CONTROL_BASE(2) ldr x1, =0x00FF000C bl ccn504_set_qos ldr x0, =CCI_S1_QOS_CONTROL_BASE(2) ldr x1, =0x00FF000C bl ccn504_set_qos ldr x0, =CCI_S2_QOS_CONTROL_BASE(2) ldr x1, =0x00FF000C bl ccn504_set_qos ldr x0, =CCI_S0_QOS_CONTROL_BASE(6) ldr x1, =0x00FF000C bl ccn504_set_qos ldr x0, =CCI_S1_QOS_CONTROL_BASE(6) ldr x1, =0x00FF000C bl ccn504_set_qos ldr x0, =CCI_S2_QOS_CONTROL_BASE(6) ldr x1, =0x00FF000C bl ccn504_set_qos ldr x0, =CCI_S0_QOS_CONTROL_BASE(12) ldr x1, =0x00FF000C bl ccn504_set_qos ldr x0, =CCI_S1_QOS_CONTROL_BASE(12) ldr x1, =0x00FF000C bl ccn504_set_qos ldr x0, =CCI_S2_QOS_CONTROL_BASE(12) ldr x1, =0x00FF000C bl ccn504_set_qos ldr x0, =CCI_S0_QOS_CONTROL_BASE(16) ldr x1, =0x00FF000C bl ccn504_set_qos ldr x0, =CCI_S1_QOS_CONTROL_BASE(16) ldr x1, =0x00FF000C bl ccn504_set_qos ldr x0, =CCI_S2_QOS_CONTROL_BASE(16) ldr x1, =0x00FF000C bl ccn504_set_qos ldr x0, =CCI_S0_QOS_CONTROL_BASE(20) ldr x1, =0x00FF000C bl ccn504_set_qos ldr x0, =CCI_S1_QOS_CONTROL_BASE(20) ldr x1, =0x00FF000C bl ccn504_set_qos ldr x0, =CCI_S2_QOS_CONTROL_BASE(20) ldr x1, =0x00FF000C bl ccn504_set_qos #endif /* CONFIG_SYS_FSL_HAS_CCN504 */ #ifdef SMMU_BASE /* Set the SMMU page size in the sACR register */ ldr x1, =SMMU_BASE ldr w0, [x1, #0x10] 
orr w0, w0, #1 << 16 /* set sACR.pagesize to indicate 64K page */ str w0, [x1, #0x10] #endif /* Initialize GIC Secure Bank Status */ #if defined(CONFIG_GICV2) || defined(CONFIG_GICV3) branch_if_slave x0, 1f bl get_gic_offset bl gic_init_secure 1: #ifdef CONFIG_GICV3 ldr x0, =GICR_BASE bl gic_init_secure_percpu #elif defined(CONFIG_GICV2) bl get_gic_offset bl gic_init_secure_percpu #endif #endif 100: branch_if_master x0, x1, 2f #if defined(CONFIG_MP) && defined(CONFIG_ARMV8_MULTIENTRY) ldr x0, =secondary_boot_func blr x0 #endif 2: switch_el x1, 1f, 100f, 100f /* skip if not in EL3 */ 1: #ifdef CONFIG_FSL_TZPC_BP147 /* Set Non Secure access for all devices protected via TZPC */ ldr x1, =TZPCDECPROT_0_SET_BASE /* Decode Protection-0 Set Reg */ orr w0, w0, #1 << 3 /* DCFG_RESET is accessible from NS world */ str w0, [x1] isb dsb sy #endif #ifdef CONFIG_FSL_TZASC_400 /* * LS2080 and its personalities does not support TZASC * So skip TZASC related operations */ bl get_svr lsr w0, w0, #16 ldr w1, =SVR_DEV(SVR_LS2080A) cmp w0, w1 b.eq 1f /* Set TZASC so that: * a. We use only Region0 whose global secure write/read is EN * b. We use only Region0 whose NSAID write/read is EN * * NOTE: As per the CCSR map doc, TZASC 3 and TZASC 4 are just * placeholders. 
*/ .macro tzasc_prog, xreg mov x12, TZASC1_BASE mov x16, #0x10000 mul x14, \xreg, x16 add x14, x14,x12 mov x1, #0x8 add x1, x1, x14 ldr w0, [x1] /* Filter 0 Gate Keeper Register */ orr w0, w0, #1 << 0 /* Set open_request for Filter 0 */ str w0, [x1] mov x1, #0x110 add x1, x1, x14 ldr w0, [x1] /* Region-0 Attributes Register */ orr w0, w0, #1 << 31 /* Set Sec global write en, Bit[31] */ orr w0, w0, #1 << 30 /* Set Sec global read en, Bit[30] */ str w0, [x1] mov x1, #0x114 add x1, x1, x14 ldr w0, [x1] /* Region-0 Access Register */ mov w0, #0xFFFFFFFF /* Set nsaid_wr_en and nsaid_rd_en */ str w0, [x1] .endm #ifdef CONFIG_FSL_TZASC_1 mov x13, #0 tzasc_prog x13 #endif #ifdef CONFIG_FSL_TZASC_2 mov x13, #1 tzasc_prog x13 #endif isb dsb sy #endif 100: 1: #ifdef CONFIG_ARCH_LS1046A switch_el x1, 1f, 100f, 100f /* skip if not in EL3 */ 1: /* Initialize the L2 RAM latency */ mrs x1, S3_1_c11_c0_2 mov x0, #0x1C7 /* Clear L2 Tag RAM latency and L2 Data RAM latency */ bic x1, x1, x0 /* Set L2 data ram latency bits [2:0] */ orr x1, x1, #0x2 /* set L2 tag ram latency bits [8:6] */ orr x1, x1, #0x80 msr S3_1_c11_c0_2, x1 isb 100: #endif #if !defined(CONFIG_TFABOOT) && \ (defined(CONFIG_FSL_LSCH2) && !defined(CONFIG_SPL_BUILD)) bl fsl_ocram_init #endif mov lr, x29 /* Restore LR */ ret ENDPROC(lowlevel_init) #if defined(CONFIG_FSL_LSCH2) && !defined(CONFIG_SPL_BUILD) ENTRY(fsl_ocram_init) mov x28, lr /* Save LR */ bl fsl_clear_ocram bl fsl_ocram_clear_ecc_err mov lr, x28 /* Restore LR */ ret ENDPROC(fsl_ocram_init) ENTRY(fsl_clear_ocram) /* Clear OCRAM */ ldr x0, =CONFIG_SYS_FSL_OCRAM_BASE ldr x1, =(CONFIG_SYS_FSL_OCRAM_BASE + CONFIG_SYS_FSL_OCRAM_SIZE) mov x2, #0 clear_loop: str x2, [x0] add x0, x0, #8 cmp x0, x1 b.lo clear_loop ret ENDPROC(fsl_clear_ocram) ENTRY(fsl_ocram_clear_ecc_err) /* OCRAM1/2 ECC status bit */ mov w1, #0x60 ldr x0, =DCSR_DCFG_SBEESR2 str w1, [x0] ldr x0, =DCSR_DCFG_MBEESR2 str w1, [x0] ret ENDPROC(fsl_ocram_init) #endif #ifdef CONFIG_FSL_LSCH3 .globl 
get_svr get_svr: ldr x1, =FSL_LSCH3_SVR ldr w0, [x1] ret #endif #if defined(CONFIG_SYS_FSL_HAS_CCN504) || defined(CONFIG_SYS_FSL_HAS_CCN508) hnf_pstate_poll: /* x0 has the desired status, return only if operation succeed * clobber x1, x2, x6 */ mov x1, x0 mov w6, #8 /* HN-F node count */ mov x0, #0x18 movk x0, #0x420, lsl #16 /* HNF0_PSTATE_STATUS */ 1: ldr x2, [x0] cmp x2, x1 /* check status */ b.eq 2f b 1b 2: add x0, x0, #0x10000 /* move to next node */ subs w6, w6, #1 cbnz w6, 1b ret hnf_set_pstate: /* x0 has the desired state, clobber x1, x2, x6 */ mov x1, x0 /* power state to SFONLY */ mov w6, #8 /* HN-F node count */ mov x0, #0x10 movk x0, #0x420, lsl #16 /* HNF0_PSTATE_REQ */ 1: /* set pstate to sfonly */ ldr x2, [x0] and x2, x2, #0xfffffffffffffffc /* & HNFPSTAT_MASK */ orr x2, x2, x1 str x2, [x0] add x0, x0, #0x10000 /* move to next node */ subs w6, w6, #1 cbnz w6, 1b ret ENTRY(__asm_flush_l3_dcache) /* * Return status in x0 * success 0 */ mov x29, lr dsb sy mov x0, #0x1 /* HNFPSTAT_SFONLY */ bl hnf_set_pstate mov x0, #0x4 /* SFONLY status */ bl hnf_pstate_poll dsb sy mov x0, #0x3 /* HNFPSTAT_FAM */ bl hnf_set_pstate mov x0, #0xc /* FAM status */ bl hnf_pstate_poll mov x0, #0 mov lr, x29 ret ENDPROC(__asm_flush_l3_dcache) #endif /* CONFIG_SYS_FSL_HAS_CCN504 */ #ifdef CONFIG_MP /* Keep literals not used by the secondary boot code outside it */ .ltorg /* Using 64 bit alignment since the spin table is accessed as data */ .align 4 .global secondary_boot_code /* Secondary Boot Code starts here */ secondary_boot_code: .global __spin_table __spin_table: .space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE .align 2 ENTRY(secondary_boot_func) /* * MPIDR_EL1 Fields: * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1) * MPIDR[7:2] = AFF0_RES * MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3) * MPIDR[23:16] = AFF2_CLUSTERID * MPIDR[24] = MT * MPIDR[29:25] = RES0 * MPIDR[30] = U * MPIDR[31] = ME * MPIDR[39:32] = AFF3 * * Linear Processor ID (LPID) calculation from MPIDR_EL1: * (We 
only use AFF0_CPUID and AFF1_CLUSTERID for now * until AFF2_CLUSTERID and AFF3 have non-zero values) * * LPID = MPIDR[15:8] | MPIDR[1:0] */ mrs x0, mpidr_el1 ubfm x1, x0, #8, #15 ubfm x2, x0, #0, #1 orr x10, x2, x1, lsl #2 /* x10 has LPID */ ubfm x9, x0, #0, #15 /* x9 contains MPIDR[15:0] */ /* * offset of the spin table element for this core from start of spin * table (each elem is padded to 64 bytes) */ lsl x1, x10, #6 ldr x0, =__spin_table /* physical address of this cpus spin table element */ add x11, x1, x0 ldr x0, =__real_cntfrq ldr x0, [x0] msr cntfrq_el0, x0 /* set with real frequency */ str x9, [x11, #16] /* LPID */ mov x4, #1 str x4, [x11, #8] /* STATUS */ dsb sy #if defined(CONFIG_GICV3) gic_wait_for_interrupt_m x0 #elif defined(CONFIG_GICV2) bl get_gic_offset mov x0, x1 gic_wait_for_interrupt_m x0, w1 #endif slave_cpu: wfe ldr x0, [x11] cbz x0, slave_cpu #ifndef CONFIG_ARMV8_SWITCH_TO_EL1 mrs x1, sctlr_el2 #else mrs x1, sctlr_el1 #endif tbz x1, #25, cpu_is_le rev x0, x0 /* BE to LE conversion */ cpu_is_le: ldr x5, [x11, #24] cbz x5, 1f #ifdef CONFIG_ARMV8_SWITCH_TO_EL1 adr x4, secondary_switch_to_el1 ldr x5, =ES_TO_AARCH64 #else ldr x4, [x11] ldr x5, =ES_TO_AARCH32 #endif bl secondary_switch_to_el2 1: #ifdef CONFIG_ARMV8_SWITCH_TO_EL1 adr x4, secondary_switch_to_el1 #else ldr x4, [x11] #endif ldr x5, =ES_TO_AARCH64 bl secondary_switch_to_el2 ENDPROC(secondary_boot_func) ENTRY(secondary_switch_to_el2) switch_el x6, 1f, 0f, 0f 0: ret 1: armv8_switch_to_el2_m x4, x5, x6 ENDPROC(secondary_switch_to_el2) ENTRY(secondary_switch_to_el1) mrs x0, mpidr_el1 ubfm x1, x0, #8, #15 ubfm x2, x0, #0, #1 orr x10, x2, x1, lsl #2 /* x10 has LPID */ lsl x1, x10, #6 ldr x0, =__spin_table /* physical address of this cpus spin table element */ add x11, x1, x0 ldr x4, [x11] ldr x5, [x11, #24] cbz x5, 2f ldr x5, =ES_TO_AARCH32 bl switch_to_el1 2: ldr x5, =ES_TO_AARCH64 switch_to_el1: switch_el x6, 0f, 1f, 0f 0: ret 1: armv8_switch_to_el1_m x4, x5, x6 
ENDPROC(secondary_switch_to_el1) /* Ensure that the literals used by the secondary boot code are * assembled within it (this is required so that we can protect * this area with a single memreserve region */ .ltorg /* 64 bit alignment for elements accessed as data */ .align 4 .global __real_cntfrq __real_cntfrq: .quad COUNTER_FREQUENCY .globl __secondary_boot_code_size .type __secondary_boot_code_size, %object /* Secondary Boot Code ends here */ __secondary_boot_code_size: .quad .-secondary_boot_code #endif
4ms/stm32mp1-baremetal
10,486
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/cpu/arm920t/ep93xx/lowlevel_init.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Low-level initialization for EP93xx * * Copyright (C) 2009 Matthias Kaehlcke <matthias@kaehlcke.net> * Copyright (C) 2013 * Sergey Kostanabev <sergey.kostanbaev <at> fairwaves.ru> * * Copyright (C) 2006 Dominic Rath <Dominic.Rath@gmx.de> * Copyright (C) 2006 Cirrus Logic Inc. * * See file CREDITS for list of people who contributed to this * project. */ #include <config.h> #include <asm/arch-ep93xx/ep93xx.h> /* /* Configure the SDRAM based on the supplied settings. * * Input: r0 - SDRAM DEVCFG register * r2 - configuration for SDRAM chips * Output: none * Modifies: r3, r4 */ ep93xx_sdram_config: /* Program the SDRAM device configuration register. */ ldr r3, =SDRAM_BASE #ifdef CONFIG_EDB93XX_SDCS0 str r0, [r3, #SDRAM_OFF_DEVCFG0] #endif #ifdef CONFIG_EDB93XX_SDCS1 str r0, [r3, #SDRAM_OFF_DEVCFG1] #endif #ifdef CONFIG_EDB93XX_SDCS2 str r0, [r3, #SDRAM_OFF_DEVCFG2] #endif #ifdef CONFIG_EDB93XX_SDCS3 str r0, [r3, #SDRAM_OFF_DEVCFG3] #endif /* Set the Initialize and MRS bits (issue continuous NOP commands * (INIT & MRS set)) */ ldr r4, =(EP93XX_SDRAMCTRL_GLOBALCFG_INIT | \ EP93XX_SDRAMCTRL_GLOBALCFG_MRS | \ EP93XX_SDRAMCTRL_GLOBALCFG_CKE) str r4, [r3, #SDRAM_OFF_GLCONFIG] /* Delay for 200us. */ mov r4, #0x3000 delay1: subs r4, r4, #1 bne delay1 /* Clear the MRS bit to issue a precharge all. */ ldr r4, =(EP93XX_SDRAMCTRL_GLOBALCFG_INIT | \ EP93XX_SDRAMCTRL_GLOBALCFG_CKE) str r4, [r3, #SDRAM_OFF_GLCONFIG] /* Temporarily set the refresh timer to 0x10. Make it really low so * that refresh cycles are generated. */ ldr r4, =0x10 str r4, [r3, #SDRAM_OFF_REFRSHTIMR] /* Delay for at least 80 SDRAM clock cycles. */ mov r4, #80 delay2: subs r4, r4, #1 bne delay2 /* Set the refresh timer to the fastest required for any device * that might be used. Set 9.6 ms refresh time. */ ldr r4, =0x01e0 str r4, [r3, #SDRAM_OFF_REFRSHTIMR] /* Select mode register update mode. 
*/ ldr r4, =(EP93XX_SDRAMCTRL_GLOBALCFG_CKE | \ EP93XX_SDRAMCTRL_GLOBALCFG_MRS) str r4, [r3, #SDRAM_OFF_GLCONFIG] /* Program the mode register on the SDRAM by performing fake read */ ldr r4, [r2] /* Select normal operating mode. */ ldr r4, =EP93XX_SDRAMCTRL_GLOBALCFG_CKE str r4, [r3, #SDRAM_OFF_GLCONFIG] /* Return to the caller. */ mov pc, lr /* * Test to see if the SDRAM has been configured in a usable mode. * * Input: r0 - Test address of SDRAM * Output: r0 - 0 -- Test OK, -1 -- Failed * Modifies: r0-r5 */ ep93xx_sdram_test: /* Load the test patterns to be written to SDRAM. */ ldr r1, =0xf00dface ldr r2, =0xdeadbeef ldr r3, =0x08675309 ldr r4, =0xdeafc0ed /* Store the test patterns to SDRAM. */ stmia r0, {r1-r4} /* Load the test patterns from SDRAM one at a time and compare them * to the actual pattern. */ ldr r5, [r0] cmp r5, r1 ldreq r5, [r0, #0x0004] cmpeq r5, r2 ldreq r5, [r0, #0x0008] cmpeq r5, r3 ldreq r5, [r0, #0x000c] cmpeq r5, r4 /* Return -1 if a mismatch was encountered, 0 otherwise. */ mvnne r0, #0xffffffff moveq r0, #0x00000000 /* Return to the caller. */ mov pc, lr /* * Determine the size of the SDRAM. Use data=address for the scan. * * Input: r0 - Start SDRAM address * Return: r0 - Single block size * r1 - Valid block mask * r2 - Total block count * Modifies: r0-r5 */ ep93xx_sdram_size: /* Store zero at offset zero. */ str r0, [r0] /* Start checking for an alias at 1MB into SDRAM. */ ldr r1, =0x00100000 /* Store the offset at the current offset. */ check_block_size: str r1, [r0, r1] /* Read back from zero. */ ldr r2, [r0] /* Stop searching of an alias was found. */ cmp r1, r2 beq found_block_size /* Advance to the next power of two boundary. */ mov r1, r1, lsl #1 /* Loop back if the size has not reached 256MB. */ cmp r1, #0x10000000 bne check_block_size /* A full 256MB of memory was found, so return it now. */ ldr r0, =0x10000000 ldr r1, =0x00000000 ldr r2, =0x00000001 mov pc, lr /* An alias was found. See if the first block is 128MB in size. 
*/ found_block_size: cmp r1, #0x08000000 /* The first block is 128MB, so there is no further memory. Return it * now. */ ldreq r0, =0x08000000 ldreq r1, =0x00000000 ldreq r2, =0x00000001 moveq pc, lr /* Save the block size, set the block address bits to zero, and * initialize the block count to one. */ mov r3, r1 ldr r4, =0x00000000 ldr r5, =0x00000001 /* Look for additional blocks of memory by searching for non-aliases. */ find_blocks: /* Store zero back to address zero. It may be overwritten. */ str r0, [r0] /* Advance to the next power of two boundary. */ mov r1, r1, lsl #1 /* Store the offset at the current offset. */ str r1, [r0, r1] /* Read back from zero. */ ldr r2, [r0] /* See if a non-alias was found. */ cmp r1, r2 /* If a non-alias was found, then or in the block address bit and * multiply the block count by two (since there are two unique * blocks, one with this bit zero and one with it one). */ orrne r4, r4, r1 movne r5, r5, lsl #1 /* Continue searching if there are more address bits to check. */ cmp r1, #0x08000000 bne find_blocks /* Return the block size, address mask, and count. */ mov r0, r3 mov r1, r4 mov r2, r5 /* Return to the caller. */ mov pc, lr .globl lowlevel_init lowlevel_init: mov r6, lr /* Make sure caches are off and invalidated. */ ldr r0, =0x00000000 mcr p15, 0, r0, c1, c0, 0 nop nop nop nop nop /* Turn off the green LED and turn on the red LED. If the red LED * is left on for too long, the external reset circuit described * by application note AN258 will cause the system to reset. */ ldr r1, =EP93XX_LED_DATA ldr r0, [r1] bic r0, r0, #EP93XX_LED_GREEN_ON orr r0, r0, #EP93XX_LED_RED_ON str r0, [r1] /* Undo the silly static memory controller programming performed * by the boot rom. 
*/ ldr r0, =SMC_BASE /* Set WST1 and WST2 to 31 HCLK cycles (slowest access) */ ldr r1, =0x0000fbe0 /* Reset EP93XX_OFF_SMCBCR0 */ ldr r2, [r0] orr r2, r2, r1 str r2, [r0] ldr r2, [r0, #EP93XX_OFF_SMCBCR1] orr r2, r2, r1 str r2, [r0, #EP93XX_OFF_SMCBCR1] ldr r2, [r0, #EP93XX_OFF_SMCBCR2] orr r2, r2, r1 str r2, [r0, #EP93XX_OFF_SMCBCR2] ldr r2, [r0, #EP93XX_OFF_SMCBCR3] orr r2, r2, r1 str r2, [r0, #EP93XX_OFF_SMCBCR3] ldr r2, [r0, #EP93XX_OFF_SMCBCR6] orr r2, r2, r1 str r2, [r0, #EP93XX_OFF_SMCBCR6] ldr r2, [r0, #EP93XX_OFF_SMCBCR7] orr r2, r2, r1 str r2, [r0, #EP93XX_OFF_SMCBCR7] /* Set the PLL1 and processor clock. */ ldr r0, =SYSCON_BASE #ifdef CONFIG_EDB9301 /* 332MHz, giving a 166MHz processor clock. */ ldr r1, = 0x02b49907 #else #ifdef CONFIG_EDB93XX_INDUSTRIAL /* 384MHz, giving a 196MHz processor clock. */ ldr r1, =0x02a4bb38 #else /* 400MHz, giving a 200MHz processor clock. */ ldr r1, =0x02a4e39e #endif #endif str r1, [r0, #SYSCON_OFF_CLKSET1] nop nop nop nop nop /* Need to make sure that SDRAM is configured correctly before * coping the code into it. */ #ifdef CONFIG_EDB93XX_SDCS0 mov r11, #SDRAM_DEVCFG0_BASE #endif #ifdef CONFIG_EDB93XX_SDCS1 mov r11, #SDRAM_DEVCFG1_BASE #endif #ifdef CONFIG_EDB93XX_SDCS2 mov r11, #SDRAM_DEVCFG2_BASE #endif #ifdef CONFIG_EDB93XX_SDCS3 ldr r0, =SYSCON_BASE ldr r0, [r0, #SYSCON_OFF_SYSCFG] ands r0, r0, #SYSCON_SYSCFG_LASDO moveq r11, #SDRAM_DEVCFG3_ASD0_BASE movne r11, #SDRAM_DEVCFG3_ASD1_BASE #endif /* See Table 13-5 in EP93xx datasheet for more info about DRAM * register mapping */ /* Try a 32-bit wide configuration of SDRAM. */ ldr r0, =(EP93XX_SDRAMCTRL_DEVCFG_BANKCOUNT | \ EP93XX_SDRAMCTRL_DEVCFG_SROMLL | \ EP93XX_SDRAMCTRL_DEVCFG_CASLAT_2 | \ EP93XX_SDRAMCTRL_DEVCFG_RASTOCAS_2) /* Set burst count: 4 and CAS: 2 * Burst mode [A11:A10]; CAS [A16:A14] */ orr r2, r11, #0x00008800 bl ep93xx_sdram_config /* Test the SDRAM. 
*/ mov r0, r11 bl ep93xx_sdram_test cmp r0, #0x00000000 beq ep93xx_sdram_done /* Try a 16-bit wide configuration of SDRAM. */ ldr r0, =(EP93XX_SDRAMCTRL_DEVCFG_BANKCOUNT | \ EP93XX_SDRAMCTRL_DEVCFG_SROMLL | \ EP93XX_SDRAMCTRL_DEVCFG_CASLAT_2 | \ EP93XX_SDRAMCTRL_DEVCFG_RASTOCAS_2 | \ EP93XX_SDRAMCTRL_DEVCFG_EXTBUSWIDTH) /* Set burst count: 8, CAS: 2, sequential burst * Accoring to Table 13-3 for 16bit operations mapping must be shifted. * Burst mode [A10:A9]; CAS [A15:A13] */ orr r2, r11, #0x00004600 bl ep93xx_sdram_config /* Test the SDRAM. */ mov r0, r11 bl ep93xx_sdram_test cmp r0, #0x00000000 beq ep93xx_sdram_done /* Turn off the red LED. */ ldr r0, =EP93XX_LED_DATA ldr r1, [r0] bic r1, r1, #EP93XX_LED_RED_ON str r1, [r0] /* There is no SDRAM so flash the green LED. */ flash_green: orr r1, r1, #EP93XX_LED_GREEN_ON str r1, [r0] ldr r2, =0x00010000 flash_green_delay_1: subs r2, r2, #1 bne flash_green_delay_1 bic r1, r1, #EP93XX_LED_GREEN_ON str r1, [r0] ldr r2, =0x00010000 flash_green_delay_2: subs r2, r2, #1 bne flash_green_delay_2 orr r1, r1, #EP93XX_LED_GREEN_ON str r1, [r0] ldr r2, =0x00010000 flash_green_delay_3: subs r2, r2, #1 bne flash_green_delay_3 bic r1, r1, #EP93XX_LED_GREEN_ON str r1, [r0] ldr r2, =0x00050000 flash_green_delay_4: subs r2, r2, #1 bne flash_green_delay_4 b flash_green ep93xx_sdram_done: ldr r1, =EP93XX_LED_DATA ldr r0, [r1] bic r0, r0, #EP93XX_LED_RED_ON str r0, [r1] /* Determine the size of the SDRAM. */ mov r0, r11 bl ep93xx_sdram_size /* Save the SDRAM characteristics. 
*/ mov r8, r0 mov r9, r1 mov r10, r2 /* Compute total memory size into r1 */ mul r1, r8, r10 #ifdef CONFIG_EDB93XX_SDCS0 ldr r2, [r0, #SDRAM_OFF_DEVCFG0] #endif #ifdef CONFIG_EDB93XX_SDCS1 ldr r2, [r0, #SDRAM_OFF_DEVCFG1] #endif #ifdef CONFIG_EDB93XX_SDCS2 ldr r2, [r0, #SDRAM_OFF_DEVCFG2] #endif #ifdef CONFIG_EDB93XX_SDCS3 ldr r2, [r0, #SDRAM_OFF_DEVCFG3] #endif /* Consider small DRAM size as: * < 32Mb for 32bit bus * < 64Mb for 16bit bus */ tst r2, #EP93XX_SDRAMCTRL_DEVCFG_EXTBUSWIDTH moveq r1, r1, lsr #1 cmp r1, #0x02000000 #if defined(CONFIG_EDB9301) /* Set refresh counter to 20ms for small DRAM size, otherwise 9.6ms */ movlt r1, #0x03f0 movge r1, #0x01e0 #else /* Set refresh counter to 30.7ms for small DRAM size, otherwise 15ms */ movlt r1, #0x0600 movge r1, #0x2f0 #endif str r1, [r0, #SDRAM_OFF_REFRSHTIMR] /* Save the memory configuration information. */ orr r0, r11, #UBOOT_MEMORYCNF_BANK_SIZE stmia r0, {r8-r11} mov lr, r6 mov pc, lr
4ms/stm32mp1-baremetal
3,729
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/mach-uniphier/arm32/lowlevel_init.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2012-2015 Panasonic Corporation * Copyright (C) 2015-2016 Socionext Inc. * Author: Masahiro Yamada <yamada.masahiro@socionext.com> */ #include <config.h> #include <linux/linkage.h> #include <linux/sizes.h> #include <asm/system.h> ENTRY(lowlevel_init) mov r8, lr @ persevere link reg across call /* * The UniPhier Boot ROM loads SPL code to the L2 cache. * But CPUs can only do instruction fetch now because start.S has * cleared C and M bits. * First we need to turn on MMU and Dcache again to get back * data access to L2. */ mrc p15, 0, r0, c1, c0, 0 @ SCTLR (System Control Register) orr r0, r0, #(CR_C | CR_M) @ enable MMU and Dcache mcr p15, 0, r0, c1, c0, 0 #ifdef CONFIG_DEBUG_LL bl debug_ll_init #endif bl setup_init_ram @ RAM area for stack and page table /* * Now we are using the page table embedded in the Boot ROM. * What we need to do next is to create a page table and switch * over to it. */ bl create_page_table bl __v7_flush_dcache_all /* Disable MMU and Dcache before switching Page Table */ mrc p15, 0, r0, c1, c0, 0 @ SCTLR (System Control Register) bic r0, r0, #(CR_C | CR_M) @ disable MMU and Dcache mcr p15, 0, r0, c1, c0, 0 bl enable_mmu mov lr, r8 @ restore link mov pc, lr @ back to my caller ENDPROC(lowlevel_init) ENTRY(enable_mmu) mrc p15, 0, r0, c2, c0, 2 @ TTBCR (Translation Table Base Control Register) bic r0, r0, #0x37 orr r0, r0, #0x20 @ disable TTBR1 mcr p15, 0, r0, c2, c0, 2 orr r0, r12, #0x8 @ Outer Cacheability for table walks: WBWA mcr p15, 0, r0, c2, c0, 0 @ TTBR0 mov r0, #0 mcr p15, 0, r0, c8, c7, 0 @ invalidate TLBs mov r0, #-1 @ manager for all domains (No permission check) mcr p15, 0, r0, c3, c0, 0 @ DACR (Domain Access Control Register) dsb isb /* * MMU on: * TLBs was already invalidated in "../start.S" * So, we don't need to invalidate it here. 
*/ mrc p15, 0, r0, c1, c0, 0 @ SCTLR (System Control Register) orr r0, r0, #(CR_C | CR_M) @ MMU and Dcache enable mcr p15, 0, r0, c1, c0, 0 mov pc, lr ENDPROC(enable_mmu) /* * For PH1-Pro4 or older SoCs, the size of WAY is 32KB. * It is large enough for tmp RAM. */ #define BOOT_RAM_SIZE (SZ_32K) #define BOOT_RAM_BASE ((CONFIG_SPL_STACK) - (BOOT_RAM_SIZE)) #define BOOT_RAM_WAYS (0x00000100) @ way 8 #define SSCO_BASE 0x506c0000 #define SSCOPE 0x244 #define SSCOQM 0x248 #define SSCOQAD 0x24c #define SSCOQSZ 0x250 #define SSCOQWN 0x258 #define SSCOPPQSEF 0x25c #define SSCOLPQS 0x260 ENTRY(setup_init_ram) ldr r1, = SSCO_BASE /* Touch to zero for the boot way */ 0: ldr r0, = 0x00408006 @ touch to zero with address range str r0, [r1, #SSCOQM] ldr r0, = BOOT_RAM_BASE str r0, [r1, #SSCOQAD] ldr r0, = BOOT_RAM_SIZE str r0, [r1, #SSCOQSZ] ldr r0, = BOOT_RAM_WAYS str r0, [r1, #SSCOQWN] ldr r0, [r1, #SSCOPPQSEF] cmp r0, #0 @ check if the command is successfully set bne 0b @ try again if an error occurs 1: ldr r0, [r1, #SSCOLPQS] cmp r0, #0x4 bne 1b @ wait until the operation is completed str r0, [r1, #SSCOLPQS] @ clear the complete notification flag mov pc, lr ENDPROC(setup_init_ram) #define DEVICE 0x00002002 /* Non-shareable Device */ #define NORMAL 0x0000000e /* Normal Memory Write-Back, No Write-Allocate */ ENTRY(create_page_table) ldr r0, = DEVICE ldr r1, = BOOT_RAM_BASE mov r12, r1 @ r12 is preserved during D-cache flush 0: str r0, [r1], #4 @ specify all the sections as Device adds r0, r0, #0x00100000 bcc 0b ldr r0, = NORMAL str r0, [r12] @ mark the first section as Normal add r0, r0, #0x00100000 str r0, [r12, #4] @ mark the second section as Normal mov pc, lr ENDPROC(create_page_table)
4ms/stm32mp1-baremetal
3,982
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/mach-uniphier/arm32/debug_ll.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * On-chip UART initializaion for low-level debugging * * Copyright (C) 2014-2015 Masahiro Yamada <yamada.masahiro@socionext.com> */ #include <linux/serial_reg.h> #include <linux/linkage.h> #include "../bcu/bcu-regs.h" #include "../sc-regs.h" #include "../sg-regs.h" #if !defined(CONFIG_DEBUG_SEMIHOSTING) #include CONFIG_DEBUG_LL_INCLUDE #endif #define SG_REVISION_TYPE_SHIFT 16 #define SG_REVISION_TYPE_MASK (0xff << SG_REVISION_TYPE_SHIFT) #define BAUDRATE 115200 #define DIV_ROUND(x, d) (((x) + ((d) / 2)) / (d)) .macro sg_set_pinsel, pin, muxval, mux_bits, reg_stride, ra, rd ldr \ra, =(SG_BASE + SG_PINCTRL_BASE + \pin * \mux_bits / 32 * \reg_stride) ldr \rd, [\ra] and \rd, \rd, #~(((1 << \mux_bits) - 1) << (\pin * \mux_bits % 32)) orr \rd, \rd, #(\muxval << (\pin * \mux_bits % 32)) str \rd, [\ra] .endm ENTRY(debug_ll_init) ldr r0, =(SG_BASE + SG_REVISION) ldr r1, [r0] and r1, r1, #SG_REVISION_TYPE_MASK mov r1, r1, lsr #SG_REVISION_TYPE_SHIFT #if defined(CONFIG_ARCH_UNIPHIER_LD4) #define UNIPHIER_LD4_UART_CLK 36864000 cmp r1, #0x26 bne ld4_end ldr r0, =(SG_BASE + SG_IECTRL) ldr r1, [r0] orr r1, r1, #1 str r1, [r0] sg_set_pinsel 88, 1, 8, 4, r0, r1 @ HSDOUT6 -> TXD0 ldr r3, =DIV_ROUND(UNIPHIER_LD4_UART_CLK, 16 * BAUDRATE) b init_uart ld4_end: #endif #if defined(CONFIG_ARCH_UNIPHIER_PRO4) #define UNIPHIER_PRO4_UART_CLK 73728000 cmp r1, #0x28 bne pro4_end sg_set_pinsel 128, 0, 4, 8, r0, r1 @ TXD0 -> TXD0 ldr r0, =(SG_BASE + SG_LOADPINCTRL) mov r1, #1 str r1, [r0] ldr r0, =(SC_BASE + SC_CLKCTRL) ldr r1, [r0] orr r1, r1, #SC_CLKCTRL_CEN_PERI str r1, [r0] ldr r3, =DIV_ROUND(UNIPHIER_PRO4_UART_CLK, 16 * BAUDRATE) b init_uart pro4_end: #endif #if defined(CONFIG_ARCH_UNIPHIER_SLD8) #define UNIPHIER_SLD8_UART_CLK 80000000 cmp r1, #0x29 bne sld8_end ldr r0, =(SG_BASE + SG_IECTRL) ldr r1, [r0] orr r1, r1, #1 str r1, [r0] sg_set_pinsel 70, 3, 8, 4, r0, r1 @ HSDOUT0 -> TXD0 ldr r3, =DIV_ROUND(UNIPHIER_SLD8_UART_CLK, 16 * BAUDRATE) b 
init_uart sld8_end: #endif #if defined(CONFIG_ARCH_UNIPHIER_PRO5) #define UNIPHIER_PRO5_UART_CLK 73728000 cmp r1, #0x2A bne pro5_end sg_set_pinsel 47, 0, 4, 8, r0, r1 @ TXD0 -> TXD0 sg_set_pinsel 49, 0, 4, 8, r0, r1 @ TXD1 -> TXD1 sg_set_pinsel 51, 0, 4, 8, r0, r1 @ TXD2 -> TXD2 sg_set_pinsel 53, 0, 4, 8, r0, r1 @ TXD3 -> TXD3 ldr r0, =(SG_BASE + SG_LOADPINCTRL) mov r1, #1 str r1, [r0] ldr r0, =(SC_BASE + SC_CLKCTRL) ldr r1, [r0] orr r1, r1, #SC_CLKCTRL_CEN_PERI str r1, [r0] ldr r3, =DIV_ROUND(UNIPHIER_PRO5_UART_CLK, 16 * BAUDRATE) b init_uart pro5_end: #endif #if defined(CONFIG_ARCH_UNIPHIER_PXS2) #define UNIPHIER_PXS2_UART_CLK 88900000 cmp r1, #0x2E bne pxs2_end ldr r0, =(SG_BASE + SG_IECTRL) ldr r1, [r0] orr r1, r1, #1 str r1, [r0] sg_set_pinsel 217, 8, 8, 4, r0, r1 @ TXD0 -> TXD0 sg_set_pinsel 115, 8, 8, 4, r0, r1 @ TXD1 -> TXD1 sg_set_pinsel 113, 8, 8, 4, r0, r1 @ TXD2 -> TXD2 sg_set_pinsel 219, 8, 8, 4, r0, r1 @ TXD3 -> TXD3 ldr r0, =(SC_BASE + SC_CLKCTRL) ldr r1, [r0] orr r1, r1, #SC_CLKCTRL_CEN_PERI str r1, [r0] ldr r3, =DIV_ROUND(UNIPHIER_PXS2_UART_CLK, 16 * BAUDRATE) b init_uart pxs2_end: #endif #if defined(CONFIG_ARCH_UNIPHIER_LD6B) #define UNIPHIER_LD6B_UART_CLK 88900000 cmp r1, #0x2F bne ld6b_end ldr r0, =(SG_BASE + SG_IECTRL) ldr r1, [r0] orr r1, r1, #1 str r1, [r0] sg_set_pinsel 135, 3, 8, 4, r0, r1 @ PORT10 -> TXD0 sg_set_pinsel 115, 0, 8, 4, r0, r1 @ TXD1 -> TXD1 sg_set_pinsel 113, 2, 8, 4, r0, r1 @ SBO0 -> TXD2 ldr r0, =(SC_BASE + SC_CLKCTRL) ldr r1, [r0] orr r1, r1, #SC_CLKCTRL_CEN_PERI str r1, [r0] ldr r3, =DIV_ROUND(UNIPHIER_LD6B_UART_CLK, 16 * BAUDRATE) b init_uart ld6b_end: #endif mov pc, lr init_uart: addruart r0, r1, r2 mov r1, #UART_LCR_WLEN8 << 8 str r1, [r0, #0x10] str r3, [r0, #0x24] mov pc, lr ENDPROC(debug_ll_init)
4ms/stm32mp1-baremetal
6,439
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/mach-at91/arm926ejs/lowlevel_init.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Memory Setup stuff - taken from blob memsetup.S * * Copyright (C) 1999 2000 2001 Erik Mouw (J.A.K.Mouw@its.tudelft.nl) and * Jan-Derk Bakker (J.D.Bakker@its.tudelft.nl) * * Copyright (C) 2008 Ronetix Ilko Iliev (www.ronetix.at) * Copyright (C) 2009 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com> */ #include <config.h> #include <asm/arch/hardware.h> #include <asm/arch/at91_pmc.h> #include <asm/arch/at91_wdt.h> #include <asm/arch/at91_pio.h> #include <asm/arch/at91_matrix.h> #include <asm/arch/at91sam9_sdramc.h> #include <asm/arch/at91sam9_smc.h> #include <asm/arch/at91_rstc.h> #ifdef CONFIG_ATMEL_LEGACY #include <asm/arch/at91sam9_matrix.h> #endif #ifndef CONFIG_SYS_MATRIX_EBICSA_VAL #define CONFIG_SYS_MATRIX_EBICSA_VAL CONFIG_SYS_MATRIX_EBI0CSA_VAL #endif .globl lowlevel_init .type lowlevel_init,function lowlevel_init: POS1: adr r5, POS1 /* r5 = POS1 run time */ ldr r0, =POS1 /* r0 = POS1 compile */ sub r5, r5, r0 /* r0 = CONFIG_SYS_TEXT_BASE-1 */ /* memory control configuration 1 */ ldr r0, =SMRDATA ldr r2, =SMRDATA1 add r0, r0, r5 add r2, r2, r5 0: /* the address */ ldr r1, [r0], #4 /* the value */ ldr r3, [r0], #4 str r3, [r1] cmp r2, r0 bne 0b /* ---------------------------------------------------------------------------- * PMC Init Step 1. 
* ---------------------------------------------------------------------------- * - Check if the PLL is already initialized * ---------------------------------------------------------------------------- */ ldr r1, =(AT91_ASM_PMC_MCKR) ldr r0, [r1] and r0, r0, #3 cmp r0, #0 bne PLL_setup_end /* --------------------------------------------------------------------------- * - Enable the Main Oscillator * --------------------------------------------------------------------------- */ ldr r1, =(AT91_ASM_PMC_MOR) ldr r2, =(AT91_ASM_PMC_SR) /* Main oscillator Enable register PMC_MOR: */ ldr r0, =CONFIG_SYS_MOR_VAL str r0, [r1] /* Reading the PMC Status to detect when the Main Oscillator is enabled */ mov r4, #AT91_PMC_IXR_MOSCS MOSCS_Loop: ldr r3, [r2] and r3, r4, r3 cmp r3, #AT91_PMC_IXR_MOSCS bne MOSCS_Loop /* ---------------------------------------------------------------------------- * PMC Init Step 2. * ---------------------------------------------------------------------------- * Setup PLLA * ---------------------------------------------------------------------------- */ ldr r1, =(AT91_ASM_PMC_PLLAR) ldr r0, =CONFIG_SYS_PLLAR_VAL str r0, [r1] /* Reading the PMC Status register to detect when the PLLA is locked */ mov r4, #AT91_PMC_IXR_LOCKA MOSCS_Loop1: ldr r3, [r2] and r3, r4, r3 cmp r3, #AT91_PMC_IXR_LOCKA bne MOSCS_Loop1 /* ---------------------------------------------------------------------------- * PMC Init Step 3. 
* ---------------------------------------------------------------------------- * - Switch on the Main Oscillator * ---------------------------------------------------------------------------- */ ldr r1, =(AT91_ASM_PMC_MCKR) /* -Master Clock Controller register PMC_MCKR */ ldr r0, =CONFIG_SYS_MCKR1_VAL str r0, [r1] /* Reading the PMC Status to detect when the Master clock is ready */ mov r4, #AT91_PMC_IXR_MCKRDY MCKRDY_Loop: ldr r3, [r2] and r3, r4, r3 cmp r3, #AT91_PMC_IXR_MCKRDY bne MCKRDY_Loop ldr r0, =CONFIG_SYS_MCKR2_VAL str r0, [r1] /* Reading the PMC Status to detect when the Master clock is ready */ mov r4, #AT91_PMC_IXR_MCKRDY MCKRDY_Loop1: ldr r3, [r2] and r3, r4, r3 cmp r3, #AT91_PMC_IXR_MCKRDY bne MCKRDY_Loop1 PLL_setup_end: /* ---------------------------------------------------------------------------- * - memory control configuration 2 * ---------------------------------------------------------------------------- */ ldr r0, =(AT91_ASM_SDRAMC_TR) ldr r1, [r0] cmp r1, #0 bne SDRAM_setup_end ldr r0, =SMRDATA1 ldr r2, =SMRDATA2 add r0, r0, r5 add r2, r2, r5 2: /* the address */ ldr r1, [r0], #4 /* the value */ ldr r3, [r0], #4 str r3, [r1] cmp r2, r0 bne 2b SDRAM_setup_end: /* everything is fine now */ mov pc, lr .ltorg SMRDATA: .word AT91_ASM_WDT_MR .word CONFIG_SYS_WDTC_WDMR_VAL /* configure PIOx as EBI0 D[16-31] */ #if defined(CONFIG_AT91SAM9263) .word AT91_ASM_PIOD_PDR .word CONFIG_SYS_PIOD_PDR_VAL1 .word AT91_ASM_PIOD_PUDR .word CONFIG_SYS_PIOD_PPUDR_VAL .word AT91_ASM_PIOD_ASR .word CONFIG_SYS_PIOD_PPUDR_VAL #elif defined(CONFIG_AT91SAM9260) || defined(CONFIG_AT91SAM9261) \ || defined(CONFIG_AT91SAM9G20) .word AT91_ASM_PIOC_PDR .word CONFIG_SYS_PIOC_PDR_VAL1 .word AT91_ASM_PIOC_PUDR .word CONFIG_SYS_PIOC_PPUDR_VAL #endif .word AT91_ASM_MATRIX_CSA0 .word CONFIG_SYS_MATRIX_EBICSA_VAL /* flash */ .word AT91_ASM_SMC_MODE0 .word CONFIG_SYS_SMC0_MODE0_VAL .word AT91_ASM_SMC_CYCLE0 .word CONFIG_SYS_SMC0_CYCLE0_VAL .word AT91_ASM_SMC_PULSE0 .word 
CONFIG_SYS_SMC0_PULSE0_VAL .word AT91_ASM_SMC_SETUP0 .word CONFIG_SYS_SMC0_SETUP0_VAL SMRDATA1: .word AT91_ASM_SDRAMC_MR .word CONFIG_SYS_SDRC_MR_VAL1 .word AT91_ASM_SDRAMC_TR .word CONFIG_SYS_SDRC_TR_VAL1 .word AT91_ASM_SDRAMC_CR .word CONFIG_SYS_SDRC_CR_VAL .word AT91_ASM_SDRAMC_MDR .word CONFIG_SYS_SDRC_MDR_VAL .word AT91_ASM_SDRAMC_MR .word CONFIG_SYS_SDRC_MR_VAL2 .word CONFIG_SYS_SDRAM_BASE .word CONFIG_SYS_SDRAM_VAL1 .word AT91_ASM_SDRAMC_MR .word CONFIG_SYS_SDRC_MR_VAL3 .word CONFIG_SYS_SDRAM_BASE .word CONFIG_SYS_SDRAM_VAL2 .word CONFIG_SYS_SDRAM_BASE .word CONFIG_SYS_SDRAM_VAL3 .word CONFIG_SYS_SDRAM_BASE .word CONFIG_SYS_SDRAM_VAL4 .word CONFIG_SYS_SDRAM_BASE .word CONFIG_SYS_SDRAM_VAL5 .word CONFIG_SYS_SDRAM_BASE .word CONFIG_SYS_SDRAM_VAL6 .word CONFIG_SYS_SDRAM_BASE .word CONFIG_SYS_SDRAM_VAL7 .word CONFIG_SYS_SDRAM_BASE .word CONFIG_SYS_SDRAM_VAL8 .word CONFIG_SYS_SDRAM_BASE .word CONFIG_SYS_SDRAM_VAL9 .word AT91_ASM_SDRAMC_MR .word CONFIG_SYS_SDRC_MR_VAL4 .word CONFIG_SYS_SDRAM_BASE .word CONFIG_SYS_SDRAM_VAL10 .word AT91_ASM_SDRAMC_MR .word CONFIG_SYS_SDRC_MR_VAL5 .word CONFIG_SYS_SDRAM_BASE .word CONFIG_SYS_SDRAM_VAL11 .word AT91_ASM_SDRAMC_TR .word CONFIG_SYS_SDRC_TR_VAL2 .word CONFIG_SYS_SDRAM_BASE .word CONFIG_SYS_SDRAM_VAL12 /* User reset enable*/ .word AT91_ASM_RSTC_MR .word CONFIG_SYS_RSTC_RMR_VAL #ifdef CONFIG_SYS_MATRIX_MCFG_REMAP /* MATRIX_MCFG - REMAP all masters */ .word AT91_ASM_MATRIX_MCFG .word 0x1FF #endif SMRDATA2: .word 0
4ms/stm32mp1-baremetal
3,620
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/mach-at91/arm920t/lowlevel_init.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 1999 2000 2001 Erik Mouw (J.A.K.Mouw@its.tudelft.nl) and * Jan-Derk Bakker (J.D.Bakker@its.tudelft.nl) * * Modified for the at91rm9200dk board by * (C) Copyright 2004 * Gary Jennejohn, DENX Software Engineering, <garyj@denx.de> */ #include <config.h> #ifndef CONFIG_SKIP_LOWLEVEL_INIT #include <asm/arch/hardware.h> #include <asm/arch/at91_mc.h> #include <asm/arch/at91_pmc.h> #include <asm/arch/at91_pio.h> #define ARM920T_CONTROL 0xC0000000 /* @ set bit 31 (iA) and 30 (nF) */ _MTEXT_BASE: #undef START_FROM_MEM #ifdef START_FROM_MEM .word CONFIG_SYS_TEXT_BASE-PHYS_FLASH_1 #else .word CONFIG_SYS_TEXT_BASE #endif .globl lowlevel_init lowlevel_init: ldr r1, =AT91_ASM_PMC_MOR /* Main oscillator Enable register */ #ifdef CONFIG_SYS_USE_MAIN_OSCILLATOR ldr r0, =0x0000FF01 /* Enable main oscillator */ #else ldr r0, =0x0000FF00 /* Disable main oscillator */ #endif str r0, [r1] /*AT91C_CKGR_MOR] */ /* Add loop to compensate Main Oscillator startup time */ ldr r0, =0x00000010 LoopOsc: subs r0, r0, #1 bhi LoopOsc /* memory control configuration */ /* this isn't very elegant, but what the heck */ ldr r0, =SMRDATA ldr r1, _MTEXT_BASE sub r0, r0, r1 ldr r2, =SMRDATAE sub r2, r2, r1 pllloop: /* the address */ ldr r1, [r0], #4 /* the value */ ldr r3, [r0], #4 str r3, [r1] cmp r2, r0 bne pllloop /* delay - this is all done by guess */ ldr r0, =0x00010000 /* (vs reading PMC_SR for LOCKA, LOCKB ... 
or MOSCS earlier) */ lock: subs r0, r0, #1 bhi lock ldr r0, =SMRDATA1 ldr r1, _MTEXT_BASE sub r0, r0, r1 ldr r2, =SMRDATA1E sub r2, r2, r1 sdinit: /* the address */ ldr r1, [r0], #4 /* the value */ ldr r3, [r0], #4 str r3, [r1] cmp r2, r0 bne sdinit /* switch from FastBus to Asynchronous clock mode */ mrc p15, 0, r0, c1, c0, 0 orr r0, r0, #ARM920T_CONTROL mcr p15, 0, r0, c1, c0, 0 /* everything is fine now */ mov pc, lr .ltorg SMRDATA: .word AT91_ASM_MC_EBI_CFG .word CONFIG_SYS_EBI_CFGR_VAL .word AT91_ASM_MC_SMC_CSR0 .word CONFIG_SYS_SMC_CSR0_VAL .word AT91_ASM_PMC_PLLAR .word CONFIG_SYS_PLLAR_VAL .word AT91_ASM_PMC_PLLBR .word CONFIG_SYS_PLLBR_VAL .word AT91_ASM_PMC_MCKR .word CONFIG_SYS_MCKR_VAL SMRDATAE: /* here there's a delay */ SMRDATA1: .word AT91_ASM_PIOC_ASR .word CONFIG_SYS_PIOC_ASR_VAL .word AT91_ASM_PIOC_BSR .word CONFIG_SYS_PIOC_BSR_VAL .word AT91_ASM_PIOC_PDR .word CONFIG_SYS_PIOC_PDR_VAL .word AT91_ASM_MC_EBI_CSA .word CONFIG_SYS_EBI_CSA_VAL .word AT91_ASM_MC_SDRAMC_CR .word CONFIG_SYS_SDRC_CR_VAL .word AT91_ASM_MC_SDRAMC_MR .word CONFIG_SYS_SDRC_MR_VAL .word CONFIG_SYS_SDRAM .word CONFIG_SYS_SDRAM_VAL .word AT91_ASM_MC_SDRAMC_MR .word CONFIG_SYS_SDRC_MR_VAL1 .word CONFIG_SYS_SDRAM .word CONFIG_SYS_SDRAM_VAL .word CONFIG_SYS_SDRAM .word CONFIG_SYS_SDRAM_VAL .word CONFIG_SYS_SDRAM .word CONFIG_SYS_SDRAM_VAL .word CONFIG_SYS_SDRAM .word CONFIG_SYS_SDRAM_VAL .word CONFIG_SYS_SDRAM .word CONFIG_SYS_SDRAM_VAL .word CONFIG_SYS_SDRAM .word CONFIG_SYS_SDRAM_VAL .word CONFIG_SYS_SDRAM .word CONFIG_SYS_SDRAM_VAL .word CONFIG_SYS_SDRAM .word CONFIG_SYS_SDRAM_VAL .word AT91_ASM_MC_SDRAMC_MR .word CONFIG_SYS_SDRC_MR_VAL2 .word CONFIG_SYS_SDRAM1 .word CONFIG_SYS_SDRAM_VAL .word AT91_ASM_MC_SDRAMC_TR .word CONFIG_SYS_SDRC_TR_VAL .word CONFIG_SYS_SDRAM .word CONFIG_SYS_SDRAM_VAL .word AT91_ASM_MC_SDRAMC_MR .word CONFIG_SYS_SDRC_MR_VAL3 .word CONFIG_SYS_SDRAM .word CONFIG_SYS_SDRAM_VAL SMRDATA1E: /* SMRDATA1 is 176 bytes long */ #endif /* CONFIG_SKIP_LOWLEVEL_INIT */
4ms/stm32mp1-baremetal
4,327
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/include/asm/arch-mx6/mx6_plugin.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2016 Freescale Semiconductor, Inc. */ #include <config.h> #ifdef CONFIG_ROM_UNIFIED_SECTIONS #define ROM_API_TABLE_BASE_ADDR_LEGACY 0x180 #define ROM_VERSION_OFFSET 0x80 #else #define ROM_API_TABLE_BASE_ADDR_LEGACY 0xC0 #define ROM_VERSION_OFFSET 0x48 #endif #define ROM_API_TABLE_BASE_ADDR_MX6DQ_TO15 0xC4 #define ROM_API_TABLE_BASE_ADDR_MX6DL_TO12 0xC4 #define ROM_API_HWCNFG_SETUP_OFFSET 0x08 #define ROM_VERSION_TO10 0x10 #define ROM_VERSION_TO12 0x12 #define ROM_VERSION_TO15 0x15 plugin_start: push {r0-r4, lr} imx6_ddr_setting imx6_clock_gating imx6_qos_setting /* * The following is to fill in those arguments for this ROM function * pu_irom_hwcnfg_setup(void **start, size_t *bytes, const void *boot_data) * This function is used to copy data from the storage media into DDR. * start - Initial (possibly partial) image load address on entry. * Final image load address on exit. * bytes - Initial (possibly partial) image size on entry. * Final image size on exit. * boot_data - Initial @ref ivt Boot Data load address. 
*/ adr r0, boot_data2 adr r1, image_len2 adr r2, boot_data2 #ifdef CONFIG_NOR_BOOT #ifdef CONFIG_MX6SX ldr r3, =ROM_VERSION_OFFSET ldr r4, [r3] cmp r4, #ROM_VERSION_TO10 bgt before_calling_rom___pu_irom_hwcnfg_setup ldr r3, =0x00900b00 ldr r4, =0x50000000 str r4, [r3, #0x5c] #else ldr r3, =0x00900800 ldr r4, =0x08000000 str r4, [r3, #0xc0] #endif #endif /* * check the _pu_irom_api_table for the address */ before_calling_rom___pu_irom_hwcnfg_setup: ldr r3, =ROM_VERSION_OFFSET ldr r4, [r3] #if defined(CONFIG_MX6SOLO) || defined(CONFIG_MX6DL) ldr r3, =ROM_VERSION_TO12 cmp r4, r3 ldrge r3, =ROM_API_TABLE_BASE_ADDR_MX6DL_TO12 ldrlt r3, =ROM_API_TABLE_BASE_ADDR_LEGACY #elif defined(CONFIG_MX6Q) ldr r3, =ROM_VERSION_TO15 cmp r4, r3 ldrge r3, =ROM_API_TABLE_BASE_ADDR_MX6DQ_TO15 ldrlt r3, =ROM_API_TABLE_BASE_ADDR_LEGACY #else ldr r3, =ROM_API_TABLE_BASE_ADDR_LEGACY #endif ldr r4, [r3, #ROM_API_HWCNFG_SETUP_OFFSET] blx r4 after_calling_rom___pu_irom_hwcnfg_setup: /* * ROM_API_HWCNFG_SETUP function enables MMU & Caches. * Thus disable MMU & Caches. */ mrc p15, 0, r0, c1, c0, 0 /* read CP15 register 1 into r0*/ ands r0, r0, #0x1 /* check if MMU is enabled */ beq mmu_disable_notreq /* exit if MMU is already disabled */ /* Disable caches, MMU */ mrc p15, 0, r0, c1, c0, 0 /* read CP15 register 1 into r0 */ bic r0, r0, #(1 << 2) /* disable D Cache */ bic r0, r0, #0x1 /* clear bit 0 ; MMU off */ bic r0, r0, #(0x1 << 11) /* disable Z, branch prediction */ bic r0, r0, #(0x1 << 1) /* disable A, Strict alignment */ /* check enabled. */ mcr p15, 0, r0, c1, c0, 0 /* write CP15 register 1 */ mov r0, r0 mov r0, r0 mov r0, r0 mov r0, r0 mmu_disable_notreq: NOP /* To return to ROM from plugin, we need to fill in these argument. 
* Here is what need to do: * Need to construct the paramters for this function before return to ROM: * plugin_download(void **start, size_t *bytes, UINT32 *ivt_offset) */ pop {r0-r4, lr} push {r5} ldr r5, boot_data2 str r5, [r0] ldr r5, image_len2 str r5, [r1] ldr r5, second_ivt_offset str r5, [r2] mov r0, #1 pop {r5} /* return back to ROM code */ bx lr /* make the following data right in the end of the output*/ .ltorg #if (defined(CONFIG_NOR_BOOT) || defined(CONFIG_QSPI_BOOT)) #define FLASH_OFFSET 0x1000 #else #define FLASH_OFFSET 0x400 #endif /* * second_ivt_offset is the offset from the "second_ivt_header" to * "image_copy_start", which involves FLASH_OFFSET, plus the first * ivt_header, the plugin code size itself recorded by "ivt2_header" */ second_ivt_offset: .long (ivt2_header + 0x2C + FLASH_OFFSET) /* * The following is the second IVT header plus the second boot data */ ivt2_header: .long 0x0 app2_code_jump_v: .long 0x0 reserv3: .long 0x0 dcd2_ptr: .long 0x0 boot_data2_ptr: .long 0x0 self_ptr2: .long 0x0 app_code_csf2: .long 0x0 reserv4: .long 0x0 boot_data2: .long 0x0 image_len2: .long 0x0 plugin2: .long 0x0
4ms/stm32mp1-baremetal
3,595
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/include/asm/arch-mx35/lowlevel_macro.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2007, Guennadi Liakhovetski <lg@denx.de> * * (C) Copyright 2008-2010 Freescale Semiconductor, Inc. */ #include <asm/arch/imx-regs.h> #include <generated/asm-offsets.h> #include <asm/macro.h> /* * AIPS setup - Only setup MPROTx registers. * The PACR default values are good. * * Default argument values: * - MPR: Set all MPROTx to be non-bufferable, trusted for R/W, not forced to * user-mode. * - OPACR: Clear the on and off peripheral modules Supervisor Protect bit for * SDMA to access them. */ .macro init_aips mpr=0x77777777, opacr=0x00000000 ldr r0, =AIPS1_BASE_ADDR ldr r1, =\mpr str r1, [r0, #AIPS_MPR_0_7] str r1, [r0, #AIPS_MPR_8_15] ldr r2, =AIPS2_BASE_ADDR str r1, [r2, #AIPS_MPR_0_7] str r1, [r2, #AIPS_MPR_8_15] /* Did not change the AIPS control registers access type. */ ldr r1, =\opacr str r1, [r0, #AIPS_OPACR_0_7] str r1, [r0, #AIPS_OPACR_8_15] str r1, [r0, #AIPS_OPACR_16_23] str r1, [r0, #AIPS_OPACR_24_31] str r1, [r0, #AIPS_OPACR_32_39] str r1, [r2, #AIPS_OPACR_0_7] str r1, [r2, #AIPS_OPACR_8_15] str r1, [r2, #AIPS_OPACR_16_23] str r1, [r2, #AIPS_OPACR_24_31] str r1, [r2, #AIPS_OPACR_32_39] .endm /* * MAX (Multi-Layer AHB Crossbar Switch) setup * * Default argument values: * - MPR: priority is M4 > M2 > M3 > M5 > M0 > M1 * - SGPCR: always park on last master * - MGPCR: restore default values */ .macro init_max mpr=0x00302154, sgpcr=0x00000010, mgpcr=0x00000000 ldr r0, =MAX_BASE_ADDR ldr r1, =\mpr str r1, [r0, #MAX_MPR0] /* for S0 */ str r1, [r0, #MAX_MPR1] /* for S1 */ str r1, [r0, #MAX_MPR2] /* for S2 */ str r1, [r0, #MAX_MPR3] /* for S3 */ str r1, [r0, #MAX_MPR4] /* for S4 */ ldr r1, =\sgpcr str r1, [r0, #MAX_SGPCR0] /* for S0 */ str r1, [r0, #MAX_SGPCR1] /* for S1 */ str r1, [r0, #MAX_SGPCR2] /* for S2 */ str r1, [r0, #MAX_SGPCR3] /* for S3 */ str r1, [r0, #MAX_SGPCR4] /* for S4 */ ldr r1, =\mgpcr str r1, [r0, #MAX_MGPCR0] /* for M0 */ str r1, [r0, #MAX_MGPCR1] /* for M1 */ str r1, [r0, 
#MAX_MGPCR2] /* for M2 */ str r1, [r0, #MAX_MGPCR3] /* for M3 */ str r1, [r0, #MAX_MGPCR4] /* for M4 */ str r1, [r0, #MAX_MGPCR5] /* for M5 */ .endm /* * M3IF setup * * Default argument values: * - CTL: * MRRP[0] = L2CC0 not on priority list (0 << 0) = 0x00000000 * MRRP[1] = L2CC1 not on priority list (0 << 1) = 0x00000000 * MRRP[2] = MBX not on priority list (0 << 2) = 0x00000000 * MRRP[3] = MAX1 not on priority list (0 << 3) = 0x00000000 * MRRP[4] = SDMA not on priority list (0 << 4) = 0x00000000 * MRRP[5] = MPEG4 not on priority list (0 << 5) = 0x00000000 * MRRP[6] = IPU1 on priority list (1 << 6) = 0x00000040 * MRRP[7] = IPU2 not on priority list (0 << 7) = 0x00000000 * ------------ * 0x00000040 */ .macro init_m3if ctl=0x00000040 /* M3IF Control Register (M3IFCTL) */ write32 M3IF_BASE_ADDR, \ctl .endm .macro core_init mrc p15, 0, r1, c1, c0, 0 /* Set branch prediction enable */ mrc p15, 0, r0, c1, c0, 1 orr r0, r0, #7 mcr p15, 0, r0, c1, c0, 1 orr r1, r1, #1 << 11 /* Set unaligned access enable */ orr r1, r1, #1 << 22 /* Set low int latency enable */ orr r1, r1, #1 << 21 mcr p15, 0, r1, c1, c0, 0 mov r0, #0 mcr p15, 0, r0, c15, c2, 4 mcr p15, 0, r0, c7, c7, 0 /* Invalidate I cache and D cache */ mcr p15, 0, r0, c8, c7, 0 /* Invalidate TLBs */ mcr p15, 0, r0, c7, c10, 4 /* Drain the write buffer */ /* Setup the Peripheral Port Memory Remap Register */ ldr r0, =0x40000015 /* Start from AIPS 2-GB region */ mcr p15, 0, r0, c15, c2, 4 .endm
4ms/stm32mp1-baremetal
2,770
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/include/asm/arch-mx7/mx7_plugin.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2016 Freescale Semiconductor, Inc. */ #include <config.h> #define ROM_API_TABLE_BASE_ADDR_LEGACY 0x180 #define ROM_VERSION_OFFSET 0x80 #define ROM_API_HWCNFG_SETUP_OFFSET 0x08 plugin_start: push {r0-r4, lr} imx7_ddr_setting imx7_clock_gating imx7_qos_setting /* * Check if we are in USB serial download mode and immediately return to ROM * Need to check USB CTRL clock firstly, then check the USBx_nASYNCLISTADDR */ ldr r0, =0x30384680 ldr r1, [r0] cmp r1, #0 beq normal_boot ldr r0, =0x30B10158 ldr r1, [r0] cmp r1, #0 beq normal_boot pop {r0-r4, lr} bx lr normal_boot: /* * The following is to fill in those arguments for this ROM function * pu_irom_hwcnfg_setup(void **start, size_t *bytes, const void *boot_data) * This function is used to copy data from the storage media into DDR. * start - Initial (possibly partial) image load address on entry. * Final image load address on exit. * bytes - Initial (possibly partial) image size on entry. * Final image size on exit. * boot_data - Initial @ref ivt Boot Data load address. */ adr r0, boot_data2 adr r1, image_len2 adr r2, boot_data2 /* * check the _pu_irom_api_table for the address */ before_calling_rom___pu_irom_hwcnfg_setup: ldr r3, =ROM_VERSION_OFFSET ldr r4, [r3] ldr r3, =ROM_API_TABLE_BASE_ADDR_LEGACY ldr r4, [r3, #ROM_API_HWCNFG_SETUP_OFFSET] blx r4 after_calling_rom___pu_irom_hwcnfg_setup: /* To return to ROM from plugin, we need to fill in these argument. 
* Here is what need to do: * Need to construct the paramters for this function before return to ROM: * plugin_download(void **start, size_t *bytes, UINT32 *ivt_offset) */ pop {r0-r4, lr} push {r5} ldr r5, boot_data2 str r5, [r0] ldr r5, image_len2 str r5, [r1] ldr r5, second_ivt_offset str r5, [r2] mov r0, #1 pop {r5} /* return back to ROM code */ bx lr /* make the following data right in the end of the output*/ .ltorg #define FLASH_OFFSET 0x400 /* * second_ivt_offset is the offset from the "second_ivt_header" to * "image_copy_start", which involves FLASH_OFFSET, plus the first * ivt_header, the plugin code size itself recorded by "ivt2_header" */ second_ivt_offset: .long (ivt2_header + 0x2C + FLASH_OFFSET) /* * The following is the second IVT header plus the second boot data */ ivt2_header: .long 0x0 app2_code_jump_v: .long 0x0 reserv3: .long 0x0 dcd2_ptr: .long 0x0 boot_data2_ptr: .long 0x0 self_ptr2: .long 0x0 app_code_csf2: .long 0x0 reserv4: .long 0x0 boot_data2: .long 0x0 image_len2: .long 0x0 plugin2: .long 0x0
4ms/stm32mp1-baremetal
2,437
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/include/asm/arch-mx7ulp/mx7ulp_plugin.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright 2019 NXP */ #include <config.h> #define ROM_API_TABLE_BASE_ADDR_LEGACY 0x180 #define ROM_VERSION_OFFSET 0x80 #define ROM_API_HWCNFG_SETUP_OFFSET 0x08 plugin_start: push {r0-r4, lr} imx7ulp_ddr_setting imx7ulp_clock_gating imx7ulp_qos_setting normal_boot: /* * The following is to fill in those arguments for this ROM function * pu_irom_hwcnfg_setup(void **start, size_t *bytes, const void *boot_data) * This function is used to copy data from the storage media into DDR. * start - Initial (possibly partial) image load address on entry. * Final image load address on exit. * bytes - Initial (possibly partial) image size on entry. * Final image size on exit. * boot_data - Initial @ref ivt Boot Data load address. */ adr r0, boot_data2 adr r1, image_len2 adr r2, boot_data2 /* * check the _pu_irom_api_table for the address */ before_calling_rom___pu_irom_hwcnfg_setup: ldr r3, =ROM_VERSION_OFFSET ldr r4, [r3] ldr r3, =ROM_API_TABLE_BASE_ADDR_LEGACY ldr r4, [r3, #ROM_API_HWCNFG_SETUP_OFFSET] blx r4 after_calling_rom___pu_irom_hwcnfg_setup: /* * To return to ROM from plugin, we need to fill in these argument. 
* Here is what need to do: * Need to construct the parameters for this function before return to ROM: * plugin_download(void **start, size_t *bytes, UINT32 *ivt_offset) */ pop {r0-r4, lr} push {r5} ldr r5, boot_data2 str r5, [r0] ldr r5, image_len2 str r5, [r1] ldr r5, second_ivt_offset str r5, [r2] mov r0, #1 pop {r5} /* return back to ROM code */ bx lr /* make the following data right in the end of the output*/ .ltorg #define FLASH_OFFSET 0x400 /* * second_ivt_offset is the offset from the "second_ivt_header" to * "image_copy_start", which involves FLASH_OFFSET, plus the first * ivt_header, the plugin code size itself recorded by "ivt2_header" */ second_ivt_offset: .long (ivt2_header + 0x2C + FLASH_OFFSET) /* * The following is the second IVT header plus the second boot data */ ivt2_header: .long 0x0 app2_code_jump_v: .long 0x0 reserv3: .long 0x0 dcd2_ptr: .long 0x0 boot_data2_ptr: .long 0x0 self_ptr2: .long 0x0 app_code_csf2: .long 0x0 reserv4: .long 0x0 boot_data2: .long 0x0 image_len2: .long 0x0 plugin2: .long 0x0
530154436/cpp_learning
1,093
csapp/ch05.s
.text .cstring lC0: .ascii "%d\12\0" .text .globl _main _main: LFB1: pushq %rbp LCFI0: movq %rsp, %rbp LCFI1: subq $16, %rsp movl -4(%rbp), %eax leal 1(%rax), %edx movl %edx, -4(%rbp) movl %eax, %esi leaq lC0(%rip), %rdi movl $0, %eax call _printf movl $0, %eax leave LCFI2: ret LFE1: .section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support EH_frame1: .set L$set$0,LECIE1-LSCIE1 .long L$set$0 LSCIE1: .long 0 .byte 0x1 .ascii "zR\0" .byte 0x1 .byte 0x78 .byte 0x10 .byte 0x1 .byte 0x10 .byte 0xc .byte 0x7 .byte 0x8 .byte 0x90 .byte 0x1 .align 3 LECIE1: LSFDE1: .set L$set$1,LEFDE1-LASFDE1 .long L$set$1 LASFDE1: .long LASFDE1-EH_frame1 .quad LFB1-. .set L$set$2,LFE1-LFB1 .quad L$set$2 .byte 0 .byte 0x4 .set L$set$3,LCFI0-LFB1 .long L$set$3 .byte 0xe .byte 0x10 .byte 0x86 .byte 0x2 .byte 0x4 .set L$set$4,LCFI1-LCFI0 .long L$set$4 .byte 0xd .byte 0x6 .byte 0x4 .set L$set$5,LCFI2-LCFI1 .long L$set$5 .byte 0xc .byte 0x7 .byte 0x8 .align 3 LEFDE1: .constructor .destructor .align 1 .subsections_via_symbols
530154436/cpp_learning
3,074
csapp/ch03/exer.s
.text .globl _vec_length _vec_length: LFB0: pushq %rbp LCFI0: movq %rsp, %rbp LCFI1: movq %rdi, -8(%rbp) movq -8(%rbp), %rax movl (%rax), %eax popq %rbp LCFI2: ret LFE0: .globl _get_vec_element _get_vec_element: LFB1: pushq %rbp LCFI3: movq %rsp, %rbp LCFI4: movq %rdi, -24(%rbp) movl %esi, -28(%rbp) movq -24(%rbp), %rax movl (%rax), %eax cmpl %eax, -28(%rbp) jl L4 pxor %xmm0, %xmm0 jmp L5 L4: movq -24(%rbp), %rax movq 8(%rax), %rdx movl -28(%rbp), %eax cltq salq $2, %rax addq %rdx, %rax movss (%rax), %xmm0 movq -8(%rbp), %rax movss %xmm0, (%rax) movq -8(%rbp), %rax movss (%rax), %xmm0 L5: popq %rbp LCFI5: ret LFE1: .globl _inner0 _inner0: LFB2: pushq %rbp LCFI6: movq %rsp, %rbp LCFI7: subq $48, %rsp movq %rdi, -24(%rbp) movq %rsi, -32(%rbp) movq %rdx, -40(%rbp) movq -40(%rbp), %rax pxor %xmm0, %xmm0 movss %xmm0, (%rax) movq $0, -8(%rbp) jmp L7 L8: movq -40(%rbp), %rax movss (%rax), %xmm1 movss %xmm1, -44(%rbp) movq -8(%rbp), %rax movl %eax, %edx movq -24(%rbp), %rax movl %edx, %esi movq %rax, %rdi call _get_vec_element movss %xmm0, -48(%rbp) movq -8(%rbp), %rax movl %eax, %edx movq -32(%rbp), %rax movl %edx, %esi movq %rax, %rdi call _get_vec_element mulss -48(%rbp), %xmm0 addss -44(%rbp), %xmm0 movq -40(%rbp), %rax movss %xmm0, (%rax) addq $1, -8(%rbp) L7: movq -24(%rbp), %rax movq %rax, %rdi call _vec_length cltq cmpq %rax, -8(%rbp) jl L8 nop leave LCFI8: ret LFE2: .section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support EH_frame1: .set L$set$0,LECIE1-LSCIE1 .long L$set$0 LSCIE1: .long 0 .byte 0x1 .ascii "zR\0" .byte 0x1 .byte 0x78 .byte 0x10 .byte 0x1 .byte 0x10 .byte 0xc .byte 0x7 .byte 0x8 .byte 0x90 .byte 0x1 .align 3 LECIE1: LSFDE1: .set L$set$1,LEFDE1-LASFDE1 .long L$set$1 LASFDE1: .long LASFDE1-EH_frame1 .quad LFB0-. 
.set L$set$2,LFE0-LFB0 .quad L$set$2 .byte 0 .byte 0x4 .set L$set$3,LCFI0-LFB0 .long L$set$3 .byte 0xe .byte 0x10 .byte 0x86 .byte 0x2 .byte 0x4 .set L$set$4,LCFI1-LCFI0 .long L$set$4 .byte 0xd .byte 0x6 .byte 0x4 .set L$set$5,LCFI2-LCFI1 .long L$set$5 .byte 0xc .byte 0x7 .byte 0x8 .align 3 LEFDE1: LSFDE3: .set L$set$6,LEFDE3-LASFDE3 .long L$set$6 LASFDE3: .long LASFDE3-EH_frame1 .quad LFB1-. .set L$set$7,LFE1-LFB1 .quad L$set$7 .byte 0 .byte 0x4 .set L$set$8,LCFI3-LFB1 .long L$set$8 .byte 0xe .byte 0x10 .byte 0x86 .byte 0x2 .byte 0x4 .set L$set$9,LCFI4-LCFI3 .long L$set$9 .byte 0xd .byte 0x6 .byte 0x4 .set L$set$10,LCFI5-LCFI4 .long L$set$10 .byte 0xc .byte 0x7 .byte 0x8 .align 3 LEFDE3: LSFDE5: .set L$set$11,LEFDE5-LASFDE5 .long L$set$11 LASFDE5: .long LASFDE5-EH_frame1 .quad LFB2-. .set L$set$12,LFE2-LFB2 .quad L$set$12 .byte 0 .byte 0x4 .set L$set$13,LCFI6-LFB2 .long L$set$13 .byte 0xe .byte 0x10 .byte 0x86 .byte 0x2 .byte 0x4 .set L$set$14,LCFI7-LCFI6 .long L$set$14 .byte 0xd .byte 0x6 .byte 0x4 .set L$set$15,LCFI8-LCFI7 .long L$set$15 .byte 0xc .byte 0x7 .byte 0x8 .align 3 LEFDE5: .subsections_via_symbols
530154436/cpp_learning
8,466
csapp/ch05/p347_程序优化方法.s
.text .globl _new_vec _new_vec: LFB3: pushq %rbp LCFI0: movq %rsp, %rbp LCFI1: subq $32, %rsp movq %rdi, -24(%rbp) movl $16, %edi call _malloc movq %rax, -8(%rbp) cmpq $0, -8(%rbp) jne L2 movl $0, %eax jmp L3 L2: movq -8(%rbp), %rax movq -24(%rbp), %rdx movq %rdx, (%rax) cmpq $0, -24(%rbp) jle L4 movq -24(%rbp), %rax movl $4, %esi movq %rax, %rdi call _calloc movq %rax, -16(%rbp) cmpq $0, -16(%rbp) jne L5 movq -8(%rbp), %rax movq %rax, %rdi call _free movl $0, %eax jmp L3 L5: movq -8(%rbp), %rax movq -16(%rbp), %rdx movq %rdx, 8(%rax) jmp L6 L4: movq -8(%rbp), %rax movq $0, 8(%rax) L6: movq -8(%rbp), %rax L3: leave LCFI2: ret LFE3: .globl _get_vec_element _get_vec_element: LFB4: pushq %rbp LCFI3: movq %rsp, %rbp LCFI4: movq %rdi, -8(%rbp) movq %rsi, -16(%rbp) movq %rdx, -24(%rbp) cmpq $0, -16(%rbp) js L8 movq -8(%rbp), %rax movq (%rax), %rax cmpq %rax, -16(%rbp) jl L9 L8: movl $0, %eax jmp L10 L9: movq -8(%rbp), %rax movq 8(%rax), %rdx movq -16(%rbp), %rax salq $2, %rax addq %rdx, %rax movl (%rax), %edx movq -24(%rbp), %rax movl %edx, (%rax) movl $1, %eax L10: popq %rbp LCFI5: ret LFE4: .globl _vec_length _vec_length: LFB5: pushq %rbp LCFI6: movq %rsp, %rbp LCFI7: movq %rdi, -8(%rbp) movq -8(%rbp), %rax movq (%rax), %rax popq %rbp LCFI8: ret LFE5: .globl _combine1 _combine1: LFB6: pushq %rbp LCFI9: movq %rsp, %rbp LCFI10: subq $32, %rsp movq %rdi, -24(%rbp) movq %rsi, -32(%rbp) movq -32(%rbp), %rax movl $0, (%rax) movq $0, -8(%rbp) jmp L14 L15: leaq -12(%rbp), %rax movq -8(%rbp), %rsi movq -24(%rbp), %rcx movq %rax, %rdx movq %rcx, %rdi call _get_vec_element movq -32(%rbp), %rax movl (%rax), %edx movl -12(%rbp), %eax addl %eax, %edx movq -32(%rbp), %rax movl %edx, (%rax) addq $1, -8(%rbp) L14: movq -24(%rbp), %rax movq %rax, %rdi call _vec_length cmpq %rax, -8(%rbp) jl L15 nop leave LCFI11: ret LFE6: .globl _combine2 _combine2: LFB7: pushq %rbp LCFI12: movq %rsp, %rbp LCFI13: subq $48, %rsp movq %rdi, -40(%rbp) movq %rsi, -48(%rbp) movq -40(%rbp), %rax movq %rax, 
%rdi call _vec_length movq %rax, -16(%rbp) movq -48(%rbp), %rax movl $0, (%rax) movq $0, -8(%rbp) jmp L17 L18: leaq -20(%rbp), %rax movq -8(%rbp), %rsi movq -40(%rbp), %rcx movq %rax, %rdx movq %rcx, %rdi call _get_vec_element movq -48(%rbp), %rax movl (%rax), %edx movl -20(%rbp), %eax addl %eax, %edx movq -48(%rbp), %rax movl %edx, (%rax) addq $1, -8(%rbp) L17: movq -8(%rbp), %rax cmpq -16(%rbp), %rax jl L18 nop leave LCFI14: ret LFE7: .globl _get_vec_start _get_vec_start: LFB8: pushq %rbp LCFI15: movq %rsp, %rbp LCFI16: movq %rdi, -8(%rbp) movq -8(%rbp), %rax movq 8(%rax), %rax popq %rbp LCFI17: ret LFE8: .globl _combine3 _combine3: LFB9: pushq %rbp LCFI18: movq %rsp, %rbp LCFI19: subq $48, %rsp movq %rdi, -40(%rbp) movq %rsi, -48(%rbp) movq -40(%rbp), %rax movq %rax, %rdi call _vec_length movq %rax, -16(%rbp) movq -40(%rbp), %rax movq %rax, %rdi call _get_vec_start movq %rax, -24(%rbp) movq -48(%rbp), %rax movl $0, (%rax) movq $0, -8(%rbp) jmp L22 L23: movq -48(%rbp), %rax movl (%rax), %edx movq -8(%rbp), %rax leaq 0(,%rax,4), %rcx movq -24(%rbp), %rax addq %rcx, %rax movl (%rax), %eax addl %eax, %edx movq -48(%rbp), %rax movl %edx, (%rax) addq $1, -8(%rbp) L22: movq -8(%rbp), %rax cmpq -16(%rbp), %rax jl L23 nop leave LCFI20: ret LFE9: .globl _combine4 _combine4: LFB10: pushq %rbp LCFI21: movq %rsp, %rbp LCFI22: subq $48, %rsp movq %rdi, -40(%rbp) movq %rsi, -48(%rbp) movq -40(%rbp), %rax movq %rax, %rdi call _vec_length movq %rax, -24(%rbp) movq -40(%rbp), %rax movq %rax, %rdi call _get_vec_start movq %rax, -32(%rbp) movl $0, -12(%rbp) movq $0, -8(%rbp) jmp L25 L26: movq -8(%rbp), %rax leaq 0(,%rax,4), %rdx movq -32(%rbp), %rax addq %rdx, %rax movl (%rax), %eax addl %eax, -12(%rbp) addq $1, -8(%rbp) L25: movq -8(%rbp), %rax cmpq -24(%rbp), %rax jl L26 movq -48(%rbp), %rax movl -12(%rbp), %edx movl %edx, (%rax) nop leave LCFI23: ret LFE10: .globl _main _main: LFB11: pushq %rbp LCFI24: movq %rsp, %rbp LCFI25: movl $0, %eax popq %rbp LCFI26: ret LFE11: .section 
__TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support EH_frame1: .set L$set$0,LECIE1-LSCIE1 .long L$set$0 LSCIE1: .long 0 .byte 0x1 .ascii "zR\0" .byte 0x1 .byte 0x78 .byte 0x10 .byte 0x1 .byte 0x10 .byte 0xc .byte 0x7 .byte 0x8 .byte 0x90 .byte 0x1 .align 3 LECIE1: LSFDE1: .set L$set$1,LEFDE1-LASFDE1 .long L$set$1 LASFDE1: .long LASFDE1-EH_frame1 .quad LFB3-. .set L$set$2,LFE3-LFB3 .quad L$set$2 .byte 0 .byte 0x4 .set L$set$3,LCFI0-LFB3 .long L$set$3 .byte 0xe .byte 0x10 .byte 0x86 .byte 0x2 .byte 0x4 .set L$set$4,LCFI1-LCFI0 .long L$set$4 .byte 0xd .byte 0x6 .byte 0x4 .set L$set$5,LCFI2-LCFI1 .long L$set$5 .byte 0xc .byte 0x7 .byte 0x8 .align 3 LEFDE1: LSFDE3: .set L$set$6,LEFDE3-LASFDE3 .long L$set$6 LASFDE3: .long LASFDE3-EH_frame1 .quad LFB4-. .set L$set$7,LFE4-LFB4 .quad L$set$7 .byte 0 .byte 0x4 .set L$set$8,LCFI3-LFB4 .long L$set$8 .byte 0xe .byte 0x10 .byte 0x86 .byte 0x2 .byte 0x4 .set L$set$9,LCFI4-LCFI3 .long L$set$9 .byte 0xd .byte 0x6 .byte 0x4 .set L$set$10,LCFI5-LCFI4 .long L$set$10 .byte 0xc .byte 0x7 .byte 0x8 .align 3 LEFDE3: LSFDE5: .set L$set$11,LEFDE5-LASFDE5 .long L$set$11 LASFDE5: .long LASFDE5-EH_frame1 .quad LFB5-. .set L$set$12,LFE5-LFB5 .quad L$set$12 .byte 0 .byte 0x4 .set L$set$13,LCFI6-LFB5 .long L$set$13 .byte 0xe .byte 0x10 .byte 0x86 .byte 0x2 .byte 0x4 .set L$set$14,LCFI7-LCFI6 .long L$set$14 .byte 0xd .byte 0x6 .byte 0x4 .set L$set$15,LCFI8-LCFI7 .long L$set$15 .byte 0xc .byte 0x7 .byte 0x8 .align 3 LEFDE5: LSFDE7: .set L$set$16,LEFDE7-LASFDE7 .long L$set$16 LASFDE7: .long LASFDE7-EH_frame1 .quad LFB6-. .set L$set$17,LFE6-LFB6 .quad L$set$17 .byte 0 .byte 0x4 .set L$set$18,LCFI9-LFB6 .long L$set$18 .byte 0xe .byte 0x10 .byte 0x86 .byte 0x2 .byte 0x4 .set L$set$19,LCFI10-LCFI9 .long L$set$19 .byte 0xd .byte 0x6 .byte 0x4 .set L$set$20,LCFI11-LCFI10 .long L$set$20 .byte 0xc .byte 0x7 .byte 0x8 .align 3 LEFDE7: LSFDE9: .set L$set$21,LEFDE9-LASFDE9 .long L$set$21 LASFDE9: .long LASFDE9-EH_frame1 .quad LFB7-. 
.set L$set$22,LFE7-LFB7 .quad L$set$22 .byte 0 .byte 0x4 .set L$set$23,LCFI12-LFB7 .long L$set$23 .byte 0xe .byte 0x10 .byte 0x86 .byte 0x2 .byte 0x4 .set L$set$24,LCFI13-LCFI12 .long L$set$24 .byte 0xd .byte 0x6 .byte 0x4 .set L$set$25,LCFI14-LCFI13 .long L$set$25 .byte 0xc .byte 0x7 .byte 0x8 .align 3 LEFDE9: LSFDE11: .set L$set$26,LEFDE11-LASFDE11 .long L$set$26 LASFDE11: .long LASFDE11-EH_frame1 .quad LFB8-. .set L$set$27,LFE8-LFB8 .quad L$set$27 .byte 0 .byte 0x4 .set L$set$28,LCFI15-LFB8 .long L$set$28 .byte 0xe .byte 0x10 .byte 0x86 .byte 0x2 .byte 0x4 .set L$set$29,LCFI16-LCFI15 .long L$set$29 .byte 0xd .byte 0x6 .byte 0x4 .set L$set$30,LCFI17-LCFI16 .long L$set$30 .byte 0xc .byte 0x7 .byte 0x8 .align 3 LEFDE11: LSFDE13: .set L$set$31,LEFDE13-LASFDE13 .long L$set$31 LASFDE13: .long LASFDE13-EH_frame1 .quad LFB9-. .set L$set$32,LFE9-LFB9 .quad L$set$32 .byte 0 .byte 0x4 .set L$set$33,LCFI18-LFB9 .long L$set$33 .byte 0xe .byte 0x10 .byte 0x86 .byte 0x2 .byte 0x4 .set L$set$34,LCFI19-LCFI18 .long L$set$34 .byte 0xd .byte 0x6 .byte 0x4 .set L$set$35,LCFI20-LCFI19 .long L$set$35 .byte 0xc .byte 0x7 .byte 0x8 .align 3 LEFDE13: LSFDE15: .set L$set$36,LEFDE15-LASFDE15 .long L$set$36 LASFDE15: .long LASFDE15-EH_frame1 .quad LFB10-. .set L$set$37,LFE10-LFB10 .quad L$set$37 .byte 0 .byte 0x4 .set L$set$38,LCFI21-LFB10 .long L$set$38 .byte 0xe .byte 0x10 .byte 0x86 .byte 0x2 .byte 0x4 .set L$set$39,LCFI22-LCFI21 .long L$set$39 .byte 0xd .byte 0x6 .byte 0x4 .set L$set$40,LCFI23-LCFI22 .long L$set$40 .byte 0xc .byte 0x7 .byte 0x8 .align 3 LEFDE15: LSFDE17: .set L$set$41,LEFDE17-LASFDE17 .long L$set$41 LASFDE17: .long LASFDE17-EH_frame1 .quad LFB11-. 
.set L$set$42,LFE11-LFB11 .quad L$set$42 .byte 0 .byte 0x4 .set L$set$43,LCFI24-LFB11 .long L$set$43 .byte 0xe .byte 0x10 .byte 0x86 .byte 0x2 .byte 0x4 .set L$set$44,LCFI25-LCFI24 .long L$set$44 .byte 0xd .byte 0x6 .byte 0x4 .set L$set$45,LCFI26-LCFI25 .long L$set$45 .byte 0xc .byte 0x7 .byte 0x8 .align 3 LEFDE17: .subsections_via_symbols
530154436/cpp_learning
1,301
csapp/ch03/p142/control.s
.text .globl _absdiff _absdiff: LFB0: cmpq %rsi, %rdi jge L2 movq %rsi, %rax subq %rdi, %rax ret L2: movq %rdi, %rax subq %rsi, %rax ret LFE0: .globl _goto_absdiff _goto_absdiff: LFB1: cmpq %rsi, %rdi jl L5 movq %rdi, %rax subq %rsi, %rax ret L5: movq %rsi, %rax subq %rdi, %rax L6: ret LFE1: .globl _cmovdiff _cmovdiff: LFB2: movq %rsi, %rdx subq %rdi, %rdx movq %rdi, %rax subq %rsi, %rax cmpq %rdi, %rsi jle L7 movq %rdx, %rax L7: ret LFE2: .section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support EH_frame1: .set L$set$0,LECIE1-LSCIE1 .long L$set$0 LSCIE1: .long 0 .byte 0x1 .ascii "zR\0" .byte 0x1 .byte 0x78 .byte 0x10 .byte 0x1 .byte 0x10 .byte 0xc .byte 0x7 .byte 0x8 .byte 0x90 .byte 0x1 .align 3 LECIE1: LSFDE1: .set L$set$1,LEFDE1-LASFDE1 .long L$set$1 LASFDE1: .long LASFDE1-EH_frame1 .quad LFB0-. .set L$set$2,LFE0-LFB0 .quad L$set$2 .byte 0 .align 3 LEFDE1: LSFDE3: .set L$set$3,LEFDE3-LASFDE3 .long L$set$3 LASFDE3: .long LASFDE3-EH_frame1 .quad LFB1-. .set L$set$4,LFE1-LFB1 .quad L$set$4 .byte 0 .align 3 LEFDE3: LSFDE5: .set L$set$5,LEFDE5-LASFDE5 .long L$set$5 LASFDE5: .long LASFDE5-EH_frame1 .quad LFB2-. .set L$set$6,LFE2-LFB2 .quad L$set$6 .byte 0 .align 3 LEFDE5: .subsections_via_symbols
530154436/cpp_learning
2,577
csapp/ch03/3_10/buffer_overflow.s
.text .globl _gets _gets: LFB4: pushq %rbp LCFI0: movq %rsp, %rbp LCFI1: subq $32, %rsp movq %rdi, -24(%rbp) movq -24(%rbp), %rax movq %rax, -8(%rbp) jmp L2 L4: movq -8(%rbp), %rax leaq 1(%rax), %rdx movq %rdx, -8(%rbp) movl -12(%rbp), %edx movb %dl, (%rax) L2: call _getchar movl %eax, -12(%rbp) cmpl $10, -12(%rbp) je L3 cmpl $-1, -12(%rbp) jne L4 L3: cmpl $-1, -12(%rbp) jne L5 movq -8(%rbp), %rax cmpq -24(%rbp), %rax jne L5 movl $0, %eax jmp L6 L5: movq -8(%rbp), %rax leaq 1(%rax), %rdx movq %rdx, -8(%rbp) movb $0, (%rax) movq -24(%rbp), %rax L6: leave LCFI2: ret LFE4: .globl _echo _echo: LFB5: pushq %rbp LCFI3: movq %rsp, %rbp LCFI4: subq $16, %rsp leaq -4(%rbp), %rax movq %rax, %rdi call _gets leaq -4(%rbp), %rax movq %rax, %rdi call _puts nop leave LCFI5: ret LFE5: .globl _main _main: LFB6: pushq %rbp LCFI6: movq %rsp, %rbp LCFI7: movl $0, %eax call _echo movl $0, %eax popq %rbp LCFI8: ret LFE6: .section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support EH_frame1: .set L$set$0,LECIE1-LSCIE1 .long L$set$0 LSCIE1: .long 0 .byte 0x1 .ascii "zR\0" .byte 0x1 .byte 0x78 .byte 0x10 .byte 0x1 .byte 0x10 .byte 0xc .byte 0x7 .byte 0x8 .byte 0x90 .byte 0x1 .align 3 LECIE1: LSFDE1: .set L$set$1,LEFDE1-LASFDE1 .long L$set$1 LASFDE1: .long LASFDE1-EH_frame1 .quad LFB4-. .set L$set$2,LFE4-LFB4 .quad L$set$2 .byte 0 .byte 0x4 .set L$set$3,LCFI0-LFB4 .long L$set$3 .byte 0xe .byte 0x10 .byte 0x86 .byte 0x2 .byte 0x4 .set L$set$4,LCFI1-LCFI0 .long L$set$4 .byte 0xd .byte 0x6 .byte 0x4 .set L$set$5,LCFI2-LCFI1 .long L$set$5 .byte 0xc .byte 0x7 .byte 0x8 .align 3 LEFDE1: LSFDE3: .set L$set$6,LEFDE3-LASFDE3 .long L$set$6 LASFDE3: .long LASFDE3-EH_frame1 .quad LFB5-. 
.set L$set$7,LFE5-LFB5 .quad L$set$7 .byte 0 .byte 0x4 .set L$set$8,LCFI3-LFB5 .long L$set$8 .byte 0xe .byte 0x10 .byte 0x86 .byte 0x2 .byte 0x4 .set L$set$9,LCFI4-LCFI3 .long L$set$9 .byte 0xd .byte 0x6 .byte 0x4 .set L$set$10,LCFI5-LCFI4 .long L$set$10 .byte 0xc .byte 0x7 .byte 0x8 .align 3 LEFDE3: LSFDE5: .set L$set$11,LEFDE5-LASFDE5 .long L$set$11 LASFDE5: .long LASFDE5-EH_frame1 .quad LFB6-. .set L$set$12,LFE6-LFB6 .quad L$set$12 .byte 0 .byte 0x4 .set L$set$13,LCFI6-LFB6 .long L$set$13 .byte 0xe .byte 0x10 .byte 0x86 .byte 0x2 .byte 0x4 .set L$set$14,LCFI7-LCFI6 .long L$set$14 .byte 0xd .byte 0x6 .byte 0x4 .set L$set$15,LCFI8-LCFI7 .long L$set$15 .byte 0xc .byte 0x7 .byte 0x8 .align 3 LEFDE5: .subsections_via_symbols
530154436/cpp_learning
5,940
csapp/code/perf/clock.s
.file "clock.c" .version "01.01" gcc2_compiled.: .data .align 4 .type cyc_hi,@object .size cyc_hi,4 cyc_hi: .long 0 .align 4 .type cyc_lo,@object .size cyc_lo,4 cyc_lo: .long 0 .text .align 4 .globl access_counter .type access_counter,@function access_counter: pushl %ebp movl %esp,%ebp pushl %edi pushl %esi pushl %ebx movl 8(%ebp),%esi movl 12(%ebp),%edi #APP rdtsc; movl %edx,%ecx; movl %eax,%ebx #NO_APP movl %ecx,(%esi) movl %ebx,(%edi) popl %ebx popl %esi popl %edi movl %ebp,%esp popl %ebp ret .Lfe1: .size access_counter,.Lfe1-access_counter .align 4 .globl start_counter .type start_counter,@function start_counter: pushl %ebp movl %esp,%ebp subl $8,%esp addl $-8,%esp pushl $cyc_lo pushl $cyc_hi call access_counter movl %ebp,%esp popl %ebp ret .Lfe2: .size start_counter,.Lfe2-start_counter .section .rodata .align 32 .LC2: .string "Error: Cycle counter returning negative value: %.0f\n" .align 8 .LC0: .long 0x0,0x41d00000 .align 8 .LC1: .long 0x0,0x40100000 .text .align 4 .globl get_counter .type get_counter,@function get_counter: pushl %ebp movl %esp,%ebp subl $52,%esp pushl %ebx addl $-8,%esp leal -12(%ebp),%eax pushl %eax leal -16(%ebp),%eax pushl %eax call access_counter movl -12(%ebp),%eax movl %eax,%ecx subl cyc_lo,%ecx cmpl %eax,%ecx seta %al movzbl %al,%edx movl cyc_hi,%eax movl -16(%ebp),%ebx subl %eax,%ebx movl %ebx,%eax subl %edx,%eax xorl %edx,%edx movl %eax,-8(%ebp) movl %edx,-4(%ebp) fildll -8(%ebp) fmull .LC0 movl %ecx,%eax fmull .LC1 movl %eax,-8(%ebp) movl %edx,-4(%ebp) fildll -8(%ebp) faddp %st,%st(1) fldz addl $16,%esp fcomp %st(1) fnstsw %ax andb $69,%ah jne .L35 subl $8,%esp fstl (%esp) pushl $.LC2 movl stderr,%eax pushl %eax fstpt -32(%ebp) call fprintf fldt -32(%ebp) .L35: movl -56(%ebp),%ebx movl %ebp,%esp popl %ebp ret .Lfe3: .size get_counter,.Lfe3-get_counter .align 4 .globl ovhd .type ovhd,@function ovhd: pushl %ebp movl %esp,%ebp subl $20,%esp pushl %ebx movl $1,%ebx jmp .L40 .L42: fstp %st(0) .p2align 4,,7 .L40: call start_counter call 
get_counter decl %ebx jns .L42 popl %ebx movl %ebp,%esp popl %ebp ret .Lfe4: .size ovhd,.Lfe4-ovhd .section .rodata .align 32 .LC4: .string "Processor Clock Rate ~= %.1f MHz\n" .align 8 .LC3: .long 0x0,0x412e8480 .text .align 4 .globl mhz_full .type mhz_full,@function mhz_full: pushl %ebp movl %esp,%ebp subl $32,%esp pushl %esi pushl %ebx movl 8(%ebp),%esi movl 12(%ebp),%ebx call start_counter addl $-12,%esp pushl %ebx call sleep call get_counter movl %ebx,-4(%ebp) fildl -4(%ebp) fmull .LC3 addl $16,%esp fdivrp %st,%st(1) testl %esi,%esi je .L44 addl $-4,%esp subl $8,%esp fstl (%esp) pushl $.LC4 fstpt -16(%ebp) call printf fldt -16(%ebp) .L44: leal -40(%ebp),%esp popl %ebx popl %esi movl %ebp,%esp popl %ebp ret .Lfe5: .size mhz_full,.Lfe5-mhz_full .align 4 .globl mhz .type mhz,@function mhz: pushl %ebp movl %esp,%ebp subl $8,%esp movl 8(%ebp),%eax addl $-8,%esp pushl $2 pushl %eax call mhz_full movl %ebp,%esp popl %ebp ret .Lfe6: .size mhz,.Lfe6-mhz .data .align 8 .type cyc_per_tick,@object .size cyc_per_tick,8 cyc_per_tick: .long 0x0,0x0 .section .rodata .LC7: .string "Setting cyc_per_tick to %f\n" .align 8 .LC5: .long 0x0,0x408f4000 .align 8 .LC6: .long 0x0,0x40a77000 .text .align 4 .type callibrate,@function callibrate: pushl %ebp movl %esp,%ebp subl $76,%esp pushl %edi pushl %esi pushl %ebx addl $-12,%esp leal -16(%ebp),%ebx pushl %ebx call times movl -16(%ebp),%esi call start_counter call get_counter addl $16,%esp movl %ebx,%edi movl $99,%ebx .p2align 4,,7 .L49: fstpt -48(%ebp) call get_counter fld %st(0) fldt -48(%ebp) fsubr %st,%st(1) fxch %st(1) fcoml .LC5 fnstsw %ax andb $5,%ah jne .L56 fstp %st(1) addl $-12,%esp pushl %edi fstpt -32(%ebp) fstpt -64(%ebp) call times movl -16(%ebp),%edx addl $16,%esp fldt -32(%ebp) fldt -64(%ebp) cmpl %esi,%edx jle .L61 movl %edx,%eax subl %esi,%eax movl %eax,-20(%ebp) fildl -20(%ebp) fdivrp %st,%st(2) fldz fldl cyc_per_tick fucom %st(1) fnstsw %ax andb $69,%ah cmpb $64,%ah fstp %st(1) je .L58 fcomp %st(2) fnstsw %ax andb 
$69,%ah jne .L59 jmp .L53 .L58: fstp %st(0) .L53: fxch %st(1) fcoml .LC6 fnstsw %ax andb $69,%ah jne .L60 fstpl cyc_per_tick jmp .L52 .L59: fstp %st(1) jmp .L52 .L60: fstp %st(0) .L52: decl %ebx movl %edx,%esi jmp .L47 .L56: fstp %st(0) .L61: fstp %st(1) .L47: testl %ebx,%ebx jge .L49 fstp %st(0) cmpl $0,8(%ebp) je .L55 fldl cyc_per_tick addl $-4,%esp subl $8,%esp fstpl (%esp) pushl $.LC7 call printf .L55: leal -88(%ebp),%esp popl %ebx popl %esi popl %edi movl %ebp,%esp popl %ebp ret .Lfe7: .size callibrate,.Lfe7-callibrate .data .align 4 .type start_tick,@object .size start_tick,4 start_tick: .long 0 .text .align 4 .globl start_comp_counter .type start_comp_counter,@function start_comp_counter: pushl %ebp fldl cyc_per_tick fldz movl %esp,%ebp subl $24,%esp fucompp fnstsw %ax andb $68,%ah xorb $64,%ah jne .L63 addl $-12,%esp pushl $1 call callibrate addl $16,%esp .L63: addl $-12,%esp leal -16(%ebp),%eax pushl %eax call times movl -16(%ebp),%eax movl %eax,start_tick call start_counter movl %ebp,%esp popl %ebp ret .Lfe8: .size start_comp_counter,.Lfe8-start_comp_counter .align 4 .globl get_comp_counter .type get_comp_counter,@function get_comp_counter: pushl %ebp movl %esp,%ebp subl $40,%esp call get_counter fstpl -32(%ebp) addl $-12,%esp leal -16(%ebp),%eax pushl %eax call times movl start_tick,%eax movl -16(%ebp),%edx subl %eax,%edx movl %edx,%eax movl %eax,-20(%ebp) fildl -20(%ebp) fmull cyc_per_tick fsubrl -32(%ebp) fstl -32(%ebp) movl %ebp,%esp popl %ebp ret .Lfe9: .size get_comp_counter,.Lfe9-get_comp_counter .ident "GCC: (GNU) 2.95.3 20010315 (release)"
530154436/cpp_learning
1,640
csapp/code/conc/badcnt.s
.file "badcnt.c" .text .globl thread .type thread, @function thread: .LFB92: .cfi_startproc movq (%rdi), %rcx testq %rcx, %rcx jle .L2 movl $0, %eax .L3: movq cnt(%rip), %rdx addq $1, %rdx movq %rdx, cnt(%rip) addq $1, %rax cmpq %rcx, %rax jne .L3 .L2: movl $0, %eax ret .cfi_endproc .LFE92: .size thread, .-thread .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "usage: %s <niters>\n" .LC1: .string "BOOM! cnt=%ld\n" .LC2: .string "OK cnt=%ld\n" .text .globl main .type main, @function main: .LFB91: .cfi_startproc subq $40, %rsp .cfi_def_cfa_offset 48 cmpl $2, %edi je .L5 movq (%rsi), %rdx movl $.LC0, %esi movl $1, %edi movl $0, %eax call __printf_chk movl $0, %edi call exit .L5: movq 8(%rsi), %rdi movl $10, %edx movl $0, %esi call strtol cltq movq %rax, 8(%rsp) leaq 8(%rsp), %rcx movl $thread, %edx movl $0, %esi leaq 16(%rsp), %rdi call Pthread_create leaq 8(%rsp), %rcx movl $thread, %edx movl $0, %esi leaq 24(%rsp), %rdi call Pthread_create movl $0, %esi movq 16(%rsp), %rdi call Pthread_join movl $0, %esi movq 24(%rsp), %rdi call Pthread_join movq cnt(%rip), %rdx movq 8(%rsp), %rax addq %rax, %rax cmpq %rdx, %rax je .L6 movq cnt(%rip), %rdx movl $.LC1, %esi movl $1, %edi movl $0, %eax call __printf_chk jmp .L7 .L6: movq cnt(%rip), %rdx movl $.LC2, %esi movl $1, %edi movl $0, %eax call __printf_chk .L7: movl $0, %edi call exit .cfi_endproc .LFE91: .size main, .-main .globl cnt .bss .align 8 .type cnt, @object .size cnt, 8 cnt: .zero 8 .ident "GCC: (Ubuntu 4.8.1-2ubuntu1~12.04) 4.8.1" .section .note.GNU-stack,"",@progbits
530154436/cpp_learning
2,061
csapp/code/conc/goodcnt.s
.file "goodcnt.c" .text .globl thread .type thread, @function thread: .LFB92: .cfi_startproc pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $8, %rsp .cfi_def_cfa_offset 32 movl (%rdi), %ebp testl %ebp, %ebp jle .L2 movl $0, %ebx .L3: movl $mutex, %edi call P movq cnt(%rip), %rax addq $1, %rax movq %rax, cnt(%rip) movl $mutex, %edi call V addl $1, %ebx cmpl %ebp, %ebx jne .L3 .L2: movl $0, %eax addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE92: .size thread, .-thread .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "usage: %s <niters>\n" .LC1: .string "BOOM! cnt=%ld\n" .LC2: .string "OK cnt=%ld\n" .text .globl main .type main, @function main: .LFB91: .cfi_startproc subq $40, %rsp .cfi_def_cfa_offset 48 cmpl $2, %edi je .L6 movq (%rsi), %rdx movl $.LC0, %esi movl $1, %edi movl $0, %eax call __printf_chk movl $0, %edi call exit .L6: movq 8(%rsi), %rdi movl $10, %edx movl $0, %esi call strtol movl %eax, 12(%rsp) movl $1, %edx movl $0, %esi movl $mutex, %edi call Sem_init leaq 12(%rsp), %rcx movl $thread, %edx movl $0, %esi leaq 16(%rsp), %rdi call Pthread_create leaq 12(%rsp), %rcx movl $thread, %edx movl $0, %esi leaq 24(%rsp), %rdi call Pthread_create movl $0, %esi movq 16(%rsp), %rdi call Pthread_join movl $0, %esi movq 24(%rsp), %rdi call Pthread_join movq cnt(%rip), %rax movl 12(%rsp), %ecx leal (%rcx,%rcx), %edx movslq %edx, %rdx cmpq %rax, %rdx je .L7 movq cnt(%rip), %rdx movl $.LC1, %esi movl $1, %edi movl $0, %eax call __printf_chk jmp .L8 .L7: movq cnt(%rip), %rdx movl $.LC2, %esi movl $1, %edi movl $0, %eax call __printf_chk .L8: movl $0, %edi call exit .cfi_endproc .LFE91: .size main, .-main .comm mutex,32,32 .globl cnt .bss .align 8 .type cnt, @object .size cnt, 8 cnt: .zero 8 .ident "GCC: (Ubuntu 4.8.1-2ubuntu1~12.04) 4.8.1" .section .note.GNU-stack,"",@progbits
4ms/stm32mp1-baremetal
1,114
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/arm/mach-tegra/tegra186/cache.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2016, NVIDIA CORPORATION. */ #include <config.h> #include <linux/linkage.h> #define SMC_SIP_INVOKE_MCE 0x82FFFF00 #define MCE_SMC_ROC_FLUSH_CACHE (SMC_SIP_INVOKE_MCE | 11) #define MCE_SMC_ROC_FLUSH_CACHE_ONLY (SMC_SIP_INVOKE_MCE | 14) #define MCE_SMC_ROC_CLEAN_CACHE_ONLY (SMC_SIP_INVOKE_MCE | 15) ENTRY(__asm_tegra_cache_smc) mov x1, #0 mov x2, #0 mov x3, #0 mov x4, #0 mov x5, #0 mov x6, #0 smc #0 mov x0, #0 ret ENDPROC(__asm_invalidate_l3_dcache) ENTRY(__asm_invalidate_l3_dcache) mov x0, #(MCE_SMC_ROC_FLUSH_CACHE_ONLY & 0xffff) movk x0, #(MCE_SMC_ROC_FLUSH_CACHE_ONLY >> 16), lsl #16 b __asm_tegra_cache_smc ENDPROC(__asm_invalidate_l3_dcache) ENTRY(__asm_flush_l3_dcache) mov x0, #(MCE_SMC_ROC_CLEAN_CACHE_ONLY & 0xffff) movk x0, #(MCE_SMC_ROC_CLEAN_CACHE_ONLY >> 16), lsl #16 b __asm_tegra_cache_smc ENDPROC(__asm_flush_l3_dcache) ENTRY(__asm_invalidate_l3_icache) mov x0, #(MCE_SMC_ROC_FLUSH_CACHE & 0xffff) movk x0, #(MCE_SMC_ROC_FLUSH_CACHE >> 16), lsl #16 b __asm_tegra_cache_smc ENDPROC(__asm_invalidate_l3_icache)
530154436/cpp_learning
3,919
csapp/code/asm/linked-list.O1.s
.file "linked-list.c" .text .globl new_list_ele .type new_list_ele, @function new_list_ele: pushq %rbx movq %rdi, %rbx movl $16, %edi call malloc movq %rbx, (%rax) movq $0, 8(%rax) popq %rbx ret .size new_list_ele, .-new_list_ele .globl array_to_list_addtail .type array_to_list_addtail, @function array_to_list_addtail: pushq %r13 pushq %r12 pushq %rbp pushq %rbx subq $24, %rsp movq %rsi, %r12 movq $0, 8(%rsp) testq %rsi, %rsi je .L4 movq %rdi, %r13 movl $0, %ebx leaq 8(%rsp), %rbp .L5: movslq 0(%r13,%rbx,4), %rdi call new_list_ele movq %rax, 0(%rbp) leaq 8(%rax), %rbp addq $1, %rbx cmpq %r12, %rbx jne .L5 .L4: movq 8(%rsp), %rax addq $24, %rsp popq %rbx popq %rbp popq %r12 popq %r13 ret .size array_to_list_addtail, .-array_to_list_addtail .globl array_to_list_reverse .type array_to_list_reverse, @function array_to_list_reverse: pushq %r13 pushq %r12 pushq %rbp pushq %rbx subq $8, %rsp movq %rsi, %r12 leaq -1(%rsi), %rbx cmpq %rbx, %rsi jbe .L10 movq %rdi, %r13 movl $0, %ebp .L9: movslq 0(%r13,%rbx,4), %rdi call new_list_ele movq %rbp, 8(%rax) subq $1, %rbx cmpq %rbx, %r12 jbe .L8 movq %rax, %rbp jmp .L9 .L10: movl $0, %eax .L8: addq $8, %rsp popq %rbx popq %rbp popq %r12 popq %r13 ret .size array_to_list_reverse, .-array_to_list_reverse .globl array_to_list_recurse .type array_to_list_recurse, @function array_to_list_recurse: movl $0, %eax testq %rsi, %rsi je .L17 pushq %rbp pushq %rbx subq $8, %rsp movq %rdi, %rbx subq $1, %rsi leaq 4(%rdi), %rdi call array_to_list_recurse movq %rax, %rbp movslq (%rbx), %rdi call new_list_ele movq %rbp, 8(%rax) addq $8, %rsp popq %rbx popq %rbp .L17: rep; ret .size array_to_list_recurse, .-array_to_list_recurse .globl incr_ele .type incr_ele, @function incr_ele: addq $1, (%rdi) ret .size incr_ele, .-incr_ele .globl incr_list .type incr_list, @function incr_list: testq %rdi, %rdi je .L19 .L21: addq $1, (%rdi) movq 8(%rdi), %rdi testq %rdi, %rdi jne .L21 .L19: rep; ret .size incr_list, .-incr_list .section .eh_frame,"a",@progbits 
.Lframe1: .LSCIE1: .long 0 .byte 0x3 .string "zR" .uleb128 0x1 .sleb128 -8 .uleb128 0x10 .uleb128 0x1 .byte 0x3 .byte 0xc .uleb128 0x7 .uleb128 0x8 .byte 0x90 .uleb128 0x1 .align 8 .LSFDE1: .uleb128 0 .byte 0x4 .byte 0xe .uleb128 0x10 .byte 0x83 .uleb128 0x2 .byte 0x4 .byte 0xe .uleb128 0x8 .align 8 .LSFDE3: .uleb128 0 .byte 0x4 .byte 0xe .uleb128 0x10 .byte 0x8d .uleb128 0x2 .byte 0x4 .byte 0xe .uleb128 0x18 .byte 0x8c .uleb128 0x3 .byte 0x4 .byte 0xe .uleb128 0x20 .byte 0x86 .uleb128 0x4 .byte 0x4 .byte 0xe .uleb128 0x28 .byte 0x83 .uleb128 0x5 .byte 0x4 .byte 0xe .uleb128 0x40 .byte 0x4 .byte 0xe .uleb128 0x28 .byte 0x4 .byte 0xe .uleb128 0x20 .byte 0x4 .byte 0xe .uleb128 0x18 .byte 0x4 .byte 0xe .uleb128 0x10 .byte 0x4 .byte 0xe .uleb128 0x8 .align 8 .LSFDE5: .uleb128 0 .byte 0x4 .byte 0xe .uleb128 0x10 .byte 0x8d .uleb128 0x2 .byte 0x4 .byte 0xe .uleb128 0x18 .byte 0x8c .uleb128 0x3 .byte 0x4 .byte 0xe .uleb128 0x20 .byte 0x86 .uleb128 0x4 .byte 0x4 .byte 0xe .uleb128 0x28 .byte 0x83 .uleb128 0x5 .byte 0x4 .byte 0xe .uleb128 0x30 .byte 0x4 .byte 0xe .uleb128 0x28 .byte 0x4 .byte 0xe .uleb128 0x20 .byte 0x4 .byte 0xe .uleb128 0x18 .byte 0x4 .byte 0xe .uleb128 0x10 .byte 0x4 .byte 0xe .uleb128 0x8 .align 8 .LSFDE7: .uleb128 0 .byte 0x4 .byte 0xe .uleb128 0x10 .byte 0x86 .uleb128 0x2 .byte 0x4 .byte 0xe .uleb128 0x18 .byte 0x83 .uleb128 0x3 .byte 0x4 .byte 0xe .uleb128 0x20 .byte 0x4 .byte 0xe .uleb128 0x18 .byte 0x4 .byte 0xc3 .byte 0xe .uleb128 0x10 .byte 0x4 .byte 0xc6 .byte 0xe .uleb128 0x8 .align 8 .LSFDE9: .uleb128 0 .align 8 .LSFDE11: .uleb128 0 .align 8 .ident "GCC: (Ubuntu 4.8.1-2ubuntu1~12.04) 4.8.1" .section .note.GNU-stack,"",@progbits
530154436/cpp_learning
4,001
csapp/code/asm/linked-list.Og.s
.file "linked-list.c" .text .globl new_list_ele .type new_list_ele, @function new_list_ele: pushq %rbx movq %rdi, %rbx movl $16, %edi call malloc movq %rbx, (%rax) movq $0, 8(%rax) popq %rbx ret .size new_list_ele, .-new_list_ele .globl array_to_list_addtail .type array_to_list_addtail, @function array_to_list_addtail: pushq %r13 pushq %r12 pushq %rbp pushq %rbx subq $24, %rsp movq %rdi, %r13 movq %rsi, %r12 movq $0, 8(%rsp) movl $0, %ebx leaq 8(%rsp), %rbp jmp .L4 .L5: movslq 0(%r13,%rbx,4), %rdi call new_list_ele movq %rax, 0(%rbp) leaq 8(%rax), %rbp addq $1, %rbx .L4: cmpq %r12, %rbx jb .L5 movq 8(%rsp), %rax addq $24, %rsp popq %rbx popq %rbp popq %r12 popq %r13 ret .size array_to_list_addtail, .-array_to_list_addtail .globl array_to_list_reverse .type array_to_list_reverse, @function array_to_list_reverse: pushq %r13 pushq %r12 pushq %rbp pushq %rbx subq $8, %rsp movq %rdi, %r13 movq %rsi, %r12 leaq -1(%rsi), %rbx movl $0, %ebp jmp .L8 .L9: movslq 0(%r13,%rbx,4), %rdi call new_list_ele movq %rbp, 8(%rax) subq $1, %rbx movq %rax, %rbp .L8: cmpq %r12, %rbx jb .L9 movq %rbp, %rax addq $8, %rsp popq %rbx popq %rbp popq %r12 popq %r13 ret .size array_to_list_reverse, .-array_to_list_reverse .globl array_to_list_recurse .type array_to_list_recurse, @function array_to_list_recurse: movl $0, %eax testq %rsi, %rsi je .L16 pushq %rbp pushq %rbx subq $8, %rsp movq %rdi, %rbx subq $1, %rsi leaq 4(%rdi), %rdi call array_to_list_recurse movq %rax, %rbp movslq (%rbx), %rdi call new_list_ele movq %rbp, 8(%rax) addq $8, %rsp popq %rbx popq %rbp .L16: rep; ret .size array_to_list_recurse, .-array_to_list_recurse .globl incr_ele .type incr_ele, @function incr_ele: addq $1, (%rdi) ret .size incr_ele, .-incr_ele .globl incr_list .type incr_list, @function incr_list: pushq %rbx movq %rdi, %rbx jmp .L19 .L20: movq %rbx, %rdi call incr_ele movq 8(%rbx), %rbx .L19: testq %rbx, %rbx jne .L20 popq %rbx ret .size incr_list, .-incr_list .section .eh_frame,"a",@progbits .Lframe1: .LSCIE1: 
.long 0 .byte 0x3 .string "zR" .uleb128 0x1 .sleb128 -8 .uleb128 0x10 .uleb128 0x1 .byte 0x3 .byte 0xc .uleb128 0x7 .uleb128 0x8 .byte 0x90 .uleb128 0x1 .align 8 .LSFDE1: .uleb128 0 .byte 0x4 .byte 0xe .uleb128 0x10 .byte 0x83 .uleb128 0x2 .byte 0x4 .byte 0xe .uleb128 0x8 .align 8 .LSFDE3: .uleb128 0 .byte 0x4 .byte 0xe .uleb128 0x10 .byte 0x8d .uleb128 0x2 .byte 0x4 .byte 0xe .uleb128 0x18 .byte 0x8c .uleb128 0x3 .byte 0x4 .byte 0xe .uleb128 0x20 .byte 0x86 .uleb128 0x4 .byte 0x4 .byte 0xe .uleb128 0x28 .byte 0x83 .uleb128 0x5 .byte 0x4 .byte 0xe .uleb128 0x40 .byte 0x4 .byte 0xe .uleb128 0x28 .byte 0x4 .byte 0xe .uleb128 0x20 .byte 0x4 .byte 0xe .uleb128 0x18 .byte 0x4 .byte 0xe .uleb128 0x10 .byte 0x4 .byte 0xe .uleb128 0x8 .align 8 .LSFDE5: .uleb128 0 .byte 0x4 .byte 0xe .uleb128 0x10 .byte 0x8d .uleb128 0x2 .byte 0x4 .byte 0xe .uleb128 0x18 .byte 0x8c .uleb128 0x3 .byte 0x4 .byte 0xe .uleb128 0x20 .byte 0x86 .uleb128 0x4 .byte 0x4 .byte 0xe .uleb128 0x28 .byte 0x83 .uleb128 0x5 .byte 0x4 .byte 0xe .uleb128 0x30 .byte 0x4 .byte 0xe .uleb128 0x28 .byte 0x4 .byte 0xe .uleb128 0x20 .byte 0x4 .byte 0xe .uleb128 0x18 .byte 0x4 .byte 0xe .uleb128 0x10 .byte 0x4 .byte 0xe .uleb128 0x8 .align 8 .LSFDE7: .uleb128 0 .byte 0x4 .byte 0xe .uleb128 0x10 .byte 0x86 .uleb128 0x2 .byte 0x4 .byte 0xe .uleb128 0x18 .byte 0x83 .uleb128 0x3 .byte 0x4 .byte 0xe .uleb128 0x20 .byte 0x4 .byte 0xe .uleb128 0x18 .byte 0x4 .byte 0xc3 .byte 0xe .uleb128 0x10 .byte 0x4 .byte 0xc6 .byte 0xe .uleb128 0x8 .align 8 .LSFDE9: .uleb128 0 .align 8 .LSFDE11: .uleb128 0 .byte 0x4 .byte 0xe .uleb128 0x10 .byte 0x83 .uleb128 0x2 .byte 0x4 .byte 0xe .uleb128 0x8 .align 8 .ident "GCC: (Ubuntu 4.8.1-2ubuntu1~12.04) 4.8.1" .section .note.GNU-stack,"",@progbits
530154436/cpp_learning
1,904
csapp/code/asm/450-bufdemo.s
.file "450-bufdemo.c" .text .globl gets .type gets, @function gets: .LFB34: .cfi_startproc pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $8, %rsp .cfi_def_cfa_offset 32 movq %rdi, %rbp movq %rdi, %rbx jmp .L2 .L4: movb %al, (%rbx) leaq 1(%rbx), %rbx .L2: movq stdin(%rip), %rdi call _IO_getc cmpl $10, %eax je .L3 cmpl $-1, %eax jne .L4 .L3: cmpl $-1, %eax sete %dl cmpq %rbp, %rbx sete %al testb %al, %dl jne .L6 movb $0, (%rbx) movq %rbp, %rax jmp .L5 .L6: movl $0, %eax .L5: addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE34: .size gets, .-gets .globl echo .type echo, @function echo: .LFB35: .cfi_startproc subq $24, %rsp .cfi_def_cfa_offset 32 movq %rsp, %rdi call gets movq %rsp, %rdi call puts addq $24, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE35: .size echo, .-echo .globl good_echo .type good_echo, @function good_echo: .LFB36: .cfi_startproc subq $24, %rsp .cfi_def_cfa_offset 32 movq stdin(%rip), %rdx movl $8, %esi movq %rsp, %rdi call fgets testq %rax, %rax je .L10 movq %rax, %rdi call puts .L10: addq $24, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE36: .size good_echo, .-good_echo .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Type a string:" .text .globl main .type main, @function main: .LFB37: .cfi_startproc pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 movl %edi, %ebx movl $.LC0, %esi movl $1, %edi movl $0, %eax call __printf_chk cmpl $1, %ebx jle .L14 movl $0, %eax call good_echo jmp .L15 .L14: movl $0, %eax call echo .L15: movl $0, %eax popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE37: .size main, .-main .ident "GCC: (Ubuntu 4.8.1-2ubuntu1~12.04) 4.8.1" .section .note.GNU-stack,"",@progbits
530154436/cpp_learning
3,027
csapp/code/asm/020-fragments.s
moveg: /* $begin 020-moveg-sa 0 */ movabsq $0x0011223344556677, %rax # %rax = 0011223344556677 # line:asm:moveg:init movb $-1, %al # %rax = 00112233445566FF # line:asm:moveg:movb movw $-1, %ax # %rax = 001122334455FFFF # line:asm:moveg:movw movl $-1, %eax # %rax = 00000000FFFFFFFF # line:asm:moveg:movl movq $-1, %rax # %rax = FFFFFFFFFFFFFFFF # line:asm:moveg:movq /* $end 020-moveg-sa 0 */ movmix: /* $begin 020-movmix-sa 0 */ movl $0x4050,%eax # Immediate--Register, 4 bytes movw %bp,%sp # Register--Register, 2 bytes movb (%rdi,%rcx),%al # Memory--Register, 1 byte movb $-17,(%esp) # Immediate--Memory, 1 byte movq %rax,-12(%rbp) # Register--Memory, 8 bytes /* $end 020-movmix-sa 0 */ cmov: cmovge (%rax), %rdx cmovge (%rax), %edx cmovge (%rax), %dx # cmovge (%rax), %dl pushq: /* $begin 020-pushq-sa 0 */ subq $8,%rsp # Decrement stack pointer movq %rbp,(%rsp) # Store %rbp on stack /* $end 020-pushq-sa 0 */ popq: /* $begin 020-popq-sa 0 */ movq (%rsp),%rax # Read %rax from stack addq $8,%rsp # Increment stack pointer /* $end 020-popq-sa 0 */ words_suffix: /* $begin 020-word-sizes-sa 0 */ movb %bl,%al # One byte movw %bx,%ax # Two bytes movl %ebx,%eax # Four bytes movq %rbx,%rax # Eight bytes /* $end 020-word-sizes-sa 0 */ words: /* $begin 020-word-sizes-sa 0 */ mov %bl,%al # One byte mov %bx,%ax # Two bytes mov %ebx,%eax # Four bytes mov %rbx,%rax # Eight bytes /* $end 020-word-sizes-sa 0 */ byte_move: /* $begin 020-byte-move-sa 0 */ movabsq $0x0011223344556677, %rax # %rax = 0011223344556677 # line:asm:bytemove:inita movb $0xAA, %dl # %dl = AA # line:asm:bytemove:initd movb %dl,%al # %rax = 00112233445566AA # line:asm:bytemove:movb movsbq %dl,%rax # %rax = FFFFFFFFFFFFFFAA # line:asm:bytemove:movsb movzbq %dl,%rax # %rax = 00000000000000AA # line:asm:bytemove:movzb /* $end 020-byte-move-sa 0 */ jump: /* $begin 020-jump-sa 0 */ movq $0,%rax # Set %rax to 0 jmp .L1 # Goto .L1 movq (%rax),%rdx # Null pointer dereference (Skipped) .L1: popq %rdx # Jump target /* $end 
020-jump-sa 0 */ pc_trick: /* $begin 020-loadpc-sa 0 */ call next next: popq %rax /* $end 020-loadpc-sa 0 */ idivq: /* $begin 020-idivq-alt-sa 0 */ # x in \rsireg, y in \rdireg movq %rsi,%rax # Load x into \raxreg cltq # Sign extend into \rdxreg idivq %rdi # Divide by y movq %rax, 8(%rsp) # Store x / y movq %rdx, (%rsp) # Store x % y /* $end 020-idivq-alt-sa 0 */ divq: /* $begin 020-divq-sa 0 */ # x at \ebpreg+8, y at \ebpreg+12 movq %rsi,%rax # Load x into \raxreg movq $0,%rdx # Set high-order bits to 0 divq %rdi # Divide by y movq %rax, 8(%rsp) # Store x / y movq %rdx, (%rsp) # Store x % y xorq %rdx,%rdx movq $0, %rdx test: xorl %edx, %edx xorq %rdx, %rdx movq $0, %rdx movl $0, %edx
530154436/cpp_learning
1,787
csapp/code/asm/480-charbuf.s
.file "480-charbuf.c" .text .globl len .type len, @function len: .LFB56: .cfi_startproc movl $0, %eax movq $-1, %rcx repnz; scasb notq %rcx leaq -1(%rcx), %rax ret .cfi_endproc .LFE56: .size len, .-len .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "%ld" .text .globl iptoa .type iptoa, @function iptoa: .LFB57: .cfi_startproc subq $8, %rsp .cfi_def_cfa_offset 16 movq (%rsi), %r8 movl $.LC0, %ecx movq $-1, %rdx movl $1, %esi movl $0, %eax call __sprintf_chk addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE57: .size iptoa, .-iptoa .globl intlen .type intlen, @function intlen: .LFB58: .cfi_startproc subq $40, %rsp .cfi_def_cfa_offset 48 movq %rdi, 24(%rsp) leaq 24(%rsp), %rsi movq %rsp, %rdi call iptoa movq %rsp, %rdi call len addq $40, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE58: .size intlen, .-intlen .section .rodata.str1.1 .LC1: .string "%d" .text .globl itoa .type itoa, @function itoa: .LFB59: .cfi_startproc subq $8, %rsp .cfi_def_cfa_offset 16 movl %esi, %r8d movl $.LC1, %ecx movq $-1, %rdx movl $1, %esi movl $0, %eax call __sprintf_chk addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE59: .size itoa, .-itoa .globl storeint .type storeint, @function storeint: .LFB60: .cfi_startproc movl %edi, (%rsi) ret .cfi_endproc .LFE60: .size storeint, .-storeint .globl intlen2 .type intlen2, @function intlen2: .LFB61: .cfi_startproc subq $24, %rsp .cfi_def_cfa_offset 32 leaq 12(%rsp), %rsi call storeint movl 12(%rsp), %esi movq %rsp, %rdi call itoa movq %rsp, %rdi call len addq $24, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE61: .size intlen2, .-intlen2 .ident "GCC: (Ubuntu 4.8.1-2ubuntu1~12.04) 4.8.1" .section .note.GNU-stack,"",@progbits
530154436/cpp_learning
3,909
csapp/code/asm/linked-list.s
.file "linked-list.c" .text .p2align 4,,15 .globl new_list_ele .type new_list_ele, @function new_list_ele: .LFB12: .cfi_startproc pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 movq %rdi, %rbx movl $16, %edi call malloc movq %rbx, (%rax) movq $0, 8(%rax) popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE12: .size new_list_ele, .-new_list_ele .p2align 4,,15 .globl array_to_list_addtail .type array_to_list_addtail, @function array_to_list_addtail: .LFB13: .cfi_startproc pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 movq %rsi, %r13 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $16, %rsp .cfi_def_cfa_offset 64 testq %rsi, %rsi movq $0, 8(%rsp) je .L7 leaq 8(%rsp), %rbp movq %rdi, %r14 xorl %ebx, %ebx .p2align 4,,10 .p2align 3 .L6: movslq (%r14,%rbx,4), %r12 movl $16, %edi addq $1, %rbx call malloc cmpq %r13, %rbx movq $0, 8(%rax) movq %r12, (%rax) movq %rax, 0(%rbp) leaq 8(%rax), %rbp jne .L6 movq 8(%rsp), %rax .L4: addq $16, %rsp .cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state xorl %eax, %eax jmp .L4 .cfi_endproc .LFE13: .size array_to_list_addtail, .-array_to_list_addtail .p2align 4,,15 .globl array_to_list_reverse .type array_to_list_reverse, @function array_to_list_reverse: .LFB14: .cfi_startproc pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 movq %rsi, %r13 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 leaq -1(%rsi), %rbx cmpq %rbx, %rsi jbe .L13 movq %rdi, %r14 xorl %r12d, %r12d jmp .L12 .p2align 4,,10 .p2align 3 .L15: 
movq %rax, %r12 .L12: movslq (%r14,%rbx,4), %rbp movl $16, %edi subq $1, %rbx call malloc cmpq %rbx, %r13 movq %rax, %rdx movq %r12, 8(%rax) movq %rbp, (%rax) ja .L15 .L11: popq %rbx .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 movq %rdx, %rax popq %r14 .cfi_def_cfa_offset 8 ret .L13: .cfi_restore_state xorl %edx, %edx jmp .L11 .cfi_endproc .LFE14: .size array_to_list_reverse, .-array_to_list_reverse .p2align 4,,15 .globl array_to_list_recurse .type array_to_list_recurse, @function array_to_list_recurse: .LFB15: .cfi_startproc xorl %eax, %eax testq %rsi, %rsi je .L22 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 subq $1, %rsi pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 movq %rdi, %rbx leaq 4(%rdi), %rdi subq $8, %rsp .cfi_def_cfa_offset 32 call array_to_list_recurse movslq (%rbx), %rbx movq %rax, %rbp movl $16, %edi call malloc movq %rbp, 8(%rax) movq %rbx, (%rax) addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_restore 3 .cfi_def_cfa_offset 16 popq %rbp .cfi_restore 6 .cfi_def_cfa_offset 8 .L22: rep; ret .cfi_endproc .LFE15: .size array_to_list_recurse, .-array_to_list_recurse .p2align 4,,15 .globl incr_ele .type incr_ele, @function incr_ele: .LFB16: .cfi_startproc addq $1, (%rdi) ret .cfi_endproc .LFE16: .size incr_ele, .-incr_ele .p2align 4,,15 .globl incr_list .type incr_list, @function incr_list: .LFB17: .cfi_startproc jmp .L32 .p2align 4,,10 .p2align 3 .L30: addq $1, (%rdi) movq 8(%rdi), %rdi .L32: testq %rdi, %rdi jne .L30 rep; ret .cfi_endproc .LFE17: .size incr_list, .-incr_list .ident "GCC: (Ubuntu 4.8.1-2ubuntu1~12.04) 4.8.1" .section .note.GNU-stack,"",@progbits
530154436/cpp_learning
2,230
csapp/code/asm/460-bufovf.s
.file "460-bufovf.c" .text .globl gets .type gets, @function gets: .LFB34: .cfi_startproc pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $8, %rsp .cfi_def_cfa_offset 32 movq %rdi, %rbp movq %rdi, %rbx jmp .L2 .L4: movb %al, (%rbx) leaq 1(%rbx), %rbx .L2: movq stdin(%rip), %rdi call _IO_getc cmpl $10, %eax je .L3 cmpl $-1, %eax jne .L4 .L3: cmpl $-1, %eax sete %dl cmpq %rbp, %rbx sete %al testb %al, %dl jne .L6 movb $0, (%rbx) movq %rbp, %rax jmp .L5 .L6: movl $0, %eax .L5: addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE34: .size gets, .-gets .globl my_strlen .type my_strlen, @function my_strlen: .LFB35: .cfi_startproc movl $0, %eax jmp .L9 .L10: addq $1, %rax movq %rdx, %rdi .L9: leaq 1(%rdi), %rdx cmpb $0, (%rdi) jne .L10 rep; ret .cfi_endproc .LFE35: .size my_strlen, .-my_strlen .globl strcpy .type strcpy, @function strcpy: .LFB36: .cfi_startproc movq %rdi, %rax movq %rdi, %rdx .L12: movzbl (%rsi), %ecx movb %cl, (%rdx) leaq 1(%rdx), %rdx leaq 1(%rsi), %rsi testb %cl, %cl jne .L12 rep; ret .cfi_endproc .LFE36: .size strcpy, .-strcpy .globl get_line .type get_line, @function get_line: .LFB37: .cfi_startproc pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 subq $16, %rsp .cfi_def_cfa_offset 32 movq %rsp, %rdi call gets movq %rsp, %rdi call my_strlen movq %rax, %rdi call malloc movq %rax, %rbx movq %rsp, %rsi movq %rax, %rdi call strcpy movq %rbx, %rax addq $16, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE37: .size get_line, .-get_line .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Input>" .text .globl main .type main, @function main: .LFB38: .cfi_startproc subq $8, %rsp .cfi_def_cfa_offset 16 movl $.LC0, %esi movl $1, %edi movl $0, %eax call __printf_chk movl $0, %eax call get_line movq %rax, %rdi call puts movl $0, %eax addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc 
.LFE38: .size main, .-main .ident "GCC: (Ubuntu 4.8.1-2ubuntu1~12.04) 4.8.1" .section .note.GNU-stack,"",@progbits
4ms/stm32mp1-baremetal
2,405
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/xtensa/lib/misc.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Miscellaneous assembly functions. * * Copyright (C) 2001 - 2007 Tensilica Inc. * Copyright (C) 2014 - 2016 Cadence Design Systems Inc. * * Chris Zankel <chris@zankel.net> */ #include <linux/linkage.h> #include <asm/asmmacro.h> #include <asm/cacheasm.h> /* * void __invalidate_icache_page(ulong start) */ ENTRY(__invalidate_icache_page) abi_entry ___invalidate_icache_page a2 a3 isync abi_ret ENDPROC(__invalidate_icache_page) /* * void __invalidate_dcache_page(ulong start) */ ENTRY(__invalidate_dcache_page) abi_entry ___invalidate_dcache_page a2 a3 dsync abi_ret ENDPROC(__invalidate_dcache_page) /* * void __flush_invalidate_dcache_page(ulong start) */ ENTRY(__flush_invalidate_dcache_page) abi_entry ___flush_invalidate_dcache_page a2 a3 dsync abi_ret ENDPROC(__flush_invalidate_dcache_page) /* * void __flush_dcache_page(ulong start) */ ENTRY(__flush_dcache_page) abi_entry ___flush_dcache_page a2 a3 dsync abi_ret ENDPROC(__flush_dcache_page) /* * void __invalidate_icache_range(ulong start, ulong size) */ ENTRY(__invalidate_icache_range) abi_entry ___invalidate_icache_range a2 a3 a4 isync abi_ret ENDPROC(__invalidate_icache_range) /* * void __flush_invalidate_dcache_range(ulong start, ulong size) */ ENTRY(__flush_invalidate_dcache_range) abi_entry ___flush_invalidate_dcache_range a2 a3 a4 dsync abi_ret ENDPROC(__flush_invalidate_dcache_range) /* * void _flush_dcache_range(ulong start, ulong size) */ ENTRY(__flush_dcache_range) abi_entry ___flush_dcache_range a2 a3 a4 dsync abi_ret ENDPROC(__flush_dcache_range) /* * void _invalidate_dcache_range(ulong start, ulong size) */ ENTRY(__invalidate_dcache_range) abi_entry ___invalidate_dcache_range a2 a3 a4 abi_ret ENDPROC(__invalidate_dcache_range) /* * void _invalidate_icache_all(void) */ ENTRY(__invalidate_icache_all) abi_entry ___invalidate_icache_all a2 a3 isync abi_ret ENDPROC(__invalidate_icache_all) /* * void _flush_invalidate_dcache_all(void) */ 
ENTRY(__flush_invalidate_dcache_all) abi_entry ___flush_invalidate_dcache_all a2 a3 dsync abi_ret ENDPROC(__flush_invalidate_dcache_all) /* * void _invalidate_dcache_all(void) */ ENTRY(__invalidate_dcache_all) abi_entry ___invalidate_dcache_all a2 a3 dsync abi_ret ENDPROC(__invalidate_dcache_all)
4ms/stm32mp1-baremetal
13,741
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/xtensa/cpu/start.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * (C) Copyright 2008 - 2013 Tensilica Inc. * (C) Copyright 2014 - 2016 Cadence Design Systems Inc. */ #include <config.h> #include <asm/asmmacro.h> #include <asm/cacheasm.h> #include <asm/regs.h> #include <asm/arch/tie.h> #include <asm-offsets.h> /* * Offsets into the the pt_regs struture. * Make sure these always match with the structure defined in ptrace.h! */ #define PT_PC 0 #define PT_PS 4 #define PT_DEPC 8 #define PT_EXCCAUSE 12 #define PT_EXCVADDR 16 #define PT_DEBUGCAUSE 20 #define PT_WMASK 24 #define PT_LBEG 28 #define PT_LEND 32 #define PT_LCOUNT 36 #define PT_SAR 40 #define PT_WINDOWBASE 44 #define PT_WINDOWSTART 48 #define PT_SYSCALL 52 #define PT_ICOUNTLEVEL 56 #define PT_RESERVED 60 #define PT_AREG 64 #define PT_SIZE (64 + 64) /* * Cache attributes are different for full MMU and region protection. */ #if XCHAL_HAVE_PTP_MMU #define CA_WRITEBACK (0x7) #else #define CA_WRITEBACK (0x4) #endif /* * Reset vector. * Only a trampoline to jump to _start * (Note that we have to mark the section writable as the section contains * a relocatable literal) */ .section .ResetVector.text, "awx" .global _ResetVector _ResetVector: j 1f .align 4 2: .long _start 1: l32r a2, 2b jx a2 /* * Processor initialization. We still run in rom space. * * NOTE: Running in ROM * For Xtensa, we currently don't allow to run some code from ROM but * unpack the data immediately to memory. This requires, for example, * that DDR has been set up before running U-Boot. (See also comments * inline for ways to change it) */ .section .reset.text, "ax" .global _start .align 4 _start: /* Keep a0 = 0 for various initializations */ movi a0, 0 /* * For full MMU cores, put page table at unmapped virtual address. * This ensures that accesses outside the static maps result * in miss exceptions rather than random behaviour. 
*/ #if XCHAL_HAVE_PTP_MMU wsr a0, PTEVADDR #endif /* Disable dbreak debug exceptions */ #if XCHAL_HAVE_DEBUG && XCHAL_NUM_DBREAK > 0 .set _index, 0 .rept XCHAL_NUM_DBREAK wsr a0, DBREAKC + _index .set _index, _index + 1 .endr #endif /* Reset windowbase and windowstart */ #if XCHAL_HAVE_WINDOWED movi a3, 1 wsr a3, windowstart wsr a0, windowbase rsync movi a0, 0 /* windowbase might have changed */ #endif /* * Vecbase in bitstream may differ from header files * set or check it. */ #if XCHAL_HAVE_VECBASE movi a3, XCHAL_VECBASE_RESET_VADDR /* VECBASE reset value */ wsr a3, VECBASE #endif #if XCHAL_HAVE_LOOPS /* Disable loops */ wsr a0, LCOUNT #endif /* Set PS.WOE = 0, PS.EXCM = 0 (for loop), PS.INTLEVEL = EXCM level */ #if XCHAL_HAVE_XEA1 movi a2, 1 #else movi a2, XCHAL_EXCM_LEVEL #endif wsr a2, PS rsync /* Unlock and invalidate caches */ ___unlock_dcache_all a2, a3 ___invalidate_dcache_all a2, a3 ___unlock_icache_all a2, a3 ___invalidate_icache_all a2, a3 isync /* Unpack data sections */ movi a2, __reloc_table_start movi a3, __reloc_table_end 1: beq a2, a3, 3f # no more entries? l32i a4, a2, 0 # start destination (in RAM) l32i a5, a2, 4 # end destination (in RAM) l32i a6, a2, 8 # start source (in ROM) addi a2, a2, 12 # next entry beq a4, a5, 1b # skip, empty entry beq a4, a6, 1b # skip, source and destination are the same /* If there's memory protection option with 512MB TLB regions and * cache attributes in TLB entries and caching is not inhibited, * enable data/instruction cache for relocated image. 
*/ #if XCHAL_HAVE_SPANNING_WAY && \ !(CONFIG_IS_ENABLED(SYS_DCACHE_OFF) && \ CONFIG_IS_ENABLED(SYS_ICACHE_OFF)) srli a7, a4, 29 slli a7, a7, 29 addi a7, a7, XCHAL_SPANNING_WAY #if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) rdtlb1 a8, a7 srli a8, a8, 4 slli a8, a8, 4 addi a8, a8, CA_WRITEBACK wdtlb a8, a7 #endif #if !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) ritlb1 a8, a7 srli a8, a8, 4 slli a8, a8, 4 addi a8, a8, CA_WRITEBACK witlb a8, a7 #endif isync #endif 2: l32i a7, a6, 0 addi a6, a6, 4 s32i a7, a4, 0 addi a4, a4, 4 bltu a4, a5, 2b j 1b 3: /* All code and initalized data segments have been copied */ /* Setup PS, PS.WOE = 1, PS.EXCM = 0, PS.INTLEVEL = EXCM level. */ #if __XTENSA_CALL0_ABI__ movi a2, XCHAL_EXCM_LEVEL #else movi a2, (1<<PS_WOE_BIT) | XCHAL_EXCM_LEVEL #endif wsr a2, PS rsync /* Writeback */ ___flush_dcache_all a2, a3 #ifdef __XTENSA_WINDOWED_ABI__ /* * In windowed ABI caller and call target need to be within the same * gigabyte. Put the rest of the code into the text segment and jump * there. */ movi a4, .Lboard_init_code jx a4 .text .align 4 .Lboard_init_code: #endif movi a0, 0 movi sp, (XTENSA_SYS_TEXT_ADDR - 16) & 0xfffffff0 #ifdef CONFIG_DEBUG_UART movi a4, debug_uart_init #ifdef __XTENSA_CALL0_ABI__ callx0 a4 #else callx4 a4 #endif #endif movi a4, board_init_f_alloc_reserve #ifdef __XTENSA_CALL0_ABI__ mov a2, sp callx0 a4 mov sp, a2 #else mov a6, sp callx4 a4 movsp sp, a6 #endif movi a4, board_init_f_init_reserve #ifdef __XTENSA_CALL0_ABI__ callx0 a4 #else callx4 a4 #endif /* * Call board initialization routine (never returns). */ movi a4, board_init_f #ifdef __XTENSA_CALL0_ABI__ movi a2, 0 callx0 a4 #else movi a6, 0 callx4 a4 #endif /* Never Returns */ ill /* * void relocate_code (addr_sp, gd, addr_moni) * * This "function" does not return, instead it continues in RAM * after relocating the monitor code. 
* * a2 = addr_sp * a3 = gd * a4 = destination address */ .text .globl relocate_code .align 4 relocate_code: abi_entry #ifdef __XTENSA_CALL0_ABI__ mov a1, a2 mov a2, a3 mov a3, a4 movi a0, board_init_r callx0 a0 #else /* We can't movsp here, because the chain of stack frames may cross * the now reserved memory. We need to toss all window frames except * the current, create new pristine stack frame and start from scratch. */ rsr a0, windowbase ssl a0 movi a0, 1 sll a0, a0 wsr a0, windowstart rsync movi a0, 0 /* Reserve 16-byte save area */ addi sp, a2, -16 mov a6, a3 mov a7, a4 movi a4, board_init_r callx4 a4 #endif ill #if XCHAL_HAVE_EXCEPTIONS /* * Exception vectors. * * Various notes: * - We currently don't use the user exception vector (PS.UM is always 0), * but do define such a vector, just in case. They both jump to the * same exception handler, though. * - We currently only save the bare minimum number of registers: * a0...a15, sar, loop-registers, exception register (epc1, excvaddr, * exccause, depc) * - WINDOWSTART is only saved to identify if registers have been spilled * to the wrong stack (exception stack) while executing the exception * handler. 
*/ .section .KernelExceptionVector.text, "ax" .global _KernelExceptionVector _KernelExceptionVector: wsr a2, EXCSAVE1 movi a2, ExceptionHandler jx a2 .section .UserExceptionVector.text, "ax" .global _UserExceptionVector _UserExceptionVector: wsr a2, EXCSAVE1 movi a2, ExceptionHandler jx a2 #if !XCHAL_HAVE_XEA1 .section .DoubleExceptionVector.text, "ax" .global _DoubleExceptionVector _DoubleExceptionVector: #ifdef __XTENSA_CALL0_ABI__ wsr a0, EXCSAVE1 movi a0, hang # report and ask user to reset board callx0 a0 #else wsr a4, EXCSAVE1 movi a4, hang # report and ask user to reset board callx4 a4 #endif #endif /* Does not return here */ .text .align 4 ExceptionHandler: rsr a2, EXCCAUSE # find handler #if XCHAL_HAVE_WINDOWED /* Special case for alloca handler */ bnei a2, 5, 1f # jump if not alloca exception addi a1, a1, -16 - 4 # create a small stack frame s32i a3, a1, 0 # and save a3 (a2 still in excsave1) movi a2, fast_alloca_exception jx a2 # jump to fast_alloca_exception #endif /* All other exceptions go here: */ /* Create ptrace stack and save a0...a3 */ 1: addi a2, a1, - PT_SIZE - 16 s32i a0, a2, PT_AREG + 0 * 4 s32i a1, a2, PT_AREG + 1 * 4 s32i a3, a2, PT_AREG + 3 * 4 rsr a3, EXCSAVE1 s32i a3, a2, PT_AREG + 2 * 4 mov a1, a2 /* Save remaining AR registers */ s32i a4, a1, PT_AREG + 4 * 4 s32i a5, a1, PT_AREG + 5 * 4 s32i a6, a1, PT_AREG + 6 * 4 s32i a7, a1, PT_AREG + 7 * 4 s32i a8, a1, PT_AREG + 8 * 4 s32i a9, a1, PT_AREG + 9 * 4 s32i a10, a1, PT_AREG + 10 * 4 s32i a11, a1, PT_AREG + 11 * 4 s32i a12, a1, PT_AREG + 12 * 4 s32i a13, a1, PT_AREG + 13 * 4 s32i a14, a1, PT_AREG + 14 * 4 s32i a15, a1, PT_AREG + 15 * 4 /* Save SRs */ #if XCHAL_HAVE_WINDOWED rsr a2, WINDOWSTART s32i a2, a1, PT_WINDOWSTART #endif rsr a2, SAR rsr a3, EPC1 rsr a4, EXCVADDR s32i a2, a1, PT_SAR s32i a3, a1, PT_PC s32i a4, a1, PT_EXCVADDR #if XCHAL_HAVE_LOOPS movi a2, 0 rsr a3, LBEG xsr a2, LCOUNT s32i a3, a1, PT_LBEG rsr a3, LEND s32i a2, a1, PT_LCOUNT s32i a3, a1, PT_LEND #endif /* Set up C 
environment and call registered handler */ /* Setup stack, PS.WOE = 1, PS.EXCM = 0, PS.INTLEVEL = EXCM level. */ rsr a2, EXCCAUSE #if XCHAL_HAVE_XEA1 movi a3, (1<<PS_WOE_BIT) | 1 #elif __XTENSA_CALL0_ABI__ movi a3, XCHAL_EXCM_LEVEL #else movi a3, (1<<PS_WOE_BIT) | XCHAL_EXCM_LEVEL #endif xsr a3, PS rsync s32i a2, a1, PT_EXCCAUSE s32i a3, a1, PT_PS movi a0, exc_table addx4 a0, a2, a0 l32i a0, a0, 0 #ifdef __XTENSA_CALL0_ABI__ mov a2, a1 # Provide stack frame as only argument callx0 a0 l32i a3, a1, PT_PS #else mov a6, a1 # Provide stack frame as only argument callx4 a0 #endif /* Restore PS and go to exception mode (PS.EXCM=1) */ wsr a3, PS /* Restore SR registers */ #if XCHAL_HAVE_LOOPS l32i a2, a1, PT_LBEG l32i a3, a1, PT_LEND l32i a4, a1, PT_LCOUNT wsr a2, LBEG wsr a3, LEND wsr a4, LCOUNT #endif l32i a2, a1, PT_SAR l32i a3, a1, PT_PC wsr a2, SAR wsr a3, EPC1 #if XCHAL_HAVE_WINDOWED /* Do we need to simulate a MOVSP? */ l32i a2, a1, PT_WINDOWSTART addi a3, a2, -1 and a2, a2, a3 beqz a2, 1f # Skip if regs were spilled before exc. rsr a2, WINDOWSTART addi a3, a2, -1 and a2, a2, a3 bnez a2, 1f # Skip if registers aren't spilled now addi a2, a1, -16 l32i a4, a2, 0 l32i a5, a2, 4 s32i a4, a1, PT_SIZE + 0 s32i a5, a1, PT_SIZE + 4 l32i a4, a2, 8 l32i a5, a2, 12 s32i a4, a1, PT_SIZE + 8 s32i a5, a1, PT_SIZE + 12 #endif /* Restore address register */ 1: l32i a15, a1, PT_AREG + 15 * 4 l32i a14, a1, PT_AREG + 14 * 4 l32i a13, a1, PT_AREG + 13 * 4 l32i a12, a1, PT_AREG + 12 * 4 l32i a11, a1, PT_AREG + 11 * 4 l32i a10, a1, PT_AREG + 10 * 4 l32i a9, a1, PT_AREG + 9 * 4 l32i a8, a1, PT_AREG + 8 * 4 l32i a7, a1, PT_AREG + 7 * 4 l32i a6, a1, PT_AREG + 6 * 4 l32i a5, a1, PT_AREG + 5 * 4 l32i a4, a1, PT_AREG + 4 * 4 l32i a3, a1, PT_AREG + 3 * 4 l32i a2, a1, PT_AREG + 2 * 4 l32i a0, a1, PT_AREG + 0 * 4 l32i a1, a1, PT_AREG + 1 * 4 # Remove ptrace stack frame rfe #endif /* XCHAL_HAVE_EXCEPTIONS */ #if XCHAL_HAVE_WINDOWED /* * Window overflow and underflow handlers. 
* The handlers must be 64 bytes apart, first starting with the underflow * handlers underflow-4 to underflow-12, then the overflow handlers * overflow-4 to overflow-12. * * Note: We rerun the underflow handlers if we hit an exception, so * we try to access any page that would cause a page fault early. */ .section .WindowVectors.text, "ax" /* 4-Register Window Overflow Vector (Handler) */ .align 64 .global _WindowOverflow4 _WindowOverflow4: s32e a0, a5, -16 s32e a1, a5, -12 s32e a2, a5, -8 s32e a3, a5, -4 rfwo /* 4-Register Window Underflow Vector (Handler) */ .align 64 .global _WindowUnderflow4 _WindowUnderflow4: l32e a0, a5, -16 l32e a1, a5, -12 l32e a2, a5, -8 l32e a3, a5, -4 rfwu /* * a0: a0 * a1: new stack pointer = a1 - 16 - 4 * a2: available, saved in excsave1 * a3: available, saved on stack *a1 */ /* 15*/ .byte 0xff fast_alloca_exception: /* must be at _WindowUnderflow4 + 16 */ /* 16*/ rsr a2, PS /* 19*/ rsr a3, WINDOWBASE /* 22*/ extui a2, a2, PS_OWB_SHIFT, PS_OWB_SHIFT /* 25*/ xor a2, a2, a3 /* 28*/ rsr a3, PS /* 31*/ slli a2, a2, PS_OWB_SHIFT /* 34*/ xor a2, a3, a2 /* 37*/ wsr a2, PS /* 40*/ _l32i a3, a1, 0 /* 43*/ addi a1, a1, 16 + 4 /* 46*/ rsr a2, EXCSAVE1 /* 49*/ rotw -1 /* 52*/ _bbci.l a4, 31, _WindowUnderflow4 /* 0x: call4 */ /* 55*/ rotw -1 /* 58*/ _bbci.l a8, 30, _WindowUnderflow8 /* 10: call8 */ /* 61*/ _j __WindowUnderflow12 /* 11: call12 */ /* 64*/ /* 8-Register Window Overflow Vector (Handler) */ .align 64 .global _WindowOverflow8 _WindowOverflow8: s32e a0, a9, -16 l32e a0, a1, -12 s32e a2, a9, -8 s32e a1, a9, -12 s32e a3, a9, -4 s32e a4, a0, -32 s32e a5, a0, -28 s32e a6, a0, -24 s32e a7, a0, -20 rfwo /* 8-Register Window Underflow Vector (Handler) */ .align 64 .global _WindowUnderflow8 _WindowUnderflow8: l32e a1, a9, -12 l32e a0, a9, -16 l32e a7, a1, -12 l32e a2, a9, -8 l32e a4, a7, -32 l32e a3, a9, -4 l32e a5, a7, -28 l32e a6, a7, -24 l32e a7, a7, -20 rfwu /* 12-Register Window Overflow Vector (Handler) */ .align 64 .global _WindowOverflow12 
_WindowOverflow12: s32e a0, a13, -16 l32e a0, a1, -12 s32e a1, a13, -12 s32e a2, a13, -8 s32e a3, a13, -4 s32e a4, a0, -48 s32e a5, a0, -44 s32e a6, a0, -40 s32e a7, a0, -36 s32e a8, a0, -32 s32e a9, a0, -28 s32e a10, a0, -24 s32e a11, a0, -20 rfwo /* 12-Register Window Underflow Vector (Handler) */ .org _WindowOverflow12 + 64 - 3 __WindowUnderflow12: rotw -1 .global _WindowUnderflow12 _WindowUnderflow12: l32e a1, a13, -12 l32e a0, a13, -16 l32e a11, a1, -12 l32e a2, a13, -8 l32e a4, a11, -48 l32e a8, a11, -32 l32e a3, a13, -4 l32e a5, a11, -44 l32e a6, a11, -40 l32e a7, a11, -36 l32e a9, a11, -28 l32e a10, a11, -24 l32e a11, a11, -20 rfwu #endif /* XCHAL_HAVE_WINDOWED */
4ms/stm32mp1-baremetal
6,657
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/x86/lib/bios_asm.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * From coreboot x86_asm.S, cleaned up substantially * * Copyright (C) 2009-2010 coresystems GmbH */ #include <asm/processor.h> #include <asm/processor-flags.h> #include "bios.h" #define SEG(segment) $segment * X86_GDT_ENTRY_SIZE /* * This is the interrupt handler stub code. It gets copied to the IDT and * to some fixed addresses in the F segment. Before the code can used, * it gets patched up by the C function copying it: byte 3 (the $0 in * movb $0, %al) is overwritten with the interrupt numbers. */ .code16 .globl __idt_handler __idt_handler: pushal movb $0, %al /* This instruction gets modified */ ljmp $0, $__interrupt_handler_16bit .globl __idt_handler_size __idt_handler_size: .long . - __idt_handler .macro setup_registers /* initial register values */ movl 44(%ebp), %eax movl %eax, __registers + 0 /* eax */ movl 48(%ebp), %eax movl %eax, __registers + 4 /* ebx */ movl 52(%ebp), %eax movl %eax, __registers + 8 /* ecx */ movl 56(%ebp), %eax movl %eax, __registers + 12 /* edx */ movl 60(%ebp), %eax movl %eax, __registers + 16 /* esi */ movl 64(%ebp), %eax movl %eax, __registers + 20 /* edi */ .endm .macro enter_real_mode /* Activate the right segment descriptor real mode. */ ljmp SEG(X86_GDT_ENTRY_16BIT_CS), $PTR_TO_REAL_MODE(1f) 1: .code16 /* * Load the segment registers with properly configured segment * descriptors. They will retain these configurations (limits, * writability, etc.) once protected mode is turned off. */ mov SEG(X86_GDT_ENTRY_16BIT_DS), %ax mov %ax, %ds mov %ax, %es mov %ax, %fs mov %ax, %gs mov %ax, %ss /* Turn off protection */ movl %cr0, %eax andl $~X86_CR0_PE, %eax movl %eax, %cr0 /* Now really going into real mode */ ljmp $0, $PTR_TO_REAL_MODE(1f) 1: /* * Set up a stack: Put the stack at the end of page zero. That way * we can easily share it between real and protected, since the * 16-bit ESP at segment 0 will work for any case. 
*/ mov $0x0, %ax mov %ax, %ss /* Load 16 bit IDT */ xor %ax, %ax mov %ax, %ds lidt __realmode_idt .endm .macro prepare_for_irom movl $0x1000, %eax movl %eax, %esp /* Initialise registers for option rom lcall */ movl __registers + 0, %eax movl __registers + 4, %ebx movl __registers + 8, %ecx movl __registers + 12, %edx movl __registers + 16, %esi movl __registers + 20, %edi /* Set all segments to 0x0000, ds to 0x0040 */ push %ax xor %ax, %ax mov %ax, %es mov %ax, %fs mov %ax, %gs mov SEG(X86_GDT_ENTRY_16BIT_FLAT_DS), %ax mov %ax, %ds pop %ax .endm .macro enter_protected_mode /* Go back to protected mode */ movl %cr0, %eax orl $X86_CR0_PE, %eax movl %eax, %cr0 /* Now that we are in protected mode jump to a 32 bit code segment */ data32 ljmp SEG(X86_GDT_ENTRY_32BIT_CS), $PTR_TO_REAL_MODE(1f) 1: .code32 mov SEG(X86_GDT_ENTRY_32BIT_DS), %ax mov %ax, %ds mov %ax, %es mov %ax, %gs mov %ax, %ss mov SEG(X86_GDT_ENTRY_32BIT_FS), %ax mov %ax, %fs /* restore proper idt */ lidt idt_ptr .endm /* * In order to be independent of U-Boot's position in RAM we relocate a part * of the code to the first megabyte of RAM, so the CPU can use it in * real-mode. This code lives at asm_realmode_code. */ .globl asm_realmode_code asm_realmode_code: /* Realmode IDT pointer structure. */ __realmode_idt = PTR_TO_REAL_MODE(.) .word 1023 /* 16 bit limit */ .long 0 /* 24 bit base */ .word 0 /* Preserve old stack */ __stack = PTR_TO_REAL_MODE(.) .long 0 /* Register store for realmode_call and realmode_interrupt */ __registers = PTR_TO_REAL_MODE(.) 
.long 0 /* 0 - EAX */ .long 0 /* 4 - EBX */ .long 0 /* 8 - ECX */ .long 0 /* 12 - EDX */ .long 0 /* 16 - ESI */ .long 0 /* 20 - EDI */ /* 256 byte buffer, used by int10 */ .globl asm_realmode_buffer asm_realmode_buffer: .skip 256 .code32 .globl asm_realmode_call asm_realmode_call: /* save all registers to the stack */ pusha pushf movl %esp, __stack movl %esp, %ebp /* * This function is called with regparm=0 and we have to skip the * 36 bytes from pushf+pusha. Hence start at 40. * Set up our call instruction. */ movl 40(%ebp), %eax mov %ax, __lcall_instr + 1 andl $0xffff0000, %eax shrl $4, %eax mov %ax, __lcall_instr + 3 wbinvd setup_registers enter_real_mode prepare_for_irom __lcall_instr = PTR_TO_REAL_MODE(.) .byte 0x9a .word 0x0000, 0x0000 enter_protected_mode /* restore stack pointer, eflags and register values and exit */ movl __stack, %esp popf popa ret .globl __realmode_interrupt __realmode_interrupt: /* save all registers to the stack and store the stack pointer */ pusha pushf movl %esp, __stack movl %esp, %ebp /* * This function is called with regparm=0 and we have to skip the * 36 bytes from pushf+pusha. Hence start at 40. * Prepare interrupt calling code. */ movl 40(%ebp), %eax movb %al, __intXX_instr + 1 /* intno */ setup_registers enter_real_mode prepare_for_irom __intXX_instr = PTR_TO_REAL_MODE(.) .byte 0xcd, 0x00 /* This becomes intXX */ enter_protected_mode /* restore stack pointer, eflags and register values and exit */ movl __stack, %esp popf popa ret /* * This is the 16-bit interrupt entry point called by the IDT stub code. * * Before this code code is called, %eax is pushed to the stack, and the * interrupt number is loaded into %al. On return this function cleans up * for its caller. */ .code16 __interrupt_handler_16bit = PTR_TO_REAL_MODE(.) push %ds push %es push %fs push %gs /* Save real mode SS */ movw %ss, %cs:__realmode_ss /* Clear DF to not break ABI assumptions */ cld /* * Clean up the interrupt number. 
We could do this in the stub, but * it would cost two more bytes per stub entry. */ andl $0xff, %eax pushl %eax /* ... and make it the first parameter */ enter_protected_mode /* * Now we are in protected mode. We need compute the right ESP based * on saved real mode SS otherwise interrupt_handler() won't get * correct parameters from the stack. */ movzwl %cs:__realmode_ss, %ecx shll $4, %ecx addl %ecx, %esp /* Call the C interrupt handler */ movl $interrupt_handler, %eax call *%eax /* Restore real mode ESP based on saved SS */ movzwl %cs:__realmode_ss, %ecx shll $4, %ecx subl %ecx, %esp enter_real_mode /* Restore real mode SS */ movw %cs:__realmode_ss, %ss /* * Restore all registers, including those manipulated by the C * handler */ popl %eax pop %gs pop %fs pop %es pop %ds popal iret __realmode_ss = PTR_TO_REAL_MODE(.) .word 0 .globl asm_realmode_code_size asm_realmode_code_size: .long . - asm_realmode_code
530154436/cpp_learning
23,473
csapp/code/mem/matmult-test/mm.s
.file "mm.c" .text .p2align 4,,15 .globl ijk .type ijk, @function ijk: .LFB39: .cfi_startproc testl %ecx, %ecx jle .L11 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 leal -1(%rcx), %r11d vxorpd %xmm2, %xmm2, %xmm2 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 leaq 8(%rdi,%r11,8), %r8 addq $1, %r11 imulq $9704, %r11, %r12 movq %rdx, %rbp salq $3, %r11 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 movq %rdi, %rbx xorl %edi, %edi .L3: leaq 0(%rbp,%rdi), %r10 leaq (%rbx,%rdi), %rdx xorl %r9d, %r9d .p2align 4,,10 .p2align 3 .L7: leaq (%rsi,%r9), %rcx vmovapd %xmm2, %xmm0 movq %rdx, %rax .p2align 4,,10 .p2align 3 .L6: vmovsd (%rax), %xmm1 addq $8, %rax addq $9704, %rcx vmulsd -9704(%rcx), %xmm1, %xmm1 cmpq %r8, %rax vaddsd %xmm1, %xmm0, %xmm0 jne .L6 vaddsd (%r10,%r9), %xmm0, %xmm0 vmovsd %xmm0, (%r10,%r9) addq $8, %r9 cmpq %r11, %r9 jne .L7 addq $9704, %rdi addq $9704, %r8 cmpq %r12, %rdi jne .L3 popq %rbx .cfi_restore 3 .cfi_def_cfa_offset 24 popq %rbp .cfi_restore 6 .cfi_def_cfa_offset 16 popq %r12 .cfi_restore 12 .cfi_def_cfa_offset 8 .L11: ret .cfi_endproc .LFE39: .size ijk, .-ijk .p2align 4,,15 .globl jik .type jik, @function jik: .LFB40: .cfi_startproc testl %ecx, %ecx jle .L23 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 leal -1(%rcx), %eax movq %rsi, %r9 vxorpd %xmm2, %xmm2, %xmm2 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 movq %rdi, %rbp pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 movq %rdx, %rbx leaq 0(,%rax,8), %rdx addq $1, %rax subq %rbx, %rbp imulq $9704, %rax, %r10 leaq 8(%rdx,%rbx), %r12 addq %rdx, %rbp .L15: leaq 8(%rbp), %r11 movq %rbx, %rsi xorl %r8d, %r8d .p2align 4,,10 .p2align 3 .L19: leaq (%r11,%rsi), %rcx leaq (%rdi,%r8), %rax movq %r9, %rdx vmovapd %xmm2, %xmm0 .p2align 4,,10 .p2align 3 .L18: vmovsd (%rax), %xmm1 addq $8, %rax addq $9704, %rdx vmulsd -9704(%rdx), %xmm1, %xmm1 cmpq %rcx, %rax vaddsd %xmm1, %xmm0, %xmm0 jne .L18 vaddsd (%rsi), %xmm0, %xmm0 addq $9704, %r8 addq $9704, %rsi vmovsd 
%xmm0, -9704(%rsi) cmpq %r10, %r8 jne .L19 addq $8, %rbx addq $8, %r9 subq $8, %rbp cmpq %r12, %rbx jne .L15 popq %rbx .cfi_restore 3 .cfi_def_cfa_offset 24 popq %rbp .cfi_restore 6 .cfi_def_cfa_offset 16 popq %r12 .cfi_restore 12 .cfi_def_cfa_offset 8 .L23: ret .cfi_endproc .LFE40: .size jik, .-jik .p2align 4,,15 .globl ikj .type ikj, @function ikj: .LFB41: .cfi_startproc pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 movq %rsp, %rbp .cfi_def_cfa_register 6 pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx andq $-32, %rsp addq $8, %rsp testl %ecx, %ecx .cfi_offset 15, -24 .cfi_offset 14, -32 .cfi_offset 13, -40 .cfi_offset 12, -48 .cfi_offset 3, -56 movq %rdi, -64(%rsp) movq %rsi, -72(%rsp) jle .L50 leal -1(%rcx), %r8d movl %ecx, %r9d leaq 32(%rdx), %rsi shrl $2, %r9d movq $0, -48(%rsp) addq $1, %r8 leal 0(,%r9,4), %r11d movq %rsi, -80(%rsp) imulq $9704, %r8, %rax salq $3, %r8 movq %rax, -56(%rsp) movslq %r11d, %rax leal 1(%rax), %ebx leal 2(%rax), %edi movq %rax, -24(%rsp) salq $3, %rax movq %rax, -88(%rsp) movslq %ebx, %rax movl %ebx, -12(%rsp) movq %rax, -32(%rsp) salq $3, %rax movl %edi, -16(%rsp) movq %rax, -96(%rsp) movslq %edi, %rax movq %rax, -40(%rsp) salq $3, %rax movq %rax, -104(%rsp) .L26: movq -48(%rsp), %rax movq -64(%rsp), %r15 xorl %r10d, %r10d movq -80(%rsp), %r14 movq -72(%rsp), %rsi addq %rax, %r15 addq %rax, %r14 movq -88(%rsp), %rax leaq (%rdx,%rax), %r13 movq -96(%rsp), %rax leaq (%rdx,%rax), %r12 movq -104(%rsp), %rax leaq (%rdx,%rax), %rbx .p2align 4,,10 .p2align 3 .L37: leaq 32(%rsi), %rax vmovsd (%r15,%r10), %xmm2 cmpq %rax, %rdx setae %dil cmpq %r14, %rsi setae %al orb %al, %dil je .L27 cmpl $5, %ecx jbe .L27 testl %r11d, %r11d je .L28 vbroadcastsd %xmm2, %ymm3 xorl %eax, %eax xorl %edi, %edi .p2align 4,,10 .p2align 3 .L33: vmovupd (%rsi,%rax), %xmm0 addl $1, %edi vinsertf128 $0x1, 16(%rsi,%rax), %ymm0, %ymm0 vmovupd (%rdx,%rax), %xmm1 vinsertf128 $0x1, 16(%rdx,%rax), %ymm1, %ymm1 vmulpd %ymm3, %ymm0, %ymm0 vaddpd %ymm0, 
%ymm1, %ymm0 vmovupd %xmm0, (%rdx,%rax) vextractf128 $0x1, %ymm0, 16(%rdx,%rax) addq $32, %rax cmpl %edi, %r9d ja .L33 cmpl %ecx, %r11d je .L34 .L28: movq -24(%rsp), %rax cmpl -12(%rsp), %ecx vmulsd (%rsi,%rax,8), %xmm2, %xmm0 vaddsd 0(%r13), %xmm0, %xmm0 vmovsd %xmm0, 0(%r13) jle .L34 movq -32(%rsp), %rax cmpl -16(%rsp), %ecx vmulsd (%rsi,%rax,8), %xmm2, %xmm0 vaddsd (%r12), %xmm0, %xmm0 vmovsd %xmm0, (%r12) jle .L34 movq -40(%rsp), %rax vmulsd (%rsi,%rax,8), %xmm2, %xmm0 vaddsd (%rbx), %xmm0, %xmm0 vmovsd %xmm0, (%rbx) .L34: addq $8, %r10 addq $9704, %rsi cmpq %r8, %r10 jne .L37 addq $9704, -48(%rsp) addq $9704, %rdx movq -56(%rsp), %rax cmpq %rax, -48(%rsp) jne .L26 vzeroupper .L50: leaq -40(%rbp), %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp .cfi_remember_state .cfi_def_cfa 7, 8 ret .p2align 4,,10 .p2align 3 .L27: .cfi_restore_state xorl %eax, %eax .p2align 4,,10 .p2align 3 .L35: vmulsd (%rsi,%rax), %xmm2, %xmm0 vaddsd (%rdx,%rax), %xmm0, %xmm0 vmovsd %xmm0, (%rdx,%rax) addq $8, %rax cmpq %r8, %rax jne .L35 jmp .L34 .cfi_endproc .LFE41: .size ikj, .-ikj .p2align 4,,15 .globl kij .type kij, @function kij: .LFB42: .cfi_startproc pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 movq %rsp, %rbp .cfi_def_cfa_register 6 pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx andq $-32, %rsp addq $8, %rsp testl %ecx, %ecx .cfi_offset 15, -24 .cfi_offset 14, -32 .cfi_offset 13, -40 .cfi_offset 12, -48 .cfi_offset 3, -56 movq %rdi, -72(%rsp) movq %rdx, -80(%rsp) jle .L78 movl %ecx, %r9d leal -1(%rcx), %r12d movq $0, -64(%rsp) shrl $2, %r9d leal 0(,%r9,4), %r11d addq $1, %r12 leaq 0(,%r12,8), %r8 movslq %r11d, %r15 leal 1(%r15), %eax leal 2(%r15), %ebx imulq $9704, %r12, %r12 movl %eax, -12(%rsp) cltq movl %ebx, %edi movq %rax, -32(%rsp) salq $3, %rax movl %ebx, -16(%rsp) movq %rax, -40(%rsp) movq %rdx, %rbx movslq %edi, %rax leaq 0(,%r15,8), %rdx movq %rax, -48(%rsp) salq $3, %rax addq $32, %rbx movq %rax, -56(%rsp) movq %rdx, -24(%rsp) .L54: movq 
-72(%rsp), %rax movq -64(%rsp), %rdi leaq 32(%rsi), %r13 movq -80(%rsp), %rdx xorl %r10d, %r10d leaq (%rax,%rdi), %r14 .p2align 4,,10 .p2align 3 .L65: leaq (%rbx,%r10), %rax vmovsd (%r14,%r10), %xmm2 cmpq %rax, %rsi setae %dil cmpq %r13, %rdx setae %al orb %al, %dil je .L55 cmpl $5, %ecx jbe .L55 testl %r11d, %r11d je .L56 vbroadcastsd %xmm2, %ymm3 xorl %eax, %eax xorl %edi, %edi .p2align 4,,10 .p2align 3 .L61: vmovupd (%rsi,%rax), %xmm0 addl $1, %edi vinsertf128 $0x1, 16(%rsi,%rax), %ymm0, %ymm0 vmovupd (%rdx,%rax), %xmm1 vinsertf128 $0x1, 16(%rdx,%rax), %ymm1, %ymm1 vmulpd %ymm3, %ymm0, %ymm0 vaddpd %ymm0, %ymm1, %ymm0 vmovupd %xmm0, (%rdx,%rax) vextractf128 $0x1, %ymm0, 16(%rdx,%rax) addq $32, %rax cmpl %edi, %r9d ja .L61 cmpl %ecx, %r11d je .L62 .L56: vmulsd (%rsi,%r15,8), %xmm2, %xmm0 movq -24(%rsp), %rax addq %rdx, %rax cmpl -12(%rsp), %ecx vaddsd (%rax), %xmm0, %xmm0 vmovsd %xmm0, (%rax) jle .L62 movq -32(%rsp), %rdi movq -40(%rsp), %rax vmulsd (%rsi,%rdi,8), %xmm2, %xmm0 addq %rdx, %rax cmpl -16(%rsp), %ecx vaddsd (%rax), %xmm0, %xmm0 vmovsd %xmm0, (%rax) jle .L62 movq -48(%rsp), %rdi movq -56(%rsp), %rax vmulsd (%rsi,%rdi,8), %xmm2, %xmm0 addq %rdx, %rax vaddsd (%rax), %xmm0, %xmm0 vmovsd %xmm0, (%rax) .L62: addq $9704, %r10 addq $9704, %rdx cmpq %r12, %r10 jne .L65 addq $8, -64(%rsp) addq $9704, %rsi cmpq %r8, -64(%rsp) jne .L54 vzeroupper .L78: leaq -40(%rbp), %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp .cfi_remember_state .cfi_def_cfa 7, 8 ret .p2align 4,,10 .p2align 3 .L55: .cfi_restore_state xorl %eax, %eax .p2align 4,,10 .p2align 3 .L63: vmulsd (%rsi,%rax), %xmm2, %xmm0 vaddsd (%rdx,%rax), %xmm0, %xmm0 vmovsd %xmm0, (%rdx,%rax) addq $8, %rax cmpq %r8, %rax jne .L63 jmp .L62 .cfi_endproc .LFE42: .size kij, .-kij .p2align 4,,15 .globl kji .type kji, @function kji: .LFB43: .cfi_startproc testl %ecx, %ecx jle .L90 leal -1(%rcx), %eax pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 xorl %r11d, %r11d imulq $-9704, %rax, %rcx leaq 
8(,%rax,8), %rbp pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 movq %rcx, %r8 subq %rdx, %r8 addq %r8, %rsi imulq $9704, %rax, %r8 imulq $9712, %rax, %rax leaq 9704(%rdx,%r8), %rbx leaq -9704(%rcx), %r8 leaq 9712(%rdx,%rax), %r9 .L82: leaq (%rdi,%r11), %r10 movq %rbx, %rcx .p2align 4,,10 .p2align 3 .L86: vmovsd -9704(%rsi,%rcx), %xmm1 leaq (%r8,%rcx), %rax movq %r10, %rdx .p2align 4,,10 .p2align 3 .L84: vmulsd (%rdx), %xmm1, %xmm0 addq $9704, %rax addq $9704, %rdx vaddsd -9704(%rax), %xmm0, %xmm0 vmovsd %xmm0, -9704(%rax) cmpq %rcx, %rax jne .L84 leaq 8(%rax), %rcx cmpq %r9, %rcx jne .L86 addq $8, %r11 addq $9704, %rsi cmpq %rbp, %r11 jne .L82 popq %rbx .cfi_restore 3 .cfi_def_cfa_offset 16 popq %rbp .cfi_restore 6 .cfi_def_cfa_offset 8 .L90: ret .cfi_endproc .LFE43: .size kji, .-kji .p2align 4,,15 .globl jki .type jki, @function jki: .LFB44: .cfi_startproc testl %ecx, %ecx jle .L101 leal -1(%rcx), %eax pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 imulq $9704, %rax, %r8 leaq 1(%rax), %r11 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 imulq $9712, %rax, %r9 imulq $-9704, %rax, %rax pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq %r8, %rsi leaq 9704(%rdx,%r8), %rcx subq %rdx, %rsi leaq 9712(%rdx,%r9), %r12 imulq $9704, %r11, %r11 leaq -9704(%rsi), %rbp leaq -9704(%rax), %rbx .L93: leaq 0(%rbp,%rcx), %r10 leaq (%rbx,%rcx), %r9 movq %rdi, %r8 xorl %esi, %esi .p2align 4,,10 .p2align 3 .L97: vmovsd (%r10,%rsi), %xmm1 movq %r8, %rdx movq %r9, %rax .p2align 4,,10 .p2align 3 .L95: vmulsd (%rdx), %xmm1, %xmm0 addq $9704, %rax addq $9704, %rdx vaddsd -9704(%rax), %xmm0, %xmm0 vmovsd %xmm0, -9704(%rax) cmpq %rax, %rcx jne .L95 addq $9704, %rsi addq $8, %r8 cmpq %r11, %rsi jne .L97 addq $8, %rcx cmpq %r12, %rcx jne .L93 popq %rbx .cfi_restore 3 .cfi_def_cfa_offset 24 popq %rbp .cfi_restore 6 .cfi_def_cfa_offset 16 popq %r12 .cfi_restore 12 .cfi_def_cfa_offset 8 .L101: rep; ret .cfi_endproc .LFE44: .size jki, .-jki .section 
.rodata.str1.8,"aMS",@progbits,1 .align 8 .LC1: .string "Error: bad number (%f) in result matrix (%d,%d)\n" .text .p2align 4,,15 .globl checkresult .type checkresult, @function checkresult: .LFB34: .cfi_startproc subq $8, %rsp .cfi_def_cfa_offset 16 testl %esi, %esi jle .L111 vcvtsi2sd %esi, %xmm1, %xmm1 addq $8, %rdi xorl %edx, %edx .p2align 4,,10 .p2align 3 .L104: vmovsd -8(%rdi), %xmm0 vucomisd %xmm0, %xmm1 jp .L110 jne .L110 movq %rdi, %rax xorl %ecx, %ecx jmp .L109 .p2align 4,,10 .p2align 3 .L107: addq $8, %rax vmovsd -8(%rax), %xmm0 vucomisd %xmm0, %xmm1 jp .L108 jne .L108 .L109: addl $1, %ecx cmpl %esi, %ecx jne .L107 addl $1, %edx addq $9704, %rdi cmpl %esi, %edx jne .L104 .L111: addq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L110: .cfi_restore_state xorl %ecx, %ecx .L108: movl $.LC1, %esi movl $1, %edi movl $1, %eax call __printf_chk movq stdout(%rip), %rdi call fflush xorl %edi, %edi call exit .cfi_endproc .LFE34: .size checkresult, .-checkresult .p2align 4,,15 .globl run .type run, @function run: .LFB35: .cfi_startproc pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 movl $1, %edx movl %esi, %ebx call fcyc movl %ebx, %eax imull %ebx, %eax imull %ebx, %eax testl %ebx, %ebx vcvtsi2sd %eax, %xmm1, %xmm1 vdivsd %xmm1, %xmm0, %xmm0 jle .L124 vcvtsi2sd %ebx, %xmm2, %xmm2 leal -1(%rbx), %r8d movl $gc, %edi xorl %edx, %edx addq $1, %r8 .p2align 4,,10 .p2align 3 .L119: xorl %eax, %eax .p2align 4,,10 .p2align 3 .L120: movl %eax, %ecx vmovsd (%rdi,%rax,8), %xmm1 vucomisd %xmm2, %xmm1 jp .L121 jne .L121 addq $1, %rax cmpq %r8, %rax jne .L120 addl $1, %edx addq $9704, %rdi cmpl %ebx, %edx jne .L119 .L124: popq %rbx .cfi_remember_state .cfi_def_cfa_offset 8 ret .L121: .cfi_restore_state vmovapd %xmm1, %xmm0 movl $.LC1, %esi movl $1, %edi movl $1, %eax call __printf_chk movq stdout(%rip), %rdi call fflush xorl %edi, %edi call exit .cfi_endproc .LFE35: .size run, .-run .p2align 4,,15 .globl reset .type reset, @function reset: .LFB36: .cfi_startproc 
testl %esi, %esi jle .L133 pushq %r13 .cfi_def_cfa_offset 16 .cfi_offset 13, -16 movq %rdi, %r13 pushq %r12 .cfi_def_cfa_offset 24 .cfi_offset 12, -24 leal -1(%rsi), %r12d movslq %esi, %rsi pushq %rbp .cfi_def_cfa_offset 32 .cfi_offset 6, -32 addq $1, %r12 leaq 0(,%rsi,8), %rbp imulq $9704, %r12, %r12 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset 3, -40 xorl %ebx, %ebx subq $8, %rsp .cfi_def_cfa_offset 48 .p2align 4,,10 .p2align 3 .L128: leaq 0(%r13,%rbx), %rdi xorl %esi, %esi movq %rbp, %rdx addq $9704, %rbx call memset cmpq %r12, %rbx jne .L128 addq $8, %rsp .cfi_def_cfa_offset 40 popq %rbx .cfi_restore 3 .cfi_def_cfa_offset 32 popq %rbp .cfi_restore 6 .cfi_def_cfa_offset 24 popq %r12 .cfi_restore 12 .cfi_def_cfa_offset 16 popq %r13 .cfi_restore 13 .cfi_def_cfa_offset 8 .L133: rep; ret .cfi_endproc .LFE36: .size reset, .-reset .p2align 4,,15 .globl init .type init, @function init: .LFB37: .cfi_startproc testl %edx, %edx jle .L159 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 movl %edx, %r8d leal -1(%rdx), %eax shrl $2, %r8d movq %rdi, %rcx vmovapd .LC3(%rip), %ymm0 movq %rsp, %rbp .cfi_def_cfa_register 6 pushq %r15 leal 0(,%r8,4), %r10d leaq 8(,%rax,8), %rdi xorl %r9d, %r9d pushq %r14 pushq %r13 pushq %r12 .cfi_offset 15, -24 .cfi_offset 14, -32 .cfi_offset 13, -40 .cfi_offset 12, -48 movslq %r10d, %r12 leal 1(%r12), %r14d leal 2(%r12), %r13d pushq %rbx .cfi_offset 3, -56 movslq %r13d, %r11 movslq %r14d, %rbx .p2align 4,,10 .p2align 3 .L136: leaq 32(%rsi), %rax cmpq %rax, %rcx leaq 32(%rcx), %rax setae %r15b cmpq %rax, %rsi setae %al orb %al, %r15b je .L147 cmpl $13, %edx jbe .L147 testl %r10d, %r10d je .L140 xorl %eax, %eax xorl %r15d, %r15d .p2align 4,,10 .p2align 3 .L137: addl $1, %r15d vextractf128 $0x1, %ymm0, 16(%rcx,%rax) vmovupd %xmm0, (%rcx,%rax) vextractf128 $0x1, %ymm0, 16(%rsi,%rax) vmovupd %xmm0, (%rsi,%rax) addq $32, %rax cmpl %r8d, %r15d jb .L137 cmpl %edx, %r10d je .L138 .L140: vmovsd .LC2(%rip), %xmm5 cmpl %r14d, %edx vmovsd %xmm5, 
(%rcx,%r12,8) vmovsd %xmm5, (%rsi,%r12,8) jle .L138 cmpl %r13d, %edx vmovsd %xmm5, (%rcx,%rbx,8) vmovsd %xmm5, (%rsi,%rbx,8) jle .L138 vmovsd .LC2(%rip), %xmm3 vmovsd %xmm3, (%rcx,%r11,8) vmovsd %xmm3, (%rsi,%r11,8) .L138: addl $1, %r9d addq $9704, %rsi addq $9704, %rcx cmpl %edx, %r9d jne .L136 vzeroupper popq %rbx .cfi_restore 3 popq %r12 .cfi_restore 12 popq %r13 .cfi_restore 13 popq %r14 .cfi_restore 14 popq %r15 .cfi_restore 15 popq %rbp .cfi_restore 6 .cfi_def_cfa 7, 8 .L159: rep; ret .p2align 4,,10 .p2align 3 .L147: .cfi_def_cfa 6, 16 .cfi_offset 3, -56 .cfi_offset 6, -16 .cfi_offset 12, -48 .cfi_offset 13, -40 .cfi_offset 14, -32 .cfi_offset 15, -24 vmovsd .LC2(%rip), %xmm1 xorl %eax, %eax .p2align 4,,10 .p2align 3 .L139: vmovsd %xmm1, (%rcx,%rax) vmovsd %xmm1, (%rsi,%rax) addq $8, %rax cmpq %rdi, %rax jne .L139 jmp .L138 .cfi_endproc .LFE37: .size init, .-init .section .rodata.str1.1,"aMS",@progbits,1 .LC4: .string "%5.1f " .text .p2align 4,,15 .globl printarray .type printarray, @function printarray: .LFB38: .cfi_startproc pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 movl %esi, %r14d pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $8, %rsp .cfi_def_cfa_offset 64 testl %esi, %esi jle .L165 leal -1(%rsi), %eax movq %rdi, %r12 xorl %r13d, %r13d leaq 8(,%rax,8), %r15 .p2align 4,,10 .p2align 3 .L162: leaq (%r15,%r12), %rbp movq %r12, %rbx .p2align 4,,10 .p2align 3 .L164: vmovsd (%rbx), %xmm0 movl $.LC4, %esi movl $1, %edi movl $1, %eax addq $8, %rbx call __printf_chk cmpq %rbp, %rbx jne .L164 movl $10, %edi addl $1, %r13d addq $9704, %r12 call putchar cmpl %r14d, %r13d jne .L162 .L165: addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 
.cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE38: .size printarray, .-printarray .section .rodata.str1.1 .LC5: .string "matmult cycles/loop iteration" .LC6: .string "ijk" .LC7: .string "kji" .LC8: .string "jki" .LC9: .string "n" .LC10: .string "%3s%6s%6s%6s%6s%6s%6s\n" .LC11: .string "ikj" .LC12: .string "kij" .LC13: .string "jik" .LC14: .string "%3d " .LC15: .string "%5.2f " .section .text.startup,"ax",@progbits .p2align 4,,15 .globl main .type main, @function main: .LFB45: .cfi_startproc pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 movl $ga, %r8d movl $gb, %r9d movq %rsp, %rbp .cfi_def_cfa_register 6 pushq %r12 pushq %rbx subq $48, %rsp .cfi_offset 12, -24 .cfi_offset 3, -32 vmovapd .LC3(%rip), %ymm0 .p2align 4,,10 .p2align 3 .L168: movq %r8, %rax andl $31, %eax shrq $3, %rax negq %rax andl $3, %eax je .L215 vmovsd .LC2(%rip), %xmm4 cmpl $1, %eax vmovsd %xmm4, (%r8) vmovsd %xmm4, (%r9) je .L216 cmpl $3, %eax vmovsd %xmm4, 8(%r8) vmovsd %xmm4, 8(%r9) jne .L217 vmovsd %xmm4, 16(%r8) movl $697, %ebx movl $3, %r12d vmovsd %xmm4, 16(%r9) .L174: movl $700, %r10d xorl %edx, %edx subl %eax, %r10d movl %eax, %eax movl %r10d, %esi leaq 0(,%rax,8), %rcx xorl %eax, %eax shrl $2, %esi leal 0(,%rsi,4), %r11d leaq (%r8,%rcx), %rdi addq %r9, %rcx .p2align 4,,10 .p2align 3 .L176: addl $1, %edx vmovapd %ymm0, (%rdi,%rax) vmovupd %xmm0, (%rcx,%rax) vextractf128 $0x1, %ymm0, 16(%rcx,%rax) addq $32, %rax cmpl %esi, %edx jb .L176 subl %r11d, %ebx cmpl %r10d, %r11d leal (%r11,%r12), %eax je .L171 vmovsd .LC2(%rip), %xmm5 movslq %eax, %rdx cmpl $1, %ebx vmovsd %xmm5, (%r8,%rdx,8) vmovsd %xmm5, (%r9,%rdx,8) leal 1(%rax), %edx je .L171 movslq %edx, %rdx addl $2, %eax cmpl $2, %ebx vmovsd %xmm5, (%r8,%rdx,8) vmovsd %xmm5, (%r9,%rdx,8) je .L171 vmovsd .LC2(%rip), %xmm3 cltq vmovsd %xmm3, (%r8,%rax,8) vmovsd %xmm3, (%r9,%rax,8) .L171: addq $9704, %r8 addq $9704, %r9 cmpq $ga+6792800, %r8 jne .L168 movl $.LC5, %edi 
vzeroupper call puts movl $.LC6, %r9d movl $.LC7, %r8d movl $.LC8, %ecx movl $.LC9, %edx movl $.LC10, %esi movl $1, %edi movq $.LC11, 16(%rsp) movq $.LC12, 8(%rsp) xorl %eax, %eax movq $.LC13, (%rsp) movl $50, %ebx call __printf_chk movq stdout(%rip), %rdi call fflush .L214: movl %ebx, %edx movl $.LC14, %esi movl $1, %edi xorl %eax, %eax call __printf_chk movl $1, %edx movl %ebx, %esi movl $jki, %edi call fcyc movl %ebx, %eax vcvtsi2sd %ebx, %xmm1, %xmm1 imull %ebx, %eax movl $gc, %esi xorl %edx, %edx imull %ebx, %eax vcvtsi2sd %eax, %xmm5, %xmm5 vdivsd %xmm5, %xmm0, %xmm0 vmovsd %xmm5, 40(%rsp) .p2align 4,,10 .p2align 3 .L177: xorl %eax, %eax .p2align 4,,10 .p2align 3 .L182: movl %eax, %ecx vmovsd (%rsi,%rax,8), %xmm2 vucomisd %xmm1, %xmm2 jp .L223 jne .L223 addq $1, %rax cmpl %eax, %ebx jg .L182 addl $1, %edx addq $9704, %rsi cmpl %ebx, %edx jl .L177 movl $.LC15, %esi movl $1, %edi movl $1, %eax vmovsd %xmm1, 32(%rsp) call __printf_chk movl $1, %edx movl %ebx, %esi movl $kji, %edi call fcyc vdivsd 40(%rsp), %xmm0, %xmm0 movl $gc, %esi xorl %edx, %edx vmovsd 32(%rsp), %xmm1 .p2align 4,,10 .p2align 3 .L183: xorl %eax, %eax .p2align 4,,10 .p2align 3 .L188: vmovsd (%rsi,%rax,8), %xmm2 movl %eax, %ecx vucomisd %xmm1, %xmm2 jp .L223 jne .L223 addq $1, %rax cmpl %eax, %ebx jg .L188 addl $1, %edx addq $9704, %rsi cmpl %ebx, %edx jl .L183 movl $.LC15, %esi movl $1, %edi movl $1, %eax vmovsd %xmm1, 32(%rsp) call __printf_chk movl $1, %edx movl %ebx, %esi movl $ijk, %edi call fcyc vdivsd 40(%rsp), %xmm0, %xmm0 movl $gc, %esi xorl %edx, %edx vmovsd 32(%rsp), %xmm1 .p2align 4,,10 .p2align 3 .L189: xorl %eax, %eax .p2align 4,,10 .p2align 3 .L194: vmovsd (%rsi,%rax,8), %xmm2 movl %eax, %ecx vucomisd %xmm1, %xmm2 jp .L223 jne .L223 addq $1, %rax cmpl %eax, %ebx jg .L194 addl $1, %edx addq $9704, %rsi cmpl %ebx, %edx jl .L189 movl $.LC15, %esi movl $1, %edi movl $1, %eax vmovsd %xmm1, 32(%rsp) call __printf_chk movl $1, %edx movl %ebx, %esi movl $jik, %edi call fcyc vdivsd 
40(%rsp), %xmm0, %xmm0 movl $gc, %esi xorl %edx, %edx vmovsd 32(%rsp), %xmm1 .p2align 4,,10 .p2align 3 .L195: xorl %eax, %eax .p2align 4,,10 .p2align 3 .L200: vmovsd (%rsi,%rax,8), %xmm2 movl %eax, %ecx vucomisd %xmm1, %xmm2 jp .L223 jne .L223 addq $1, %rax cmpl %eax, %ebx jg .L200 addl $1, %edx addq $9704, %rsi cmpl %ebx, %edx jl .L195 movl $.LC15, %esi movl $1, %edi movl $1, %eax vmovsd %xmm1, 32(%rsp) call __printf_chk movl $1, %edx movl %ebx, %esi movl $kij, %edi call fcyc vdivsd 40(%rsp), %xmm0, %xmm0 movl $gc, %esi xorl %edx, %edx vmovsd 32(%rsp), %xmm1 .p2align 4,,10 .p2align 3 .L201: xorl %eax, %eax .p2align 4,,10 .p2align 3 .L206: vmovsd (%rsi,%rax,8), %xmm2 movl %eax, %ecx vucomisd %xmm1, %xmm2 jp .L223 jne .L223 addq $1, %rax cmpl %eax, %ebx jg .L206 addl $1, %edx addq $9704, %rsi cmpl %ebx, %edx jl .L201 movl $.LC15, %esi movl $1, %edi movl $1, %eax vmovsd %xmm1, 32(%rsp) call __printf_chk movl $1, %edx movl %ebx, %esi movl $ikj, %edi call fcyc vdivsd 40(%rsp), %xmm0, %xmm0 movl $gc, %esi xorl %edx, %edx vmovsd 32(%rsp), %xmm1 .L207: xorl %eax, %eax .p2align 4,,10 .p2align 3 .L212: vmovsd (%rsi,%rax,8), %xmm2 movl %eax, %ecx vucomisd %xmm1, %xmm2 jp .L223 jne .L223 addq $1, %rax cmpl %eax, %ebx jg .L212 addl $1, %edx addq $9704, %rsi cmpl %ebx, %edx jl .L207 movl $.LC15, %esi movl $1, %edi movl $1, %eax call __printf_chk movl $10, %edi addl $50, %ebx call putchar movq stdout(%rip), %rdi call fflush cmpl $750, %ebx jne .L214 xorl %edi, %edi call exit .p2align 4,,10 .p2align 3 .L215: movl $700, %ebx xorl %r12d, %r12d jmp .L174 .p2align 4,,10 .p2align 3 .L217: movl $698, %ebx movl $2, %r12d jmp .L174 .p2align 4,,10 .p2align 3 .L216: movl $699, %ebx movl $1, %r12d jmp .L174 .L223: vmovapd %xmm2, %xmm0 movl $.LC1, %esi movl $1, %edi movl $1, %eax call __printf_chk movq stdout(%rip), %rdi call fflush xorl %edi, %edi call exit .cfi_endproc .LFE45: .size main, .-main .comm gc,6792800,32 .comm gb,6792800,32 .comm ga,6792800,32 .section 
.rodata.cst8,"aM",@progbits,8 .align 8 .LC2: .long 0 .long 1072693248 .section .rodata.cst32,"aM",@progbits,32 .align 32 .LC3: .long 0 .long 1072693248 .long 0 .long 1072693248 .long 0 .long 1072693248 .long 0 .long 1072693248 .ident "GCC: (Ubuntu 4.8.1-2ubuntu1~12.04) 4.8.1" .section .note.GNU-stack,"",@progbits
4ms/stm32mp1-baremetal
1,040
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/x86/lib/crt0_ia32_efi.S
/* SPDX-License-Identifier: BSD-3-Clause */ /* * crt0-efi-ia32.S - x86 EFI startup code. * * Copyright (C) 1999 Hewlett-Packard Co. * Contributed by David Mosberger <davidm@hpl.hp.com>. * All rights reserved. */ .text .align 4 .globl _start _start: pushl %ebp movl %esp,%ebp pushl 12(%ebp) # copy "image" argument pushl 8(%ebp) # copy "systab" argument call 0f 0: popl %eax movl %eax,%ebx addl $image_base-0b,%eax # %eax = ldbase addl $_DYNAMIC-0b,%ebx # %ebx = _DYNAMIC pushl %ebx # pass _DYNAMIC as second argument pushl %eax # pass ldbase as first argument call _relocate popl %ebx popl %ebx testl %eax,%eax jne .exit call efi_main # call app with "image" and "systab" argument .exit: leave ret /* * hand-craft a dummy .reloc section so EFI knows it's a relocatable * executable: */ .data dummy: .long 0 #define IMAGE_REL_ABSOLUTE 0 .section .reloc .long dummy /* Page RVA */ .long 10 /* Block Size (2*4+2) */ .word (IMAGE_REL_ABSOLUTE << 12) + 0 /* reloc for dummy */
4ms/stm32mp1-baremetal
1,850
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/x86/cpu/start_from_spl.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * 32-bit x86 Startup Code when running from SPL. This is the startup code in * U-Boot proper, when SPL is used. * Copyright 2018 Google, Inc * Written by Simon Glass <sjg@chromium.org> */ #include <config.h> .section .text.start .code32 .globl _start .type _start, @function _start: /* Set up memory using the existing stack */ movl $(CONFIG_SYS_CAR_ADDR + CONFIG_SYS_CAR_SIZE - 4), %eax #ifdef CONFIG_DCACHE_RAM_MRC_VAR_SIZE subl $CONFIG_DCACHE_RAM_MRC_VAR_SIZE, %eax #endif /* * We don't subject CONFIG_DCACHE_RAM_MRC_VAR_SIZE since memory is * already set up. This has the happy side-effect of putting gd in a * new place separate from SPL, so the memset() in * board_init_f_init_reserve() does not cause any problems (otherwise * it would zero out the gd and crash) */ call board_init_f_alloc_reserve mov %eax, %esp call board_init_f_init_reserve call x86_cpu_reinit_f xorl %eax, %eax call board_init_f call board_init_f_r /* Should not return here */ jmp . .globl board_init_f_r_trampoline .type board_init_f_r_trampoline, @function board_init_f_r_trampoline: /* * SPL has been executed and SDRAM has been initialised, U-Boot code * has been copied into RAM, BSS has been cleared and relocation * adjustments have been made. It is now time to jump into the in-RAM * copy of U-Boot * * %eax = Address of top of new stack */ /* Stack grows down from top of SDRAM */ movl %eax, %esp /* Re-enter U-Boot by calling board_init_f_r() */ call board_init_f_r die: hlt jmp die hlt .align 4 _dt_ucode_base_size: /* These next two fields are filled in by binman */ .globl ucode_base ucode_base: /* Declared in microcode.h */ .long 0 /* microcode base */ .globl ucode_size ucode_size: /* Declared in microcode.h */ .long 0 /* microcode size */
4ms/stm32mp1-baremetal
7,765
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/x86/cpu/start.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * U-Boot - x86 Startup Code * * This is always the first code to run from the U-Boot source. To spell it out: * * 1. When TPL (Tertiary Program Loader) is enabled, the boot flow is * TPL->SPL->U-Boot and this file is used for TPL. Then start_from_tpl.S is used * for SPL and start_from_spl.S is used for U-Boot proper. * * 2. When SPL (Secondary Program Loader) is enabled, but not TPL, the boot * flow is SPL->U-Boot and this file is used for SPL. Then start_from_spl.S is * used for U-Boot proper. * * 3. When neither TPL nor SPL is used, this file is used for U-Boot proper. * * (C) Copyright 2008-2011 * Graeme Russ, <graeme.russ@gmail.com> * * (C) Copyright 2002 * Daniel Engström, Omicron Ceti AB, <daniel@omicron.se> */ #include <config.h> #include <asm/global_data.h> #include <asm/post.h> #include <asm/processor.h> #include <asm/processor-flags.h> #include <generated/generic-asm-offsets.h> #include <generated/asm-offsets.h> #include <linux/linkage.h> .section .text.start .code32 .globl _start .type _start, @function .globl _x86boot_start _x86boot_start: /* * This is the fail-safe 32-bit bootstrap entry point. * * This code is used when booting from another boot loader like * coreboot or EFI. So we repeat some of the same init found in * start16. */ cli cld /* Turn off cache (this might require a 486-class CPU) */ movl %cr0, %eax orl $(X86_CR0_NW | X86_CR0_CD), %eax movl %eax, %cr0 wbinvd /* * Zero the BIST (Built-In Self Test) value since we don't have it. * It must be 0 or the previous loader would have reported an error. 
*/ movl $0, %ebp jmp 1f /* Add a way for tools to discover the _start entry point */ .align 4 .long 0x12345678 _start: /* This is the 32-bit cold-reset entry point, coming from start16 */ /* Save BIST */ movl %eax, %ebp 1: /* Save table pointer */ movl %ecx, %esi #ifdef CONFIG_X86_LOAD_FROM_32_BIT lgdt gdt_ptr2 #endif /* Load the segement registers to match the GDT loaded in start16.S */ movl $(X86_GDT_ENTRY_32BIT_DS * X86_GDT_ENTRY_SIZE), %eax movw %ax, %fs movw %ax, %ds movw %ax, %gs movw %ax, %es movw %ax, %ss /* Clear the interrupt vectors */ lidt blank_idt_ptr /* * Critical early platform init - generally not used, we prefer init * to happen later when we have a console, in case something goes * wrong. */ jmp early_board_init .globl early_board_init_ret early_board_init_ret: post_code(POST_START) /* Initialise Cache-As-RAM */ jmp car_init .globl car_init_ret car_init_ret: #ifdef CONFIG_USE_CAR /* * We now have CONFIG_SYS_CAR_SIZE bytes of Cache-As-RAM (or SRAM, * or fully initialised SDRAM - we really don't care which) * starting at CONFIG_SYS_CAR_ADDR to be used as a temporary stack * and early malloc() area. The MRC requires some space at the top. * * Stack grows down from top of CAR. We have: * * top-> CONFIG_SYS_CAR_ADDR + CONFIG_SYS_CAR_SIZE * MRC area * global_data with x86 global descriptor table * early malloc area * stack * bottom-> CONFIG_SYS_CAR_ADDR */ movl $(CONFIG_SYS_CAR_ADDR + CONFIG_SYS_CAR_SIZE - 4), %esp #ifdef CONFIG_DCACHE_RAM_MRC_VAR_SIZE subl $CONFIG_DCACHE_RAM_MRC_VAR_SIZE, %esp #endif #else /* * U-Boot enters here twice. For the first time it comes from * car_init_done() with esp points to a temporary stack and esi * set to zero. For the second time it comes from fsp_init_done() * with esi holding the HOB list address returned by the FSP. 
*/ #endif /* Set up global data */ mov %esp, %eax call board_init_f_alloc_reserve mov %eax, %esp call board_init_f_init_reserve #ifdef CONFIG_DEBUG_UART call debug_uart_init #endif /* Get address of global_data */ mov %fs:0, %edx #if defined(CONFIG_USE_HOB) && !defined(CONFIG_USE_CAR) /* Store the HOB list if we have one */ test %esi, %esi jz skip_hob movl %esi, GD_HOB_LIST(%edx) #ifdef CONFIG_HAVE_FSP /* * After fsp_init() returns, the stack has already been switched to a * place within system memory as defined by CONFIG_FSP_TEMP_RAM_ADDR. * Enlarge the size of malloc() pool before relocation since we have * plenty of memory now. */ subl $CONFIG_FSP_SYS_MALLOC_F_LEN, %esp movl %esp, GD_MALLOC_BASE(%edx) #endif skip_hob: #else /* Store table pointer */ movl %esi, GD_TABLE(%edx) #endif /* Store BIST */ movl %ebp, GD_BIST(%edx) /* Set parameter to board_init_f() to boot flags */ post_code(POST_START_DONE) xorl %eax, %eax /* Enter, U-Boot! */ call board_init_f /* indicate (lack of) progress */ movw $0x85, %ax jmp die .globl board_init_f_r_trampoline .type board_init_f_r_trampoline, @function board_init_f_r_trampoline: /* * SDRAM has been initialised, U-Boot code has been copied into * RAM, BSS has been cleared and relocation adjustments have been * made. 
It is now time to jump into the in-RAM copy of U-Boot * * %eax = Address of top of new stack */ /* Stack grows down from top of SDRAM */ movl %eax, %esp /* See if we need to disable CAR */ call car_uninit /* Re-enter U-Boot by calling board_init_f_r() */ call board_init_f_r #ifdef CONFIG_TPL .globl jump_to_spl .type jump_to_spl, @function jump_to_spl: /* Reset stack to the top of CAR space */ movl $(CONFIG_SYS_CAR_ADDR + CONFIG_SYS_CAR_SIZE - 4), %esp #ifdef CONFIG_DCACHE_RAM_MRC_VAR_SIZE subl $CONFIG_DCACHE_RAM_MRC_VAR_SIZE, %esp #endif jmp *%eax #endif die: hlt jmp die hlt WEAK(car_uninit) ret ENDPROC(car_uninit) blank_idt_ptr: .word 0 /* limit */ .long 0 /* base */ .p2align 2 /* force 4-byte alignment */ /* Add a multiboot header so U-Boot can be loaded by GRUB2 */ multiboot_header: /* magic */ .long 0x1badb002 /* flags */ .long (1 << 16) /* checksum */ .long -0x1BADB002 - (1 << 16) /* header addr */ .long multiboot_header - _x86boot_start + CONFIG_SYS_TEXT_BASE /* load addr */ .long CONFIG_SYS_TEXT_BASE /* load end addr */ .long 0 /* bss end addr */ .long 0 /* entry addr */ .long CONFIG_SYS_TEXT_BASE #ifdef CONFIG_X86_LOAD_FROM_32_BIT /* * The following Global Descriptor Table is just enough to get us into * 'Flat Protected Mode' - It will be discarded as soon as the final * GDT is setup in a safe location in RAM */ gdt_ptr2: .word 0x1f /* limit (31 bytes = 4 GDT entries - 1) */ .long gdt_rom2 /* base */ /* Some CPUs are picky about GDT alignment... */ .align 16 .globl gdt_rom2 gdt_rom2: /* * The GDT table ... 
* * Selector Type * 0x00 NULL * 0x08 Unused * 0x10 32bit code * 0x18 32bit data/stack */ /* The NULL Desciptor - Mandatory */ .word 0x0000 /* limit_low */ .word 0x0000 /* base_low */ .byte 0x00 /* base_middle */ .byte 0x00 /* access */ .byte 0x00 /* flags + limit_high */ .byte 0x00 /* base_high */ /* Unused Desciptor - (matches Linux) */ .word 0x0000 /* limit_low */ .word 0x0000 /* base_low */ .byte 0x00 /* base_middle */ .byte 0x00 /* access */ .byte 0x00 /* flags + limit_high */ .byte 0x00 /* base_high */ /* * The Code Segment Descriptor: * - Base = 0x00000000 * - Size = 4GB * - Access = Present, Ring 0, Exec (Code), Readable * - Flags = 4kB Granularity, 32-bit */ .word 0xffff /* limit_low */ .word 0x0000 /* base_low */ .byte 0x00 /* base_middle */ .byte 0x9b /* access */ .byte 0xcf /* flags + limit_high */ .byte 0x00 /* base_high */ /* * The Data Segment Descriptor: * - Base = 0x00000000 * - Size = 4GB * - Access = Present, Ring 0, Non-Exec (Data), Writable * - Flags = 4kB Granularity, 32-bit */ .word 0xffff /* limit_low */ .word 0x0000 /* base_low */ .byte 0x00 /* base_middle */ .byte 0x93 /* access */ .byte 0xcf /* flags + limit_high */ .byte 0x00 /* base_high */ #endif
4ms/stm32mp1-baremetal
1,240
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/x86/cpu/call32.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * (C) Copyright 2015 Google, Inc * Written by Simon Glass <sjg@chromium.org> */ #include <asm/global_data.h> #include <asm/msr-index.h> #include <asm/processor-flags.h> /* * rdi - 32-bit code segment selector * rsi - target address * rdx - table address (0 if none) */ .code64 .globl cpu_call32 cpu_call32: cli /* Save table pointer */ mov %edx, %ebx /* * Debugging option, this outputs characters to the console UART * mov $0x3f8,%edx * mov $'a',%al * out %al,(%dx) */ pushf push %rdi /* 32-bit code segment */ lea compat(%rip), %rax push %rax .byte 0x48 /* REX prefix to force 64-bit far return */ retf .code32 compat: /* * We are now in compatibility mode with a default operand size of * 32 bits. First disable paging. */ movl %cr0, %eax andl $~X86_CR0_PG, %eax movl %eax, %cr0 /* Invalidate TLB */ xorl %eax, %eax movl %eax, %cr3 /* Disable Long mode in EFER (Extended Feature Enable Register) */ movl $MSR_EFER, %ecx rdmsr btr $_EFER_LME, %eax wrmsr /* Set up table pointer for _x86boot_start */ mov %ebx, %ecx /* Jump to the required target */ pushl %edi /* 32-bit code segment */ pushl %esi /* 32-bit target address */ retf
4ms/stm32mp1-baremetal
1,661
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/x86/cpu/wakeup.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2017, Bin Meng <bmeng.cn@gmail.com> * * From coreboot src/arch/x86/wakeup.S */ #include <acpi_s3.h> #include <asm/processor.h> #include <asm/processor-flags.h> #define RELOCATED(x) ((x) - __wakeup + WAKEUP_BASE) #define CODE_SEG (X86_GDT_ENTRY_16BIT_CS * X86_GDT_ENTRY_SIZE) #define DATA_SEG (X86_GDT_ENTRY_16BIT_DS * X86_GDT_ENTRY_SIZE) .code32 .globl __wakeup __wakeup: /* First prepare the jmp to the resume vector */ mov 0x4(%esp), %eax /* vector */ /* last 4 bits of linear addr are taken as offset */ andw $0x0f, %ax movw %ax, (__wakeup_offset) mov 0x4(%esp), %eax /* the rest is taken as segment */ shr $4, %eax movw %ax, (__wakeup_segment) /* Activate the right segment descriptor real mode */ ljmp $CODE_SEG, $RELOCATED(1f) 1: /* 16 bit code from here on... */ .code16 /* * Load the segment registers w/ properly configured segment * descriptors. They will retain these configurations (limits, * writability, etc.) once protected mode is turned off. */ mov $DATA_SEG, %ax mov %ax, %ds mov %ax, %es mov %ax, %fs mov %ax, %gs mov %ax, %ss /* Turn off protection */ movl %cr0, %eax andl $~X86_CR0_PE, %eax movl %eax, %cr0 /* Now really going into real mode */ ljmp $0, $RELOCATED(1f) 1: movw $0x0, %ax movw %ax, %ds movw %ax, %es movw %ax, %ss movw %ax, %fs movw %ax, %gs /* * This is a FAR JMP to the OS waking vector. * The C code changes the address to be correct. */ .byte 0xea __wakeup_offset = RELOCATED(.) .word 0x0000 __wakeup_segment = RELOCATED(.) .word 0x0000 .globl __wakeup_size __wakeup_size: .long . - __wakeup
4ms/stm32mp1-baremetal
4,514
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/x86/cpu/sipi_vector.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2015 Google, Inc * * Taken from coreboot file of the same name */ /* * The SIPI vector is responsible for initializing the APs in the sytem. It * loads microcode, sets up MSRs, and enables caching before calling into * C code */ #include <asm/global_data.h> #include <asm/msr-index.h> #include <asm/processor.h> #include <asm/processor-flags.h> #include <asm/sipi.h> #define CODE_SEG (X86_GDT_ENTRY_32BIT_CS * X86_GDT_ENTRY_SIZE) #define DATA_SEG (X86_GDT_ENTRY_32BIT_DS * X86_GDT_ENTRY_SIZE) /* * First we have the 16-bit section. Every AP process starts here. * The simple task is to load U-Boot's Global Descriptor Table (GDT) to allow * U-Boot's 32-bit code to become visible, then jump to ap_start. * * Note that this code is copied to RAM below 1MB in mp_init.c, and runs from * there, but the 32-bit code (ap_start and onwards) is part of U-Boot and * is therefore relocated to the top of RAM with other U-Boot code. This * means that for the 16-bit code we must write relocatable code, but for the * rest, we can do what we like. 
*/ .text .code16 .globl ap_start16 ap_start16: cli xorl %eax, %eax movl %eax, %cr3 /* Invalidate TLB */ /* setup the data segment */ movw %cs, %ax movw %ax, %ds /* Use an address relative to the data segment for the GDT */ movl $gdtaddr, %ebx subl $ap_start16, %ebx data32 lgdt (%ebx) movl %cr0, %eax andl $(~(X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_NE | \ X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)), %eax orl $(X86_CR0_NW | X86_CR0_CD | X86_CR0_PE), %eax movl %eax, %cr0 movl $ap_start_jmp, %eax subl $ap_start16, %eax movw %ax, %bp /* Jump to ap_start within U-Boot */ data32 cs ljmp *(%bp) .align 4 .globl sipi_params_16bit sipi_params_16bit: /* 48-bit far pointer */ ap_start_jmp: .long 0 /* offset set to ap_start by U-Boot */ .word CODE_SEG /* segment */ .word 0 /* padding */ gdtaddr: .word 0 /* limit */ .long 0 /* table */ .word 0 /* unused */ .globl ap_start16_code_end ap_start16_code_end: /* * Set up the special 'fs' segment for global_data. Then jump to ap_continue * to set up the AP. 
*/ .globl ap_start ap_start: .code32 movw $DATA_SEG, %ax movw %ax, %ds movw %ax, %es movw %ax, %ss movw %ax, %gs movw $(X86_GDT_ENTRY_32BIT_FS * X86_GDT_ENTRY_SIZE), %ax movw %ax, %fs /* Load the Interrupt descriptor table */ mov idt_ptr, %ebx lidt (%ebx) /* Obtain cpu number */ movl ap_count, %eax 1: movl %eax, %ecx inc %ecx lock cmpxchg %ecx, ap_count jnz 1b /* Setup stacks for each CPU */ movl stack_size, %eax mul %ecx movl stack_top, %edx subl %eax, %edx mov %edx, %esp /* Save cpu number */ mov %ecx, %esi /* Determine if one should check microcode versions */ mov microcode_ptr, %edi test %edi, %edi jz microcode_done /* Bypass if no microde exists */ /* Get the Microcode version */ mov $1, %eax cpuid mov $MSR_IA32_UCODE_REV, %ecx rdmsr /* If something already loaded skip loading again */ test %edx, %edx jnz microcode_done /* Determine if parallel microcode loading is allowed */ cmp $0xffffffff, microcode_lock je load_microcode /* Protect microcode loading */ lock_microcode: lock bts $0, microcode_lock jc lock_microcode load_microcode: /* Load new microcode */ mov $MSR_IA32_UCODE_WRITE, %ecx xor %edx, %edx mov %edi, %eax /* * The microcode pointer is passed in pointing to the header. Adjust * pointer to reflect the payload (header size is 48 bytes) */ add $UCODE_HEADER_LEN, %eax pusha wrmsr popa /* Unconditionally unlock microcode loading */ cmp $0xffffffff, microcode_lock je microcode_done xor %eax, %eax mov %eax, microcode_lock microcode_done: /* * Load MSRs. Each entry in the table consists of: * 0: index, * 4: value[31:0] * 8: value[63:32] * See struct saved_msr in mp_init.c. 
*/ mov msr_table_ptr, %edi mov msr_count, %ebx test %ebx, %ebx jz 1f load_msr: mov (%edi), %ecx mov 4(%edi), %eax mov 8(%edi), %edx wrmsr add $12, %edi dec %ebx jnz load_msr 1: /* Enable caching */ mov %cr0, %eax andl $(~(X86_CR0_CD | X86_CR0_NW)), %eax mov %eax, %cr0 /* c_handler(cpu_num) */ movl %esi, %eax /* cpu_num */ mov c_handler, %esi call *%esi /* This matches struct sipi_param */ .align 4 .globl sipi_params sipi_params: idt_ptr: .long 0 stack_top: .long 0 stack_size: .long 0 microcode_lock: .long 0 microcode_ptr: .long 0 msr_table_ptr: .long 0 msr_count: .long 0 c_handler: .long 0 ap_count: .long 0
4ms/stm32mp1-baremetal
2,856
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/x86/cpu/start16.S
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * U-Boot - x86 Startup Code
 *
 * (C) Copyright 2008-2011
 * Graeme Russ, <graeme.russ@gmail.com>
 *
 * (C) Copyright 2002,2003
 * Daniel Engström, Omicron Ceti AB, <daniel@omicron.se>
 */

#include <asm/global_data.h>
#include <asm/processor-flags.h>

#define BOOT_SEG	0xffff0000	/* linear segment of boot code */

/*
 * 16-bit code executed right after the reset vector: it switches the CPU
 * from real mode into 32-bit flat protected mode and far-jumps to _start.
 * While still in real mode all data references use a cs-segment override
 * ("data32 cs" prefixes), because DS is not usable yet.
 */
.section .start16, "ax"
.code16
.globl start16
start16:
	/* Save BIST (built-in self test result arrives in %eax) */
	movl	%eax, %ecx

	xorl	%eax, %eax
	movl	%eax, %cr3		/* Invalidate TLB */

	/* Turn off cache (this might require a 486-class CPU) */
	movl	%cr0, %eax
	orl	$(X86_CR0_NW | X86_CR0_CD), %eax
	movl	%eax, %cr0
	wbinvd

	/* load the temporary Global Descriptor Table */
	data32 cs lidt	idt_ptr
	data32 cs lgdt	gdt_ptr

	/* Now, we enter protected mode (set CR0.PE) */
	movl	%cr0, %eax
	orl	$X86_CR0_PE, %eax
	movl	%eax, %cr0

	/* Flush the prefetch queue */
	jmp	ff
ff:

	/* Finally restore BIST and jump to the 32-bit initialization code */
	movl	%ecx, %eax
	data32 cs ljmp	*code32start

	/* 48-bit far pointer */
code32start:
	.long	_start		/* offset */
	.word	0x10		/* segment */

idt_ptr:
	.word	0		/* limit */
	.long	0		/* base */

/*
 * The following Global Descriptor Table is just enough to get us into
 * 'Flat Protected Mode' - It will be discarded as soon as the final
 * GDT is setup in a safe location in RAM
 */
gdt_ptr:
	.word	0x1f		/* limit (31 bytes = 4 GDT entries - 1) */
	.long	BOOT_SEG + gdt_rom	/* base */

	/* Some CPUs are picky about GDT alignment... */
	.align	16
.globl gdt_rom
gdt_rom:
	/*
	 * The GDT table ...
	 *
	 *	 Selector	Type
	 *	 0x00		NULL
	 *	 0x08		Unused
	 *	 0x10		32bit code
	 *	 0x18		32bit data/stack
	 */

	/* The NULL Descriptor - Mandatory */
	.word	0x0000		/* limit_low */
	.word	0x0000		/* base_low */
	.byte	0x00		/* base_middle */
	.byte	0x00		/* access */
	.byte	0x00		/* flags + limit_high */
	.byte	0x00		/* base_high */

	/* Unused Descriptor - (matches Linux) */
	.word	0x0000		/* limit_low */
	.word	0x0000		/* base_low */
	.byte	0x00		/* base_middle */
	.byte	0x00		/* access */
	.byte	0x00		/* flags + limit_high */
	.byte	0x00		/* base_high */

	/*
	 * The Code Segment Descriptor:
	 * - Base   = 0x00000000
	 * - Size   = 4GB
	 * - Access = Present, Ring 0, Exec (Code), Readable
	 * - Flags  = 4kB Granularity, 32-bit
	 */
	.word	0xffff		/* limit_low */
	.word	0x0000		/* base_low */
	.byte	0x00		/* base_middle */
	.byte	0x9b		/* access */
	.byte	0xcf		/* flags + limit_high */
	.byte	0x00		/* base_high */

	/*
	 * The Data Segment Descriptor:
	 * - Base   = 0x00000000
	 * - Size   = 4GB
	 * - Access = Present, Ring 0, Non-Exec (Data), Writable
	 * - Flags  = 4kB Granularity, 32-bit
	 */
	.word	0xffff		/* limit_low */
	.word	0x0000		/* base_low */
	.byte	0x00		/* base_middle */
	.byte	0x93		/* access */
	.byte	0xcf		/* flags + limit_high */
	.byte	0x00		/* base_high */
4ms/stm32mp1-baremetal
2,530
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/x86/lib/fsp1/fsp_car.S
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (C) 2014, Bin Meng <bmeng.cn@gmail.com>
 */

#include <config.h>
#include <asm/post.h>

/*
 * Cache-As-RAM bring-up via the Intel FSP (Firmware Support Package).
 * No writable stack exists until FSP TempRamInit returns, so every
 * "call" below is really a jmp with %esp pointed at a handcrafted
 * ROM stack that supplies the return address (and parameters).
 */
.globl car_init
car_init:
	/*
	 * Note: ebp holds the BIST value (built-in self test) so far, but ebp
	 * will be destroyed through the FSP call, thus we have to test the
	 * BIST value here before we call into FSP.
	 */
	test	%ebp, %ebp
	jz	car_init_start

	post_code(POST_BIST_FAILURE)
	jmp	die

car_init_start:
	post_code(POST_CAR_START)

	/* "call" fsp_find_header via a ROM stack holding the return address */
	lea	fsp_find_header_romstack, %esp
	jmp	fsp_find_header

fsp_find_header_ret:
	/* EAX points to FSP_INFO_HEADER */
	mov	%eax, %ebp

	/* sanity test */
	cmp	$CONFIG_FSP_ADDR, %eax
	jb	die

	/* calculate TempRamInitEntry address */
	mov	0x30(%ebp), %eax
	add	0x1c(%ebp), %eax

	/* call FSP TempRamInitEntry to setup temporary stack */
	lea	temp_ram_init_romstack, %esp
	jmp	*%eax

temp_ram_init_ret:
	addl	$4, %esp		/* drop the parameter slot */
	cmp	$0, %eax		/* FSP returns 0 (EFI_SUCCESS) on success */
	jnz	car_init_fail

	post_code(POST_CAR_CPU_CACHE)

	/*
	 * The FSP TempRamInit initializes the ecx and edx registers to
	 * point to a temporary but writable memory range (Cache-As-RAM).
	 * ecx: the start of this temporary memory range,
	 * edx: the end of this range.
	 */

	/* stack grows down from top of CAR */
	movl	%edx, %esp
	subl	$4, %esp

	xor	%esi, %esi
	jmp	car_init_done

.global fsp_init_done
fsp_init_done:
	/*
	 * We come here from fsp_continue() with eax pointing to the HOB list.
	 * Save eax to esi temporarily.
	 */
	movl	%eax, %esi

car_init_done:
	/*
	 * Re-initialize the ebp (BIST) to zero, as we already reach here
	 * which means we passed BIST testing before.
	 */
	xorl	%ebp, %ebp
	jmp	car_init_ret

car_init_fail:
	post_code(POST_CAR_FAILURE)

die:
	hlt
	jmp	die
	hlt

/*
 * The function call before CAR initialization is tricky. It cannot
 * be called using the 'call' instruction but only the 'jmp' with
 * the help of a handcrafted stack in the ROM. The stack needs to
 * contain the function return address as well as the parameters.
 */
	.balign	4
fsp_find_header_romstack:
	.long	fsp_find_header_ret

	.balign	4
temp_ram_init_romstack:
	.long	temp_ram_init_ret
	.long	temp_ram_init_params
temp_ram_init_params:
_dt_ucode_base_size:
	/* These next two fields are filled in by binman */
.globl ucode_base
ucode_base:	/* Declared in microcode.h */
	.long	0			/* microcode base */
.globl ucode_size
ucode_size:	/* Declared in microcode.h */
	.long	0			/* microcode size */
	.long	CONFIG_SYS_MONITOR_BASE	/* code region base */
	.long	CONFIG_SYS_MONITOR_LEN	/* code region size */
4ms/stm32mp1-baremetal
5,617
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/x86/cpu/intel_common/car.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2014 Google, Inc * * From Coreboot file cpu/intel/model_206ax/cache_as_ram.inc * * Copyright (C) 2000,2007 Ronald G. Minnich <rminnich@gmail.com> * Copyright (C) 2005 Tyan (written by Yinghai Lu for Tyan) * Copyright (C) 2007-2008 coresystems GmbH * Copyright (C) 2012 Kyösti Mälkki <kyosti.malkki@gmail.com> */ #include <common.h> #include <asm/microcode.h> #include <asm/msr-index.h> #include <asm/mtrr.h> #include <asm/post.h> #include <asm/processor.h> #include <asm/processor-flags.h> #define MTRR_PHYS_BASE_MSR(reg) (0x200 + 2 * (reg)) #define MTRR_PHYS_MASK_MSR(reg) (0x200 + 2 * (reg) + 1) #define CACHE_AS_RAM_SIZE CONFIG_DCACHE_RAM_SIZE #define CACHE_AS_RAM_BASE CONFIG_DCACHE_RAM_BASE /* Cache 4GB - MRC_SIZE_KB for MRC */ #define CACHE_MRC_BYTES ((CONFIG_CACHE_MRC_SIZE_KB << 10) - 1) #define CACHE_MRC_BASE (0xFFFFFFFF - CACHE_MRC_BYTES) #define CACHE_MRC_MASK (~CACHE_MRC_BYTES) #define CPU_PHYSMASK_HI (1 << (CONFIG_CPU_ADDR_BITS - 32) - 1) #define NOEVICTMOD_MSR 0x2e0 /* * Note: ebp must not be touched in this code as it holds the BIST * value (built-in self test). We preserve this value until it can * be written to global_data when CAR is ready for use. 
*/ .globl car_init car_init: post_code(POST_CAR_START) /* Send INIT IPI to all excluding ourself */ movl $0x000C4500, %eax movl $0xFEE00300, %esi movl %eax, (%esi) /* TODO: Load microcode later - the 'no eviction' mode breaks this */ movl $MSR_IA32_UCODE_WRITE, %ecx xorl %edx, %edx movl $_dt_ucode_base_size, %eax movl (%eax), %eax addl $UCODE_HEADER_LEN, %eax wrmsr post_code(POST_CAR_SIPI) /* Zero out all fixed range and variable range MTRRs */ movl $mtrr_table, %esi movl $((mtrr_table_end - mtrr_table) / 2), %edi xorl %eax, %eax xorl %edx, %edx clear_mtrrs: movw (%esi), %bx movzx %bx, %ecx wrmsr add $2, %esi dec %edi jnz clear_mtrrs post_code(POST_CAR_MTRR) /* Configure the default memory type to uncacheable */ movl $MTRR_DEF_TYPE_MSR, %ecx rdmsr andl $(~0x00000cff), %eax wrmsr post_code(POST_CAR_UNCACHEABLE) /* Set Cache-as-RAM base address */ movl $(MTRR_PHYS_BASE_MSR(0)), %ecx movl $(CACHE_AS_RAM_BASE | MTRR_TYPE_WRBACK), %eax xorl %edx, %edx wrmsr post_code(POST_CAR_BASE_ADDRESS) /* Set Cache-as-RAM mask */ movl $(MTRR_PHYS_MASK_MSR(0)), %ecx movl $(~(CACHE_AS_RAM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax movl $CPU_PHYSMASK_HI, %edx wrmsr post_code(POST_CAR_MASK) /* Enable MTRR */ movl $MTRR_DEF_TYPE_MSR, %ecx rdmsr orl $MTRR_DEF_TYPE_EN, %eax wrmsr /* Enable cache (CR0.CD = 0, CR0.NW = 0) */ movl %cr0, %eax andl $(~(X86_CR0_CD | X86_CR0_NW)), %eax invd movl %eax, %cr0 /* enable the 'no eviction' mode */ movl $NOEVICTMOD_MSR, %ecx rdmsr orl $1, %eax andl $~2, %eax wrmsr /* Clear the cache memory region. 
This will also fill up the cache */ movl $CACHE_AS_RAM_BASE, %esi movl %esi, %edi movl $(CACHE_AS_RAM_SIZE / 4), %ecx xorl %eax, %eax rep stosl /* enable the 'no eviction run' state */ movl $NOEVICTMOD_MSR, %ecx rdmsr orl $3, %eax wrmsr post_code(POST_CAR_FILL) /* Enable Cache-as-RAM mode by disabling cache */ movl %cr0, %eax orl $X86_CR0_CD, %eax movl %eax, %cr0 /* Enable cache for our code in Flash because we do XIP here */ movl $MTRR_PHYS_BASE_MSR(1), %ecx xorl %edx, %edx movl $car_init_ret, %eax andl $(~(CONFIG_XIP_ROM_SIZE - 1)), %eax orl $MTRR_TYPE_WRPROT, %eax wrmsr movl $MTRR_PHYS_MASK_MSR(1), %ecx movl $CPU_PHYSMASK_HI, %edx movl $(~(CONFIG_XIP_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax wrmsr post_code(POST_CAR_ROM_CACHE) #ifdef CONFIG_CACHE_MRC_BIN /* Enable caching for ram init code to run faster */ movl $MTRR_PHYS_BASE_MSR(2), %ecx movl $(CACHE_MRC_BASE | MTRR_TYPE_WRPROT), %eax xorl %edx, %edx wrmsr movl $MTRR_PHYS_MASK_MSR(2), %ecx movl $(CACHE_MRC_MASK | MTRR_PHYS_MASK_VALID), %eax movl $CPU_PHYSMASK_HI, %edx wrmsr #endif post_code(POST_CAR_MRC_CACHE) /* Enable cache */ movl %cr0, %eax andl $(~(X86_CR0_CD | X86_CR0_NW)), %eax movl %eax, %cr0 post_code(POST_CAR_CPU_CACHE) /* All CPUs need to be in Wait for SIPI state */ wait_for_sipi: movl (%esi), %eax bt $12, %eax jc wait_for_sipi /* return */ jmp car_init_ret .globl car_uninit car_uninit: /* Disable cache */ movl %cr0, %eax orl $X86_CR0_CD, %eax movl %eax, %cr0 /* Disable MTRRs */ movl $MTRR_DEF_TYPE_MSR, %ecx rdmsr andl $(~MTRR_DEF_TYPE_EN), %eax wrmsr /* Disable the no-eviction run state */ movl $NOEVICTMOD_MSR, %ecx rdmsr andl $~2, %eax wrmsr invd /* Disable the no-eviction mode */ rdmsr andl $~1, %eax wrmsr #ifdef CONFIG_CACHE_MRC_BIN /* Clear the MTRR that was used to cache MRC */ xorl %eax, %eax xorl %edx, %edx movl $MTRR_PHYS_BASE_MSR(2), %ecx wrmsr movl $MTRR_PHYS_MASK_MSR(2), %ecx wrmsr #endif /* Enable MTRRs */ movl $MTRR_DEF_TYPE_MSR, %ecx rdmsr orl $MTRR_DEF_TYPE_EN, %eax wrmsr invd ret 
mtrr_table: /* Fixed MTRRs */ .word 0x250, 0x258, 0x259 .word 0x268, 0x269, 0x26A .word 0x26B, 0x26C, 0x26D .word 0x26E, 0x26F /* Variable MTRRs */ .word 0x200, 0x201, 0x202, 0x203 .word 0x204, 0x205, 0x206, 0x207 .word 0x208, 0x209, 0x20A, 0x20B .word 0x20C, 0x20D, 0x20E, 0x20F .word 0x210, 0x211, 0x212, 0x213 mtrr_table_end: .align 4 _dt_ucode_base_size: /* These next two fields are filled in by binman */ .globl ucode_base ucode_base: /* Declared in microcode.h */ .long 0 /* microcode base */ .globl ucode_size ucode_size: /* Declared in microcode.h */ .long 0 /* microcode size */
4ms/stm32mp1-baremetal
1,139
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/x86/cpu/i386/setjmp.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Written by H. Peter Anvin <hpa@zytor.com> * Brought in from Linux v4.4 and modified for U-Boot * From Linux arch/um/sys-i386/setjmp.S */ #define _REGPARM /* * The jmp_buf is assumed to contain the following, in order: * %ebx * %esp * %ebp * %esi * %edi * <return address> */ .text .align 4 .globl setjmp .type setjmp, @function setjmp: #ifdef _REGPARM movl %eax, %edx #else movl 4(%esp), %edx #endif popl %ecx /* Return address, and adjust the stack */ xorl %eax, %eax /* Return value */ movl %ebx, (%edx) movl %esp, 4(%edx) /* Post-return %esp! */ pushl %ecx /* Make the call/return stack happy */ movl %ebp, 8(%edx) movl %esi, 12(%edx) movl %edi, 16(%edx) movl %ecx, 20(%edx) /* Return address */ ret /* Provide function size if needed */ .size setjmp, .-setjmp .align 4 .globl longjmp .type longjmp, @function longjmp: #ifdef _REGPARM xchgl %eax, %edx #else movl 4(%esp), %edx /* jmp_ptr address */ #endif movl (%edx), %ebx movl 4(%edx), %esp movl 8(%edx), %ebp movl 12(%edx), %esi movl 16(%edx), %edi jmp *20(%edx) .size longjmp, .-longjmp
4ms/stm32mp1-baremetal
2,245
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/x86/cpu/i386/call64.S
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (C) Copyright 2014 Google, Inc
 * Copyright (C) 1991, 1992, 1993  Linus Torvalds
 *
 * Parts of this copied from Linux arch/x86/boot/compressed/head_64.S
 */

#include <asm/global_data.h>
#include <asm/msr-index.h>
#include <asm/processor-flags.h>

/*
 * Transition from 32-bit protected mode to 64-bit long mode and jump to
 * a 64-bit target. Sequence: load 64-bit GDT, enable PAE, load page
 * tables, set EFER.LME, then set CR0.PG and far-return into a CS.L=1
 * code segment.
 */
.code32
.globl cpu_call64
cpu_call64:
	/*
	 * cpu_call64(ulong pgtable, ulong setup_base, ulong target)
	 *
	 * eax - pgtable
	 * edx - setup_base
	 * ecx - target
	 */
	cli
	push	%ecx		/* arg2 = target */
	push	%edx		/* arg1 = setup_base */
	mov	%eax, %ebx	/* keep pgtable while %eax is reused below */

	/* Load new GDT with the 64bit segments using 32bit descriptor */
	leal	gdt, %eax
	movl	%eax, gdt+2	/* patch the GDT base field in-place */
	lgdt	gdt

	/* Enable PAE mode */
	movl	$(X86_CR4_PAE), %eax
	movl	%eax, %cr4

	/* Enable the boot page tables */
	leal	(%ebx), %eax
	movl	%eax, %cr3

	/* Enable Long mode in EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_LME, %eax
	wrmsr

	/* After gdt is loaded */
	xorl	%eax, %eax
	lldt	%ax		/* null LDT */
	movl	$0x20, %eax
	ltr	%ax		/* task register = TS descriptor (selector 0x20) */

	/*
	 * Setup for the jump to 64bit mode
	 *
	 * When the jump is performed we will be in long mode but
	 * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
	 * (and in turn EFER.LMA = 1). To jump into 64bit mode we use
	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
	 * We place all of the values on our mini stack so lret can
	 * used to perform that far jump. See the gdt below.
	 */
	pop	%esi		/* setup_base */

	pushl	$0x10		/* far-return CS = __KERNEL_CS */
	leal	lret_target, %eax
	pushl	%eax		/* far-return EIP */

	/* Enter paged protected Mode, activating Long Mode */
	movl	$(X86_CR0_PG | X86_CR0_PE), %eax
	movl	%eax, %cr0

	/* Jump from 32bit compatibility mode into 64bit mode. */
	lret

code64:
lret_target:
	pop	%eax		/* target */
	mov	%eax, %eax	/* Clear bits 63:32 */
	jmp	*%eax		/* Jump to the 64-bit target */

.globl call64_stub_size
call64_stub_size:
	.long	. - cpu_call64

	.data
	.align	16
	.globl	gdt64
gdt64:
gdt:
	.word	gdt_end - gdt - 1	/* limit */
	.long	gdt			/* Fixed up by code above */
	.word	0
	.quad	0x0000000000000000	/* NULL descriptor */
	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
	.quad	0x0080890000000000	/* TS descriptor */
	.quad	0x0000000000000000	/* TS continued */
gdt_end:
4ms/stm32mp1-baremetal
2,247
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/x86/cpu/quark/car.S
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (C) 2015, Bin Meng <bmeng.cn@gmail.com>
 */

#include <config.h>
#include <asm/pci.h>
#include <asm/post.h>
#include <asm/arch/quark.h>
#include <asm/arch/msg_port.h>

.globl car_init
car_init:
	post_code(POST_CAR_START)

	/*
	 * Quark SoC contains an embedded 512KiB SRAM (eSRAM) that is
	 * initialized by hardware. eSRAM is the ideal place to be used
	 * for Cache-As-RAM (CAR) before system memory is available.
	 *
	 * Relocate this eSRAM to a suitable location in the physical
	 * memory map and enable it.
	 *
	 * Note: no RAM stack exists yet, so the message-port helpers are
	 * reached with jmp, with %esp carrying the return address.
	 */

	/* Host Memory Bound Register P03h:R08h */
	mov	$((MSG_PORT_HOST_BRIDGE << 16) | (HM_BOUND << 8)), %eax
	mov	$(DRAM_BASE + DRAM_MAX_SIZE + ESRAM_SIZE), %edx
	lea	1f, %esp
	jmp	msg_port_write
1:

	/* eSRAM Block Page Control Register P05h:R82h */
	mov	$((MSG_PORT_MEM_MGR << 16) | (ESRAM_BLK_CTRL << 8)), %eax
	mov	$(ESRAM_BLOCK_MODE | (CONFIG_ESRAM_BASE >> 24)), %edx
	lea	2f, %esp
	jmp	msg_port_write
2:

	post_code(POST_CAR_CPU_CACHE)
	jmp	car_init_ret

msg_port_read:
	/*
	 * Parameter:
	 *   eax[23:16] - Message Port ID
	 *   eax[15:08] - Register Address
	 *
	 * Return Value:
	 *   eax - Message Port Register value
	 *
	 * Return Address: esp
	 */

	or	$((MSG_OP_READ << 24) | MSG_BYTE_ENABLE), %eax
	mov	%eax, %ebx

	/* Write MCR B0:D0:F0:RD0 */
	mov	$(PCI_CFG_EN | MSG_CTRL_REG), %eax
	mov	$PCI_REG_ADDR, %dx
	out	%eax, %dx
	mov	$PCI_REG_DATA, %dx
	mov	%ebx, %eax
	out	%eax, %dx

	/* Read MDR B0:D0:F0:RD4 */
	mov	$(PCI_CFG_EN | MSG_DATA_REG), %eax
	mov	$PCI_REG_ADDR, %dx
	out	%eax, %dx
	mov	$PCI_REG_DATA, %dx
	in	%dx, %eax

	jmp	*%esp		/* "return" to the address stashed in %esp */

msg_port_write:
	/*
	 * Parameter:
	 *   eax[23:16] - Message Port ID
	 *   eax[15:08] - Register Address
	 *   edx - Message Port Register value to write
	 *
	 * Return Address: esp
	 */

	or	$((MSG_OP_WRITE << 24) | MSG_BYTE_ENABLE), %eax
	mov	%eax, %esi
	mov	%edx, %edi

	/* Write MDR B0:D0:F0:RD4 */
	mov	$(PCI_CFG_EN | MSG_DATA_REG), %eax
	mov	$PCI_REG_ADDR, %dx
	out	%eax, %dx
	mov	$PCI_REG_DATA, %dx
	mov	%edi, %eax
	out	%eax, %dx

	/* Write MCR B0:D0:F0:RD0 (triggers the message-port transaction) */
	mov	$(PCI_CFG_EN | MSG_CTRL_REG), %eax
	mov	$PCI_REG_ADDR, %dx
	out	%eax, %dx
	mov	$PCI_REG_DATA, %dx
	mov	%esi, %eax
	out	%eax, %dx

	jmp	*%esp		/* "return" to the address stashed in %esp */
4ms/stm32mp1-baremetal
1,634
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/mips/mach-jz47xx/start.S
// SPDX-License-Identifier: GPL-2.0+ /* * Startup Code for MIPS32 XBURST CPU-core * * Copyright (c) 2010 Xiangfu Liu <xiangfu@sharism.cc> */ #include <config.h> #include <asm/regdef.h> #include <asm/mipsregs.h> #include <asm/addrspace.h> #include <asm/cacheops.h> #include <asm/cache.h> #include <mach/jz4780.h> .set noreorder .globl _start .text _start: #ifdef CONFIG_SPL_BUILD /* magic value ("MSPL") */ .word 0x4d53504c /* Invalidate BTB */ mfc0 t0, CP0_CONFIG, 7 nop ori t0, 2 mtc0 t0, CP0_CONFIG, 7 nop /* * CU0=UM=EXL=IE=0, BEV=ERL=1, IP2~7=1 */ li t0, 0x0040FC04 mtc0 t0, CP0_STATUS /* CAUSE register */ /* IV=1, use the specical interrupt vector (0x200) */ li t1, 0x00800000 mtc0 t1, CP0_CAUSE #ifdef CONFIG_SOC_JZ4780 /* enable bridge radical mode */ la t0, CPM_BASE lw t1, 0x24(t0) ori t1, t1, 0x22 sw t1, 0x24(t0) #endif /* Set up stack */ li sp, CONFIG_SPL_STACK b board_init_f nop #ifdef CONFIG_SOC_JZ4780 .globl enable_caches .ent enable_caches enable_caches: mtc0 zero, CP0_TAGLO mtc0 zero, CP0_TAGHI li t0, KSEG0 addu t1, t0, CONFIG_SYS_DCACHE_SIZE 1: cache INDEX_STORE_TAG_D, 0(t0) bne t0, t1, 1b addiu t0, t0, CONFIG_SYS_CACHELINE_SIZE li t0, KSEG0 addu t1, t0, CONFIG_SYS_ICACHE_SIZE 2: cache INDEX_STORE_TAG_I, 0(t0) bne t0, t1, 2b addiu t0, t0, CONFIG_SYS_CACHELINE_SIZE /* Invalidate BTB */ mfc0 t0, CP0_CONFIG, 7 nop ori t0, 2 mtc0 t0, CP0_CONFIG, 7 nop /* Enable caches */ li t0, CONF_CM_CACHABLE_NONCOHERENT mtc0 t0, CP0_CONFIG nop jr ra nop .end enable_caches #endif /* CONFIG_SOC_JZ4780 */ #endif /* !CONFIG_SPL_BUILD */
4ms/stm32mp1-baremetal
1,691
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/mips/mach-mscc/lowlevel_init_luton.S
/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ /* * Copyright (c) 2018 Microsemi Corporation */ #include <asm/asm.h> #include <asm/regdef.h> #define BASE_MACRO 0x600a0000 #define REG_OFFSET(t, o) (t + (o*4)) #define REG_MACRO(x) REG_OFFSET(BASE_MACRO, x) #define BIT(nr) (1 << (nr)) #define MACRO_CTRL_PLL5G_STATUS_PLL5G_STATUS0 REG_MACRO(6) #define MACRO_CTRL_PLL5G_STATUS_PLL5G_STATUS0_LOCK_STATUS BIT(0) #define MACRO_CTRL_PLL5G_CFG_PLL5G_CFG2 REG_MACRO(2) #define MACRO_CTRL_PLL5G_CFG_PLL5G_CFG0 REG_MACRO(0) #define MACRO_CTRL_PLL5G_CFG_PLL5G_CFG0_CPU_CLK_DIV (0x3F << 6) #define MACRO_CTRL_PLL5G_CFG_PLL5G_CFG0_CPU_CLK_DIV_ENC(x) (x << 6) .set noreorder LEAF(pll_init) /* Make sure PLL is locked */ lw v0, MACRO_CTRL_PLL5G_STATUS_PLL5G_STATUS0 andi v1, v0, MACRO_CTRL_PLL5G_STATUS_PLL5G_STATUS0_LOCK_STATUS bne v1, zero, 1f nop /* Black magic from frontend */ li v1, 0x00610400 sw v1, MACRO_CTRL_PLL5G_CFG_PLL5G_CFG2 li v1, 0x00610c00 sw v1, MACRO_CTRL_PLL5G_CFG_PLL5G_CFG2 li v1, 0x00610800 sw v1, MACRO_CTRL_PLL5G_CFG_PLL5G_CFG2 li v1, 0x00610000 sw v1, MACRO_CTRL_PLL5G_CFG_PLL5G_CFG2 /* Wait for lock */ 2: lw v0, MACRO_CTRL_PLL5G_STATUS_PLL5G_STATUS0 andi v1, v0, MACRO_CTRL_PLL5G_STATUS_PLL5G_STATUS0_LOCK_STATUS /* Keep looping if zero (no lock bit yet) */ beq v1, zero, 2b nop /* Setup PLL CPU clock divider for 416MHz */ 1: lw v0, MACRO_CTRL_PLL5G_CFG_PLL5G_CFG0 /* Keep reserved bits */ li v1, ~MACRO_CTRL_PLL5G_CFG_PLL5G_CFG0_CPU_CLK_DIV and v0, v0, v1 /* Set code 6 ~ 416.66 MHz */ ori v0, v0, MACRO_CTRL_PLL5G_CFG_PLL5G_CFG0_CPU_CLK_DIV_ENC(6) sw v0, MACRO_CTRL_PLL5G_CFG_PLL5G_CFG0 jr ra nop END(pll_init)
4ms/stm32mp1-baremetal
11,070
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/mips/lib/cache_init.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Cache-handling routined for MIPS CPUs * * Copyright (c) 2003 Wolfgang Denk <wd@denx.de> */ #include <asm-offsets.h> #include <config.h> #include <asm/asm.h> #include <asm/regdef.h> #include <asm/mipsregs.h> #include <asm/addrspace.h> #include <asm/cacheops.h> #include <asm/cm.h> .macro f_fill64 dst, offset, val LONG_S \val, (\offset + 0 * LONGSIZE)(\dst) LONG_S \val, (\offset + 1 * LONGSIZE)(\dst) LONG_S \val, (\offset + 2 * LONGSIZE)(\dst) LONG_S \val, (\offset + 3 * LONGSIZE)(\dst) LONG_S \val, (\offset + 4 * LONGSIZE)(\dst) LONG_S \val, (\offset + 5 * LONGSIZE)(\dst) LONG_S \val, (\offset + 6 * LONGSIZE)(\dst) LONG_S \val, (\offset + 7 * LONGSIZE)(\dst) #if LONGSIZE == 4 LONG_S \val, (\offset + 8 * LONGSIZE)(\dst) LONG_S \val, (\offset + 9 * LONGSIZE)(\dst) LONG_S \val, (\offset + 10 * LONGSIZE)(\dst) LONG_S \val, (\offset + 11 * LONGSIZE)(\dst) LONG_S \val, (\offset + 12 * LONGSIZE)(\dst) LONG_S \val, (\offset + 13 * LONGSIZE)(\dst) LONG_S \val, (\offset + 14 * LONGSIZE)(\dst) LONG_S \val, (\offset + 15 * LONGSIZE)(\dst) #endif .endm .macro cache_loop curr, end, line_sz, op 10: cache \op, 0(\curr) PTR_ADDU \curr, \curr, \line_sz bne \curr, \end, 10b .endm .macro l1_info sz, line_sz, off .set push .set noat mfc0 $1, CP0_CONFIG, 1 /* detect line size */ srl \line_sz, $1, \off + MIPS_CONF1_DL_SHF - MIPS_CONF1_DA_SHF andi \line_sz, \line_sz, (MIPS_CONF1_DL >> MIPS_CONF1_DL_SHF) move \sz, zero beqz \line_sz, 10f li \sz, 2 sllv \line_sz, \sz, \line_sz /* detect associativity */ srl \sz, $1, \off + MIPS_CONF1_DA_SHF - MIPS_CONF1_DA_SHF andi \sz, \sz, (MIPS_CONF1_DA >> MIPS_CONF1_DA_SHF) addiu \sz, \sz, 1 /* sz *= line_sz */ mul \sz, \sz, \line_sz /* detect log32(sets) */ srl $1, $1, \off + MIPS_CONF1_DS_SHF - MIPS_CONF1_DA_SHF andi $1, $1, (MIPS_CONF1_DS >> MIPS_CONF1_DS_SHF) addiu $1, $1, 1 andi $1, $1, 0x7 /* sz <<= log32(sets) */ sllv \sz, \sz, $1 /* sz *= 32 */ li $1, 32 mul \sz, \sz, $1 10: .set pop .endm /* * 
mips_cache_reset - low level initialisation of the primary caches * * This routine initialises the primary caches to ensure that they have good * parity. It must be called by the ROM before any cached locations are used * to prevent the possibility of data with bad parity being written to memory. * * To initialise the instruction cache it is essential that a source of data * with good parity is available. This routine will initialise an area of * memory starting at location zero to be used as a source of parity. * * Note that this function does not follow the standard calling convention & * may clobber typically callee-saved registers. * * RETURNS: N/A * */ #define R_RETURN s0 #define R_IC_SIZE s1 #define R_IC_LINE s2 #define R_DC_SIZE s3 #define R_DC_LINE s4 #define R_L2_SIZE s5 #define R_L2_LINE s6 #define R_L2_BYPASSED s7 #define R_L2_L2C t8 LEAF(mips_cache_reset) move R_RETURN, ra #ifdef CONFIG_MIPS_L2_CACHE /* * For there to be an L2 present, Config2 must be present. If it isn't * then we proceed knowing there's no L2 cache. */ move R_L2_SIZE, zero move R_L2_LINE, zero move R_L2_BYPASSED, zero move R_L2_L2C, zero mfc0 t0, CP0_CONFIG, 1 bgez t0, l2_probe_done /* * From MIPSr6 onwards the L2 cache configuration might not be reported * by Config2. The Config5.L2C bit indicates whether this is the case, * and if it is then we need knowledge of where else to look. For cores * from Imagination Technologies this is a CM GCR. 
*/ # if __mips_isa_rev >= 6 /* Check that Config5 exists */ mfc0 t0, CP0_CONFIG, 2 bgez t0, l2_probe_cop0 mfc0 t0, CP0_CONFIG, 3 bgez t0, l2_probe_cop0 mfc0 t0, CP0_CONFIG, 4 bgez t0, l2_probe_cop0 /* Check Config5.L2C is set */ mfc0 t0, CP0_CONFIG, 5 and R_L2_L2C, t0, MIPS_CONF5_L2C beqz R_L2_L2C, l2_probe_cop0 /* Config5.L2C is set */ # ifdef CONFIG_MIPS_CM /* The CM will provide L2 configuration */ PTR_LI t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE) lw t1, GCR_L2_CONFIG(t0) bgez t1, l2_probe_done ext R_L2_LINE, t1, \ GCR_L2_CONFIG_LINESZ_SHIFT, GCR_L2_CONFIG_LINESZ_BITS beqz R_L2_LINE, l2_probe_done li t2, 2 sllv R_L2_LINE, t2, R_L2_LINE ext t2, t1, GCR_L2_CONFIG_ASSOC_SHIFT, GCR_L2_CONFIG_ASSOC_BITS addiu t2, t2, 1 mul R_L2_SIZE, R_L2_LINE, t2 ext t2, t1, GCR_L2_CONFIG_SETSZ_SHIFT, GCR_L2_CONFIG_SETSZ_BITS sllv R_L2_SIZE, R_L2_SIZE, t2 li t2, 64 mul R_L2_SIZE, R_L2_SIZE, t2 /* Bypass the L2 cache so that we can init the L1s early */ or t1, t1, GCR_L2_CONFIG_BYPASS sw t1, GCR_L2_CONFIG(t0) sync li R_L2_BYPASSED, 1 /* Zero the L2 tag registers */ sw zero, GCR_L2_TAG_ADDR(t0) sw zero, GCR_L2_TAG_ADDR_UPPER(t0) sw zero, GCR_L2_TAG_STATE(t0) sw zero, GCR_L2_TAG_STATE_UPPER(t0) sw zero, GCR_L2_DATA(t0) sw zero, GCR_L2_DATA_UPPER(t0) sync # else /* We don't know how to retrieve L2 configuration on this system */ # endif b l2_probe_done # endif /* * For pre-r6 systems, or r6 systems with Config5.L2C==0, probe the L2 * cache configuration from the cop0 Config2 register. 
*/ l2_probe_cop0: mfc0 t0, CP0_CONFIG, 2 srl R_L2_LINE, t0, MIPS_CONF2_SL_SHF andi R_L2_LINE, R_L2_LINE, MIPS_CONF2_SL >> MIPS_CONF2_SL_SHF beqz R_L2_LINE, l2_probe_done li t1, 2 sllv R_L2_LINE, t1, R_L2_LINE srl t1, t0, MIPS_CONF2_SA_SHF andi t1, t1, MIPS_CONF2_SA >> MIPS_CONF2_SA_SHF addiu t1, t1, 1 mul R_L2_SIZE, R_L2_LINE, t1 srl t1, t0, MIPS_CONF2_SS_SHF andi t1, t1, MIPS_CONF2_SS >> MIPS_CONF2_SS_SHF sllv R_L2_SIZE, R_L2_SIZE, t1 li t1, 64 mul R_L2_SIZE, R_L2_SIZE, t1 /* Attempt to bypass the L2 so that we can init the L1s early */ or t0, t0, MIPS_CONF2_L2B mtc0 t0, CP0_CONFIG, 2 ehb mfc0 t0, CP0_CONFIG, 2 and R_L2_BYPASSED, t0, MIPS_CONF2_L2B /* Zero the L2 tag registers */ mtc0 zero, CP0_TAGLO, 4 ehb l2_probe_done: #endif #ifndef CONFIG_SYS_CACHE_SIZE_AUTO li R_IC_SIZE, CONFIG_SYS_ICACHE_SIZE li R_IC_LINE, CONFIG_SYS_ICACHE_LINE_SIZE #else l1_info R_IC_SIZE, R_IC_LINE, MIPS_CONF1_IA_SHF #endif #ifndef CONFIG_SYS_CACHE_SIZE_AUTO li R_DC_SIZE, CONFIG_SYS_DCACHE_SIZE li R_DC_LINE, CONFIG_SYS_DCACHE_LINE_SIZE #else l1_info R_DC_SIZE, R_DC_LINE, MIPS_CONF1_DA_SHF #endif #ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD /* Determine the largest L1 cache size */ #ifndef CONFIG_SYS_CACHE_SIZE_AUTO #if CONFIG_SYS_ICACHE_SIZE > CONFIG_SYS_DCACHE_SIZE li v0, CONFIG_SYS_ICACHE_SIZE #else li v0, CONFIG_SYS_DCACHE_SIZE #endif #else move v0, R_IC_SIZE sltu t1, R_IC_SIZE, R_DC_SIZE movn v0, R_DC_SIZE, t1 #endif /* * Now clear that much memory starting from zero. */ PTR_LI a0, CKSEG1ADDR(CONFIG_MIPS_CACHE_INDEX_BASE) PTR_ADDU a1, a0, v0 2: PTR_ADDIU a0, 64 f_fill64 a0, -64, zero bne a0, a1, 2b #endif /* CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD */ #ifdef CONFIG_MIPS_L2_CACHE /* * If the L2 is bypassed, init the L1 first so that we can execute the * rest of the cache initialisation using the L1 instruction cache. 
*/ bnez R_L2_BYPASSED, l1_init l2_init: PTR_LI t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE) PTR_ADDU t1, t0, R_L2_SIZE 1: cache INDEX_STORE_TAG_SD, 0(t0) PTR_ADDU t0, t0, R_L2_LINE bne t0, t1, 1b /* * If the L2 was bypassed then we already initialised the L1s before * the L2, so we are now done. */ bnez R_L2_BYPASSED, l2_unbypass #endif /* * The TagLo registers used depend upon the CPU implementation, but the * architecture requires that it is safe for software to write to both * TagLo selects 0 & 2 covering supported cases. */ l1_init: mtc0 zero, CP0_TAGLO mtc0 zero, CP0_TAGLO, 2 ehb /* * The caches are probably in an indeterminate state, so we force good * parity into them by doing an invalidate for each line. If * CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD is set then we'll proceed to * perform a load/fill & a further invalidate for each line, assuming * that the bottom of RAM (having just been cleared) will generate good * parity for the cache. */ /* * Initialize the I-cache first, */ blez R_IC_SIZE, 1f PTR_LI t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE) PTR_ADDU t1, t0, R_IC_SIZE /* clear tag to invalidate */ cache_loop t0, t1, R_IC_LINE, INDEX_STORE_TAG_I #ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD /* fill once, so data field parity is correct */ PTR_LI t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE) cache_loop t0, t1, R_IC_LINE, FILL /* invalidate again - prudent but not strictly neccessary */ PTR_LI t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE) cache_loop t0, t1, R_IC_LINE, INDEX_STORE_TAG_I #endif sync /* * Enable use of the I-cache by setting Config.K0. The code for this * must be executed from KSEG1. Jump from KSEG0 to KSEG1 to do this. * Jump back to KSEG0 after caches are enabled and insert an * instruction hazard barrier. */ PTR_LA t0, change_k0_cca li t1, CPHYSADDR(~0) and t0, t0, t1 PTR_LI t1, CKSEG1 or t0, t0, t1 li a0, CONF_CM_CACHABLE_NONCOHERENT jalr.hb t0 /* * then initialize D-cache. 
*/ 1: blez R_DC_SIZE, 3f PTR_LI t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE) PTR_ADDU t1, t0, R_DC_SIZE /* clear all tags */ cache_loop t0, t1, R_DC_LINE, INDEX_STORE_TAG_D #ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD /* load from each line (in cached space) */ PTR_LI t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE) 2: LONG_L zero, 0(t0) PTR_ADDU t0, R_DC_LINE bne t0, t1, 2b /* clear all tags */ PTR_LI t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE) cache_loop t0, t1, R_DC_LINE, INDEX_STORE_TAG_D #endif 3: #ifdef CONFIG_MIPS_L2_CACHE /* If the L2 isn't bypassed then we're done */ beqz R_L2_BYPASSED, return /* The L2 is bypassed - go initialise it */ b l2_init l2_unbypass: # if __mips_isa_rev >= 6 beqz R_L2_L2C, 1f li t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE) lw t1, GCR_L2_CONFIG(t0) xor t1, t1, GCR_L2_CONFIG_BYPASS sw t1, GCR_L2_CONFIG(t0) sync ehb b 2f # endif 1: mfc0 t0, CP0_CONFIG, 2 xor t0, t0, MIPS_CONF2_L2B mtc0 t0, CP0_CONFIG, 2 ehb 2: # ifdef CONFIG_MIPS_CM /* Config3 must exist for a CM to be present */ mfc0 t0, CP0_CONFIG, 1 bgez t0, 2f mfc0 t0, CP0_CONFIG, 2 bgez t0, 2f /* Check Config3.CMGCR to determine CM presence */ mfc0 t0, CP0_CONFIG, 3 and t0, t0, MIPS_CONF3_CMGCR beqz t0, 2f /* Change Config.K0 to a coherent CCA */ PTR_LA t0, change_k0_cca li a0, CONF_CM_CACHABLE_COW jalr t0 /* * Join the coherent domain such that the caches of this core are kept * coherent with those of other cores. */ PTR_LI t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE) lw t1, GCR_REV(t0) li t2, GCR_REV_CM3 li t3, GCR_Cx_COHERENCE_EN bge t1, t2, 1f li t3, GCR_Cx_COHERENCE_DOM_EN 1: sw t3, GCR_Cx_COHERENCE(t0) ehb 2: # endif #endif return: /* Ensure all cache operations complete before returning */ sync jr R_RETURN END(mips_cache_reset) LEAF(change_k0_cca) mfc0 t0, CP0_CONFIG #if __mips_isa_rev >= 2 ins t0, a0, 0, 3 #else xor a0, a0, t0 andi a0, a0, CONF_CM_CMASK xor a0, a0, t0 #endif mtc0 a0, CP0_CONFIG jr.hb ra END(change_k0_cca)
4ms/stm32mp1-baremetal
3,891
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/mips/lib/genex.S
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007 Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 */

#include <asm/asm.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>

/* Low bits of CP0 Status cleared on exception exit (IE/EXL/ERL/KSU) */
#define STATMASK 0x1f

	/* Delay slots are filled explicitly throughout this file */
	.set	noreorder

/*
 * Macros copied and adapted from Linux MIPS.
 *
 * They spill/reload the CPU register file to/from a struct pt_regs frame
 * addressed via sp, using the PT_* offsets from asm-offsets.h.
 */

	/* Save $1 (AT). Needs "noat" so the assembler won't use AT itself. */
	.macro	SAVE_AT
	.set	push
	.set	noat
	LONG_S	$1, PT_R1(sp)
	.set	pop
	.endm

	/*
	 * Save caller-clobbered temporaries plus HI/LO.
	 * HI/LO only exist before MIPS r6 (r6 dropped the accumulator).
	 * On 32-bit ABIs $8/$9 are temps and saved here; on 64-bit they are
	 * argument registers and SAVE_SOME takes care of them instead.
	 */
	.macro	SAVE_TEMP
#if __mips_isa_rev < 6
	mfhi	v1
#endif
#ifdef CONFIG_32BIT
	LONG_S	$8, PT_R8(sp)
	LONG_S	$9, PT_R9(sp)
#endif
	LONG_S	$10, PT_R10(sp)
	LONG_S	$11, PT_R11(sp)
	LONG_S	$12, PT_R12(sp)
#if __mips_isa_rev < 6
	LONG_S	v1, PT_HI(sp)
	mflo	v1			# reuse v1 for LO while stores drain
#endif
	LONG_S	$13, PT_R13(sp)
	LONG_S	$14, PT_R14(sp)
	LONG_S	$15, PT_R15(sp)
	LONG_S	$24, PT_R24(sp)
#if __mips_isa_rev < 6
	LONG_S	v1, PT_LO(sp)
#endif
	.endm

	/* Save the callee-saved registers $16-$23 (s0-s7) and $30 (s8/fp). */
	.macro	SAVE_STATIC
	LONG_S	$16, PT_R16(sp)
	LONG_S	$17, PT_R17(sp)
	LONG_S	$18, PT_R18(sp)
	LONG_S	$19, PT_R19(sp)
	LONG_S	$20, PT_R20(sp)
	LONG_S	$21, PT_R21(sp)
	LONG_S	$22, PT_R22(sp)
	LONG_S	$23, PT_R23(sp)
	LONG_S	$30, PT_R30(sp)
	.endm

	/*
	 * Allocate the pt_regs frame below the interrupted sp and save the
	 * registers needed by any handler: old sp, v0/v1, a0-a3, gp, ra,
	 * and the CP0 Status/Cause/EPC snapshot. Uses k1 as scratch, which
	 * is reserved for kernel/exception use.
	 */
	.macro	SAVE_SOME
	.set	push
	.set	noat
	PTR_SUBU k1, sp, PT_SIZE	# carve the frame below current sp
	LONG_S	sp, PT_R29(k1)		# record the pre-exception sp
	move	sp, k1			# switch to the new frame
	LONG_S	$3, PT_R3(sp)
	LONG_S	$0, PT_R0(sp)
	mfc0	v1, CP0_STATUS
	LONG_S	$2, PT_R2(sp)
	LONG_S	v1, PT_STATUS(sp)
	LONG_S	$4, PT_R4(sp)
	mfc0	v1, CP0_CAUSE
	LONG_S	$5, PT_R5(sp)
	LONG_S	v1, PT_CAUSE(sp)
	LONG_S	$6, PT_R6(sp)
	MFC0	v1, CP0_EPC		# MFC0: width-correct (dmfc0 on 64-bit)
	LONG_S	$7, PT_R7(sp)
#ifdef CONFIG_64BIT
	LONG_S	$8, PT_R8(sp)		# a4/a5 on the 64-bit ABIs
	LONG_S	$9, PT_R9(sp)
#endif
	LONG_S	v1, PT_EPC(sp)
	LONG_S	$25, PT_R25(sp)		# t9 (PIC call register)
	LONG_S	$28, PT_R28(sp)		# gp
	LONG_S	$31, PT_R31(sp)		# ra
	.set	pop
	.endm

	/* Restore $1 (AT); mirror of SAVE_AT. */
	.macro	RESTORE_AT
	.set	push
	.set	noat
	LONG_L	$1, PT_R1(sp)
	.set	pop
	.endm

	/* Restore temporaries and (pre-r6) HI/LO; mirror of SAVE_TEMP. */
	.macro	RESTORE_TEMP
#if __mips_isa_rev < 6
	LONG_L	$24, PT_LO(sp)
	mtlo	$24
	LONG_L	$24, PT_HI(sp)
	mthi	$24
#endif
#ifdef CONFIG_32BIT
	LONG_L	$8, PT_R8(sp)
	LONG_L	$9, PT_R9(sp)
#endif
	LONG_L	$10, PT_R10(sp)
	LONG_L	$11, PT_R11(sp)
	LONG_L	$12, PT_R12(sp)
	LONG_L	$13, PT_R13(sp)
	LONG_L	$14, PT_R14(sp)
	LONG_L	$15, PT_R15(sp)
	LONG_L	$24, PT_R24(sp)
	.endm

	/* Restore callee-saved registers; mirror of SAVE_STATIC. */
	.macro	RESTORE_STATIC
	LONG_L	$16, PT_R16(sp)
	LONG_L	$17, PT_R17(sp)
	LONG_L	$18, PT_R18(sp)
	LONG_L	$19, PT_R19(sp)
	LONG_L	$20, PT_R20(sp)
	LONG_L	$21, PT_R21(sp)
	LONG_L	$22, PT_R22(sp)
	LONG_L	$23, PT_R23(sp)
	LONG_L	$30, PT_R30(sp)
	.endm

	/*
	 * Restore CP0 Status/EPC and the registers saved by SAVE_SOME.
	 * Status is rebuilt field-by-field: the STATMASK bits are first
	 * cleared in the live Status, then the saved CU1/FR/IM bits are
	 * merged back from the frame so interrupt masks survive the round
	 * trip. sp itself is restored separately by RESTORE_SP.
	 */
	.macro	RESTORE_SOME
	.set	push
	.set	reorder
	.set	noat
	mfc0	a0, CP0_STATUS
	ori	a0, STATMASK		# force the low bits on...
	xori	a0, STATMASK		# ...then clear them (IE/EXL/ERL/KSU)
	mtc0	a0, CP0_STATUS
	li	v1, ST0_CU1 | ST0_FR | ST0_IM
	and	a0, v1			# a0 = live Status, masked to kept bits
	LONG_L	v0, PT_STATUS(sp)
	nor	v1, $0, v1		# v1 = ~mask
	and	v0, v1			# drop kept-bit fields from saved copy
	or	v0, a0			# splice live kept bits into saved word
	mtc0	v0, CP0_STATUS
	LONG_L	v1, PT_EPC(sp)
	MTC0	v1, CP0_EPC		# return address for eret/deret path
	LONG_L	$31, PT_R31(sp)
	LONG_L	$28, PT_R28(sp)
	LONG_L	$25, PT_R25(sp)
#ifdef CONFIG_64BIT
	LONG_L	$8, PT_R8(sp)
	LONG_L	$9, PT_R9(sp)
#endif
	LONG_L	$7, PT_R7(sp)
	LONG_L	$6, PT_R6(sp)
	LONG_L	$5, PT_R5(sp)
	LONG_L	$4, PT_R4(sp)
	LONG_L	$3, PT_R3(sp)
	LONG_L	$2, PT_R2(sp)
	.set	pop
	.endm

	/* Drop the pt_regs frame by reloading the saved pre-exception sp. */
	.macro	RESTORE_SP
	LONG_L	sp, PT_R29(sp)
	.endm

/*
 * Generic exception vector: trampoline to the full handler. Only k1 is
 * touched, which is legal inside an exception vector.
 */
NESTED(except_vec3_generic, 0, sp)
	PTR_LA	k1, handle_reserved
	jr	k1
	 nop				# branch delay slot
	END(except_vec3_generic)

/* EJTAG debug exception vector: trampoline to handle_ejtag_debug. */
NESTED(except_vec_ejtag_debug, 0, sp)
	PTR_LA	k1, handle_ejtag_debug
	jr	k1
	 nop				# branch delay slot
	END(except_vec_ejtag_debug)

/*
 * Catch-all handler: save the full register file and tail-jump into the
 * C handler do_reserved(struct pt_regs *regs). No return path - do_reserved
 * is expected not to come back (jr, not jalr).
 */
NESTED(handle_reserved, PT_SIZE, sp)
	SAVE_SOME
	SAVE_AT
	SAVE_TEMP
	SAVE_STATIC
	PTR_LA	t9, do_reserved
	jr	t9
	 move	a0, sp			# delay slot: a0 = pt_regs pointer
	END(handle_reserved)

/*
 * EJTAG debug exception handler.
 *
 * k1 is parked in CP0 DESAVE for the duration. Debug bit 30 (shifted to
 * the sign position) distinguishes an SDBBP software breakpoint from other
 * debug causes: only SDBBP goes through the C handler do_ejtag_debug();
 * everything else returns immediately via deret.
 */
NESTED(handle_ejtag_debug, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k1, CP0_DESAVE		# free k1 without losing its value

	/* Check for SDBBP */
	MFC0	k1, CP0_DEBUG
	sll	k1, k1, 30		# move Debug[1] (DBp) into the sign bit
	bgez	k1, ejtag_return	# not SDBBP -> plain debug return
	 nop

	SAVE_SOME
	SAVE_AT
	SAVE_TEMP
	SAVE_STATIC
	PTR_LA	t9, do_ejtag_debug
	jalr	t9			# call C handler, then restore and deret
	 move	a0, sp			# delay slot: a0 = pt_regs pointer
	RESTORE_TEMP
	RESTORE_STATIC
	RESTORE_AT
	RESTORE_SOME
	RESTORE_SP

ejtag_return:
	MFC0	k1, CP0_DESAVE		# recover the interrupted k1
	deret				# return from debug mode
	.set	pop
	END(handle_ejtag_debug)
4ms/stm32mp1-baremetal
5,261
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/mips/cpu/start.S
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 *  Startup Code for MIPS32 CPU-core
 *
 *  Copyright (c) 2003	Wolfgang Denk <wd@denx.de>
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/asm.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>

#ifndef CONFIG_SYS_INIT_SP_ADDR
#define CONFIG_SYS_INIT_SP_ADDR	(CONFIG_SYS_SDRAM_BASE + \
				CONFIG_SYS_INIT_SP_OFFSET)
#endif

#ifdef CONFIG_32BIT
# define MIPS_RELOC	3
# define STATUS_SET	0
#endif

#ifdef CONFIG_64BIT
# ifdef CONFIG_SYS_LITTLE_ENDIAN
# define MIPS64_R_INFO(ssym, r_type3, r_type2, r_type) \
	(((r_type) << 24) | ((r_type2) << 16) | ((r_type3) << 8) | (ssym))
# else
# define MIPS64_R_INFO(ssym, r_type3, r_type2, r_type) \
	((r_type) | ((r_type2) << 8) | ((r_type3) << 16) | (ssym) << 24)
# endif
# define MIPS_RELOC	MIPS64_R_INFO(0x00, 0x00, 0x12, 0x03)
# define STATUS_SET	ST0_KX		/* keep 64-bit kernel addressing on */
#endif

	/* Delay slots are written explicitly below */
	.set noreorder

	/*
	 * Initialize one Watch register pair (select \sel):
	 * clear WatchLo, write t1 (0x7) to WatchHi to clear the I/R/W
	 * status bits, then bail out to wr_done early when WatchHi reads
	 * back non-negative (bit 31 = M clear means no further pairs).
	 */
	.macro init_wr sel
	MTC0	zero, CP0_WATCHLO,\sel
	mtc0	t1, CP0_WATCHHI,\sel
	mfc0	t0, CP0_WATCHHI,\sel
	bgez	t0, wr_done		# M bit clear: this was the last pair
	 nop
	.endm

	/*
	 * Hand control to a UHI-capable debug probe (if attached) via the
	 * sdbbp software breakpoint. Used as the body of the ROM exception
	 * vectors where no real handling is possible.
	 */
	.macro uhi_mips_exception
	move	k0, t9		# preserve t9 in k0
	move	k1, a0		# preserve a0 in k1
	li	t9, 15		# UHI exception operation
	li	a0, 0		# Use hard register context
	sdbbp	1		# Invoke UHI operation
	.endm

	/*
	 * Build the initial C runtime stack and the global data (gd) area:
	 *   [CONFIG_SYS_INIT_SP_ADDR]  (16-aligned)
	 *   gd struct (GD_SIZE)        -> pointer kept in k0
	 *   early malloc arena         (optional)
	 *   sp/fp below all of that
	 * The gd area is zeroed word by word.
	 */
	.macro	setup_stack_gd
	li	t0, -16			# mask for 16-byte alignment
	PTR_LI	t1, CONFIG_SYS_INIT_SP_ADDR
	and	sp, t1, t0		# force 16 byte alignment
	PTR_SUBU \
	sp, sp, GD_SIZE		# reserve space for gd
	and	sp, sp, t0		# force 16 byte alignment
	move	k0, sp			# save gd pointer
#if CONFIG_VAL(SYS_MALLOC_F_LEN)
	li	t2, CONFIG_VAL(SYS_MALLOC_F_LEN)
	PTR_SUBU \
	sp, sp, t2		# reserve space for early malloc
	and	sp, sp, t0		# force 16 byte alignment
#endif
	move	fp, sp

	/* Clear gd (from k0 up to the original aligned top in t1) */
	move	t0, k0
1:
	PTR_S	zero, 0(t0)
	blt	t0, t1, 1b
	 PTR_ADDIU t0, PTRSIZE		# delay slot: advance one pointer

#if CONFIG_VAL(SYS_MALLOC_F_LEN)
	PTR_S	sp, GD_MALLOC_BASE(k0)	# gd->malloc_base offset
#endif
	.endm

ENTRY(_start)
	/* U-Boot entry point */
	b	reset
	 mtc0	zero, CP0_COUNT	# clear cp0 count for most accurate boot timing

#if defined(CONFIG_MIPS_INSERT_BOOT_CONFIG)
	/*
	 * Store some board-specific boot configuration. This is used by some
	 * MIPS systems like Malta.
	 */
	.org 0x10
	.word CONFIG_MIPS_BOOT_CONFIG_WORD0
	.word CONFIG_MIPS_BOOT_CONFIG_WORD1
#endif

#if defined(CONFIG_ROM_EXCEPTION_VECTORS)
	/*
	 * Exception vector entry points. When running from ROM, an exception
	 * cannot be handled. Halt execution and transfer control to debugger,
	 * if one is attached.
	 */
	.org 0x200
	/* TLB refill, 32 bit task */
	uhi_mips_exception

	.org 0x280
	/* XTLB refill, 64 bit task */
	uhi_mips_exception

	.org 0x300
	/* Cache error exception */
	uhi_mips_exception

	.org 0x380
	/* General exception */
	uhi_mips_exception

	.org 0x400
	/* Catch interrupt exceptions */
	uhi_mips_exception

	.org 0x480
	/* EJTAG debug exception */
1:	b	1b			# spin: no UHI path for EJTAG here
	 nop

	.org 0x500
#endif

reset:
	/*
	 * Identify this CPU: on r6 with VP cores use GlobalNumber, on BMIPS
	 * use the Diagnostic register, otherwise the EBase CPUNum field.
	 * t0 ends up zero only on the boot CPU.
	 */
#if __mips_isa_rev >= 6
	mfc0	t0, CP0_CONFIG, 5
	and	t0, t0, MIPS_CONF5_VP
	beqz	t0, 1f
	 nop

	b	2f
	 mfc0	t0, CP0_GLOBALNUMBER	# delay slot: VP id
#endif

#ifdef CONFIG_ARCH_BMIPS
1:	mfc0	t0, CP0_DIAGNOSTIC, 3
	and	t0, t0, (1 << 31)
#else
1:	mfc0	t0, CP0_EBASE
	and	t0, t0, EBASE_CPUNUM
#endif

	/* Hang if this isn't the first CPU in the system */
2:	beqz	t0, 4f
	 nop
3:	wait				# secondary CPUs sleep forever
	b	3b
	 nop

	/* Init CP0 Status: keep impl bits, set BEV+ERL (+KX on 64-bit) */
4:	mfc0	t0, CP0_STATUS
	and	t0, ST0_IMPL
	or	t0, ST0_BEV | ST0_ERL | STATUS_SET
	mtc0	t0, CP0_STATUS

	/*
	 * Check whether CP0 Config1 is implemented. If not continue
	 * with legacy Watch register initialization.
	 */
	mfc0	t0, CP0_CONFIG
	bgez	t0, wr_legacy		# Config[31]=M clear: no Config1
	 nop

	/*
	 * Check WR bit in CP0 Config1 to determine if Watch registers
	 * are implemented.
	 */
	mfc0	t0, CP0_CONFIG, 1
	andi	t0, (1 << 3)
	beqz	t0, wr_done
	 nop

	/* Clear Watch Status bits and disable watch exceptions */
	li	t1, 0x7		# Clear I, R and W conditions
	init_wr	0		# each may branch early to wr_done (M bit)
	init_wr	1
	init_wr	2
	init_wr	3
	init_wr	4
	init_wr	5
	init_wr	6
	init_wr	7
	b	wr_done
	 nop

wr_legacy:
	MTC0	zero, CP0_WATCHLO
	mtc0	zero, CP0_WATCHHI

wr_done:
	/* Clear WP, IV and SW interrupts */
	mtc0	zero, CP0_CAUSE

	/* Clear timer interrupt (CP0_COUNT cleared on branch to 'reset') */
	mtc0	zero, CP0_COMPARE

#ifndef CONFIG_SKIP_LOWLEVEL_INIT
	/* Switch KSEG0 to uncached until the caches are initialized */
	mfc0	t0, CP0_CONFIG
	and	t0, t0, MIPS_CONF_IMPL
	or	t0, t0, CONF_CM_UNCACHED
	mtc0	t0, CP0_CONFIG
	ehb				# hazard barrier: wait for CCA change
#endif

#ifdef CONFIG_MIPS_CM
	PTR_LA	t9, mips_cm_map
	jalr	t9
	 nop
#endif

#ifdef CONFIG_MIPS_INIT_STACK_IN_SRAM
	/* Set up initial stack and global data */
	setup_stack_gd

# ifdef CONFIG_DEBUG_UART
	/* Earliest point to set up debug uart */
	PTR_LA	t9, debug_uart_init
	jalr	t9
	 nop
# endif
#endif

#ifndef CONFIG_SKIP_LOWLEVEL_INIT
# ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
	/* Initialize any external memory */
	PTR_LA	t9, lowlevel_init
	jalr	t9
	 nop
# endif

	/* Initialize caches... */
	PTR_LA	t9, mips_cache_reset
	jalr	t9
	 nop

# ifndef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
	/* Initialize any external memory */
	PTR_LA	t9, lowlevel_init
	jalr	t9
	 nop
# endif
#endif

#ifndef CONFIG_MIPS_INIT_STACK_IN_SRAM
	/* Set up initial stack and global data */
	setup_stack_gd

# ifdef CONFIG_DEBUG_UART
	/* Earliest point to set up debug uart */
	PTR_LA	t9, debug_uart_init
	jalr	t9
	 nop
# endif
#endif

	/* Tail-jump into C: board_init_f(0); ra=0 marks the call-chain end */
	move	a0, zero		# a0 <-- boot_flags = 0
	PTR_LA	t9, board_init_f
	jr	t9
	 move	ra, zero		# delay slot: terminate backtraces

	END(_start)
4ms/stm32mp1-baremetal
6,107
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/mips/mach-mtmips/lowlevel_init.S
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (c) 2018 Stefan Roese <sr@denx.de>
 *
 * This code is mostly based on the code extracted from this MediaTek
 * github repository:
 *
 * https://github.com/MediaTek-Labs/linkit-smart-uboot.git
 *
 * I was not able to find a specific license or other developers
 * copyrights here, so I can't add them here.
 */

#include <config.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/addrspace.h>
#include <asm/asm.h>
#include "mt76xx.h"

#ifndef BIT
#define BIT(nr)			(1 << (nr))
#endif

/*
 * Crude busy-wait calibration: loop counts approximating microseconds.
 * NOTE(review): the /100 scaling presumably matches the boot-time CPU
 * clock of this SoC family - confirm before reusing elsewhere.
 */
#define DELAY_USEC(us)		((us) / 100)

#define DDR_CFG1_CHIP_WIDTH_MASK (0x3 << 16)
#define DDR_CFG1_BUS_WIDTH_MASK	(0x3 << 12)

/* Per-board DDR2 geometry: DDR_CFG1/DDR_CFG4 values chosen via Kconfig */
#if defined(CONFIG_ONBOARD_DDR2_SIZE_256MBIT)
#define DDR_CFG1_SIZE_VAL	0x222e2323
#define DDR_CFG4_SIZE_VAL	7
#endif
#if defined(CONFIG_ONBOARD_DDR2_SIZE_512MBIT)
#define DDR_CFG1_SIZE_VAL	0x22322323
#define DDR_CFG4_SIZE_VAL	9
#endif
#if defined(CONFIG_ONBOARD_DDR2_SIZE_1024MBIT)
#define DDR_CFG1_SIZE_VAL	0x22362323
#define DDR_CFG4_SIZE_VAL	9
#endif
#if defined(CONFIG_ONBOARD_DDR2_SIZE_2048MBIT)
#define DDR_CFG1_SIZE_VAL	0x223a2323
#define DDR_CFG4_SIZE_VAL	9
#endif

#if defined(CONFIG_ONBOARD_DDR2_CHIP_WIDTH_8BIT)
#define DDR_CFG1_CHIP_WIDTH_VAL	(0x1 << 16)
#endif
#if defined(CONFIG_ONBOARD_DDR2_CHIP_WIDTH_16BIT)
#define DDR_CFG1_CHIP_WIDTH_VAL	(0x2 << 16)
#endif

#if defined(CONFIG_ONBOARD_DDR2_BUS_WIDTH_16BIT)
#define DDR_CFG1_BUS_WIDTH_VAL	(0x2 << 12)
#endif
#if defined(CONFIG_ONBOARD_DDR2_BUS_WIDTH_32BIT)
#define DDR_CFG1_BUS_WIDTH_VAL	(0x3 << 12)
#endif

	/* Delay slots are explicit: the instruction after a branch runs */
	.set noreorder

/*
 * lowlevel_init - MT76xx CPLL, DDR LDO, DRAM pad and DDR controller setup.
 *
 * Runs before RAM is usable: no stack, registers only. Clobbers s0-s2
 * and t0-t9. Returns via ra.
 */
LEAF(lowlevel_init)

	/* Load base addresses as physical addresses for later usage */
	li	s0, CKSEG1ADDR(MT76XX_SYSCTL_BASE)
	li	s1, CKSEG1ADDR(MT76XX_MEMCTRL_BASE)
	li	s2, CKSEG1ADDR(MT76XX_RGCTRL_BASE)

	/* polling CPLL is ready (ROM status bit 0), up to ~1s */
	li	t1, DELAY_USEC(1000000)
	la	t5, MT76XX_ROM_STATUS_REG
1:
	lw	t2, 0(t5)
	andi	t2, t2, 0x1
	bnez	t2, CPLL_READY
	 subu	t1, t1, 1		# delay slot: decrement timeout
	bgtz	t1, 1b
	 nop
	/* Timed out: fall back to XTAL by setting CLKCFG0 bit 0 */
	la	t0, MT76XX_CLKCFG0_REG
	lw	t3, 0(t0)
	ori	t3, t3, 0x1
	sw	t3, 0(t0)
	b	CPLL_DONE
	 nop

CPLL_READY:
	/* Select CPLL and program the CPU dividers in DYN_CFG0 */
	la	t0, MT76XX_CLKCFG0_REG
	lw	t1, 0(t0)
	li	t2, ~0x0c
	and	t1, t1, t2
	ori	t1, t1, 0xc
	sw	t1, 0(t0)
	la	t0, MT76XX_DYN_CFG0_REG
	lw	t3, 0(t0)
	li	t5, ~((0x0f << 8) | (0x0f << 0))
	and	t3, t3, t5
	li	t5, (10 << 8) | (1 << 0)	# mult=10, div=1
	or	t3, t3, t5
	sw	t3, 0(t0)
	la	t0, MT76XX_CLKCFG0_REG
	lw	t3, 0(t0)
	li	t4, ~0x0F
	and	t3, t3, t4
	ori	t3, t3, 0xc
	sw	t3, 0(t0)
	lw	t3, 0(t0)
	ori	t3, t3, 0x08
	sw	t3, 0(t0)

CPLL_DONE:
	/* Reset MC */
	lw	t2, 0x34(s0)
	ori	t2, BIT(10)
	sw	t2, 0x34(s0)
	nop

	/*
	 * SDR and DDR initialization: delay 200us
	 */
	li	t0, DELAY_USEC(200 + 40)
	li	t1, 0x1
1:
	sub	t0, t0, t1
	bnez	t0, 1b
	 nop

	/* set DRAM IO PAD for MT7628IC */
	/* DDR LDO Enable  */
	lw	t4, 0x100(s2)
	li	t2, BIT(31)
	or	t4, t4, t2
	sw	t4, 0x100(s2)
	lw	t4, 0x10c(s2)
	/* Unconditionally take the 1.8V path (internal DDR2 LDO) */
	j	LDO_1P8V
	 nop

LDO_1P8V:
	li	t2, ~BIT(6)
	and	t4, t4, t2
	sw	t4, 0x10c(s2)
	/* No nop here: the first LDO_2P5V insn runs in this delay slot,
	 * but it only loads t2, which DDRLDO_SOFT_START overwrites. */
	j	DDRLDO_SOFT_START
LDO_2P5V:
	/* suppose external DDR1 LDO 2.5V */
	/* NOTE(review): unreachable - both jumps above bypass this label;
	 * kept, presumably for boards with an external 2.5V LDO. */
	li	t2, BIT(6)
	or	t4, t4, t2
	sw	t4, 0x10c(s2)

DDRLDO_SOFT_START:
	lw	t2, 0x10c(s2)
	li	t3, BIT(16)		# soft-start enable
	or	t2, t2, t3
	sw	t2, 0x10c(s2)
	li	t3, DELAY_USEC(250*50)	# let the LDO output settle
LDO_DELAY:
	subu	t3, t3, 1
	bnez	t3, LDO_DELAY
	 nop
	lw	t2, 0x10c(s2)
	li	t3, BIT(18)		# soft-start done / full drive
	or	t2, t2, t3
	sw	t2, 0x10c(s2)

SET_RG_BUCK_FPWM:
	lw	t2, 0x104(s2)
	ori	t2, t2, BIT(10)
	sw	t2, 0x104(s2)

DDR_PAD_CFG:
	/* Read all six pad registers and clear their drive/ODT fields;
	 * the per-package values are OR-ed in below and stored at
	 * SET_PAD_CFG. t2..t7 carry the six values until then. */
	/* clean CLK PAD */
	lw	t2, 0x704(s2)
	li	t8, 0xfffff0f0
	and	t2, t2, t8
	/* clean CMD PAD */
	lw	t3, 0x70c(s2)
	li	t8, 0xfffff0f0
	and	t3, t3, t8
	/* clean DQ IPAD */
	lw	t4, 0x710(s2)
	li	t8, 0xfffff8ff
	and	t4, t4, t8
	/* clean DQ OPAD */
	lw	t5, 0x714(s2)
	li	t8, 0xfffff0f0
	and	t5, t5, t8
	/* clean DQS IPAD */
	lw	t6, 0x718(s2)
	li	t8, 0xfffff8ff
	and	t6, t6, t8
	/* clean DQS OPAD */
	lw	t7, 0x71c(s2)
	li	t8, 0xfffff0f0
	and	t7, t7, t8

	/* Package/bonding detect: SYSCTL+0xc bit 16 selects AN vs KN */
	lw	t9, 0xc(s0)
	srl	t9, t9, 16
	andi	t9, t9, 0x1
	bnez	t9, MT7628_AN_DDR1_PAD
	/* No nop: the li below executes in the delay slot on both paths;
	 * harmless since t8 is reloaded before use on the taken path. */

MT7628_KN_PAD:
	li	t8, 0x00000303
	or	t2, t2, t8
	or	t3, t3, t8
	or	t5, t5, t8
	or	t7, t7, t8
	li	t8, 0x00000000
	or	t4, t4, t8
	or	t6, t6, t8
	j	SET_PAD_CFG

MT7628_AN_DDR1_PAD:
	/* SYSCTL+0x10 bit 0: DDR1 vs DDR2 bonding */
	lw	t1, 0x10(s0)
	andi	t1, t1, 0x1
	beqz	t1, MT7628_AN_DDR2_PAD
	li	t8, 0x00000c0c		# also the branch delay slot
	or	t2, t2, t8
	li	t8, 0x00000202
	or	t3, t3, t8
	li	t8, 0x00000707
	or	t5, t5, t8
	li	t8, 0x00000c0c
	or	t7, t7, t8
	li	t8, 0x00000000
	or	t4, t4, t8
	or	t6, t6, t8
	j	SET_PAD_CFG

MT7628_AN_DDR2_PAD:
	li	t8, 0x00000c0c
	or	t2, t2, t8
	li	t8, 0x00000202
	or	t3, t3, t8
	li	t8, 0x00000404
	or	t5, t5, t8
	li	t8, 0x00000c0c
	or	t7, t7, t8
	li	t8, 0x00000000	/* ODT off */
	or	t4, t4, t8
	or	t6, t6, t8

SET_PAD_CFG:
	sw	t2, 0x704(s2)
	sw	t3, 0x70c(s2)
	sw	t4, 0x710(s2)
	sw	t5, 0x714(s2)
	sw	t6, 0x718(s2)
	sw	t7, 0x71c(s2)

	/*
	 * DDR initialization: reset pin to 0
	 */
	lw	t2, 0x34(s0)
	and	t2, ~BIT(10)
	sw	t2, 0x34(s0)
	nop

	/*
	 * DDR initialization: wait til reg DDR_CFG1 bit 21 equal to 1 (ready)
	 */
DDR_READY:
	li	t1, DDR_CFG1_REG
	lw	t0, 0(t1)
	nop
	and	t2, t0, BIT(21)
	beqz	t2, DDR_READY
	 nop

	/*
	 * DDR initialization
	 *
	 * Only DDR2 supported right now. DDR2 support can be added, once
	 * boards using it will get added to mainline U-Boot.
	 */
	li	t1, DDR_CFG2_REG
	lw	t0, 0(t1)
	nop
	and	t0, ~BIT(30)		# pulse bit 30 low->high below
	and	t0, ~(7 << 4)
	or	t0, (4 << 4)		# burst/latency field = 4
	or	t0, BIT(30)
	or	t0, BIT(11)
	sw	t0, 0(t1)
	nop

	li	t1, DDR_CFG3_REG
	lw	t2, 0(t1)
	/* Disable ODT; reference board ok, ev board fail */
	and	t2, ~BIT(6)
	or	t2, BIT(2)
	/* NOTE(review): the modified DDR_CFG3 value in t2 is never written
	 * back (no sw before t1/t2 are reused below) - confirm whether the
	 * store was dropped intentionally. */

	li	t0, DDR_CFG4_REG
	lw	t1, 0(t0)
	li	t2, ~(0x01f | 0x0f0)
	and	t1, t1, t2
	ori	t1, t1, DDR_CFG4_SIZE_VAL
	sw	t1, 0(t0)
	nop

	/*
	 * DDR initialization: config size and width on reg DDR_CFG1
	 */
	li	t6, DDR_CFG1_SIZE_VAL
	and	t6, ~DDR_CFG1_CHIP_WIDTH_MASK
	or	t6, DDR_CFG1_CHIP_WIDTH_VAL

	/* CONFIG DDR_CFG1[13:12] about TOTAL WIDTH */
	and	t6, ~DDR_CFG1_BUS_WIDTH_MASK
	or	t6, DDR_CFG1_BUS_WIDTH_VAL

	li	t5, DDR_CFG1_REG
	sw	t6, 0(t5)
	nop

	/*
	 * DDR: enable self auto refresh for power saving
	 * enable it by default for both RAM and ROM version (for CoC)
	 */
	lw	t1, 0x14(s1)
	nop
	and	t1, 0xff000000
	or	t1, 0x01
	sw	t1, 0x14(s1)
	nop
	lw	t1, 0x10(s1)
	nop
	or	t1, 0x10
	sw	t1, 0x10(s1)
	nop

	jr	ra
	 nop
	END(lowlevel_init)
4ms/stm32mp1-baremetal
4,298
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/mips/mach-ath79/qca956x/qca956x-ddr-tap.S
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2019 Rosy Song <rosysong@rosinson.com>
 *
 * Based on QSDK
 */

#include <config.h>
#include <asm/asm.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/addrspace.h>
#include <mach/ar71xx_regs.h>

	/* Delay slots are explicit below */
	.set noreorder

/*
 * ddr_tap_tuning - sweep the DDR tap-delay settings on QCA956x.
 *
 * For each tap value 0..0x3f the hardware DDR BIST is run; the window of
 * passing taps is tracked in a small scratch area at 0xbd001f00 (SRAM):
 *   +0x00 current tap under test
 *   +0x04 number of passing taps
 *   +0x10 first passing tap (0xaa55aa55 = "none found yet")
 *   +0x14 last passing tap
 * Finally the center of the passing window (or a default of 8 when
 * nothing passed) is programmed into all four TAP_CONTROL registers.
 *
 * No stack is used; clobbers a0-a3 and t0-t5. Returns via ra.
 */
LEAF(ddr_tap_tuning)
	/* Initialize the scratch bookkeeping area */
	li	a0, 0xbd001f00
	sw	zero, 0x0(a0)	/* Place where the tap values are saved and used for SWEEP */
	sw	zero, 0x4(a0)	/* Place where the number of passing taps are saved. */
	sw	zero, 0x14(a0)	/* Place where the last pass tap value is stored */
	li	a1, 0xaa55aa55	/* Indicates that the First pass tap value is not found */
	sw	a1, 0x10(a0)	/* Place where the First pass tap value is stored */
	nop

	/* Pulse the RTC reset bit so the RTC block starts clean */
	li	a0, CKSEG1ADDR(AR71XX_RESET_BASE)	/* RESET_BASE_ADDRESS */
	lw	a1, 0x1c(a0)	/* Reading the RST_RESET_ADDRESS */
	li	a2, 0x08000000	/* Setting the RST_RESET_RTC_RESET */
	or	a1, a1, a2
	sw	a1, 0x1c(a0)
	li	a3, 0xffffffff
	xor	a2, a2, a3	# a2 = ~RTC_RESET mask
	and	a1, a1, a2
	sw	a1, 0x1c(a0)	/* Taking the RTC out of RESET */
	nop

	/* Kick the RTC sync-reset and wait for the "on" status */
	li	a0, CKSEG1ADDR(QCA956X_RTC_BASE)	/* RTC_BASE_ADDRESS */
	li	a1, 0x1
	sw	a1, 0x0040(a0)	/* RTC_SYNC_RESET_ADDRESS */
	li	a2, 0x2
_poll_for_RTC_ON:
	lw	a1, 0x0044(a0)	/* RTC_SYNC_STATUS_ADDRESS */
	and	a1, a2, a1
	bne	a1, a2, _poll_for_RTC_ON
	 nop

_CHANGE_TAPS:
	/* Program the tap under test into all four TAP_CONTROL registers */
	li	t0, 0xbd001f00	/* Read the current value of the TAP for programming */
	lw	t1, 0x0(t0)
	li	t2, 0x00000000
	or	t3, t1, t2
	li	t0, 0xb8000000	/* DDR_BASE_ADDRESS */
	sw	t3, 0x1c(t0)	/* TAP_CONTROL_0_ADDRESS */
	sw	t3, 0x20(t0)	/* TAP_CONTROL_1_ADDRESS */
	sw	t3, 0x24(t0)	/* TAP_CONTROL_2_ADDRESS */
	sw	t3, 0x28(t0)	/* TAP_CONTROL_3_ADDRESS */

	/* Configure the BIST: run count, address mask, compare patterns */
	li	t1, 0x00000010	/* Running the test 8 times */
	sw	t1, 0x0068(t0)	/* PERF_COMP_ADDR_1_ADDRESS */
	li	t1, 0xfa5de83f	/* 4 Row Address Bits, 4 Column Address Bits, 2 BA bits */
	sw	t1, 0x002c(t0)	/* PERF_MASK_ADDR_0_ADDRESS */
	li	t1, 0x0000ffff
	sw	t1, 0x0070(t0)	/* PERF_COMP_AHB_GE0_1_ADDRESS */
	li	t1, 0x0000ffff
	sw	t1, 0x0040(t0)	/* PERF_COMP_AHB_GE1_0_ADDRESS */
	li	t1, 0x0000ffff
	sw	t1, 0x0078(t0)	/* PERF_COMP_AHB_GE1_1_ADDRESS */
	li	t1, 0x0000ffff
	sw	t1, 0x0034(t0)	/* PERF_MASK_AHB_GE0_0_ADDRESS */
	li	t1, 0x0000ffff
	sw	t1, 0x006c(t0)	/* PERF_MASK_AHB_GE0_1_ADDRESS */
	li	t1, 0x0000ffff
	sw	t1, 0x003c(t0)	/* PERF_MASK_AHB_GE1_0_ADDRESS */
	li	t1, 0x0000ffff
	sw	t1, 0x0074(t0)	/* PERF_MASK_AHB_GE1_1_ADDRESS */
	li	t1, 0x0000ffff
	sw	t1, 0x0038(t0)	/* PERF_COMP_AHB_GE0_0_ADDRESS */

	/* Start the BIST and poll its done bit */
	li	t1, 0x00000001
	sw	t1, 0x011c(t0)	/* DDR_BIST_ADDRESS */
	li	t2, 0x1
_bist_done_poll:
	lw	t1, 0x0120(t0)	/* DDR_BIST_STATUS_ADDRESS */
	and	t1, t1, t2
	bne	t1, t2, _bist_done_poll
	 nop

	/* Evaluate: pass count must equal the configured run count and
	 * the fail count must be zero for this tap to count as a pass. */
	lw	t1, 0x0120(t0)	/* DDR_BIST_STATUS_ADDRESS */
	li	t4, 0x000001fe
	and	t2, t1, t4
	srl	t2, t2, 0x1	/* no. of Pass Runs */
	li	t5, 0x00000000
	sw	t5, 0x011c(t0)	/* DDR_BIST_ADDRESS - Stop the DDR BIST test */
	li	t5, 0x0001fe00
	and	t5, t5, t1
	bnez	t5, _iterate_tap	/* This is a redundant compare but nevertheless - Comparing the FAILS */
	 nop
	lw	t1, 0x0068(t0)	/* PERF_COMP_ADDR_1_ADDRESS */
	li	t3, 0x000001fe
	and	t3, t3, t1
	srl	t3, t3, 0x1	/* No. of runs in the config register. */
	bne	t3, t2, _iterate_tap
	 nop

pass_tap:
	/* Record the pass: bump the pass counter, then update the
	 * first-pass (only once) and last-pass scratch slots. */
	li	t0, 0xbd001f00
	lw	t1, 0x4(t0)
	addiu	t1, t1, 0x1
	sw	t1, 0x4(t0)
	li	t0, 0xbd001f10
	lw	t1, 0x0(t0)
	li	t2, 0xaa55aa55
	beq	t1, t2, _first_pass	# first passing tap of the sweep
	 nop
	li	t0, 0xbd001f00
	lw	t1, 0x0(t0)
	li	t0, 0xbd001f10
	sw	t1, 0x4(t0)		# update last-pass only
	nop
	b	_iterate_tap
	 nop

_first_pass:
	li	t0, 0xbd001f00
	lw	t1, 0x0(t0)
	li	t0, 0xbd001f10
	sw	t1, 0x0(t0)		# first-pass = current tap
	sw	t1, 0x4(t0)		# last-pass = current tap
	nop

_iterate_tap:
	/* Advance to the next tap, or finish after 0x3f */
	li	t0, 0xbd001f00
	lw	t1, 0x0(t0)
	li	t2, 0x3f
	beq	t1, t2, _STOP_TEST
	 nop
	addiu	t1, t1, 0x1
	sw	t1, 0x0(t0)
	nop
	b	_CHANGE_TAPS
	 nop

_STOP_TEST:
	/* No passing tap at all -> fall back to the default value 8 */
	li	t0, 0xbd001f00
	lw	t1, 0x4(t0)
	bnez	t1, _load_center_tap
	 nop
	li	t3, 0x8		/* Default Tap to be used */
	b	_load_tap_into_reg
	 nop

_load_center_tap:
	/* t3 = (first_pass + last_pass) / 2, clamped to 6 bits */
	li	t0, 0xbd001f10
	lw	t1, 0x0(t0)
	lw	t2, 0x4(t0)
	add	t3, t1, t2
	srl	t3, t3, 0x1
	li	t4, 0x3f
	and	t3, t3, t4

_load_tap_into_reg:
	li	t0, 0xb8000000
	sw	t3, 0x1c(t0)	/* TAP_CONTROL_0_ADDRESS */
	sw	t3, 0x20(t0)	/* TAP_CONTROL_1_ADDRESS */
	sw	t3, 0x24(t0)	/* TAP_CONTROL_2_ADDRESS */
	sw	t3, 0x28(t0)	/* TAP_CONTROL_3_ADDRESS */
	nop
	jr	ra
	 nop
END(ddr_tap_tuning)
4ms/stm32mp1-baremetal
6,733
third-party/u-boot/u-boot-stm32mp1-baremetal/arch/mips/mach-ath79/ar933x/lowlevel_init.S
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (C) 2015-2016 Wills Wang <wills.wang@live.com>
 * Based on Atheros LSDK/QSDK and u-boot_mod project
 */

#include <config.h>
#include <asm/asm.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/addrspace.h>
#include <mach/ar71xx_regs.h>

#define SET_BIT(val, bit)   ((val) | (1 << (bit)))
#define SET_PLL_PD(val)     SET_BIT(val, 30)	/* CPU PLL power-down bit */
#define AHB_DIV_TO_4(val)   SET_BIT(SET_BIT(val, 15), 16)
#define PLL_BYPASS(val)     SET_BIT(val, 2)

#define MK_PLL_CONF(divint, refdiv, range, outdiv) \
	(((0x3F & divint) << 10) | \
	((0x1F & refdiv) << 16) | \
	((0x1 & range) << 21) | \
	((0x7 & outdiv) << 23) )

#define MK_CLK_CNTL(cpudiv, ddrdiv, ahbdiv) \
	(((0x3 & (cpudiv - 1)) << 5) | \
	((0x3 & (ddrdiv - 1)) << 10) | \
	((0x3 & (ahbdiv - 1)) << 15) )

/*
 * PLL_CPU_CONFIG_VAL
 *
 * Bit30 is set (CPU_PLLPWD = 1 -> power down control for CPU PLL)
 * After PLL configuration we need to clear this bit
 *
 * Values written into CPU PLL Configuration (CPU_PLL_CONFIG)
 *
 * bits 10..15 (6bit) DIV_INT (Integer part of the DIV to CPU PLL)
 *                    => 32   (0x20) VCOOUT = XTAL * DIV_INT
 * bits 16..20 (5bit) REFDIV (Reference clock divider)
 *                    => 1    (0x1)  [Must start at values 1]
 * bits 21     (1bit) RANGE (VCO frequency range of the CPU PLL)
 *                    => 0    (0x0)  [Doesn't impact clock values]
 * bits 23..25 (3bit) OUTDIV (Ratio between VCO and PLL output)
 *                    => 1    (0x1)  [0 is illegal!]
 *                    PLLOUT = VCOOUT * (1/2^OUTDIV)
 */
/* DIV_INT=20 (40MHz*20/2=400MHz), REFDIV=1, RANGE=0, OUTDIV=1 */
#define PLL_CPU_CONFIG_VAL_40M	MK_PLL_CONF(20, 1, 0, 1)
/* DIV_INT=32 (25MHz*32/2=400MHz), REFDIV=1, RANGE=0, OUTDIV=1 */
#define PLL_CPU_CONFIG_VAL_25M	MK_PLL_CONF(32, 1, 0, 1)

/*
 * PLL_CLK_CONTROL_VAL
 *
 * In PLL_CLK_CONTROL_VAL bit 2 is set (BYPASS = 1 -> bypass PLL)
 * After PLL configuration we need to clear this bit
 *
 * Values written into CPU Clock Control Register CLOCK_CONTROL
 *
 * bits 2      (1bit) BYPASS (Bypass PLL. This defaults to 1 for test.
 *                    Software must enable the CPU PLL for normal and
 *                    then set this bit to 0)
 * bits 5..6   (2bit) CPU_POST_DIV => 0 (DEFAULT, Ratio = 1)
 *                    CPU_CLK = PLLOUT / CPU_POST_DIV
 * bits 10..11 (2bit) DDR_POST_DIV => 0 (DEFAULT, Ratio = 1)
 *                    DDR_CLK = PLLOUT / DDR_POST_DIV
 * bits 15..16 (2bit) AHB_POST_DIV => 1 (DEFAULT, Ratio = 2)
 *                    AHB_CLK = PLLOUT / AHB_POST_DIV
 */
#define PLL_CLK_CONTROL_VAL	MK_CLK_CNTL(1, 1, 2)

	.text
	/* Delay slots are explicit below */
	.set noreorder

/*
 * lowlevel_init - AR933x PLL/clock bring-up.
 *
 * Resets the WLAN block, wakes and resets the RTC, programs the SRIF DDR
 * DPLL, then configures the CPU PLL for 400 MHz (the 25 vs 40 MHz XTAL
 * choice comes from BOOTSTRAP bit 0, kept in t5 throughout) and finally
 * drops the PLL bypass. Runs without a stack; clobbers t0-t5. Returns
 * via ra.
 */
LEAF(lowlevel_init)
	/* These three WLAN_RESET will avoid original issue */
	li	t3, 0x03
1:
	li	t0, CKSEG1ADDR(AR71XX_RESET_BASE)
	lw	t1, AR933X_RESET_REG_RESET_MODULE(t0)
	ori	t1, t1, 0x0800		# assert WLAN reset
	sw	t1, AR933X_RESET_REG_RESET_MODULE(t0)
	nop
	lw	t1, AR933X_RESET_REG_RESET_MODULE(t0)
	li	t2, 0xfffff7ff
	and	t1, t1, t2		# deassert WLAN reset
	sw	t1, AR933X_RESET_REG_RESET_MODULE(t0)
	nop
	addi	t3, t3, -1
	bnez	t3, 1b
	 nop

	/* Poll (max 0x20 reads) for BOOTSTRAP bit 4 to clear; on timeout,
	 * redo the WLAN reset sequence above (branch back to 1b). */
	li	t2, 0x20
2:
	beqz	t2, 1b
	 nop
	addi	t2, t2, -1
	lw	t5, AR933X_RESET_REG_BOOTSTRAP(t0)	# t5 kept live below
	andi	t1, t5, 0x10
	bnez	t1, 2b
	 nop

	li	t1, 0x02110E
	sw	t1, AR933X_RESET_REG_BOOTSTRAP(t0)
	nop

	/* RTC Force Wake */
	li	t0, CKSEG1ADDR(AR933X_RTC_BASE)
	li	t1, 0x03
	sw	t1, AR933X_RTC_REG_FORCE_WAKE(t0)
	nop
	nop

	/* RTC Reset */
	li	t1, 0x00
	sw	t1, AR933X_RTC_REG_RESET(t0)
	nop
	nop
	li	t1, 0x01
	sw	t1, AR933X_RTC_REG_RESET(t0)
	nop
	nop

	/* Wait for RTC in on state */
1:
	lw	t1, AR933X_RTC_REG_STATUS(t0)
	andi	t1, t1, 0x02
	beqz	t1, 1b
	 nop

	/* Program ki/kd (loop gains differ per 25/40 MHz XTAL) */
	li	t0, CKSEG1ADDR(AR933X_SRIF_BASE)
	andi	t1, t5, 0x01	# t5 BOOT_STRAP
	bnez	t1, 1f
	 nop
	li	t1, 0x19e82f01		# 25 MHz XTAL
	b	2f
	 nop
1:
	li	t1, 0x18e82f01		# 40 MHz XTAL
2:
	sw	t1, AR933X_SRIF_DDR_DPLL2_REG(t0)

	/* Program phase shift */
	lw	t1, AR933X_SRIF_DDR_DPLL3_REG(t0)
	li	t2, 0xc07fffff
	and	t1, t1, t2
	li	t2, 0x800000
	or	t1, t1, t2
	sw	t1, AR933X_SRIF_DDR_DPLL3_REG(t0)
	nop

	/* in some cases, the SoC doesn't start with higher clock on AHB */
	li	t0, CKSEG1ADDR(AR71XX_PLL_BASE)
	li	t1, AHB_DIV_TO_4(PLL_BYPASS(PLL_CLK_CONTROL_VAL))
	sw	t1, AR933X_PLL_CLK_CTRL_REG(t0)
	nop

	/* Set SETTLE_TIME in CPU PLL */
	andi	t1, t5, 0x01	# t5 BOOT_STRAP
	bnez	t1, 1f
	 nop
	li	t1, 0x0352		# 25 MHz XTAL
	b	2f
	 nop
1:
	li	t1, 0x0550		# 40 MHz XTAL
2:
	sw	t1, AR71XX_PLL_REG_SEC_CONFIG(t0)
	nop

	/* Set nint, frac, refdiv, outdiv, range according to xtal.
	 * Label 0: is also the retry point when the DDR PLL lock check
	 * below fails (bgt ... 0b). */
0:
	andi	t1, t5, 0x01	# t5 BOOT_STRAP
	bnez	t1, 1f
	 nop
	li	t1, SET_PLL_PD(PLL_CPU_CONFIG_VAL_25M)
	b	2f
	 nop
1:
	li	t1, SET_PLL_PD(PLL_CPU_CONFIG_VAL_40M)
2:
	sw	t1, AR933X_PLL_CPU_CONFIG_REG(t0)
	nop
	/* Wait for the PLL update bit (31) to clear */
1:
	lw	t1, AR933X_PLL_CPU_CONFIG_REG(t0)
	li	t2, 0x80000000
	and	t1, t1, t2
	bnez	t1, 1b
	 nop

	/* Put frac bit19:10 configuration */
	li	t1, 0x1003E8
	sw	t1, AR933X_PLL_DITHER_FRAC_REG(t0)
	nop

	/* Clear PLL power down bit in CPU PLL configuration */
	andi	t1, t5, 0x01	# t5 BOOT_STRAP
	bnez	t1, 1f
	 nop
	li	t1, PLL_CPU_CONFIG_VAL_25M
	b	2f
	 nop
1:
	li	t1, PLL_CPU_CONFIG_VAL_40M
2:
	sw	t1, AR933X_PLL_CPU_CONFIG_REG(t0)
	nop

	/* Wait for PLL update -> bit 31 in CPU_PLL_CONFIG should be 0 */
1:
	lw	t1, AR933X_PLL_CPU_CONFIG_REG(t0)
	li	t2, 0x80000000
	and	t1, t1, t2
	bnez	t1, 1b
	 nop

	/* Confirm DDR PLL lock: run the SRIF do_meas measurement up to
	 * 5 times (t3); if the measured value stays above 0x4000 after
	 * 100 retry rounds (via 2b/0b) the whole PLL setup is redone. */
	li	t3, 100
	li	t4, 0
2:
	addi	t4, t4, 1
	bgt	t4, t3, 0b		# too many retries: reprogram PLL
	 nop
	li	t3, 5
3:
	/* Clear do_meas */
	li	t0, CKSEG1ADDR(AR933X_SRIF_BASE)
	lw	t1, AR933X_SRIF_DDR_DPLL3_REG(t0)
	li	t2, 0xBFFFFFFF
	and	t1, t1, t2
	sw	t1, AR933X_SRIF_DDR_DPLL3_REG(t0)
	nop
	li	t2, 10			# short settle delay
1:
	subu	t2, t2, 1
	bnez	t2, 1b
	 nop
	/* Set do_meas */
	li	t2, 0x40000000
	or	t1, t1, t2
	sw	t1, AR933X_SRIF_DDR_DPLL3_REG(t0)
	nop
	/* Check meas_done */
1:
	lw	t1, AR933X_SRIF_DDR_DPLL4_REG(t0)
	andi	t1, t1, 0x8
	beqz	t1, 1b
	 nop
	lw	t1, AR933X_SRIF_DDR_DPLL3_REG(t0)
	li	t2, 0x007FFFF8
	and	t1, t1, t2
	srl	t1, t1, 3		# extract measured phase error
	li	t2, 0x4000
	bgt	t1, t2, 2b		# out of range: retry measurement
	 nop
	addi	t3, t3, -1
	bnez	t3, 3b			# require 5 consecutive good reads
	 nop

	/* clear PLL bypass (bit 2) in CPU CLOCK CONTROL register */
	li	t0, CKSEG1ADDR(AR71XX_PLL_BASE)
	li	t1, PLL_CLK_CONTROL_VAL
	sw	t1, AR933X_PLL_CLK_CTRL_REG(t0)
	nop
	nop

	jr	ra
	 nop
END(lowlevel_init)