text
stringlengths
9
39.2M
dir
stringlengths
25
226
lang
stringclasses
163 values
created_date
timestamp[s]
updated_date
timestamp[s]
repo_name
stringclasses
751 values
repo_full_name
stringclasses
752 values
star
int64
1.01k
183k
len_tokens
int64
1
18.5M
/*
 * (license header stripped in this dump)
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/arch/sparc/sparc.h>

GTEXT(z_sparc_arch_switch)
GTEXT(z_sparc_context_switch)
GTEXT(z_thread_entry_wrapper)

/*
 * The routine z_sparc_context_switch() is called from arch_switch(), or from
 * the interrupt trap handler in case of preemption. The subtraction to get the
 * "old" thread from "switched_from" has already been performed and the "old"
 * thread is now in register %o1. We can address old->switch_handle in assembly
 * as: [%o1 + ___thread_t_switch_handle_OFFSET].
 *
 * The switch_handle is written in z_sparc_context_switch() after the old
 * context has been saved.
 *
 * This is a leaf function, so only out registers can be used without saving
 * their context first.
 *
 * o0: new thread to restore
 * o1: old thread to save
 */
SECTION_FUNC(TEXT, z_sparc_context_switch)
	/* Save the old thread's callee-saved context (y, locals, ins, sp). */
	mov	%y, %o4
	st	%o4, [%o1 + _thread_offset_to_y]
	std	%l0, [%o1 + _thread_offset_to_l0_and_l1]
	std	%l2, [%o1 + _thread_offset_to_l2]
	std	%l4, [%o1 + _thread_offset_to_l4]
	std	%l6, [%o1 + _thread_offset_to_l6]
	std	%i0, [%o1 + _thread_offset_to_i0]
	std	%i2, [%o1 + _thread_offset_to_i2]
	std	%i4, [%o1 + _thread_offset_to_i4]
	std	%i6, [%o1 + _thread_offset_to_i6]
	std	%o6, [%o1 + _thread_offset_to_o6]

	rd	%psr, %o4
	st	%o4, [%o1 + _thread_offset_to_psr]

	and	%o4, PSR_CWP, %g3	/* %g3 = CWP */
	andn	%o4, PSR_ET, %g1	/* %g1 = psr with traps disabled */
	wr	%g1, %psr		/* disable traps */
	nop				/* wrpsr is a delayed-write instruction */
	nop
	nop

	rd	%wim, %g2		/* %g2 = wim */
	mov	1, %g4
	sll	%g4, %g3, %g4		/* %g4 = wim mask for CW invalid */

	/*
	 * Flush all windows below the current one to the stack, walking
	 * backwards with "restore" until the next restore would hit the
	 * invalid window.
	 */
.Lsave_frame_loop:
	sll	%g4, 1, %g5		/* rotate wim left by 1 */
	srl	%g4, (CONFIG_SPARC_NWIN-1), %g4
	or	%g4, %g5, %g4		/* %g4 = wim if we do one restore */

	/* if restore would not underflow, continue */
	andcc	%g4, %g2, %g0		/* window to flush? */
	bnz	.Ldone_flushing		/* continue */
	 nop

	restore				/* go one window back */

	/* essentially the same as window overflow */
	/* sp still points to task stack */
	std	%l0, [%sp + 0x00]
	std	%l2, [%sp + 0x08]
	std	%l4, [%sp + 0x10]
	std	%l6, [%sp + 0x18]
	std	%i0, [%sp + 0x20]
	std	%i2, [%sp + 0x28]
	std	%i4, [%sp + 0x30]
	std	%i6, [%sp + 0x38]
	ba	.Lsave_frame_loop
	 nop

.Ldone_flushing:
	/*
	 * "wrpsr" is a delayed write instruction so wait three instructions
	 * after the write before using non-global registers or instructions
	 * affecting the CWP.
	 */
	wr	%g1, %psr		/* restore cwp */
	nop
	nop
	nop

	add	%g3, 1, %g2		/* calculate desired wim */
	cmp	%g2, (CONFIG_SPARC_NWIN-1)	/* check if wim is in range */
	bg,a	.Lwim_overflow
	 mov	0, %g2			/* annulled unless branch taken */

.Lwim_overflow:
	mov	1, %g4
	sll	%g4, %g2, %g4		/* %g4 = new wim */
	wr	%g4, %wim
	nop
	nop
	nop

	/*
	 * We have finished saving the "old" context and are also back in the
	 * register window for which z_sparc_context_switch() was called.
	 *
	 * Now write the old thread into switch handle.
	 * "old->switch_handle = old".
	 */
	st	%o1, [%o1 + ___thread_t_switch_handle_OFFSET]

	/*
	 * NOTE(review): ldd loads a doubleword (%o4, %o5); only %o4 (y) is
	 * used here — psr is re-read separately below. Confirm the y/psr
	 * fields are adjacent and doubleword-aligned in _callee_saved_t.
	 */
	ldd	[%o0 + _thread_offset_to_y], %o4
	mov	%o4, %y

	/* restore local registers */
	ldd	[%o0 + _thread_offset_to_l0_and_l1], %l0
	ldd	[%o0 + _thread_offset_to_l2], %l2
	ldd	[%o0 + _thread_offset_to_l4], %l4
	ldd	[%o0 + _thread_offset_to_l6], %l6

	/* restore input registers */
	ldd	[%o0 + _thread_offset_to_i0], %i0
	ldd	[%o0 + _thread_offset_to_i2], %i2
	ldd	[%o0 + _thread_offset_to_i4], %i4
	ldd	[%o0 + _thread_offset_to_i6], %i6

	/* restore output registers */
	ldd	[%o0 + _thread_offset_to_o6], %o6

#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* %g7 holds the thread-local-storage pointer for the new thread */
	ld	[%o0 + _thread_offset_to_tls], %g7
#endif

	ld	[%o0 + _thread_offset_to_psr], %g1	/* %g1 = new thread psr */
	andn	%g1, PSR_CWP, %g1	/* psr without cwp */
	or	%g1, %g3, %g1		/* psr with new cwp */
	wr	%g1, %psr		/* restore status register and ET */
	nop
	nop
	nop

	/* jump into thread */
	jmp	%o7 + 8
	 nop

/*
 * Entry wrapper for newly created threads: zero %o7 so the call chain
 * terminates here, then forward the four arguments staged in %i0..%i3 to
 * z_thread_entry().
 */
SECTION_FUNC(TEXT, z_thread_entry_wrapper)
	mov	%g0, %o7	/* return address = 0: deepest frame marker */
	mov	%i0, %o0
	mov	%i1, %o1
	mov	%i2, %o2
	mov	%i3, %o3
	call	z_thread_entry
	 nop
/content/code_sandbox/arch/sparc/core/switch.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,532
/*
 * (license header stripped in this dump)
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/sparc/sparc.h>

/* The trap table reset entry jumps to here. */
GTEXT(__sparc_trap_reset)
SECTION_FUNC(TEXT, __sparc_trap_reset)
	/* Install the trap table and mark window 1 invalid (WIM = 2). */
	set	__sparc_trap_table, %g1
	wr	%g1, %tbr
	wr	2, %wim
	/* Supervisor mode, all maskable interrupts off, traps enabled. */
	wr	PSR_PIL | PSR_S | PSR_PS | PSR_ET, %psr
	/* NOTE: wrpsr above may have changed the current register window. */
	/* We are in the 3 instruction wrpsr delay so use global registers. */
	set	z_interrupt_stacks, %g2
	set	CONFIG_ISR_STACK_SIZE, %g4
	add	%g2, %g4, %g1
	/* Align the top-of-stack down to 16 bytes. */
	and	%g1, 0xfffffff0, %l3

	/*
	 * According to SPARC ABI, Chapter 3: The system marks the deepest
	 * stack frame by setting the frame pointer to zero. No other frame's
	 * %fp has a zero value.
	 */
	sub	%l3, 96, %sp	/* allocate one 96-byte ABI stack frame */
	clr	%fp
	clr	%i7

#ifdef CONFIG_INIT_STACKS
	/* In-place memset() to avoid register window related traps. */
	set	0xaaaaaaaa, %l0
	mov	%l0, %l1
1:
	std	%l0, [%g2]
	add	%g2, 8, %g2
	cmp	%g2, %l3
	bne	1b
	 nop
#endif

	call	z_bss_zero
	 nop
	call	z_prep_c
	 nop

/* We halt the system by generating a "trap in trap" condition. */
GTEXT(arch_system_halt)
SECTION_FUNC(TEXT, arch_system_halt)
	/*
	 * NOTE(review): writes with destination %g0 are discarded on SPARC;
	 * the two mov instructions below look like reason-code plumbing —
	 * confirm intent against upstream.
	 */
	mov	%o0, %g0
	mov	%g1, %g0
	set	1, %g1
	ta	0x00
/content/code_sandbox/arch/sparc/core/reset_trap.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
455
/*
 * (license header stripped in this dump)
 */

/**
 * @file
 * @brief Full C support initialization
 */

#include <kernel_internal.h>

/**
 * @brief Prepare to and run C code
 *
 * This routine prepares for the execution of and runs C code.
 * Called from the reset trap handler (__sparc_trap_reset) once a stack
 * has been set up and .bss has been zeroed. Never returns.
 */
void z_prep_c(void)
{
	/* Copy initialized data from ROM to RAM, then start the kernel. */
	z_data_copy();
	z_cstart();
	CODE_UNREACHABLE;
}
/content/code_sandbox/arch/sparc/core/prep_c.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
73
/*
 * (license header stripped in this dump)
 */

#ifndef ZEPHYR_ARCH_SPARC_CORE_STACK_H_
#define ZEPHYR_ARCH_SPARC_CORE_STACK_H_

/*
 * Offsets for SPARC ABI stack frame.
 *
 * Reference: System V Application Binary Interface, SPARC Processor
 * Supplement, Third Edition, Page 3-35.
 *
 * The first 0x40 bytes of every frame are the register window save area
 * (8 locals + 8 ins); %sp must always point at such an area while traps
 * are enabled.
 */
#define STACK_FRAME_L0_OFFSET                           0x00
#define STACK_FRAME_L1_OFFSET                           0x04
#define STACK_FRAME_L2_OFFSET                           0x08
#define STACK_FRAME_L3_OFFSET                           0x0c
#define STACK_FRAME_L4_OFFSET                           0x10
#define STACK_FRAME_L5_OFFSET                           0x14
#define STACK_FRAME_L6_OFFSET                           0x18
#define STACK_FRAME_L7_OFFSET                           0x1c
#define STACK_FRAME_I0_OFFSET                           0x20
#define STACK_FRAME_I1_OFFSET                           0x24
#define STACK_FRAME_I2_OFFSET                           0x28
#define STACK_FRAME_I3_OFFSET                           0x2c
#define STACK_FRAME_I4_OFFSET                           0x30
#define STACK_FRAME_I5_OFFSET                           0x34
#define STACK_FRAME_I6_OFFSET                           0x38
#define STACK_FRAME_I7_OFFSET                           0x3c
#define STACK_FRAME_STRUCTURE_RETURN_ADDRESS_OFFSET     0x40
#define STACK_FRAME_SAVED_ARG0_OFFSET                   0x44
#define STACK_FRAME_SAVED_ARG1_OFFSET                   0x48
#define STACK_FRAME_SAVED_ARG2_OFFSET                   0x4c
#define STACK_FRAME_SAVED_ARG3_OFFSET                   0x50
#define STACK_FRAME_SAVED_ARG4_OFFSET                   0x54
#define STACK_FRAME_SAVED_ARG5_OFFSET                   0x58
#define STACK_FRAME_PAD0_OFFSET                         0x5c
#define STACK_FRAME_SIZE                                0x60

/*
 * Interrupt stack frame
 *
 * The first 0x40 bytes are left free as a register window save area
 * (the interrupt trap handler keeps traps enabled with %sp pointing
 * here), so the saved special/global registers start at offset 0x40.
 */
#define ISF_PSR_OFFSET          (0x40 + 0x00)
#define ISF_PC_OFFSET           (0x40 + 0x04)
#define ISF_NPC_OFFSET          (0x40 + 0x08)
#define ISF_G1_OFFSET           (0x40 + 0x0c)
#define ISF_G2_OFFSET           (0x40 + 0x10)
#define ISF_G3_OFFSET           (0x40 + 0x14)
#define ISF_G4_OFFSET           (0x40 + 0x18)
#define ISF_Y_OFFSET            (0x40 + 0x1c)

#if !defined(_FLAT)
/*
 * Save area (0x40) + saved special/global registers (0x20).
 *
 * FIX: the frame must cover the register save area preceding the ISF
 * fields above; a bare 0x20 would place the ISF stores above %fp,
 * corrupting the interrupted frame.
 */
#define ISF_SIZE                (0x40 + 0x20)
#else
/*
 * The flat ABI stores and loads "local" and "in" registers in the save area as
 * part of function prologue and epilogue. So we allocate space for a new save
 * area (0x40 byte) as part of the interrupt stack frame.
 */
#define ISF_SIZE                (0x40 + 0x40 + 0x20)
#endif

#endif /* ZEPHYR_ARCH_SPARC_CORE_STACK_H_ */
/content/code_sandbox/arch/sparc/core/stack_offsets.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
636
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/kernel_structs.h> #include <kernel_internal.h> #include <kernel_tls.h> #include <zephyr/app_memory/app_memdomain.h> #include <zephyr/sys/util.h> size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr) { new_thread->tls = POINTER_TO_UINT(stack_ptr); stack_ptr -= z_tls_data_size(); z_tls_copy(stack_ptr); return z_tls_data_size(); } ```
/content/code_sandbox/arch/sparc/core/tls.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
106
/*
 * (license header stripped in this dump)
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/sparc/sparc.h>

GTEXT(__sparc_trap_sw_set_pil)

/*
 * Set processor interrupt level
 *
 * Handler for SPARC trap 0x89: trap_instruction, defined as "Reserved for the
 * operating system" by SPARC-ABI.
 *
 * entry:
 * - %l0: psr
 * - %l1: pc
 * - %l2: npc
 * - %i0: New processor interrupt level
 *
 * return:
 * - %i0: Old processor interrupt level
 */
SECTION_FUNC(TEXT, __sparc_trap_sw_set_pil)
	/* %l5: new %psr — old psr with the PIL field replaced */
	sll	%i0, PSR_PIL_BIT, %i0
	andn	%l0, PSR_PIL, %l5
	or	%l5, %i0, %l5
	wr	%l5, %psr
	nop				/* 3-cycle wrpsr delay */
	nop
	nop
	/* Extract old PIL from the trap-time psr as the return value. */
	and	%l0, PSR_PIL, %l3
	srl	%l3, PSR_PIL_BIT, %i0
	/* Return past the trapping "ta": resume at npc, npc+4. */
	jmp	%l2
	 rett	%l2 + 4
/content/code_sandbox/arch/sparc/core/sw_trap_set_pil.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
284
/*
 * (license header stripped in this dump)
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/arch/sparc/sparc.h>

GTEXT(__sparc_trap_except_reason)
GTEXT(__sparc_trap_fault)

/*
 * Fault trap handler
 *
 * - IU state is saved and restored
 *
 * On entry:
 * %l0: psr (set by trap code)
 * %l1: pc
 * %l2: npc
 * %l6: tbr (set by trap code)
 * %fp: %sp of current register window at trap time
 * %g1: reason
 *
 * This trap handler will trash some of the global registers, which is OK since
 * we will not return to where we trapped.
 */
SECTION_FUNC(TEXT, __sparc_trap_except_reason)
	mov	%g1, %l7		/* %l7 = fatal error reason */
.Ldoit:
	/* %g2, %g3 are used at manual window overflow so save temporarily */
	mov	%g2, %l4
	mov	%g3, %l5

	/* We may have trapped into the invalid window. If so, make it valid. */
	rd	%wim, %g2
	mov	%g2, %l3		/* %l3 = wim at trap time */
	srl	%g2, %l0, %g3		/* shift by CWP (low bits of psr) */
	cmp	%g3, 1
	bne	.Lwodone
	 nop

	/* Do the window overflow. */
	sll	%g2, (CONFIG_SPARC_NWIN-1), %g3
	srl	%g2, 1, %g2
	or	%g2, %g3, %g2		/* wim rotated right by one */

	/* Enter window to save. */
	save
	/* Install new wim calculated above. */
	mov	%g2, %wim
	nop
	nop
	nop
	/* Put registers on the dedicated save area of the ABI stack frame. */
	std	%l0, [%sp + 0x00]
	std	%l2, [%sp + 0x08]
	std	%l4, [%sp + 0x10]
	std	%l6, [%sp + 0x18]
	std	%i0, [%sp + 0x20]
	std	%i2, [%sp + 0x28]
	std	%i4, [%sp + 0x30]
	std	%i6, [%sp + 0x38]
	/* Leave saved window. */
	restore

.Lwodone:
	mov	%l4, %g2		/* restore trap-time %g2, %g3 */
	mov	%l5, %g3

	/* Allocate an ABI stack frame and exception stack frame */
	sub	%fp, 96 + __struct_arch_esf_SIZEOF, %sp
	/*
	 * %fp: %sp of interrupted task
	 * %sp: %sp of interrupted task - ABI_frame - esf
	 */
	mov	%l7, %o0		/* arg0 = reason */

	/* Fill in the content of the exception stack frame */
#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
	std	%i0, [%sp + 96 + __struct_arch_esf_out_OFFSET + 0x00]
	std	%i2, [%sp + 96 + __struct_arch_esf_out_OFFSET + 0x08]
	std	%i4, [%sp + 96 + __struct_arch_esf_out_OFFSET + 0x10]
	std	%i6, [%sp + 96 + __struct_arch_esf_out_OFFSET + 0x18]
	std	%g0, [%sp + 96 + __struct_arch_esf_global_OFFSET + 0x00]
	std	%g2, [%sp + 96 + __struct_arch_esf_global_OFFSET + 0x08]
	std	%g4, [%sp + 96 + __struct_arch_esf_global_OFFSET + 0x10]
	std	%g6, [%sp + 96 + __struct_arch_esf_global_OFFSET + 0x18]
#endif
	std	%l0, [%sp + 96 + __struct_arch_esf_psr_OFFSET]	/* psr pc */
	std	%l2, [%sp + 96 + __struct_arch_esf_npc_OFFSET]	/* npc wim */
	rd	%y, %l7
	std	%l6, [%sp + 96 + __struct_arch_esf_tbr_OFFSET]	/* tbr y */

	/* Enable traps, raise PIL to mask all maskable interrupts. */
	or	%l0, PSR_PIL, %o2
	wr	%o2, PSR_ET, %psr
	nop
	nop
	nop

#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
	/* Flush all register windows to the stack. */
	.rept	CONFIG_SPARC_NWIN-1
	save	%sp, -64, %sp
	.endr
	.rept	CONFIG_SPARC_NWIN-1
	restore
	.endr
#endif

	/*
	 * reason is the first argument.
	 * Exception stack frame prepared earlier is the second argument.
	 */
	call	z_sparc_fatal_error
	 add	%sp, 96, %o1		/* delay slot: arg1 = &esf */

/*
 * Entry for trap we don't handle explicitly
 *
 * Just drop into __sparc_trap_except_reason with reason set to
 * K_ERR_CPU_EXCEPTION. Note that "reason" is transported in %l7 of the
 * trapped-into window and global %g1 is preserved.
 */
SECTION_FUNC(TEXT, __sparc_trap_fault)
	b	.Ldoit
	/* K_ERR_CPU_EXCEPTION */
	 mov	%g0, %l7
/content/code_sandbox/arch/sparc/core/fault_trap.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,201
/*
 * (license header stripped in this dump)
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/arch/sparc/sparc.h>
#include "stack_offsets.h"

GTEXT(__sparc_trap_interrupt)
GTEXT(__sparc_trap_irq_offload)

/*
 * Interrupt trap handler
 *
 * - IU state is saved and restored
 *
 * On entry:
 * %l0: psr (set by trap code)
 * %l1: pc
 * %l2: npc
 * %l3: SPARC interrupt request level (bp_IRL)
 * %fp: %sp of current register window at trap time
 *
 * This module also implements the IRQ offload support. The handling is the
 * same as for asynchronous maskable interrupts, with the following exceptions:
 * - Do not re-execute the causing (ta) instruction at trap exit.
 * - A dedicated interrupt request level (0x8d) is used.
 * - z_sparc_enter_irq() knows how to interpret this interrupt request level.
 */
SECTION_SUBSEC_FUNC(TEXT, __sparc_trap_interrupt, __sparc_trap_irq_offload)
	/* Preparation in the case of synchronous IRQ offload. */
	mov	%l2, %l1	/* return to npc: skip the "ta" instruction */
	add	%l2, 4, %l2
	set	0x8d, %l3	/* dedicated offload request level */

__sparc_trap_interrupt:
	/* %g2, %g3 are used at manual window overflow so save temporarily */
	mov	%g2, %l4
	mov	%g3, %l5

	/* We may have trapped into the invalid window. If so, make it valid. */
	rd	%wim, %g2
	srl	%g2, %l0, %g3		/* shift by CWP (low bits of psr) */
	cmp	%g3, 1
	bne	.Lwodone
	 nop

	/* Do the window overflow. */
	sll	%g2, (CONFIG_SPARC_NWIN-1), %g3
	srl	%g2, 1, %g2
	or	%g2, %g3, %g2		/* wim rotated right by one */

	/* Enter window to save. */
	save
	/* Install new wim calculated above. */
	mov	%g2, %wim
	nop
	nop
	nop
	/* Put registers on the dedicated save area of the ABI stack frame. */
	std	%l0, [%sp + 0x00]
	std	%l2, [%sp + 0x08]
	std	%l4, [%sp + 0x10]
	std	%l6, [%sp + 0x18]
	std	%i0, [%sp + 0x20]
	std	%i2, [%sp + 0x28]
	std	%i4, [%sp + 0x30]
	std	%i6, [%sp + 0x38]
	/* Leave saved window. */
	restore

.Lwodone:
	/*
	 * %l4: %g2 at trap time
	 * %l5: %g3 at trap time
	 *
	 * Save the state of the interrupted task including global registers on
	 * the task stack.
	 *
	 * IMPORTANT: Globals are saved here as well on the task stack, since a
	 * context switch might happen before the context of this interrupted
	 * task is restored.
	 */

	/* Allocate stack for isr context. */
	sub	%fp, ISF_SIZE, %sp
	/*
	 * %fp: %sp of interrupted task
	 * %sp: %sp of interrupted task - ISF_SIZE.
	 *      (fits what we store here)
	 *
	 * Save the interrupted context.
	 */
	std	%l0, [%sp + ISF_PSR_OFFSET]	/* psr pc */
	st	%l2, [%sp + ISF_NPC_OFFSET]	/* npc */
	st	%g1, [%sp + ISF_G1_OFFSET]	/* g1 */
	std	%l4, [%sp + ISF_G2_OFFSET]	/* g2 g3 */
	st	%g4, [%sp + ISF_G4_OFFSET]	/* g4 */
	rd	%y, %g1
	st	%g1, [%sp + ISF_Y_OFFSET]	/* y */

	/* %l5: reference to _kernel */
	set	_kernel, %l5
	/* Switch to interrupt stack. */
	mov	%sp, %fp
	ld	[%l5 + _kernel_offset_to_irq_stack], %sp

	/* Allocate a full C stack frame */
	sub	%sp, STACK_FRAME_SIZE, %sp
	/*
	 * %fp: %sp of interrupted task - ISF_SIZE.
	 * %sp: irq stack - 96. An ABI frame
	 */

	/* Enable traps, raise PIL to mask all maskable interrupts. */
	or	%l0, PSR_PIL, %l6
#if defined(CONFIG_FPU)
	/*
	 * We now check if the interrupted context was using the FPU. The
	 * result is stored in register l5 which will either get the value 0
	 * (FPU not used) or PSR_EF (FPU used).
	 *
	 * If the FPU was used by the interrupted context, then we do two
	 * things:
	 * 1. Store FSR to memory. This has the side-effect of completing all
	 *    pending FPU operations.
	 * 2. Disable FPU. Floating point instructions in the ISR will trap.
	 *
	 * The FPU is be enabled again if needed after the ISR has returned.
	 */
	set	PSR_EF, %l5
	andcc	%l0, %l5, %l5
	bne,a	1f
	 st	%fsr, [%sp]		/* annulled unless FPU was in use */
1:
	andn	%l6, %l5, %l6
#endif
	wr	%l6, PSR_ET, %psr
	nop
	nop
	nop

#ifdef CONFIG_SCHED_THREAD_USAGE
	call	z_sched_usage_stop
	 nop
#endif
#ifdef CONFIG_TRACING_ISR
	call	sys_trace_isr_enter
	 nop
#endif

	/* SPARC interrupt request level is the first argument */
	call	z_sparc_enter_irq
	 mov	%l3, %o0

#ifdef CONFIG_TRACING_ISR
	call	sys_trace_isr_exit
	 nop
#endif

	/*
	 * %fp: %sp of interrupted task - ISF_SIZE.
	 * %sp: irq stack - 96. An ABI frame
	 */
#ifdef CONFIG_PREEMPT_ENABLED
	/* allocate stack for calling C function and for its output value */
	sub	%fp, (96+8), %sp
	/*
	 * %fp: %sp of interrupted task - ISF_SIZE.
	 * %sp: %sp of interrupted task - ISF_SIZE - STACK_FRAME_SIZE - 8.
	 */
	call	z_arch_get_next_switch_handle
	 add	%sp, 96, %o0		/* delay slot: &old on the stack */
	/* we get old thread as "return value" on stack */
	ld	[%sp + 96], %o1
	/*
	 * o0: new thread
	 * o1: old thread
	 */
	cmp	%o0, %o1
	beq	.Lno_reschedule
	/* z_sparc_context_switch() is a leaf function not using stack. */
	 add	%sp, (96+8-64), %sp	/* delay slot: executed either way */

#if defined(CONFIG_FPU_SHARING)
	/* IF PSR_EF at trap time then store the FP context. */
	cmp	%l5, 0
	be	.Lno_fp_context
	 nop

	/*
	 * PSR_EF was 1 at trap time so save the FP registers on stack.
	 * - Set PSR_EF so we can access the FP registers.
	 * - Allocate space for the FP registers above the save area used for
	 *   the z_sparc_context_switch() call.
	 */
	wr	%l6, %l5, %psr
	nop
	nop
	nop

	sub	%sp, 34 * 4, %sp	/* 32 FP regs + FSR (+pad) */
	std	%f0,  [%sp + 64 + 0x00]
	std	%f2,  [%sp + 64 + 0x08]
	std	%f4,  [%sp + 64 + 0x10]
	std	%f6,  [%sp + 64 + 0x18]
	std	%f8,  [%sp + 64 + 0x20]
	std	%f10, [%sp + 64 + 0x28]
	std	%f12, [%sp + 64 + 0x30]
	std	%f14, [%sp + 64 + 0x38]
	std	%f16, [%sp + 64 + 0x40]
	std	%f18, [%sp + 64 + 0x48]
	std	%f20, [%sp + 64 + 0x50]
	std	%f22, [%sp + 64 + 0x58]
	std	%f24, [%sp + 64 + 0x60]
	std	%f26, [%sp + 64 + 0x68]
	std	%f28, [%sp + 64 + 0x70]
	std	%f30, [%sp + 64 + 0x78]
	call	z_sparc_context_switch
	 st	%fsr, [%sp + 64 + 0x80]	/* delay slot */
	ldd	[%sp + 64 + 0x00], %f0
	ldd	[%sp + 64 + 0x08], %f2
	ldd	[%sp + 64 + 0x10], %f4
	ldd	[%sp + 64 + 0x18], %f6
	ldd	[%sp + 64 + 0x20], %f8
	ldd	[%sp + 64 + 0x28], %f10
	ldd	[%sp + 64 + 0x30], %f12
	ldd	[%sp + 64 + 0x38], %f14
	ldd	[%sp + 64 + 0x40], %f16
	ldd	[%sp + 64 + 0x48], %f18
	ldd	[%sp + 64 + 0x50], %f20
	ldd	[%sp + 64 + 0x58], %f22
	ldd	[%sp + 64 + 0x60], %f24
	ldd	[%sp + 64 + 0x68], %f26
	ldd	[%sp + 64 + 0x70], %f28
	ldd	[%sp + 64 + 0x78], %f30
	ld	[%sp + 64 + 0x80], %fsr
	ba	.Lno_reschedule
	 add	%sp, 34 * 4, %sp	/* delay slot: release FP area */
.Lno_fp_context:
#endif /* CONFIG_FPU_SHARING */

	call	z_sparc_context_switch
	 nop
.Lno_reschedule:
#endif /* CONFIG_PREEMPT_ENABLED */

	/* Restore the interrupted context. */
	ld	[%fp + ISF_Y_OFFSET], %g1
	wr	%g1, 0, %y
	ldd	[%fp + ISF_PSR_OFFSET], %l0	/* psr, pc */
	ld	[%fp + ISF_NPC_OFFSET], %l2	/* npc */
	/* NOTE: %g1 will be restored later */

	/* %g1 is used to access the stack frame later */
	mov	%fp, %g1
	ldd	[%fp + ISF_G2_OFFSET], %g2
	ld	[%fp + ISF_G4_OFFSET], %g4
	add	%fp, ISF_SIZE, %fp

	/*
	 * Install the PSR we got from the interrupt context. Current PSR.CWP
	 * is preserved. Keep PSR.ET=0 until we do "rett".
	 */
	rd	%psr, %l3
	and	%l3, PSR_CWP, %l3
	andn	%l0, (PSR_CWP | PSR_ET), %l0
	or	%l3, %l0, %l0
	mov	%l0, %psr
	nop
	nop
	nop

	/* Calculate %l6 := (cwp+1) % NWIN */
	rd	%wim, %l3
	set	(CONFIG_SPARC_NWIN), %l7
	add	%l0, 1, %l6
	and	%l6, PSR_CWP, %l6
	cmp	%l6, %l7
	bge,a	.Lwrapok
	 mov	0, %l6

.Lwrapok:
	/* Determine if we must prepare the return window. */
	/* %l5 := %wim >> (cwp+1) */
	srl	%l3, %l6, %l5
	/* %l5 is 1 if (cwp+1) is an invalid window */
	cmp	%l5, 1
	bne	.Lwudone
	 sub	%l7, 1, %l7		/* %l7 := NWIN - 1 */

	/* Do the window underflow. */
	sll	%l3, 1, %l4
	srl	%l3, %l7, %l5
	wr	%l4, %l5, %wim		/* wim rotated left by one */
	nop
	nop
	nop

	restore
	ldd	[%g1 + 0x00], %l0
	ldd	[%g1 + 0x08], %l2
	ldd	[%g1 + 0x10], %l4
	ldd	[%g1 + 0x18], %l6
	ldd	[%g1 + 0x20], %i0
	ldd	[%g1 + 0x28], %i2
	ldd	[%g1 + 0x30], %i4
	ldd	[%g1 + 0x38], %i6
	save

.Lwudone:
	/*
	 * Restore %psr since we may have trashed condition codes. PSR.ET is
	 * still 0.
	 */
	wr	%l0, %psr
	nop
	nop
	nop

	/* restore g1 */
	ld	[%g1 + ISF_G1_OFFSET], %g1

	jmp	%l1
	 rett	%l2
/content/code_sandbox/arch/sparc/core/interrupt_trap.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,220
/*
 * (license header stripped in this dump)
 */

/*
 * This file contains standard handlers for the SPARC V8 window overflow and
 * underflow traps. It also implements the handler for SPARC-ABI
 * "Flush windows" which is used for example by longjmp() and C++ exceptions.
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/sparc/sparc.h>

GTEXT(__sparc_trap_window_overflow)
GTEXT(__sparc_trap_window_underflow)
GTEXT(__sparc_trap_flush_windows)

/*
 * Window overflow: a "save" entered the invalid window. Flush the oldest
 * window to its stack save area and rotate WIM right by one. WIM update is
 * interleaved with the stores to fill the wrwim delay slots.
 */
SECTION_FUNC(TEXT, __sparc_trap_window_overflow)
	/* Enter the window to be stored. */
	save
	/* Save local register set. */
	std	%l0, [%sp + 0x00]
	std	%l2, [%sp + 0x08]
	std	%l4, [%sp + 0x10]
	rd	%wim, %l3
	std	%l6, [%sp + 0x18]
	/* l2 := WIM << (NWIN-1) */
	sll	%l3, (CONFIG_SPARC_NWIN-1), %l2
	/* Save input register set. */
	std	%i0, [%sp + 0x20]
	/* l3 := WIM >> 1 */
	srl	%l3, 1, %l3
	std	%i2, [%sp + 0x28]
	/* WIM := (WIM >> 1) ^ (WIM << (NWIN-1)) */
	wr	%l3, %l2, %wim
	/* NOTE: 3 instruction before restore (delayed write instruction) */
	std	%i4, [%sp + 0x30]
	nop
	std	%i6, [%sp + 0x38]
	/* Go back to trap window. */
	restore
	/* Re-execute save. */
	jmp	%l1
	 rett	%l2

/*
 * Window underflow: a "restore" (or rett) entered the invalid window.
 * Rotate WIM left by one and reload the window's registers from its stack
 * save area.
 */
SECTION_FUNC(TEXT, __sparc_trap_window_underflow)
	rd	%wim, %l3
	/* l4 := WIM << 1 */
	sll	%l3, 1, %l4
	/* l5 := WIM >> (NWIN-1) */
	srl	%l3, (CONFIG_SPARC_NWIN-1), %l5
	/* WIM := (WIM << 1) ^ (WIM >> (NWIN-1)) */
	wr	%l4, %l5, %wim
	/* WIM is implicitly read so nops are needed. */
	nop
	nop
	nop
	/* Enter the window to restore requires two restore instructions. */
	restore
	restore
	ldd	[%sp + 0x00], %l0
	ldd	[%sp + 0x08], %l2
	ldd	[%sp + 0x10], %l4
	ldd	[%sp + 0x18], %l6
	ldd	[%sp + 0x20], %i0
	ldd	[%sp + 0x28], %i2
	ldd	[%sp + 0x30], %i4
	ldd	[%sp + 0x38], %i6
	/* Go back to the trap window. */
	save
	save
	/* Re-execute restore. */
	jmp	%l1
	 rett	%l2

/*
 * Handler for SPARC trap 0x83: trap_instruction, defined as "Flush windows" by
 * SPARC-ABI:
 * "By executing a type 3 trap, a process asks the system to flush all its
 * register windows to the stack."
 *
 * On entry:
 * %l0: psr
 * %l1: pc
 * %l2: npc
 */
SECTION_FUNC(TEXT, __sparc_trap_flush_windows)
	/* Save global registers used by the routine */
	mov	%g3, %l3
	mov	%g4, %l4
	mov	%g5, %l5
	mov	%g1, %l6
	mov	%g2, %l7

	/* Uses g3=psr, g4=1, g2=wim, g1,g5=scratch */
	mov	%l0, %g3
	set	1, %g4
	rd	%wim, %g2

	/*
	 * We can always restore the previous window. Check if we can restore
	 * the window after that.
	 */
	and	%l0, PSR_CWP, %g1
	add	%g1, 2, %g1
	ba	.LcheckNextWindow
	 restore			/* delay slot: back to trapper's window */

/* Flush window to stack */
.LflushWindow:
	std	%l0, [%sp + 0x00]
	std	%l2, [%sp + 0x08]
	std	%l4, [%sp + 0x10]
	std	%l6, [%sp + 0x18]
	std	%i0, [%sp + 0x20]
	std	%i2, [%sp + 0x28]
	std	%i4, [%sp + 0x30]
	std	%i6, [%sp + 0x38]

	/*
	 * Check if next window is invalid by comparing
	 * (1 << ((cwp + 1) % NWIN)) with WIM
	 */
.LcheckNextWindow:
	set	CONFIG_SPARC_NWIN, %g5
	cmp	%g1, %g5
	bge,a	.Lnowrap
	 sub	%g1, %g5, %g1		/* annulled unless wrap needed */
.Lnowrap:
	sll	%g4, %g1, %g5
	cmp	%g5, %g2
	be	.LflushWindowDone
	 inc	%g1			/* delay slot: executed either way */

	/* We need to flush the next window */
	ba	.LflushWindow
	 restore

	/*
	 * All used windows have been flushed. Set WIM to cause trap for CWP+2.
	 * When we return from this trap it will be CWP+1 that will trap, that
	 * is, the next restore or rett.
	 */
.LflushWindowDone:
	/* We can not restore %psr from %l0 because we may be in any window. */
	wr	%g3, %psr
	and	%g3, PSR_CWP, %g1
	add	%g1, 2, %g1
	set	CONFIG_SPARC_NWIN, %g5
	/* We are now back in the trap window. */
	cmp	%g1, %g5
	bge,a	.Lnowrap2
	 sub	%g1, %g5, %g1
.Lnowrap2:
	sll	%g4, %g1, %g1
	wr	%g1, %wim

	/* Restore the globals we used as scratch. */
	mov	%l3, %g3
	mov	%l4, %g4
	mov	%l5, %g5
	mov	%l6, %g1
	mov	%l7, %g2
	/* Return past the trapping "ta": resume at npc, npc+4. */
	jmp	%l2
	 rett	%l2 + 4
/content/code_sandbox/arch/sparc/core/window_trap.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,581
```c /* * */ #include <zephyr/kernel.h> #include <kernel_internal.h> #include <kswap.h> #include <zephyr/logging/log.h> LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); FUNC_NORETURN void z_irq_spurious(const void *unused) { uint32_t tbr; ARG_UNUSED(unused); __asm__ volatile ( "rd %%tbr, %0" : "=r" (tbr) ); LOG_ERR("Spurious interrupt detected! IRQ: %d", (tbr >> 4) & 0xf); z_sparc_fatal_error(K_ERR_SPURIOUS_IRQ, NULL); } void z_sparc_enter_irq(uint32_t irl) { struct _isr_table_entry *ite; _current_cpu->nested++; #ifdef CONFIG_IRQ_OFFLOAD if (irl != 141U) { irl = z_sparc_int_get_source(irl); ite = &_sw_isr_table[irl]; ite->isr(ite->arg); } else { z_irq_do_offload(); } #else /* Get the actual interrupt source from the interrupt controller */ irl = z_sparc_int_get_source(irl); ite = &_sw_isr_table[irl]; ite->isr(ite->arg); #endif _current_cpu->nested--; #ifdef CONFIG_STACK_SENTINEL z_check_stack_sentinel(); #endif } ```
/content/code_sandbox/arch/sparc/core/irq_manage.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
306
/*
 * (license header stripped in this dump)
 */

#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

/*
 * EXAMPLE OUTPUT
 *
 * tt = 0x02, illegal_instruction
 *
 *       INS        LOCALS     OUTS       GLOBALS
 *   0:  00000000   f3900fc0   40007c50   00000000
 *   1:  00000000   40004bf0   40008d30   40008c00
 *   2:  00000000   40004bf4   40008000   00000003
 *   3:  40009158   00000000   40009000   00000002
 *   4:  40008fa8   40003c00   40008fa8   00000008
 *   5:  40009000   f3400fc0   00000000   00000080
 *   6:  4000a1f8   40000050   4000a190   00000000
 *   7:  40002308   00000000   40001fb8   000000c1
 *
 * psr: f30000c7 wim: 00000008 tbr: 40000020 y: 00000000
 *  pc: 4000a1f4 npc: 4000a1f8
 *
 *       pc         sp
 *  #0   4000a1f4   4000a190
 *  #1   40002308   4000a1f8
 *  #2   40003b24   4000a258
 *
 *
 * INTERPRETATION
 *
 * INS, LOCALS, OUTS and GLOBALS represent the %i, %l, %o and %g
 * registers before the trap was taken.
 *
 * wim, y, pc and npc are the values before the trap was taken.
 * tbr has the tbr.tt field (bits 11..4) filled in by hardware
 * representing the current trap type. psr is read immediately
 * after the trap was taken so it will have the new CWP and ET=0.
 *
 * The "#i pc sp" rows is the stack backtrace. All register
 * windows are flushed to the stack prior to printing. First row
 * is the trapping pc and sp (o6).
 *
 *
 * HOW TO USE
 *
 * When investigating a crashed program, the first things to look
 * at is typically the tt, pc and sp (o6). You can lookup the pc
 * in the assembly list file or use addr2line. In the listing, the
 * register values in the table above can be used. The linker map
 * file will give a hint on which stack is active and if it has
 * overflowed.
 *
 * psr bits 11..8 is the processor interrupt (priority) level. 0
 * is lowest priority level (all can be taken), and 0xf is the
 * highest level where only non-maskable interrupts are taken.
 *
 * g0 is always zero. g5, g6 are never accessed by the compiler.
 * g7 is the TLS pointer if enabled. A SAVE instruction decreases
 * the current window pointer (psr bits 4..0) which results in %o
 * registers becoming %i registers and a new set of %l registers
 * appear. RESTORE does the opposite.
 */

/*
 * The SPARC V8 ABI guarantees that the stack pointer register
 * (o6) points to an area organized as "struct savearea" below at
 * all times when traps are enabled. This is the register save
 * area where register window registers can be flushed to the
 * stack.
 *
 * We flushed registers to this space in the fault trap entry
 * handler. Note that the space is allocated by the ABI (compiler)
 * for each stack frame.
 *
 * When printing the registers, we get the "local" and "in"
 * registers from the ABI stack save area, while the "out" and
 * "global" registers are taken from the exception stack frame
 * generated in the fault trap entry.
 */
struct savearea {
	uint32_t local[8];	/* %l0..%l7 */
	uint32_t in[8];		/* %i0..%i7 (%i6 = fp, %i7 = return pc) */
};

#if CONFIG_EXCEPTION_DEBUG
/*
 * Exception trap type (tt) values according to The SPARC V8
 * manual, Table 7-1.
 */
static const struct {
	int tt;
	const char *desc;
} TTDESC[] = {
	{ .tt = 0x02, .desc = "illegal_instruction", },
	{ .tt = 0x07, .desc = "mem_address_not_aligned", },
	{ .tt = 0x2B, .desc = "data_store_error", },
	{ .tt = 0x29, .desc = "data_access_error", },
	{ .tt = 0x09, .desc = "data_access_exception", },
	{ .tt = 0x21, .desc = "instruction_access_error", },
	{ .tt = 0x01, .desc = "instruction_access_exception", },
	{ .tt = 0x04, .desc = "fp_disabled", },
	{ .tt = 0x08, .desc = "fp_exception", },
	{ .tt = 0x2A, .desc = "division_by_zero", },
	{ .tt = 0x03, .desc = "privileged_instruction", },
	{ .tt = 0x20, .desc = "r_register_access_error", },
	{ .tt = 0x0B, .desc = "watchpoint_detected", },
	{ .tt = 0x2C, .desc = "data_access_MMU_miss", },
	{ .tt = 0x3C, .desc = "instruction_access_MMU_miss", },
	{ .tt = 0x05, .desc = "window_overflow", },
	{ .tt = 0x06, .desc = "window_underflow", },
	{ .tt = 0x0A, .desc = "tag_overflow", },
};

/* Decode tbr.tt into a human-readable trap name and log it. */
static void print_trap_type(const struct arch_esf *esf)
{
	const int tt = (esf->tbr & TBR_TT) >> TBR_TT_BIT;
	const char *desc = "unknown";

	if (tt & 0x80) {
		/* tt >= 0x80: software trap ("ta" instruction) */
		desc = "trap_instruction";
	} else if (tt >= 0x11 && tt <= 0x1F) {
		desc = "interrupt";
	} else {
		for (int i = 0; i < ARRAY_SIZE(TTDESC); i++) {
			if (TTDESC[i].tt == tt) {
				desc = TTDESC[i].desc;
				break;
			}
		}
	}
	LOG_ERR("tt = 0x%02X, %s", tt, desc);
}

/*
 * Print %i/%l (from the flushed save area at trap-time sp) and %o/%g
 * (from the esf) register sets, one row per register index.
 */
static void print_integer_registers(const struct arch_esf *esf)
{
	/* esf->out[6] is the trap-time %sp, which points at a savearea. */
	const struct savearea *flushed = (struct savearea *) esf->out[6];

	LOG_ERR(" INS LOCALS OUTS GLOBALS");
	for (int i = 0; i < 8; i++) {
		LOG_ERR(
			" %d: %08x %08x %08x %08x", i,
			flushed ? flushed->in[i] : 0,
			flushed ? flushed->local[i] : 0,
			esf->out[i],
			esf->global[i]
		);
	}
}

/* Print psr/wim/tbr/y and the trap pc/npc pair. */
static void print_special_registers(const struct arch_esf *esf)
{
	LOG_ERR(
		"psr: %08x wim: %08x tbr: %08x y: %08x",
		esf->psr, esf->wim, esf->tbr, esf->y
	);
	LOG_ERR(" pc: %08x npc: %08x", esf->pc, esf->npc);
}

/*
 * Walk the frame-pointer chain starting at the trap-time sp, printing
 * return pc / sp per frame. Stops at a zero frame (ABI deepest-frame
 * marker), a misaligned sp, or MAX_LOGLINES rows.
 */
static void print_backtrace(const struct arch_esf *esf)
{
	const int MAX_LOGLINES = 40;
	const struct savearea *s = (struct savearea *) esf->out[6];

	LOG_ERR(" pc sp");
	LOG_ERR(" #0 %08x %08x", esf->pc, (unsigned int) s);
	for (int i = 1; s && i < MAX_LOGLINES; i++) {
		const uint32_t pc = s->in[7];
		const uint32_t sp = s->in[6];

		if (sp == 0U && pc == 0U) {
			break;
		}
		LOG_ERR(" #%-2d %08x %08x", i, pc, sp);
		if (sp == 0U || sp & 7U) {
			/* null or unaligned frame pointer: stop walking */
			break;
		}
		s = (const struct savearea *) sp;
	}
}

/* Full crash report: trap type, integer regs, special regs, backtrace. */
static void print_all(const struct arch_esf *esf)
{
	LOG_ERR("");
	print_trap_type(esf);
	LOG_ERR("");
	print_integer_registers(esf);
	LOG_ERR("");
	print_special_registers(esf);
	LOG_ERR("");
	print_backtrace(esf);
	LOG_ERR("");
}
#endif /* CONFIG_EXCEPTION_DEBUG */

/**
 * @brief Architecture fatal error entry point
 *
 * Optionally dumps the exception state, then hands off to the kernel's
 * z_fatal_error(). Never returns.
 *
 * @param reason fatal error reason (K_ERR_*)
 * @param esf exception stack frame built by the fault trap entry, or NULL
 */
FUNC_NORETURN void z_sparc_fatal_error(unsigned int reason,
				       const struct arch_esf *esf)
{
#if CONFIG_EXCEPTION_DEBUG
	if (esf != NULL) {
		if (IS_ENABLED(CONFIG_EXTRA_EXCEPTION_INFO)) {
			/* out/global/savearea contents are only captured
			 * when CONFIG_EXTRA_EXCEPTION_INFO is enabled.
			 */
			print_all(esf);
		} else {
			print_special_registers(esf);
		}
	}
#endif /* CONFIG_EXCEPTION_DEBUG */
	z_fatal_error(reason, esf);
	CODE_UNREACHABLE;
}
/content/code_sandbox/arch/sparc/core/fatal.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,142
/*
 * (license header stripped in this dump)
 */

/**
 * @file
 * @brief SPARC kernel structure member offset definition file
 *
 * This module is responsible for the generation of the absolute symbols whose
 * value represents the member offsets for various SPARC kernel structures.
 */

#include <zephyr/kernel.h>
#include <kernel_arch_data.h>
#include <gen_offset.h>
#include <kernel_offsets.h>

/* _callee_saved_t member offsets (consumed by switch.S) */
GEN_OFFSET_SYM(_callee_saved_t, y);
GEN_OFFSET_SYM(_callee_saved_t, psr);
GEN_OFFSET_SYM(_callee_saved_t, l0_and_l1);
GEN_OFFSET_SYM(_callee_saved_t, l2);
GEN_OFFSET_SYM(_callee_saved_t, l4);
GEN_OFFSET_SYM(_callee_saved_t, l6);
GEN_OFFSET_SYM(_callee_saved_t, i0);
GEN_OFFSET_SYM(_callee_saved_t, i2);
GEN_OFFSET_SYM(_callee_saved_t, i4);
GEN_OFFSET_SYM(_callee_saved_t, i6);
GEN_OFFSET_SYM(_callee_saved_t, o6);

/* esf member offsets (consumed by fault_trap.S) */
GEN_OFFSET_STRUCT(arch_esf, out);
GEN_OFFSET_STRUCT(arch_esf, global);
GEN_OFFSET_STRUCT(arch_esf, npc);
GEN_OFFSET_STRUCT(arch_esf, psr);
GEN_OFFSET_STRUCT(arch_esf, tbr);
GEN_ABSOLUTE_SYM(__struct_arch_esf_SIZEOF, sizeof(struct arch_esf));

GEN_ABS_SYM_END
/content/code_sandbox/arch/sparc/core/offsets/offsets.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
281
```objective-c
/*
 *
 */

/**
 * @file
 * @brief Private kernel definitions
 *
 * This file contains private kernel function/macro definitions and various
 * other definitions for the SPARC processor architecture.
 */

#ifndef ZEPHYR_ARCH_SPARC_INCLUDE_KERNEL_ARCH_FUNC_H_
#define ZEPHYR_ARCH_SPARC_INCLUDE_KERNEL_ARCH_FUNC_H_

#include <kernel_arch_data.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef _ASMLANGUAGE

/* No SPARC-specific kernel initialization is required; body intentionally empty. */
static ALWAYS_INLINE void arch_kernel_init(void)
{
}

/* Assembly routine performing the actual register save/restore (context.S). */
void z_sparc_context_switch(struct k_thread *newt, struct k_thread *oldt);

/*
 * In this implementation, the thread->switch_handle is the thread itself, so
 * the parameter "switched_from" is assumed to be the address of
 * thread->switch_handle. CONTAINER_OF() recovers the outgoing thread from
 * that field address before handing both threads to the assembly routine.
 */
static inline void arch_switch(void *switch_to, void **switched_from)
{
	struct k_thread *newt = switch_to;
	struct k_thread *oldt = CONTAINER_OF(switched_from, struct k_thread,
					     switch_handle);

	z_sparc_context_switch(newt, oldt);
}

/* Architecture fatal-error hook; never returns (ends in z_fatal_error()). */
FUNC_NORETURN void z_sparc_fatal_error(unsigned int reason,
				       const struct arch_esf *esf);

/* True while the current CPU is servicing an interrupt (per-CPU nest count). */
static inline bool arch_is_in_isr(void)
{
	return _current_cpu->nested != 0U;
}

#ifdef CONFIG_IRQ_OFFLOAD
void z_irq_do_offload(void);
#endif

#endif /* _ASMLANGUAGE */

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_ARCH_SPARC_INCLUDE_KERNEL_ARCH_FUNC_H_ */
```
/content/code_sandbox/arch/sparc/include/kernel_arch_func.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
323
```objective-c
/*
 *
 */

/**
 * @file
 * @brief Private kernel definitions
 *
 * This file contains private kernel structures definitions and various
 * other definitions for the SPARC processor architecture.
 *
 * NOTE(review): no SPARC-specific structures are currently declared here;
 * the header only pulls in the common include set and provides the
 * C++-safe linkage scaffolding for future additions.
 */

#ifndef ZEPHYR_ARCH_SPARC_INCLUDE_KERNEL_ARCH_DATA_H_
#define ZEPHYR_ARCH_SPARC_INCLUDE_KERNEL_ARCH_DATA_H_

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/cpu.h>

#ifndef _ASMLANGUAGE
#include <zephyr/kernel.h>
#include <zephyr/types.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/dlist.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_ARCH_SPARC_INCLUDE_KERNEL_ARCH_DATA_H_ */
```
/content/code_sandbox/arch/sparc/include/kernel_arch_data.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
183
```objective-c
/*
 *
 */

/*
 * Shorthand offsets used by the SPARC context switch assembly.
 *
 * Each _thread_offset_to_<member> is the byte offset of a callee-saved
 * member from the start of struct k_thread, composed as:
 *
 *   offset(k_thread.callee_saved) + offset(_callee_saved_t.<member>)
 *
 * Both component symbols are generated from arch/sparc/core/offsets/offsets.c.
 * Register pairs stored with "std" (l0_and_l1, l2, ..., o6) share a single
 * 64-bit slot, hence only even-numbered registers appear here.
 */

#ifndef ZEPHYR_ARCH_SPARC_INCLUDE_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_SPARC_INCLUDE_OFFSETS_SHORT_ARCH_H_

#include <zephyr/offsets.h>

#define _thread_offset_to_y \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_y_OFFSET)

#define _thread_offset_to_l0_and_l1 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_l0_and_l1_OFFSET)

#define _thread_offset_to_l2 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_l2_OFFSET)

#define _thread_offset_to_l4 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_l4_OFFSET)

#define _thread_offset_to_l6 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_l6_OFFSET)

#define _thread_offset_to_i0 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_i0_OFFSET)

#define _thread_offset_to_i2 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_i2_OFFSET)

#define _thread_offset_to_i4 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_i4_OFFSET)

#define _thread_offset_to_i6 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_i6_OFFSET)

#define _thread_offset_to_o6 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_o6_OFFSET)

#define _thread_offset_to_psr \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_psr_OFFSET)

#endif /* ZEPHYR_ARCH_SPARC_INCLUDE_OFFSETS_SHORT_ARCH_H_ */
```
/content/code_sandbox/arch/sparc/include/offsets_short_arch.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
343
```unknown /* * */ /* * This file contains a full SPARC V8 trap table. The processor redirects * exection to the trap table on trap events. Each trap table entrys is four * instructions. */ #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #define BAD_TRAP \ rd %psr, %l0; \ sethi %hi(__sparc_trap_fault), %l4; \ jmp %l4+%lo(__sparc_trap_fault); \ rd %tbr, %l6; #define INTERRUPT_TRAP(level) \ rd %psr, %l0; \ sethi %hi(INT_HANDLER), %l4; \ jmp %l4+%lo(INT_HANDLER); \ mov (0xf & level), %l3; #define TRAP(handler) \ rd %psr, %l0; \ sethi %hi(handler), %l4; \ jmp %l4+%lo(handler); \ rd %tbr, %l6; #define RESET_TRAP(handler) \ mov %g0, %g4; \ sethi %hi(handler), %g4; \ jmp %g4+%lo(handler); \ nop; /* * Generate the "trap in trap" condition which causes the processor to halt and * enter error mode. Typically used to stop a simulator (QEMU, TSIM) or leave * control to a hardware debug monitor (LEON DSU via GRMON). */ #define TRAP_IN_TRAP \ ta 0x00; \ nop; \ nop; \ nop; #define SOFT_TRAP BAD_TRAP #define WOF_TRAP TRAP(__sparc_trap_window_overflow) #define WUF_TRAP TRAP(__sparc_trap_window_underflow) #define FLW_TRAP TRAP(__sparc_trap_flush_windows) #define INT_HANDLER __sparc_trap_interrupt #ifdef CONFIG_IRQ_OFFLOAD #define IRQ_OFFLOAD_TRAP TRAP(__sparc_trap_irq_offload) #else #define IRQ_OFFLOAD_TRAP BAD_TRAP #endif GTEXT(__sparc_trap_table) GTEXT(__start) SECTION_SUBSEC_FUNC(TEXT, traptable, __sparc_trap_table) __start: /* * Values in the range 0 to 0x5F that are not assigned in SPARC V8 * specification Table 7-1 are reserved for future versions of the * architecture. */ RESET_TRAP(__sparc_trap_reset); ! 00 reset BAD_TRAP; ! 01 instruction_access_exception BAD_TRAP; ! 02 illegal_instruction BAD_TRAP; ! 03 priveleged_instruction BAD_TRAP; ! 04 fp_disabled WOF_TRAP; ! 05 window_overflow WUF_TRAP; ! 06 window_underflow BAD_TRAP; ! 07 mem_address_not_aligned BAD_TRAP; ! 08 fp_exception BAD_TRAP; ! 09 data_access_exception BAD_TRAP; ! 0A tag_overflow BAD_TRAP; ! 
0B watchpoint_detected BAD_TRAP; ! 0C reserved BAD_TRAP; ! 0D reserved BAD_TRAP; ! 0E reserved BAD_TRAP; ! 0F reserved BAD_TRAP; ! 10 reserved /* Interrupt traps */ INTERRUPT_TRAP(1); ! 11 interrupt_level_1 INTERRUPT_TRAP(2); ! 12 interrupt_level_2 INTERRUPT_TRAP(3); ! 13 interrupt_level_3 INTERRUPT_TRAP(4); ! 14 interrupt_level_4 INTERRUPT_TRAP(5); ! 15 interrupt_level_5 INTERRUPT_TRAP(6); ! 16 interrupt_level_6 INTERRUPT_TRAP(7); ! 17 interrupt_level_7 INTERRUPT_TRAP(8); ! 18 interrupt_level_8 INTERRUPT_TRAP(9); ! 19 interrupt_level_9 INTERRUPT_TRAP(10); ! 1A interrupt_level_1 INTERRUPT_TRAP(11); ! 1B interrupt_level_11 INTERRUPT_TRAP(12); ! 1C interrupt_level_12 INTERRUPT_TRAP(13); ! 1D interrupt_level_13 INTERRUPT_TRAP(14); ! 1E interrupt_level_14 INTERRUPT_TRAP(15); ! 1F interrupt_level_15 BAD_TRAP; ! 20 r_register_access_error BAD_TRAP; ! 21 instruction_access_error BAD_TRAP; ! 22 reserved BAD_TRAP; ! 23 reserved BAD_TRAP; ! 24 cp_disabled BAD_TRAP; ! 25 unimplemented_FLUSH BAD_TRAP; ! 26 reserved BAD_TRAP; ! 27 reserved BAD_TRAP; ! 28 cp_exception BAD_TRAP; ! 29 data_access_error BAD_TRAP; ! 2A division_by_zero BAD_TRAP; ! 2B data_store_error BAD_TRAP; ! 2C data_access_MMU_miss BAD_TRAP; ! 2D reserved BAD_TRAP; ! 2E reserved BAD_TRAP; ! 2F reserved BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 30 - 33 reserved BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 34 - 37 reserved BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 38 - 3B reserved BAD_TRAP; ! 3C instruction_access_MMU_miss BAD_TRAP; ! 3D reserved BAD_TRAP; ! 3E reserved BAD_TRAP; ! 3F reserved BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 40 - 43 undefined BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 44 - 47 undefined BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 48 - 4B undefined BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 4C - 4F undefined BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 50 - 53 undefined BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 54 - 57 undefined BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 
58 - 5B undefined BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 5C - 5F undefined /* * tt values 0x60 to 0x7F are reserved for implementation-dependent * exceptions. */ BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 60 - 63 undefined BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 64 - 67 undefined BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 68 - 6B undefined BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 6C - 6F undefined BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 70 - 73 undefined BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 74 - 77 undefined BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 78 - 7B undefined BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 7C - 7F undefined /* trap_instruction 0x80 - 0xFF */ /* NOTE: "ta 5" can be generated by compiler. */ TRAP_IN_TRAP; ! 0 System calls SOFT_TRAP; ! 1 Breakpoints SOFT_TRAP; ! 2 Division by zero FLW_TRAP; ! 3 Flush windows SOFT_TRAP; ! 4 Clean windows SOFT_TRAP; ! 5 Range checking SOFT_TRAP; ! 6 Fix alignment SOFT_TRAP; ! 7 Integer overflow SOFT_TRAP ! 8 System calls TRAP(__sparc_trap_sw_set_pil); ! 9 Reserved for the os SOFT_TRAP; ! 10 Reserved for the os SOFT_TRAP; ! 11 Reserved for the os /* See SPARC-ABI for purpose of the following software traps */ SOFT_TRAP; ! 12 IRQ_OFFLOAD_TRAP; ! 13 SOFT_TRAP; ! 14 TRAP(__sparc_trap_except_reason); ! 15 SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! 90 - 93 SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! 94 - 97 SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! 98 - 9B SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! 9C - 9F SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! A0 - A3 SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! A4 - A7 SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! A8 - AB SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! AC - AF SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! B0 - B3 SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! B4 - B7 SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! B8 - BB SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! BC - BF SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! 
C0 - C3 SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! C4 - C7 SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! C8 - CB SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! CC - CF SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! D0 - D3 SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! D4 - D7 SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! D8 - DB SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! DC - DF SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! E0 - E3 SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! E4 - E7 SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! E8 - EB SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! EC - EF SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! F0 - F3 SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! F4 - F7 SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! F8 - FB SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! FC - FF ```
/content/code_sandbox/arch/sparc/core/trap_table_mvt.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,843
```c /* * */ #include <zephyr/toolchain.h> #include <zephyr/arch/xtensa/arch.h> #include <zephyr/arch/xtensa/cache.h> #include <zephyr/kernel/mm.h> #include <zephyr/cache.h> __weak bool sys_mm_is_phys_addr_in_range(uintptr_t phys) { bool valid; uintptr_t cached = (uintptr_t)sys_cache_cached_ptr_get((void *)phys); valid = ((phys >= CONFIG_SRAM_BASE_ADDRESS) && (phys < (CONFIG_SRAM_BASE_ADDRESS + (CONFIG_SRAM_SIZE * 1024UL)))); valid |= ((cached >= CONFIG_SRAM_BASE_ADDRESS) && (cached < (CONFIG_SRAM_BASE_ADDRESS + (CONFIG_SRAM_SIZE * 1024UL)))); return valid; } __weak bool sys_mm_is_virt_addr_in_range(void *virt) { bool valid; uintptr_t addr = (uintptr_t)virt; uintptr_t cached = (uintptr_t)sys_cache_cached_ptr_get(virt); valid = ((addr >= CONFIG_KERNEL_VM_BASE) && (addr < (CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE))); valid |= ((cached >= CONFIG_KERNEL_VM_BASE) && (cached < (CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE))); return valid; } ```
/content/code_sandbox/arch/xtensa/core/mem_manage.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
269
```c /* * */ #include "xtensa/corebits.h" #include "xtensa_backtrace.h" #include <zephyr/sys/printk.h> #if defined(CONFIG_SOC_SERIES_ESP32) #include <esp_memory_utils.h> #elif defined(CONFIG_SOC_FAMILY_INTEL_ADSP) #include "debug_helpers.h" #elif defined(CONFIG_SOC_XTENSA_DC233C) #include "backtrace_helpers.h" #endif #include <xtensa_asm2_context.h> #include <xtensa_stack.h> static int mask, cause; static inline uint32_t xtensa_cpu_process_stack_pc(uint32_t pc) { if (pc & 0x80000000) { /* Top two bits of a0 (return address) specify window increment. * Overwrite to map to address space. */ if (cause != EXCCAUSE_INSTR_PROHIBITED) { pc = (pc & 0x3fffffff) | mask; } else { pc = (pc & 0x3fffffff) | 0x40000000; } } /* Minus 3 to get PC of previous instruction * (i.e. instruction executed before return address) */ return pc - 3; } static inline bool xtensa_stack_ptr_is_sane(uint32_t sp) { bool valid; #if defined(CONFIG_SOC_SERIES_ESP32) valid = esp_stack_ptr_is_sane(sp); #elif defined(CONFIG_SOC_FAMILY_INTEL_ADSP) valid = intel_adsp_ptr_is_sane(sp); #else /* Platform does not have additional requirements on * whether stack pointer is valid. So use the generic * test below. */ valid = true; #endif if (valid) { valid = !xtensa_is_outside_stack_bounds(sp, 0, UINT32_MAX); } return valid; } static inline bool xtensa_ptr_executable(const void *p) { #if defined(CONFIG_SOC_SERIES_ESP32) return esp_ptr_executable(p); #elif defined(CONFIG_SOC_FAMILY_INTEL_ADSP) return intel_adsp_ptr_executable(p); #elif defined(CONFIG_SOC_XTENSA_DC233C) return xtensa_dc233c_ptr_executable(p); #else #warning "xtensa_ptr_executable is not defined for this platform" #endif } bool xtensa_backtrace_get_next_frame(struct xtensa_backtrace_frame_t *frame) { /* Do not continue backtrace when we encounter an invalid stack * frame pointer. 
*/ if (xtensa_is_outside_stack_bounds((uintptr_t)frame->sp, 0, UINT32_MAX)) { return false; } /* Use frame(i-1)'s BS area located below frame(i)'s * sp to get frame(i-1)'s sp and frame(i-2)'s pc */ /* Base save area consists of 4 words under SP */ char *base_save = (char *)frame->sp; frame->pc = frame->next_pc; /* If next_pc = 0, indicates frame(i-1) is the last * frame on the stack */ frame->next_pc = *((uint32_t *)(base_save - 16)); frame->sp = *((uint32_t *)(base_save - 12)); /* Return true if both sp and pc of frame(i-1) are sane, * false otherwise */ return (xtensa_stack_ptr_is_sane(frame->sp) && xtensa_ptr_executable((void *) xtensa_cpu_process_stack_pc(frame->pc))); } int xtensa_backtrace_print(int depth, int *interrupted_stack) { /* Check arguments */ if (depth <= 0) { return -1; } _xtensa_irq_stack_frame_raw_t *frame = (void *)interrupted_stack; _xtensa_irq_bsa_t *bsa; /* Don't dump stack if the stack pointer is invalid as * any frame elements obtained via de-referencing the * frame pointer are probably also invalid. Or worse, * cause another access violation. 
*/ if (!xtensa_is_frame_pointer_valid(frame)) { return -1; } bsa = frame->ptr_to_bsa; cause = bsa->exccause; /* Initialize stk_frame with first frame of stack */ struct xtensa_backtrace_frame_t stk_frame; xtensa_backtrace_get_start(&(stk_frame.pc), &(stk_frame.sp), &(stk_frame.next_pc), interrupted_stack); if (cause != EXCCAUSE_INSTR_PROHIBITED) { mask = stk_frame.pc & 0xc0000000; } printk("\r\n\r\nBacktrace:"); printk("0x%08x:0x%08x ", xtensa_cpu_process_stack_pc(stk_frame.pc), stk_frame.sp); /* Check if first frame is valid */ bool corrupted = !(xtensa_stack_ptr_is_sane(stk_frame.sp) && (xtensa_ptr_executable((void *) xtensa_cpu_process_stack_pc(stk_frame.pc)) || /* Ignore the first corrupted PC in case of InstrFetchProhibited */ cause == EXCCAUSE_INSTR_PROHIBITED)); while (depth-- > 0 && stk_frame.next_pc != 0 && !corrupted) { /* Get previous stack frame */ if (!xtensa_backtrace_get_next_frame(&stk_frame)) { corrupted = true; } printk("0x%08x:0x%08x ", xtensa_cpu_process_stack_pc(stk_frame.pc), stk_frame.sp); } /* Print backtrace termination marker */ int ret = 0; if (corrupted) { printk(" |<-CORRUPTED"); ret = -1; } else if (stk_frame.next_pc != 0) { /* Backtrace continues */ printk(" |<-CONTINUES"); } printk("\r\n\r\n"); return ret; } ```
/content/code_sandbox/arch/xtensa/core/xtensa_backtrace.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,267
```unknown # XTENSA architecture configuration options menu "XTENSA Options" depends on XTENSA config ARCH default "xtensa" config SIMULATOR_XTENSA bool "Simulator Target" help Enable if building to run on simulator. config XTENSA_RESET_VECTOR bool "Build reset vector code" default y help This option controls whether the initial reset vector code is built. This is always needed for the simulator. Real boards may already implement this in boot ROM. config XTENSA_GEN_HANDLERS bool "Automatically generate interrupt handlers" default n help When set, an "xtensa_handlers.h" file is generated containing definitions for the interrupt entry code of the target Xtensa core, based automatically on the details in the core-isa.h file. This replaces the previous scheme where a _soc_inthandlers.h file would be generated offline. config XTENSA_USE_CORE_CRT1 bool "Use crt1.S from core" default y help SoC or boards might define their own __start by setting this setting to false. config XTENSA_ENABLE_BACKTRACE bool "Backtrace on panic exception" default y depends on SOC_SERIES_ESP32 || SOC_FAMILY_INTEL_ADSP || SOC_XTENSA_DC233C help Enable this config option to print backtrace on panic exception config XTENSA_SMALL_VECTOR_TABLE_ENTRY bool "Workaround for small vector table entries" help This option enables a small indirection to bypass the size constraint of the vector table entry and moved the default handlers to the end of vector table, renaming them to _Level\LVL\()VectorHelper. config XTENSA_RPO_CACHE bool "Cached/uncached RPO mapping" help Support Cached/uncached RPO mapping. A design trick on multi-core hardware is to map memory twice so that it can be seen in both (incoherent) cached mappings and a coherent "shared" area. if XTENSA_RPO_CACHE config XTENSA_CACHED_REGION int "Cached RPO mapping" range 0 7 help This specifies which 512M region (0-7, as defined by the Xtensa Region Protection Option) contains the "cached" mapping. 
config XTENSA_UNCACHED_REGION int "Uncached RPO mapping" range 0 7 help As for XTENSA_CACHED_REGION, this specifies which 512M region (0-7) contains the "uncached" mapping. endif config XTENSA_CCOUNT_HZ int "CCOUNT cycle rate" default 1000000 help Rate in HZ of the Xtensa core as measured by the value of the CCOUNT register. config XTENSA_MORE_SPIN_RELAX_NOPS bool "Use Xtensa specific arch_spin_relax() with more NOPs" help Some Xtensa SoCs, especially under SMP, may need extra NOPs after failure to lock a spinlock. This gives the bus extra time to synchronize the RCW transaction among CPUs. config XTENSA_NUM_SPIN_RELAX_NOPS int "Number of NOPs to be used in arch_spin_relax()" default 1 depends on XTENSA_MORE_SPIN_RELAX_NOPS help Specify the number of NOPs in Xtensa specific arch_spin_relax(). config XTENSA_BREAK_ON_UNRECOVERABLE_EXCEPTIONS bool "Use BREAK instruction on unrecoverable exceptions" help Use BREAK instruction when unrecoverable exceptions are encountered. This requires a debugger attached to catch the BREAK. menu "Xtensa HiFi Options" config XTENSA_CPU_HAS_HIFI bool config XTENSA_CPU_HAS_HIFI3 select XTENSA_CPU_HAS_HIFI bool config XTENSA_CPU_HAS_HIFI4 select XTENSA_CPU_HAS_HIFI bool # Selected when at least one XTENSA_HIFIn version has been configured config XTENSA_HIFI bool if XTENSA_CPU_HAS_HIFI config XTENSA_HIFI3 bool "HiFi3 AudioEngine instructions" depends on XTENSA_CPU_HAS_HIFI3 default y select XTENSA_HIFI help This option enables HiFi 3 instruction support. config XTENSA_HIFI4 bool "HiFi4 AudioEngine instructions" depends on XTENSA_CPU_HAS_HIFI4 default y select XTENSA_HIFI help This option enables HiFi 4 instruction support. config XTENSA_HIFI_SHARING bool "HiFi register sharing" depends on XTENSA_HIFI help This option enables preservation of the hardware HiFi registers across context switches to allow multiple threads to perform concurrent HiFi operations. 
endif # XTENSA_CPU_HAS_HIFI endmenu # Xtensa HiFi Options if CPU_HAS_MMU config XTENSA_MMU bool "Xtensa MMU Support" select MMU select ARCH_MEM_DOMAIN_SYNCHRONOUS_API if USERSPACE select XTENSA_SMALL_VECTOR_TABLE_ENTRY select KERNEL_VM_USE_CUSTOM_MEM_RANGE_CHECK if XTENSA_RPO_CACHE select CURRENT_THREAD_USE_NO_TLS if USERSPACE help Enable support for Xtensa Memory Management Unit. if XTENSA_MMU choice prompt "PageTable virtual address" default XTENSA_MMU_PTEVADDR_20000000 help The virtual address for Xtensa page table (PTEVADDR). config XTENSA_MMU_PTEVADDR_20000000 bool "0x20000000" endchoice config XTENSA_MMU_PTEVADDR hex default 0x20000000 if XTENSA_MMU_PTEVADDR_20000000 help The virtual address for Xtensa page table (PTEVADDR). config XTENSA_MMU_PTEVADDR_SHIFT int default 29 if XTENSA_MMU_PTEVADDR_20000000 help The bit shift number for the virtual address for Xtensa page table (PTEVADDR). config XTENSA_MMU_NUM_L1_TABLES int "Number of L1 page tables" default 1 if !USERSPACE default 4 help This option specifies the maximum number of traslation tables. Translation tables are directly related to the number of memory domains in the target, considering the kernel itself requires one. config XTENSA_MMU_NUM_L2_TABLES int "Number of L2 page tables" default 20 if USERSPACE default 10 help Each table can address up to 4MB memory address. config XTENSA_MMU_DOUBLE_MAP bool "Map memory in cached and uncached region" help This option specifies that the memory is mapped in two distinct region, cached and uncached. config XTENSA_INVALIDATE_MEM_DOMAIN_TLB_ON_SWAP bool help This invalidates all TLBs referred by the incoming thread's memory domain when swapping page tables. 
config PRIVILEGED_STACK_SIZE # Must be multiple of CONFIG_MMU_PAGE_SIZE default 4096 endif # XTENSA_MMU endif # CPU_HAS_MMU if CPU_HAS_MPU menuconfig XTENSA_MPU bool "Xtensa MPU Support" select MPU select SRAM_REGION_PERMISSIONS select XTENSA_SMALL_VECTOR_TABLE_ENTRY select ARCH_MEM_DOMAIN_SYNCHRONOUS_API if USERSPACE select CURRENT_THREAD_USE_NO_TLS if USERSPACE select EXPERIMENTAL # TODO: the target the MPU code developed on (basically sample_controller # plus MPU minus s32c1i) does not have cache or SMP capability. # Need to verify functionalities with targets supporting these. depends on !CACHE && !SMP help Enable support for Xtensa Memory Protection Unit. if XTENSA_MPU config XTENSA_MPU_DEFAULT_MEM_TYPE hex "Default Memory Type" default 0x18 help Default memory type for memory regions: non-cacheable memory, non-shareable, non-bufferable and interruptible. If userspace is enabled, it will be used to restore the memory type of the region being removed from a memory domain. config XTENSA_MPU_ONLY_SOC_RANGES bool help Enable this by the SoC to indicate to the architecture code to use the MPU ranges specified by SoC only, and skip the common ranges defined in the core architecture code. This gives total control to the SoC on the MPU ranges. endif # XTENSA_MPU endif # CPU_HAS_MPU config XTENSA_SYSCALL_USE_HELPER bool "Use userspace syscall helper" default y if "$(ZEPHYR_TOOLCHAIN_VARIANT)" = "xt-clang" depends on (XTENSA_MMU || XTENSA_MPU) && USERSPACE help Use syscall helpers for passing more then 3 arguments. This is a workaround for toolchains where they have issue modeling register usage. config XTENSA_INSECURE_USERSPACE bool default y depends on (XTENSA_MMU || XTENSA_MPU) && USERSPACE endmenu ```
/content/code_sandbox/arch/xtensa/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,033
```unknown
/*
 *
 */

#include <zephyr/offsets.h>

#include <xtensa/config/tie.h>
#include <xtensa/config/tie-asm.h>

/*
 * Load the HiFi registers from the hifi buffer in the BSA. Round the address
 * of this buffer up to XCHAL_CP1_SA_ALIGN bytes to guarantee the necessary
 * alignment (the xchal_cp1_load macro requires an aligned save area).
 *
 * Upon entry ...
 *
 * A0 - return address (do not modify)
 * A1 - address of BSA (do not modify)
 * A2 - available for use
 * A3 - available for use
 */
.global _xtensa_hifi_load
.align 4

_xtensa_hifi_load:

	/* A2 = address of BSA hifi buffer, rounded up to XCHAL_CP1_SA_ALIGN */
	addi a2, a1, (___xtensa_irq_bsa_t_hifi_OFFSET + XCHAL_CP1_SA_ALIGN - 1)
	movi a3, ~(XCHAL_CP1_SA_ALIGN - 1)
	and a2, a2, a3

	xchal_cp1_load a2 a3 a3 a3 a3 /* Only A2 and A3 are used by macro */

	ret

/*
 * Save the HiFi registers into the hifi buffer in the BSA. Round the address
 * of this buffer up to XCHAL_CP1_SA_ALIGN bytes to guarantee the necessary
 * alignment (the xchal_cp1_store macro requires an aligned save area).
 *
 * A0 - return address (do not modify)
 * A1 - address of BSA (do not modify)
 * A2 - available for use
 * A3 - available for use
 */
.global _xtensa_hifi_save
.align 4

_xtensa_hifi_save:

	/* A2 = address of BSA hifi buffer, rounded up to XCHAL_CP1_SA_ALIGN */
	addi a2, a1, (___xtensa_irq_bsa_t_hifi_OFFSET + XCHAL_CP1_SA_ALIGN - 1)
	movi a3, ~(XCHAL_CP1_SA_ALIGN - 1)
	and a2, a2, a3

	xchal_cp1_store a2 a3 a3 a3 a3 /* Only A2 and A3 are used by macro */

	ret
```
/content/code_sandbox/arch/xtensa/core/xtensa_hifi.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
428
```restructuredtext # How Xtensa register windows work There is a paucity of introductory material on this subject, and Zephyr plays some tricks here that require understanding the base layer. ## Hardware When register windows are configured in the CPU, there are either 32 or 64 "real" registers in hardware, with 16 visible at one time. Registers are grouped and rotated in units of 4, so there are 8 or 16 such "quads" (my term, not Tensilica's) in hardware of which 4 are visible as A0-A15. The first quad (A0-A3) is pointed to by a special register called WINDOWBASE. The register file is cyclic, so for example if NREGS==64 and WINDOWBASE is 15, quads 15, 0, 1, and 2 will be visible as (respectively) A0-A3, A4-A7, A8-A11, and A12-A15. There is a ROTW instruction that can be used to manually rotate the window by a immediate number of quads that are added to WINDOWBASE. Positive rotations "move" high registers into low registers (i.e. after "ROTW 1" the register that used to be called A4 is now A0). There are CALL4/CALL8/CALL12 instructions to effect rotated calls which rotate registers upward (i.e. "hiding" low registers from the callee) by 1, 2 or 3 quads. These do not rotate the window themselves. Instead they place the rotation amount in two places (yes, two; see below): the 2-bit CALLINC field of the PS register, and the top two bits of the return address placed in A0. There is an ENTRY instruction that does the rotation. It adds CALLINC to WINDOWBASE, at the same time copying the old (now hidden) stack pointer in A1 into the "new" A1 in the rotated frame, subtracting an immediate offset from it to make space for the new frame. There is a RETW instruction that undoes the rotation. It reads the top two bits from the return address in A0 and subtracts that value from WINDOWBASE before returning. This is why the CALLINC bits went in two places. 
They have to be stored on the stack across potentially many calls, so they need to be GPR data that lives in registers and can be spilled. But ENTRY isn't specified to assume a particular return value format and is used immediately, so it makes more sense for it to use processor state instead. Note that we still don't know how to detect when the register file has wrapped around and needs to be spilled or filled. To do this there is a WINDOWSTART register used to detect which register quads are in use. The name "start" is somewhat confusing, this is not a pointer. WINDOWSTART stores a bitmask with one bit per hardware quad (so it's 8 or 16 bits wide). The bit in windowstart corresponding to WINDOWBASE will be set by the ENTRY instruction, and remain set after rotations until cleared by a function return (by RETW, see below). Other bits stay zero. So there is one set bit in WINDOWSTART corresponding to each call frame that is live in hardware registers, and it will be followed by 0, 1 or 2 zero bits that tell you how "big" (how many quads of registers) that frame is. So the CPU executing RETW checks to make sure that the register quad being brought into A0-A3 (i.e. the new WINDOWBASE) has a set bit indicating it's valid. If it does not, the registers must have been spilled and the CPU traps to an exception handler to fill them. Likewise, the processor can tell if a high register is "owned" by another call by seeing if there is a one in WINDOWSTART between that register's quad and WINDOWBASE. If there is, the CPU traps to a spill handler to spill one frame. Note that a frame might be only four registers, but it's possible to hit registers 12 out from WINDOWBASE, so it's actually possible to trap again when the instruction restarts to spill a second quad, and even a third time at maximum. Finally: note that hardware checks the two bits of WINDOWSTART after the frame bit to detect how many quads are represented by the one frame. 
So there are six separate exception handlers to spill/fill 1/2/3 quads of registers. ## Software & ABI The advantage of the scheme above is that it allows the registers to be spilled naturally into the stack by using the stack pointers embedded in the register file. But the hardware design assumes and to some extent enforces a fairly complicated stack layout to make that work: The spill area for a single frame's A0-A3 registers is not in its own stack frame. It lies in the 16 bytes below its CALLEE's stack pointer. This is so that the callee (and exception handlers invoked on its behalf) can see its caller's potentially-spilled stack pointer register (A1) on the stack and be able to walk back up on return. Other architectures do this too by e.g. pushing the incoming stack pointer onto the stack as a standard "frame pointer" defined in the platform ABI. Xtensa wraps this together with the natural spill area for register windows. By convention spill regions always store the lowest numbered register in the lowest address. The spill area for a frame's A4-A11 registers may or may not exist depending on whether the call was made with CALL8/CALL12. It is legal to write a function using only A0-A3 and CALL4 calls and ignore higher registers. But if those 0-2 register quads are in use, they appear at the top of the stack frame, immediately below the parent call's A0-A3 spill area. There is no spill area for A12-A15. Those registers are always caller-save. When using CALLn, you always need to overlap 4 registers to provide arguments and take a return value. ```
/content/code_sandbox/arch/xtensa/core/README_WINDOWS.rst
restructuredtext
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,349
```python #!/usr/bin/env python3 import sys import re # Xtensa Vector Table linker generator # # Takes a pre-processed (gcc -dM) core-isa.h file as its first # argument, and emits a GNU linker section declartion which will # correctly load the exception vectors and literals as long as their # code is declared using standard conventions (see below). # # The section name will be ".z_xtensa_vectors", and a symbol # "z_xtensa_vecbase" is emitted containing a valid value for the # VECBASE SR at runtime. # # Obviously, this requires that XCHAL_HAVE_VECBASE=1. A similar trick # could be played to load vectors at fixed addresses on hardware that # lacks VECBASE, but the core-isa.h interface is inexplicably # different. # # Because the "standard conventions" (which descend from somewhere in # Cadence) are not documented anywhere and just end up cut and pasted # between devices, here's an attempt at a specification: # # + The six register window exception vectors are defined with offsets # internal to their assembly code. They are linked in a single # section named ".WindowVectors.text". # # + The "kernel", "user" and "double exception" vectors are emitted in # sections named ".KernelExceptionVector.text", # "UserExceptionVector.text" and "DoubleExceptionVector.text" # respectively. # # + XEA2 interrupt vectors are in sections named # ".Level<n>InterruptVector.text", except (!) for ones which are # given special names. The "debug" and "NMI" interrupts (if they # exist) are technically implemented as standard interrupt vectors # (of a platform-dependent level), but the code for them is emitted # in ".DebugExceptionVector.text" and ".NMIExceptionVector.text", # and not a section corresponding to their interrupt level. # # + Any unused bytes at the end of a vector are made available as # storage for immediate values used by the following vector (Xtensa # can only back-reference immediates for MOVI/L32R instructions) as # a "<name>Vector.literal" section. 
Note that there is no guarantee # of how much space is available, it depends on the previous # vector's code size. Zephyr code has historically not used this # space, as support in existing linker scripts is inconsistent. But # it's exposed here. coreisa = sys.argv[1] debug_level = 0 # Translation for the core-isa.h vs. linker section naming conventions sect_names = { "DOUBLEEXC" : "DoubleException", "KERNEL" : "KernelException", "NMI" : "NMIException", "USER" : "UserException" } offsets = {} with open(coreisa) as infile: for line in infile.readlines(): m = re.match(r"^#define\s+XCHAL_([^ ]+)_VECOFS\s*(.*)", line.rstrip()) if m: (sym, val) = (m.group(1), m.group(2)) if sym == "WINDOW_OF4": # This must be the start of the section assert eval(val) == 0 elif sym.startswith("WINDOW"): # Ignore the other window exceptions, they're internally sorted pass elif sym == "RESET": # Ignore, not actually part of the vector table pass elif sym == "DEBUG": # This one is a recursive macro that doesn't expand, # so handle manually m = re.match(r"XCHAL_INTLEVEL(\d+)_VECOFS", val) if not m: print(f"no intlevel match for debug val {val}") assert m debug_level = eval(m.group(1)) else: if val == "XCHAL_NMI_VECOFS": # This gets recursively defined in the other # direction, so ignore the INTLEVEL pass else: addr = eval(val) m = re.match(r"^INTLEVEL(\d+)", sym) if m: offsets[f"Level{m.group(1)}Interrupt"] = addr else: offsets[sect_names[sym]] = addr if debug_level > 0: old = f"Level{debug_level}Interrupt" offsets[f"DebugException"] = offsets[old] del offsets[old] sects = list(offsets) sects.sort(key=lambda s: offsets[s]) print("/* Automatically Generated Code - Do Not Edit */") print("/* See arch/xtensa/core/gen_vector.py */") print("") # The 1k alignment is experimental, the docs on the Relocatable Vector # Option doesn't specify an alignment at all, but writes to the # bottom bits don't take... 
print( " .z_xtensa_vectors : ALIGN(1024) {") print( " z_xtensa_vecbase = .;") print(f" KEEP(*(.WindowVectors.text));") for s in sects: print(f" KEEP(*(.{s}Vector.literal));") print( " . = 0x%3.3x;" % (offsets[s])) print(f" KEEP(*(.{s}Vector.text));") print(" }") ```
/content/code_sandbox/arch/xtensa/core/gen_vectors.py
python
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,170
```c /* */ #include <zephyr/kernel.h> #include <zephyr/irq_offload.h> #include <zephyr/zsr.h> #include <zephyr/irq.h> static struct { irq_offload_routine_t fn; const void *arg; } offload_params[CONFIG_MP_MAX_NUM_CPUS]; static void irq_offload_isr(const void *param) { ARG_UNUSED(param); uint8_t cpu_id = _current_cpu->id; offload_params[cpu_id].fn(offload_params[cpu_id].arg); } void arch_irq_offload(irq_offload_routine_t routine, const void *parameter) { IRQ_CONNECT(ZSR_IRQ_OFFLOAD_INT, 0, irq_offload_isr, NULL, 0); unsigned int intenable, key = arch_irq_lock(); uint8_t cpu_id = _current_cpu->id; offload_params[cpu_id].fn = routine; offload_params[cpu_id].arg = parameter; __asm__ volatile("rsr %0, INTENABLE" : "=r"(intenable)); intenable |= BIT(ZSR_IRQ_OFFLOAD_INT); __asm__ volatile("wsr %0, INTENABLE; wsr %0, INTSET; rsync" :: "r"(intenable), "r"(BIT(ZSR_IRQ_OFFLOAD_INT))); arch_irq_unlock(key); } ```
/content/code_sandbox/arch/xtensa/core/irq_offload.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
290
```c /* * */ #include <string.h> #include <zephyr/kernel.h> #include <kernel_internal.h> #include <xtensa_asm2_context.h> #include <xtensa_internal.h> #include <zephyr/logging/log.h> LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); #ifdef CONFIG_USERSPACE #ifdef CONFIG_THREAD_LOCAL_STORAGE /* * Per-thread (TLS) variable indicating whether execution is in user mode. */ __thread uint32_t is_user_mode; #endif #endif /* CONFIG_USERSPACE */ /** * Initializes a stack area such that it can be "restored" later and * begin running with the specified function and three arguments. The * entry function takes three arguments to match the signature of * Zephyr's k_thread_entry_t. Thread will start with EXCM clear and * INTLEVEL set to zero (i.e. it's a user thread, we don't start with * anything masked, so don't assume that!). */ static void *init_stack(struct k_thread *thread, int *stack_top, void (*entry)(void *, void *, void *), void *arg1, void *arg2, void *arg3) { void *ret; _xtensa_irq_stack_frame_a11_t *frame; #ifdef CONFIG_USERSPACE struct xtensa_thread_stack_header *header = (struct xtensa_thread_stack_header *)thread->stack_obj; thread->arch.psp = header->privilege_stack + sizeof(header->privilege_stack); #endif /* Not-a-cpu ID Ensures that the first time this is run, the * stack will be invalidated. That covers the edge case of * restarting a thread on a stack that had previously been run * on one CPU, but then initialized on this one, and * potentially run THERE and not HERE. */ thread->arch.last_cpu = -1; /* We cheat and shave 16 bytes off, the top four words are the * A0-A3 spill area for the caller of the entry function, * which doesn't exist. It will never be touched, so we * arrange to enter the function with a CALLINC of 1 and a * stack pointer 16 bytes above the top, so its ENTRY at the * start will decrement the stack pointer by 16. 
*/ const int bsasz = sizeof(*frame) - 16; frame = (void *)(((char *) stack_top) - bsasz); (void)memset(frame, 0, bsasz); frame->bsa.ps = PS_WOE | PS_UM | PS_CALLINC(1); #ifdef CONFIG_USERSPACE if ((thread->base.user_options & K_USER) == K_USER) { frame->bsa.pc = (uintptr_t)arch_user_mode_enter; } else { frame->bsa.pc = (uintptr_t)z_thread_entry; } #else frame->bsa.pc = (uintptr_t)z_thread_entry; #endif #if XCHAL_HAVE_THREADPTR #ifdef CONFIG_THREAD_LOCAL_STORAGE frame->bsa.threadptr = thread->tls; #elif CONFIG_USERSPACE frame->bsa.threadptr = (uintptr_t)((thread->base.user_options & K_USER) ? thread : NULL); #endif #endif /* Arguments to z_thread_entry(). Remember these start at A6, * which will be rotated into A2 by the ENTRY instruction that * begins the C function. And A4-A7 and A8-A11 are optional * quads that live below the BSA! */ frame->a7 = (uintptr_t)arg1; /* a7 */ frame->a6 = (uintptr_t)entry; /* a6 */ frame->a5 = 0; /* a5 */ frame->a4 = 0; /* a4 */ frame->a11 = 0; /* a11 */ frame->a10 = 0; /* a10 */ frame->a9 = (uintptr_t)arg3; /* a9 */ frame->a8 = (uintptr_t)arg2; /* a8 */ /* Finally push the BSA pointer and return the stack pointer * as the handle */ frame->ptr_to_bsa = (void *)&frame->bsa; ret = &frame->ptr_to_bsa; return ret; } void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, char *stack_ptr, k_thread_entry_t entry, void *p1, void *p2, void *p3) { thread->switch_handle = init_stack(thread, (int *)stack_ptr, entry, p1, p2, p3); #ifdef CONFIG_KERNEL_COHERENCE __ASSERT((((size_t)stack) % XCHAL_DCACHE_LINESIZE) == 0, ""); __ASSERT((((size_t)stack_ptr) % XCHAL_DCACHE_LINESIZE) == 0, ""); sys_cache_data_flush_and_invd_range(stack, (char *)stack_ptr - (char *)stack); #endif } #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) int arch_float_disable(struct k_thread *thread) { /* xtensa always has FPU enabled so cannot be disabled */ return -ENOTSUP; } int arch_float_enable(struct k_thread *thread, unsigned int options) { /* xtensa 
always has FPU enabled so nothing to do here */ return 0; } #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */ #ifdef CONFIG_USERSPACE FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, void *p1, void *p2, void *p3) { struct k_thread *current = _current; size_t stack_end; /* Transition will reset stack pointer to initial, discarding * any old context since this is a one-way operation */ stack_end = Z_STACK_PTR_ALIGN(current->stack_info.start + current->stack_info.size - current->stack_info.delta); xtensa_userspace_enter(user_entry, p1, p2, p3, stack_end, current->stack_info.start); CODE_UNREACHABLE; } #endif /* CONFIG_USERSPACE */ ```
/content/code_sandbox/arch/xtensa/core/thread.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,315
```c /* * */ #include <string.h> #include <zephyr/debug/coredump.h> #include <xtensa_asm2_context.h> #include <zephyr/offsets.h> #define ARCH_HDR_VER 1 #define XTENSA_BLOCK_HDR_VER 2 enum xtensa_soc_code { XTENSA_SOC_UNKNOWN = 0, XTENSA_SOC_SAMPLE_CONTROLLER, XTENSA_SOC_ESP32, XTENSA_SOC_INTEL_ADSP, XTENSA_SOC_ESP32S2, XTENSA_SOC_ESP32S3, XTENSA_SOC_DC233C, }; struct xtensa_arch_block { /* Each Xtensa SOC can omit registers (e.g. loop * registers) or assign different index numbers * in xtensa-config.c. GDB identifies registers * based on these indices * * (This must be the first field or the GDB server * won't be able to unpack the struct while parsing) */ uint8_t soc; /* Future versions of Xtensa coredump may expand * minimum set of registers * * (This should stay the second field for the same * reason as the first once we have more versions) */ uint16_t version; uint8_t toolchain; struct { /* Minimum set shown by GDB 'info registers', * skipping user-defined register EXPSTATE * * WARNING: IF YOU CHANGE THE ORDER OF THE REGISTERS, * YOU MUST UPDATE THE ORDER OF THE REGISTERS IN * EACH OF THE XtensaSoc_ RegNum enums IN * scripts/coredump/gdbstubs/arch/xtensa.py TO MATCH. * See xtensa.py's map_register function for details */ uint32_t pc; uint32_t exccause; uint32_t excvaddr; uint32_t sar; uint32_t ps; #if XCHAL_HAVE_S32C1I uint32_t scompare1; #endif uint32_t a0; uint32_t a1; uint32_t a2; uint32_t a3; uint32_t a4; uint32_t a5; uint32_t a6; uint32_t a7; uint32_t a8; uint32_t a9; uint32_t a10; uint32_t a11; uint32_t a12; uint32_t a13; uint32_t a14; uint32_t a15; #if XCHAL_HAVE_LOOPS uint32_t lbeg; uint32_t lend; uint32_t lcount; #endif } r; } __packed; /* * This might be too large for stack space if defined * inside function. So do it here. 
*/ static struct xtensa_arch_block arch_blk; void arch_coredump_info_dump(const struct arch_esf *esf) { struct coredump_arch_hdr_t hdr = { .id = COREDUMP_ARCH_HDR_ID, .hdr_version = ARCH_HDR_VER, .num_bytes = sizeof(arch_blk), }; /* Nothing to process */ if (esf == NULL) { return; } (void)memset(&arch_blk, 0, sizeof(arch_blk)); arch_blk.version = XTENSA_BLOCK_HDR_VER; #if CONFIG_SOC_XTENSA_SAMPLE_CONTROLLER arch_blk.soc = XTENSA_SOC_SAMPLE_CONTROLLER; #elif CONFIG_SOC_FAMILY_INTEL_ADSP arch_blk.soc = XTENSA_SOC_INTEL_ADSP; #elif CONFIG_SOC_SERIES_ESP32 arch_blk.soc = XTENSA_SOC_ESP32; #elif CONFIG_SOC_SERIES_ESP32S2 arch_blk.soc = XTENSA_SOC_ESP32S2; #elif CONFIG_SOC_SERIES_ESP32S3 arch_blk.soc = XTENSA_SOC_ESP32S3; #elif CONFIG_SOC_XTENSA_DC233C arch_blk.soc = XTENSA_SOC_DC233C; #else arch_blk.soc = XTENSA_SOC_UNKNOWN; #endif /* Set in top-level CMakeLists.txt for use with Xtensa coredump */ arch_blk.toolchain = XTENSA_TOOLCHAIN_VARIANT; __asm__ volatile("rsr.exccause %0" : "=r"(arch_blk.r.exccause)); _xtensa_irq_stack_frame_raw_t *frame = (void *)esf; _xtensa_irq_bsa_t *bsa = frame->ptr_to_bsa; uintptr_t num_high_regs; int regs_blk_remaining; /* Calculate number of high registers. */ num_high_regs = (uint8_t *)bsa - (uint8_t *)frame + sizeof(void *); num_high_regs /= sizeof(uintptr_t); /* And high registers are always comes in 4 in a block. 
*/ regs_blk_remaining = (int)num_high_regs / 4; arch_blk.r.pc = bsa->pc; __asm__ volatile("rsr.excvaddr %0" : "=r"(arch_blk.r.excvaddr)); arch_blk.r.ps = bsa->ps; #if XCHAL_HAVE_S32C1I arch_blk.r.scompare1 = bsa->scompare1; #endif arch_blk.r.sar = bsa->sar; arch_blk.r.a0 = bsa->a0; arch_blk.r.a1 = (uint32_t)((char *)bsa) + sizeof(*bsa); arch_blk.r.a2 = bsa->a2; arch_blk.r.a3 = bsa->a3; if (regs_blk_remaining > 0) { regs_blk_remaining--; arch_blk.r.a4 = frame->blks[regs_blk_remaining].r0; arch_blk.r.a5 = frame->blks[regs_blk_remaining].r1; arch_blk.r.a6 = frame->blks[regs_blk_remaining].r2; arch_blk.r.a7 = frame->blks[regs_blk_remaining].r3; } if (regs_blk_remaining > 0) { regs_blk_remaining--; arch_blk.r.a8 = frame->blks[regs_blk_remaining].r0; arch_blk.r.a9 = frame->blks[regs_blk_remaining].r1; arch_blk.r.a10 = frame->blks[regs_blk_remaining].r2; arch_blk.r.a11 = frame->blks[regs_blk_remaining].r3; } if (regs_blk_remaining > 0) { arch_blk.r.a12 = frame->blks[regs_blk_remaining].r0; arch_blk.r.a13 = frame->blks[regs_blk_remaining].r1; arch_blk.r.a14 = frame->blks[regs_blk_remaining].r2; arch_blk.r.a15 = frame->blks[regs_blk_remaining].r3; } #if XCHAL_HAVE_LOOPS arch_blk.r.lbeg = bsa->lbeg; arch_blk.r.lend = bsa->lend; arch_blk.r.lcount = bsa->lcount; #endif /* Send for output */ coredump_buffer_output((uint8_t *)&hdr, sizeof(hdr)); coredump_buffer_output((uint8_t *)&arch_blk, sizeof(arch_blk)); } uint16_t arch_coredump_tgt_code_get(void) { return COREDUMP_TGT_XTENSA; } ```
/content/code_sandbox/arch/xtensa/core/coredump.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,641
```unknown /* */ #include <xtensa/coreasm.h> #include <zephyr/zsr.h> /* WINDOW OVERFLOW AND UNDERFLOW EXCEPTION VECTORS AND ALLOCA EXCEPTION * HANDLER * * Here is the code for each window overflow/underflow exception vector and * (interspersed) efficient code for handling the alloca exception cause. * Window exceptions are handled entirely in the vector area and are very tight * for performance. The alloca exception is also handled entirely in the window * vector area so comes at essentially no cost in code size. Users should never * need to modify them and Cadence Design Systems recommends they do not. * * Window handlers go at predetermined vector locations according to the Xtensa * hardware configuration, which is ensured by their placement in a special * section known to the Xtensa linker support package (LSP). Since their * offsets in that section are always the same, the LSPs do not define a * section per vector. * * These things are coded for XEA2 only (XEA1 is not supported). * * Note on Underflow Handlers: * * The underflow handler for returning from call[i+1] to call[i] must preserve * all the registers from call[i+1]'s window. In particular, a0 and a1 must be * preserved because the RETW instruction will be reexecuted (and may even * underflow if an intervening exception has flushed call[i]'s registers). * Registers a2 and up may contain return values. */ #if XCHAL_HAVE_WINDOWED .section .WindowVectors.text, "ax" /* Window Overflow Exception for Call4. * * Invoked if a call[i] referenced a register (a4-a15) * that contains data from ancestor call[j]; * call[j] had done a call4 to call[j+1]. * On entry here: * window rotated to call[j] start point; * a0-a3 are registers to be saved; * a4-a15 must be preserved; * a5 is call[j+1]'s stack pointer. 
*/ .org 0x0 .global _WindowOverflow4 _WindowOverflow4: s32e a0, a5, -16 /* save a0 to call[j+1]'s stack frame */ s32e a1, a5, -12 /* save a1 to call[j+1]'s stack frame */ s32e a2, a5, -8 /* save a2 to call[j+1]'s stack frame */ s32e a3, a5, -4 /* save a3 to call[j+1]'s stack frame */ rfwo /* rotates back to call[i] position */ /* Window Underflow Exception for Call4 * * Invoked by RETW returning from call[i+1] to call[i] * where call[i]'s registers must be reloaded (not live in ARs); * where call[i] had done a call4 to call[i+1]. * On entry here: * window rotated to call[i] start point; * a0-a3 are undefined, must be reloaded with call[i].reg[0..3]; * a4-a15 must be preserved (they are call[i+1].reg[0..11]); * a5 is call[i+1]'s stack pointer. */ .org 0x40 .global _WindowUnderflow4 _WindowUnderflow4: l32e a0, a5, -16 /* restore a0 from call[i+1]'s stack frame */ l32e a1, a5, -12 /* restore a1 from call[i+1]'s stack frame */ l32e a2, a5, -8 /* restore a2 from call[i+1]'s stack frame */ l32e a3, a5, -4 /* restore a3 from call[i+1]'s stack frame */ rfwu /* Handle alloca exception generated by interruptee executing 'movsp'. * This uses space between the window vectors, so is essentially * "free". All interruptee's regs are intact except a0 which is saved * in $ZSR_A0SAVE (assigned at build time, see gen_zsr.py for * details), and PS.EXCM has been set by the exception hardware (can't * be interrupted). The fact the alloca exception was taken means the * registers associated with the base-save area have been spilled and * will be restored by the underflow handler, so those 4 registers are * available for scratch. The code is optimized to avoid unaligned * branches and minimize cache misses. 
*/ .align 4 .global _xt_alloca_exc _xt_alloca_exc: rsr a0, WINDOWBASE /* grab WINDOWBASE before rotw changes it */ rotw -1 /* WINDOWBASE goes to a4, new a0-a3 are scratch */ rsr a2, PS extui a3, a2, XCHAL_PS_OWB_SHIFT, XCHAL_PS_OWB_BITS xor a3, a3, a4 /* bits changed from old to current windowbase */ rsr a4, ZSR_A0SAVE /* restore original a0 (now in a4) */ slli a3, a3, XCHAL_PS_OWB_SHIFT xor a2, a2, a3 /* flip changed bits in old window base */ wsr a2, PS /* update PS.OWB to new window base */ rsync _bbci.l a4, 31, _WindowUnderflow4 rotw -1 /* original a0 goes to a8 */ _bbci.l a8, 30, _WindowUnderflow8 rotw -1 j _WindowUnderflow12 /* Window Overflow Exception for Call8 * * Invoked if a call[i] referenced a register (a4-a15) * that contains data from ancestor call[j]; * call[j] had done a call8 to call[j+1]. * On entry here: * window rotated to call[j] start point; * a0-a7 are registers to be saved; * a8-a15 must be preserved; * a9 is call[j+1]'s stack pointer. */ .org 0x80 .global _WindowOverflow8 _WindowOverflow8: s32e a0, a9, -16 /* save a0 to call[j+1]'s stack frame */ l32e a0, a1, -12 /* a0 <- call[j-1]'s sp (used to find end of call[j]'s frame) */ s32e a1, a9, -12 /* save a1 to call[j+1]'s stack frame */ s32e a2, a9, -8 /* save a2 to call[j+1]'s stack frame */ s32e a3, a9, -4 /* save a3 to call[j+1]'s stack frame */ s32e a4, a0, -32 /* save a4 to call[j]'s stack frame */ s32e a5, a0, -28 /* save a5 to call[j]'s stack frame */ s32e a6, a0, -24 /* save a6 to call[j]'s stack frame */ s32e a7, a0, -20 /* save a7 to call[j]'s stack frame */ rfwo /* rotates back to call[i] position */ /* * Window Underflow Exception for Call8 * * Invoked by RETW returning from call[i+1] to call[i] * where call[i]'s registers must be reloaded (not live in ARs); * where call[i] had done a call8 to call[i+1]. 
* On entry here: * window rotated to call[i] start point; * a0-a7 are undefined, must be reloaded with call[i].reg[0..7]; * a8-a15 must be preserved (they are call[i+1].reg[0..7]); * a9 is call[i+1]'s stack pointer. */ .org 0xC0 .global _WindowUnderflow8 _WindowUnderflow8: l32e a0, a9, -16 /* restore a0 from call[i+1]'s stack frame */ l32e a1, a9, -12 /* restore a1 from call[i+1]'s stack frame */ l32e a2, a9, -8 /* restore a2 from call[i+1]'s stack frame */ l32e a7, a1, -12 /* a7 <- call[i-1]'s sp (used to find end of call[i]'s frame) */ l32e a3, a9, -4 /* restore a3 from call[i+1]'s stack frame */ l32e a4, a7, -32 /* restore a4 from call[i]'s stack frame */ l32e a5, a7, -28 /* restore a5 from call[i]'s stack frame */ l32e a6, a7, -24 /* restore a6 from call[i]'s stack frame */ l32e a7, a7, -20 /* restore a7 from call[i]'s stack frame */ rfwu /* * Window Overflow Exception for Call12 * * Invoked if a call[i] referenced a register (a4-a15) * that contains data from ancestor call[j]; * call[j] had done a call12 to call[j+1]. * On entry here: * window rotated to call[j] start point; * a0-a11 are registers to be saved; * a12-a15 must be preserved; * a13 is call[j+1]'s stack pointer. 
*/ .org 0x100 .global _WindowOverflow12 _WindowOverflow12: s32e a0, a13, -16 /* save a0 to call[j+1]'s stack frame */ l32e a0, a1, -12 /* a0 <- call[j-1]'s sp (used to find end of call[j]'s frame) */ s32e a1, a13, -12 /* save a1 to call[j+1]'s stack frame */ s32e a2, a13, -8 /* save a2 to call[j+1]'s stack frame */ s32e a3, a13, -4 /* save a3 to call[j+1]'s stack frame */ s32e a4, a0, -48 /* save a4 to end of call[j]'s stack frame */ s32e a5, a0, -44 /* save a5 to end of call[j]'s stack frame */ s32e a6, a0, -40 /* save a6 to end of call[j]'s stack frame */ s32e a7, a0, -36 /* save a7 to end of call[j]'s stack frame */ s32e a8, a0, -32 /* save a8 to end of call[j]'s stack frame */ s32e a9, a0, -28 /* save a9 to end of call[j]'s stack frame */ s32e a10, a0, -24 /* save a10 to end of call[j]'s stack frame */ s32e a11, a0, -20 /* save a11 to end of call[j]'s stack frame */ rfwo /* rotates back to call[i] position */ /* * Window Underflow Exception for Call12 * * Invoked by RETW returning from call[i+1] to call[i] * where call[i]'s registers must be reloaded (not live in ARs); * where call[i] had done a call12 to call[i+1]. * On entry here: * window rotated to call[i] start point; * a0-a11 are undefined, must be reloaded with call[i].reg[0..11]; * a12-a15 must be preserved (they are call[i+1].reg[0..3]); * a13 is call[i+1]'s stack pointer. 
*/ .org 0x140 .global _WindowUnderflow12 _WindowUnderflow12: l32e a0, a13, -16 /* restore a0 from call[i+1]'s stack frame */ l32e a1, a13, -12 /* restore a1 from call[i+1]'s stack frame */ l32e a2, a13, -8 /* restore a2 from call[i+1]'s stack frame */ l32e a11, a1, -12 /* a11 <- call[i-1]'s sp * (used to find end of call[i]'s frame) */ l32e a3, a13, -4 /* restore a3 from call[i+1]'s stack frame */ l32e a4, a11, -48 /* restore a4 from end of call[i]'s stack frame */ l32e a5, a11, -44 /* restore a5 from end of call[i]'s stack frame */ l32e a6, a11, -40 /* restore a6 from end of call[i]'s stack frame */ l32e a7, a11, -36 /* restore a7 from end of call[i]'s stack frame */ l32e a8, a11, -32 /* restore a8 from end of call[i]'s stack frame */ l32e a9, a11, -28 /* restore a9 from end of call[i]'s stack frame */ l32e a10, a11, -24 /* restore a10 from end of call[i]'s stack * frame */ l32e a11, a11, -20 /* restore a11 from end of call[i]'s stack * frame */ rfwu #endif /* XCHAL_HAVE_WINDOWED */ ```
/content/code_sandbox/arch/xtensa/core/window_vectors.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,215
```c /* * */ #include <stdint.h> #include <stdlib.h> #include <string.h> #include <zephyr/kernel.h> #include <zephyr/spinlock.h> #include <zephyr/toolchain.h> #include <zephyr/arch/xtensa/arch_inlines.h> #include <zephyr/arch/xtensa/mpu.h> #include <zephyr/linker/linker-defs.h> #include <zephyr/sys/__assert.h> #include <zephyr/sys/util_macro.h> #include <xtensa/corebits.h> #include <xtensa/config/core-matmap.h> #include <xtensa/config/core-isa.h> #include <xtensa_mpu_priv.h> #ifdef CONFIG_USERSPACE BUILD_ASSERT((CONFIG_PRIVILEGED_STACK_SIZE > 0) && (CONFIG_PRIVILEGED_STACK_SIZE % XCHAL_MPU_ALIGN) == 0); #endif extern char _heap_end[]; extern char _heap_start[]; /** MPU foreground map for kernel mode. */ static struct xtensa_mpu_map xtensa_mpu_map_fg_kernel; /* * Additional information about the MPU maps: foreground and background * maps. * * * Some things to keep in mind: * - Each MPU region is described by TWO entries: * [entry_a_address, entry_b_address). For contiguous memory regions, * this should not much of an issue. However, disjoint memory regions * "waste" another entry to describe the end of those regions. * We might run out of available entries in the MPU map because of * this. * - The last entry is a special case as there is no more "next" * entry in the map. In this case, the end of memory is * the implicit boundary. In another word, the last entry * describes the region between the start address of this entry * and the end of memory. * - Current implementation has following limitations: * - All enabled entries are grouped towards the end of the map. * - Except the last entry which can be disabled. This is * the end of the last foreground region. With a disabled * entry, memory after this will use the background map * for access control. * - No disabled MPU entries allowed in between. * * * For foreground map to be valid, its entries must follow these rules: * - The start addresses must always be in non-descending order. 
* - The access rights and memory type fields must contain valid values. * - The segment field needs to be correct for each entry. * - MBZ fields must contain only zeroes. * - Although the start address occupies 27 bits of the register, * it does not mean all 27 bits are usable. The macro * XCHAL_MPU_ALIGN_BITS provided by the toolchain indicates * that only bits of and left of this value are valid. This * corresponds to the minimum segment size (MINSEGMENTSIZE) * definied in the processor configuration. */ #ifndef CONFIG_XTENSA_MPU_ONLY_SOC_RANGES /** * Static definition of all code and data memory regions of the * current Zephyr image. This information must be available and * need to be processed upon MPU initialization. */ static const struct xtensa_mpu_range mpu_zephyr_ranges[] = { /* Region for vector handlers. */ { .start = (uintptr_t)XCHAL_VECBASE_RESET_VADDR, /* * There is nothing from the Xtensa overlay about how big * the vector handler region is. So we make an assumption * that vecbase and .text are contiguous. * * SoC can override as needed if this is not the case, * especially if the SoC reset/startup code relocates * vecbase. */ .end = (uintptr_t)__text_region_start, .access_rights = XTENSA_MPU_ACCESS_P_RX_U_RX, .memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE, }, /* * Mark the zephyr execution regions (data, bss, noinit, etc.) * cacheable, read / write and non-executable */ { /* This includes .data, .bss and various kobject sections. 
*/ .start = (uintptr_t)_image_ram_start, .end = (uintptr_t)_image_ram_end, .access_rights = XTENSA_MPU_ACCESS_P_RW_U_NA, .memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE, }, #if K_HEAP_MEM_POOL_SIZE > 0 /* System heap memory */ { .start = (uintptr_t)_heap_start, .end = (uintptr_t)_heap_end, .access_rights = XTENSA_MPU_ACCESS_P_RW_U_NA, .memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE, }, #endif /* Mark text segment cacheable, read only and executable */ { .start = (uintptr_t)__text_region_start, .end = (uintptr_t)__text_region_end, .access_rights = XTENSA_MPU_ACCESS_P_RX_U_RX, .memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE, }, /* Mark rodata segment cacheable, read only and non-executable */ { .start = (uintptr_t)__rodata_region_start, .end = (uintptr_t)__rodata_region_end, .access_rights = XTENSA_MPU_ACCESS_P_RO_U_RO, .memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE, }, }; #endif /* !CONFIG_XTENSA_MPU_ONLY_SOC_RANGES */ /** * Return the pointer to the entry encompassing @a addr out of an array of MPU entries. * * Returning the entry where @a addr is greater or equal to the entry's start address, * and where @a addr is less than the starting address of the next entry. * * @param[in] entries Array of MPU entries. * @param[in] addr Address to be matched to one background entry. * @param[in] first_enabled_idx The index of the first enabled entry. * Use 0 if not sure. * @param[out] exact Set to true if address matches exactly. * NULL if do not care. * @param[out] entry_idx Set to the index of the entry array if entry is found. * NULL if do not care. * * @return Pointer to the map entry encompassing @a addr, or NULL if no such entry found. 
*/ static const struct xtensa_mpu_entry *check_addr_in_mpu_entries(const struct xtensa_mpu_entry *entries, uintptr_t addr, uint8_t first_enabled_idx, bool *exact, uint8_t *entry_idx) { const struct xtensa_mpu_entry *ret = NULL; uintptr_t s_addr, e_addr; uint8_t idx; if (first_enabled_idx >= XTENSA_MPU_NUM_ENTRIES) { goto out_null; } if (addr < xtensa_mpu_entry_start_address_get(&entries[first_enabled_idx])) { /* Before the start address of very first entry. So no match. */ goto out_null; } /* Loop through the map except the last entry (which is a special case). */ for (idx = first_enabled_idx; idx < (XTENSA_MPU_NUM_ENTRIES - 1); idx++) { s_addr = xtensa_mpu_entry_start_address_get(&entries[idx]); e_addr = xtensa_mpu_entry_start_address_get(&entries[idx + 1]); if ((addr >= s_addr) && (addr < e_addr)) { ret = &entries[idx]; goto out; } } idx = XTENSA_MPU_NUM_ENTRIES - 1; s_addr = xtensa_mpu_entry_start_address_get(&entries[idx]); if (addr >= s_addr) { /* Last entry encompasses the start address to end of memory. */ ret = &entries[idx]; } out: if (ret != NULL) { if (exact != NULL) { if (addr == s_addr) { *exact = true; } else { *exact = false; } } if (entry_idx != NULL) { *entry_idx = idx; } } out_null: return ret; } /** * Find the first enabled MPU entry. * * @param entries Array of MPU entries with XTENSA_MPU_NUM_ENTRIES elements. * * @return Index of the first enabled entry. * @retval XTENSA_MPU_NUM_ENTRIES if no entry is enabled. */ static inline uint8_t find_first_enabled_entry(const struct xtensa_mpu_entry *entries) { int first_enabled_idx; for (first_enabled_idx = 0; first_enabled_idx < XTENSA_MPU_NUM_ENTRIES; first_enabled_idx++) { if (entries[first_enabled_idx].as.p.enable) { break; } } return first_enabled_idx; } /** * Compare two MPU entries. * * This is used by qsort to compare two MPU entries on their ordering * based on starting address. * * @param a First MPU entry. * @param b Second MPU entry. * * @retval -1 First address is less than second address. 
* @retval 0 First address is equal to second address. * @retval 1 First address is great than second address. */ static int compare_entries(const void *a, const void *b) { struct xtensa_mpu_entry *e_a = (struct xtensa_mpu_entry *)a; struct xtensa_mpu_entry *e_b = (struct xtensa_mpu_entry *)b; uintptr_t addr_a = xtensa_mpu_entry_start_address_get(e_a); uintptr_t addr_b = xtensa_mpu_entry_start_address_get(e_b); if (addr_a < addr_b) { return -1; } else if (addr_a == addr_b) { return 0; } else { return 1; } } /** * Sort the MPU entries base on starting address. * * This sorts the MPU entries in ascending order of starting address. * After sorting, it rewrites the segment numbers of all entries. */ static void sort_entries(struct xtensa_mpu_entry *entries) { qsort(entries, XTENSA_MPU_NUM_ENTRIES, sizeof(entries[0]), compare_entries); for (uint32_t idx = 0; idx < XTENSA_MPU_NUM_ENTRIES; idx++) { /* Segment value must correspond to the index. */ entries[idx].at.p.segment = idx; } } /** * Consolidate the MPU entries. * * This removes consecutive entries where the attributes are the same. * * @param entries Array of MPU entries with XTENSA_MPU_NUM_ENTRIES elements. * @param first_enabled_idx Index of first enabled entry. * * @return Index of the first enabled entry after consolidation. */ static uint8_t consolidate_entries(struct xtensa_mpu_entry *entries, uint8_t first_enabled_idx) { uint8_t new_first; uint8_t idx_0 = first_enabled_idx; uint8_t idx_1 = first_enabled_idx + 1; bool to_consolidate = false; /* For each a pair of entries... */ while (idx_1 < XTENSA_MPU_NUM_ENTRIES) { struct xtensa_mpu_entry *entry_0 = &entries[idx_0]; struct xtensa_mpu_entry *entry_1 = &entries[idx_1]; bool mark_disable_0 = false; bool mark_disable_1 = false; if (xtensa_mpu_entries_has_same_attributes(entry_0, entry_1)) { /* * If both entry has same attributes (access_rights and memory type), * they can be consolidated into one by removing the higher indexed * one. 
*/ mark_disable_1 = true; } else if (xtensa_mpu_entries_has_same_address(entry_0, entry_1)) { /* * If both entries have the same address, the higher index * one always override the lower one. So remove the lower indexed * one. */ mark_disable_0 = true; } /* * Marking an entry as disabled here so it can be removed later. * * The MBZ field of the AS register is re-purposed to indicate that * this is an entry to be removed. */ if (mark_disable_1) { /* Remove the higher indexed entry. */ to_consolidate = true; entry_1->as.p.mbz = 1U; /* Skip ahead for next comparison. */ idx_1++; continue; } else if (mark_disable_0) { /* Remove the lower indexed entry. */ to_consolidate = true; entry_0->as.p.mbz = 1U; } idx_0 = idx_1; idx_1++; } if (to_consolidate) { uint8_t read_idx = XTENSA_MPU_NUM_ENTRIES - 1; uint8_t write_idx = XTENSA_MPU_NUM_ENTRIES; /* Go through the map from the end and copy enabled entries in place. */ while (read_idx >= first_enabled_idx) { struct xtensa_mpu_entry *entry_rd = &entries[read_idx]; if (entry_rd->as.p.mbz != 1U) { struct xtensa_mpu_entry *entry_wr; write_idx--; entry_wr = &entries[write_idx]; *entry_wr = *entry_rd; entry_wr->at.p.segment = write_idx; } read_idx--; } /* New first enabled entry is where the last written entry is. */ new_first = write_idx; for (idx_0 = 0; idx_0 < new_first; idx_0++) { struct xtensa_mpu_entry *e = &entries[idx_0]; /* Shortcut to zero out address and enabled bit. */ e->as.raw = 0U; /* Segment value must correspond to the index. */ e->at.p.segment = idx_0; /* No access at all for both kernel and user modes. */ e->at.p.access_rights = XTENSA_MPU_ACCESS_P_NA_U_NA; /* Use default memory type for disabled entries. */ e->at.p.memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE; } } else { /* No need to conlidate entries. Map is same as before. */ new_first = first_enabled_idx; } return new_first; } /** * Add a memory region to the MPU map. 
* * This adds a memory region to the MPU map, by setting the appropriate * start and end entries. This may re-use existing entries or add new * entries to the map. * * @param[in,out] map Pointer to MPU map. * @param[in] start_addr Start address of the region. * @param[in] end_addr End address of the region. * @param[in] access_rights Access rights of this region. * @param[in] memory_type Memory type of this region. * @param[out] first_idx Return index of first enabled entry if not NULL. * * @retval 0 Successful in adding the region. * @retval -EINVAL Invalid values in function arguments. */ static int mpu_map_region_add(struct xtensa_mpu_map *map, uintptr_t start_addr, uintptr_t end_addr, uint32_t access_rights, uint32_t memory_type, uint8_t *first_idx) { int ret; bool exact_s, exact_e; uint8_t idx_s, idx_e, first_enabled_idx; struct xtensa_mpu_entry *entry_slot_s, *entry_slot_e, prev_entry; struct xtensa_mpu_entry *entries = map->entries; if (start_addr >= end_addr) { ret = -EINVAL; goto out; } first_enabled_idx = find_first_enabled_entry(entries); if (first_enabled_idx >= XTENSA_MPU_NUM_ENTRIES) { /* * If the last entry in the map is not enabled and the start * address is NULL, we can assume the map has not been populated * at all. This is because we group all enabled entries at * the end of map. */ struct xtensa_mpu_entry *last_entry = &entries[XTENSA_MPU_NUM_ENTRIES - 1]; if (!xtensa_mpu_entry_enable_get(last_entry) && (xtensa_mpu_entry_start_address_get(last_entry) == 0U)) { /* Empty table, so populate the entries as-is. */ if (end_addr == 0xFFFFFFFFU) { /* * Region goes to end of memory, so only need to * program one entry. */ entry_slot_s = &entries[XTENSA_MPU_NUM_ENTRIES - 1]; xtensa_mpu_entry_set(entry_slot_s, start_addr, true, access_rights, memory_type); first_enabled_idx = XTENSA_MPU_NUM_ENTRIES - 1; goto end; } else { /* * Populate the last two entries to indicate * a memory region. 
Notice that the second entry * is not enabled as it is merely marking the end of * a region and is not the starting of another * enabled MPU region. */ entry_slot_s = &entries[XTENSA_MPU_NUM_ENTRIES - 2]; entry_slot_e = &entries[XTENSA_MPU_NUM_ENTRIES - 1]; xtensa_mpu_entry_set(entry_slot_s, start_addr, true, access_rights, memory_type); xtensa_mpu_entry_set(entry_slot_e, end_addr, false, XTENSA_MPU_ACCESS_P_NA_U_NA, CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE); first_enabled_idx = XTENSA_MPU_NUM_ENTRIES - 2; goto end; } ret = 0; goto out; } first_enabled_idx = consolidate_entries(entries, first_enabled_idx); if (first_enabled_idx >= XTENSA_MPU_NUM_ENTRIES) { ret = -EINVAL; goto out; } } entry_slot_s = (struct xtensa_mpu_entry *) check_addr_in_mpu_entries(entries, start_addr, first_enabled_idx, &exact_s, &idx_s); entry_slot_e = (struct xtensa_mpu_entry *) check_addr_in_mpu_entries(entries, end_addr, first_enabled_idx, &exact_e, &idx_e); __ASSERT_NO_MSG(entry_slot_s != NULL); __ASSERT_NO_MSG(entry_slot_e != NULL); __ASSERT_NO_MSG(start_addr < end_addr); if ((entry_slot_s == NULL) || (entry_slot_e == NULL)) { ret = -EINVAL; goto out; } /* * Figure out if we need to add new slots for either addresses. * If the addresses match exactly the addresses current in map, * we can reuse those entries without adding new one. */ if (!exact_s || !exact_e) { uint8_t needed = (exact_s ? 0 : 1) + (exact_e ? 0 : 1); /* Check if there are enough empty slots. */ if (first_enabled_idx < needed) { ret = -ENOMEM; goto out; } } /* * Need to keep track of the attributes of the memory region before * we start adding entries, as we will need to apply the same * attributes to the "ending address" entry to preseve the attributes * of existing map. */ prev_entry = *entry_slot_e; /* * Entry for beginning of new region. * * - Use existing entry if start addresses are the same for existing * and incoming region. We can simply reuse the entry. * - Add an entry if incoming region is within existing region. 
*/ if (!exact_s) { /* * Put a new entry before the first enabled entry. * We will sort the entries later. */ first_enabled_idx--; entry_slot_s = &entries[first_enabled_idx]; } xtensa_mpu_entry_set(entry_slot_s, start_addr, true, access_rights, memory_type); /* * Entry for ending of region. * * - Add an entry if incoming region is within existing region. * - If the end address matches exactly to existing entry, there is * no need to do anything. */ if (!exact_e) { /* * Put a new entry before the first enabled entry. * We will sort the entries later. */ first_enabled_idx--; entry_slot_e = &entries[first_enabled_idx]; /* * Since we are going to punch a hole in the map, * we need to preserve the attribute of existing region * between the end address and next entry. */ *entry_slot_e = prev_entry; xtensa_mpu_entry_start_address_set(entry_slot_e, end_addr); } /* Sort the entries in ascending order of starting address */ sort_entries(entries); /* * Need to figure out where the start and end entries are as sorting * may change their positions. */ entry_slot_s = (struct xtensa_mpu_entry *) check_addr_in_mpu_entries(entries, start_addr, first_enabled_idx, &exact_s, &idx_s); entry_slot_e = (struct xtensa_mpu_entry *) check_addr_in_mpu_entries(entries, end_addr, first_enabled_idx, &exact_e, &idx_e); /* Both must be exact match. */ __ASSERT_NO_MSG(exact_s); __ASSERT_NO_MSG(exact_e); if (end_addr == 0xFFFFFFFFU) { /* * If end_addr = 0xFFFFFFFFU, entry_slot_e and idx_e both * point to the last slot. Because the incoming region goes * to the end of memory, we simply cheat by including * the last entry by incrementing idx_e so the loop to * update entries will change the attribute of last entry * in map. */ idx_e++; } /* * Any existing entries between the "newly" popluated start and * end entries must bear the same attributes. So modify them * here. 
*/ for (int idx = idx_s + 1; idx < idx_e; idx++) { xtensa_mpu_entry_attributes_set(&entries[idx], access_rights, memory_type); } end: if (first_idx != NULL) { *first_idx = first_enabled_idx; } ret = 0; out: return ret; } /** * Write the MPU map to hardware. * * @param map Pointer to foreground MPU map. */ #ifdef CONFIG_USERSPACE /* With userspace enabled, the pointer to per memory domain MPU map is stashed * inside the thread struct. If we still only take struct xtensa_mpu_map as * argument, a wrapper function is needed. To avoid the cost associated with * calling that wrapper function, takes thread pointer directly as argument * when userspace is enabled. Not to mention that writing the map to hardware * is already a costly operation per context switch. So every little bit helps. */ void xtensa_mpu_map_write(struct k_thread *thread) #else void xtensa_mpu_map_write(struct xtensa_mpu_map *map) #endif { int entry; #ifdef CONFIG_USERSPACE struct xtensa_mpu_map *map = thread->arch.mpu_map; #endif /* * Clear MPU entries first, then write MPU entries in reverse order. * * Remember that the boundary of each memory region is marked by * two consecutive entries, and that the addresses of all entries * must not be in descending order (i.e. equal or increasing). * To ensure this, we clear out the entries first then write them * in reverse order. This avoids any intermediate invalid * configuration with regard to ordering. */ for (entry = 0; entry < XTENSA_MPU_NUM_ENTRIES; entry++) { __asm__ volatile("wptlb %0, %1\n\t" : : "a"(entry), "a"(0)); } for (entry = XTENSA_MPU_NUM_ENTRIES - 1; entry >= 0; entry--) { __asm__ volatile("wptlb %0, %1\n\t" : : "a"(map->entries[entry].at), "a"(map->entries[entry].as)); } } /** * Perform necessary steps to enable MPU. */ void xtensa_mpu_init(void) { unsigned int entry; uint8_t first_enabled_idx; /* Disable all foreground segments before we start configuration. 
*/ xtensa_mpu_mpuenb_write(0); /* * Clear the foreground MPU map so we can populate it later with valid entries. * Note that we still need to make sure the map is valid, and cannot be totally * zeroed. */ for (entry = 0; entry < XTENSA_MPU_NUM_ENTRIES; entry++) { /* Make sure to zero out everything as a start, especially the MBZ fields. */ struct xtensa_mpu_entry ent = {0}; /* Segment value must correspond to the index. */ ent.at.p.segment = entry; /* No access at all for both kernel and user modes. */ ent.at.p.access_rights = XTENSA_MPU_ACCESS_P_NA_U_NA; /* Use default memory type for disabled entries. */ ent.at.p.memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE; xtensa_mpu_map_fg_kernel.entries[entry] = ent; } #ifndef CONFIG_XTENSA_MPU_ONLY_SOC_RANGES /* * Add necessary MPU entries for the memory regions of base Zephyr image. */ for (entry = 0; entry < ARRAY_SIZE(mpu_zephyr_ranges); entry++) { const struct xtensa_mpu_range *range = &mpu_zephyr_ranges[entry]; int ret = mpu_map_region_add(&xtensa_mpu_map_fg_kernel, range->start, range->end, range->access_rights, range->memory_type, &first_enabled_idx); ARG_UNUSED(ret); __ASSERT(ret == 0, "Unable to add region [0x%08x, 0x%08x): %d", (unsigned int)range->start, (unsigned int)range->end, ret); } #endif /* !CONFIG_XTENSA_MPU_ONLY_SOC_RANGES */ /* * Now for the entries for memory regions needed by SoC. */ for (entry = 0; entry < xtensa_soc_mpu_ranges_num; entry++) { const struct xtensa_mpu_range *range = &xtensa_soc_mpu_ranges[entry]; int ret = mpu_map_region_add(&xtensa_mpu_map_fg_kernel, range->start, range->end, range->access_rights, range->memory_type, &first_enabled_idx); ARG_UNUSED(ret); __ASSERT(ret == 0, "Unable to add region [0x%08x, 0x%08x): %d", (unsigned int)range->start, (unsigned int)range->end, ret); } /* Consolidate entries so we have a compact map at boot. */ consolidate_entries(xtensa_mpu_map_fg_kernel.entries, first_enabled_idx); /* Write the map into hardware. There is no turning back now. 
*/ #ifdef CONFIG_USERSPACE struct k_thread dummy_map_thread; dummy_map_thread.arch.mpu_map = &xtensa_mpu_map_fg_kernel; xtensa_mpu_map_write(&dummy_map_thread); #else xtensa_mpu_map_write(&xtensa_mpu_map_fg_kernel); #endif } #ifdef CONFIG_USERSPACE int arch_mem_domain_init(struct k_mem_domain *domain) { domain->arch.mpu_map = xtensa_mpu_map_fg_kernel; return 0; } int arch_mem_domain_max_partitions_get(void) { /* * Due to each memory region requiring 2 MPU entries to describe, * it is hard to figure out how many partitions are available. * For example, if all those partitions are contiguous, it only * needs 2 entries (1 if the end of region already has an entry). * If they are all disjoint, it will need (2 * n) entries to * describe all of them. So just use CONFIG_MAX_DOMAIN_PARTITIONS * here and let the application set this instead. */ return CONFIG_MAX_DOMAIN_PARTITIONS; } int arch_mem_domain_partition_remove(struct k_mem_domain *domain, uint32_t partition_id) { int ret; uint32_t perm; struct xtensa_mpu_map *map = &domain->arch.mpu_map; struct k_mem_partition *partition = &domain->partitions[partition_id]; uintptr_t end_addr = partition->start + partition->size; if (end_addr <= partition->start) { ret = -EINVAL; goto out; } /* * This is simply to get rid of the user permissions and retain * whatever the kernel permissions are. So that we won't be * setting the memory region permission incorrectly, for example, * marking read only region writable. * * Note that Zephyr does not do RWX partitions so we can treat it * as invalid. 
*/ switch (partition->attr) { case XTENSA_MPU_ACCESS_P_RO_U_NA: __fallthrough; case XTENSA_MPU_ACCESS_P_RX_U_NA: __fallthrough; case XTENSA_MPU_ACCESS_P_RO_U_RO: __fallthrough; case XTENSA_MPU_ACCESS_P_RX_U_RX: perm = XTENSA_MPU_ACCESS_P_RO_U_NA; break; case XTENSA_MPU_ACCESS_P_RW_U_NA: __fallthrough; case XTENSA_MPU_ACCESS_P_RWX_U_NA: __fallthrough; case XTENSA_MPU_ACCESS_P_RW_U_RWX: __fallthrough; case XTENSA_MPU_ACCESS_P_RW_U_RO: __fallthrough; case XTENSA_MPU_ACCESS_P_RWX_U_RX: __fallthrough; case XTENSA_MPU_ACCESS_P_RW_U_RW: __fallthrough; case XTENSA_MPU_ACCESS_P_RWX_U_RWX: perm = XTENSA_MPU_ACCESS_P_RW_U_NA; break; default: /* _P_X_U_NA is not a valid permission for userspace, so ignore. * _P_NA_U_X becomes _P_NA_U_NA when removing user permissions. * _P_WO_U_WO has not kernel only counterpart so just force no access. * If we get here with _P_NA_P_NA, there is something seriously * wrong with the userspace and/or application code. */ perm = XTENSA_MPU_ACCESS_P_NA_U_NA; break; } /* * Reset the memory region attributes by simply "adding" * a region with default attributes. If entries already * exist for the region, the corresponding entries will * be updated with the default attributes. Or new entries * will be added to carve a hole in existing regions. 
*/ ret = mpu_map_region_add(map, partition->start, end_addr, perm, CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE, NULL); out: return ret; } int arch_mem_domain_partition_add(struct k_mem_domain *domain, uint32_t partition_id) { int ret; struct xtensa_mpu_map *map = &domain->arch.mpu_map; struct k_mem_partition *partition = &domain->partitions[partition_id]; uintptr_t end_addr = partition->start + partition->size; if (end_addr <= partition->start) { ret = -EINVAL; goto out; } ret = mpu_map_region_add(map, partition->start, end_addr, (uint8_t)partition->attr, CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE, NULL); out: return ret; } int arch_mem_domain_thread_add(struct k_thread *thread) { int ret = 0; /* New memory domain we are being added to */ struct k_mem_domain *domain = thread->mem_domain_info.mem_domain; /* * this is only set for threads that were migrating from some other * memory domain; new threads this is NULL. */ struct xtensa_mpu_map *old_map = thread->arch.mpu_map; bool is_user = (thread->base.user_options & K_USER) != 0; bool is_migration = (old_map != NULL) && is_user; uintptr_t stack_end_addr = thread->stack_info.start + thread->stack_info.size; if (stack_end_addr < thread->stack_info.start) { /* Account for wrapping around back to 0. */ stack_end_addr = 0xFFFFFFFFU; } /* * Allow USER access to the thread's stack in its new domain if * we are migrating. If we are not migrating this is done in * xtensa_user_stack_perms(). */ if (is_migration) { /* Add stack to new domain's MPU map. */ ret = mpu_map_region_add(&domain->arch.mpu_map, thread->stack_info.start, stack_end_addr, XTENSA_MPU_ACCESS_P_RW_U_RW, CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE, NULL); /* Probably this fails due to no more available slots in MPU map. */ __ASSERT_NO_MSG(ret == 0); } thread->arch.mpu_map = &domain->arch.mpu_map; /* * Remove thread stack from old memory domain if we are * migrating away from old memory domain. This is done * by simply remove USER access from the region. 
*/ if (is_migration) { /* * Remove stack from old MPU map by... * "adding" a new memory region to the map * as this carves a hole in the existing map. */ ret = mpu_map_region_add(old_map, thread->stack_info.start, stack_end_addr, XTENSA_MPU_ACCESS_P_RW_U_NA, CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE, NULL); } /* * Need to switch to new MPU map if this is the current * running thread. */ if (thread == _current_cpu->current) { xtensa_mpu_map_write(thread); } return ret; } int arch_mem_domain_thread_remove(struct k_thread *thread) { uintptr_t stack_end_addr; int ret; struct k_mem_domain *domain = thread->mem_domain_info.mem_domain; if ((thread->base.user_options & K_USER) == 0) { ret = 0; goto out; } if ((thread->base.thread_state & _THREAD_DEAD) == 0) { /* Thread is migrating to another memory domain and not * exiting for good; we weren't called from * z_thread_abort(). Resetting the stack region will * take place in the forthcoming thread_add() call. */ ret = 0; goto out; } stack_end_addr = thread->stack_info.start + thread->stack_info.size; if (stack_end_addr < thread->stack_info.start) { /* Account for wrapping around back to 0. */ stack_end_addr = 0xFFFFFFFFU; } /* * Restore permissions on the thread's stack area since it is no * longer a member of the domain. 
*/ ret = mpu_map_region_add(&domain->arch.mpu_map, thread->stack_info.start, stack_end_addr, XTENSA_MPU_ACCESS_P_RW_U_NA, CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE, NULL); xtensa_mpu_map_write(thread); out: return ret; } int arch_buffer_validate(const void *addr, size_t size, int write) { uintptr_t aligned_addr; size_t aligned_size, addr_offset; int ret = 0; /* addr/size arbitrary, fix this up into an aligned region */ aligned_addr = ROUND_DOWN((uintptr_t)addr, XCHAL_MPU_ALIGN); addr_offset = (uintptr_t)addr - aligned_addr; aligned_size = ROUND_UP(size + addr_offset, XCHAL_MPU_ALIGN); for (size_t offset = 0; offset < aligned_size; offset += XCHAL_MPU_ALIGN) { uint32_t probed = xtensa_pptlb_probe(aligned_addr + offset); if ((probed & XTENSA_MPU_PROBE_VALID_ENTRY_MASK) == 0U) { /* There is no foreground or background entry associated * with the region. */ ret = -EPERM; goto out; } uint8_t access_rights = (probed & XTENSA_MPU_PPTLB_ACCESS_RIGHTS_MASK) >> XTENSA_MPU_PPTLB_ACCESS_RIGHTS_SHIFT; if (write) { /* Need to check write permission. */ switch (access_rights) { case XTENSA_MPU_ACCESS_P_WO_U_WO: __fallthrough; case XTENSA_MPU_ACCESS_P_RW_U_RWX: __fallthrough; case XTENSA_MPU_ACCESS_P_RW_U_RW: __fallthrough; case XTENSA_MPU_ACCESS_P_RWX_U_RWX: /* These permissions are okay. */ break; default: ret = -EPERM; goto out; } } else { /* Only check read permission. */ switch (access_rights) { case XTENSA_MPU_ACCESS_P_RW_U_RWX: __fallthrough; case XTENSA_MPU_ACCESS_P_RW_U_RO: __fallthrough; case XTENSA_MPU_ACCESS_P_RWX_U_RX: __fallthrough; case XTENSA_MPU_ACCESS_P_RO_U_RO: __fallthrough; case XTENSA_MPU_ACCESS_P_RX_U_RX: __fallthrough; case XTENSA_MPU_ACCESS_P_RW_U_RW: __fallthrough; case XTENSA_MPU_ACCESS_P_RWX_U_RWX: /* These permissions are okay. 
*/ break; default: ret = -EPERM; goto out; } } } out: return ret; } bool xtensa_mem_kernel_has_access(void *addr, size_t size, int write) { uintptr_t aligned_addr; size_t aligned_size, addr_offset; bool ret = true; /* addr/size arbitrary, fix this up into an aligned region */ aligned_addr = ROUND_DOWN((uintptr_t)addr, XCHAL_MPU_ALIGN); addr_offset = (uintptr_t)addr - aligned_addr; aligned_size = ROUND_UP(size + addr_offset, XCHAL_MPU_ALIGN); for (size_t offset = 0; offset < aligned_size; offset += XCHAL_MPU_ALIGN) { uint32_t probed = xtensa_pptlb_probe(aligned_addr + offset); if ((probed & XTENSA_MPU_PROBE_VALID_ENTRY_MASK) == 0U) { /* There is no foreground or background entry associated * with the region. */ ret = false; goto out; } uint8_t access_rights = (probed & XTENSA_MPU_PPTLB_ACCESS_RIGHTS_MASK) >> XTENSA_MPU_PPTLB_ACCESS_RIGHTS_SHIFT; if (write != 0) { /* Need to check write permission. */ switch (access_rights) { case XTENSA_MPU_ACCESS_P_RW_U_NA: __fallthrough; case XTENSA_MPU_ACCESS_P_RWX_U_NA: __fallthrough; case XTENSA_MPU_ACCESS_P_WO_U_WO: __fallthrough; case XTENSA_MPU_ACCESS_P_RW_U_RWX: __fallthrough; case XTENSA_MPU_ACCESS_P_RW_U_RO: __fallthrough; case XTENSA_MPU_ACCESS_P_RWX_U_RX: __fallthrough; case XTENSA_MPU_ACCESS_P_RW_U_RW: __fallthrough; case XTENSA_MPU_ACCESS_P_RWX_U_RWX: /* These permissions are okay. */ break; default: ret = false; goto out; } } else { /* Only check read permission. 
*/ switch (access_rights) { case XTENSA_MPU_ACCESS_P_RO_U_NA: __fallthrough; case XTENSA_MPU_ACCESS_P_RX_U_NA: __fallthrough; case XTENSA_MPU_ACCESS_P_RW_U_NA: __fallthrough; case XTENSA_MPU_ACCESS_P_RWX_U_NA: __fallthrough; case XTENSA_MPU_ACCESS_P_RW_U_RWX: __fallthrough; case XTENSA_MPU_ACCESS_P_RW_U_RO: __fallthrough; case XTENSA_MPU_ACCESS_P_RWX_U_RX: __fallthrough; case XTENSA_MPU_ACCESS_P_RO_U_RO: __fallthrough; case XTENSA_MPU_ACCESS_P_RX_U_RX: __fallthrough; case XTENSA_MPU_ACCESS_P_RW_U_RW: __fallthrough; case XTENSA_MPU_ACCESS_P_RWX_U_RWX: /* These permissions are okay. */ break; default: ret = false; goto out; } } } out: return ret; } void xtensa_user_stack_perms(struct k_thread *thread) { int ret; uintptr_t stack_end_addr = thread->stack_info.start + thread->stack_info.size; if (stack_end_addr < thread->stack_info.start) { /* Account for wrapping around back to 0. */ stack_end_addr = 0xFFFFFFFFU; } (void)memset((void *)thread->stack_info.start, (IS_ENABLED(CONFIG_INIT_STACKS)) ? 0xAA : 0x00, thread->stack_info.size - thread->stack_info.delta); /* Add stack to new domain's MPU map. */ ret = mpu_map_region_add(thread->arch.mpu_map, thread->stack_info.start, stack_end_addr, XTENSA_MPU_ACCESS_P_RW_U_RW, CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE, NULL); xtensa_mpu_map_write(thread); /* Probably this fails due to no more available slots in MPU map. */ ARG_UNUSED(ret); __ASSERT_NO_MSG(ret == 0); } #endif /* CONFIG_USERSPACE */ ```
/content/code_sandbox/arch/xtensa/core/mpu.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
9,368
```unknown #include <xtensa/config/core-isa.h> /* * Not a C source code file. * * Intended to be preprocessed only, to produce output for * interpretation by the xtensa-int-handlers.py script. Literally all * this does is emit records for which interrupts are at which level, * available per-hardware by an SDK-provided core-isa.h file. */ __xtensa_int_level_magic__ 0 XCHAL_INT0_LEVEL __xtensa_int_level_magic__ 1 XCHAL_INT1_LEVEL __xtensa_int_level_magic__ 2 XCHAL_INT2_LEVEL __xtensa_int_level_magic__ 3 XCHAL_INT3_LEVEL __xtensa_int_level_magic__ 4 XCHAL_INT4_LEVEL __xtensa_int_level_magic__ 5 XCHAL_INT5_LEVEL __xtensa_int_level_magic__ 6 XCHAL_INT6_LEVEL __xtensa_int_level_magic__ 7 XCHAL_INT7_LEVEL __xtensa_int_level_magic__ 8 XCHAL_INT8_LEVEL __xtensa_int_level_magic__ 9 XCHAL_INT9_LEVEL __xtensa_int_level_magic__ 10 XCHAL_INT10_LEVEL __xtensa_int_level_magic__ 11 XCHAL_INT11_LEVEL __xtensa_int_level_magic__ 12 XCHAL_INT12_LEVEL __xtensa_int_level_magic__ 13 XCHAL_INT13_LEVEL __xtensa_int_level_magic__ 14 XCHAL_INT14_LEVEL __xtensa_int_level_magic__ 15 XCHAL_INT15_LEVEL __xtensa_int_level_magic__ 16 XCHAL_INT16_LEVEL __xtensa_int_level_magic__ 17 XCHAL_INT17_LEVEL __xtensa_int_level_magic__ 18 XCHAL_INT18_LEVEL __xtensa_int_level_magic__ 19 XCHAL_INT19_LEVEL __xtensa_int_level_magic__ 20 XCHAL_INT20_LEVEL __xtensa_int_level_magic__ 21 XCHAL_INT21_LEVEL __xtensa_int_level_magic__ 22 XCHAL_INT22_LEVEL __xtensa_int_level_magic__ 23 XCHAL_INT23_LEVEL __xtensa_int_level_magic__ 24 XCHAL_INT24_LEVEL __xtensa_int_level_magic__ 25 XCHAL_INT25_LEVEL __xtensa_int_level_magic__ 26 XCHAL_INT26_LEVEL __xtensa_int_level_magic__ 27 XCHAL_INT27_LEVEL __xtensa_int_level_magic__ 28 XCHAL_INT28_LEVEL __xtensa_int_level_magic__ 29 XCHAL_INT29_LEVEL __xtensa_int_level_magic__ 30 XCHAL_INT30_LEVEL __xtensa_int_level_magic__ 31 XCHAL_INT31_LEVEL ```
/content/code_sandbox/arch/xtensa/core/xtensa_intgen.tmpl
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
595
```python #!/usr/bin/env python3 import re import fileinput # Pass an Xtensa core-isa.h file on stdin or the command line, emits a # C file on output containing optimized interrupt dispatch routines. # FIXME: looking at the assembly generated by the ESP-32 toolchain, # this isn't as optimal as I'd hoped. the individual cases are tested # using a L32R + BNONE (i.e. a full mask test) instead of a BBSI, and # the handlers are being invoked with CALL8 instead of CALL4, # inexplicably wasting four words of stack. Maybe this should be # emitting assembly instead. Wouldn't be much more complicated and # would share all the same structure. # My manual count of instructions says that a linear search becomes # faster on average when there are three or fewer bits to test. Would # be four, if the compiler would generate BBSI instructions. MAX_TESTS = 3 ints_by_lvl = {} # print() wrapper that automatically handles indentation levels cindent = 0 def cprint(s): global cindent if s.endswith(":"): print(s) return if s.find("}") >= 0: cindent -= 1 s = cindent*"\t" + s print(s) if s.find("{") >= 0: cindent += 1 def emit_int_handler(ints): if len(ints) <= MAX_TESTS: for i in ints: # FIXME: a little work could allow us to extract the # handler pointer and argument as literals, saving a few # instructions and avoiding the need to link in # _sw_isr_table entirely. cprint("if (mask & BIT(%d)) {" % i) cprint("mask = BIT(%d);" % i) cprint("irq = %d;" % i) cprint("goto handle_irq;") cprint("}") else: half = int(len(ints)/2) m = 0 for i in ints[0:half]: m |= 1 << i cprint("if (mask & " + ("0x%x" % (m)) + ") {") emit_int_handler(ints[0:half]) cprint("} else {") emit_int_handler(ints[half:]) cprint("}") ######################################################################## # Annoyingly need to join lines and remove #-marked annotations. 
Some # versions of the preprocessor (ahem, esp32 SDK) like to include # newlines in the output where the original expressions are expanded # from 100% single line macros. Slurp it into a single string and # parse via whitespace. blob = "" for l in fileinput.input(): l = l if l.find("#") < 0 else l[0:l.find("#")] blob += l.rstrip() + " " for match in re.finditer(r'__xtensa_int_level_magic__\s+(\d+)\s+(\d+)', blob): irq = int(match.group(1)) lvl = int(match.group(2)) if lvl not in ints_by_lvl: ints_by_lvl[lvl] = [] ints_by_lvl[lvl].append(irq) cprint("/*") cprint(" * THIS FILE WAS AUTOMATICALLY GENERATED. DO NOT EDIT.") cprint(" *") cprint(" * Functions here are designed to produce efficient code to") cprint(" * search an Xtensa bitmask of interrupts, inspecting only those bits") cprint(" * declared to be associated with a given interrupt level. Each") cprint(" * dispatcher will handle exactly one flagged interrupt, in numerical") cprint(" * order (low bits first) and will return a mask of that bit that can") cprint(" * then be cleared by the calling code. Unrecognized bits for the") cprint(" * level will invoke an error handler.") cprint(" */") cprint("") # Re-include the core-isa header and be sure our definitions match, for sanity cprint("#include <xtensa/config/core-isa.h>") cprint("#include <zephyr/sys/util.h>") cprint("#include <zephyr/sw_isr_table.h>") cprint("") for l in ints_by_lvl: for i in ints_by_lvl[l]: v = "XCHAL_INT" + str(i) + "_LEVEL" cprint("#if !defined(" + v + ") || " + str(v) + " != " + str(l)) cprint("#error core-isa.h interrupt level does not match dispatcher!") cprint("#endif") cprint("") # Populate all theoretical levels just in case. Odd cores have been # seen in the wild with "empty" interrupt levels that exist in the # hardware but without any interrupts associated with them. The # unused handlers will be ignored if uncalled. 
max = 15 for lvl in range(0, max+1): if not lvl in ints_by_lvl: ints_by_lvl[lvl] = [] # Emit the handlers for lvl in ints_by_lvl: cprint("static inline int _xtensa_handle_one_int" + str(lvl) + "(unsigned int mask)") cprint("{") if not ints_by_lvl[lvl]: cprint("return 0;") cprint("}") continue cprint("int irq;") print("") emit_int_handler(sorted(ints_by_lvl[lvl])) cprint("return 0;") cprint("handle_irq:") cprint("_sw_isr_table[irq].isr(_sw_isr_table[irq].arg);") cprint("return mask;") cprint("}") cprint("") ```
/content/code_sandbox/arch/xtensa/core/xtensa_intgen.py
python
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,236
```c /* */ #include <zephyr/kernel.h> #include <zephyr/cache.h> #include <zephyr/arch/xtensa/arch.h> #include <zephyr/arch/xtensa/xtensa_mmu.h> #include <zephyr/linker/linker-defs.h> #include <zephyr/logging/log.h> #include <zephyr/kernel/mm.h> #include <zephyr/toolchain.h> #include <xtensa/corebits.h> #include <xtensa_mmu_priv.h> #include <kernel_arch_func.h> #include <mmu.h> /* Skip TLB IPI when updating page tables. * This allows us to send IPI only after the last * changes of a series. */ #define OPTION_NO_TLB_IPI BIT(0) /* Level 1 contains page table entries * necessary to map the page table itself. */ #define XTENSA_L1_PAGE_TABLE_ENTRIES 1024U /* Size of level 1 page table. */ #define XTENSA_L1_PAGE_TABLE_SIZE (XTENSA_L1_PAGE_TABLE_ENTRIES * sizeof(uint32_t)) /* Level 2 contains page table entries * necessary to map the page table itself. */ #define XTENSA_L2_PAGE_TABLE_ENTRIES 1024U /* Size of level 2 page table. */ #define XTENSA_L2_PAGE_TABLE_SIZE (XTENSA_L2_PAGE_TABLE_ENTRIES * sizeof(uint32_t)) LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); BUILD_ASSERT(CONFIG_MMU_PAGE_SIZE == 0x1000, "MMU_PAGE_SIZE value is invalid, only 4 kB pages are supported\n"); /* * Level 1 page table has to be 4Kb to fit into one of the wired entries. * All entries are initialized as INVALID, so an attempt to read an unmapped * area will cause a double exception. * * Each memory domain contains its own l1 page table. The kernel l1 page table is * located at the index 0. */ static uint32_t l1_page_table[CONFIG_XTENSA_MMU_NUM_L1_TABLES][XTENSA_L1_PAGE_TABLE_ENTRIES] __aligned(KB(4)); /* * That is an alias for the page tables set used by the kernel. */ uint32_t *xtensa_kernel_ptables = (uint32_t *)l1_page_table[0]; /* * Each table in the level 2 maps a 4Mb memory range. It consists of 1024 entries each one * covering a 4Kb page. 
 */
static uint32_t l2_page_tables[CONFIG_XTENSA_MMU_NUM_L2_TABLES][XTENSA_L2_PAGE_TABLE_ENTRIES]
				__aligned(KB(4));

/*
 * This additional variable tracks which l1 tables are in use. This is kept separated from
 * the tables to keep alignment easier.
 *
 * @note: The first bit is set because it is used for the kernel page tables.
 */
static ATOMIC_DEFINE(l1_page_table_track, CONFIG_XTENSA_MMU_NUM_L1_TABLES);

/*
 * This additional variable tracks which l2 tables are in use. This is kept separated from
 * the tables to keep alignment easier.
 */
static ATOMIC_DEFINE(l2_page_tables_track, CONFIG_XTENSA_MMU_NUM_L2_TABLES);

/*
 * Protects xtensa_domain_list and serializes access to page tables.
 */
static struct k_spinlock xtensa_mmu_lock;

#ifdef CONFIG_USERSPACE

/*
 * Each domain has its own ASID. ASID can go through 1 (kernel) to 255.
 * When a TLB entry matches, the hw will check the ASID in the entry and finds
 * the correspondent position in the RASID register. This position will then be
 * compared with the current ring (CRING) to check the permission.
 *
 * NOTE(review): counter starts at 3, presumably because ASID 1 is the kernel's
 * and ASID 2 is reserved (e.g. for shared mappings) -- confirm against the
 * RASID setup code before relying on this.
 */
static uint8_t asid_count = 3;

/*
 * List with all active and initialized memory domains.
 */
static sys_slist_t xtensa_domain_list;
#endif /* CONFIG_USERSPACE */

/* Heap bounds provided by the linker script. */
extern char _heap_end[];
extern char _heap_start[];

/*
 * Static definition of all code & data memory regions of the
 * current Zephyr image. This information must be available &
 * processed upon MMU initialization.
 */
static const struct xtensa_mmu_range mmu_zephyr_ranges[] = {
	/*
	 * Mark the zephyr execution regions (data, bss, noinit, etc.)
	 * cacheable, read / write and non-executable
	 */
	{
		/* This includes .data, .bss and various kobject sections. */
		.start = (uint32_t)_image_ram_start,
		.end = (uint32_t)_image_ram_end,
#ifdef CONFIG_XTENSA_RPO_CACHE
		.attrs = XTENSA_MMU_PERM_W,
#else
		.attrs = XTENSA_MMU_PERM_W | XTENSA_MMU_CACHED_WB,
#endif
		.name = "data",
	},
#if K_HEAP_MEM_POOL_SIZE > 0
	/* System heap memory */
	{
		.start = (uint32_t)_heap_start,
		.end = (uint32_t)_heap_end,
#ifdef CONFIG_XTENSA_RPO_CACHE
		.attrs = XTENSA_MMU_PERM_W,
#else
		.attrs = XTENSA_MMU_PERM_W | XTENSA_MMU_CACHED_WB,
#endif
		.name = "heap",
	},
#endif
	/* Mark text segment cacheable, read only and executable */
	{
		.start = (uint32_t)__text_region_start,
		.end = (uint32_t)__text_region_end,
		.attrs = XTENSA_MMU_PERM_X | XTENSA_MMU_CACHED_WB | XTENSA_MMU_MAP_SHARED,
		.name = "text",
	},
	/* Mark rodata segment cacheable, read only and non-executable */
	{
		.start = (uint32_t)__rodata_region_start,
		.end = (uint32_t)__rodata_region_end,
		.attrs = XTENSA_MMU_CACHED_WB | XTENSA_MMU_MAP_SHARED,
		.name = "rodata",
	},
};

/*
 * Return the L1 page table for a thread: the thread's own tables for
 * user threads (when userspace is enabled), otherwise the kernel's.
 */
static inline uint32_t *thread_page_tables_get(const struct k_thread *thread)
{
#ifdef CONFIG_USERSPACE
	if ((thread->base.user_options & K_USER) != 0U) {
		return thread->arch.ptables;
	}
#endif

	return xtensa_kernel_ptables;
}

/**
 * @brief Check if the page table entry is illegal.
 *
 * @param[in] pte Page table entry.
 */
static inline bool is_pte_illegal(uint32_t pte)
{
	uint32_t attr = pte & XTENSA_MMU_PTE_ATTR_MASK;

	/*
	 * The ISA manual states only 12 and 14 are illegal values.
	 * 13 and 15 are not. So we need to be more specific than simply
	 * testing if bits 2 and 3 are set.
	 */
	return (attr == 12) || (attr == 14);
}

/*
 * @brief Initialize all page table entries to be illegal.
 *
 * @param[in] ptable Pointer to page table.
 * @param[in] num_entries Number of page table entries in the page table.
*/ static void init_page_table(uint32_t *ptable, size_t num_entries) { int i; for (i = 0; i < num_entries; i++) { ptable[i] = XTENSA_MMU_PTE_ILLEGAL; } } static inline uint32_t *alloc_l2_table(void) { uint16_t idx; for (idx = 0; idx < CONFIG_XTENSA_MMU_NUM_L2_TABLES; idx++) { if (!atomic_test_and_set_bit(l2_page_tables_track, idx)) { return (uint32_t *)&l2_page_tables[idx]; } } return NULL; } static void map_memory_range(const uint32_t start, const uint32_t end, const uint32_t attrs) { uint32_t page, *table; bool shared = !!(attrs & XTENSA_MMU_MAP_SHARED); uint32_t sw_attrs = (attrs & XTENSA_MMU_PTE_ATTR_ORIGINAL) == XTENSA_MMU_PTE_ATTR_ORIGINAL ? attrs : 0; for (page = start; page < end; page += CONFIG_MMU_PAGE_SIZE) { uint32_t pte = XTENSA_MMU_PTE(page, shared ? XTENSA_MMU_SHARED_RING : XTENSA_MMU_KERNEL_RING, sw_attrs, attrs); uint32_t l2_pos = XTENSA_MMU_L2_POS(page); uint32_t l1_pos = XTENSA_MMU_L1_POS(page); if (is_pte_illegal(xtensa_kernel_ptables[l1_pos])) { table = alloc_l2_table(); __ASSERT(table != NULL, "There is no l2 page table available to " "map 0x%08x\n", page); init_page_table(table, XTENSA_L2_PAGE_TABLE_ENTRIES); xtensa_kernel_ptables[l1_pos] = XTENSA_MMU_PTE((uint32_t)table, XTENSA_MMU_KERNEL_RING, sw_attrs, XTENSA_MMU_PAGE_TABLE_ATTR); } table = (uint32_t *)(xtensa_kernel_ptables[l1_pos] & XTENSA_MMU_PTE_PPN_MASK); table[l2_pos] = pte; } } static void map_memory(const uint32_t start, const uint32_t end, const uint32_t attrs) { map_memory_range(start, end, attrs); #ifdef CONFIG_XTENSA_MMU_DOUBLE_MAP if (sys_cache_is_ptr_uncached((void *)start)) { map_memory_range(POINTER_TO_UINT(sys_cache_cached_ptr_get((void *)start)), POINTER_TO_UINT(sys_cache_cached_ptr_get((void *)end)), attrs | XTENSA_MMU_CACHED_WB); } else if (sys_cache_is_ptr_cached((void *)start)) { map_memory_range(POINTER_TO_UINT(sys_cache_uncached_ptr_get((void *)start)), POINTER_TO_UINT(sys_cache_uncached_ptr_get((void *)end)), attrs); } #endif } static void 
xtensa_init_page_tables(void) { volatile uint8_t entry; static bool already_inited; if (already_inited) { return; } already_inited = true; init_page_table(xtensa_kernel_ptables, XTENSA_L1_PAGE_TABLE_ENTRIES); atomic_set_bit(l1_page_table_track, 0); for (entry = 0; entry < ARRAY_SIZE(mmu_zephyr_ranges); entry++) { const struct xtensa_mmu_range *range = &mmu_zephyr_ranges[entry]; map_memory(range->start, range->end, range->attrs | XTENSA_MMU_PTE_ATTR_ORIGINAL); } for (entry = 0; entry < xtensa_soc_mmu_ranges_num; entry++) { const struct xtensa_mmu_range *range = &xtensa_soc_mmu_ranges[entry]; map_memory(range->start, range->end, range->attrs | XTENSA_MMU_PTE_ATTR_ORIGINAL); } /* Finally, the direct-mapped pages used in the page tables * must be fixed up to use the same cache attribute (but these * must be writable, obviously). They shouldn't be left at * the default. */ map_memory_range((uint32_t) &l1_page_table[0], (uint32_t) &l1_page_table[CONFIG_XTENSA_MMU_NUM_L1_TABLES], XTENSA_MMU_PAGE_TABLE_ATTR | XTENSA_MMU_PERM_W); map_memory_range((uint32_t) &l2_page_tables[0], (uint32_t) &l2_page_tables[CONFIG_XTENSA_MMU_NUM_L2_TABLES], XTENSA_MMU_PAGE_TABLE_ATTR | XTENSA_MMU_PERM_W); sys_cache_data_flush_all(); } __weak void arch_xtensa_mmu_post_init(bool is_core0) { ARG_UNUSED(is_core0); } void xtensa_mmu_init(void) { xtensa_init_page_tables(); xtensa_init_paging(xtensa_kernel_ptables); /* * This is used to determine whether we are faulting inside double * exception if this is not zero. Sometimes SoC starts with this not * being set to zero. So clear it during boot. 
*/ XTENSA_WSR(ZSR_DEPC_SAVE_STR, 0); arch_xtensa_mmu_post_init(_current_cpu->id == 0); } void xtensa_mmu_reinit(void) { /* First initialize the hardware */ xtensa_init_paging(xtensa_kernel_ptables); #ifdef CONFIG_USERSPACE struct k_thread *thread = _current_cpu->current; struct arch_mem_domain *domain = &(thread->mem_domain_info.mem_domain->arch); /* Set the page table for current context */ xtensa_set_paging(domain->asid, domain->ptables); #endif /* CONFIG_USERSPACE */ arch_xtensa_mmu_post_init(_current_cpu->id == 0); } #ifdef CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES /* Zephyr's linker scripts for Xtensa usually puts * something before z_mapped_start (aka .text), * i.e. vecbase, so that we need to reserve those * space or else k_mem_map() would be mapping those, * resulting in faults. */ __weak void arch_reserved_pages_update(void) { uintptr_t page; int idx; for (page = CONFIG_SRAM_BASE_ADDRESS, idx = 0; page < (uintptr_t)z_mapped_start; page += CONFIG_MMU_PAGE_SIZE, idx++) { k_mem_page_frame_set(&k_mem_page_frames[idx], K_MEM_PAGE_FRAME_RESERVED); } } #endif /* CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES */ static bool l2_page_table_map(uint32_t *l1_table, void *vaddr, uintptr_t phys, uint32_t flags, bool is_user) { uint32_t l1_pos = XTENSA_MMU_L1_POS((uint32_t)vaddr); uint32_t l2_pos = XTENSA_MMU_L2_POS((uint32_t)vaddr); uint32_t *table; sys_cache_data_invd_range((void *)&l1_table[l1_pos], sizeof(l1_table[0])); if (is_pte_illegal(l1_table[l1_pos])) { table = alloc_l2_table(); if (table == NULL) { return false; } init_page_table(table, XTENSA_L2_PAGE_TABLE_ENTRIES); l1_table[l1_pos] = XTENSA_MMU_PTE((uint32_t)table, XTENSA_MMU_KERNEL_RING, 0, XTENSA_MMU_PAGE_TABLE_ATTR); sys_cache_data_flush_range((void *)&l1_table[l1_pos], sizeof(l1_table[0])); } table = (uint32_t *)(l1_table[l1_pos] & XTENSA_MMU_PTE_PPN_MASK); table[l2_pos] = XTENSA_MMU_PTE(phys, is_user ? 
XTENSA_MMU_USER_RING : XTENSA_MMU_KERNEL_RING, 0, flags); sys_cache_data_flush_range((void *)&table[l2_pos], sizeof(table[0])); xtensa_tlb_autorefill_invalidate(); return true; } static inline void __arch_mem_map(void *va, uintptr_t pa, uint32_t xtensa_flags, bool is_user) { bool ret; void *vaddr, *vaddr_uc; uintptr_t paddr, paddr_uc; uint32_t flags, flags_uc; if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP)) { if (sys_cache_is_ptr_cached(va)) { vaddr = va; vaddr_uc = sys_cache_uncached_ptr_get(va); } else { vaddr = sys_cache_cached_ptr_get(va); vaddr_uc = va; } if (sys_cache_is_ptr_cached((void *)pa)) { paddr = pa; paddr_uc = (uintptr_t)sys_cache_uncached_ptr_get((void *)pa); } else { paddr = (uintptr_t)sys_cache_cached_ptr_get((void *)pa); paddr_uc = pa; } flags_uc = (xtensa_flags & ~XTENSA_MMU_PTE_ATTR_CACHED_MASK); flags = flags_uc | XTENSA_MMU_CACHED_WB; } else { vaddr = va; paddr = pa; flags = xtensa_flags; } ret = l2_page_table_map(xtensa_kernel_ptables, (void *)vaddr, paddr, flags, is_user); __ASSERT(ret, "Virtual address (%p) already mapped", va); if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP) && ret) { ret = l2_page_table_map(xtensa_kernel_ptables, (void *)vaddr_uc, paddr_uc, flags_uc, is_user); __ASSERT(ret, "Virtual address (%p) already mapped", vaddr_uc); } #ifndef CONFIG_USERSPACE ARG_UNUSED(ret); #else if (ret) { sys_snode_t *node; struct arch_mem_domain *domain; k_spinlock_key_t key; key = k_spin_lock(&z_mem_domain_lock); SYS_SLIST_FOR_EACH_NODE(&xtensa_domain_list, node) { domain = CONTAINER_OF(node, struct arch_mem_domain, node); ret = l2_page_table_map(domain->ptables, (void *)vaddr, paddr, flags, is_user); __ASSERT(ret, "Virtual address (%p) already mapped for domain %p", vaddr, domain); if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP) && ret) { ret = l2_page_table_map(domain->ptables, (void *)vaddr_uc, paddr_uc, flags_uc, is_user); __ASSERT(ret, "Virtual address (%p) already mapped for domain %p", vaddr_uc, domain); } } k_spin_unlock(&z_mem_domain_lock, 
key); } #endif /* CONFIG_USERSPACE */ } void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags) { uint32_t va = (uint32_t)virt; uint32_t pa = (uint32_t)phys; uint32_t rem_size = (uint32_t)size; uint32_t xtensa_flags = 0; k_spinlock_key_t key; bool is_user; if (size == 0) { LOG_ERR("Cannot map physical memory at 0x%08X: invalid " "zero size", (uint32_t)phys); k_panic(); } switch (flags & K_MEM_CACHE_MASK) { case K_MEM_CACHE_WB: xtensa_flags |= XTENSA_MMU_CACHED_WB; break; case K_MEM_CACHE_WT: xtensa_flags |= XTENSA_MMU_CACHED_WT; break; case K_MEM_CACHE_NONE: __fallthrough; default: break; } if ((flags & K_MEM_PERM_RW) == K_MEM_PERM_RW) { xtensa_flags |= XTENSA_MMU_PERM_W; } if ((flags & K_MEM_PERM_EXEC) == K_MEM_PERM_EXEC) { xtensa_flags |= XTENSA_MMU_PERM_X; } is_user = (flags & K_MEM_PERM_USER) == K_MEM_PERM_USER; key = k_spin_lock(&xtensa_mmu_lock); while (rem_size > 0) { __arch_mem_map((void *)va, pa, xtensa_flags, is_user); rem_size -= (rem_size >= KB(4)) ? KB(4) : rem_size; va += KB(4); pa += KB(4); } #if CONFIG_MP_MAX_NUM_CPUS > 1 xtensa_mmu_tlb_ipi(); #endif sys_cache_data_flush_and_invd_all(); k_spin_unlock(&xtensa_mmu_lock, key); } /** * @return True if page is executable (thus need to invalidate ITLB), * false if not. */ static bool l2_page_table_unmap(uint32_t *l1_table, void *vaddr) { uint32_t l1_pos = XTENSA_MMU_L1_POS((uint32_t)vaddr); uint32_t l2_pos = XTENSA_MMU_L2_POS((uint32_t)vaddr); uint32_t *l2_table; uint32_t table_pos; bool exec; sys_cache_data_invd_range((void *)&l1_table[l1_pos], sizeof(l1_table[0])); if (is_pte_illegal(l1_table[l1_pos])) { /* We shouldn't be unmapping an illegal entry. * Return true so that we can invalidate ITLB too. 
*/ return true; } exec = l1_table[l1_pos] & XTENSA_MMU_PERM_X; l2_table = (uint32_t *)(l1_table[l1_pos] & XTENSA_MMU_PTE_PPN_MASK); sys_cache_data_invd_range((void *)&l2_table[l2_pos], sizeof(l2_table[0])); l2_table[l2_pos] = XTENSA_MMU_PTE_ILLEGAL; sys_cache_data_flush_range((void *)&l2_table[l2_pos], sizeof(l2_table[0])); for (l2_pos = 0; l2_pos < XTENSA_L2_PAGE_TABLE_ENTRIES; l2_pos++) { if (!is_pte_illegal(l2_table[l2_pos])) { goto end; } } l1_table[l1_pos] = XTENSA_MMU_PTE_ILLEGAL; sys_cache_data_flush_range((void *)&l1_table[l1_pos], sizeof(l1_table[0])); table_pos = (l2_table - (uint32_t *)l2_page_tables) / (XTENSA_L2_PAGE_TABLE_ENTRIES); atomic_clear_bit(l2_page_tables_track, table_pos); end: /* Need to invalidate L2 page table as it is no longer valid. */ xtensa_tlb_autorefill_invalidate(); return exec; } static inline void __arch_mem_unmap(void *va) { bool is_exec; void *vaddr, *vaddr_uc; if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP)) { if (sys_cache_is_ptr_cached(va)) { vaddr = va; vaddr_uc = sys_cache_uncached_ptr_get(va); } else { vaddr = sys_cache_cached_ptr_get(va); vaddr_uc = va; } } else { vaddr = va; } is_exec = l2_page_table_unmap(xtensa_kernel_ptables, (void *)vaddr); if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP)) { (void)l2_page_table_unmap(xtensa_kernel_ptables, (void *)vaddr_uc); } #ifdef CONFIG_USERSPACE sys_snode_t *node; struct arch_mem_domain *domain; k_spinlock_key_t key; key = k_spin_lock(&z_mem_domain_lock); SYS_SLIST_FOR_EACH_NODE(&xtensa_domain_list, node) { domain = CONTAINER_OF(node, struct arch_mem_domain, node); (void)l2_page_table_unmap(domain->ptables, (void *)vaddr); if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP)) { (void)l2_page_table_unmap(domain->ptables, (void *)vaddr_uc); } } k_spin_unlock(&z_mem_domain_lock, key); #endif /* CONFIG_USERSPACE */ } void arch_mem_unmap(void *addr, size_t size) { uint32_t va = (uint32_t)addr; uint32_t rem_size = (uint32_t)size; k_spinlock_key_t key; if (addr == NULL) { LOG_ERR("Cannot unmap NULL 
pointer"); return; } if (size == 0) { LOG_ERR("Cannot unmap virtual memory with zero size"); return; } key = k_spin_lock(&xtensa_mmu_lock); while (rem_size > 0) { __arch_mem_unmap((void *)va); rem_size -= (rem_size >= KB(4)) ? KB(4) : rem_size; va += KB(4); } #if CONFIG_MP_MAX_NUM_CPUS > 1 xtensa_mmu_tlb_ipi(); #endif sys_cache_data_flush_and_invd_all(); k_spin_unlock(&xtensa_mmu_lock, key); } /* This should be implemented in the SoC layer. * This weak version is here to avoid build errors. */ void __weak xtensa_mmu_tlb_ipi(void) { } void xtensa_mmu_tlb_shootdown(void) { unsigned int key; /* Need to lock interrupts to prevent any context * switching until all the page tables are updated. * Or else we would be switching to another thread * and running that with incorrect page tables * which would result in permission issues. */ key = arch_irq_lock(); K_SPINLOCK(&xtensa_mmu_lock) { /* We don't have information on which page tables have changed, * so we just invalidate the cache for all L1 page tables. */ sys_cache_data_invd_range((void *)l1_page_table, sizeof(l1_page_table)); sys_cache_data_invd_range((void *)l2_page_tables, sizeof(l2_page_tables)); } #ifdef CONFIG_USERSPACE struct k_thread *thread = _current_cpu->current; /* If current thread is a user thread, we need to see if it has * been migrated to another memory domain as the L1 page table * is different from the currently used one. */ if ((thread->base.user_options & K_USER) == K_USER) { uint32_t ptevaddr_entry, ptevaddr, thread_ptables, current_ptables; /* Need to read the currently used L1 page table. * We know that L1 page table is always mapped at way * MMU_PTE_WAY, so we can skip the probing step by * generating the query entry directly. 
*/ ptevaddr = (uint32_t)xtensa_ptevaddr_get(); ptevaddr_entry = XTENSA_MMU_PTE_ENTRY_VADDR(ptevaddr, ptevaddr) | XTENSA_MMU_PTE_WAY; current_ptables = xtensa_dtlb_paddr_read(ptevaddr_entry); thread_ptables = (uint32_t)thread->arch.ptables; if (thread_ptables != current_ptables) { /* Need to remap the thread page tables if the ones * indicated by the current thread are different * than the current mapped page table. */ struct arch_mem_domain *domain = &(thread->mem_domain_info.mem_domain->arch); xtensa_set_paging(domain->asid, (uint32_t *)thread_ptables); } } #endif /* CONFIG_USERSPACE */ /* L2 are done via autofill, so invalidate autofill TLBs * would refresh the L2 page tables. * * L1 will be refreshed during context switch so no need * to do anything here. */ xtensa_tlb_autorefill_invalidate(); arch_irq_unlock(key); } #ifdef CONFIG_USERSPACE static inline uint32_t *alloc_l1_table(void) { uint16_t idx; for (idx = 0; idx < CONFIG_XTENSA_MMU_NUM_L1_TABLES; idx++) { if (!atomic_test_and_set_bit(l1_page_table_track, idx)) { return (uint32_t *)&l1_page_table[idx]; } } return NULL; } static uint32_t *dup_table(void) { uint16_t i, j; uint32_t *dst_table = alloc_l1_table(); if (!dst_table) { return NULL; } for (i = 0; i < XTENSA_L1_PAGE_TABLE_ENTRIES; i++) { uint32_t *l2_table, *src_l2_table; if (is_pte_illegal(xtensa_kernel_ptables[i]) || (i == XTENSA_MMU_L1_POS(XTENSA_MMU_PTEVADDR))) { dst_table[i] = XTENSA_MMU_PTE_ILLEGAL; continue; } src_l2_table = (uint32_t *)(xtensa_kernel_ptables[i] & XTENSA_MMU_PTE_PPN_MASK); l2_table = alloc_l2_table(); if (l2_table == NULL) { goto err; } for (j = 0; j < XTENSA_L2_PAGE_TABLE_ENTRIES; j++) { uint32_t original_attr = XTENSA_MMU_PTE_SW_GET(src_l2_table[j]); l2_table[j] = src_l2_table[j]; if (original_attr != 0x0) { uint8_t ring; ring = XTENSA_MMU_PTE_RING_GET(l2_table[j]); l2_table[j] = XTENSA_MMU_PTE_ATTR_SET(l2_table[j], original_attr); l2_table[j] = XTENSA_MMU_PTE_RING_SET(l2_table[j], ring == XTENSA_MMU_SHARED_RING ? 
XTENSA_MMU_SHARED_RING : XTENSA_MMU_KERNEL_RING); } } /* The page table is using kernel ASID because we don't * user thread manipulate it. */ dst_table[i] = XTENSA_MMU_PTE((uint32_t)l2_table, XTENSA_MMU_KERNEL_RING, 0, XTENSA_MMU_PAGE_TABLE_ATTR); sys_cache_data_flush_range((void *)l2_table, XTENSA_L2_PAGE_TABLE_SIZE); } sys_cache_data_flush_range((void *)dst_table, XTENSA_L1_PAGE_TABLE_SIZE); return dst_table; err: /* TODO: Cleanup failed allocation*/ return NULL; } int arch_mem_domain_init(struct k_mem_domain *domain) { uint32_t *ptables; k_spinlock_key_t key; int ret; /* * For now, lets just assert if we have reached the maximum number * of asid we assert. */ __ASSERT(asid_count < (XTENSA_MMU_SHARED_ASID), "Reached maximum of ASID available"); key = k_spin_lock(&xtensa_mmu_lock); /* If this is the default domain, we don't need * to create a new set of page tables. We can just * use the kernel page tables and save memory. */ if (domain == &k_mem_domain_default) { domain->arch.ptables = xtensa_kernel_ptables; domain->arch.asid = asid_count; goto end; } ptables = dup_table(); if (ptables == NULL) { ret = -ENOMEM; goto err; } domain->arch.ptables = ptables; domain->arch.asid = ++asid_count; sys_slist_append(&xtensa_domain_list, &domain->arch.node); end: ret = 0; err: k_spin_unlock(&xtensa_mmu_lock, key); return ret; } static int region_map_update(uint32_t *ptables, uintptr_t start, size_t size, uint32_t ring, uint32_t flags) { int ret = 0; for (size_t offset = 0; offset < size; offset += CONFIG_MMU_PAGE_SIZE) { uint32_t *l2_table, pte; uint32_t page = start + offset; uint32_t l1_pos = XTENSA_MMU_L1_POS(page); uint32_t l2_pos = XTENSA_MMU_L2_POS(page); /* Make sure we grab a fresh copy of L1 page table */ sys_cache_data_invd_range((void *)&ptables[l1_pos], sizeof(ptables[0])); l2_table = (uint32_t *)(ptables[l1_pos] & XTENSA_MMU_PTE_PPN_MASK); sys_cache_data_invd_range((void *)&l2_table[l2_pos], sizeof(l2_table[0])); pte = XTENSA_MMU_PTE_RING_SET(l2_table[l2_pos], 
ring); pte = XTENSA_MMU_PTE_ATTR_SET(pte, flags); l2_table[l2_pos] = pte; sys_cache_data_flush_range((void *)&l2_table[l2_pos], sizeof(l2_table[0])); xtensa_dtlb_vaddr_invalidate((void *)page); } return ret; } static inline int update_region(uint32_t *ptables, uintptr_t start, size_t size, uint32_t ring, uint32_t flags, uint32_t option) { int ret; k_spinlock_key_t key; key = k_spin_lock(&xtensa_mmu_lock); #ifdef CONFIG_XTENSA_MMU_DOUBLE_MAP uintptr_t va, va_uc; uint32_t new_flags, new_flags_uc; if (sys_cache_is_ptr_cached((void *)start)) { va = start; va_uc = (uintptr_t)sys_cache_uncached_ptr_get((void *)start); } else { va = (uintptr_t)sys_cache_cached_ptr_get((void *)start); va_uc = start; } new_flags_uc = (flags & ~XTENSA_MMU_PTE_ATTR_CACHED_MASK); new_flags = new_flags_uc | XTENSA_MMU_CACHED_WB; ret = region_map_update(ptables, va, size, ring, new_flags); if (ret == 0) { ret = region_map_update(ptables, va_uc, size, ring, new_flags_uc); } #else ret = region_map_update(ptables, start, size, ring, flags); #endif /* CONFIG_XTENSA_MMU_DOUBLE_MAP */ #if CONFIG_MP_MAX_NUM_CPUS > 1 if ((option & OPTION_NO_TLB_IPI) != OPTION_NO_TLB_IPI) { xtensa_mmu_tlb_ipi(); } #endif sys_cache_data_flush_and_invd_all(); k_spin_unlock(&xtensa_mmu_lock, key); return ret; } static inline int reset_region(uint32_t *ptables, uintptr_t start, size_t size, uint32_t option) { return update_region(ptables, start, size, XTENSA_MMU_KERNEL_RING, XTENSA_MMU_PERM_W, option); } void xtensa_user_stack_perms(struct k_thread *thread) { (void)memset((void *)thread->stack_info.start, (IS_ENABLED(CONFIG_INIT_STACKS)) ? 
0xAA : 0x00, thread->stack_info.size - thread->stack_info.delta); update_region(thread_page_tables_get(thread), thread->stack_info.start, thread->stack_info.size, XTENSA_MMU_USER_RING, XTENSA_MMU_PERM_W | XTENSA_MMU_CACHED_WB, 0); } int arch_mem_domain_max_partitions_get(void) { return CONFIG_MAX_DOMAIN_PARTITIONS; } int arch_mem_domain_partition_remove(struct k_mem_domain *domain, uint32_t partition_id) { struct k_mem_partition *partition = &domain->partitions[partition_id]; /* Reset the partition's region back to defaults */ return reset_region(domain->arch.ptables, partition->start, partition->size, 0); } int arch_mem_domain_partition_add(struct k_mem_domain *domain, uint32_t partition_id) { struct k_mem_partition *partition = &domain->partitions[partition_id]; uint32_t ring = K_MEM_PARTITION_IS_USER(partition->attr) ? XTENSA_MMU_USER_RING : XTENSA_MMU_KERNEL_RING; return update_region(domain->arch.ptables, partition->start, partition->size, ring, partition->attr, 0); } /* These APIs don't need to do anything */ int arch_mem_domain_thread_add(struct k_thread *thread) { int ret = 0; bool is_user, is_migration; uint32_t *old_ptables; struct k_mem_domain *domain; old_ptables = thread->arch.ptables; domain = thread->mem_domain_info.mem_domain; thread->arch.ptables = domain->arch.ptables; is_user = (thread->base.user_options & K_USER) != 0; is_migration = (old_ptables != NULL) && is_user; if (is_migration) { /* Give access to the thread's stack in its new * memory domain if it is migrating. */ update_region(thread_page_tables_get(thread), thread->stack_info.start, thread->stack_info.size, XTENSA_MMU_USER_RING, XTENSA_MMU_PERM_W | XTENSA_MMU_CACHED_WB, OPTION_NO_TLB_IPI); /* and reset thread's stack permission in * the old page tables. */ ret = reset_region(old_ptables, thread->stack_info.start, thread->stack_info.size, 0); } /* Need to switch to new page tables if this is * the current thread running. 
*/ if (thread == _current_cpu->current) { xtensa_set_paging(domain->arch.asid, thread->arch.ptables); } #if CONFIG_MP_MAX_NUM_CPUS > 1 /* Need to tell other CPUs to switch to the new page table * in case the thread is running on one of them. * * Note that there is no need to send TLB IPI if this is * migration as it was sent above during reset_region(). */ if ((thread != _current_cpu->current) && !is_migration) { xtensa_mmu_tlb_ipi(); } #endif return ret; } int arch_mem_domain_thread_remove(struct k_thread *thread) { struct k_mem_domain *domain = thread->mem_domain_info.mem_domain; if ((thread->base.user_options & K_USER) == 0) { return 0; } if ((thread->base.thread_state & _THREAD_DEAD) == 0) { /* Thread is migrating to another memory domain and not * exiting for good; we weren't called from * z_thread_abort(). Resetting the stack region will * take place in the forthcoming thread_add() call. */ return 0; } /* Restore permissions on the thread's stack area since it is no * longer a member of the domain. * * Note that, since every thread must have an associated memory * domain, removing a thread from domain will be followed by * adding it back to another. So there is no need to send TLB IPI * at this point. 
*/ return reset_region(domain->arch.ptables, thread->stack_info.start, thread->stack_info.size, OPTION_NO_TLB_IPI); } static bool page_validate(uint32_t *ptables, uint32_t page, uint8_t ring, bool write) { uint8_t asid_ring; uint32_t rasid, pte, *l2_table; uint32_t l1_pos = XTENSA_MMU_L1_POS(page); uint32_t l2_pos = XTENSA_MMU_L2_POS(page); if (is_pte_illegal(ptables[l1_pos])) { return false; } l2_table = (uint32_t *)(ptables[l1_pos] & XTENSA_MMU_PTE_PPN_MASK); pte = l2_table[l2_pos]; if (is_pte_illegal(pte)) { return false; } asid_ring = 0; rasid = xtensa_rasid_get(); for (uint32_t i = 0; i < 4; i++) { if (XTENSA_MMU_PTE_ASID_GET(pte, rasid) == XTENSA_MMU_RASID_ASID_GET(rasid, i)) { asid_ring = i; break; } } if (ring > asid_ring) { return false; } if (write) { return (XTENSA_MMU_PTE_ATTR_GET((pte)) & XTENSA_MMU_PERM_W) != 0; } return true; } static int mem_buffer_validate(const void *addr, size_t size, int write, int ring) { int ret = 0; uint8_t *virt; size_t aligned_size; const struct k_thread *thread = _current; uint32_t *ptables = thread_page_tables_get(thread); /* addr/size arbitrary, fix this up into an aligned region */ k_mem_region_align((uintptr_t *)&virt, &aligned_size, (uintptr_t)addr, size, CONFIG_MMU_PAGE_SIZE); for (size_t offset = 0; offset < aligned_size; offset += CONFIG_MMU_PAGE_SIZE) { if (!page_validate(ptables, (uint32_t)(virt + offset), ring, write)) { ret = -1; break; } } return ret; } bool xtensa_mem_kernel_has_access(void *addr, size_t size, int write) { return mem_buffer_validate(addr, size, write, XTENSA_MMU_KERNEL_RING) == 0; } int arch_buffer_validate(const void *addr, size_t size, int write) { return mem_buffer_validate(addr, size, write, XTENSA_MMU_USER_RING); } void xtensa_swap_update_page_tables(struct k_thread *incoming) { uint32_t *ptables = incoming->arch.ptables; struct arch_mem_domain *domain = &(incoming->mem_domain_info.mem_domain->arch); xtensa_set_paging(domain->asid, ptables); #ifdef 
CONFIG_XTENSA_INVALIDATE_MEM_DOMAIN_TLB_ON_SWAP struct k_mem_domain *mem_domain = incoming->mem_domain_info.mem_domain; for (int idx = 0; idx < mem_domain->num_partitions; idx++) { struct k_mem_partition *part = &mem_domain->partitions[idx]; uintptr_t end = part->start + part->size; for (uintptr_t addr = part->start; addr < end; addr += CONFIG_MMU_PAGE_SIZE) { xtensa_dtlb_vaddr_invalidate((void *)addr); } } #endif } #endif /* CONFIG_USERSPACE */ ```
/content/code_sandbox/arch/xtensa/core/ptables.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
9,241
```unknown /* * */ #include <xtensa_asm2_s.h> #include <zephyr/offsets.h> #include <offsets_short.h> #include <zephyr/syscall.h> #include <zephyr/zsr.h> #include <xtensa/config/core-isa.h> /** * syscall number arg1, arg2, arg3, arg4, arg5, arg6 * -------------- ---------------------------------- * a2 a6, a3, a4, a5, a8, a9 * **/ .pushsection .text.xtensa_do_syscall, "ax" .global xtensa_do_syscall .align 4 xtensa_do_syscall: #if XCHAL_HAVE_THREADPTR == 0 wsr a2, ZSR_SYSCALL_SCRATCH rsync movi a0, xtensa_is_user_context_epc rsr.epc1 a2 bne a0, a2, _not_checking_user_context addi a2, a2, 3 wsr.epc1 a2 movi a0, PS_RING_MASK rsr.ps a2 and a2, a2, a0 /* Need to set return to 1 if RING != 0, * so we won't be leaking which ring we are in * right now. */ beqz a2, _is_user_context_return movi a2, 1 _is_user_context_return: rsr a0, ZSR_A0SAVE rfe _not_checking_user_context: rsr a2, ZSR_SYSCALL_SCRATCH #endif rsr a0, ZSR_CPU l32i a0, a0, ___cpu_t_current_OFFSET l32i a0, a0, _thread_offset_to_psp addi a0, a0, -___xtensa_irq_bsa_t_SIZEOF s32i a1, a0, ___xtensa_irq_bsa_t_scratch_OFFSET s32i a2, a0, ___xtensa_irq_bsa_t_a2_OFFSET s32i a3, a0, ___xtensa_irq_bsa_t_a3_OFFSET rsr a2, ZSR_A0SAVE s32i a2, a0, ___xtensa_irq_bsa_t_a0_OFFSET rsr.ps a2 movi a3, ~PS_OWB_MASK and a2, a2, a3 s32i a2, a0, ___xtensa_irq_bsa_t_ps_OFFSET rsr.epc1 a2 s32i a2, a0, ___xtensa_irq_bsa_t_pc_OFFSET #if XCHAL_HAVE_NMI movi a2, PS_WOE|PS_INTLEVEL(XCHAL_NMILEVEL) #elif XCHAL_HAVE_INTERRUPTS movi a2, PS_WOE|PS_INTLEVEL(XCHAL_NUM_INTLEVELS) #else #error Xtensa core with no interrupt support is used #endif rsr.ps a3 or a3, a3, a2 movi a2, ~(PS_EXCM | PS_RING_MASK) and a3, a3, a2 wsr.ps a3 rsync l32i a2, a0, ___xtensa_irq_bsa_t_a2_OFFSET l32i a3, a0, ___xtensa_irq_bsa_t_a3_OFFSET SPILL_ALL_WINDOWS rsr a0, ZSR_CPU l32i a0, a0, ___cpu_t_current_OFFSET l32i a0, a0, _thread_offset_to_psp addi a0, a0, -___xtensa_irq_bsa_t_SIZEOF mov a1, a0 l32i a3, a1, ___xtensa_irq_bsa_t_pc_OFFSET #if XCHAL_HAVE_LOOPS /* If the 
syscall instruction was the last instruction in the body of * a zero-overhead loop, and the loop will execute again, decrement * the loop count and resume execution at the head of the loop. */ rsr.lend a2 addi a3, a3, 3 bne a2, a3, end_loop rsr.lcount a2 beqz a2, end_loop addi a2, a2, -1 wsr.lcount a2 rsr.lbeg a3 end_loop: #else /* EPC1 (and now a3) contains the address that invoked syscall. * We need to increment it to execute the next instruction when * we return. The instruction size is 3 bytes, so lets just add it. */ addi a3, a3, 3 #endif s32i a3, a1, ___xtensa_irq_bsa_t_pc_OFFSET ODD_REG_SAVE #if defined(CONFIG_XTENSA_HIFI_SHARING) call0 _xtensa_hifi_save #endif call0 xtensa_save_high_regs l32i a2, a1, 0 l32i a2, a2, ___xtensa_irq_bsa_t_a2_OFFSET movi a0, K_SYSCALL_LIMIT bgeu a2, a0, _bad_syscall _id_ok: /* Find the function handler for the given syscall id. */ movi a3, _k_syscall_table slli a2, a2, 2 add a2, a2, a3 l32i a2, a2, 0 #if XCHAL_HAVE_THREADPTR /* Clear up the threadptr because it is used * to check if a thread is running on user mode. Since * we are in a interruption we don't want the system * thinking it is possibly running in user mode. */ #ifdef CONFIG_THREAD_LOCAL_STORAGE movi a0, is_user_mode@tpoff rur.THREADPTR a3 add a0, a3, a0 movi a3, 0 s32i a3, a0, 0 #else movi a0, 0 wur.THREADPTR a0 #endif #endif /* XCHAL_HAVE_THREADPTR */ /* Set syscall parameters by moving them into place before we do * a call4 for the syscall function itself. * arg1 = a6 * arg2 = a3 (clobbered above, so we need to reload it) * arg3 = a4 * arg4 = a5 * arg5 = a8 * arg6 = a9 */ mov a10, a8 mov a11, a9 mov a8, a4 mov a9, a5 /* Stack frame pointer is the 7th argument to z_mrsh_*() * as ssf, and must be put on stack to be consumed. */ mov a3, a1 addi a1, a1, -4 s32i a3, a1, 0 l32i a3, a1, 4 l32i a7, a3, ___xtensa_irq_bsa_t_a3_OFFSET /* Since we are unmasking EXCM, we need to set RING bits to kernel * mode, otherwise we won't be able to run the exception handler in C. 
*/ movi a0, PS_WOE|PS_CALLINC(0)|PS_UM|PS_INTLEVEL(0) wsr.ps a0 rsync callx4 a2 /* Going back before stack frame pointer on stack to * actual the stack frame. So restoration of registers * can be done properly when finishing syscalls. */ addi a1, a1, 4 /* copy return value. Lets put it in the top of stack * because registers will be clobbered in * xtensa_restore_high_regs */ l32i a3, a1, 0 s32i a6, a3, ___xtensa_irq_bsa_t_a2_OFFSET j _syscall_returned _syscall_returned: call0 xtensa_restore_high_regs l32i a3, a1, ___xtensa_irq_bsa_t_sar_OFFSET wsr a3, SAR #if XCHAL_HAVE_LOOPS l32i a3, a1, ___xtensa_irq_bsa_t_lbeg_OFFSET wsr a3, LBEG l32i a3, a1, ___xtensa_irq_bsa_t_lend_OFFSET wsr a3, LEND l32i a3, a1, ___xtensa_irq_bsa_t_lcount_OFFSET wsr a3, LCOUNT #endif #if XCHAL_HAVE_S32C1I l32i a3, a1, ___xtensa_irq_bsa_t_scompare1_OFFSET wsr a3, SCOMPARE1 #endif #if XCHAL_HAVE_THREADPTR #ifdef CONFIG_THREAD_LOCAL_STORAGE l32i a3, a1, ___xtensa_irq_bsa_t_threadptr_OFFSET movi a0, is_user_mode@tpoff add a0, a3, a0 movi a3, 1 s32i a3, a0, 0 #else rsr a3, ZSR_CPU l32i a3, a3, ___cpu_t_current_OFFSET wur.THREADPTR a3 #endif #endif /* XCHAL_HAVE_THREADPTR */ l32i a3, a1, ___xtensa_irq_bsa_t_ps_OFFSET wsr.ps a3 l32i a3, a1, ___xtensa_irq_bsa_t_pc_OFFSET wsr.epc1 a3 l32i a0, a1, ___xtensa_irq_bsa_t_a0_OFFSET l32i a2, a1, ___xtensa_irq_bsa_t_a2_OFFSET l32i a3, a1, ___xtensa_irq_bsa_t_a3_OFFSET l32i a1, a1, ___xtensa_irq_bsa_t_scratch_OFFSET rsync rfe _bad_syscall: movi a2, K_SYSCALL_BAD j _id_ok .popsection /* FUNC_NORETURN void xtensa_userspace_enter(k_thread_entry_t user_entry, * void *p1, void *p2, void *p3, * uint32_t stack_end, * uint32_t stack_start) * * A one-way trip to userspace. */ .global xtensa_userspace_enter .type xtensa_userspace_enter, @function .align 4 xtensa_userspace_enter: /* Call entry to set a bit in the windowstart and * do the rotation, but we are going to set our own * stack. 
*/ entry a1, 16 SPILL_ALL_WINDOWS /* We have to switch to kernel stack before spill kernel data and * erase user stack to avoid leak from previous context. */ mov a1, a7 /* stack start (low address) */ rsr a0, ZSR_CPU l32i a0, a0, ___cpu_t_current_OFFSET addi a1, a1, -28 s32i a0, a1, 24 s32i a2, a1, 20 s32i a3, a1, 16 s32i a4, a1, 12 s32i a5, a1, 8 s32i a6, a1, 4 s32i a7, a1, 0 l32i a6, a1, 24 call4 xtensa_user_stack_perms l32i a6, a1, 24 #ifdef CONFIG_XTENSA_MMU call4 xtensa_swap_update_page_tables #endif #ifdef CONFIG_XTENSA_MPU call4 xtensa_mpu_map_write #endif #if XCHAL_HAVE_THREADPTR #ifdef CONFIG_THREAD_LOCAL_STORAGE rur.threadptr a3 movi a0, is_user_mode@tpoff add a0, a3, a0 movi a3, 1 s32i a3, a0, 0 #else rsr a3, ZSR_CPU l32i a3, a3, ___cpu_t_current_OFFSET wur.THREADPTR a3 #endif #endif /* XCHAL_HAVE_THREADPTR */ /* Set now z_thread_entry parameters, we are simulating a call4 * call, so parameters start at a6, a7, ... */ l32i a6, a1, 20 l32i a7, a1, 16 l32i a8, a1, 12 l32i a9, a1, 8 /* Go back to user stack */ l32i a1, a1, 4 movi a0, z_thread_entry wsr.epc2 a0 /* Configuring PS register. * We have to set callinc as well, since the called * function will do "entry" */ #ifdef CONFIG_XTENSA_MMU movi a0, PS_WOE|PS_CALLINC(1)|PS_UM|PS_RING(2) #endif #ifdef CONFIG_XTENSA_MPU /* MPU only has RING 0 and 1. */ movi a0, PS_WOE|PS_CALLINC(1)|PS_UM|PS_RING(1) #endif wsr a0, EPS2 /* Wipe out a0 (thre is no return from this function */ movi a0, 0 rfi 2 ```
/content/code_sandbox/arch/xtensa/core/userspace.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,967
```unknown /* */ /* * Control arrives here at _start from the reset vector. */ #include <xtensa/coreasm.h> /* Exports */ .global _start /* * Imports * __stack from linker script (see LSP Ref Manual) * _bss_table_start from linker script (see LSP Ref Manual) * _bss_table_end from linker script (see LSP Ref Manual) * z_cstart Entry point into Zephyr C domain * __stack from linker script (see LSP Ref Manual) */ .global __start .type z_prep_c, @function /* Macros to abstract away ABI differences */ #if __XTENSA_CALL0_ABI__ # define CALL call0 # define CALLX callx0 # define ARG1 a2 /* 1st outgoing call argument */ # define ARG2 a3 /* 2nd outgoing call argument */ # define ARG3 a4 /* 3rd outgoing call argument */ # define ARG4 a5 /* 4th outgoing call argument */ # define ARG5 a6 /* 5th outgoing call argument */ #else # define CALL call4 # define CALLX callx4 # define ARG1 a6 /* 1st outgoing call argument */ # define ARG2 a7 /* 2nd outgoing call argument */ # define ARG3 a8 /* 3rd outgoing call argument */ # define ARG4 a9 /* 4th outgoing call argument */ # define ARG5 a10 /* 5th outgoing call argument */ #endif .text .align 4 _start: /* * _start is typically NOT at the beginning of the text segment -- * it is always called from either the reset vector (__start) or other * code that does equivalent initialization. * * Assumptions on entry to _start: * - low (level-one) and medium priority interrupts are disabled * via PS.INTLEVEL and/or INTENABLE * - C calling context not initialized: * - PS not initialized * - SP not initialized * - the following are initialized: * - LITBASE, cache attributes, WindowBase, WindowStart, * CPENABLE, FP's FCR and FSR, EXCSAVE[n] * Keep a0 zero. It is used to initialize a few things. * It is also the return address, where zero indicates * that the frame used by _start is the bottommost frame. * */ /* not needed for Xtensa TX */ #if !XCHAL_HAVE_HALT || !XCHAL_HAVE_BOOTLOADER movi a0, 0 /* keep this register zero. 
*/ #endif /* * Initialize the stack pointer. * See the "ABI and Software Conventions" chapter in the * Xtensa ISA Reference manual for details. * * NOTE: Because the _start routine does not use any memory in its * stack frame, and because all of its CALL instructions use a * window size of 4, the stack frame for _start can be empty. */ movi sp, __stack /* * Now that sp (a1) is set, we can set PS as per the application (user * vector mode, disable interrupts, enable window exceptions if * applicable). */ #if XCHAL_HAVE_EXCEPTIONS # ifdef __XTENSA_CALL0_ABI__ /* * PS.WOE = 0 * PS.UM = 1 * PS.EXCM = 0 * PS.INTLEVEL = XCHAL_EXCM_LEVEL */ movi a3, PS_UM|PS_INTLEVEL(XCHAL_EXCM_LEVEL) # else /* * PS.WOE = 1 * PS.UM = 1 * PS.EXCM = 0 * PS.INTLEVEL = XCHAL_EXCM_LEVEL */ movi a3, PS_UM|PS_WOE|PS_INTLEVEL(XCHAL_EXCM_LEVEL) # endif wsr a3, PS rsync #endif /* * Do any initialization that affects the memory map, such as * setting up TLB entries, that needs to be done before we can * successfully clear BSS (e.g. if some BSS segments are in * remapped areas). * * NOTE: This hook works where the reset vector does not unpack * segments (see "ROM packing" in the LSP manual), or where * unpacking of segments is not affected by memory remapping. * If ROM unpacking is affected, TLB setup must be done in * assembler from the reset vector. * * The __memmap_init() routine can be a C function, however it * does not have BSS initialized! In particular, __memmap_init() * cannot set BSS variables, i.e. uninitialized global variables * (they'll be wiped out by the following BSS clear), nor can it * assume they are yet initialized to zero. * * The __memmap_init() function is optional. It is marked as a * weak symbol, so that it gets valued zero if not defined. */ .weak __memmap_init movi a4, __memmap_init beqz a4, 1f CALLX a4 1: #if !XCHAL_HAVE_BOOTLOADER /* boot loader takes care of zeroing BSS */ # ifdef __XTENSA_CALL0_ABI__ /* Clear a0 again as possible CALLX to __memmap_init changed it. 
*/ movi a0, 0 # endif /* * Clear the BSS (uninitialized data) segments. * This code supports multiple zeroed sections (*.bss). * * Register allocation: * a0 = 0 * a6 = pointer to start of table, and through table * a7 = pointer to end of table * a8 = start address of bytes to be zeroed * a9 = end address of bytes to be zeroed * a10 = length of bytes to be zeroed */ movi a6, _bss_table_start movi a7, _bss_table_end bgeu a6, a7, .L3zte .L0zte: l32i a8, a6, 0 /* get start address, assumed multiple of 4 */ l32i a9, a6, 4 /* get end address, assumed multiple of 4 */ addi a6, a6, 8 /* next entry */ sub a10, a9, a8 /* a10 = length, assumed a multiple of 4 */ bbci.l a10, 2, .L1zte s32i a0, a8, 0 /* clear 4 bytes to make len multiple of 8 */ addi a8, a8, 4 .L1zte: bbci.l a10, 3, .L2zte s32i a0, a8, 0 /* clear 8 bytes to make len multiple of 16 */ s32i a0, a8, 4 addi a8, a8, 8 .L2zte: srli a10, a10, 4 /* len is now multiple of 16, divide by 16 */ floopnez a10, clearzte s32i a0, a8, 0 /* clear 16 bytes at a time... */ s32i a0, a8, 4 s32i a0, a8, 8 s32i a0, a8, 12 addi a8, a8, 16 floopend a10, clearzte bltu a6, a7, .L0zte /* loop until end of table of *.bss sections */ .L3zte: #endif /* !XCHAL_HAVE_BOOTLOADER */ /* Enter C domain, never returns from here */ CALL z_prep_c .size _start, . - _start ```
/content/code_sandbox/arch/xtensa/core/crt1.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,816
```python #!/usr/bin/env python3 import argparse import re # Scratch register allocator. Zephyr uses multiple Xtensa SRs as # scratch space for various special purposes. Unfortunately the # configurable architecture means that not all registers will be the # same on every device. This script parses a pre-cooked ("gcc -E # -dM") core-isa.h file for the current architecture and assigns # registers to usages. def parse_args(): parser = argparse.ArgumentParser(allow_abbrev=False) parser.add_argument("--coherence", action="store_true", help="Enable scratch registers for CONFIG_KERNEL_COHERENCE") parser.add_argument("--mmu", action="store_true", help="Enable scratch registers for MMU usage") parser.add_argument("--syscall-scratch", action="store_true", help="Enable scratch registers for syscalls if needed") parser.add_argument("coreisa", help="Path to preprocessed core-isa.h") parser.add_argument("outfile", help="Output file") return parser.parse_args() args = parse_args() NEEDED = ["A0SAVE", "CPU"] if args.mmu: NEEDED += ["DBLEXC", "DEPC_SAVE", "EXCCAUSE_SAVE"] if args.coherence: NEEDED += ["FLUSH"] coreisa = args.coreisa outfile = args.outfile syms = {} def get(s): return syms[s] if s in syms else 0 with open(coreisa) as infile: for line in infile.readlines(): m = re.match(r"^#define\s+([^ ]+)\s*(.*)", line.rstrip()) if m: syms[m.group(1)] = m.group(2) # Use MISC registers first if available, that's what they're for regs = [ f"MISC{n}" for n in range(0, int(get("XCHAL_NUM_MISC_REGS"))) ] if args.syscall_scratch: # If there is no THREADPTR, we need to use syscall for # arch_is_user_context() where the code needs a scratch # register. have_threadptr = int(get("XCHAL_HAVE_THREADPTR")) if have_threadptr == 0: NEEDED.append("SYSCALL_SCRATCH") # Next come EXCSAVE. Also record our highest non-debug interrupt level. 
maxint = 0 for il in range(1, 1 + int(get("XCHAL_NUM_INTLEVELS"))): regs.append(f"EXCSAVE{il}") if il != int(get("XCHAL_DEBUGLEVEL")): maxint = max(il, maxint) # Find the highest priority software interrupt. We'll use that for # arch_irq_offload(). irqoff_level = -1 irqoff_int = -1 for sym, val in syms.items(): if val == "XTHAL_INTTYPE_SOFTWARE": m = re.match(r"XCHAL_INT(\d+)_TYPE", sym) if m: intnum = int(m.group(1)) levelsym = f"XCHAL_INT{intnum}_LEVEL" if levelsym in syms: intlevel = int(syms[levelsym]) if intlevel > irqoff_level: irqoff_int = intnum irqoff_level = intlevel # Now emit our output header with the assignments we chose with open(outfile, "w") as f: f.write("/* Generated File, see gen_zsr.py */\n") f.write("#ifndef ZEPHYR_ZSR_H\n") f.write("#define ZEPHYR_ZSR_H\n") for i, need in enumerate(NEEDED): f.write(f"# define ZSR_{need} {regs[i]}\n") f.write(f"# define ZSR_{need}_STR \"{regs[i]}\"\n") # Emit any remaining registers as generics for i in range(len(NEEDED), len(regs)): f.write(f"# define ZSR_EXTRA{i - len(NEEDED)} {regs[i]}\n") f.write(f"# define ZSR_EXTRA{i - len(NEEDED)}_STR \"{regs[i]}\"\n") # Also, our highest level EPC/EPS registers f.write(f"# define ZSR_RFI_LEVEL {maxint}\n") f.write(f"# define ZSR_EPC EPC{maxint}\n") f.write(f"# define ZSR_EPS EPS{maxint}\n") # And the irq offset interrupt if irqoff_int >= 0: f.write(f"# define ZSR_IRQ_OFFLOAD_INT {irqoff_int}\n") f.write("#endif\n") ```
/content/code_sandbox/arch/xtensa/core/gen_zsr.py
python
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,014
```c /* * */ #include <zephyr/kernel.h> #include <kernel_internal.h> extern FUNC_NORETURN void z_cstart(void); /* defined by the SoC in case of CONFIG_SOC_HAS_RUNTIME_NUM_CPUS=y */ extern void soc_num_cpus_init(void); /** * * @brief Prepare to and run C code * * This routine prepares for the execution of and runs C code. * */ void z_prep_c(void) { #if CONFIG_SOC_HAS_RUNTIME_NUM_CPUS soc_num_cpus_init(); #endif _cpu_t *cpu0 = &_kernel.cpus[0]; #ifdef CONFIG_KERNEL_COHERENCE /* Make sure we don't have live data for unexpected cached * regions due to boot firmware */ sys_cache_data_flush_and_invd_all(); /* Our cache top stash location might have junk in it from a * pre-boot environment. Must be zero or valid! */ XTENSA_WSR(ZSR_FLUSH_STR, 0); #endif cpu0->nested = 0; /* The asm2 scheme keeps the kernel pointer in a scratch SR * (see zsr.h for generation specifics) for easy access. That * saves 4 bytes of immediate value to store the address when * compared to the legacy scheme. But in SMP this record is a * per-CPU thing and having it stored in a SR already is a big * win. */ XTENSA_WSR(ZSR_CPU_STR, cpu0); #ifdef CONFIG_INIT_STACKS char *stack_start = K_KERNEL_STACK_BUFFER(z_interrupt_stacks[0]); size_t stack_sz = K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[0]); char *stack_end = stack_start + stack_sz; uint32_t sp; __asm__ volatile("mov %0, sp" : "=a"(sp)); /* Only clear the interrupt stack if the current stack pointer * is not within the interrupt stack. Or else we would be * wiping the in-use stack. */ if (((uintptr_t)sp < (uintptr_t)stack_start) || ((uintptr_t)sp >= (uintptr_t)stack_end)) { memset(stack_start, 0xAA, stack_sz); } #endif #ifdef CONFIG_XTENSA_MMU xtensa_mmu_init(); #endif #ifdef CONFIG_XTENSA_MPU xtensa_mpu_init(); #endif z_cstart(); CODE_UNREACHABLE; } ```
/content/code_sandbox/arch/xtensa/core/prep_c.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
523
```c /* */ #include <stdint.h> #include <stdbool.h> #include <zephyr/kernel.h> #include <xtensa/config/core-isa.h> #include <xtensa_mmu_priv.h> #include <zephyr/cache.h> #ifdef CONFIG_USERSPACE BUILD_ASSERT((CONFIG_PRIVILEGED_STACK_SIZE > 0) && (CONFIG_PRIVILEGED_STACK_SIZE % CONFIG_MMU_PAGE_SIZE) == 0); #endif #define ASID_INVALID 0 struct tlb_regs { uint32_t rasid; uint32_t ptevaddr; uint32_t ptepin_as; uint32_t ptepin_at; uint32_t vecpin_as; uint32_t vecpin_at; }; static void compute_regs(uint32_t user_asid, uint32_t *l1_page, struct tlb_regs *regs) { uint32_t vecbase = XTENSA_RSR("VECBASE"); __ASSERT_NO_MSG((((uint32_t)l1_page) & 0xfff) == 0); __ASSERT_NO_MSG((user_asid == 0) || ((user_asid > 2) && (user_asid < XTENSA_MMU_SHARED_ASID))); /* We don't use ring 1, ring 0 ASID must be 1 */ regs->rasid = (XTENSA_MMU_SHARED_ASID << 24) | (user_asid << 16) | 0x000201; /* Derive PTEVADDR from ASID so each domain gets its own PTE area */ regs->ptevaddr = CONFIG_XTENSA_MMU_PTEVADDR + user_asid * 0x400000; /* The ptables code doesn't add the mapping for the l1 page itself */ l1_page[XTENSA_MMU_L1_POS(regs->ptevaddr)] = (uint32_t)l1_page | XTENSA_MMU_PAGE_TABLE_ATTR; regs->ptepin_at = (uint32_t)l1_page; regs->ptepin_as = XTENSA_MMU_PTE_ENTRY_VADDR(regs->ptevaddr, regs->ptevaddr) | XTENSA_MMU_PTE_WAY; /* Pin mapping for refilling the vector address into the ITLB * (for handling TLB miss exceptions). Note: this is NOT an * instruction TLB entry for the vector code itself, it's a * DATA TLB entry for the page containing the vector mapping * so the refill on instruction fetch can find it. The * hardware doesn't have a 4k pinnable instruction TLB way, * frustratingly. */ uint32_t vb_pte = l1_page[XTENSA_MMU_L1_POS(vecbase)]; regs->vecpin_at = vb_pte; regs->vecpin_as = XTENSA_MMU_PTE_ENTRY_VADDR(regs->ptevaddr, vecbase) | XTENSA_MMU_VECBASE_WAY; } /* Switch to a new page table. 
There are four items we have to set in * the hardware: the PTE virtual address, the ring/ASID mapping * register, and two pinned entries in the data TLB handling refills * for the page tables and the vector handlers. * * These can be done in any order, provided that we ensure that no * memory access which cause a TLB miss can happen during the process. * This means that we must work entirely within registers in a single * asm block. Also note that instruction fetches are memory accesses * too, which means we cannot cross a page boundary which might reach * a new page not in the TLB (a single jump to an aligned address that * holds our five instructions is sufficient to guarantee that: I * couldn't think of a way to do the alignment statically that also * interoperated well with inline assembly). */ void xtensa_set_paging(uint32_t user_asid, uint32_t *l1_page) { /* Optimization note: the registers computed here are pure * functions of the two arguments. With a minor API tweak, * they could be cached in e.g. a thread struct instead of * being recomputed. This is called on context switch paths * and is performance-sensitive. */ struct tlb_regs regs; compute_regs(user_asid, l1_page, &regs); __asm__ volatile("j 1f\n" ".align 16\n" /* enough for 5 insns */ "1:\n" "wsr %0, PTEVADDR\n" "wsr %1, RASID\n" "wdtlb %2, %3\n" "wdtlb %4, %5\n" "isync" :: "r"(regs.ptevaddr), "r"(regs.rasid), "r"(regs.ptepin_at), "r"(regs.ptepin_as), "r"(regs.vecpin_at), "r"(regs.vecpin_as)); } /* This is effectively the same algorithm from xtensa_set_paging(), * but it also disables the hardware-initialized 512M TLB entries in * way 6 (because the hardware disallows duplicate TLB mappings). For * instruction fetches this produces a critical ordering constraint: * the instruction following the invalidation of ITLB entry mapping * the current PC will by definition create a refill condition, which * will (because the data TLB was invalidated) cause a refill * exception. 
Therefore this step must be the very last one, once * everything else is setup up and working, which includes the * invalidation of the virtual PTEVADDR area so that the resulting * refill can complete. * * Note that we can't guarantee that the compiler won't insert a data * fetch from our stack memory after exit from the asm block (while it * might be double-mapped), so we invalidate that data TLB inside the * asm for correctness. The other 13 entries get invalidated in a C * loop at the end. */ void xtensa_init_paging(uint32_t *l1_page) { extern char z_xt_init_pc; /* defined in asm below */ struct tlb_regs regs; unsigned int initial_rasid; /* The initial rasid after hardware initialization is 0x04030201. * 1 is hardwired to ring 0, other slots must be different * from each other and must not be 0. * * For our initial implementation we just set the 4th slot (ring 3), * to use the ASID value used for memory that is shared with all threads. */ initial_rasid = 0xff030201; #if CONFIG_MP_MAX_NUM_CPUS > 1 /* The incoherent cache can get into terrible trouble if it's * allowed to cache PTEs differently across CPUs. We require * that all page tables supplied by the OS have exclusively * uncached mappings for page data, but can't do anything * about earlier code/firmware. Dump the cache to be safe. */ sys_cache_data_flush_and_invd_all(); #endif compute_regs(ASID_INVALID, l1_page, &regs); uint32_t idtlb_pte = (regs.ptevaddr & 0xe0000000) | XCHAL_SPANNING_WAY; uint32_t idtlb_stk = (((uint32_t)&regs) & ~0xfff) | XCHAL_SPANNING_WAY; uint32_t iitlb_pc = (((uint32_t)&z_xt_init_pc) & ~0xfff) | XCHAL_SPANNING_WAY; /* Note: the jump is mostly pedantry, as it's almost * inconceivable that a hardware memory region at boot is * going to cross a 512M page boundary. But we need the entry * symbol to get the address above, so the jump is here for * symmetry with the set_paging() code. 
*/ __asm__ volatile("j z_xt_init_pc\n" ".align 32\n" /* room for 10 insns */ ".globl z_xt_init_pc\n" "z_xt_init_pc:\n" "wsr %0, PTEVADDR\n" "wsr %1, RASID\n" "wdtlb %2, %3\n" "wdtlb %4, %5\n" "idtlb %6\n" /* invalidate pte */ "idtlb %7\n" /* invalidate stk */ "isync\n" "iitlb %8\n" /* invalidate pc */ "isync\n" /* <--- traps a ITLB miss */ :: "r"(regs.ptevaddr), "r"(initial_rasid), "r"(regs.ptepin_at), "r"(regs.ptepin_as), "r"(regs.vecpin_at), "r"(regs.vecpin_as), "r"(idtlb_pte), "r"(idtlb_stk), "r"(iitlb_pc)); /* Invalidate the remaining (unused by this function) * initialization entries. Now we're flying free with our own * page table. */ for (uint32_t i = 0; i < 8; i++) { uint32_t ixtlb = (i * 0x20000000) | XCHAL_SPANNING_WAY; if (ixtlb != iitlb_pc) { __asm__ volatile("iitlb %0" :: "r"(ixtlb)); } if (ixtlb != idtlb_stk && ixtlb != idtlb_pte) { __asm__ volatile("idtlb %0" :: "r"(ixtlb)); } } __asm__ volatile("isync"); } ```
/content/code_sandbox/arch/xtensa/core/mmu.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,160
```c /* */ #include <zephyr/toolchain.h> #include <zephyr/tracing/tracing.h> #ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE void arch_cpu_idle(void) { sys_trace_idle(); __asm__ volatile ("waiti 0"); } #endif #ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE void arch_cpu_atomic_idle(unsigned int key) { sys_trace_idle(); __asm__ volatile ("waiti 0\n\t" "wsr.ps %0\n\t" "rsync" :: "a"(key)); } #endif ```
/content/code_sandbox/arch/xtensa/core/cpu_idle.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
116
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/kernel_structs.h> #include <kernel_internal.h> #include <kernel_tls.h> #include <zephyr/app_memory/app_memdomain.h> #include <zephyr/sys/util.h> #if XCHAL_HAVE_THREADPTR == 0 #error SoC does not support THREADPTR for thread local storage. #endif size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr) { /* * TLS area has some data fields following by * thread data and bss. These fields are supposed to be * used by toolchain and OS TLS code to aid in locating * the TLS data/bss. Zephyr currently has no use for * this so we can simply skip these. However, since GCC * is generating code assuming these fields are there, * we simply skip them when setting the TLS pointer. */ /* * Since we are populating things backwards, * setup the TLS data/bss area first. */ stack_ptr -= z_tls_data_size(); z_tls_copy(stack_ptr); /* Skip two pointers due to toolchain */ stack_ptr -= sizeof(uintptr_t) * 2; /* * Set thread TLS pointer which is used in * context switch to point to TLS area. */ new_thread->tls = POINTER_TO_UINT(stack_ptr); return (z_tls_data_size() + (sizeof(uintptr_t) * 2)); } ```
/content/code_sandbox/arch/xtensa/core/tls.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
316
```c /* * */ #include <zephyr/toolchain.h> __weak int atexit(void (*function)(void)) { ARG_UNUSED(function); return 0; } ```
/content/code_sandbox/arch/xtensa/core/xcc_stubs.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
35
```c /* * */ #include <string.h> #include <xtensa_asm2_context.h> #include <zephyr/kernel.h> #include <ksched.h> #include <zephyr/kernel_structs.h> #include <kernel_internal.h> #include <kswap.h> #include <zephyr/toolchain.h> #include <zephyr/logging/log.h> #include <zephyr/offsets.h> #include <zephyr/zsr.h> #include <zephyr/arch/common/exc_handle.h> #ifdef CONFIG_XTENSA_GEN_HANDLERS #include <xtensa_handlers.h> #else #include <_soc_inthandlers.h> #endif #include <kernel_internal.h> #include <xtensa_internal.h> #include <xtensa_stack.h> LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); extern char xtensa_arch_except_epc[]; extern char xtensa_arch_kernel_oops_epc[]; bool xtensa_is_outside_stack_bounds(uintptr_t addr, size_t sz, uint32_t ps) { uintptr_t start, end; struct k_thread *thread = _current; bool was_in_isr, invalid; /* Without userspace, there is no privileged stack so the thread stack * is the whole stack (minus reserved area). So there is no need to * check for PS == UINT32_MAX for special treatment. */ ARG_UNUSED(ps); /* Since both level 1 interrupts and exceptions go through * the same interrupt vector, both of them increase the nested * counter in the CPU struct. The architecture vector handler * moves execution to the interrupt stack when nested goes from * zero to one. Afterwards, any nested interrupts/exceptions will * continue running in interrupt stack. Therefore, only when * nested > 1, then it was running in the interrupt stack, and * we should check bounds against the interrupt stack. */ was_in_isr = arch_curr_cpu()->nested > 1; if ((thread == NULL) || was_in_isr) { /* We were servicing an interrupt or in early boot environment * and are supposed to be on the interrupt stack. 
*/ int cpu_id; #ifdef CONFIG_SMP cpu_id = arch_curr_cpu()->id; #else cpu_id = 0; #endif start = (uintptr_t)K_KERNEL_STACK_BUFFER(z_interrupt_stacks[cpu_id]); end = start + CONFIG_ISR_STACK_SIZE; #ifdef CONFIG_USERSPACE } else if (ps == UINT32_MAX) { /* Since the stashed PS is inside struct pointed by frame->ptr_to_bsa, * we need to verify that both frame and frame->ptr_to_bsa are valid * pointer within the thread stack. Also without PS, we have no idea * whether we were in kernel mode (using privileged stack) or user * mode (normal thread stack). So we need to check the whole stack * area. * * And... we cannot account for reserved area since we have no idea * which to use: ARCH_KERNEL_STACK_RESERVED or ARCH_THREAD_STACK_RESERVED * as we don't know whether we were in kernel or user mode. */ start = (uintptr_t)thread->stack_obj; end = Z_STACK_PTR_ALIGN(thread->stack_info.start + thread->stack_info.size); } else if (((ps & PS_RING_MASK) == 0U) && ((thread->base.user_options & K_USER) == K_USER)) { /* Check if this is a user thread, and that it was running in * kernel mode. If so, we must have been doing a syscall, so * check with privileged stack bounds. */ start = thread->stack_info.start - CONFIG_PRIVILEGED_STACK_SIZE; end = thread->stack_info.start; #endif } else { start = thread->stack_info.start; end = Z_STACK_PTR_ALIGN(thread->stack_info.start + thread->stack_info.size); } invalid = (addr <= start) || ((addr + sz) >= end); return invalid; } bool xtensa_is_frame_pointer_valid(_xtensa_irq_stack_frame_raw_t *frame) { _xtensa_irq_bsa_t *bsa; /* Check if the pointer to the frame is within stack bounds. If not, there is no * need to test if the BSA (base save area) pointer is also valid as it is * possibly invalid. */ if (xtensa_is_outside_stack_bounds((uintptr_t)frame, sizeof(*frame), UINT32_MAX)) { return false; } /* Need to test if the BSA area is also within stack bounds. The information * contained within the BSA is only valid if within stack bounds. 
*/ bsa = frame->ptr_to_bsa; if (xtensa_is_outside_stack_bounds((uintptr_t)bsa, sizeof(*bsa), UINT32_MAX)) { return false; } #ifdef CONFIG_USERSPACE /* With usespace, we have privileged stack and normal thread stack within * one stack object. So we need to further test whether the frame pointer * resides in the correct stack based on kernel/user mode. */ if (xtensa_is_outside_stack_bounds((uintptr_t)frame, sizeof(*frame), bsa->ps)) { return false; } #endif return true; } void xtensa_dump_stack(const void *stack) { _xtensa_irq_stack_frame_raw_t *frame = (void *)stack; _xtensa_irq_bsa_t *bsa; uintptr_t num_high_regs; int reg_blks_remaining; /* Don't dump stack if the stack pointer is invalid as any frame elements * obtained via de-referencing the frame pointer are probably also invalid. * Or worse, cause another access violation. */ if (!xtensa_is_frame_pointer_valid(frame)) { return; } bsa = frame->ptr_to_bsa; /* Calculate number of high registers. */ num_high_regs = (uint8_t *)bsa - (uint8_t *)frame + sizeof(void *); num_high_regs /= sizeof(uintptr_t); /* And high registers are always comes in 4 in a block. 
*/ reg_blks_remaining = (int)num_high_regs / 4; LOG_ERR(" ** A0 %p SP %p A2 %p A3 %p", (void *)bsa->a0, (void *)((char *)bsa + sizeof(*bsa)), (void *)bsa->a2, (void *)bsa->a3); if (reg_blks_remaining > 0) { reg_blks_remaining--; LOG_ERR(" ** A4 %p A5 %p A6 %p A7 %p", (void *)frame->blks[reg_blks_remaining].r0, (void *)frame->blks[reg_blks_remaining].r1, (void *)frame->blks[reg_blks_remaining].r2, (void *)frame->blks[reg_blks_remaining].r3); } if (reg_blks_remaining > 0) { reg_blks_remaining--; LOG_ERR(" ** A8 %p A9 %p A10 %p A11 %p", (void *)frame->blks[reg_blks_remaining].r0, (void *)frame->blks[reg_blks_remaining].r1, (void *)frame->blks[reg_blks_remaining].r2, (void *)frame->blks[reg_blks_remaining].r3); } if (reg_blks_remaining > 0) { reg_blks_remaining--; LOG_ERR(" ** A12 %p A13 %p A14 %p A15 %p", (void *)frame->blks[reg_blks_remaining].r0, (void *)frame->blks[reg_blks_remaining].r1, (void *)frame->blks[reg_blks_remaining].r2, (void *)frame->blks[reg_blks_remaining].r3); } #if XCHAL_HAVE_LOOPS LOG_ERR(" ** LBEG %p LEND %p LCOUNT %p", (void *)bsa->lbeg, (void *)bsa->lend, (void *)bsa->lcount); #endif LOG_ERR(" ** SAR %p", (void *)bsa->sar); #if XCHAL_HAVE_THREADPTR LOG_ERR(" ** THREADPTR %p", (void *)bsa->threadptr); #endif } static inline unsigned int get_bits(int offset, int num_bits, unsigned int val) { int mask; mask = BIT(num_bits) - 1; val = val >> offset; return val & mask; } static void print_fatal_exception(void *print_stack, int cause, bool is_dblexc, uint32_t depc) { void *pc; uint32_t ps, vaddr; _xtensa_irq_bsa_t *bsa = (void *)*(int **)print_stack; __asm__ volatile("rsr.excvaddr %0" : "=r"(vaddr)); if (is_dblexc) { LOG_ERR(" ** FATAL EXCEPTION (DOUBLE)"); } else { LOG_ERR(" ** FATAL EXCEPTION"); } LOG_ERR(" ** CPU %d EXCCAUSE %d (%s)", arch_curr_cpu()->id, cause, xtensa_exccause(cause)); /* Don't print information if the BSA area is invalid as any elements * obtained via de-referencing the pointer are probably also invalid. 
* Or worse, cause another access violation. */ if (xtensa_is_outside_stack_bounds((uintptr_t)bsa, sizeof(*bsa), UINT32_MAX)) { LOG_ERR(" ** VADDR %p Invalid SP %p", (void *)vaddr, print_stack); return; } ps = bsa->ps; pc = (void *)bsa->pc; LOG_ERR(" ** PC %p VADDR %p", pc, (void *)vaddr); if (is_dblexc) { LOG_ERR(" ** DEPC %p", (void *)depc); } LOG_ERR(" ** PS %p", (void *)bsa->ps); LOG_ERR(" ** (INTLEVEL:%d EXCM: %d UM:%d RING:%d WOE:%d OWB:%d CALLINC:%d)", get_bits(0, 4, ps), get_bits(4, 1, ps), get_bits(5, 1, ps), get_bits(6, 2, ps), get_bits(18, 1, ps), get_bits(8, 4, ps), get_bits(16, 2, ps)); } static ALWAYS_INLINE void usage_stop(void) { #ifdef CONFIG_SCHED_THREAD_USAGE z_sched_usage_stop(); #endif } static inline void *return_to(void *interrupted) { #ifdef CONFIG_MULTITHREADING return _current_cpu->nested <= 1 ? z_get_next_switch_handle(interrupted) : interrupted; #else return interrupted; #endif /* CONFIG_MULTITHREADING */ } /* The wrapper code lives here instead of in the python script that * generates _xtensa_handle_one_int*(). Seems cleaner, still kind of * ugly. * * This may be unused depending on number of interrupt levels * supported by the SoC. 
*/ #define DEF_INT_C_HANDLER(l) \ __unused void *xtensa_int##l##_c(void *interrupted_stack) \ { \ uint32_t irqs, intenable, m; \ usage_stop(); \ __asm__ volatile("rsr.interrupt %0" : "=r"(irqs)); \ __asm__ volatile("rsr.intenable %0" : "=r"(intenable)); \ irqs &= intenable; \ while ((m = _xtensa_handle_one_int##l(irqs))) { \ irqs ^= m; \ __asm__ volatile("wsr.intclear %0" : : "r"(m)); \ } \ return return_to(interrupted_stack); \ } #if XCHAL_HAVE_NMI #define MAX_INTR_LEVEL XCHAL_NMILEVEL #elif XCHAL_HAVE_INTERRUPTS #define MAX_INTR_LEVEL XCHAL_NUM_INTLEVELS #else #error Xtensa core with no interrupt support is used #define MAX_INTR_LEVEL 0 #endif #if MAX_INTR_LEVEL >= 2 DEF_INT_C_HANDLER(2) #endif #if MAX_INTR_LEVEL >= 3 DEF_INT_C_HANDLER(3) #endif #if MAX_INTR_LEVEL >= 4 DEF_INT_C_HANDLER(4) #endif #if MAX_INTR_LEVEL >= 5 DEF_INT_C_HANDLER(5) #endif #if MAX_INTR_LEVEL >= 6 DEF_INT_C_HANDLER(6) #endif #if MAX_INTR_LEVEL >= 7 DEF_INT_C_HANDLER(7) #endif static inline DEF_INT_C_HANDLER(1) /* C handler for level 1 exceptions/interrupts. Hooked from the * DEF_EXCINT 1 vector declaration in assembly code. This one looks * different because exceptions and interrupts land at the same * vector; other interrupt levels have their own vectors. 
*/ void *xtensa_excint1_c(void *esf) { int cause, reason; int *interrupted_stack = &((struct arch_esf *)esf)->dummy; _xtensa_irq_bsa_t *bsa = (void *)*(int **)interrupted_stack; bool is_fatal_error = false; bool is_dblexc = false; uint32_t ps; void *pc, *print_stack = (void *)interrupted_stack; uint32_t depc = 0; #ifdef CONFIG_XTENSA_MMU depc = XTENSA_RSR(ZSR_DEPC_SAVE_STR); cause = XTENSA_RSR(ZSR_EXCCAUSE_SAVE_STR); is_dblexc = (depc != 0U); #else /* CONFIG_XTENSA_MMU */ __asm__ volatile("rsr.exccause %0" : "=r"(cause)); #endif /* CONFIG_XTENSA_MMU */ switch (cause) { case EXCCAUSE_LEVEL1_INTERRUPT: #ifdef CONFIG_XTENSA_MMU if (!is_dblexc) { return xtensa_int1_c(interrupted_stack); } #else return xtensa_int1_c(interrupted_stack); #endif /* CONFIG_XTENSA_MMU */ break; #ifndef CONFIG_USERSPACE /* Syscalls are handled earlier in assembly if MMU is enabled. * So we don't need this here. */ case EXCCAUSE_SYSCALL: /* Just report it to the console for now */ LOG_ERR(" ** SYSCALL PS %p PC %p", (void *)bsa->ps, (void *)bsa->pc); xtensa_dump_stack(interrupted_stack); /* Xtensa exceptions don't automatically advance PC, * have to skip the SYSCALL instruction manually or * else it will just loop forever */ bsa->pc += 3; break; #endif /* !CONFIG_USERSPACE */ default: reason = K_ERR_CPU_EXCEPTION; /* If the BSA area is invalid, we cannot trust anything coming out of it. */ if (xtensa_is_outside_stack_bounds((uintptr_t)bsa, sizeof(*bsa), UINT32_MAX)) { goto skip_checks; } ps = bsa->ps; pc = (void *)bsa->pc; /* Default for exception */ is_fatal_error = true; /* We need to distinguish between an ill in xtensa_arch_except, * e.g for k_panic, and any other ill. For exceptions caused by * xtensa_arch_except calls, we also need to pass the reason_p * to xtensa_fatal_error. Since the ARCH_EXCEPT frame is in the * BSA, the first arg reason_p is stored at the A2 offset. 
* We assign EXCCAUSE the unused, reserved code 63; this may be * problematic if the app or new boards also decide to repurpose * this code. * * Another intentionally ill is from xtensa_arch_kernel_oops. * Kernel OOPS has to be explicity raised so we can simply * set the reason and continue. */ if (cause == EXCCAUSE_ILLEGAL) { if (pc == (void *)&xtensa_arch_except_epc) { cause = 63; __asm__ volatile("wsr.exccause %0" : : "r"(cause)); reason = bsa->a2; } else if (pc == (void *)&xtensa_arch_kernel_oops_epc) { cause = 64; /* kernel oops */ reason = K_ERR_KERNEL_OOPS; /* A3 contains the second argument to * xtensa_arch_kernel_oops(reason, ssf) * where ssf is the stack frame causing * the kernel oops. */ print_stack = (void *)bsa->a3; } } skip_checks: if (reason != K_ERR_KERNEL_OOPS) { print_fatal_exception(print_stack, cause, is_dblexc, depc); } /* FIXME: legacy xtensa port reported "HW" exception * for all unhandled exceptions, which seems incorrect * as these are software errors. Should clean this * up. */ xtensa_fatal_error(reason, (void *)print_stack); break; } #ifdef CONFIG_XTENSA_MMU switch (cause) { case EXCCAUSE_LEVEL1_INTERRUPT: #ifndef CONFIG_USERSPACE case EXCCAUSE_SYSCALL: #endif /* !CONFIG_USERSPACE */ is_fatal_error = false; break; default: is_fatal_error = true; break; } #endif /* CONFIG_XTENSA_MMU */ if (is_dblexc || is_fatal_error) { uint32_t ignore; /* We are going to manipulate _current_cpu->nested manually. * Since the error is fatal, for recoverable errors, code * execution must not return back to the current thread as * it is being terminated (via above xtensa_fatal_error()). * So we need to prevent more interrupts coming in which * will affect the nested value as we are going outside of * normal interrupt handling procedure. * * Setting nested to 1 has two effects: * 1. Force return_to() to choose a new thread. * Since the current thread is being terminated, it will * not be chosen again. * 2. 
When context switches to the newly chosen thread, * nested must be zero for normal code execution, * as that is not in interrupt context at all. * After returning from this function, the rest of * interrupt handling code will decrement nested, * resulting it being zero before switching to another * thread. */ __asm__ volatile("rsil %0, %1" : "=r" (ignore) : "i"(XCHAL_EXCM_LEVEL)); _current_cpu->nested = 1; } #if defined(CONFIG_XTENSA_MMU) if (is_dblexc) { XTENSA_WSR(ZSR_DEPC_SAVE_STR, 0); } #endif /* CONFIG_XTENSA_MMU */ return return_to(interrupted_stack); } #if defined(CONFIG_GDBSTUB) void *xtensa_debugint_c(int *interrupted_stack) { extern void z_gdb_isr(struct arch_esf *esf); z_gdb_isr((void *)interrupted_stack); return return_to(interrupted_stack); } #endif ```
/content/code_sandbox/arch/xtensa/core/vector_handlers.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,281
```c */ #include <zephyr/kernel.h> void arch_timing_init(void) { } void arch_timing_start(void) { } void arch_timing_stop(void) { } uint64_t arch_timing_freq_get(void) { return CONFIG_XTENSA_CCOUNT_HZ; } timing_t arch_timing_counter_get(void) { uint32_t ccount; __asm__ volatile ("rsr %0, CCOUNT" : "=r"(ccount)); return ccount; } uint64_t arch_timing_cycles_get(volatile timing_t *const start, volatile timing_t *const end) { int64_t dt = (int64_t) (*end - *start); if (dt < 0) { dt += 0x100000000ULL; } return (uint64_t) dt; } uint64_t arch_timing_cycles_to_ns(uint64_t cycles) { return cycles * 1000000000ULL / CONFIG_XTENSA_CCOUNT_HZ; } uint64_t arch_timing_cycles_to_ns_avg(uint64_t cycles, uint32_t count) { /* Why is this an arch API? This is just math! */ return arch_timing_cycles_to_ns(cycles) / (uint64_t) count; } uint32_t arch_timing_freq_get_mhz(void) { return arch_timing_freq_get() / 1000000ULL; } ```
/content/code_sandbox/arch/xtensa/core/timing.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
281
```c /* * */ #include <zephyr/toolchain.h> #include <zephyr/sys/util_macro.h> #ifdef CONFIG_XTENSA_MORE_SPIN_RELAX_NOPS /* Some compilers might "optimize out" (i.e. remove) continuous NOPs. * So force no optimization to avoid that. */ __no_optimization void arch_spin_relax(void) { #define NOP1(_, __) __asm__ volatile("nop.n;"); LISTIFY(CONFIG_XTENSA_NUM_SPIN_RELAX_NOPS, NOP1, (;)) #undef NOP1 } #endif /* CONFIG_XTENSA_MORE_SPIN_RELAX_NOPS */ /** * init for multi-core/smp is done on the SoC level. Add this here for * compatibility with other SMP systems. */ int arch_smp_init(void) { return 0; } ```
/content/code_sandbox/arch/xtensa/core/smp.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
175
```c /* * */ #include <zephyr/llext/elf.h> #include <zephyr/llext/llext.h> #include <zephyr/llext/loader.h> #include <zephyr/logging/log.h> LOG_MODULE_DECLARE(llext); #define R_XTENSA_NONE 0 #define R_XTENSA_32 1 #define R_XTENSA_RTLD 2 #define R_XTENSA_GLOB_DAT 3 #define R_XTENSA_JMP_SLOT 4 #define R_XTENSA_RELATIVE 5 #define R_XTENSA_PLT 6 /** * @brief Architecture specific function for relocating shared elf * * Elf files contain a series of relocations described in multiple sections. * These relocation instructions are architecture specific and each architecture * supporting modules must implement this. */ void arch_elf_relocate_local(struct llext_loader *ldr, struct llext *ext, const elf_rela_t *rel, const elf_sym_t *sym, size_t got_offset) { uint8_t *text = ext->mem[LLEXT_MEM_TEXT]; int type = ELF32_R_TYPE(rel->r_info); elf_word *got_entry = (elf_word *)(text + got_offset); uintptr_t sh_addr; if (ELF_ST_TYPE(sym->st_info) == STT_SECTION) { elf_shdr_t *shdr = llext_peek(ldr, ldr->hdr.e_shoff + sym->st_shndx * ldr->hdr.e_shentsize); sh_addr = shdr->sh_addr ? : (uintptr_t)llext_peek(ldr, shdr->sh_offset); } else { sh_addr = ldr->sects[LLEXT_MEM_TEXT].sh_addr; } switch (type) { case R_XTENSA_RELATIVE: /* Relocate a local symbol: Xtensa specific */ *got_entry += (uintptr_t)text - sh_addr; break; case R_XTENSA_32: *got_entry += sh_addr; break; default: LOG_DBG("unsupported relocation type %u", type); return; } LOG_DBG("relocation to %#x type %u at %p", *got_entry, type, (void *)got_entry); } ```
/content/code_sandbox/arch/xtensa/core/elf.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
501
```unknown /* * */ #include <xtensa/coreasm.h> #include <xtensa/corebits.h> #include <xtensa/config/system.h> #include <xtensa/hal.h> #include <xtensa_asm2_context.h> #include <zephyr/offsets.h> .section .iram1, "ax" .align 4 .global xtensa_backtrace_get_start .type xtensa_backtrace_get_start, @function xtensa_backtrace_get_start: entry a1, 32 /* Spill registers onto stack (excluding this function) */ call8 xthal_window_spill /* a2, a3, a4 should be out arguments for i PC, i SP, i-1 PC respectively. * Use a6 and a7 as scratch */ /* Load address for interrupted stack */ l32i a6, a5, 0 /* Load i PC in a7 */ l32i a7, a6, ___xtensa_irq_bsa_t_pc_OFFSET /* Store value of i PC in a2 */ s32i a7, a2, 0 /* Load value for (i-1) PC, which return address of i into a7 */ l32i a7, a6, ___xtensa_irq_bsa_t_a0_OFFSET /* Store value of (i-1) PC in a4 */ s32i a7, a4, 0 /* Add base stack frame size in interrupted stack to get i SP */ addi a6, a6, ___xtensa_irq_bsa_t_SIZEOF /* Store i SP in a3 */ s32i a6, a3, 0 retw ```
/content/code_sandbox/arch/xtensa/core/debug_helpers_asm.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
372
```c /* */ #include <zephyr/kernel.h> #include <zephyr/arch/cpu.h> #include <zephyr/kernel_structs.h> #include <inttypes.h> #include <xtensa/config/specreg.h> #include <xtensa_backtrace.h> #include <zephyr/arch/common/exc_handle.h> #include <xtensa_internal.h> #include <zephyr/logging/log.h> LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); #if defined(CONFIG_SIMULATOR_XTENSA) || defined(XT_SIMULATOR) #include <xtensa/simcall.h> #endif char *xtensa_exccause(unsigned int cause_code) { #if defined(CONFIG_PRINTK) || defined(CONFIG_LOG) switch (cause_code) { case 0: return "illegal instruction"; case 1: return "syscall"; case 2: return "instr fetch error"; case 3: return "load/store error"; case 4: return "level-1 interrupt"; case 5: return "alloca"; case 6: return "divide by zero"; case 8: return "privileged"; case 9: return "load/store alignment"; case 12: return "instr PIF data error"; case 13: return "load/store PIF data error"; case 14: return "instr PIF addr error"; case 15: return "load/store PIF addr error"; case 16: return "instr TLB miss"; case 17: return "instr TLB multi hit"; case 18: return "instr fetch privilege"; case 20: return "inst fetch prohibited"; case 24: return "load/store TLB miss"; case 25: return "load/store TLB multi hit"; case 26: return "load/store privilege"; case 28: return "load prohibited"; case 29: return "store prohibited"; case 32: case 33: case 34: case 35: case 36: case 37: case 38: case 39: return "coprocessor disabled"; case 63: /* i.e. 
z_except_reason */ return "zephyr exception"; case 64: return "kernel oops"; default: return "unknown/reserved"; } #else ARG_UNUSED(cause_code); return "na"; #endif } void xtensa_fatal_error(unsigned int reason, const struct arch_esf *esf) { #ifdef CONFIG_EXCEPTION_DEBUG if (esf != NULL) { /* Don't want to get elbowed by xtensa_switch * in between printing registers and dumping them; * corrupts backtrace */ unsigned int key = arch_irq_lock(); xtensa_dump_stack(esf); #if defined(CONFIG_XTENSA_ENABLE_BACKTRACE) #if XCHAL_HAVE_WINDOWED xtensa_backtrace_print(100, (int *)esf); #endif #endif arch_irq_unlock(key); } #endif /* CONFIG_EXCEPTION_DEBUG */ z_fatal_error(reason, esf); } #if defined(CONFIG_SIMULATOR_XTENSA) || defined(XT_SIMULATOR) void xtensa_simulator_exit(int return_code) { __asm__ ( "mov a3, %[code]\n\t" "movi a2, %[call]\n\t" "simcall\n\t" : : [code] "r" (return_code), [call] "i" (SYS_exit) : "a3", "a2"); CODE_UNREACHABLE; } FUNC_NORETURN void arch_system_halt(unsigned int reason) { xtensa_simulator_exit(255 - reason); CODE_UNREACHABLE; } #endif FUNC_NORETURN void arch_syscall_oops(void *ssf) { xtensa_arch_kernel_oops(K_ERR_KERNEL_OOPS, ssf); CODE_UNREACHABLE; } #ifdef CONFIG_USERSPACE void z_impl_xtensa_user_fault(unsigned int reason) { if ((_current->base.user_options & K_USER) != 0) { if ((reason != K_ERR_KERNEL_OOPS) && (reason != K_ERR_STACK_CHK_FAIL)) { reason = K_ERR_KERNEL_OOPS; } } xtensa_arch_except(reason); } static void z_vrfy_xtensa_user_fault(unsigned int reason) { z_impl_xtensa_user_fault(reason); } #include <zephyr/syscalls/xtensa_user_fault_mrsh.c> #endif /* CONFIG_USERSPACE */ ```
/content/code_sandbox/arch/xtensa/core/fatal.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
957
```c /* */ #include <zephyr/types.h> #include <stdio.h> #include <zephyr/arch/xtensa/irq.h> #include <zephyr/sys/__assert.h> #include <kernel_arch_func.h> #include <xtensa_internal.h> #include <zephyr/logging/log.h> LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); /** * @internal * * @brief Set an interrupt's priority * * The priority is verified if ASSERT_ON is enabled. * * The priority is verified if ASSERT_ON is enabled. The maximum number of * priority levels is a little complex, as there are some hardware priority * levels which are reserved: three for various types of exceptions, and * possibly one additional to support zero latency interrupts. * * Valid values are from 1 to 6. Interrupts of priority 1 are not masked when * interrupts are locked system-wide, so care must be taken when using them. * ISR installed with priority 0 interrupts cannot make kernel calls. */ void z_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags) { __ASSERT(prio < XCHAL_EXCM_LEVEL + 1, "invalid priority %d! 
values must be less than %d\n", prio, XCHAL_EXCM_LEVEL + 1); /* TODO: Write code to set priority if this is ever possible on * Xtensa */ } #ifdef CONFIG_DYNAMIC_INTERRUPTS #ifndef CONFIG_MULTI_LEVEL_INTERRUPTS int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, void (*routine)(const void *parameter), const void *parameter, uint32_t flags) { ARG_UNUSED(flags); ARG_UNUSED(priority); z_isr_install(irq, routine, parameter); return irq; } #else /* !CONFIG_MULTI_LEVEL_INTERRUPTS */ int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, void (*routine)(const void *parameter), const void *parameter, uint32_t flags) { return z_soc_irq_connect_dynamic(irq, priority, routine, parameter, flags); } #endif /* !CONFIG_MULTI_LEVEL_INTERRUPTS */ #endif /* CONFIG_DYNAMIC_INTERRUPTS */ void z_irq_spurious(const void *arg) { int irqs, ie; ARG_UNUSED(arg); __asm__ volatile("rsr.interrupt %0" : "=r"(irqs)); __asm__ volatile("rsr.intenable %0" : "=r"(ie)); LOG_ERR(" ** Spurious INTERRUPT(s) %p, INTENABLE = %p", (void *)irqs, (void *)ie); xtensa_fatal_error(K_ERR_SPURIOUS_IRQ, NULL); } int xtensa_irq_is_enabled(unsigned int irq) { uint32_t ie; __asm__ volatile("rsr.intenable %0" : "=r"(ie)); return (ie & (1 << irq)) != 0U; } ```
/content/code_sandbox/arch/xtensa/core/irq_manage.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
607
```c /* * */ #include <zephyr/kernel.h> #include <kernel_internal.h> #include <zephyr/toolchain.h> #include <zephyr/debug/gdbstub.h> #include <xtensa_asm2_context.h> #include <xtensa/corebits.h> static bool not_first_break; extern struct gdb_ctx xtensa_gdb_ctx; /* * Special register number (from specreg.h). * * These should be the same across different Xtensa SoCs. */ enum { LBEG = 0, LEND = 1, LCOUNT = 2, SAR = 3, SCOMPARE1 = 12, WINDOWBASE = 72, WINDOWSTART = 73, IBREAKENABLE = 96, MEMCTL = 97, ATOMCTL = 99, IBREAKA0 = 128, IBREAKA1 = 129, CONFIGID0 = 176, EPC_1 = 177, EPC_2 = 178, EPC_3 = 179, EPC_4 = 180, EPC_5 = 181, EPC_6 = 182, EPC_7 = 183, DEPC = 192, EPS_2 = 194, EPS_3 = 195, EPS_4 = 196, EPS_5 = 197, EPS_6 = 198, EPS_7 = 199, CONFIGID1 = 208, EXCSAVE_1 = 209, EXCSAVE_2 = 210, EXCSAVE_3 = 211, EXCSAVE_4 = 212, EXCSAVE_5 = 213, EXCSAVE_6 = 214, EXCSAVE_7 = 215, CPENABLE = 224, INTERRUPT = 226, INTENABLE = 228, PS = 230, THREADPTR = 231, EXCCAUSE = 232, DEBUGCAUSE = 233, CCOUNT = 234, PRID = 235, ICOUNT = 236, ICOUNTLEVEL = 237, EXCVADDR = 238, CCOMPARE_0 = 240, CCOMPARE_1 = 241, CCOMPARE_2 = 242, MISC_REG_0 = 244, MISC_REG_1 = 245, MISC_REG_2 = 246, MISC_REG_3 = 247, }; #define get_one_sreg(regnum_p) ({ \ unsigned int retval; \ __asm__ volatile( \ "rsr %[retval], %[regnum]\n\t" \ : [retval] "=r" (retval) \ : [regnum] "i" (regnum_p)); \ retval; \ }) #define set_one_sreg(regnum_p, regval) { \ __asm__ volatile( \ "wsr %[val], %[regnum]\n\t" \ :: \ [val] "r" (regval), \ [regnum] "i" (regnum_p)); \ } /** * Read one special register. * * @param ctx GDB context * @param reg Register descriptor */ static void read_sreg(struct gdb_ctx *ctx, struct xtensa_register *reg) { uint8_t regno; uint32_t val; bool has_val = true; if (!gdb_xtensa_is_special_reg(reg)) { return; } /* * Special registers have 0x300 added to the register number * in the register descriptor. So need to extract the actual * special register number recognized by architecture, * which is 0-255. 
*/ regno = reg->regno & 0xFF; /* * Each special register has to be done separately * as the register number in RSR/WSR needs to be * hard-coded at compile time. */ switch (regno) { case SAR: val = get_one_sreg(SAR); break; case PS: val = get_one_sreg(PS); break; case MEMCTL: val = get_one_sreg(MEMCTL); break; case ATOMCTL: val = get_one_sreg(ATOMCTL); break; case CONFIGID0: val = get_one_sreg(CONFIGID0); break; case CONFIGID1: val = get_one_sreg(CONFIGID1); break; case DEBUGCAUSE: val = get_one_sreg(DEBUGCAUSE); break; case EXCCAUSE: val = get_one_sreg(EXCCAUSE); break; case DEPC: val = get_one_sreg(DEPC); break; case EPC_1: val = get_one_sreg(EPC_1); break; case EXCSAVE_1: val = get_one_sreg(EXCSAVE_1); break; case EXCVADDR: val = get_one_sreg(EXCVADDR); break; #if XCHAL_HAVE_LOOPS case LBEG: val = get_one_sreg(LBEG); break; case LEND: val = get_one_sreg(LEND); break; case LCOUNT: val = get_one_sreg(LCOUNT); break; #endif #if XCHAL_HAVE_S32C1I case SCOMPARE1: val = get_one_sreg(SCOMPARE1); break; #endif #if XCHAL_HAVE_WINDOWED case WINDOWBASE: val = get_one_sreg(WINDOWBASE); break; case WINDOWSTART: val = get_one_sreg(WINDOWSTART); break; #endif #if XCHAL_NUM_INTLEVELS > 0 case EPS_2: val = get_one_sreg(EPS_2); break; case EPC_2: val = get_one_sreg(EPC_2); break; case EXCSAVE_2: val = get_one_sreg(EXCSAVE_2); break; #endif #if XCHAL_NUM_INTLEVELS > 1 case EPS_3: val = get_one_sreg(EPS_3); break; case EPC_3: val = get_one_sreg(EPC_3); break; case EXCSAVE_3: val = get_one_sreg(EXCSAVE_3); break; #endif #if XCHAL_NUM_INTLEVELS > 2 case EPC_4: val = get_one_sreg(EPC_4); break; case EPS_4: val = get_one_sreg(EPS_4); break; case EXCSAVE_4: val = get_one_sreg(EXCSAVE_4); break; #endif #if XCHAL_NUM_INTLEVELS > 3 case EPC_5: val = get_one_sreg(EPC_5); break; case EPS_5: val = get_one_sreg(EPS_5); break; case EXCSAVE_5: val = get_one_sreg(EXCSAVE_5); break; #endif #if XCHAL_NUM_INTLEVELS > 4 case EPC_6: val = get_one_sreg(EPC_6); break; case EPS_6: val = 
get_one_sreg(EPS_6); break; case EXCSAVE_6: val = get_one_sreg(EXCSAVE_6); break; #endif #if XCHAL_NUM_INTLEVELS > 5 case EPC_7: val = get_one_sreg(EPC_7); break; case EPS_7: val = get_one_sreg(EPS_7); break; case EXCSAVE_7: val = get_one_sreg(EXCSAVE_7); break; #endif #if XCHAL_HAVE_CP case CPENABLE: val = get_one_sreg(CPENABLE); break; #endif #if XCHAL_HAVE_INTERRUPTS case INTERRUPT: val = get_one_sreg(INTERRUPT); break; case INTENABLE: val = get_one_sreg(INTENABLE); break; #endif #if XCHAL_HAVE_THREADPTR case THREADPTR: val = get_one_sreg(THREADPTR); break; #endif #if XCHAL_HAVE_CCOUNT case CCOUNT: val = get_one_sreg(CCOUNT); break; #endif #if XCHAL_HAVE_PRID case PRID: val = get_one_sreg(PRID); break; #endif #if XCHAL_NUM_TIMERS > 0 case CCOMPARE_0: val = get_one_sreg(CCOMPARE_0); break; #endif #if XCHAL_NUM_TIMERS > 1 case CCOMPARE_1: val = get_one_sreg(CCOMPARE_1); break; #endif #if XCHAL_NUM_TIMERS > 2 case CCOMPARE_2: val = get_one_sreg(CCOMPARE_2); break; #endif #if XCHAL_NUM_MISC_REGS > 0 case MISC_REG_0: val = get_one_sreg(MISC_REG_0); break; #endif #if XCHAL_NUM_MISC_REGS > 1 case MISC_REG_1: val = get_one_sreg(MISC_REG_1); break; #endif #if XCHAL_NUM_MISC_REGS > 2 case MISC_REG_2: val = get_one_sreg(MISC_REG_2); break; #endif #if XCHAL_NUM_MISC_REGS > 3 case MISC_REG_3: val = get_one_sreg(MISC_REG_3); break; #endif default: has_val = false; break; } if (has_val) { reg->val = val; reg->seqno = ctx->seqno; } } /** * Translate exception into GDB exception reason. 
* * @param reason Reason for exception */ static unsigned int get_gdb_exception_reason(unsigned int reason) { unsigned int exception; switch (reason) { case EXCCAUSE_ILLEGAL: /* illegal instruction */ exception = GDB_EXCEPTION_INVALID_INSTRUCTION; break; case EXCCAUSE_INSTR_ERROR: /* instr fetch error */ exception = GDB_EXCEPTION_MEMORY_FAULT; break; case EXCCAUSE_LOAD_STORE_ERROR: /* load/store error */ exception = GDB_EXCEPTION_MEMORY_FAULT; break; case EXCCAUSE_DIVIDE_BY_ZERO: /* divide by zero */ exception = GDB_EXCEPTION_DIVIDE_ERROR; break; case EXCCAUSE_UNALIGNED: /* load/store alignment */ exception = GDB_EXCEPTION_MEMORY_FAULT; break; case EXCCAUSE_INSTR_DATA_ERROR: /* instr PIF data error */ exception = GDB_EXCEPTION_MEMORY_FAULT; break; case EXCCAUSE_LOAD_STORE_DATA_ERROR: /* load/store PIF data error */ exception = GDB_EXCEPTION_MEMORY_FAULT; break; case EXCCAUSE_INSTR_ADDR_ERROR: /* instr PIF addr error */ exception = GDB_EXCEPTION_MEMORY_FAULT; break; case EXCCAUSE_LOAD_STORE_ADDR_ERROR: /* load/store PIF addr error */ exception = GDB_EXCEPTION_MEMORY_FAULT; break; case EXCCAUSE_INSTR_PROHIBITED: /* inst fetch prohibited */ exception = GDB_EXCEPTION_INVALID_MEMORY; break; case EXCCAUSE_LOAD_STORE_RING: /* load/store privilege */ exception = GDB_EXCEPTION_INVALID_MEMORY; break; case EXCCAUSE_LOAD_PROHIBITED: /* load prohibited */ exception = GDB_EXCEPTION_INVALID_MEMORY; break; case EXCCAUSE_STORE_PROHIBITED: /* store prohibited */ exception = GDB_EXCEPTION_INVALID_MEMORY; break; case EXCCAUSE_CP0_DISABLED: __fallthrough; case EXCCAUSE_CP1_DISABLED: __fallthrough; case EXCCAUSE_CP2_DISABLED: __fallthrough; case EXCCAUSE_CP3_DISABLED: __fallthrough; case EXCCAUSE_CP4_DISABLED: __fallthrough; case EXCCAUSE_CP5_DISABLED: __fallthrough; case EXCCAUSE_CP6_DISABLED: __fallthrough; case EXCCAUSE_CP7_DISABLED: /* coprocessor disabled */ exception = GDB_EXCEPTION_INVALID_INSTRUCTION; break; default: exception = GDB_EXCEPTION_MEMORY_FAULT; break; } return 
exception; } /** * Copy debug information from stack into GDB context. * * This copies the information stored in the stack into the GDB * context for the thread being debugged. * * @param ctx GDB context * @param stack Pointer to the stack frame */ static void copy_to_ctx(struct gdb_ctx *ctx, const struct arch_esf *stack) { struct xtensa_register *reg; int idx, num_laddr_regs; uint32_t *bsa = *(int **)stack; if ((int *)bsa - stack > 4) { num_laddr_regs = 8; } else if ((int *)bsa - stack > 8) { num_laddr_regs = 12; } else if ((int *)bsa - stack > 12) { num_laddr_regs = 16; } else { num_laddr_regs = 4; } /* Get logical address registers A0 - A<num_laddr_regs> from stack */ for (idx = 0; idx < num_laddr_regs; idx++) { reg = &xtensa_gdb_ctx.regs[xtensa_gdb_ctx.a0_idx + idx]; if (reg->regno == SOC_GDB_REGNO_A1) { /* A1 is calculated */ reg->val = POINTER_TO_UINT( ((char *)bsa) + BASE_SAVE_AREA_SIZE); reg->seqno = ctx->seqno; } else { reg->val = bsa[reg->stack_offset / 4]; reg->seqno = ctx->seqno; } } /* For registers other than logical address registers */ for (idx = 0; idx < xtensa_gdb_ctx.num_regs; idx++) { reg = &xtensa_gdb_ctx.regs[idx]; if (gdb_xtensa_is_logical_addr_reg(reg)) { /* Logical address registers are handled above */ continue; } else if (reg->stack_offset != 0) { /* For those registers stashed in stack */ reg->val = bsa[reg->stack_offset / 4]; reg->seqno = ctx->seqno; } else if (gdb_xtensa_is_special_reg(reg)) { read_sreg(ctx, reg); } } #if XCHAL_HAVE_WINDOWED uint8_t a0_idx, ar_idx, wb_start; wb_start = (uint8_t)xtensa_gdb_ctx.regs[xtensa_gdb_ctx.wb_idx].val; /* * Copied the logical registers A0-A15 to physical registers (AR*) * according to WINDOWBASE. */ for (idx = 0; idx < num_laddr_regs; idx++) { /* Index to register description array for A */ a0_idx = xtensa_gdb_ctx.a0_idx + idx; /* Find the start of window (== WINDOWBASE * 4) */ ar_idx = wb_start * 4; /* Which logical register we are working on... 
*/ ar_idx += idx; /* Wrap around A64 (or A32) -> A0 */ ar_idx %= XCHAL_NUM_AREGS; /* Index to register description array for AR */ ar_idx += xtensa_gdb_ctx.ar_idx; xtensa_gdb_ctx.regs[ar_idx].val = xtensa_gdb_ctx.regs[a0_idx].val; xtensa_gdb_ctx.regs[ar_idx].seqno = xtensa_gdb_ctx.regs[a0_idx].seqno; } #endif /* Disable stepping */ set_one_sreg(ICOUNT, 0); set_one_sreg(ICOUNTLEVEL, 0); __asm__ volatile("isync"); } /** * Restore debug information from stack into GDB context. * * This copies the information stored the GDB context back into * the stack. So that the thread being debugged has new values * after context switch from GDB stub back to the thread. * * @param ctx GDB context * @param stack Pointer to the stack frame */ static void restore_from_ctx(struct gdb_ctx *ctx, const struct arch_esf *stack) { struct xtensa_register *reg; int idx, num_laddr_regs; _xtensa_irq_bsa_t *bsa = (void *)*(int **)stack; if ((int *)bsa - stack > 4) { num_laddr_regs = 8; } else if ((int *)bsa - stack > 8) { num_laddr_regs = 12; } else if ((int *)bsa - stack > 12) { num_laddr_regs = 16; } else { num_laddr_regs = 4; } /* * Note that we don't need to copy AR* back to A* for * windowed registers. GDB manipulates A0-A15 directly * without going through AR*. */ /* * Push values of logical address registers A0 - A<num_laddr_regs> * back to stack. 
*/ for (idx = 0; idx < num_laddr_regs; idx++) { reg = &xtensa_gdb_ctx.regs[xtensa_gdb_ctx.a0_idx + idx]; if (reg->regno == SOC_GDB_REGNO_A1) { /* Shouldn't be changing stack pointer */ continue; } else { bsa[reg->stack_offset / 4] = reg->val; } } for (idx = 0; idx < xtensa_gdb_ctx.num_regs; idx++) { reg = &xtensa_gdb_ctx.regs[idx]; if (gdb_xtensa_is_logical_addr_reg(reg)) { /* Logical address registers are handled above */ continue; } else if (reg->stack_offset != 0) { /* For those registers stashed in stack */ bsa[reg->stack_offset / 4] = reg->val; } else if (gdb_xtensa_is_special_reg(reg)) { /* * Currently not writing back any special * registers. */ continue; } } if (!not_first_break) { /* * Need to go past the BREAK.N instruction (16-bit) * in arch_gdb_init(). Or else the SoC will simply * go back to execute the BREAK.N instruction, * which raises debug interrupt, and we will be * stuck in an infinite loop. */ bsa->pc += 2; not_first_break = true; } } void arch_gdb_continue(void) { /* * No need to do anything. Simply let the GDB stub main * loop to return from debug interrupt for code to * continue running. */ } void arch_gdb_step(void) { set_one_sreg(ICOUNT, 0xFFFFFFFEU); set_one_sreg(ICOUNTLEVEL, XCHAL_DEBUGLEVEL); __asm__ volatile("isync"); } /** * Convert a register value into hex string. * * Note that this assumes the output buffer always has enough * space. 
* * @param reg Xtensa register * @param hex Pointer to output buffer * @return Number of bytes written to output buffer */ static size_t reg2hex(const struct xtensa_register *reg, char *hex) { uint8_t *bin = (uint8_t *)&reg->val; size_t binlen = reg->byte_size; for (size_t i = 0; i < binlen; i++) { if (hex2char(bin[i] >> 4, &hex[2 * i]) < 0) { return 0; } if (hex2char(bin[i] & 0xf, &hex[2 * i + 1]) < 0) { return 0; } } return 2 * binlen; } size_t arch_gdb_reg_readall(struct gdb_ctx *ctx, uint8_t *buf, size_t buflen) { struct xtensa_register *reg; int idx; uint8_t *output; size_t ret; if (buflen < SOC_GDB_GPKT_HEX_SIZE) { ret = 0; goto out; } /* * Fill with 'x' to mark them as available since most registers * are not available in the stack. */ memset(buf, 'x', SOC_GDB_GPKT_HEX_SIZE); ret = 0; for (idx = 0; idx < ctx->num_regs; idx++) { reg = &ctx->regs[idx]; if (reg->seqno != ctx->seqno) { /* * Register struct has stale value from * previous debug interrupt. Don't * send it out. */ continue; } if ((reg->gpkt_offset < 0) || (reg->gpkt_offset >= SOC_GDB_GPKT_BIN_SIZE)) { /* * Register is not in G-packet, or * beyond maximum size of G-packet. * * xtensa-config.c may specify G-packet * offset beyond what GDB expects, so * need to make sure we won't write beyond * the buffer. */ continue; } /* Two hex characters per byte */ output = &buf[reg->gpkt_offset * 2]; if (reg2hex(reg, output) == 0) { goto out; } } ret = SOC_GDB_GPKT_HEX_SIZE; out: return ret; } size_t arch_gdb_reg_writeall(struct gdb_ctx *ctx, uint8_t *hex, size_t hexlen) { /* * GDB on Xtensa does not seem to use G-packet to write register * values. So we can skip this function. 
*/ ARG_UNUSED(ctx); ARG_UNUSED(hex); ARG_UNUSED(hexlen); return 0; } size_t arch_gdb_reg_readone(struct gdb_ctx *ctx, uint8_t *buf, size_t buflen, uint32_t regno) { struct xtensa_register *reg; int idx; size_t ret; ret = 0; for (idx = 0; idx < ctx->num_regs; idx++) { reg = &ctx->regs[idx]; /* * GDB sends the G-packet index as register number * instead of the actual Xtensa register number. */ if (reg->idx == regno) { if (reg->seqno != ctx->seqno) { /* * Register value has stale value from * previous debug interrupt. Report * register value as unavailable. * * Don't report error here, or else GDB * may stop the debug session. */ if (buflen < 2) { /* Output buffer cannot hold 'xx' */ goto out; } buf[0] = 'x'; buf[1] = 'x'; ret = 2; goto out; } /* Make sure output buffer is large enough */ if (buflen < (reg->byte_size * 2)) { goto out; } ret = reg2hex(reg, buf); break; } } out: return ret; } size_t arch_gdb_reg_writeone(struct gdb_ctx *ctx, uint8_t *hex, size_t hexlen, uint32_t regno) { struct xtensa_register *reg = NULL; int idx; size_t ret; ret = 0; for (idx = 0; idx < ctx->num_regs; idx++) { reg = &ctx->regs[idx]; /* * Remember GDB sends index number instead of * actual register number (as defined in Xtensa * architecture). 
*/ if (reg->idx != regno) { continue; } if (hexlen < (reg->byte_size * 2)) { /* Not enough hex digits to fill the register */ goto out; } /* Register value is now up-to-date */ reg->seqno = ctx->seqno; /* Convert from hexadecimal into binary */ ret = hex2bin(hex, hexlen, (uint8_t *)&reg->val, reg->byte_size); break; } out: return ret; } int arch_gdb_add_breakpoint(struct gdb_ctx *ctx, uint8_t type, uintptr_t addr, uint32_t kind) { int ret, idx; uint32_t ibreakenable; switch (type) { case 1: /* Hardware breakpoint */ ibreakenable = get_one_sreg(IBREAKENABLE); for (idx = 0; idx < MAX(XCHAL_NUM_IBREAK, 2); idx++) { /* Find an empty IBREAK slot */ if ((ibreakenable & BIT(idx)) == 0) { /* Set breakpoint address */ if (idx == 0) { set_one_sreg(IBREAKA0, addr); } else if (idx == 1) { set_one_sreg(IBREAKA1, addr); } else { ret = -1; goto out; } /* Enable the breakpoint */ ibreakenable |= BIT(idx); set_one_sreg(IBREAKENABLE, ibreakenable); ret = 0; goto out; } } /* Cannot find an empty slot, return error */ ret = -1; break; case 0: /* * Software breakpoint is to replace the instruction at * target address with BREAK or BREAK.N. GDB, by default, * does this by using memory write packets to replace * instructions. So there is no need to implement * software breakpoint here. 
*/ __fallthrough; default: /* Breakpoint type not supported */ ret = -2; break; } out: return ret; } int arch_gdb_remove_breakpoint(struct gdb_ctx *ctx, uint8_t type, uintptr_t addr, uint32_t kind) { int ret, idx; uint32_t ibreakenable, ibreak; switch (type) { case 1: /* Hardware breakpoint */ ibreakenable = get_one_sreg(IBREAKENABLE); for (idx = 0; idx < MAX(XCHAL_NUM_IBREAK, 2); idx++) { /* Find an active IBREAK slot and compare address */ if ((ibreakenable & BIT(idx)) == BIT(idx)) { if (idx == 0) { ibreak = get_one_sreg(IBREAKA0); } else if (idx == 1) { ibreak = get_one_sreg(IBREAKA1); } else { ret = -1; goto out; } if (ibreak == addr) { /* Clear breakpoint address */ if (idx == 0) { set_one_sreg(IBREAKA0, 0U); } else if (idx == 1) { set_one_sreg(IBREAKA1, 0U); } else { ret = -1; goto out; } /* Disable the breakpoint */ ibreakenable &= ~BIT(idx); set_one_sreg(IBREAKENABLE, ibreakenable); ret = 0; goto out; } } } /* * Cannot find matching breakpoint address, * return error. */ ret = -1; break; case 0: /* * Software breakpoint is to replace the instruction at * target address with BREAK or BREAK.N. GDB, by default, * does this by using memory write packets to restore * instructions. So there is no need to implement * software breakpoint here. 
*/ __fallthrough; default: /* Breakpoint type not supported */ ret = -2; break; } out: return ret; } void z_gdb_isr(struct arch_esf *esf) { uint32_t reg; reg = get_one_sreg(DEBUGCAUSE); if (reg != 0) { /* Manual breaking */ xtensa_gdb_ctx.exception = GDB_EXCEPTION_BREAKPOINT; } else { /* Actual exception */ reg = get_one_sreg(EXCCAUSE); xtensa_gdb_ctx.exception = get_gdb_exception_reason(reg); } xtensa_gdb_ctx.seqno++; /* Copy registers into GDB context */ copy_to_ctx(&xtensa_gdb_ctx, esf); z_gdb_main_loop(&xtensa_gdb_ctx); /* Restore registers from GDB context */ restore_from_ctx(&xtensa_gdb_ctx, esf); } void arch_gdb_init(void) { int idx; /* * Find out the starting index in the register * description array of certain registers. */ for (idx = 0; idx < xtensa_gdb_ctx.num_regs; idx++) { switch (xtensa_gdb_ctx.regs[idx].regno) { case 0x0000: /* A0: 0x0000 */ xtensa_gdb_ctx.a0_idx = idx; break; case XTREG_GRP_ADDR: /* AR0: 0x0100 */ xtensa_gdb_ctx.ar_idx = idx; break; case (XTREG_GRP_SPECIAL + WINDOWBASE): /* WINDOWBASE (Special Register) */ xtensa_gdb_ctx.wb_idx = idx; break; default: break; }; } /* * The interrupt enable bits for higher level interrupts * (level 2+) sit just after the level-1 interrupts. * The need to do a minus 2 is simply that the first bit * after level-1 interrupts is for level-2 interrupt. * So need to do an offset by subtraction. */ xtensa_irq_enable(XCHAL_NUM_EXTINTERRUPTS + XCHAL_DEBUGLEVEL - 2); /* * Break and go into the GDB stub. * The underscore in front is to avoid toolchain * converting BREAK.N into BREAK which is bigger. * This is needed as the GDB stub will need to change * the program counter past this instruction to * continue working. Or else SoC would repeatedly * raise debug exception on this instruction and * won't go forward. */ __asm__ volatile ("_break.n 0"); } ```
/content/code_sandbox/arch/xtensa/core/gdbstub.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,769
```c /* * */ #include <string.h> #include <zephyr/arch/xtensa/syscall.h> #include <zephyr/internal/syscall_handler.h> #include <xtensa_internal.h> #ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER uintptr_t xtensa_syscall_helper(uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t arg4, uintptr_t arg5, uintptr_t arg6, uintptr_t call_id) { register uintptr_t a2 __asm__("%a2") = call_id; register uintptr_t a6 __asm__("%a6") = arg1; register uintptr_t a3 __asm__("%a3") = arg2; register uintptr_t a4 __asm__("%a4") = arg3; register uintptr_t a5 __asm__("%a5") = arg4; register uintptr_t a8 __asm__("%a8") = arg5; register uintptr_t a9 __asm__("%a9") = arg6; __asm__ volatile("syscall\n\t" : "=r" (a2) : "r" (a2), "r" (a6), "r" (a3), "r" (a4), "r" (a5), "r" (a8), "r" (a9) : "memory"); return a2; } #endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */ #if XCHAL_HAVE_THREADPTR == 0 #include <xtensa/config/core-isa.h> #include <xtensa/config/core.h> bool xtensa_is_user_context(void) { uint32_t ret; __asm__ volatile(".global xtensa_is_user_context_epc\n" " xtensa_is_user_context_epc:\n" " syscall\n" " mov %0, a2\n" : "=r"(ret) : : "a2"); return ret != 0; } #endif /* XCHAL_HAVE_THREADPTR */ size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg) { /* Check if we can actually read the whole length. * * arch_user_string_nlen() is supposed to naively go through * the string passed from user thread, and relies on page faults * to catch inaccessible strings, such that user thread can pass * a string that is shorter than the max length this function * caller expects. So at least we want to make sure kernel has * access to the whole length, aka. memory being mapped. * Note that arch_user_string_nlen() should never result in * thread termination due to page faults, and must always * return to the caller with err_arg set or cleared. 
* For MMU systems, unmapped memory will result in a DTLB miss * and that might trigger an infinite DTLB miss storm if * the corresponding L2 page table never exists in the first * place (which would result in DTLB misses through L1 page * table), until some other exceptions occur to break * the cycle. * For MPU systems, this would simply results in access errors * and the exception handler will terminate the thread. */ if (!xtensa_mem_kernel_has_access((void *)s, maxsize, 0)) { /* * API says we need to set err_arg to -1 if there are * any errors. */ *err_arg = -1; return 0; } /* No error and we can proceed to getting the string length. */ *err_arg = 0; return strnlen(s, maxsize); } ```
/content/code_sandbox/arch/xtensa/core/syscall_helper.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
796
```c /* */ #include <gen_offset.h> #include <kernel_offsets.h> #include <zephyr/arch/xtensa/thread.h> #include <xtensa_asm2_context.h> GEN_ABSOLUTE_SYM(___xtensa_irq_bsa_t_SIZEOF, sizeof(_xtensa_irq_bsa_t)); GEN_ABSOLUTE_SYM(___xtensa_irq_stack_frame_raw_t_SIZEOF, sizeof(_xtensa_irq_stack_frame_raw_t)); GEN_ABSOLUTE_SYM(___xtensa_irq_stack_frame_a15_t_SIZEOF, sizeof(_xtensa_irq_stack_frame_a15_t)); GEN_ABSOLUTE_SYM(___xtensa_irq_stack_frame_a11_t_SIZEOF, sizeof(_xtensa_irq_stack_frame_a11_t)); GEN_ABSOLUTE_SYM(___xtensa_irq_stack_frame_a7_t_SIZEOF, sizeof(_xtensa_irq_stack_frame_a7_t)); GEN_ABSOLUTE_SYM(___xtensa_irq_stack_frame_a3_t_SIZEOF, sizeof(_xtensa_irq_stack_frame_a3_t)); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, a0); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, scratch); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, a2); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, a3); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, exccause); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, pc); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, ps); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, sar); #if XCHAL_HAVE_LOOPS GEN_OFFSET_SYM(_xtensa_irq_bsa_t, lcount); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, lbeg); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, lend); #endif #if XCHAL_HAVE_S32C1I GEN_OFFSET_SYM(_xtensa_irq_bsa_t, scompare1); #endif #if XCHAL_HAVE_THREADPTR GEN_OFFSET_SYM(_xtensa_irq_bsa_t, threadptr); #endif #if XCHAL_HAVE_FP && defined(CONFIG_CPU_HAS_FPU) && defined(CONFIG_FPU_SHARING) GEN_OFFSET_SYM(_xtensa_irq_bsa_t, fcr); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, fsr); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, fpu0); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, fpu1); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, fpu2); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, fpu3); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, fpu4); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, fpu5); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, fpu6); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, fpu7); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, fpu8); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, fpu9); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, fpu10); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, fpu11); 
GEN_OFFSET_SYM(_xtensa_irq_bsa_t, fpu12); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, fpu13); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, fpu14); GEN_OFFSET_SYM(_xtensa_irq_bsa_t, fpu15); #endif #if defined(CONFIG_XTENSA_HIFI_SHARING) GEN_OFFSET_SYM(_xtensa_irq_bsa_t, hifi); #endif #ifdef CONFIG_USERSPACE GEN_OFFSET_SYM(_thread_arch_t, psp); #ifdef CONFIG_XTENSA_MMU GEN_OFFSET_SYM(_thread_arch_t, ptables); #endif #ifdef CONFIG_XTENSA_MPU GEN_OFFSET_SYM(_thread_arch_t, mpu_map); #endif #endif GEN_ABS_SYM_END ```
/content/code_sandbox/arch/xtensa/core/offsets/offsets.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
792
```unknown /* * */ #include <xtensa_asm2_s.h> #include <zephyr/offsets.h> #include <zephyr/zsr.h> #if defined(CONFIG_SIMULATOR_XTENSA) || defined(XT_SIMULATOR) #include <xtensa/simcall.h> #endif /* * xtensa_spill_reg_windows * * Spill all register windows. Not a C function, enter this via CALL0 * (so you have to save off A0, but no other registers need to be * spilled). On return, all registers not part of the current * function will be spilled to memory. The WINDOWSTART SR will have a * single 1 bit corresponding to the current frame at WINDOWBASE. */ .global xtensa_spill_reg_windows .align 4 xtensa_spill_reg_windows: SPILL_ALL_WINDOWS ret /* * xtensa_save_high_regs * * Call with CALL0, with A2/A3 available as scratch. Pushes the high * A4-A15 GPRs to the stack if needed (i.e. if those registers are not * part of wrapped-around frames higher up the call stack), returning * to the caller with the stack pointer HAVING BEEN MODIFIED to * contain them. */ .global xtensa_save_high_regs .align 4 xtensa_save_high_regs: /* Generate a rotated (modulo NREGS/4 bits!) WINDOWSTART in A2 * by duplicating the bits twice and shifting down by WINDOWBASE * bits. Now the LSB is the register quad at WINDOWBASE. */ rsr a2, WINDOWSTART slli a3, a2, (XCHAL_NUM_AREGS / 4) or a2, a2, a3 rsr a3, WINDOWBASE ssr a3 srl a2, a2 mov a3, a1 /* Stash our original stack pointer */ /* For the next three bits in WINDOWSTART (which correspond to * the A4-A7, A8-A11 and A12-A15 quads), if we find a one, * that means that the quad is owned by a wrapped-around call * in the registers, so we don't need to spill it or any * further registers from the GPRs and can skip to the end. 
*/ bbsi a2, 1, _high_gpr_spill_done addi a1, a1, -16 s32i a4, a1, 0 s32i a5, a1, 4 s32i a6, a1, 8 s32i a7, a1, 12 bbsi a2, 2, _high_gpr_spill_done addi a1, a1, -16 s32i a8, a1, 0 s32i a9, a1, 4 s32i a10, a1, 8 s32i a11, a1, 12 bbsi a2, 3, _high_gpr_spill_done addi a1, a1, -16 s32i a12, a1, 0 s32i a13, a1, 4 s32i a14, a1, 8 s32i a15, a1, 12 _high_gpr_spill_done: /* Push the original stack pointer so we know at restore * time how many registers were spilled, then return, leaving the * modified SP in A1. */ addi a1, a1, -4 s32i a3, a1, 0 ret /* * xtensa_restore_high_regs * * Does the inverse of xtensa_save_high_regs, taking a stack pointer * in A1 that resulted and restoring the A4-A15 state (and the stack * pointer) to the state they had at the earlier call. Call with * CALL0, leaving A2/A3 available as scratch. */ .global xtensa_restore_high_regs .align 4 xtensa_restore_high_regs: /* pop our "original" stack pointer into a2, stash in a3 also */ l32i a2, a1, 0 addi a1, a1, 4 mov a3, a2 beq a1, a2, _high_restore_done addi a2, a2, -16 l32i a4, a2, 0 l32i a5, a2, 4 l32i a6, a2, 8 l32i a7, a2, 12 beq a1, a2, _high_restore_done addi a2, a2, -16 l32i a8, a2, 0 l32i a9, a2, 4 l32i a10, a2, 8 l32i a11, a2, 12 beq a1, a2, _high_restore_done addi a2, a2, -16 l32i a12, a2, 0 l32i a13, a2, 4 l32i a14, a2, 8 l32i a15, a2, 12 _high_restore_done: mov a1, a3 /* Original stack */ ret /* * _restore_context * * Arrive here via a jump. Enters into the restored context and does * not return. A1 should have a context pointer in it as received * from switch or an interrupt exit. Interrupts must be disabled, * and register windows should have been spilled. * * Note that exit from the restore is done with the RFI instruction, * using the EPCn/EPSn registers. Those will have been saved already * by any interrupt entry so they are save to use. Note that EPC1 and * RFE are NOT usable (they can't preserve PS). 
Per the ISA spec, all * RFI levels do the same thing and differ only in the special * registers used to hold PC/PS, but Qemu has been observed to behave * strangely when RFI doesn't "return" to a INTLEVEL strictly lower * than it started from. So we leverage the zsr.h framework to pick * the highest level available for our specific platform. */ .global _restore_context _restore_context: call0 xtensa_restore_high_regs l32i a0, a1, ___xtensa_irq_bsa_t_pc_OFFSET wsr a0, ZSR_EPC l32i a0, a1, ___xtensa_irq_bsa_t_ps_OFFSET wsr a0, ZSR_EPS #if XCHAL_HAVE_FP && defined(CONFIG_CPU_HAS_FPU) && defined(CONFIG_FPU_SHARING) FPU_REG_RESTORE #endif #if defined(CONFIG_XTENSA_HIFI_SHARING) .extern _xtensa_hifi_load call0 _xtensa_hifi_load #endif l32i a0, a1, ___xtensa_irq_bsa_t_sar_OFFSET wsr a0, SAR #if XCHAL_HAVE_LOOPS l32i a0, a1, ___xtensa_irq_bsa_t_lbeg_OFFSET wsr a0, LBEG l32i a0, a1, ___xtensa_irq_bsa_t_lend_OFFSET wsr a0, LEND l32i a0, a1, ___xtensa_irq_bsa_t_lcount_OFFSET wsr a0, LCOUNT #endif #if XCHAL_HAVE_S32C1I l32i a0, a1, ___xtensa_irq_bsa_t_scompare1_OFFSET wsr a0, SCOMPARE1 #endif #if XCHAL_HAVE_THREADPTR && \ (defined(CONFIG_USERSPACE) || defined(CONFIG_THREAD_LOCAL_STORAGE)) l32i a0, a1, ___xtensa_irq_bsa_t_threadptr_OFFSET wur a0, THREADPTR #endif rsync l32i a0, a1, ___xtensa_irq_bsa_t_a0_OFFSET l32i a2, a1, ___xtensa_irq_bsa_t_a2_OFFSET l32i a3, a1, ___xtensa_irq_bsa_t_a3_OFFSET addi a1, a1, ___xtensa_irq_bsa_t_SIZEOF rfi ZSR_RFI_LEVEL /* * void xtensa_arch_except(int reason_p); * * Implements hardware exception for Xtensa ARCH_EXCEPT to save * interrupted stack frame and reason_p for use in exception handler * and coredump */ .global xtensa_arch_except .global xtensa_arch_except_epc .align 4 xtensa_arch_except: entry a1, 16 xtensa_arch_except_epc: ill retw /* * void xtensa_arch_kernel_oops(int reason_p, void *ssf); * * Simply to raise hardware exception for Kernel OOPS. 
*/ .global xtensa_arch_kernel_oops .global xtensa_arch_kernel_oops_epc .align 4 xtensa_arch_kernel_oops: entry a1, 16 xtensa_arch_kernel_oops_epc: ill retw /* * void xtensa_switch(void *new, void **old_return); * * Context switches into the previously-saved "new" handle, placing * the saved "old" handle into the address provided by old_return. */ .global xtensa_switch .align 4 xtensa_switch: entry a1, 16 SPILL_ALL_WINDOWS addi a1, a1, -___xtensa_irq_bsa_t_SIZEOF /* Stash our A0/2/3 and the shift/loop registers into the base * save area so they get restored as they are now. A2/A3 * don't actually get used post-restore, but they need to be * stashed across the xtensa_save_high_regs call and this is a * convenient place. */ s32i a0, a1, ___xtensa_irq_bsa_t_a0_OFFSET s32i a2, a1, ___xtensa_irq_bsa_t_a2_OFFSET s32i a3, a1, ___xtensa_irq_bsa_t_a3_OFFSET ODD_REG_SAVE /* Stash our PS register contents and a "restore" PC. */ rsr a0, PS s32i a0, a1, ___xtensa_irq_bsa_t_ps_OFFSET movi a0, _switch_restore_pc s32i a0, a1, ___xtensa_irq_bsa_t_pc_OFFSET #if defined(CONFIG_XTENSA_HIFI_SHARING) call0 _xtensa_hifi_save #endif /* Now the high registers */ call0 xtensa_save_high_regs #ifdef CONFIG_KERNEL_COHERENCE /* Flush the stack. The top of stack was stored for us by * arch_cohere_stacks(). It can be NULL for a dummy thread. */ rsr a0, ZSR_FLUSH beqz a0, noflush mov a3, a1 flushloop: dhwb a3, 0 addi a3, a3, XCHAL_DCACHE_LINESIZE blt a3, a0, flushloop noflush: #endif /* Restore the A3 argument we spilled earlier (via the base * save pointer pushed at the bottom of the stack) and set the * stack to the "new" context out of the A2 spill slot. 
*/ l32i a2, a1, 0 l32i a3, a2, ___xtensa_irq_bsa_t_a3_OFFSET s32i a1, a3, 0 #ifdef CONFIG_USERSPACE /* Switch page tables */ rsr a6, ZSR_CPU l32i a6, a6, ___cpu_t_current_OFFSET #ifdef CONFIG_XTENSA_MMU call4 xtensa_swap_update_page_tables #endif #ifdef CONFIG_XTENSA_MPU call4 xtensa_mpu_map_write #endif l32i a2, a3, 0 l32i a2, a2, 0 #endif /* Switch stack pointer and restore. The jump to * _restore_context does not return as such, but we arrange * for the restored "next" address to be immediately after for * sanity. */ l32i a1, a2, ___xtensa_irq_bsa_t_a2_OFFSET #ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING call4 z_thread_mark_switched_in #endif j _restore_context _switch_restore_pc: retw /* Define our entry handler to load the struct kernel_t from the * MISC0 special register, and to find the nest and irq_stack values * at the precomputed offsets. */ .align 4 _handle_excint: EXCINT_HANDLER ___cpu_t_nested_OFFSET, ___cpu_t_irq_stack_OFFSET /* Define the actual vectors for the hardware-defined levels with * DEF_EXCINT. These load a C handler address and jump to our handler * above. */ DEF_EXCINT 1, _handle_excint, xtensa_excint1_c /* In code below we are using XCHAL_NMILEVEL and XCHAL_NUM_INTLEVELS * (whichever is higher), since not all Xtensa configurations support * NMI. In such case we will use XCHAL_NUM_INTLEVELS. 
*/ #if XCHAL_HAVE_NMI #define MAX_INTR_LEVEL XCHAL_NMILEVEL #elif XCHAL_HAVE_INTERRUPTS #define MAX_INTR_LEVEL XCHAL_NUM_INTLEVELS #else #error Xtensa core with no interrupt support is used #define MAX_INTR_LEVEL 0 #endif #if MAX_INTR_LEVEL >= 2 #if !(defined(CONFIG_GDBSTUB) && (XCHAL_DEBUGLEVEL == 2)) DEF_EXCINT 2, _handle_excint, xtensa_int2_c #endif #endif #if MAX_INTR_LEVEL >= 3 #if !(defined(CONFIG_GDBSTUB) && (XCHAL_DEBUGLEVEL == 3)) DEF_EXCINT 3, _handle_excint, xtensa_int3_c #endif #endif #if MAX_INTR_LEVEL >= 4 #if !(defined(CONFIG_GDBSTUB) && (XCHAL_DEBUGLEVEL == 4)) DEF_EXCINT 4, _handle_excint, xtensa_int4_c #endif #endif #if MAX_INTR_LEVEL >= 5 #if !(defined(CONFIG_GDBSTUB) && (XCHAL_DEBUGLEVEL == 5)) DEF_EXCINT 5, _handle_excint, xtensa_int5_c #endif #endif #if MAX_INTR_LEVEL >= 6 #if !(defined(CONFIG_GDBSTUB) && (XCHAL_DEBUGLEVEL == 6)) DEF_EXCINT 6, _handle_excint, xtensa_int6_c #endif #endif #if MAX_INTR_LEVEL >= 7 #if !(defined(CONFIG_GDBSTUB) && (XCHAL_DEBUGLEVEL == 7)) DEF_EXCINT 7, _handle_excint, xtensa_int7_c #endif #endif #if defined(CONFIG_GDBSTUB) DEF_EXCINT XCHAL_DEBUGLEVEL, _handle_excint, xtensa_debugint_c #endif /* The user exception vector is defined here, as we need to handle * MOVSP exceptions in assembly (the result has to be to unspill the * caller function of the code that took the exception, and that can't * be done in C). A prototype exists which mucks with the stack frame * from the C handler instead, but that would add a LARGE overhead to * some alloca() calls (those whent he caller has been spilled) just * to save these five cycles during other exceptions and L1 * interrupts. Maybe revisit at some point, with better benchmarking. * Note that _xt_alloca_exc is Xtensa-authored code which expects A0 * to have been saved to EXCSAVE1, we've modified it to use the zsr.h * API to get assigned a scratch register. 
*/ .pushsection .UserExceptionVector.text, "ax" .global _Level1RealVector _Level1RealVector: wsr a0, ZSR_A0SAVE rsync rsr.exccause a0 #ifdef CONFIG_XTENSA_MMU beqi a0, EXCCAUSE_ITLB_MISS, _handle_tlb_miss_user #endif /* CONFIG_XTENSA_MMU */ #ifdef CONFIG_USERSPACE beqi a0, EXCCAUSE_SYSCALL, _syscall #endif /* CONFIG_USERSPACE */ #ifdef CONFIG_XTENSA_MMU addi a0, a0, -EXCCAUSE_DTLB_MISS beqz a0, _handle_tlb_miss_user rsr.exccause a0 #endif /* CONFIG_XTENSA_MMU */ bnei a0, EXCCAUSE_ALLOCA, _not_alloca j _xt_alloca_exc _not_alloca: rsr a0, ZSR_A0SAVE j _Level1Vector #ifdef CONFIG_XTENSA_MMU _handle_tlb_miss_user: /** * Handle TLB miss by loading the PTE page: * The way it works is, when we try to access an address that is not * mapped, we will have a miss. The HW then will try to get the * correspondent memory in the page table. As the page table is not * mapped in memory we will have a second miss, which will trigger * an exception. In the exception (here) what we do is to exploit * this hardware capability just trying to load the page table * (not mapped address), which will cause a miss, but then the hardware * will automatically map it again from the page table. This time * it will work since the page necessary to map the page table itself * are wired map. */ rsr.ptevaddr a0 l32i a0, a0, 0 rsr a0, ZSR_A0SAVE rfe #endif /* CONFIG_XTENSA_MMU */ #ifdef CONFIG_USERSPACE _syscall: rsr a0, ZSR_A0SAVE j xtensa_do_syscall #endif /* CONFIG_USERSPACE */ .popsection /* In theory you can have levels up to 15, but known hardware only uses 7. */ #if XCHAL_NMILEVEL > 7 #error More interrupts than expected. #endif /* We don't actually use "kernel mode" currently. Populate the vector * out of simple caution in case app code clears the UM bit by mistake. 
*/ .pushsection .KernelExceptionVector.text, "ax" .global _KernelExceptionVector _KernelExceptionVector: #ifdef CONFIG_XTENSA_MMU wsr a0, ZSR_A0SAVE rsr.exccause a0 beqi a0, EXCCAUSE_ITLB_MISS, _handle_tlb_miss_kernel addi a0, a0, -EXCCAUSE_DTLB_MISS beqz a0, _handle_tlb_miss_kernel rsr a0, ZSR_A0SAVE #endif j _Level1Vector #ifdef CONFIG_XTENSA_MMU _handle_tlb_miss_kernel: /* The TLB miss handling is used only during xtensa_mmu_init() * where vecbase is at a different address, as the offset used * in the jump ('j') instruction will not jump to correct * address (... remember the vecbase is moved). * So we handle TLB misses in a very simple way here until * we move back to using UserExceptionVector above. */ rsr.ptevaddr a0 l32i a0, a0, 0 rsr a0, ZSR_A0SAVE rfe #endif .popsection #ifdef XCHAL_DOUBLEEXC_VECTOR_VADDR .pushsection .DoubleExceptionVector.text, "ax" .global _DoubleExceptionVector _DoubleExceptionVector: #ifdef CONFIG_XTENSA_MMU wsr a0, ZSR_DBLEXC rsync rsr.exccause a0 addi a0, a0, -EXCCAUSE_DTLB_MISS beqz a0, _handle_tlb_miss_dblexc /* Need to stash the DEPC for used by the C handler. * If we encounter any DTLB misses when PS.EXCM is set, * this vector will be used and the DEPC register will * have the new address instead of the one resulted in * double exception. */ rsr.depc a0 wsr a0, ZSR_DEPC_SAVE rsr a0, ZSR_DBLEXC j _Level1Vector _TripleFault: #endif /* CONFIG_XTENSA_MMU */ #if XCHAL_HAVE_DEBUG && defined(CONFIG_XTENSA_BREAK_ON_UNRECOVERABLE_EXCEPTIONS) /* Signals an unhandled double exception, and unrecoverable exceptions. * Definitely needs debugger to be attached to the hardware or simulator * to catch this. */ break 1, 4 #elif defined(CONFIG_SIMULATOR_XTENSA) || defined(XT_SIMULATOR) /* Tell simulator to stop executing here, instead of trying to do * an infinite loop (see below). Greatly help with using tracing in * simulator so that traces will not have infinite iterations of * jumps. 
*/ movi a3, 1 movi a2, SYS_exit simcall #endif 1: j 1b #ifdef CONFIG_XTENSA_MMU _handle_tlb_miss_dblexc: /* Handle all data TLB misses here. * These data TLB misses are mostly caused by preloading * page table entries in the level 1 exception handler. * Failure to load the PTE will result in another exception * with different failure (exccause), which can be handled * when the CPU re-enters the double exception handler. */ rsr.ptevaddr a0 l32i a0, a0, 0 rsr a0, ZSR_DBLEXC rfde #endif .popsection #endif ```
/content/code_sandbox/arch/xtensa/core/xtensa_asm2_util.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,837
```unknown /* memerror-vector.S -- Memory Error Exception Vector and Handler */ /* * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <xtensa/coreasm.h> #include <xtensa/corebits.h> /* This file just contains this one symbol, used by the reset code. * It is here rather than in reset-vector.S because we want the symbol * to be external, so resolution is delayed until link time. * * To define your own value to override this default, redefine the * symbol _MemErrorHandler to the desired value, e.g. - * * xt-xcc test.c -g -o test -Wl,--defsym=_MemErrorHandler=0x08080808 */ .global _MemErrorHandler .weak _MemErrorHandler .equ _MemErrorHandler, XCHAL_CACHE_MEMCTL_DEFAULT ```
/content/code_sandbox/arch/xtensa/core/startup/memerror_vector.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
388
```unknown // memctl_default.S - Default startup value for MEMCTL register. // // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. // IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, // TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE // SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include <xtensa/coreasm.h> #include <xtensa/config/system.h> // This file just contains this one symbol, used by the reset code. // It is here rather than in reset-vector.S because we want the symbol // to be external, so resolution is delayed until link time. // // To define your own value to override this default, redefine the // symbol __memctl_default to the desired value, e.g. - // // xt-xcc test.c -g -o test -Wl,--defsym=__memctl_default=0x08080808 // .global __memctl_default .weak __memctl_default .equ __memctl_default, XCHAL_CACHE_MEMCTL_DEFAULT ```
/content/code_sandbox/arch/xtensa/core/startup/memctl_default.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
391
```objective-c /* * */ #ifndef ZEPHYR_ARCH_XTENSA_INCLUDE_XTENSA_INTERNAL_H_ #define ZEPHYR_ARCH_XTENSA_INCLUDE_XTENSA_INTERNAL_H_ #include <stdint.h> #include <zephyr/arch/xtensa/exception.h> #include <zephyr/arch/arch_interface.h> /** * @ingroup xtensa_internal_apis * @{ */ /** * @brief Dump and print out the stack frame content. * * This mainly prints out the registers stashed in the stack frame. * * @param stack Pointer to stack frame. */ void xtensa_dump_stack(const void *stack); /** * @brief Get string description from an exception code. * * @param cause_code Exception code. * * @return String description. */ char *xtensa_exccause(unsigned int cause_code); /** * @brief Called upon a fatal error. * * @param reason The reason for the fatal error * @param esf Exception context, with details and partial or full register * state when the error occurred. May in some cases be NULL. */ void xtensa_fatal_error(unsigned int reason, const struct arch_esf *esf); /** * @brief Perform a one-way transition from supervisor to user mode. * * @see arch_user_mode_enter */ void xtensa_userspace_enter(k_thread_entry_t user_entry, void *p1, void *p2, void *p3, uintptr_t stack_end, uintptr_t stack_start); /** * @brief Check if kernel threads have access to a memory region. * * Given a memory region, return whether the current memory management * hardware configuration would allow kernel threads to read/write * that region. * * This is mainly used to make sure kernel has access to avoid relying * on page fault to detect invalid mappings. * * @param addr Start address of the buffer * @param size Size of the buffer * @param write If non-zero, additionally check if the area is writable. * Otherwise, just check if the memory can be read. * * @return False if the permissions don't match. */ bool xtensa_mem_kernel_has_access(void *addr, size_t size, int write); /** * @} */ #endif /* ZEPHYR_ARCH_XTENSA_INCLUDE_XTENSA_INTERNAL_H_ */ ```
/content/code_sandbox/arch/xtensa/include/xtensa_internal.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
483
```objective-c /* */ /* this file is only meant to be included by kernel_structs.h */ #ifndef ZEPHYR_ARCH_XTENSA_INCLUDE_KERNEL_ARCH_FUNC_H_ #define ZEPHYR_ARCH_XTENSA_INCLUDE_KERNEL_ARCH_FUNC_H_ #ifndef _ASMLANGUAGE #include <kernel_internal.h> #include <string.h> #include <zephyr/cache.h> #include <zephyr/zsr.h> #ifdef __cplusplus extern "C" { #endif K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS, CONFIG_ISR_STACK_SIZE); static ALWAYS_INLINE void arch_kernel_init(void) { } void xtensa_switch(void *switch_to, void **switched_from); static ALWAYS_INLINE void arch_switch(void *switch_to, void **switched_from) { return xtensa_switch(switch_to, switched_from); } #ifdef CONFIG_KERNEL_COHERENCE static ALWAYS_INLINE void arch_cohere_stacks(struct k_thread *old_thread, void *old_switch_handle, struct k_thread *new_thread) { int32_t curr_cpu = _current_cpu->id; size_t ostack = old_thread->stack_info.start; size_t osz = old_thread->stack_info.size; size_t osp = (size_t) old_switch_handle; size_t nstack = new_thread->stack_info.start; size_t nsz = new_thread->stack_info.size; size_t nsp = (size_t) new_thread->switch_handle; int zero = 0; __asm__ volatile("wsr %0, " ZSR_FLUSH_STR :: "r"(zero)); if (old_switch_handle != NULL) { int32_t a0save; __asm__ volatile("mov %0, a0;" "call0 xtensa_spill_reg_windows;" "mov a0, %0" : "=r"(a0save)); } /* The following option ensures that a living thread will never * be executed in a different CPU so we can safely return without * invalidate and/or flush threads cache. */ if (IS_ENABLED(CONFIG_SCHED_CPU_MASK_PIN_ONLY)) { return; } /* The "live" area (the region between the switch handle, * which is the stack pointer, and the top of the stack * memory) of the inbound stack needs to be invalidated if we * last ran on another cpu: it may contain data that was * modified there, and our cache may be stale. * * The corresponding "dead area" of the inbound stack can be * ignored. 
We may have cached data in that region, but by * definition any unused stack memory will always be written * before being read (well, unless the code has an * uninitialized data error) so our stale cache will be * automatically overwritten as needed. */ if (curr_cpu != new_thread->arch.last_cpu) { sys_cache_data_invd_range((void *)nsp, (nstack + nsz) - nsp); } old_thread->arch.last_cpu = curr_cpu; /* Dummy threads appear at system initialization, but don't * have stack_info data and will never be saved. Ignore. */ if (old_thread->base.thread_state & _THREAD_DUMMY) { return; } /* For the outbound thread, we obviousy want to flush any data * in the live area (for the benefit of whichever CPU runs * this thread next). But we ALSO have to invalidate the dead * region of the stack. Those lines may have DIRTY data in * our own cache, and we cannot be allowed to write them back * later on top of the stack's legitimate owner! * * This work comes in two flavors. In interrupts, the * outgoing context has already been saved for us, so we can * do the flush right here. In direct context switches, we * are still using the stack, so we do the invalidate of the * bottom here, (and flush the line containing SP to handle * the overlap). The remaining flush of the live region * happens in the assembly code once the context is pushed, up * to the stack top stashed in a special register. */ if (old_switch_handle != NULL) { sys_cache_data_flush_range((void *)osp, (ostack + osz) - osp); sys_cache_data_invd_range((void *)ostack, osp - ostack); } else { /* When in a switch, our current stack is the outbound * stack. Flush the single line containing the stack * bottom (which is live data) before invalidating * everything below that. Remember that the 16 bytes * below our SP are the calling function's spill area * and may be live too. 
*/ __asm__ volatile("mov %0, a1" : "=r"(osp)); osp -= 16; sys_cache_data_flush_range((void *)osp, 1); sys_cache_data_invd_range((void *)ostack, osp - ostack); uint32_t end = ostack + osz; __asm__ volatile("wsr %0, " ZSR_FLUSH_STR :: "r"(end)); } } #endif static inline bool arch_is_in_isr(void) { return arch_curr_cpu()->nested != 0U; } #ifdef __cplusplus } #endif #endif /* _ASMLANGUAGE */ #endif /* ZEPHYR_ARCH_XTENSA_INCLUDE_KERNEL_ARCH_FUNC_H_ */ ```
/content/code_sandbox/arch/xtensa/include/kernel_arch_func.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,202
```objective-c /* * */ #ifndef ZEPHYR_ARCH_XTENSA_XTENSA_STACK_H_ #define ZEPHYR_ARCH_XTENSA_XTENSA_STACK_H_ #include <stdbool.h> #include <stddef.h> #include <stdint.h> #include <xtensa_asm2_context.h> /** * @defgroup xtensa_stack_internal_apis Xtensa Stack related Internal APIs * @ingroup xtensa_stack_apis * @{ */ /** * @brief Check if memory region is within correct stack boundaries. * * Check if the memory region [@a addr, (@a addr + @a sz)) is within * correct stack boundaries: * - Interrupt stack if servicing interrupts. * - Privileged stack if in kernel mode doing syscalls. * - Thread stack otherwise. * * @note When @ps == UINT32_MAX, it checks the whole range of stack * object because we cannot get PS via frame pointer yet. * * @param addr Beginning address of memory region to check. * @param sz Size of memory region to check. Can be zero. * @param ps PS register value of interrupted context. Use UINT32_MAX if * PS cannot be determined at time of call. * * @return True if memory region is outside stack bounds, false otherwise. */ bool xtensa_is_outside_stack_bounds(uintptr_t addr, size_t sz, uint32_t ps); /** * @brief Check if frame pointer is within correct stack boundaries. * * Check if the frame pointer and its associated BSA (base save area) are * within correct stack boundaries. Use @ref xtensa_is_outside_stack_bounds * to determine validity. * * @param frame Frame Pointer. Cannot be NULL. */ bool xtensa_is_frame_pointer_valid(_xtensa_irq_stack_frame_raw_t *frame); /** * @} */ #endif /* ZEPHYR_ARCH_XTENSA_XTENSA_STACK_H_ */ ```
/content/code_sandbox/arch/xtensa/include/xtensa_stack.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
397
```unknown /* */ #include <xtensa/coreasm.h> #include <xtensa/corebits.h> #include <xtensa/cacheasm.h> #include <xtensa/cacheattrasm.h> #include <xtensa/xtensa-xer.h> #include <xtensa/xdm-regs.h> #include <xtensa/config/specreg.h> #include <xtensa/config/system.h> /* for XSHAL_USE_ABSOLUTE_LITERALS only */ #include <xtensa/xtruntime-core-state.h> /* * The following reset vector avoids initializing certain registers already * initialized by processor reset. But it does initialize some of them * anyway, for minimal support of warm restart (restarting in software by * jumping to the reset vector rather than asserting hardware reset). */ .begin literal_prefix .ResetVector .section .ResetVector.text, "ax" .align 4 .global __start __start: #if (!XCHAL_HAVE_HALT || defined(XTOS_UNPACK)) && XCHAL_HAVE_IMEM_LOADSTORE /* * NOTE: * * IMPORTANT: If you move the _ResetHandler portion to a section * other than .ResetVector.text that is outside the range of * the reset vector's 'j' instruction, the _ResetHandler symbol * and a more elaborate j/movi/jx sequence are needed in * .ResetVector.text to dispatch to the new location. */ j _ResetHandler .size __start, . - __start #if XCHAL_HAVE_HALT /* * Xtensa TX: reset vector segment is only 4 bytes, so must place the * unpacker code elsewhere in the memory that contains the reset * vector. 
*/ #if XCHAL_RESET_VECTOR_VADDR == XCHAL_INSTRAM0_VADDR .section .iram0.text, "ax" #elif XCHAL_RESET_VECTOR_VADDR == XCHAL_INSTROM0_VADDR .section .irom0.text, "ax" #elif XCHAL_RESET_VECTOR_VADDR == XCHAL_URAM0_VADDR .section .uram0.text, "ax" #else #warning "Xtensa TX reset vector not at start of iram0, irom0, or uram0 -- ROMing LSPs may not work" .text #endif #endif /* XCHAL_HAVE_HALT */ .extern __memctl_default .align 4 /* tells the assembler/linker to place literals here */ .literal_position .align 4 .global _ResetHandler _ResetHandler: #endif #if !XCHAL_HAVE_HALT /* * Even if the processor supports the non-PC-relative L32R option, * it will always start up in PC-relative mode. We take advantage of * this, and use PC-relative mode at least until we're sure the .lit4 * section is in place (which is sometimes only after unpacking). */ .begin no-absolute-literals /* * If we have dynamic cache way support, init the caches as soon * as we can, which is now. Except, if we are waking up from a * PSO event, then we need to do this slightly later. */ #if XCHAL_HAVE_ICACHE_DYN_WAYS || XCHAL_HAVE_DCACHE_DYN_WAYS # if XCHAL_HAVE_PSO_CDM && !XCHAL_HAVE_PSO_FULL_RETENTION /* Do this later on in the code -- see below */ # else movi a0, __memctl_default wsr a0, MEMCTL # endif #endif /* * If we have PSO support, then we must check for a warm start with * caches left powered on. If the caches had been left powered on, * we must restore the state of MEMCTL to the saved state if any. * Note that MEMCTL may not be present depending on config. 
*/ #if XCHAL_HAVE_PSO_CDM && !XCHAL_HAVE_PSO_FULL_RETENTION /* Read PWRSTAT */ movi a2, XDM_MISC_PWRSTAT /* Save area address - retained for later */ movi a3, _xtos_pso_savearea /* Signature for compare - retained for later */ movi a5, CORE_STATE_SIGNATURE /* PWRSTAT value - retained for later */ rer a7, a2 /* Now bottom 2 bits are core wakeup and cache power lost */ extui a4, a7, 1, 2 /* a4==1 means PSO wakeup, caches did not lose power */ bnei a4, 1, .Lcold_start /* Load save area signature field */ l32i a4, a3, CS_SA_signature sub a4, a4, a5 /* If signature mismatch then do cold start */ bnez a4, .Lcold_start #if XCHAL_USE_MEMCTL /* Load saved MEMCTL value */ l32i a4, a3, CS_SA_memctl movi a0, ~MEMCTL_INV_EN /* Clear invalidate bit */ and a0, a4, a0 wsr a0, MEMCTL #endif j .Lwarm_start .Lcold_start: #if XCHAL_HAVE_ICACHE_DYN_WAYS || XCHAL_HAVE_DCACHE_DYN_WAYS /* * Enable and invalidate all ways of both caches. If there is no * dynamic way support then this write will have no effect. */ movi a0, __memctl_default wsr a0, MEMCTL #endif .Lwarm_start: #endif /* a0 is always 0 in this code, used to initialize lots of things */ movi a0, 0 /* technically this should be under !FULL_RESET, assuming hard reset */ #if XCHAL_HAVE_INTERRUPTS /* make sure that interrupts are shut off (*before* we lower * PS.INTLEVEL and PS.EXCM!) */ wsr a0, INTENABLE #endif #if !XCHAL_HAVE_FULL_RESET /* pre-LX2 cores only */ #if XCHAL_HAVE_CCOUNT && (XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RB_2006_0) /* not really necessary, but nice; best done very early */ wsr a0, CCOUNT #endif /* * For full MMU configs, put page table at an unmapped virtual address. * This ensures that accesses outside the static maps result * in miss exceptions rather than random behaviour. * Assumes XCHAL_SEG_MAPPABLE_VADDR == 0 (true in released MMU). 
*/ #if XCHAL_ITLB_ARF_WAYS > 0 || XCHAL_DTLB_ARF_WAYS > 0 wsr a0, PTEVADDR #endif /* * Debug initialization * * NOTE: DBREAKCn must be initialized before the combination of these * two things: any load/store, and a lowering of PS.INTLEVEL below * DEBUG_LEVEL. The processor already resets IBREAKENABLE * appropriately. */ #if XCHAL_HAVE_DEBUG #if XCHAL_NUM_DBREAK #if XCHAL_NUM_DBREAK >= 2 wsr a0, DBREAKC1 #endif wsr a0, DBREAKC0 dsync /* wait for WSRs to DBREAKCn to complete */ #endif /* XCHAL_NUM_DBREAK */ /* pre-LX cores only */ # if XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RA_2004_1 /* * Starting in Xtensa LX, ICOUNTLEVEL resets to zero (not 15), so no * need to initialize it. Prior to that we do, otherwise we get an * ICOUNT exception, 2^32 instructions after reset. */ /* are we being debugged? (detected by ICOUNTLEVEL not 15, or dropped * below 12) */ rsr a2, ICOUNTLEVEL /* if so, avoid initializing ICOUNTLEVEL which drops single-steps * through here * */ bltui a2, 12, 1f /* avoid ICOUNT exceptions */ wsr a0, ICOUNTLEVEL /* wait for WSR to ICOUNTLEVEL to complete */ isync 1: #endif #endif /* XCHAL_HAVE_DEBUG */ #endif /* !XCHAL_HAVE_FULL_RESET */ #if XCHAL_HAVE_ABSOLUTE_LITERALS /* Technically, this only needs to be done under !FULL_RESET, * assuming hard reset: */ wsr a0, LITBASE rsync #endif #if XCHAL_HAVE_PSO_CDM && ! XCHAL_HAVE_PSO_FULL_RETENTION /* * If we're powering up from a temporary power shut-off (PSO), * restore state saved just prior to shut-off. Note that the * MEMCTL register was already restored earlier, and as a side * effect, registers a3, a5, a7 are now preloaded with values * that we will use here. 
* a3 - pointer to save area base address (_xtos_pso_savearea) * a5 - saved state signature (CORE_STATE_SIGNATURE) * a7 - contents of PWRSTAT register */ /* load save area signature */ l32i a4, a3, CS_SA_signature /* compare signature with expected one */ sub a4, a4, a5 # if XTOS_PSO_TEST /* pretend PSO warm start with warm caches */ movi a7, PWRSTAT_WAKEUP_RESET # endif /* wakeup from PSO? (branch if not) */ bbci.l a7, PWRSTAT_WAKEUP_RESET_SHIFT, 1f /* Yes, wakeup from PSO. Check whether state was properly saved. * speculatively clear PSO-wakeup bit */ addi a5, a7, - PWRSTAT_WAKEUP_RESET /* if state not saved (corrupted?), mark as cold start */ movnez a7, a5, a4 /* if state not saved, just continue with reset */ bnez a4, 1f /* Wakeup from PSO with good signature. Now check cache status: * if caches warm, restore now */ bbci.l a7, PWRSTAT_CACHES_LOST_POWER_SHIFT, .Lpso_restore /* Caches got shutoff. Continue reset, we'll end up initializing * caches, and check again later for PSO. */ # if XCHAL_HAVE_PRID && XCHAL_HAVE_S32C1I j .Ldonesync /* skip reset sync, only done for cold start */ # endif 1: /* Cold start. (Not PSO wakeup.) Proceed with normal full reset. */ #endif #if XCHAL_HAVE_PRID && XCHAL_HAVE_S32C1I /* Core 0 initializes the XMP synchronization variable, if present. * This operation needs to happen as early as possible in the startup * sequence so that the other cores can be released from reset. */ .weak _ResetSync movi a2, _ResetSync /* address of sync variable */ rsr.prid a3 /* core and multiprocessor ID */ extui a3, a3, 0, 8 /* extract core ID (FIXME: need proper * constants for PRID bits to extract) */ beqz a2, .Ldonesync /* skip if no sync variable */ bnez a3, .Ldonesync /* only do this on core 0 */ s32i a0, a2, 0 /* clear sync variable */ .Ldonesync: #endif #if XCHAL_HAVE_EXTERN_REGS && XCHAL_HAVE_MP_RUNSTALL /* On core 0, this releases other cores. 
On other cores this has no * effect, because runstall control is unconnected */ movi a2, XER_MPSCORE wer a0, a2 #endif /* * For processors with relocatable vectors, apply any alternate * vector base given to xt-genldscripts, which sets the * _memmap_vecbase_reset symbol accordingly. */ #if XCHAL_HAVE_VECBASE /* note: absolute symbol, not a ptr */ movi a2, _memmap_vecbase_reset wsr a2, vecbase #endif /* have ATOMCTL ? */ #if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0) #if XCHAL_DCACHE_IS_COHERENT /* MX -- internal for writeback, RCW otherwise */ movi a3, 0x25 #else /* non-MX -- always RCW */ movi a3, 0x15 #endif /* XCHAL_DCACHE_IS_COHERENT */ wsr a3, ATOMCTL #endif #if XCHAL_HAVE_INTERRUPTS && XCHAL_HAVE_DEBUG /* lower PS.INTLEVEL here to make reset vector easier to debug */ rsil a2, 1 #endif /* If either of the caches does not have dynamic way support, then * use the old (slow) method to init them. If the cache is absent * the macros will expand to empty. */ #if ! XCHAL_HAVE_ICACHE_DYN_WAYS icache_reset a2, a3 #endif #if ! XCHAL_HAVE_DCACHE_DYN_WAYS dcache_reset a2, a3 #endif #if XCHAL_HAVE_PSO_CDM && ! XCHAL_HAVE_PSO_FULL_RETENTION /* Here, a7 still contains status from the power status register, * or zero if signature check failed. */ /* wakeup from PSO with good signature? */ bbci.l a7, PWRSTAT_WAKEUP_RESET_SHIFT, .Lcoldstart /* Yes, wakeup from PSO. Caches had been powered down, now are * initialized. */ .Lpso_restore: /* Assume memory still initialized, so all code still unpacked etc. * So we can just jump/call to relevant state restore code (wherever * located). */ /* make shutoff routine return zero */ movi a2, 0 movi a3, _xtos_pso_savearea /* Here, as below for _start, call0 is used as an unlimited-range * jump. */ call0 _xtos_core_restore_nw /* (does not return) */ .Lcoldstart: #endif #if XCHAL_HAVE_PREFETCH /* Enable cache prefetch if present. 
*/ movi a2, XCHAL_CACHE_PREFCTL_DEFAULT wsr a2, PREFCTL #endif /* * Now setup the memory attributes. On some cores this "enables" * caches. We do this ahead of unpacking, so it can proceed more * efficiently. * * The _memmap_cacheattr_reset symbol's value (address) is defined by * the LSP's linker script, as generated by xt-genldscripts. If * defines 4-bit attributes for eight 512MB regions. * * (NOTE: for cores with the older MMU v1 or v2, or without any * memory protection mechanism, the following code has no effect.) */ #if XCHAL_HAVE_MPU /* If there's an empty background map, setup foreground maps to mimic * region protection: */ # if XCHAL_MPU_ENTRIES >= 8 && XCHAL_MPU_BACKGROUND_ENTRIES <= 2 .pushsection .rodata, "a" .global _xtos_mpu_attribs .align 4 _xtos_mpu_attribs: /* Illegal (---) */ .word 0x00006000+XCHAL_MPU_ENTRIES-8 /* Writeback (rwx Cacheable Non-shareable wb rd-alloc wr-alloc) */ .word 0x000F7700+XCHAL_MPU_ENTRIES-8 /* WBNA (rwx Cacheable Non-shareable wb rd-alloc) */ .word 0x000D5700+XCHAL_MPU_ENTRIES-8 /* Writethru (rwx Cacheable Non-shareable wt rd-alloc) */ .word 0x000C4700+XCHAL_MPU_ENTRIES-8 /* Bypass (rwx Device non-interruptible system-shareable) */ .word 0x00006700+XCHAL_MPU_ENTRIES-8 .popsection /* * We assume reset state: all MPU entries zeroed and disabled. * Otherwise we'd need a loop to zero everything. */ /* note: absolute symbol, not a ptr */ movi a2, _memmap_cacheattr_reset movi a3, _xtos_mpu_attribs movi a4, 0x20000000 /* 512 MB delta */ movi a6, 8 movi a7, 1 /* MPU entry vaddr 0, with valid bit set */ movi a9, 0 /* cacheadrdis value */ /* enable everything temporarily while MPU updates */ wsr.cacheadrdis a9 /* Write eight MPU entries, from the last one going backwards * (entries n-1 thru n-8) */ 2: extui a8, a2, 28, 4 /* get next attribute nibble (msb first) */ extui a5, a8, 0, 2 /* lower two bit indicate whether cached */ slli a9, a9, 1 /* add a bit to cacheadrdis... */ addi a10, a9, 1 /* set that new bit if... 
*/ moveqz a9, a10, a5 /* ... that region is non-cacheable */ addx4 a5, a8, a3 /* index into _xtos_mpu_attribs table */ addi a8, a8, -5 /* make valid attrib indices negative */ movgez a5, a3, a8 /* if not valid attrib, use Illegal */ l32i a5, a5, 0 /* load access rights, memtype from table * entry */ slli a2, a2, 4 sub a7, a7, a4 /* next 512MB region (last to first) */ addi a6, a6, -1 add a5, a5, a6 /* add the index */ wptlb a5, a7 /* write the MPU entry */ bnez a6, 2b /* loop until done */ # else /* default value of CACHEADRDIS for bgnd map */ movi a9, XCHAL_MPU_BG_CACHEADRDIS # endif wsr.cacheadrdis a9 /* update cacheadrdis */ #elif XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR \ || XCHAL_HAVE_XLT_CACHEATTR \ || (XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY) /* note: absolute symbol, not a ptr */ movi a2, _memmap_cacheattr_reset /* set CACHEATTR from a2 (clobbers a3-a8) */ cacheattr_set #endif /* Now that caches are initialized, cache coherency can be enabled. */ #if XCHAL_DCACHE_IS_COHERENT # if XCHAL_HAVE_EXTERN_REGS && XCHAL_HAVE_MX && \ (XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RE_2012_0) /* Opt into coherence for MX (for backward compatibility / testing). */ movi a3, 1 movi a2, XER_CCON wer a3, a2 # endif #endif /* Enable zero-overhead loop instr buffer, and snoop responses, if * configured. If HW erratum 453 fix is to be applied, then don't * enable loop instr buffer. */ #if XCHAL_USE_MEMCTL && XCHAL_SNOOP_LB_MEMCTL_DEFAULT movi a3, XCHAL_SNOOP_LB_MEMCTL_DEFAULT rsr a2, MEMCTL or a2, a2, a3 wsr a2, MEMCTL #endif /* Caches are all up and running, clear PWRCTL.ShutProcOffOnPWait. */ #if XCHAL_HAVE_PSO_CDM movi a2, XDM_MISC_PWRCTL movi a4, ~PWRCTL_CORE_SHUTOFF rer a3, a2 and a3, a3, a4 wer a3, a2 #endif #endif /* !XCHAL_HAVE_HALT */ /* * Unpack code and data (eg. copy ROMed segments to RAM, vectors into * their proper location, etc). 
*/ #if defined(XTOS_UNPACK) movi a2, _rom_store_table beqz a2, unpackdone unpack: l32i a3, a2, 0 /* start vaddr */ l32i a4, a2, 4 /* end vaddr */ l32i a5, a2, 8 /* store vaddr */ addi a2, a2, 12 bgeu a3, a4, upnext /* skip unless start < end */ uploop: l32i a6, a5, 0 addi a5, a5, 4 s32i a6, a3, 0 addi a3, a3, 4 bltu a3, a4, uploop j unpack upnext: bnez a3, unpack bnez a5, unpack #endif /* XTOS_UNPACK */ unpackdone: #if defined(XTOS_UNPACK) || defined(XTOS_MP) /* * If writeback caches are configured and enabled, unpacked data must * be written out to memory before trying to execute it: */ dcache_writeback_all a2, a3, a4, 0 /* ensure data written back is visible to i-fetch */ icache_sync a2 /* * Note: no need to invalidate the i-cache after the above, because * we already invalidated it further above and did not execute * anything within unpacked regions afterwards. [Strictly speaking, * if an unpacked region follows this code very closely, it's possible * for cache-ahead to have cached a bit of that unpacked region, so in * the future we may need to invalidate the entire i-cache here again * anyway.] */ #endif #if !XCHAL_HAVE_HALT /* skip for TX */ /* * Now that we know the .lit4 section is present (if got unpacked) * (and if absolute literals are used), initialize LITBASE to use it. */ #if XCHAL_HAVE_ABSOLUTE_LITERALS && XSHAL_USE_ABSOLUTE_LITERALS /* * Switch from PC-relative to absolute (litbase-relative) L32R mode. * Set LITBASE to 256 kB beyond the start of the literals in .lit4 * (aligns to the nearest 4 kB boundary, LITBASE does not have bits * 1..11) and set the enable bit (_lit4_start is assumed 4-byte * aligned). 
*/ movi a2, _lit4_start + 0x40001 wsr a2, LITBASE rsync #endif /* have and use absolute literals */ /* we can now start using absolute literals */ .end no-absolute-literals /* Technically, this only needs to be done pre-LX2, assuming hard * reset: */ # if XCHAL_HAVE_WINDOWED && defined(__XTENSA_WINDOWED_ABI__) /* Windowed register init, so we can call windowed code (eg. C code). */ movi a1, 1 wsr a1, WINDOWSTART /* * The processor always clears WINDOWBASE at reset, so no need to * clear it here. It resets WINDOWSTART to 1 starting with LX2.0/X7.0 * (RB-2006.0). However, assuming hard reset is not yet always * practical, so do this anyway: */ wsr a0, WINDOWBASE rsync movi a0, 0 /* possibly a different a0, clear it */ # endif /* only pre-LX2 needs this */ #if XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RB_2006_0 /* Coprocessor option initialization */ # if XCHAL_HAVE_CP /* * To allow creating new coprocessors using TC that are not known * at GUI build time without having to explicitly enable them, * all CPENABLE bits must be set, even though they may not always * correspond to a coprocessor. */ movi a2, 0xFF /* enable *all* bits, to allow dynamic TIE */ wsr a2, CPENABLE # endif /* * Floating point coprocessor option initialization (at least * rounding mode, so that floating point ops give predictable results) */ # if XCHAL_HAVE_FP && !XCHAL_HAVE_VECTORFPU2005 /* floating-point control register (user register number) */ # define FCR 232 /* floating-point status register (user register number) */ # define FSR 233 /* wait for WSR to CPENABLE to complete before accessing FP coproc * state */ rsync wur a0, FCR /* clear FCR (default rounding mode, round-nearest) */ wur a0, FSR /* clear FSR */ # endif #endif /* pre-LX2 */ /* * Initialize memory error handler address. 
* Putting this address in a register allows multiple instances of * the same configured core (with separate program images but shared * code memory, thus forcing memory error vector to be shared given * it is not VECBASE relative) to have the same memory error vector, * yet each have their own handler and associated data save area. */ #if XCHAL_HAVE_MEM_ECC_PARITY movi a4, _MemErrorHandler wsr a4, MESAVE #endif /* * Initialize medium and high priority interrupt dispatchers: */ #if HAVE_XSR /* For asm macros; works for positive a,b smaller than 1000: */ # define GREATERTHAN(a,b) (((b)-(a)) & ~0xFFF) # ifndef XCHAL_DEBUGLEVEL /* debug option not selected? */ # define XCHAL_DEBUGLEVEL 99 /* bogus value outside 2..6 */ # endif .macro init_vector level .if GREATERTHAN(XCHAL_NUM_INTLEVELS+1,\level) .if XCHAL_DEBUGLEVEL-\level .weak _Level&level&FromVector movi a4, _Level&level&FromVector wsr a4, EXCSAVE+\level .if GREATERTHAN(\level,XCHAL_EXCM_LEVEL) movi a5, _Pri_&level&_HandlerAddress s32i a4, a5, 0 /* If user provides their own handler, that handler might * not provide its own _Pri_<n>_HandlerAddress variable for * linking handlers. In that case, the reference below * would pull in the XTOS handler anyway, causing a conflict. * To avoid that, provide a weak version of it here: */ .pushsection .data, "aw" .global _Pri_&level&_HandlerAddress .weak _Pri_&level&_HandlerAddress .align 4 _Pri_&level&_HandlerAddress: .space 4 .popsection .endif .endif .endif .endm init_vector 2 init_vector 3 init_vector 4 init_vector 5 init_vector 6 #endif /*HAVE_XSR*/ /* * Complete reset initialization outside the vector, to avoid * requiring a vector that is larger than necessary. This 2nd-stage * startup code sets up the C Run-Time (CRT) and calls main(). * * Here we use call0 not because we expect any return, but because the * assembler/linker dynamically sizes call0 as needed (with * -mlongcalls) which it doesn't with j or jx. 
Note: This needs to * be call0 regardless of the selected ABI. */ call0 _start /* jump to _start (in crt1-*.S) */ /* does not return */ #else /* XCHAL_HAVE_HALT */ j _start /* jump to _start (in crt1-*.S) */ /* (TX has max 64kB IRAM, so J always in range) */ /* Paranoia -- double-check requirements / assumptions of this Xtensa * TX code: */ # if !defined(__XTENSA_CALL0_ABI__) || !XCHAL_HAVE_FULL_RESET \ || XCHAL_HAVE_INTERRUPTS || XCHAL_HAVE_CCOUNT \ || XCHAL_DTLB_ARF_WAYS || XCHAL_HAVE_DEBUG \ || XCHAL_HAVE_S32C1I || XCHAL_HAVE_ABSOLUTE_LITERALS \ || XCHAL_DCACHE_SIZE || XCHAL_ICACHE_SIZE || XCHAL_HAVE_PIF \ || XCHAL_HAVE_WINDOWED # error "Halt architecture (Xtensa TX) requires: call0 ABI, all flops reset, no exceptions or interrupts, no TLBs, no debug, no S32C1I, no LITBASE, no cache, no PIF, no windowed regs" # endif #endif /* XCHAL_HAVE_HALT */ #if (!XCHAL_HAVE_HALT || defined(XTOS_UNPACK)) && XCHAL_HAVE_IMEM_LOADSTORE .size _ResetHandler, . - _ResetHandler #else .size __start, . - __start #endif .text .global xthals_hw_configid0, xthals_hw_configid1 .global xthals_release_major, xthals_release_minor .end literal_prefix ```
/content/code_sandbox/arch/xtensa/core/startup/reset_vector.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,811
```objective-c /* * */ #ifndef ZEPHYR_ARCH_XTENSA_XTENSA_MPU_PRIV_H_ #define ZEPHYR_ARCH_XTENSA_XTENSA_MPU_PRIV_H_ #include <stdint.h> #include <zephyr/toolchain.h> #include <zephyr/arch/xtensa/mpu.h> #include <zephyr/sys/util_macro.h> #include <xtensa/config/core-isa.h> /** * @defgroup xtensa_mpu_internal_apis Xtensa Memory Protection Unit (MPU) Internal APIs * @ingroup xtensa_mpu_apis * @{ */ /** * @name Bit shifts and masks for MPU entry registers. * * @{ */ /** * Number of bits to shift for start address in MPU entry register. * * This is only used for aligning the value to the MPU entry register, * and is different than the hardware alignment requirement. */ #define XTENSA_MPU_ENTRY_REG_START_ADDR_SHIFT 5U /** * Bit mask of start address in MPU entry register. * * This is only used for aligning the value to the MPU entry register, * and is different than the hardware alignment requirement. */ #define XTENSA_MPU_ENTRY_REG_START_ADDR_MASK 0xFFFFFFE0U /** Number of bits to shift for enable bit in MPU entry register. */ #define XTENSA_MPU_ENTRY_REG_ENABLE_SHIFT 0U /** Bit mask of enable bit in MPU entry register. */ #define XTENSA_MPU_ENTRY_REG_ENABLE_MASK BIT(XTENSA_MPU_ENTRY_ENABLE_SHIFT) /** Number of bits to shift for lock bit in MPU entry register. */ #define XTENSA_MPU_ENTRY_REG_LOCK_SHIFT 1U /** Bit mask of lock bit in MPU entry register. */ #define XTENSA_MPU_ENTRY_REG_LOCK_MASK BIT(XTENSA_MPU_ENTRY_LOCK_SHIFT) /** Number of bits to shift for access rights in MPU entry register. */ #define XTENSA_MPU_ENTRY_REG_ACCESS_RIGHTS_SHIFT 8U /** Bit mask of access rights in MPU entry register. */ #define XTENSA_MPU_ENTRY_REG_ACCESS_RIGHTS_MASK \ (0xFU << XTENSA_MPU_ENTRY_REG_ACCESS_RIGHTS_SHIFT) /** Number of bits to shift for memory type in MPU entry register. */ #define XTENSA_MPU_ENTRY_REG_MEMORY_TYPE_SHIFT 12U /** Bit mask of memory type in MPU entry register. 
*/ #define XTENSA_MPU_ENTRY_REG_MEMORY_TYPE_MASK \ (0x1FFU << XTENSA_MPU_ENTRY_REG_MEMORY_TYPE_SHIFT) /** Bit mask for foreground entry returned by probing. */ #define XTENSA_MPU_PROBE_IS_FG_ENTRY_MASK BIT(31) /** Bit mask for background entry returned by probing. */ #define XTENSA_MPU_PROBE_IS_BG_ENTRY_MASK BIT(30) /** Bit mask used to determine if entry is valid returned by probing. */ #define XTENSA_MPU_PROBE_VALID_ENTRY_MASK \ (XTENSA_MPU_PROBE_IS_FG_ENTRY_MASK | XTENSA_MPU_PROBE_IS_BG_ENTRY_MASK) /** * @} */ /** * @name Bit shifts and masks for MPU PPTLB return value. * * @{ */ /** Bit shift for segment value. */ #define XTENSA_MPU_PPTLB_ACCESS_RIGHTS_SHIFT 8U /** Mask for segment value. */ #define XTENSA_MPU_PPTLB_ACCESS_RIGHTS_MASK 0x00000F00U /** * @} */ /** * Define one MPU entry of type struct xtensa_mpu_entry. * * @note This needs a comma at the end if used in array declaration. * * @param saddr Start address. * @param en Enable bit * @param rights Access rights. * @param memtype Memory type. */ #define XTENSA_MPU_ENTRY(saddr, en, rights, memtype) \ { \ .as.p.enable = en, \ .as.p.lock = 0, \ .as.p.mbz = 0, \ .as.p.start_addr = (saddr >> XTENSA_MPU_ENTRY_START_ADDR_SHIFT), \ .at.p.segment = 0, \ .at.p.mbz1 = 0, \ .at.p.access_rights = rights, \ .at.p.memory_type = memtype, \ .at.p.mbz2 = 0, \ } /** * @brief Read MPUCFG register. * * This returns the bitmask of enabled MPU entries (foreground segments). * * @return Value of MPUCFG register. */ static ALWAYS_INLINE uint32_t xtensa_mpu_mpucfg_read(void) { uint32_t mpucfg; __asm__ __volatile__("rsr.mpucfg %0" : "=a" (mpucfg)); return mpucfg; } /** * @brief Read MPUENB register. * * This returns the enable bits for MPU entries. * * @return Value of MPUENB register. */ static ALWAYS_INLINE uint32_t xtensa_mpu_mpuenb_read(void) { uint32_t mpuenb; __asm__ __volatile__("rsr.mpuenb %0" : "=a" (mpuenb)); return mpuenb; } /** * @brief Write MPUENB register. * * This writes the enable bits for MPU entries. 
* * @param mpuenb Value to be written. */ static ALWAYS_INLINE void xtensa_mpu_mpuenb_write(uint32_t mpuenb) { __asm__ __volatile__("wsr.mpuenb %0" : : "a"(mpuenb)); } /** * @brief Probe for protection TLB entry from an address. * * @param addr Probe address. * * @return Return of the PPTLB instruction. */ static ALWAYS_INLINE uint32_t xtensa_pptlb_probe(uintptr_t addr) { uint32_t ret; __asm__ __volatile__("pptlb %0, %1\n\t" : "=a"(ret) : "a"(addr)); return ret; } /** * @name MPU entry internal helper functions. * * @{ */ /** * @brief Return the start address encoded in the MPU entry. * * @param entry Pointer to the MPU entry. * * @return Start address. */ static ALWAYS_INLINE uintptr_t xtensa_mpu_entry_start_address_get(const struct xtensa_mpu_entry *entry) { return (entry->as.p.start_addr << XTENSA_MPU_ENTRY_REG_START_ADDR_SHIFT); } /** * @brief Set the start address encoded in the MPU entry. * * @param entry Pointer to the MPU entry. * @param addr Start address. */ static ALWAYS_INLINE void xtensa_mpu_entry_start_address_set(struct xtensa_mpu_entry *entry, uintptr_t addr) { entry->as.p.start_addr = addr >> XTENSA_MPU_ENTRY_REG_START_ADDR_SHIFT; } /** * @brief Return the lock bit encoded in the MPU entry. * * @param entry Pointer to the MPU entry. * * @retval True Lock bit is set. * @retval False Lock bit is not set. */ static ALWAYS_INLINE bool xtensa_mpu_entry_lock_get(const struct xtensa_mpu_entry *entry) { return entry->as.p.lock != 0; } /** * @brief Set the lock bit encoded in the MPU entry. * * @param entry Pointer to the MPU entry. * @param lock True if to lock the MPU entry. */ static ALWAYS_INLINE void xtensa_mpu_entry_lock_set(struct xtensa_mpu_entry *entry, bool lock) { entry->as.p.lock = lock ? 1 : 0; } /** * @brief Return the enable bit encoded in the MPU entry. * * @param entry Pointer to the MPU entry. * * @retval True Enable bit is set. * @retval False Enable bit is not set. 
*/ static ALWAYS_INLINE bool xtensa_mpu_entry_enable_get(const struct xtensa_mpu_entry *entry) { return entry->as.p.enable != 0; } /** * @brief Set the enable bit encoded in the MPU entry. * * @param entry Pointer to the MPU entry. * @param en True if to enable the MPU entry. */ static ALWAYS_INLINE void xtensa_mpu_entry_enable_set(struct xtensa_mpu_entry *entry, bool en) { entry->as.p.enable = en ? 1 : 0; } /** * @brief Return the access rights encoded in the MPU entry. * * @param entry Pointer to the MPU entry. * * @return Access right value. */ static ALWAYS_INLINE uint8_t xtensa_mpu_entry_access_rights_get(const struct xtensa_mpu_entry *entry) { return entry->at.p.access_rights; } /** * @brief Set the lock bit encoded in the MPU entry. * * @param entry Pointer to the MPU entry. * @param access_rights Access rights to be set. */ static ALWAYS_INLINE void xtensa_mpu_entry_access_rights_set(struct xtensa_mpu_entry *entry, uint8_t access_rights) { entry->at.p.access_rights = access_rights; } /** * @brief Return the memory type encoded in the MPU entry. * * @param entry Pointer to the MPU entry. * * @return Memory type value. */ static ALWAYS_INLINE uint16_t xtensa_mpu_entry_memory_type_get(const struct xtensa_mpu_entry *entry) { return entry->at.p.memory_type; } /** * @brief Set the memory type in the MPU entry. * * @param entry Pointer to the MPU entry. * @param memory_type Memory type to be set. */ static ALWAYS_INLINE void xtensa_mpu_entry_memory_type_set(struct xtensa_mpu_entry *entry, uint16_t memory_type) { entry->at.p.memory_type = memory_type; } /** * @brief Set both access rights and memory type of a MPU entry. * * @param entry Pointer to the MPU entry. * @param access_rights Access rights value. * @param memory_type Memory type value. 
*/ static inline void xtensa_mpu_entry_attributes_set(struct xtensa_mpu_entry *entry, uint8_t access_rights, uint16_t memory_type) { xtensa_mpu_entry_access_rights_set(entry, access_rights); xtensa_mpu_entry_memory_type_set(entry, memory_type); } /** * @brief Set fields in MPU entry so it will be functional. * * This sets the starting address, enable bit, access rights and memory type * of an entry. * * Note that this preserves the valud of the segment field. * * @param entry Pointer to the entry to be manipulated. * @param start_address Start address to be set. * @param enable Whether this entry should be enabled. * @param access_rights Access rights for the entry. * @param memory_type Memory type for the entry. */ static inline void xtensa_mpu_entry_set(struct xtensa_mpu_entry *entry, uintptr_t start_address, bool enable, uint8_t access_rights, uint16_t memory_type) { uint8_t segment = entry->at.p.segment; /* Clear out the fields, and make sure MBZ fields are zero. */ entry->as.raw = 0; entry->at.raw = 0; xtensa_mpu_entry_start_address_set(entry, start_address); xtensa_mpu_entry_enable_set(entry, enable); xtensa_mpu_entry_access_rights_set(entry, access_rights); xtensa_mpu_entry_memory_type_set(entry, memory_type); entry->at.p.segment = segment; } /** * @brief Test if two MPU entries have same access rights. * * @param entry1 MPU entry #1 * @param entry2 MPU entry #2. * * @return True if access rights are the same, false otherwise. */ static inline bool xtensa_mpu_entries_has_same_access_rights(const struct xtensa_mpu_entry *entry1, const struct xtensa_mpu_entry *entry2) { return entry1->at.p.access_rights == entry2->at.p.access_rights; } /** * @brief Test if two MPU entries have same memory types. * * @param entry1 MPU entry #1. * @param entry2 MPU entry #2. * * @return True if memory types are the same, false otherwise. 
*/ static inline bool xtensa_mpu_entries_has_same_memory_type(const struct xtensa_mpu_entry *entry1, const struct xtensa_mpu_entry *entry2) { return entry1->at.p.memory_type == entry2->at.p.memory_type; } /** * @brief Test if two MPU entries have same access rights and memory types. * * @param entry1 MPU entry #1. * @param entry2 MPU entry #2. * * @return True if access rights and memory types are the same, false otherwise. */ static inline bool xtensa_mpu_entries_has_same_attributes(const struct xtensa_mpu_entry *entry1, const struct xtensa_mpu_entry *entry2) { return xtensa_mpu_entries_has_same_access_rights(entry1, entry2) && xtensa_mpu_entries_has_same_memory_type(entry1, entry2); } /** * @brief Test if two entries has the same addresses. * * @param entry1 MPU entry #1. * @param entry2 MPU entry #2. * * @return True if they have the same address, false otherwise. */ static inline bool xtensa_mpu_entries_has_same_address(const struct xtensa_mpu_entry *entry1, const struct xtensa_mpu_entry *entry2) { return xtensa_mpu_entry_start_address_get(entry1) == xtensa_mpu_entry_start_address_get(entry2); } /** * @} */ /** * @name MPU access rights helper functions. * * @{ */ /** * @brief Test if the access rights is valid. * * @param access_rights Access rights value. * * @return True if access rights is valid, false otherwise. */ static ALWAYS_INLINE bool xtensa_mpu_access_rights_is_valid(uint8_t access_rights) { return (access_rights != 1) && (access_rights <= 15); } /** * @} */ /** * @} */ #endif /* ZEPHYR_ARCH_XTENSA_XTENSA_MPU_PRIV_H_ */ ```
/content/code_sandbox/arch/xtensa/include/xtensa_mpu_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,010
```objective-c /* * */ #ifndef ZEPHYR_ARCH_XTENSA_INCLUDE_XTENSA_ASM2_CONTEXT_H_ #define ZEPHYR_ARCH_XTENSA_INCLUDE_XTENSA_ASM2_CONTEXT_H_ #include <xtensa/corebits.h> #include <xtensa/config/core-isa.h> #include <xtensa/config/tie.h> /* * Stack frame layout for a saved processor context, in memory order, * high to low address: * * SP-0 <-- Interrupted stack pointer points here * * SP-4 Caller A3 spill slot \ * SP-8 Caller A2 spill slot | * SP-12 Caller A1 spill slot + (Part of ABI standard) * SP-16 Caller A0 spill slot / * * SP-20 Saved A3 * SP-24 Saved A2 * SP-28 Unused (not "Saved A1" because the SP is saved externally as a handle) * SP-32 Saved A0 * * SP-36 Saved PC (address to jump to following restore) * SP-40 Saved/interrupted PS special register * * SP-44 Saved SAR special register * * SP-48 Saved LBEG special register (if loops enabled) * SP-52 Saved LEND special register (if loops enabled) * SP-56 Saved LCOUNT special register (if loops enabled) * * SP-60 Saved SCOMPARE special register (if S32C1I enabled) * * SP-64 Saved EXCCAUSE special register * * SP-68 Saved THREADPTR special register (if processor has thread pointer) * * (The above fixed-size region is known as the "base save area" in the * code below) * * - 18 FPU registers (if FPU is present and CONFIG_FPU_SHARING enabled) * * - Saved A7 \ * - Saved A6 | * - Saved A5 +- If not in-use by another frame * - Saved A4 / * * - Saved A11 \ * - Saved A10 | * - Saved A9 +- If not in-use by another frame * - Saved A8 / * * - Saved A15 \ * - Saved A14 | * - Saved A13 +- If not in-use by another frame * - Saved A12 / * * - Saved intermediate stack pointer (points to low word of base save * area, i.e. the saved LCOUNT or SAR). The pointer to this value * (i.e. the final stack pointer) is stored externally as the * "restore handle" in the thread context. * * Essentially, you can recover a pointer to the BSA by loading *SP. 
* Adding the fixed BSA size to that gets you back to the * original/interrupted stack pointer. */ #ifndef __ASSEMBLER__ #include <stdint.h> #include <zephyr/toolchain.h> /** * Base Save Area (BSA) during interrupt. * * This saves the registers during interrupt entrance * so they can be restored later. * * Note that only A0-A3 are saved here. High registers * are saved after the BSA. */ struct xtensa_irq_base_save_area { #if XCHAL_HAVE_FP && defined(CONFIG_CPU_HAS_FPU) && defined(CONFIG_FPU_SHARING) uintptr_t fcr; uintptr_t fsr; uintptr_t fpu0; uintptr_t fpu1; uintptr_t fpu2; uintptr_t fpu3; uintptr_t fpu4; uintptr_t fpu5; uintptr_t fpu6; uintptr_t fpu7; uintptr_t fpu8; uintptr_t fpu9; uintptr_t fpu10; uintptr_t fpu11; uintptr_t fpu12; uintptr_t fpu13; uintptr_t fpu14; uintptr_t fpu15; #endif #if defined(CONFIG_XTENSA_HIFI_SHARING) /* * Carve space for the registers used by the HiFi audio engine * coprocessor (which is always CP1). Carve additional space to * manage alignment at run-time as we can not yet guarantee the * alignment of the BSA. */ uint8_t hifi[XCHAL_CP1_SA_SIZE + XCHAL_CP1_SA_ALIGN]; #endif #if XCHAL_HAVE_THREADPTR uintptr_t threadptr; #endif #if XCHAL_HAVE_S32C1I uintptr_t scompare1; #endif uintptr_t exccause; #if XCHAL_HAVE_LOOPS uintptr_t lcount; uintptr_t lend; uintptr_t lbeg; #endif uintptr_t sar; uintptr_t ps; uintptr_t pc; uintptr_t a0; uintptr_t scratch; uintptr_t a2; uintptr_t a3; uintptr_t caller_a0; uintptr_t caller_a1; uintptr_t caller_a2; uintptr_t caller_a3; }; typedef struct xtensa_irq_base_save_area _xtensa_irq_bsa_t; /** * Raw interrupt stack frame. * * This provides a raw interrupt stack frame to make it * easier to construct general purpose code in loops. * Avoid using this if possible. 
*/ struct xtensa_irq_stack_frame_raw { _xtensa_irq_bsa_t *ptr_to_bsa; struct { uintptr_t r0; uintptr_t r1; uintptr_t r2; uintptr_t r3; } blks[3]; }; typedef struct xtensa_irq_stack_frame_raw _xtensa_irq_stack_frame_raw_t; /** * Interrupt stack frame containing A0 - A15. */ struct xtensa_irq_stack_frame_a15 { _xtensa_irq_bsa_t *ptr_to_bsa; uintptr_t a12; uintptr_t a13; uintptr_t a14; uintptr_t a15; uintptr_t a8; uintptr_t a9; uintptr_t a10; uintptr_t a11; uintptr_t a4; uintptr_t a5; uintptr_t a6; uintptr_t a7; _xtensa_irq_bsa_t bsa; }; typedef struct xtensa_irq_stack_frame_a15 _xtensa_irq_stack_frame_a15_t; /** * Interrupt stack frame containing A0 - A11. */ struct xtensa_irq_stack_frame_a11 { _xtensa_irq_bsa_t *ptr_to_bsa; uintptr_t a8; uintptr_t a9; uintptr_t a10; uintptr_t a11; uintptr_t a4; uintptr_t a5; uintptr_t a6; uintptr_t a7; _xtensa_irq_bsa_t bsa; }; typedef struct xtensa_irq_stack_frame_a11 _xtensa_irq_stack_frame_a11_t; /** * Interrupt stack frame containing A0 - A7. */ struct xtensa_irq_stack_frame_a7 { _xtensa_irq_bsa_t *ptr_to_bsa; uintptr_t a4; uintptr_t a5; uintptr_t a6; uintptr_t a7; _xtensa_irq_bsa_t bsa; }; typedef struct xtensa_irq_stack_frame_a7 _xtensa_irq_stack_frame_a7_t; /** * Interrupt stack frame containing A0 - A3. */ struct xtensa_irq_stack_frame_a3 { _xtensa_irq_bsa_t *ptr_to_bsa; _xtensa_irq_bsa_t bsa; }; typedef struct xtensa_irq_stack_frame_a3 _xtensa_irq_stack_frame_a3_t; #endif /* __ASSEMBLER__ */ #endif /* ZEPHYR_ARCH_XTENSA_INCLUDE_XTENSA_ASM2_CONTEXT_H_ */ ```
/content/code_sandbox/arch/xtensa/include/xtensa_asm2_context.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,628
```objective-c /* */ #ifndef ZEPHYR_ARCH_XTENSA_INCLUDE_OFFSETS_SHORT_ARCH_H_ #define ZEPHYR_ARCH_XTENSA_INCLUDE_OFFSETS_SHORT_ARCH_H_ #define _thread_offset_to_flags \ (___thread_t_arch_OFFSET + ___thread_arch_t_flags_OFFSET) #ifdef CONFIG_USERSPACE #define _thread_offset_to_psp \ (___thread_t_arch_OFFSET + ___thread_arch_t_psp_OFFSET) #define _thread_offset_to_ptables \ (___thread_t_arch_OFFSET + ___thread_arch_t_ptables_OFFSET) #endif /* CONFIG_USERSPACE */ #endif /* ZEPHYR_ARCH_XTENSA_INCLUDE_OFFSETS_SHORT_ARCH_H_ */ ```
/content/code_sandbox/arch/xtensa/include/offsets_short_arch.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
134
```objective-c /* * */ #ifndef ZEPHYR_ARCH_XTENSA_INCLUDE_XTENSA_ASM2_S_H #define ZEPHYR_ARCH_XTENSA_INCLUDE_XTENSA_ASM2_S_H #include <zephyr/zsr.h> #include "xtensa_asm2_context.h" #include <zephyr/offsets.h> /* Assembler header! This file contains macros designed to be included * only by the assembler. */ #if defined(CONFIG_XTENSA_HIFI_SHARING) .extern _xtensa_hifi_save #endif /* * SPILL_ALL_WINDOWS * * Spills all windowed registers (i.e. registers not visible as * A0-A15) to their ABI-defined spill regions on the stack. * * Unlike the Xtensa HAL implementation, this code requires that the * EXCM and WOE bit be enabled in PS, and relies on repeated hardware * exception handling to do the register spills. The trick is to do a * noop write to the high registers, which the hardware will trap * (into an overflow exception) in the case where those registers are * already used by an existing call frame. Then it rotates the window * and repeats until all but the A0-A3 registers of the original frame * are guaranteed to be spilled, eventually rotating back around into * the original frame. Advantages: * * - Vastly smaller code size * * - More easily maintained if changes are needed to window over/underflow * exception handling. * * - Requires no scratch registers to do its work, so can be used safely in any * context. * * - If the WOE bit is not enabled (for example, in code written for * the CALL0 ABI), this becomes a silent noop and operates compatibly. * * - In memory protection situations, this relies on the existing * exception handlers (and thus their use of the L/S32E * instructions) to execute stores in the protected space. AFAICT, * the HAL routine does not handle this situation and isn't safe: it * will happily write through the "stack pointers" found in * registers regardless of where they might point. * * - Hilariously it's ACTUALLY FASTER than the HAL routine. And not * just a little bit, it's MUCH faster. 
With a mostly full register * file on an LX6 core (ESP-32) I'm measuring 145 cycles to spill * registers with this vs. 279 (!) to do it with * xthal_spill_windows(). Apparently Xtensa exception handling is * really fast, and no one told their software people. * * Note that as with the Xtensa HAL spill routine, and unlike context * switching code on most sane architectures, the intermediate states * here will have an invalid stack pointer. That means that this code * must not be preempted in any context (i.e. all Zephyr situations) * where the interrupt code will need to use the stack to save the * context. But unlike the HAL, which runs with exceptions masked via * EXCM, this will not: hit needs the overflow handlers unmasked. Use * INTLEVEL instead (which, happily, is what Zephyr's locking does * anyway). */ .macro SPILL_ALL_WINDOWS #if XCHAL_NUM_AREGS == 64 and a12, a12, a12 rotw 3 and a12, a12, a12 rotw 3 and a12, a12, a12 rotw 3 and a12, a12, a12 rotw 3 and a12, a12, a12 rotw 4 #elif XCHAL_NUM_AREGS == 32 and a12, a12, a12 rotw 3 and a12, a12, a12 rotw 3 and a4, a4, a4 rotw 2 #else #error Unrecognized XCHAL_NUM_AREGS #endif .endm #if XCHAL_HAVE_FP && defined(CONFIG_CPU_HAS_FPU) && defined(CONFIG_FPU_SHARING) /* * FPU_REG_SAVE * * Saves the Float Point Unit context registers in the base save * area pointed to by the current stack pointer A1. The Floating-Point * Coprocessor Option adds the FR register file and two User Registers * called FCR and FSR.The FR register file consists of 16 registers of * 32 bits each and is used for all data computation. 
*/ .macro FPU_REG_SAVE rur.fcr a0 s32i a0, a1, ___xtensa_irq_bsa_t_fcr_OFFSET rur.fsr a0 s32i a0, a1, ___xtensa_irq_bsa_t_fsr_OFFSET ssi f0, a1, ___xtensa_irq_bsa_t_fpu0_OFFSET ssi f1, a1, ___xtensa_irq_bsa_t_fpu1_OFFSET ssi f2, a1, ___xtensa_irq_bsa_t_fpu2_OFFSET ssi f3, a1, ___xtensa_irq_bsa_t_fpu3_OFFSET ssi f4, a1, ___xtensa_irq_bsa_t_fpu4_OFFSET ssi f5, a1, ___xtensa_irq_bsa_t_fpu5_OFFSET ssi f6, a1, ___xtensa_irq_bsa_t_fpu6_OFFSET ssi f7, a1, ___xtensa_irq_bsa_t_fpu7_OFFSET ssi f8, a1, ___xtensa_irq_bsa_t_fpu8_OFFSET ssi f9, a1, ___xtensa_irq_bsa_t_fpu9_OFFSET ssi f10, a1, ___xtensa_irq_bsa_t_fpu10_OFFSET ssi f11, a1, ___xtensa_irq_bsa_t_fpu11_OFFSET ssi f12, a1, ___xtensa_irq_bsa_t_fpu12_OFFSET ssi f13, a1, ___xtensa_irq_bsa_t_fpu13_OFFSET ssi f14, a1, ___xtensa_irq_bsa_t_fpu14_OFFSET ssi f15, a1, ___xtensa_irq_bsa_t_fpu15_OFFSET .endm .macro FPU_REG_RESTORE l32i.n a0, a1, ___xtensa_irq_bsa_t_fcr_OFFSET wur.fcr a0 l32i.n a0, a1, ___xtensa_irq_bsa_t_fsr_OFFSET wur.fsr a0 lsi f0, a1, ___xtensa_irq_bsa_t_fpu0_OFFSET lsi f1, a1, ___xtensa_irq_bsa_t_fpu1_OFFSET lsi f2, a1, ___xtensa_irq_bsa_t_fpu2_OFFSET lsi f3, a1, ___xtensa_irq_bsa_t_fpu3_OFFSET lsi f4, a1, ___xtensa_irq_bsa_t_fpu4_OFFSET lsi f5, a1, ___xtensa_irq_bsa_t_fpu5_OFFSET lsi f6, a1, ___xtensa_irq_bsa_t_fpu6_OFFSET lsi f7, a1, ___xtensa_irq_bsa_t_fpu7_OFFSET lsi f8, a1, ___xtensa_irq_bsa_t_fpu8_OFFSET lsi f9, a1, ___xtensa_irq_bsa_t_fpu9_OFFSET lsi f10, a1, ___xtensa_irq_bsa_t_fpu10_OFFSET lsi f11, a1, ___xtensa_irq_bsa_t_fpu11_OFFSET lsi f12, a1, ___xtensa_irq_bsa_t_fpu12_OFFSET lsi f13, a1, ___xtensa_irq_bsa_t_fpu13_OFFSET lsi f14, a1, ___xtensa_irq_bsa_t_fpu14_OFFSET lsi f15, a1, ___xtensa_irq_bsa_t_fpu15_OFFSET .endm #endif /* * ODD_REG_SAVE * * Stashes the oddball shift/loop context registers in the base save * area pointed to by the current stack pointer. 
On exit, A0 will * have been modified but A2/A3 have not, and the shift/loop * instructions can be used freely (though note loops don't work in * exceptions for other reasons!). * * Does not populate or modify the PS/PC save locations. */ .macro ODD_REG_SAVE rsr.sar a0 s32i a0, a1, ___xtensa_irq_bsa_t_sar_OFFSET #if XCHAL_HAVE_LOOPS rsr.lbeg a0 s32i a0, a1, ___xtensa_irq_bsa_t_lbeg_OFFSET rsr.lend a0 s32i a0, a1, ___xtensa_irq_bsa_t_lend_OFFSET rsr.lcount a0 s32i a0, a1, ___xtensa_irq_bsa_t_lcount_OFFSET #endif rsr.exccause a0 s32i a0, a1, ___xtensa_irq_bsa_t_exccause_OFFSET #if XCHAL_HAVE_S32C1I rsr.scompare1 a0 s32i a0, a1, ___xtensa_irq_bsa_t_scompare1_OFFSET #endif #if XCHAL_HAVE_THREADPTR && \ (defined(CONFIG_USERSPACE) || defined(CONFIG_THREAD_LOCAL_STORAGE)) rur.THREADPTR a0 s32i a0, a1, ___xtensa_irq_bsa_t_threadptr_OFFSET #endif #if XCHAL_HAVE_FP && defined(CONFIG_CPU_HAS_FPU) && defined(CONFIG_FPU_SHARING) FPU_REG_SAVE #endif .endm #ifdef CONFIG_XTENSA_MMU /* * CALC_PTEVADDR_BASE * * This calculates the virtual address of the first PTE page * (PTEVADDR base, the one mapping 0x00000000) so that we can * use this to obtain the virtual address of the PTE page we are * interested in. This can be obtained via * (1 << CONFIG_XTENSA_MMU_PTEVADDR_SHIFT). * * Note that this is done this way is to avoid any TLB * miss if we are to use l32r to load the PTEVADDR base. * If the page containing the PTEVADDR base address is * not in TLB, we will need to handle the TLB miss which * we are trying to avoid here. * * @param ADDR_REG Register to store the calculated * PTEVADDR base address. * * @note The content of ADDR_REG will be modified. * Save and restore it around this macro usage. */ .macro CALC_PTEVADDR_BASE ADDR_REG movi \ADDR_REG, 1 slli \ADDR_REG, \ADDR_REG, CONFIG_XTENSA_MMU_PTEVADDR_SHIFT .endm /* * PRELOAD_PTEVADDR * * This preloads the page table entries for a 4MB region to avoid TLB * misses. 
This 4MB region is mapped via a page (4KB) of page table * entries (PTE). Each entry is 4 bytes mapping a 4KB region. Each page, * then, has 1024 entries mapping a 4MB region. Filling TLB entries is * automatically done via hardware, as long as the PTE page associated * with a particular address is also in TLB. If the PTE page is not in * TLB, an exception will be raised that must be handled. This TLB miss * is problematic when we are in the middle of dealing with another * exception or handling an interrupt. So we need to put the PTE page * into TLB by simply do a load operation. * * @param ADDR_REG Register containing the target address * @param PTEVADDR_BASE_REG Register containing the PTEVADDR base * * @note Both the content of ADDR_REG will be modified. * Save and restore it around this macro usage. */ .macro PRELOAD_PTEVADDR ADDR_REG, PTEVADDR_BASE_REG /* * Calculate the offset to first PTE page of all memory. * * Every page (4KB) of page table entries contains * 1024 entires (as each entry is 4 bytes). Each entry * maps one 4KB page. So one page of entries maps 4MB of * memory. * * 1. We need to find the virtual address of the PTE page * having the page table entry mapping the address in * register ADDR_REG. To do this, we first need to find * the offset of this PTE page from the first PTE page * (the one mapping memory 0x00000000): * a. Find the beginning address of the 4KB page * containing address in ADDR_REG. This can simply * be done by discarding 11 bits (or shifting right * and then left 12 bits). * b. Since each PTE page contains 1024 entries, * we divide the address obtained in step (a) by * further dividing it by 1024 (shifting right and * then left 10 bits) to obtain the offset of * the PTE page. * * Step (a) and (b) can be obtained together so that * we can shift right 22 bits, and then shift left * 12 bits. * * 2. Once we have combine the results from step (1) and * PTEVADDR_BASE_REG to get the virtual address of * the PTE page. * * 3. 
Do a l32i to force the PTE page to be in TLB. */ /* Step 1 */ srli \ADDR_REG, \ADDR_REG, 22 slli \ADDR_REG, \ADDR_REG, 12 /* Step 2 */ add \ADDR_REG, \ADDR_REG, \PTEVADDR_BASE_REG /* Step 3 */ l32i \ADDR_REG, \ADDR_REG, 0 .endm #endif /* CONFIG_XTENSA_MMU */ /* * CROSS_STACK_CALL * * Sets the stack up carefully such that a "cross stack" call can spill * correctly, then invokes an immediate handler. Note that: * * 0. When spilling a frame, functions find their callEE's stack pointer * (to save A0-A3) from registers. But they find their * already-spilled callER's stack pointer (to save higher GPRs) from * their own stack memory. * * 1. The function that was interrupted ("interruptee") does not need to * be spilled, because it already has been as part of the context * save. So it doesn't need registers allocated for it anywhere. * * 2. Interruptee's caller needs to spill into the space below the * interrupted stack frame, which means that the A1 register it finds * below it needs to contain the old/interrupted stack and not the * context saved one. * * 3. The ISR dispatcher (called "underneath" interruptee) needs to spill * high registers into the space immediately above its own stack frame, * so it needs to find a caller with the "new" stack pointer instead. * * We make this work by inserting TWO 4-register frames between * "interruptee's caller" and "ISR dispatcher". The top one (which * occupies the slot formerly held by "interruptee", whose registers * were saved via external means) holds the "interrupted A1" and the * bottom has the "top of the interrupt stack" which can be either the * word above a new memory area (when handling an interrupt from user * mode) OR the existing "post-context-save" stack pointer (when * handling a nested interrupt). The code works either way. Because * these are both only 4-registers, neither needs its own caller for * spilling. 
* * The net cost is 32 wasted bytes on the interrupt stack frame to * spill our two "phantom frames" (actually not quite, as we'd need a * few of those words used somewhere for tracking the stack pointers * anyway). But the benefit is that NO REGISTER FRAMES NEED TO BE * SPILLED on interrupt entry. And if we return back into the same * context we interrupted (a common case) no windows need to be * explicitly spilled at all. And in fact in the case where the ISR * uses significant depth on its own stack, the interrupted frames * will be spilled naturally as a standard cost of a function call, * giving register windows something like "zero cost interrupts". * * FIXME: a terrible awful really nifty idea to fix the stack waste * problem would be to use a SINGLE frame between the two stacks, * pre-spill it with one stack pointer for the "lower" call to see and * leave the register SP in place for the "upper" frame to use. * Would require modifying the Window{Over|Under}flow4 exceptions to * know not to spill/fill these special frames, but that's not too * hard, maybe... * * Enter this macro with a valid "context saved" pointer (i.e. SP * should point to a stored pointer which points to one BSA below the * interrupted/old stack) in A1, a handler function in A2, and a "new" * stack pointer (i.e. a pointer to the word ABOVE the allocated stack * area) in A3. Exceptions should be enabled via PS.EXCM, but * PS.INTLEVEL must (!) be set such that no nested interrupts can * arrive (we restore the natural INTLEVEL from the value in ZSR_EPS * just before entering the call). On return A0/1 will be unchanged, * A2 has the return value of the called function, and A3 is * clobbered. A4-A15 become part of called frames and MUST NOT BE IN * USE by the code that expands this macro. The called function gets * the context save handle in A1 as it's first argument. 
*/ .macro CROSS_STACK_CALL mov a6, a3 /* place "new sp" in the next frame's A2 */ mov a10, a1 /* pass "context handle" in 2nd frame's A2 */ mov a3, a1 /* stash it locally in A3 too */ mov a11, a2 /* handler in 2nd frame's A3, next frame's A7 */ /* Recover the interrupted SP from the BSA */ l32i a1, a1, 0 l32i a0, a1, ___xtensa_irq_bsa_t_a0_OFFSET addi a1, a1, ___xtensa_irq_bsa_t_SIZEOF call4 _xstack_call0_\@ mov a1, a3 /* restore original SP */ mov a2, a6 /* copy return value */ j _xstack_returned_\@ .align 4 _xstack_call0_\@: /* We want an ENTRY to set a bit in windowstart and do the * rotation, but we want our own SP. After that, we are * running in a valid frame, so re-enable interrupts. */ entry a1, 16 mov a1, a2 rsr.ZSR_EPS a2 wsr.ps a2 call4 _xstack_call1_\@ mov a2, a6 /* copy return value */ retw .align 4 _xstack_call1_\@: /* Remember the handler is going to do our ENTRY, so the * handler pointer is still in A6 (not A2) even though this is * after the second CALL4. */ jx a7 _xstack_returned_\@: .endm /* Entry setup for all exceptions and interrupts. Arrive here with * the stack pointer decremented across a base save area, A0-A3 and * PS/PC already spilled to the stack in the BSA, and A2 containing a * level-specific C handler function. * * This is a macro (to allow for unit testing) that expands to a * handler body to which the vectors can jump. It takes two static * (!) arguments: a special register name (which should be set up to * point to some kind of per-CPU record struct) and offsets within * that struct which contains an interrupt stack top and a "nest * count" word. */ .macro EXCINT_HANDLER NEST_OFF, INTSTACK_OFF /* A2 contains our handler function which will get clobbered * by the save. Stash it into the unused "a1" slot in the * BSA and recover it immediately after. Kind of a hack. 
*/ s32i a2, a1, ___xtensa_irq_bsa_t_scratch_OFFSET ODD_REG_SAVE #if defined(CONFIG_XTENSA_HIFI_SHARING) call0 _xtensa_hifi_save /* Save HiFi registers */ #endif call0 xtensa_save_high_regs l32i a2, a1, 0 l32i a2, a2, ___xtensa_irq_bsa_t_scratch_OFFSET #if XCHAL_HAVE_THREADPTR && defined(CONFIG_USERSPACE) /* Clear up the threadptr because it is used * to check if a thread is runnig on user mode. Since * we are in a interruption we don't want the system * thinking it is possbly running in user mode. */ movi.n a0, 0 wur.THREADPTR a0 #endif /* XCHAL_HAVE_THREADPTR && CONFIG_USERSPACE */ /* There's a gotcha with level 1 handlers: the INTLEVEL field * gets left at zero and not set like high priority interrupts * do. That works fine for exceptions, but for L1 interrupts, * when we unmask EXCM below, the CPU will just fire the * interrupt again and get stuck in a loop blasting save * frames down the stack to the bottom of memory. It would be * good to put this code into the L1 handler only, but there's * not enough room in the vector without some work there to * squash it some. Next choice would be to make this a macro * argument and expand two versions of this handler. An * optimization FIXME, I guess. */ rsr.ps a0 movi a3, PS_INTLEVEL_MASK and a0, a0, a3 bnez a0, _not_l1 rsr.ps a0 movi a3, PS_INTLEVEL(1) or a0, a0, a3 wsr.ps a0 _not_l1: /* Setting up the cross stack call below has states where the * resulting frames are invalid/non-reentrant, so we can't * allow nested interrupts. But we do need EXCM unmasked, as * we use CALL/ENTRY instructions in the process and need to * handle exceptions to spill caller/interruptee frames. Use * PS.INTLEVEL at maximum to mask all interrupts and stash the * current value in our designated EPS register (which is * guaranteed unused across the call) */ rsil a0, 0xf /* Since we are unmasking EXCM, we need to set RING bits to kernel * mode, otherwise we won't be able to run the exception handler in C. 
*/ movi a3, ~(PS_EXCM_MASK) & ~(PS_RING_MASK) and a0, a0, a3 wsr.ZSR_EPS a0 wsr.ps a0 rsync /* A1 already contains our saved stack, and A2 our handler. * So all that's needed for CROSS_STACK_CALL is to put the * "new" stack into A3. This can be either a copy of A1 or an * entirely new area depending on whether we find a 1 in our * SR[off] macro argument. */ rsr.ZSR_CPU a3 l32i a0, a3, \NEST_OFF beqz a0, _switch_stacks_\@ /* Use the same stack, just copy A1 to A3 after incrementing NEST */ addi a0, a0, 1 s32i a0, a3, \NEST_OFF mov a3, a1 j _do_call_\@ _switch_stacks_\@: addi a0, a0, 1 s32i a0, a3, \NEST_OFF l32i a3, a3, \INTSTACK_OFF _do_call_\@: CROSS_STACK_CALL /* Mask interrupts (which have been unmasked during the handler * execution) while we muck with the windows and decrement the nested * count. The restore will unmask them correctly. */ rsil a0, XCHAL_NUM_INTLEVELS /* Decrement nest count */ rsr.ZSR_CPU a3 l32i a0, a3, \NEST_OFF addi a0, a0, -1 s32i a0, a3, \NEST_OFF /* Last trick: the called function returned the "next" handle * to restore to in A6 (the call4'd function's A2). If this * is not the same handle as we started with, we need to do a * register spill before restoring, for obvious reasons. * Remember to restore the A1 stack pointer as it existed at * interrupt time so the caller of the interrupted function * spills to the right place. */ beq a6, a1, _restore_\@ #ifndef CONFIG_USERSPACE l32i a1, a1, 0 l32i a0, a1, ___xtensa_irq_bsa_t_a0_OFFSET addi a1, a1, ___xtensa_irq_bsa_t_SIZEOF #ifndef CONFIG_KERNEL_COHERENCE /* When using coherence, the registers of the interrupted * context got spilled upstream in arch_cohere_stacks() */ SPILL_ALL_WINDOWS #endif /* Restore A1 stack pointer from "next" handle. */ mov a1, a6 #else /* With userspace, we cannot simply restore A1 stack pointer * at this pointer because we need to swap page tables to * the incoming thread, and we do not want to call that * function with thread's stack. 
So we stash the new stack * pointer into A2 first, then move it to A1 after we have * swapped the page table. */ mov a2, a6 /* Need to switch page tables because the "next" handle * returned above is not the same handle as we started * with. This means we are being restored to another * thread. */ rsr a6, ZSR_CPU l32i a6, a6, ___cpu_t_current_OFFSET #ifdef CONFIG_XTENSA_MMU call4 xtensa_swap_update_page_tables #endif #ifdef CONFIG_XTENSA_MPU call4 xtensa_mpu_map_write #endif l32i a1, a1, 0 l32i a0, a1, ___xtensa_irq_bsa_t_a0_OFFSET addi a1, a1, ___xtensa_irq_bsa_t_SIZEOF SPILL_ALL_WINDOWS /* Moved stashed stack pointer to A1 to restore stack. */ mov a1, a2 #endif _restore_\@: j _restore_context .endm /* Defines an exception/interrupt vector for a specified level. Saves * off the interrupted A0-A3 registers and the per-level PS/PC * registers to the stack before jumping to a handler (defined with * EXCINT_HANDLER) to do the rest of the work. * * Arguments are a numeric interrupt level and symbol names for the * entry code (defined via EXCINT_HANDLER) and a C handler for this * particular level. * * Note that the linker sections for some levels get special names for * no particularly good reason. Only level 1 has any code generation * difference, because it is the legacy exception level that predates * the EPS/EPC registers. It also lives in the "iram0.text" segment * (which is linked immediately after the vectors) so that an assembly * stub can be loaded into the vector area instead and reach this code * with a simple jump instruction. 
*/ .macro DEF_EXCINT LVL, ENTRY_SYM, C_HANDLER_SYM #if defined(CONFIG_XTENSA_SMALL_VECTOR_TABLE_ENTRY) .pushsection .iram.text, "ax" .global _Level\LVL\()VectorHelper _Level\LVL\()VectorHelper : #else .if \LVL == 1 .pushsection .iram0.text, "ax" .elseif \LVL == XCHAL_DEBUGLEVEL .pushsection .DebugExceptionVector.text, "ax" .elseif \LVL == XCHAL_NMILEVEL .pushsection .NMIExceptionVector.text, "ax" .else .pushsection .Level\LVL\()InterruptVector.text, "ax" .endif .global _Level\LVL\()Vector _Level\LVL\()Vector: #endif #ifdef CONFIG_XTENSA_MMU .if \LVL == 1 /* If there are any TLB misses during interrupt handling, * the user/kernel/double exception vector will be triggered * to handle these misses. This results in DEPC and EXCCAUSE * being overwritten, and then execution returned back to * this site of TLB misses. When it gets to the C handler, * it will not see the original cause. So stash * the EXCCAUSE here so C handler can see the original cause. * * For double exception, DEPC in saved in earlier vector * code. */ wsr a0, ZSR_EXCCAUSE_SAVE esync rsr a0, ZSR_DEPC_SAVE beqz a0, _not_triple_fault /* If stashed DEPC is not zero, we have started servicing * a double exception and yet we are here because there is * another exception (through user/kernel if PS.EXCM is * cleared, or through double if PS.EXCM is set). This can * be considered triple fault. Although there is no triple * faults on Xtensa. Once PS.EXCM is set, it keeps going * through double exception vector for any new exceptions. * However, our exception code needs to unmask PS.EXCM to * enable register window operations. So after that, any * new exceptions will go through the kernel or user vectors * depending on PS.UM. If there is continuous faults, it may * keep ping-ponging between double and kernel/user exception * vectors that may never get resolved. 
Since we stash DEPC * during double exception, and the stashed one is only cleared * once the double exception has been processed, we can use * the stashed DEPC value to detect if the next exception could * be considered a triple fault. If such a case exists, simply * jump to an infinite loop, or quit the simulator, or invoke * debugger. */ rsr a0, ZSR_EXCCAUSE_SAVE j _TripleFault _not_triple_fault: rsr.exccause a0 xsr a0, ZSR_EXCCAUSE_SAVE esync .endif #endif addi a1, a1, -___xtensa_irq_bsa_t_SIZEOF s32i a0, a1, ___xtensa_irq_bsa_t_a0_OFFSET s32i a2, a1, ___xtensa_irq_bsa_t_a2_OFFSET s32i a3, a1, ___xtensa_irq_bsa_t_a3_OFFSET /* Level "1" is the exception handler, which uses a different * calling convention. No special register holds the * interrupted PS, instead we just assume that the CPU has * turned on the EXCM bit and set INTLEVEL. */ .if \LVL == 1 rsr.ps a0 #ifdef CONFIG_XTENSA_MMU /* TLB misses also come through level 1 interrupts. * We do not want to unconditionally unmask interrupts. * Execution continues after a TLB miss is handled, * and we need to preserve the interrupt mask. * The interrupt mask will be cleared for non-TLB-misses * level 1 interrupt later in the handler code. */ movi a2, ~PS_EXCM_MASK #else movi a2, ~(PS_EXCM_MASK | PS_INTLEVEL_MASK) #endif and a0, a0, a2 s32i a0, a1, ___xtensa_irq_bsa_t_ps_OFFSET .else rsr.eps\LVL a0 s32i a0, a1, ___xtensa_irq_bsa_t_ps_OFFSET .endif rsr.epc\LVL a0 s32i a0, a1, ___xtensa_irq_bsa_t_pc_OFFSET /* What's happening with this jump is that the L32R * instruction to load a full 32 bit immediate must use an * offset that is negative from PC. Normally the assembler * fixes this up for you by putting the "literal pool" * somewhere at the start of the section. But vectors start * at a fixed address in their own section, and don't (in our * current linker setup) have anywhere "definitely before * vectors" to place immediates. Some platforms and apps will * link by dumb luck, others won't. 
We add an extra jump just * to clear space we know to be legal. * * The right way to fix this would be to use a "literal_prefix" * to put the literals into a per-vector section, then link * that section into the PREVIOUS vector's area right after * the vector code. Requires touching a lot of linker scripts * though. */ j _after_imms\LVL\() .align 4 _handle_excint_imm\LVL: .word \ENTRY_SYM _c_handler_imm\LVL: .word \C_HANDLER_SYM _after_imms\LVL: l32r a2, _c_handler_imm\LVL l32r a0, _handle_excint_imm\LVL jx a0 .popsection #if defined(CONFIG_XTENSA_SMALL_VECTOR_TABLE_ENTRY) .if \LVL == 1 .pushsection .iram0.text, "ax" .elseif \LVL == XCHAL_DEBUGLEVEL .pushsection .DebugExceptionVector.text, "ax" .elseif \LVL == XCHAL_NMILEVEL .pushsection .NMIExceptionVector.text, "ax" .else .pushsection .Level\LVL\()InterruptVector.text, "ax" .endif .global _Level\LVL\()Vector _Level\LVL\()Vector : j _Level\LVL\()VectorHelper .popsection #endif .endm #endif /* ZEPHYR_ARCH_XTENSA_INCLUDE_XTENSA_ASM2_S_H */ ```
/content/code_sandbox/arch/xtensa/include/xtensa_asm2_s.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
7,834
```objective-c /* * */ #ifndef ZEPHYR_ARCH_XTENSA_CORE_INCLUDE_XTENSA_BACKTRACE_H_ #define ZEPHYR_ARCH_XTENSA_CORE_INCLUDE_XTENSA_BACKTRACE_H_ #ifdef __cplusplus extern "C" { #endif #ifndef __ASSEMBLER__ #include <stdbool.h> #include <stdint.h> /** * @ingroup xtensa_internal_apis * @{ */ /** * @brief Structure used for backtracing * * This structure stores the backtrace information of a particular stack frame * (i.e. the PC and SP). This structure is used iteratively with the * xtensa_cpu_get_next_backtrace_frame() function to traverse each frame * within a single stack. The next_pc represents the PC of the current * frame's caller, thus a next_pc of 0 indicates that the current frame * is the last frame on the stack. * * @note Call esp_backtrace_get_start() to obtain initialization values for * this structure */ struct xtensa_backtrace_frame_t { uint32_t pc; /* PC of the current frame */ uint32_t sp; /* SP of the current frame */ uint32_t next_pc; /* PC of the current frame's caller */ }; /** * Get the first frame of the current stack's backtrace * * Given the following function call flow * (B -> A -> X -> esp_backtrace_get_start), * this function will do the following. * - Flush CPU registers and window frames onto the current stack * - Return PC and SP of function A (i.e. start of the stack's backtrace) * - Return PC of function B (i.e. next_pc) * * @note This function is implemented in assembly * * @param[out] pc PC of the first frame in the backtrace * @param[out] sp SP of the first frame in the backtrace * @param[out] next_pc PC of the first frame's caller * @param[in] interrupted_stack Pointer to interrupted stack */ void xtensa_backtrace_get_start(uint32_t *pc, uint32_t *sp, uint32_t *next_pc, int *interrupted_stack); /** * @brief Get the next frame on a stack for backtracing * * Given a stack frame(i), this function will obtain the next * stack frame(i-1) on the same call stack (i.e. the caller of frame(i)). 
* This function is meant to be called iteratively when doing a backtrace. * * Entry Conditions: Frame structure containing valid SP and next_pc * Exit Conditions: * - Frame structure updated with SP and PC of frame(i-1). * next_pc now points to frame(i-2). * - If a next_pc of 0 is returned, it indicates that frame(i-1) * is last frame on the stack * * @param[inout] frame Pointer to frame structure * * @return * - True if the SP and PC of the next frame(i-1) are sane * - False otherwise */ bool xtensa_backtrace_get_next_frame(struct xtensa_backtrace_frame_t *frame); /** * @brief Print the backtrace of the current stack * * @param depth The maximum number of stack frames to print (should be > 0) * @param interrupted_stack Pointer to interrupted stack * * @return * - 0 Backtrace successfully printed to completion or to depth limit * - -1 Backtrace is corrupted */ int xtensa_backtrace_print(int depth, int *interrupted_stack); /** * @} */ #endif #ifdef __cplusplus } #endif #endif /* ZEPHYR_ARCH_XTENSA_CORE_INCLUDE_XTENSA_BACKTRACE_H_ */ ```
/content/code_sandbox/arch/xtensa/include/xtensa_backtrace.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
804
```unknown # ARM64 architecture configuration options menu "ARM64 Options" depends on ARM64 config ARCH default "arm64" config CPU_CORTEX bool help This option signifies the use of a CPU of the Cortex family. config ARM_CUSTOM_INTERRUPT_CONTROLLER bool help This option indicates that the ARM CPU is connected to a custom (i.e. non-GIC) interrupt controller. A number of Cortex-A and Cortex-R cores (Cortex-A5, Cortex-R4/5, ...) allow interfacing to a custom external interrupt controller and this option must be selected when such cores are connected to an interrupt controller that is not the ARM Generic Interrupt Controller (GIC). When this option is selected, the architecture interrupt control functions are mapped to the SoC interrupt control interface, which is implemented at the SoC level. rsource "core/Kconfig" endmenu ```
/content/code_sandbox/arch/arm64/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
195
```unknown /* * */ /* * This file implements the common calling mechanism to be used with the Secure * Monitor Call (SMC) and Hypervisor Call (HVC). * * See path_to_url */ #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #include <zephyr/arch/cpu.h> #include <offsets_short.h> .macro SMCCC instr \instr #0 ldr x4, [sp] stp x0, x1, [x4, __arm_smccc_res_t_a0_a1_OFFSET] stp x2, x3, [x4, __arm_smccc_res_t_a2_a3_OFFSET] stp x4, x5, [x4, __arm_smccc_res_t_a4_a5_OFFSET] stp x6, x7, [x4, __arm_smccc_res_t_a6_a7_OFFSET] ret .endm /* * The SMC instruction is used to generate a synchronous exception that is * handled by Secure Monitor code running in EL3. */ GTEXT(arm_smccc_smc) SECTION_FUNC(TEXT, arm_smccc_smc) SMCCC smc /* * The HVC instruction is used to generate a synchronous exception that is * handled by a hypervisor running in EL2. */ GTEXT(arm_smccc_hvc) SECTION_FUNC(TEXT, arm_smccc_hvc) SMCCC hvc ```
/content/code_sandbox/arch/arm64/core/smccc-call.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
312
```objective-c /* * Xtensa MMU support * * Private data declarations * */ #ifndef ZEPHYR_ARCH_XTENSA_XTENSA_MMU_PRIV_H_ #define ZEPHYR_ARCH_XTENSA_XTENSA_MMU_PRIV_H_ #include <stdint.h> #include <xtensa/config/core-isa.h> #include <zephyr/toolchain.h> #include <zephyr/sys/util_macro.h> /** * @defgroup xtensa_mmu_internal_apis Xtensa Memory Management Unit (MMU) Internal APIs * @ingroup xtensa_mmu_apis * @{ */ /** Mask for VPN in PTE */ #define XTENSA_MMU_PTE_VPN_MASK 0xFFFFF000U /** Mask for PPN in PTE */ #define XTENSA_MMU_PTE_PPN_MASK 0xFFFFF000U /** Mask for attributes in PTE */ #define XTENSA_MMU_PTE_ATTR_MASK 0x0000000FU /** Mask for cache mode in PTE */ #define XTENSA_MMU_PTE_ATTR_CACHED_MASK 0x0000000CU /** Mask used to figure out which L1 page table to use */ #define XTENSA_MMU_L1_MASK 0x3FF00000U /** Mask used to figure out which L2 page table to use */ #define XTENSA_MMU_L2_MASK 0x3FFFFFU #define XTENSA_MMU_PTEBASE_MASK 0xFFC00000 /** Number of bits to shift for PPN in PTE */ #define XTENSA_MMU_PTE_PPN_SHIFT 12U /** Mask for ring in PTE */ #define XTENSA_MMU_PTE_RING_MASK 0x00000030U /** Number of bits to shift for ring in PTE */ #define XTENSA_MMU_PTE_RING_SHIFT 4U /** Number of bits to shift for SW reserved ared in PTE */ #define XTENSA_MMU_PTE_SW_SHIFT 6U /** Mask for SW bits in PTE */ #define XTENSA_MMU_PTE_SW_MASK 0x00000FC0U /** * Internal bit just used to indicate that the attr field must * be set in the SW bits too. It is used later when duplicating the * kernel page tables. 
*/ #define XTENSA_MMU_PTE_ATTR_ORIGINAL BIT(31) /** Construct a page table entry (PTE) */ #define XTENSA_MMU_PTE(paddr, ring, sw, attr) \ (((paddr) & XTENSA_MMU_PTE_PPN_MASK) | \ (((ring) << XTENSA_MMU_PTE_RING_SHIFT) & XTENSA_MMU_PTE_RING_MASK) | \ (((sw) << XTENSA_MMU_PTE_SW_SHIFT) & XTENSA_MMU_PTE_SW_MASK) | \ ((attr) & XTENSA_MMU_PTE_ATTR_MASK)) /** Get the attributes from a PTE */ #define XTENSA_MMU_PTE_ATTR_GET(pte) \ ((pte) & XTENSA_MMU_PTE_ATTR_MASK) /** Set the attributes in a PTE */ #define XTENSA_MMU_PTE_ATTR_SET(pte, attr) \ (((pte) & ~XTENSA_MMU_PTE_ATTR_MASK) | (attr & XTENSA_MMU_PTE_ATTR_MASK)) /** Set the SW field in a PTE */ #define XTENSA_MMU_PTE_SW_SET(pte, sw) \ (((pte) & ~XTENSA_MMU_PTE_SW_MASK) | (sw << XTENSA_MMU_PTE_SW_SHIFT)) /** Get the SW field from a PTE */ #define XTENSA_MMU_PTE_SW_GET(pte) \ (((pte) & XTENSA_MMU_PTE_SW_MASK) >> XTENSA_MMU_PTE_SW_SHIFT) /** Set the ring in a PTE */ #define XTENSA_MMU_PTE_RING_SET(pte, ring) \ (((pte) & ~XTENSA_MMU_PTE_RING_MASK) | \ ((ring) << XTENSA_MMU_PTE_RING_SHIFT)) /** Get the ring from a PTE */ #define XTENSA_MMU_PTE_RING_GET(pte) \ (((pte) & XTENSA_MMU_PTE_RING_MASK) >> XTENSA_MMU_PTE_RING_SHIFT) /** Get the ASID from the RASID register corresponding to the ring in a PTE */ #define XTENSA_MMU_PTE_ASID_GET(pte, rasid) \ (((rasid) >> ((((pte) & XTENSA_MMU_PTE_RING_MASK) \ >> XTENSA_MMU_PTE_RING_SHIFT) * 8)) & 0xFF) /** Calculate the L2 page table position from a virtual address */ #define XTENSA_MMU_L2_POS(vaddr) \ (((vaddr) & XTENSA_MMU_L2_MASK) >> 12U) /** Calculate the L1 page table position from a virtual address */ #define XTENSA_MMU_L1_POS(vaddr) \ ((vaddr) >> 22U) /** * @def XTENSA_MMU_PAGE_TABLE_ATTR * * PTE attributes for entries in the L1 page table. 
Should never be * writable, may be cached in non-SMP contexts only */ #if CONFIG_MP_MAX_NUM_CPUS == 1 #define XTENSA_MMU_PAGE_TABLE_ATTR XTENSA_MMU_CACHED_WB #else #define XTENSA_MMU_PAGE_TABLE_ATTR 0 #endif /** This ASID is shared between all domains and kernel. */ #define XTENSA_MMU_SHARED_ASID 255 /** Fixed data TLB way to map the page table */ #define XTENSA_MMU_PTE_WAY 7 /** Fixed data TLB way to map the vecbase */ #define XTENSA_MMU_VECBASE_WAY 8 /** Kernel specific ASID. Ring field in the PTE */ #define XTENSA_MMU_KERNEL_RING 0 /** User specific ASID. Ring field in the PTE */ #define XTENSA_MMU_USER_RING 2 /** Ring value for MMU_SHARED_ASID */ #define XTENSA_MMU_SHARED_RING 3 /** Number of data TLB ways [0-9] */ #define XTENSA_MMU_NUM_DTLB_WAYS 10 /** Number of instruction TLB ways [0-6] */ #define XTENSA_MMU_NUM_ITLB_WAYS 7 /** Number of auto-refill ways */ #define XTENSA_MMU_NUM_TLB_AUTOREFILL_WAYS 4 /** Indicate PTE is illegal. */ #define XTENSA_MMU_PTE_ILLEGAL (BIT(3) | BIT(2)) /** * PITLB HIT bit. * * For more information see * Xtensa Instruction Set Architecture (ISA) Reference Manual * 4.6.5.7 Formats for Probing MMU Option TLB Entries */ #define XTENSA_MMU_PITLB_HIT BIT(3) /** * PDTLB HIT bit. * * For more information see * Xtensa Instruction Set Architecture (ISA) Reference Manual * 4.6.5.7 Formats for Probing MMU Option TLB Entries */ #define XTENSA_MMU_PDTLB_HIT BIT(4) /** * Virtual address where the page table is mapped */ #define XTENSA_MMU_PTEVADDR CONFIG_XTENSA_MMU_PTEVADDR /** * Find the PTE entry address of a given vaddr. * * For example, assuming PTEVADDR in 0xE0000000, * the page spans from 0xE0000000 - 0xE03FFFFF * * address 0x00 is in 0xE0000000 * address 0x1000 is in 0xE0000004 * ..... 
* address 0xE0000000 (where the page is) is in 0xE0380000 * * Generalizing it, any PTE virtual address can be calculated this way: * * PTE_ENTRY_ADDRESS = PTEVADDR + ((VADDR / 4096) * 4) */ #define XTENSA_MMU_PTE_ENTRY_VADDR(base, vaddr) \ ((base) + (((vaddr) / KB(4)) * 4)) /** * Get ASID for a given ring from RASID register. * * RASID contains four 8-bit ASIDs, one per ring. */ #define XTENSA_MMU_RASID_ASID_GET(rasid, ring) \ (((rasid) >> ((ring) * 8)) & 0xff) /** * @brief Set RASID register. * * @param rasid Value to be set. */ static ALWAYS_INLINE void xtensa_rasid_set(uint32_t rasid) { __asm__ volatile("wsr %0, rasid\n\t" "isync\n" : : "a"(rasid)); } /** * @brief Get RASID register. * * @return Register value. */ static ALWAYS_INLINE uint32_t xtensa_rasid_get(void) { uint32_t rasid; __asm__ volatile("rsr %0, rasid" : "=a"(rasid)); return rasid; } /** * @brief Set a ring in RASID register to be particular value. * * @param asid ASID to be set. * @param ring ASID of which ring to be manipulated. */ static ALWAYS_INLINE void xtensa_rasid_asid_set(uint8_t asid, uint8_t ring) { uint32_t rasid = xtensa_rasid_get(); rasid = (rasid & ~(0xff << (ring * 8))) | ((uint32_t)asid << (ring * 8)); xtensa_rasid_set(rasid); } /** * @brief Invalidate a particular instruction TLB entry. * * @param entry Entry to be invalidated. */ static ALWAYS_INLINE void xtensa_itlb_entry_invalidate(uint32_t entry) { __asm__ volatile("iitlb %0\n\t" : : "a" (entry)); } /** * @brief Synchronously invalidate of a particular instruction TLB entry. * * @param entry Entry to be invalidated. */ static ALWAYS_INLINE void xtensa_itlb_entry_invalidate_sync(uint32_t entry) { __asm__ volatile("iitlb %0\n\t" "isync\n\t" : : "a" (entry)); } /** * @brief Synchronously invalidate of a particular data TLB entry. * * @param entry Entry to be invalidated. 
*/ static ALWAYS_INLINE void xtensa_dtlb_entry_invalidate_sync(uint32_t entry) { __asm__ volatile("idtlb %0\n\t" "dsync\n\t" : : "a" (entry)); } /** * @brief Invalidate a particular data TLB entry. * * @param entry Entry to be invalidated. */ static ALWAYS_INLINE void xtensa_dtlb_entry_invalidate(uint32_t entry) { __asm__ volatile("idtlb %0\n\t" : : "a" (entry)); } /** * @brief Synchronously write to a particular data TLB entry. * * @param pte Value to be written. * @param entry Entry to be written. */ static ALWAYS_INLINE void xtensa_dtlb_entry_write_sync(uint32_t pte, uint32_t entry) { __asm__ volatile("wdtlb %0, %1\n\t" "dsync\n\t" : : "a" (pte), "a"(entry)); } /** * @brief Write to a particular data TLB entry. * * @param pte Value to be written. * @param entry Entry to be written. */ static ALWAYS_INLINE void xtensa_dtlb_entry_write(uint32_t pte, uint32_t entry) { __asm__ volatile("wdtlb %0, %1\n\t" : : "a" (pte), "a"(entry)); } /** * @brief Synchronously write to a particular instruction TLB entry. * * @param pte Value to be written. * @param entry Entry to be written. */ static ALWAYS_INLINE void xtensa_itlb_entry_write(uint32_t pte, uint32_t entry) { __asm__ volatile("witlb %0, %1\n\t" : : "a" (pte), "a"(entry)); } /** * @brief Synchronously write to a particular instruction TLB entry. * * @param pte Value to be written. * @param entry Entry to be written. */ static ALWAYS_INLINE void xtensa_itlb_entry_write_sync(uint32_t pte, uint32_t entry) { __asm__ volatile("witlb %0, %1\n\t" "isync\n\t" : : "a" (pte), "a"(entry)); } /** * @brief Invalidate all autorefill DTLB and ITLB entries. * * This should be used carefully since all refill entries in the data * and instruction TLB. At least two pages, the current code page and * the current stack, will be repopulated by this code as it returns. * * This needs to be called in any circumstance where the mappings for * a previously-used page table change. 
It does not need to be called * on context switch, where ASID tagging isolates entries for us. */ static inline void xtensa_tlb_autorefill_invalidate(void) { uint8_t way, i, entries; entries = BIT(MAX(XCHAL_ITLB_ARF_ENTRIES_LOG2, XCHAL_DTLB_ARF_ENTRIES_LOG2)); for (way = 0; way < XTENSA_MMU_NUM_TLB_AUTOREFILL_WAYS; way++) { for (i = 0; i < entries; i++) { uint32_t entry = way + (i << XTENSA_MMU_PTE_PPN_SHIFT); xtensa_dtlb_entry_invalidate(entry); xtensa_itlb_entry_invalidate(entry); } } __asm__ volatile("isync"); } /** * @brief Set the page tables. * * The page tables is set writing ptevaddr address. * * @param ptables The page tables address (virtual address) */ static ALWAYS_INLINE void xtensa_ptevaddr_set(void *ptables) { __asm__ volatile("wsr.ptevaddr %0" : : "a"((uint32_t)ptables)); } /** * @brief Get the current page tables. * * The page tables is obtained by reading ptevaddr address. * * @return ptables The page tables address (virtual address) */ static ALWAYS_INLINE void *xtensa_ptevaddr_get(void) { uint32_t ptables; __asm__ volatile("rsr.ptevaddr %0" : "=a" (ptables)); return (void *)(ptables & XTENSA_MMU_PTEBASE_MASK); } /** * @brief Get the virtual address associated with a particular data TLB entry. * * @param entry TLB entry to be queried. */ static ALWAYS_INLINE void *xtensa_dtlb_vaddr_read(uint32_t entry) { uint32_t vaddr; __asm__ volatile("rdtlb0 %0, %1\n\t" : "=a" (vaddr) : "a" (entry)); return (void *)(vaddr & XTENSA_MMU_PTE_VPN_MASK); } /** * @brief Get the physical address associated with a particular data TLB entry. * * @param entry TLB entry to be queried. */ static ALWAYS_INLINE uint32_t xtensa_dtlb_paddr_read(uint32_t entry) { uint32_t paddr; __asm__ volatile("rdtlb1 %0, %1\n\t" : "=a" (paddr) : "a" (entry)); return (paddr & XTENSA_MMU_PTE_PPN_MASK); } /** * @brief Get the virtual address associated with a particular instruction TLB entry. * * @param entry TLB entry to be queried. 
*/ static ALWAYS_INLINE void *xtensa_itlb_vaddr_read(uint32_t entry) { uint32_t vaddr; __asm__ volatile("ritlb0 %0, %1\n\t" : "=a" (vaddr), "+a" (entry)); return (void *)(vaddr & XTENSA_MMU_PTE_VPN_MASK); } /** * @brief Get the physical address associated with a particular instruction TLB entry. * * @param entry TLB entry to be queried. */ static ALWAYS_INLINE uint32_t xtensa_itlb_paddr_read(uint32_t entry) { uint32_t paddr; __asm__ volatile("ritlb1 %0, %1\n\t" : "=a" (paddr), "+a" (entry)); return (paddr & XTENSA_MMU_PTE_PPN_MASK); } /** * @brief Probe for instruction TLB entry from a virtual address. * * @param vaddr Virtual address. * * @return Return of the PITLB instruction. */ static ALWAYS_INLINE uint32_t xtensa_itlb_probe(void *vaddr) { uint32_t ret; __asm__ __volatile__("pitlb %0, %1\n\t" : "=a" (ret) : "a" ((uint32_t)vaddr)); return ret; } /** * @brief Probe for data TLB entry from a virtual address. * * @param vaddr Virtual address. * * @return Return of the PDTLB instruction. */ static ALWAYS_INLINE uint32_t xtensa_dtlb_probe(void *vaddr) { uint32_t ret; __asm__ __volatile__("pdtlb %0, %1\n\t" : "=a" (ret) : "a" ((uint32_t)vaddr)); return ret; } /** * @brief Invalidate an instruction TLB entry associated with a virtual address. * * This invalidated an instruction TLB entry associated with a virtual address * if such TLB entry exists. Otherwise, do nothing. * * @param vaddr Virtual address. */ static inline void xtensa_itlb_vaddr_invalidate(void *vaddr) { uint32_t entry = xtensa_itlb_probe(vaddr); if (entry & XTENSA_MMU_PITLB_HIT) { xtensa_itlb_entry_invalidate_sync(entry); } } /** * @brief Invalidate a data TLB entry associated with a virtual address. * * This invalidated a data TLB entry associated with a virtual address * if such TLB entry exists. Otherwise, do nothing. * * @param vaddr Virtual address. 
*/ static inline void xtensa_dtlb_vaddr_invalidate(void *vaddr) { uint32_t entry = xtensa_dtlb_probe(vaddr); if (entry & XTENSA_MMU_PDTLB_HIT) { xtensa_dtlb_entry_invalidate_sync(entry); } } /** * @brief Tell hardware to use a page table very first time after boot. * * @param l1_page Pointer to the page table to be used. */ void xtensa_init_paging(uint32_t *l1_page); /** * @brief Switch to a new page table. * * @param asid The ASID of the memory domain associated with the incoming page table. * @param l1_page Page table to be switched to. */ void xtensa_set_paging(uint32_t asid, uint32_t *l1_page); /** * @} */ #endif /* ZEPHYR_ARCH_XTENSA_XTENSA_MMU_PRIV_H_ */ ```
/content/code_sandbox/arch/xtensa/include/xtensa_mmu_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,242
```unknown /* * */ /* * Thread context switching for ARM64 Cortex-A (AArch64) * * This module implements the routines necessary for thread context switching * on ARM64 Cortex-A (AArch64) */ #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #include <offsets_short.h> #include <zephyr/arch/cpu.h> #include <zephyr/syscall.h> #include "macro_priv.inc" _ASM_FILE_PROLOGUE /* * Routine to handle context switches * * This function is directly called either by _isr_wrapper() in case of * preemption, or arch_switch() in case of cooperative switching. * * void z_arm64_context_switch(struct k_thread *new, struct k_thread *old); */ GTEXT(z_arm64_context_switch) SECTION_FUNC(TEXT, z_arm64_context_switch) #ifndef CONFIG_ARM64_SAFE_EXCEPTION_STACK /* Save the current SP_EL0 */ mrs x4, sp_el0 #endif stp x19, x20, [x1, #_thread_offset_to_callee_saved_x19_x20] stp x21, x22, [x1, #_thread_offset_to_callee_saved_x21_x22] stp x23, x24, [x1, #_thread_offset_to_callee_saved_x23_x24] stp x25, x26, [x1, #_thread_offset_to_callee_saved_x25_x26] stp x27, x28, [x1, #_thread_offset_to_callee_saved_x27_x28] #ifndef CONFIG_ARM64_SAFE_EXCEPTION_STACK stp x29, x4, [x1, #_thread_offset_to_callee_saved_x29_sp_el0] #else str x29, [x1, #_thread_offset_to_callee_saved_x29_sp_el0] #endif /* Save the current SP_ELx and return address */ mov x4, sp stp x4, lr, [x1, #_thread_offset_to_callee_saved_sp_elx_lr] /* save current thread's exception depth */ mrs x4, tpidrro_el0 lsr x2, x4, #TPIDRROEL0_EXC_SHIFT strb w2, [x1, #_thread_offset_to_exception_depth] /* retrieve next thread's exception depth */ ldrb w2, [x0, #_thread_offset_to_exception_depth] bic x4, x4, #TPIDRROEL0_EXC_DEPTH orr x4, x4, x2, lsl #TPIDRROEL0_EXC_SHIFT msr tpidrro_el0, x4 #ifdef CONFIG_FPU_SHARING /* * Do this after tpidrro_el0 is updated with the new exception * depth value, and before old->switch_handle is updated (making * it available for grab by another CPU) as we still use its stack. */ stp x0, x1, [sp, #-16]! 
bl z_arm64_fpu_thread_context_switch ldp x0, x1, [sp], #16 #endif /* save old thread into switch handle which is required by * z_sched_switch_spin() */ str x1, [x1, #___thread_t_switch_handle_OFFSET] #ifdef CONFIG_THREAD_LOCAL_STORAGE /* Grab the TLS pointer */ ldr x2, [x0, #_thread_offset_to_tls] /* Store in the "Thread ID" register. * This register is used as a base pointer to all * thread variables with offsets added by toolchain. */ msr tpidr_el0, x2 #endif ldp x19, x20, [x0, #_thread_offset_to_callee_saved_x19_x20] ldp x21, x22, [x0, #_thread_offset_to_callee_saved_x21_x22] ldp x23, x24, [x0, #_thread_offset_to_callee_saved_x23_x24] ldp x25, x26, [x0, #_thread_offset_to_callee_saved_x25_x26] ldp x27, x28, [x0, #_thread_offset_to_callee_saved_x27_x28] #ifndef CONFIG_ARM64_SAFE_EXCEPTION_STACK ldp x29, x4, [x0, #_thread_offset_to_callee_saved_x29_sp_el0] /* Restore SP_EL0 */ msr sp_el0, x4 #else ldr x29, [x0, #_thread_offset_to_callee_saved_x29_sp_el0] #endif /* Restore SP_EL1 and return address */ ldp x4, lr, [x0, #_thread_offset_to_callee_saved_sp_elx_lr] mov sp, x4 #if defined(CONFIG_ARM64_SAFE_EXCEPTION_STACK) /* arch_curr_cpu()->arch.current_stack_limit = thread->arch.stack_limit */ get_cpu x4 ldr x2, [x0, #_thread_offset_to_stack_limit] str x2, [x4, #_cpu_offset_to_current_stack_limit] #endif #if defined(CONFIG_USERSPACE) || defined(CONFIG_ARM64_STACK_PROTECTION) str lr, [sp, #-16]! bl z_arm64_swap_mem_domains ldr lr, [sp], #16 #endif #ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING str lr, [sp, #-16]! 
bl z_thread_mark_switched_in ldr lr, [sp], #16 #endif /* Return to arch_switch() or _isr_wrapper() */ ret /* * Synchronous exceptions handler * * The service call (SVC) is used in the following occasions: * - Cooperative context switching * - IRQ offloading */ GTEXT(z_arm64_sync_exc) SECTION_FUNC(TEXT, z_arm64_sync_exc) mrs x0, esr_el1 lsr x1, x0, #26 #ifdef CONFIG_FPU_SHARING cmp x1, #0x07 /*Access to SIMD or floating-point */ bne 1f mov x0, sp bl z_arm64_fpu_trap b z_arm64_exit_exc_fpu_done 1: #endif cmp x1, #0x15 /* 0x15 = SVC */ bne inv /* Demux the SVC call */ and x1, x0, #0xff cmp x1, #_SVC_CALL_RUNTIME_EXCEPT beq oops #ifdef CONFIG_USERSPACE cmp x1, #_SVC_CALL_SYSTEM_CALL beq z_arm64_do_syscall #endif #ifdef CONFIG_IRQ_OFFLOAD cmp x1, #_SVC_CALL_IRQ_OFFLOAD beq offload b inv offload: /* * Retrieve provided routine and argument from the stack. * Routine pointer is in saved x0, argument in saved x1 * so we load them with x1/x0 (reversed). */ ldp x1, x0, [sp, ___esf_t_x0_x1_OFFSET] /* ++_current_cpu->nested to be checked by arch_is_in_isr() */ get_cpu x2 ldr w3, [x2, #___cpu_t_nested_OFFSET] add w4, w3, #1 str w4, [x2, #___cpu_t_nested_OFFSET] /* If not nested: switch to IRQ stack and save current sp on it. */ cbnz w3, 1f ldr x3, [x2, #___cpu_t_irq_stack_OFFSET] mov x4, sp mov sp, x3 str x4, [sp, #-16]! #if defined(CONFIG_ARM64_SAFE_EXCEPTION_STACK) /* update the stack limit with IRQ stack limit */ sub x3, x3, #CONFIG_ISR_STACK_SIZE str x3, [x2, #_cpu_offset_to_current_stack_limit] #endif 1: /* Execute provided routine (argument is in x0 already). */ blr x1 /* Exit through regular IRQ exit path */ b z_arm64_irq_done #endif b inv oops: mov x0, sp b z_arm64_do_kernel_oops inv: mov x0, #0 /* K_ERR_CPU_EXCEPTION */ mov x1, sp bl z_arm64_fatal_error /* Return here only in case of recoverable error */ b z_arm64_exit_exc ```
/content/code_sandbox/arch/arm64/core/switch.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,836
```unknown /* * */ #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #include <zephyr/arch/cpu.h> _ASM_FILE_PROLOGUE /* * Switch TTBR0 */ GTEXT(z_arm64_set_ttbr0) SECTION_FUNC(TEXT, z_arm64_set_ttbr0) /* Switch the TTBR0 */ msr ttbr0_el1, x0 isb ret ```
/content/code_sandbox/arch/arm64/core/mmu.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
94
```c /* * Written by: Nicolas Pitre * */ #include <zephyr/kernel.h> #include <zephyr/kernel_structs.h> #include <kernel_arch_interface.h> #include <zephyr/arch/cpu.h> #include <zephyr/sys/barrier.h> #include <zephyr/sys/atomic.h> /* to be found in fpu.S */ extern void z_arm64_fpu_save(struct z_arm64_fp_context *saved_fp_context); extern void z_arm64_fpu_restore(struct z_arm64_fp_context *saved_fp_context); #define FPU_DEBUG 0 #if FPU_DEBUG /* * Debug traces have to be produced without printk() or any other functions * using a va_list as va_start() always copy the FPU registers that could be * used to pass float arguments, and that triggers an FPU access trap. */ #include <string.h> static void DBG(char *msg, struct k_thread *th) { char buf[80], *p; unsigned int v; strcpy(buf, "CPU# exc# "); buf[3] = '0' + _current_cpu->id; buf[8] = '0' + arch_exception_depth(); strcat(buf, _current->name); strcat(buf, ": "); strcat(buf, msg); strcat(buf, " "); strcat(buf, th->name); v = *(unsigned char *)&th->arch.saved_fp_context; p = buf + strlen(buf); *p++ = ' '; *p++ = ((v >> 4) < 10) ? ((v >> 4) + '0') : ((v >> 4) - 10 + 'a'); *p++ = ((v & 15) < 10) ? ((v & 15) + '0') : ((v & 15) - 10 + 'a'); *p++ = '\n'; *p = 0; k_str_out(buf, p - buf); } #else static inline void DBG(char *msg, struct k_thread *t) { } #endif /* FPU_DEBUG */ /* * Flush FPU content and disable access. * This is called locally and also from flush_fpu_ipi_handler(). 
*/ void arch_flush_local_fpu(void) { __ASSERT(read_daif() & DAIF_IRQ_BIT, "must be called with IRQs disabled"); struct k_thread *owner = atomic_ptr_get(&_current_cpu->arch.fpu_owner); if (owner != NULL) { uint64_t cpacr = read_cpacr_el1(); /* turn on FPU access */ write_cpacr_el1(cpacr | CPACR_EL1_FPEN_NOTRAP); barrier_isync_fence_full(); /* save current owner's content */ z_arm64_fpu_save(&owner->arch.saved_fp_context); /* make sure content made it to memory before releasing */ barrier_dsync_fence_full(); /* release ownership */ atomic_ptr_clear(&_current_cpu->arch.fpu_owner); DBG("disable", owner); /* disable FPU access */ write_cpacr_el1(cpacr & ~CPACR_EL1_FPEN_NOTRAP); barrier_isync_fence_full(); } } #ifdef CONFIG_SMP static void flush_owned_fpu(struct k_thread *thread) { __ASSERT(read_daif() & DAIF_IRQ_BIT, "must be called with IRQs disabled"); int i; /* search all CPUs for the owner we want */ unsigned int num_cpus = arch_num_cpus(); for (i = 0; i < num_cpus; i++) { if (atomic_ptr_get(&_kernel.cpus[i].arch.fpu_owner) != thread) { continue; } /* we found it live on CPU i */ if (i == _current_cpu->id) { arch_flush_local_fpu(); } else { /* the FPU context is live on another CPU */ arch_flush_fpu_ipi(i); /* * Wait for it only if this is about the thread * currently running on this CPU. Otherwise the * other CPU running some other thread could regain * ownership the moment it is removed from it and * we would be stuck here. * * Also, if this is for the thread running on this * CPU, then we preemptively flush any live context * on this CPU as well since we're likely to * replace it, and this avoids a deadlock where * two CPUs want to pull each other's FPU context. 
*/ if (thread == _current) { arch_flush_local_fpu(); while (atomic_ptr_get(&_kernel.cpus[i].arch.fpu_owner) == thread) { barrier_dsync_fence_full(); } } } break; } } #endif void z_arm64_fpu_enter_exc(void) { __ASSERT(read_daif() & DAIF_IRQ_BIT, "must be called with IRQs disabled"); /* always deny FPU access whenever an exception is entered */ write_cpacr_el1(read_cpacr_el1() & ~CPACR_EL1_FPEN_NOTRAP); barrier_isync_fence_full(); } /* * Simulate some FPU store instructions. * * In many cases, the FPU trap is triggered by va_start() that copies * the content of FP registers used for floating point argument passing * into the va_list object in case there were actual float arguments from * the caller. In practice this is almost never the case, especially if * FPU access is disabled and we're trapped while in exception context. * Rather than flushing the FPU context to its owner and enabling access * just to let the corresponding STR instructions execute, we simply * simulate them and leave the FPU access disabled. This also avoids the * need for disabling interrupts in syscalls and IRQ handlers as well. */ static bool simulate_str_q_insn(struct arch_esf *esf) { /* * Support only the "FP in exception" cases for now. * We know there is no saved FPU context to check nor any * userspace stack memory to validate in that case. */ if (arch_exception_depth() <= 1) { return false; } uint32_t *pc = (uint32_t *)esf->elr; /* The original (interrupted) sp is the top of the esf structure */ uintptr_t sp = (uintptr_t)esf + sizeof(*esf); for (;;) { uint32_t insn = *pc; /* * We're looking for STR (immediate, SIMD&FP) of the form: * * STR Q<n>, [SP, #<pimm>] * * where 0 <= <n> <= 7 and <pimm> is a 12-bits multiple of 16. */ if ((insn & 0xffc003f8) != 0x3d8003e0) { break; } uint32_t pimm = (insn >> 10) & 0xfff; /* Zero the location as the above STR would have done */ *(__int128 *)(sp + pimm * 16) = 0; /* move to the next instruction */ pc++; } /* did we do something? 
*/ if (pc != (uint32_t *)esf->elr) { /* resume execution past the simulated instructions */ esf->elr = (uintptr_t)pc; return true; } return false; } /* * Process the FPU trap. * * This usually means that FP regs belong to another thread. Save them * to that thread's save area and restore the current thread's content. * * We also get here when FP regs are used while in exception as FP access * is always disabled by default in that case. If so we save the FPU content * to the owning thread and simply enable FPU access. Exceptions should be * short and don't have persistent register contexts when they're done so * there is nothing to save/restore for that context... as long as we * don't get interrupted that is. To ensure that we mask interrupts to * the triggering exception context. */ void z_arm64_fpu_trap(struct arch_esf *esf) { __ASSERT(read_daif() & DAIF_IRQ_BIT, "must be called with IRQs disabled"); /* check if a quick simulation can do it */ if (simulate_str_q_insn(esf)) { return; } /* turn on FPU access */ write_cpacr_el1(read_cpacr_el1() | CPACR_EL1_FPEN_NOTRAP); barrier_isync_fence_full(); /* save current owner's content if any */ struct k_thread *owner = atomic_ptr_get(&_current_cpu->arch.fpu_owner); if (owner) { z_arm64_fpu_save(&owner->arch.saved_fp_context); barrier_dsync_fence_full(); atomic_ptr_clear(&_current_cpu->arch.fpu_owner); DBG("save", owner); } if (arch_exception_depth() > 1) { /* * We were already in exception when the FPU access trap. * We give it access and prevent any further IRQ recursion * by disabling IRQs as we wouldn't be able to preserve the * interrupted exception's FPU context. */ esf->spsr |= DAIF_IRQ_BIT; return; } #ifdef CONFIG_SMP /* * Make sure the FPU context we need isn't live on another CPU. * The current CPU's FPU context is NULL at this point. 
*/ flush_owned_fpu(_current); #endif /* become new owner */ atomic_ptr_set(&_current_cpu->arch.fpu_owner, _current); /* restore our content */ z_arm64_fpu_restore(&_current->arch.saved_fp_context); DBG("restore", _current); } /* * Perform lazy FPU context switching by simply granting or denying * access to FP regs based on FPU ownership before leaving the last * exception level in case of exceptions, or during a thread context * switch with the exception level of the new thread being 0. * If current thread doesn't own the FP regs then it will trap on its * first access and then the actual FPU context switching will occur. */ static void fpu_access_update(unsigned int exc_update_level) { __ASSERT(read_daif() & DAIF_IRQ_BIT, "must be called with IRQs disabled"); uint64_t cpacr = read_cpacr_el1(); if (arch_exception_depth() == exc_update_level) { /* We're about to execute non-exception code */ if (atomic_ptr_get(&_current_cpu->arch.fpu_owner) == _current) { /* turn on FPU access */ write_cpacr_el1(cpacr | CPACR_EL1_FPEN_NOTRAP); } else { /* deny FPU access */ write_cpacr_el1(cpacr & ~CPACR_EL1_FPEN_NOTRAP); } } else { /* * Any new exception level should always trap on FPU * access as we want to make sure IRQs are disabled before * granting it access (see z_arm64_fpu_trap() documentation). */ write_cpacr_el1(cpacr & ~CPACR_EL1_FPEN_NOTRAP); } barrier_isync_fence_full(); } /* * This is called on every exception exit except for z_arm64_fpu_trap(). * In that case the exception level of interest is 1 (soon to be 0). */ void z_arm64_fpu_exit_exc(void) { fpu_access_update(1); } /* * This is called from z_arm64_context_switch(). FPU access may be granted * only if exception level is 0. If we switch to a thread that is still in * some exception context then FPU access would be re-evaluated at exception * exit time via z_arm64_fpu_exit_exc(). 
*/ void z_arm64_fpu_thread_context_switch(void) { fpu_access_update(0); } int arch_float_disable(struct k_thread *thread) { if (thread != NULL) { unsigned int key = arch_irq_lock(); #ifdef CONFIG_SMP flush_owned_fpu(thread); #else if (thread == atomic_ptr_get(&_current_cpu->arch.fpu_owner)) { arch_flush_local_fpu(); } #endif arch_irq_unlock(key); } return 0; } int arch_float_enable(struct k_thread *thread, unsigned int options) { /* floats always gets enabled automatically at the moment */ return 0; } ```
/content/code_sandbox/arch/arm64/core/fpu.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,718
```c /* * */ /** * @file * @brief ARM64 Cortex-A interrupt initialisation */ #include <zephyr/arch/cpu.h> #include <zephyr/drivers/interrupt_controller/gic.h> /** * @brief Initialise interrupts * * This function invokes the ARM Generic Interrupt Controller (GIC) driver to * initialise the interrupt system on the SoCs that use the GIC as the primary * interrupt controller. * * When a custom interrupt controller is used, however, the SoC layer function * is invoked for SoC-specific interrupt system initialisation. */ void z_arm64_interrupt_init(void) { #ifdef CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER /* Invoke SoC-specific interrupt controller initialisation */ z_soc_irq_init(); #endif } ```
/content/code_sandbox/arch/arm64/core/irq_init.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
155
```objective-c /* * */ /** * @file * @brief Definitions for boot code */ #ifndef _BOOT_H_ #define _BOOT_H_ #ifndef _ASMLANGUAGE extern void *_vector_table[]; extern void __start(void); #endif /* _ASMLANGUAGE */ /* Offsets into the boot_params structure */ #define BOOT_PARAM_MPID_OFFSET 0 #define BOOT_PARAM_SP_OFFSET 8 #define BOOT_PARAM_VOTING_OFFSET 16 #endif /* _BOOT_H_ */ ```
/content/code_sandbox/arch/arm64/core/boot.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
98
```c /* * */ /** * @file * @brief Software interrupts utility code - ARM64 implementation */ #include <zephyr/kernel.h> #include <zephyr/irq_offload.h> #include <exception.h> void arch_irq_offload(irq_offload_routine_t routine, const void *parameter) { register const void *x0 __asm__("x0") = routine; register const void *x1 __asm__("x1") = parameter; __asm__ volatile ("svc %[svid]" : : [svid] "i" (_SVC_CALL_IRQ_OFFLOAD), "r" (x0), "r" (x1)); } ```
/content/code_sandbox/arch/arm64/core/irq_offload.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
142
```linker script /* * */ #if LINKER_ZEPHYR_FINAL && defined(CONFIG_ISR_TABLES_LOCAL_DECLARATION) INCLUDE isr_tables_swi.ld #endif ```
/content/code_sandbox/arch/arm64/core/swi_tables.ld
linker script
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
31
```c /* * */ /** * @file * @brief New thread creation for ARM64 Cortex-A * * Core thread related primitives for the ARM64 Cortex-A */ #include <zephyr/kernel.h> #include <ksched.h> #include <zephyr/arch/cpu.h> /* * Note about stack usage: * * [ see also comments in include/arch/arm64/thread_stack.h ] * * - kernel threads are running in EL1 using SP_EL1 as stack pointer during * normal execution and during exceptions. They are by definition already * running in a privileged stack that is their own. * * - user threads are running in EL0 using SP_EL0 as stack pointer during * normal execution. When at exception is taken or a syscall is called the * stack pointer switches to SP_EL1 and the execution starts using the * privileged portion of the user stack without touching SP_EL0. This portion * is marked as not user accessible in the MMU/MPU. * * - a stack guard region will be added bellow the kernel stack when * ARM64_STACK_PROTECTION is enabled. In this case, SP_EL0 will always point * to the safe exception stack in the kernel space. For the kernel thread, * SP_EL0 will not change always pointing to safe exception stack. For the * userspace thread, SP_EL0 will switch from the user stack to the safe * exception stack when entering the EL1 mode, and restore to the user stack * when backing to userspace (EL0). 
* * Kernel threads: * * High memory addresses * * +---------------+ <- stack_ptr * E | ESF | * L |<<<<<<<<<<<<<<<| <- SP_EL1 * 1 | | * +---------------+ <- stack limit * | Stack guard | } Z_ARM64_STACK_GUARD_SIZE (protected by MMU/MPU) * +---------------+ <- stack_obj * * Low Memory addresses * * * User threads: * * High memory addresses * * +---------------+ <- stack_ptr * E | | * L |<<<<<<<<<<<<<<<| <- SP_EL0 * 0 | | * +---------------+ ..............| * E | ESF | | Privileged portion of the stack * L +>>>>>>>>>>>>>>>+ <- SP_EL1 |_ used during exceptions and syscalls * 1 | | | of size ARCH_THREAD_STACK_RESERVED * +---------------+ <- stack limit| * | Stack guard | } Z_ARM64_STACK_GUARD_SIZE (protected by MMU/MPU) * +---------------+ <- stack_obj * * Low Memory addresses * * When a kernel thread switches to user mode the SP_EL0 and SP_EL1 * values are reset accordingly in arch_user_mode_enter(). */ #ifdef CONFIG_USERSPACE static bool is_user(struct k_thread *thread) { return (thread->base.user_options & K_USER) != 0; } #endif void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, char *stack_ptr, k_thread_entry_t entry, void *p1, void *p2, void *p3) { extern void z_arm64_exit_exc(void); struct arch_esf *pInitCtx; /* * Clean the thread->arch to avoid unexpected behavior because the * thread->arch might be dirty */ memset(&thread->arch, 0, sizeof(thread->arch)); /* * The ESF is now hosted at the top of the stack. For user threads this * is also fine because at this stage they are still running in EL1. * The context will be relocated by arch_user_mode_enter() before * dropping into EL0. 
*/ pInitCtx = Z_STACK_PTR_TO_FRAME(struct arch_esf, stack_ptr); pInitCtx->x0 = (uint64_t)entry; pInitCtx->x1 = (uint64_t)p1; pInitCtx->x2 = (uint64_t)p2; pInitCtx->x3 = (uint64_t)p3; /* * - ELR_ELn: to be used by eret in z_arm64_exit_exc() to return * to z_thread_entry() with entry in x0(entry_point) and the * parameters already in place in x1(arg1), x2(arg2), x3(arg3). * - SPSR_ELn: to enable IRQs (we are masking FIQs). */ #ifdef CONFIG_USERSPACE /* * If the new thread is a user thread we jump into * arch_user_mode_enter() when still in EL1. */ if (is_user(thread)) { pInitCtx->elr = (uint64_t)arch_user_mode_enter; } else { pInitCtx->elr = (uint64_t)z_thread_entry; } #else pInitCtx->elr = (uint64_t)z_thread_entry; #endif /* Keep using SP_EL1 */ pInitCtx->spsr = SPSR_MODE_EL1H | DAIF_FIQ_BIT; /* thread birth happens through the exception return path */ thread->arch.exception_depth = 1; /* * We are saving SP_EL1 to pop out entry and parameters when going * through z_arm64_exit_exc(). For user threads the definitive location * of SP_EL1 will be set in arch_user_mode_enter(). 
*/ thread->callee_saved.sp_elx = (uint64_t)pInitCtx; thread->callee_saved.lr = (uint64_t)z_arm64_exit_exc; thread->switch_handle = thread; #if defined(CONFIG_ARM64_STACK_PROTECTION) thread->arch.stack_limit = (uint64_t)stack + Z_ARM64_STACK_GUARD_SIZE; z_arm64_thread_mem_domains_init(thread); #endif } #ifdef CONFIG_USERSPACE FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, void *p1, void *p2, void *p3) { uintptr_t stack_el0, stack_el1; uint64_t tmpreg; /* Map the thread stack */ z_arm64_thread_mem_domains_init(_current); /* Top of the user stack area */ stack_el0 = Z_STACK_PTR_ALIGN(_current->stack_info.start + _current->stack_info.size - _current->stack_info.delta); /* Top of the privileged non-user-accessible part of the stack */ stack_el1 = (uintptr_t)(_current->stack_obj + ARCH_THREAD_STACK_RESERVED); register void *x0 __asm__("x0") = user_entry; register void *x1 __asm__("x1") = p1; register void *x2 __asm__("x2") = p2; register void *x3 __asm__("x3") = p3; /* we don't want to be disturbed when playing with SPSR and ELR */ arch_irq_lock(); /* set up and drop into EL0 */ __asm__ volatile ( "mrs %[tmp], tpidrro_el0\n\t" "orr %[tmp], %[tmp], %[is_usermode_flag]\n\t" "msr tpidrro_el0, %[tmp]\n\t" "msr elr_el1, %[elr]\n\t" "msr spsr_el1, %[spsr]\n\t" "msr sp_el0, %[sp_el0]\n\t" "mov sp, %[sp_el1]\n\t" "eret" : [tmp] "=&r" (tmpreg) : "r" (x0), "r" (x1), "r" (x2), "r" (x3), [is_usermode_flag] "i" (TPIDRROEL0_IN_EL0), [elr] "r" (z_thread_entry), [spsr] "r" (DAIF_FIQ_BIT | SPSR_MODE_EL0T), [sp_el0] "r" (stack_el0), [sp_el1] "r" (stack_el1) : "memory"); CODE_UNREACHABLE; } #endif ```
/content/code_sandbox/arch/arm64/core/thread.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,790
```c /* * */ #include <string.h> #include <zephyr/debug/coredump.h> /* Identify the version of this block (in case of architecture changes). * To be interpreted by the target architecture specific block parser. */ #define ARCH_HDR_VER 1 /* Structure to store the architecture registers passed arch_coredump_info_dump * As callee saved registers are not provided in struct arch_esf structure in Zephyr * we just need 22 registers. */ struct arm64_arch_block { struct { uint64_t x0; uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; uint64_t x6; uint64_t x7; uint64_t x8; uint64_t x9; uint64_t x10; uint64_t x11; uint64_t x12; uint64_t x13; uint64_t x14; uint64_t x15; uint64_t x16; uint64_t x17; uint64_t x18; uint64_t lr; uint64_t spsr; uint64_t elr; } r; } __packed; /* * Register block takes up too much stack space * if defined within function. So define it here. */ static struct arm64_arch_block arch_blk; void arch_coredump_info_dump(const struct arch_esf *esf) { /* Target architecture information header */ /* Information just relevant to the python parser */ struct coredump_arch_hdr_t hdr = { .id = COREDUMP_ARCH_HDR_ID, .hdr_version = ARCH_HDR_VER, .num_bytes = sizeof(arch_blk), }; /* Nothing to process */ if (esf == NULL) { return; } (void)memset(&arch_blk, 0, sizeof(arch_blk)); /* * Copies the thread registers to a memory block that will be printed out * The thread registers are already provided by structure struct arch_esf */ arch_blk.r.x0 = esf->x0; arch_blk.r.x1 = esf->x1; arch_blk.r.x2 = esf->x2; arch_blk.r.x3 = esf->x3; arch_blk.r.x4 = esf->x4; arch_blk.r.x5 = esf->x5; arch_blk.r.x6 = esf->x6; arch_blk.r.x7 = esf->x7; arch_blk.r.x8 = esf->x8; arch_blk.r.x9 = esf->x9; arch_blk.r.x10 = esf->x10; arch_blk.r.x11 = esf->x11; arch_blk.r.x12 = esf->x12; arch_blk.r.x13 = esf->x13; arch_blk.r.x14 = esf->x14; arch_blk.r.x15 = esf->x15; arch_blk.r.x16 = esf->x16; arch_blk.r.x17 = esf->x17; arch_blk.r.x18 = esf->x18; arch_blk.r.lr = esf->lr; arch_blk.r.spsr = 
esf->spsr; arch_blk.r.elr = esf->elr; /* Send for output */ coredump_buffer_output((uint8_t *)&hdr, sizeof(hdr)); coredump_buffer_output((uint8_t *)&arch_blk, sizeof(arch_blk)); } uint16_t arch_coredump_tgt_code_get(void) { return COREDUMP_TGT_ARM64; } ```
/content/code_sandbox/arch/arm64/core/coredump.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
794
```sourcepawn /* * */ #ifndef _MACRO_PRIV_INC_ #define _MACRO_PRIV_INC_ #include <zephyr/arch/arm64/tpidrro_el0.h> #ifdef _ASMLANGUAGE /* * Get CPU id */ .macro get_cpu_id xreg0 mrs \xreg0, mpidr_el1 /* FIMXME: aff3 not taken into consideration */ ubfx \xreg0, \xreg0, #0, #24 .endm /* * Get CPU logic id by looking up cpu_node_list * returns * xreg0: MPID * xreg1: logic id (0 ~ CONFIG_MP_MAX_NUM_CPUS - 1) * clobbers: xreg0, xreg1, xreg2, xreg3 */ .macro get_cpu_logic_id xreg0, xreg1, xreg2, xreg3 get_cpu_id \xreg0 ldr \xreg3, =cpu_node_list mov \xreg1, 0 1: ldr \xreg2, [\xreg3, \xreg1, lsl 3] cmp \xreg2, \xreg0 beq 2f add \xreg1, \xreg1, 1 cmp \xreg1, #CONFIG_MP_MAX_NUM_CPUS bne 1b b . 2: .endm /* * Get CPU pointer * Note: keep in sync with `arch_curr_cpu` in include/zephyr/arch/arm64/arch_inlines.h */ .macro get_cpu xreg0 mrs \xreg0, tpidrro_el0 and \xreg0, \xreg0, #TPIDRROEL0_CURR_CPU .endm #endif /* _ASMLANGUAGE */ #endif /* _MACRO_PRIV_INC_ */ ```
/content/code_sandbox/arch/arm64/core/macro_priv.inc
sourcepawn
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
405
```objective-c /* * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. * */ /* Set below flag to get debug prints */ #define MMU_DEBUG_PRINTS 0 #if MMU_DEBUG_PRINTS /* To dump page table entries while filling them, set DUMP_PTE macro */ #define DUMP_PTE 0 #define MMU_DEBUG(fmt, ...) printk(fmt, ##__VA_ARGS__) #else #define MMU_DEBUG(...) #endif /* * 48-bit address with 4KB granule size: * * +------------+------------+------------+------------+-----------+ * | VA [47:39] | VA [38:30] | VA [29:21] | VA [20:12] | VA [11:0] | * +---------------------------------------------------------------+ * | L0 | L1 | L2 | L3 | block off | * +------------+------------+------------+------------+-----------+ */ /* Only 4K granule is supported */ #define PAGE_SIZE_SHIFT 12U /* 48-bit VA address */ #define VA_SIZE_SHIFT_MAX 48U /* Maximum 4 XLAT table levels (L0 - L3) */ #define XLAT_LAST_LEVEL 3U /* The VA shift of L3 depends on the granule size */ #define L3_XLAT_VA_SIZE_SHIFT PAGE_SIZE_SHIFT /* Number of VA bits to assign to each table (9 bits) */ #define Ln_XLAT_VA_SIZE_SHIFT (PAGE_SIZE_SHIFT - 3) /* Starting bit in the VA address for each level */ #define L2_XLAT_VA_SIZE_SHIFT (L3_XLAT_VA_SIZE_SHIFT + Ln_XLAT_VA_SIZE_SHIFT) #define L1_XLAT_VA_SIZE_SHIFT (L2_XLAT_VA_SIZE_SHIFT + Ln_XLAT_VA_SIZE_SHIFT) #define L0_XLAT_VA_SIZE_SHIFT (L1_XLAT_VA_SIZE_SHIFT + Ln_XLAT_VA_SIZE_SHIFT) #define LEVEL_TO_VA_SIZE_SHIFT(level) \ (PAGE_SIZE_SHIFT + (Ln_XLAT_VA_SIZE_SHIFT * \ (XLAT_LAST_LEVEL - (level)))) /* Number of entries for each table (512) */ #define Ln_XLAT_NUM_ENTRIES ((1U << PAGE_SIZE_SHIFT) / 8U) /* Virtual Address Index within a given translation table level */ #define XLAT_TABLE_VA_IDX(va_addr, level) \ ((va_addr >> LEVEL_TO_VA_SIZE_SHIFT(level)) & (Ln_XLAT_NUM_ENTRIES - 1)) /* * Calculate the initial translation table level from CONFIG_ARM64_VA_BITS * For a 4 KB page size: * * (va_bits <= 21) - base level 3 * (22 <= va_bits <= 30) - base level 2 * (31 <= va_bits 
<= 39) - base level 1 * (40 <= va_bits <= 48) - base level 0 */ #define GET_BASE_XLAT_LEVEL(va_bits) \ ((va_bits > L0_XLAT_VA_SIZE_SHIFT) ? 0U \ : (va_bits > L1_XLAT_VA_SIZE_SHIFT) ? 1U \ : (va_bits > L2_XLAT_VA_SIZE_SHIFT) ? 2U : 3U) /* Level for the base XLAT */ #define BASE_XLAT_LEVEL GET_BASE_XLAT_LEVEL(CONFIG_ARM64_VA_BITS) #if (CONFIG_ARM64_PA_BITS == 48) #define TCR_PS_BITS TCR_PS_BITS_256TB #elif (CONFIG_ARM64_PA_BITS == 44) #define TCR_PS_BITS TCR_PS_BITS_16TB #elif (CONFIG_ARM64_PA_BITS == 42) #define TCR_PS_BITS TCR_PS_BITS_4TB #elif (CONFIG_ARM64_PA_BITS == 40) #define TCR_PS_BITS TCR_PS_BITS_1TB #elif (CONFIG_ARM64_PA_BITS == 36) #define TCR_PS_BITS TCR_PS_BITS_64GB #else #define TCR_PS_BITS TCR_PS_BITS_4GB #endif /* Upper and lower attributes mask for page/block descriptor */ #define DESC_ATTRS_UPPER_MASK GENMASK(63, 51) #define DESC_ATTRS_LOWER_MASK GENMASK(11, 2) #define DESC_ATTRS_MASK (DESC_ATTRS_UPPER_MASK | DESC_ATTRS_LOWER_MASK) /* * PTE descriptor can be Block descriptor or Table descriptor * or Page descriptor. */ #define PTE_DESC_TYPE_MASK 3ULL #define PTE_BLOCK_DESC 1ULL #define PTE_TABLE_DESC 3ULL #define PTE_PAGE_DESC 3ULL #define PTE_INVALID_DESC 0ULL /* * Block and Page descriptor attributes fields */ #define PTE_BLOCK_DESC_MEMTYPE(x) (x << 2) #define PTE_BLOCK_DESC_NS (1ULL << 5) #define PTE_BLOCK_DESC_AP_ELx (1ULL << 6) #define PTE_BLOCK_DESC_AP_EL_HIGHER (0ULL << 6) #define PTE_BLOCK_DESC_AP_RO (1ULL << 7) #define PTE_BLOCK_DESC_AP_RW (0ULL << 7) #define PTE_BLOCK_DESC_NON_SHARE (0ULL << 8) #define PTE_BLOCK_DESC_OUTER_SHARE (2ULL << 8) #define PTE_BLOCK_DESC_INNER_SHARE (3ULL << 8) #define PTE_BLOCK_DESC_AF (1ULL << 10) #define PTE_BLOCK_DESC_NG (1ULL << 11) #define PTE_BLOCK_DESC_PXN (1ULL << 53) #define PTE_BLOCK_DESC_UXN (1ULL << 54) /* * Descriptor physical address field bits */ #define PTE_PHYSADDR_MASK GENMASK64(47, PAGE_SIZE_SHIFT) /* * TCR definitions. 
*/ #define TCR_EL1_IPS_SHIFT 32U #define TCR_EL2_PS_SHIFT 16U #define TCR_EL3_PS_SHIFT 16U #define TCR_T0SZ_SHIFT 0U #define TCR_T0SZ(x) ((64 - (x)) << TCR_T0SZ_SHIFT) #define TCR_IRGN_NC (0ULL << 8) #define TCR_IRGN_WBWA (1ULL << 8) #define TCR_IRGN_WT (2ULL << 8) #define TCR_IRGN_WBNWA (3ULL << 8) #define TCR_IRGN_MASK (3ULL << 8) #define TCR_ORGN_NC (0ULL << 10) #define TCR_ORGN_WBWA (1ULL << 10) #define TCR_ORGN_WT (2ULL << 10) #define TCR_ORGN_WBNWA (3ULL << 10) #define TCR_ORGN_MASK (3ULL << 10) #define TCR_SHARED_NON (0ULL << 12) #define TCR_SHARED_OUTER (2ULL << 12) #define TCR_SHARED_INNER (3ULL << 12) #define TCR_TG0_4K (0ULL << 14) #define TCR_TG0_64K (1ULL << 14) #define TCR_TG0_16K (2ULL << 14) #define TCR_EPD1_DISABLE (1ULL << 23) #define TCR_TG1_16K (1ULL << 30) #define TCR_TG1_4K (2ULL << 30) #define TCR_TG1_64K (3ULL << 30) #define TCR_PS_BITS_4GB 0x0ULL #define TCR_PS_BITS_64GB 0x1ULL #define TCR_PS_BITS_1TB 0x2ULL #define TCR_PS_BITS_4TB 0x3ULL #define TCR_PS_BITS_16TB 0x4ULL #define TCR_PS_BITS_256TB 0x5ULL /* * ARM guarantees at least 8 ASID bits. * We may have more available, but do not make use of them for the time being. */ #define VM_ASID_BITS 8 #define TTBR_ASID_SHIFT 48 ```
/content/code_sandbox/arch/arm64/core/mmu.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,778
```c /* * */ #include <kernel_internal.h> #include <zephyr/sys/barrier.h> #include "boot.h" void z_arm64_el2_init(void); void __weak z_arm64_el_highest_plat_init(void) { /* do nothing */ } void __weak z_arm64_el3_plat_init(void) { /* do nothing */ } void __weak z_arm64_el2_plat_init(void) { /* do nothing */ } void __weak z_arm64_el1_plat_init(void) { /* do nothing */ } void z_arm64_el_highest_init(void) { if (is_el_highest_implemented()) { write_cntfrq_el0(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC); } z_arm64_el_highest_plat_init(); barrier_isync_fence_full(); } #if !defined(CONFIG_ARMV8_R) enum el3_next_el { EL3_TO_EL2, EL3_TO_EL1_NO_EL2, EL3_TO_EL1_SKIP_EL2 }; static inline enum el3_next_el el3_get_next_el(void) { if (!is_el_implemented(2)) { return EL3_TO_EL1_NO_EL2; } else if (is_in_secure_state() && !is_el2_sec_supported()) { /* * Is considered an illegal return "[..] a return to EL2 when EL3 is * implemented and the value of the SCR_EL3.NS bit is 0 if * ARMv8.4-SecEL2 is not implemented" (D1.11.2 from ARM DDI 0487E.a) */ return EL3_TO_EL1_SKIP_EL2; } else { return EL3_TO_EL2; } } void z_arm64_el3_init(void) { uint64_t reg; /* Setup vector table */ write_vbar_el3((uint64_t)_vector_table); barrier_isync_fence_full(); reg = 0U; /* Mostly RES0 */ reg &= ~(CPTR_TTA_BIT | /* Do not trap sysreg accesses */ CPTR_TFP_BIT | /* Do not trap SVE, SIMD and FP */ CPTR_TCPAC_BIT); /* Do not trap CPTR_EL2 / CPACR_EL1 accesses */ write_cptr_el3(reg); reg = 0U; /* Reset */ #ifdef CONFIG_ARMV8_A_NS reg |= SCR_NS_BIT; /* EL2 / EL3 non-secure */ #else if (is_in_secure_state() && is_el2_sec_supported()) { reg |= SCR_EEL2_BIT; /* Enable EL2 secure */ } #endif reg |= (SCR_RES1 | /* RES1 */ SCR_RW_BIT | /* EL2 execution state is AArch64 */ SCR_ST_BIT | /* Do not trap EL1 accesses to timer */ SCR_HCE_BIT | /* Do not trap HVC */ SCR_SMD_BIT); /* Do not trap SMC */ write_scr_el3(reg); #if defined(CONFIG_GIC_V3) reg = read_sysreg(ICC_SRE_EL3); reg |= (ICC_SRE_ELx_DFB_BIT | /* Disable FIQ bypass 
*/ ICC_SRE_ELx_DIB_BIT | /* Disable IRQ bypass */ ICC_SRE_ELx_SRE_BIT | /* System register interface is used */ ICC_SRE_EL3_EN_BIT); /* Enables lower Exception level access to ICC_SRE_EL1 */ write_sysreg(reg, ICC_SRE_EL3); #endif z_arm64_el3_plat_init(); barrier_isync_fence_full(); if (el3_get_next_el() == EL3_TO_EL1_SKIP_EL2) { /* * handle EL2 init in EL3, as it still needs to be done, * but we are going to be skipping EL2. */ z_arm64_el2_init(); } } #endif /* CONFIG_ARMV8_R */ void z_arm64_el2_init(void) { uint64_t reg; reg = read_sctlr_el2(); reg |= (SCTLR_EL2_RES1 | /* RES1 */ SCTLR_I_BIT | /* Enable i-cache */ SCTLR_SA_BIT); /* Enable SP alignment check */ write_sctlr_el2(reg); reg = read_hcr_el2(); /* when EL2 is enable in current security status: * Clear TGE bit: All exceptions that would not be routed to EL2; * Clear AMO bit: Physical SError interrupts are not taken to EL2 and EL3. * Clear IMO bit: Physical IRQ interrupts are not taken to EL2 and EL3. */ reg &= ~(HCR_IMO_BIT | HCR_AMO_BIT | HCR_TGE_BIT); reg |= HCR_RW_BIT; /* EL1 Execution state is AArch64 */ write_hcr_el2(reg); reg = 0U; /* RES0 */ reg |= CPTR_EL2_RES1; /* RES1 */ reg &= ~(CPTR_TFP_BIT | /* Do not trap SVE, SIMD and FP */ CPTR_TCPAC_BIT); /* Do not trap CPACR_EL1 accesses */ write_cptr_el2(reg); zero_cntvoff_el2(); /* Set 64-bit virtual timer offset to 0 */ zero_cnthctl_el2(); #ifdef CONFIG_CPU_AARCH64_CORTEX_R zero_cnthps_ctl_el2(); #else zero_cnthp_ctl_el2(); #endif #ifdef CONFIG_ARM64_SET_VMPIDR_EL2 reg = read_mpidr_el1(); write_vmpidr_el2(reg); #endif /* * Enable this if/when we use the hypervisor timer. 
* write_cnthp_cval_el2(~(uint64_t)0); */ z_arm64_el2_plat_init(); barrier_isync_fence_full(); } void z_arm64_el1_init(void) { uint64_t reg; /* Setup vector table */ write_vbar_el1((uint64_t)_vector_table); barrier_isync_fence_full(); reg = 0U; /* RES0 */ reg |= CPACR_EL1_FPEN_NOTRAP; /* Do not trap NEON/SIMD/FP initially */ /* TODO: CONFIG_FLOAT_*_FORBIDDEN */ write_cpacr_el1(reg); reg = read_sctlr_el1(); reg |= (SCTLR_EL1_RES1 | /* RES1 */ SCTLR_I_BIT | /* Enable i-cache */ SCTLR_C_BIT | /* Enable d-cache */ SCTLR_SA_BIT); /* Enable SP alignment check */ write_sctlr_el1(reg); write_cntv_cval_el0(~(uint64_t)0); /* * Enable these if/when we use the corresponding timers. * write_cntp_cval_el0(~(uint64_t)0); * write_cntps_cval_el1(~(uint64_t)0); */ z_arm64_el1_plat_init(); barrier_isync_fence_full(); } #if !defined(CONFIG_ARMV8_R) void z_arm64_el3_get_next_el(uint64_t switch_addr) { uint64_t spsr; write_elr_el3(switch_addr); /* Mask the DAIF */ spsr = SPSR_DAIF_MASK; if (el3_get_next_el() == EL3_TO_EL2) { /* Dropping into EL2 */ spsr |= SPSR_MODE_EL2T; } else { /* Dropping into EL1 */ spsr |= SPSR_MODE_EL1T; } write_spsr_el3(spsr); } #endif /* CONFIG_ARMV8_R */ ```
/content/code_sandbox/arch/arm64/core/reset.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,686
```unknown /* * */ #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #include <offsets_short.h> #include <zephyr/arch/cpu.h> #include <zephyr/syscall.h> #include <zephyr/arch/arm64/mm.h> #include "macro_priv.inc" _ASM_FILE_PROLOGUE /* * size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg) */ GTEXT(z_arm64_user_string_nlen_fault_start) GTEXT(z_arm64_user_string_nlen_fault_end) GTEXT(z_arm64_user_string_nlen_fixup) GTEXT(arch_user_string_nlen) SECTION_FUNC(TEXT, arch_user_string_nlen) mov x3, x0 mov x0, #0 mov x4, #0 strlen_loop: cmp x0, x1 beq strlen_done z_arm64_user_string_nlen_fault_start: ldrb w5, [x3, x0] z_arm64_user_string_nlen_fault_end: cbz x5, strlen_done add x0, x0, #1 b strlen_loop z_arm64_user_string_nlen_fixup: mov x4, #-1 mov x0, #0 strlen_done: str w4, [x2] ret /* * int arch_buffer_validate(const void *addr, size_t size, int write) */ GTEXT(arch_buffer_validate) SECTION_FUNC(TEXT, arch_buffer_validate) add x1, x1, x0 mrs x3, DAIF msr DAIFSET, #DAIFSET_IRQ_BIT abv_loop: cbnz w2, 1f at S1E0R, x0 b 2f 1: at S1E0W, x0 2: orr x0, x0, #(MEM_DOMAIN_ALIGN_AND_SIZE - 1) add x0, x0, #1 isb mrs x4, PAR_EL1 tbnz x4, #0, abv_fail cmp x0, x1 blo abv_loop msr DAIF, x3 mov x0, #0 ret abv_fail: msr DAIF, x3 mov x0, #-1 ret /* * System call entry point. 
*/ GTEXT(z_arm64_do_syscall) SECTION_FUNC(TEXT, z_arm64_do_syscall) /* Recover the syscall parameters from the ESF */ ldp x0, x1, [sp, ___esf_t_x0_x1_OFFSET] ldp x2, x3, [sp, ___esf_t_x2_x3_OFFSET] ldp x4, x5, [sp, ___esf_t_x4_x5_OFFSET] /* Use the ESF as SSF */ mov x6, sp /* Recover the syscall ID */ ldr x8, [sp, ___esf_t_x8_x9_OFFSET] /* Check whether the ID is valid */ ldr x9, =K_SYSCALL_LIMIT cmp x8, x9 blo valid_syscall_id /* Save the bad ID for handler_bad_syscall() */ mov x0, x8 ldr x8, =K_SYSCALL_BAD valid_syscall_id: ldr x9, =_k_syscall_table ldr x9, [x9, x8, lsl #3] /* Jump into the syscall */ msr daifclr, #(DAIFSET_IRQ_BIT) blr x9 msr daifset, #(DAIFSET_IRQ_BIT) /* Save the return value into the ESF */ str x0, [sp, ___esf_t_x0_x1_OFFSET] /* Return from exception */ b z_arm64_exit_exc ```
/content/code_sandbox/arch/arm64/core/userspace.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
827
```unknown /* * Written by: Nicolas Pitre * */ #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> _ASM_FILE_PROLOGUE GTEXT(z_arm64_fpu_save) SECTION_FUNC(TEXT, z_arm64_fpu_save) stp q0, q1, [x0, #(16 * 0)] stp q2, q3, [x0, #(16 * 2)] stp q4, q5, [x0, #(16 * 4)] stp q6, q7, [x0, #(16 * 6)] stp q8, q9, [x0, #(16 * 8)] stp q10, q11, [x0, #(16 * 10)] stp q12, q13, [x0, #(16 * 12)] stp q14, q15, [x0, #(16 * 14)] stp q16, q17, [x0, #(16 * 16)] stp q18, q19, [x0, #(16 * 18)] stp q20, q21, [x0, #(16 * 20)] stp q22, q23, [x0, #(16 * 22)] stp q24, q25, [x0, #(16 * 24)] stp q26, q27, [x0, #(16 * 26)] stp q28, q29, [x0, #(16 * 28)] stp q30, q31, [x0, #(16 * 30)] mrs x1, fpsr mrs x2, fpcr str w1, [x0, #(16 * 32 + 0)] str w2, [x0, #(16 * 32 + 4)] ret GTEXT(z_arm64_fpu_restore) SECTION_FUNC(TEXT, z_arm64_fpu_restore) ldp q0, q1, [x0, #(16 * 0)] ldp q2, q3, [x0, #(16 * 2)] ldp q4, q5, [x0, #(16 * 4)] ldp q6, q7, [x0, #(16 * 6)] ldp q8, q9, [x0, #(16 * 8)] ldp q10, q11, [x0, #(16 * 10)] ldp q12, q13, [x0, #(16 * 12)] ldp q14, q15, [x0, #(16 * 14)] ldp q16, q17, [x0, #(16 * 16)] ldp q18, q19, [x0, #(16 * 18)] ldp q20, q21, [x0, #(16 * 20)] ldp q22, q23, [x0, #(16 * 22)] ldp q24, q25, [x0, #(16 * 24)] ldp q26, q27, [x0, #(16 * 26)] ldp q28, q29, [x0, #(16 * 28)] ldp q30, q31, [x0, #(16 * 30)] ldr w1, [x0, #(16 * 32 + 0)] ldr w2, [x0, #(16 * 32 + 4)] msr fpsr, x1 msr fpcr, x2 ret ```
/content/code_sandbox/arch/arm64/core/fpu.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
809
```c /* * */ /** * @file * @brief Full C support initialization * * Initialization of full C support: zero the .bss and call z_cstart(). * * Stack is available in this module, but not the global data/bss until their * initialization is performed. */ #include <kernel_internal.h> #include <zephyr/linker/linker-defs.h> extern void z_arm64_mm_init(bool is_primary_core); __weak void z_arm64_mm_init(bool is_primary_core) { } /** * * @brief Prepare to and run C code * * This routine prepares for the execution of and runs C code. * */ void z_prep_c(void) { /* Initialize tpidrro_el0 with our struct _cpu instance address */ write_tpidrro_el0((uintptr_t)&_kernel.cpus[0]); z_bss_zero(); z_data_copy(); #ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK /* After bss clean, _kernel.cpus is in bss section */ z_arm64_safe_exception_stack_init(); #endif z_arm64_mm_init(true); z_arm64_interrupt_init(); z_cstart(); CODE_UNREACHABLE; } #if CONFIG_MP_MAX_NUM_CPUS > 1 extern FUNC_NORETURN void arch_secondary_cpu_init(void); void z_arm64_secondary_prep_c(void) { arch_secondary_cpu_init(); CODE_UNREACHABLE; } #endif ```
/content/code_sandbox/arch/arm64/core/prep_c.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
295
```unknown /* * */ /* * ARM64 Cortex-A ISRs wrapper */ #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #include <offsets_short.h> #include <zephyr/arch/cpu.h> #include <zephyr/sw_isr_table.h> #include <zephyr/drivers/interrupt_controller/gic.h> #include "macro_priv.inc" _ASM_FILE_PROLOGUE GDATA(_sw_isr_table) /* * Wrapper around ISRs when inserted in software ISR table * * When inserted in the vector table, _isr_wrapper() demuxes the ISR table * using the running interrupt number as the index, and invokes the registered * ISR with its corresponding argument. When returning from the ISR, it * determines if a context switch needs to happen. */ GTEXT(_isr_wrapper) SECTION_FUNC(TEXT, _isr_wrapper) /* ++_current_cpu->nested to be checked by arch_is_in_isr() */ get_cpu x0 ldr w1, [x0, #___cpu_t_nested_OFFSET] add w2, w1, #1 str w2, [x0, #___cpu_t_nested_OFFSET] /* If not nested: switch to IRQ stack and save current sp on it. */ cbnz w1, 1f ldr x1, [x0, #___cpu_t_irq_stack_OFFSET] mov x2, sp mov sp, x1 str x2, [sp, #-16]! #if defined(CONFIG_ARM64_SAFE_EXCEPTION_STACK) sub x1, x1, #CONFIG_ISR_STACK_SIZE str x1, [x0, #_cpu_offset_to_current_stack_limit] #endif 1: #ifdef CONFIG_SCHED_THREAD_USAGE bl z_sched_usage_stop #endif #ifdef CONFIG_TRACING bl sys_trace_isr_enter #endif /* Get active IRQ number from the interrupt controller */ #if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER) bl arm_gic_get_active #else bl z_soc_irq_get_active #endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */ #if CONFIG_GIC_VER >= 3 /* * Ignore Special INTIDs 1020..1023 see 2.2.1 of Arm Generic Interrupt Controller * Architecture Specification GIC architecture version 3 and version 4 */ cmp x0, 1019 b.le oob cmp x0, 1023 b.gt oob b spurious_continue oob: #endif /* IRQ out of bounds */ mov x1, #(CONFIG_NUM_IRQS - 1) cmp x0, x1 b.hi spurious_continue stp x0, xzr, [sp, #-16]! 
/* Retrieve the interrupt service routine */ ldr x1, =_sw_isr_table add x1, x1, x0, lsl #4 /* table is 16-byte wide */ ldp x0, x3, [x1] /* arg in x0, ISR in x3 */ /* * Call the ISR. Unmask and mask again the IRQs to support nested * exception handlers */ msr daifclr, #(DAIFCLR_IRQ_BIT) blr x3 msr daifset, #(DAIFSET_IRQ_BIT) /* Signal end-of-interrupt */ ldp x0, xzr, [sp], #16 spurious_continue: #if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER) bl arm_gic_eoi #else bl z_soc_irq_eoi #endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */ #ifdef CONFIG_TRACING bl sys_trace_isr_exit #endif GTEXT(z_arm64_irq_done) z_arm64_irq_done: /* if (--_current_cpu->nested != 0) exit */ get_cpu x0 ldr w1, [x0, #___cpu_t_nested_OFFSET] subs w1, w1, #1 str w1, [x0, #___cpu_t_nested_OFFSET] bne exit /* No more nested: retrieve the task's stack. */ ldr x1, [sp] mov sp, x1 /* retrieve pointer to the current thread */ ldr x1, [x0, #___cpu_t_current_OFFSET] #if defined(CONFIG_ARM64_SAFE_EXCEPTION_STACK) /* arch_curr_cpu()->arch.current_stack_limit = thread->arch.stack_limit */ ldr x2, [x1, #_thread_offset_to_stack_limit] str x2, [x0, #_cpu_offset_to_current_stack_limit] #endif /* * Get next thread to schedule with z_get_next_switch_handle(). * We pass it a NULL as we didn't save the whole thread context yet. * If no scheduling is necessary then NULL will be returned. */ str x1, [sp, #-16]! mov x0, xzr bl z_get_next_switch_handle ldr x1, [sp], #16 cbz x0, exit /* * Switch thread * x0: new thread * x1: old thread */ bl z_arm64_context_switch exit: #ifdef CONFIG_STACK_SENTINEL bl z_check_stack_sentinel #endif b z_arm64_exit_exc ```
/content/code_sandbox/arch/arm64/core/isr_wrapper.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,122
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/kernel_structs.h> #include <kernel_internal.h> #include <kernel_tls.h> #include <zephyr/app_memory/app_memdomain.h> #include <zephyr/sys/util.h> size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr) { /* * TLS area for ARM has some data fields following by * thread data and bss. These fields are supposed to be * used by toolchain and OS TLS code to aid in locating * the TLS data/bss. Zephyr currently has no use for * this so we can simply skip these. However, since GCC * is generating code assuming these fields are there, * we simply skip them when setting the TLS pointer. */ /* * Since we are populating things backwards, * setup the TLS data/bss area first. */ stack_ptr -= z_tls_data_size(); z_tls_copy(stack_ptr); /* Skip two pointers due to toolchain */ stack_ptr -= sizeof(uintptr_t) * 2; /* * Set thread TLS pointer which is used in * context switch to point to TLS area. */ new_thread->tls = POINTER_TO_UINT(stack_ptr); return (z_tls_data_size() + (sizeof(uintptr_t) * 2)); } ```
/content/code_sandbox/arch/arm64/core/tls.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
292
```unknown /* * */ #include <zephyr/linker/sections.h> #include <zephyr/arch/cpu.h> #include "mmu.h" #if CONFIG_MMU_PAGE_SIZE == 4096 || defined(CONFIG_ARM_MPU) #define HEADER_PGSIZE 1 #elif CONFIG_MMU_PAGE_SIZE == 16384 #define HEADER_PGSIZE 2 #elif CONFIG_MMU_PAGE_SIZE == 65536 #define HEADER_PGSIZE 3 #else #define HEADER_PGSIZE 0 #warning "Can't determine page size for header flags" #endif #define HEADER_FLAGS (HEADER_PGSIZE << 1) _ASM_FILE_PROLOGUE SECTION_SUBSEC_FUNC(image_header,_image_header_section,_image_header) b __start // branch to kernel start .long 0 // reserved .quad 0 // Image load offset from start // of RAM, little-endian .quad _flash_used // Effective size of kernel // image, little-endian .quad HEADER_FLAGS // Informative flags, // little-endian .quad 0 // reserved .quad 0 // reserved .quad 0 // reserved .ascii "ARM\x64" // Magic number .long 0 // reserved ```
/content/code_sandbox/arch/arm64/core/header.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
291
```unknown /* * */ #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> _ASM_FILE_PROLOGUE /* * These simple memset and memcpy alternatives are necessary as the optimized * ones depend on the MMU to be active (see commit c5b898743a20). * * Furthermore, we can't implement those in C as the compiler is just too * smart for its own good and replaces our simple loops into direct calls * to memset or memcpy on its own. */ /* void z_early_memset(void *dst, int c, size_t n) */ GTEXT(z_early_memset) SECTION_FUNC(TEXT, z_early_memset) /* is dst pointer 8-bytes aligned? */ tst x0, #0x7 b.ne 2f /* at least 8 bytes to set? */ cmp x2, #8 b.lo 2f /* spread the byte value across whole 64 bits */ and x8, x1, #0xff mov x9, #0x0101010101010101 mul x8, x8, x9 1: /* 8 bytes at a time */ sub x2, x2, #8 cmp x2, #7 str x8, [x0], #8 b.hi 1b 2: /* at least one byte to set? */ cbz x2, 4f 3: /* one byte at a time */ subs x2, x2, #1 strb w8, [x0], #1 b.ne 3b 4: ret /* void z_early_memcpy(void *dst, const void *src, size_t n) */ GTEXT(z_early_memcpy) SECTION_FUNC(TEXT, z_early_memcpy) /* are dst and src pointers 8-bytes aligned? */ orr x8, x1, x0 tst x8, #0x7 b.ne 2f /* at least 8 bytes to copy? */ cmp x2, #8 b.lo 2f 1: /* 8 bytes at a time */ ldr x8, [x1], #8 sub x2, x2, #8 cmp x2, #7 str x8, [x0], #8 b.hi 1b 2: /* at least one byte to copy? */ cbz x2, 4f 3: /* one byte at a time */ ldrb w8, [x1], #1 subs x2, x2, #1 strb w8, [x0], #1 b.ne 3b 4: ret ```
/content/code_sandbox/arch/arm64/core/early_mem_funcs.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
596
```unknown /* * */ /* * Populated vector table */ #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #include <zephyr/offsets.h> #include <zephyr/arch/cpu.h> #include <zephyr/arch/arm64/tpidrro_el0.h> #include <offsets_short.h> #include "macro_priv.inc" _ASM_FILE_PROLOGUE /* * Save volatile registers, LR, SPSR_EL1 and ELR_EL1 * * Save the volatile registers and LR on the process stack. This is * needed if the thread is switched out because they can be clobbered by the * ISR and/or context switch. */ .macro z_arm64_enter_exc xreg0, xreg1, el /* * Two things can happen to the remaining registers: * * - No context-switch: in this case x19-x28 are callee-saved register * so we can be sure they are not going to be clobbered by ISR. * - Context-switch: the callee-saved registers are saved by * z_arm64_context_switch() in the kernel structure. */ sub sp, sp, ___esf_t_SIZEOF #ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK .if \el == el1 /* * EL1t mode cannot access sp_el1, so set x0 to sp_el1 without corrupt * other registers */ add sp, sp, x0 // sp' = sp + x0 sub x0, sp, x0 // x0' = sp' - x0 = sp msr SPSel, #0 stp x16, x17, [sp, -(___esf_t_SIZEOF - ___esf_t_x16_x17_OFFSET)] stp x18, lr, [sp, -(___esf_t_SIZEOF - ___esf_t_x18_lr_OFFSET)] bl z_arm64_quick_stack_check .endif #endif stp x0, x1, [sp, ___esf_t_x0_x1_OFFSET] stp x2, x3, [sp, ___esf_t_x2_x3_OFFSET] stp x4, x5, [sp, ___esf_t_x4_x5_OFFSET] stp x6, x7, [sp, ___esf_t_x6_x7_OFFSET] stp x8, x9, [sp, ___esf_t_x8_x9_OFFSET] stp x10, x11, [sp, ___esf_t_x10_x11_OFFSET] stp x12, x13, [sp, ___esf_t_x12_x13_OFFSET] stp x14, x15, [sp, ___esf_t_x14_x15_OFFSET] #ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK /* The expection from el1 does not need to save x16, x17, x18 and lr */ .if \el == el0 #endif stp x16, x17, [sp, ___esf_t_x16_x17_OFFSET] stp x18, lr, [sp, ___esf_t_x18_lr_OFFSET] #ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK .endif #endif #ifdef CONFIG_FRAME_POINTER str x29, [sp, ___esf_t_fp_OFFSET] #endif mrs \xreg0, spsr_el1 
mrs \xreg1, elr_el1 stp \xreg0, \xreg1, [sp, ___esf_t_spsr_elr_OFFSET] #ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK .if \el == el0 mrs x0, sp_el0 str x0, [sp, ___esf_t_sp_el0_OFFSET] /* Retrieving safe exception stack */ get_cpu x0 ldr x1, [x0, #_cpu_offset_to_safe_exception_stack] msr sp_el0, x1 .endif #endif /* Clear usermode flag and increment exception depth */ mrs \xreg0, tpidrro_el0 mov \xreg1, #TPIDRROEL0_EXC_UNIT bic \xreg0, \xreg0, #TPIDRROEL0_IN_EL0 add \xreg0, \xreg0, \xreg1 msr tpidrro_el0, \xreg0 #ifdef CONFIG_FPU_SHARING bl z_arm64_fpu_enter_exc #endif .endm /* * Four types of exceptions: * - synchronous: aborts from MMU, SP/CP alignment checking, unallocated * instructions, SVCs/SMCs/HVCs, ...) * - IRQ: group 1 (normal) interrupts * - FIQ: group 0 or secure interrupts * - SError: fatal system errors * * Four different contexts: * - from same exception level, when using the SP_EL0 stack pointer * - from same exception level, when using the SP_ELx stack pointer * - from lower exception level, when this is AArch64 * - from lower exception level, when this is AArch32 * * +------------------+------------------+-------------------------+ * | Address | Exception type | Description | * +------------------+------------------+-------------------------+ * | VBAR_ELn + 0x000 | Synchronous | Current EL with SP0 | * | + 0x080 | IRQ / vIRQ | | * | + 0x100 | FIQ / vFIQ | | * | + 0x180 | SError / vSError | | * +------------------+------------------+-------------------------+ * | + 0x200 | Synchronous | Current EL with SPx | * | + 0x280 | IRQ / vIRQ | | * | + 0x300 | FIQ / vFIQ | | * | + 0x380 | SError / vSError | | * +------------------+------------------+-------------------------+ * | + 0x400 | Synchronous | Lower EL using AArch64 | * | + 0x480 | IRQ / vIRQ | | * | + 0x500 | FIQ / vFIQ | | * | + 0x580 | SError / vSError | | * +------------------+------------------+-------------------------+ * | + 0x600 | Synchronous | Lower EL using AArch32 | * | + 0x680 | IRQ / vIRQ | 
| * | + 0x700 | FIQ / vFIQ | | * | + 0x780 | SError / vSError | | * +------------------+------------------+-------------------------+ */ GDATA(_vector_table) SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,_vector_table) /* The whole table must be 2K aligned */ .align 11 /* Current EL with SP0 / Synchronous */ .align 7 z_arm64_enter_exc x0, x1, el1 b z_arm64_sync_exc /* Current EL with SP0 / IRQ */ .align 7 z_arm64_enter_exc x0, x1, el1 #ifdef CONFIG_GEN_SW_ISR_TABLE b _isr_wrapper #else b z_irq_spurious #endif /* Current EL with SP0 / FIQ */ .align 7 b . /* Current EL with SP0 / SError */ .align 7 z_arm64_enter_exc x0, x1, el1 b z_arm64_serror /* Current EL with SPx / Synchronous */ .align 7 z_arm64_enter_exc x0, x1, el1 b z_arm64_sync_exc /* Current EL with SPx / IRQ */ .align 7 z_arm64_enter_exc x0, x1, el1 #ifdef CONFIG_GEN_SW_ISR_TABLE b _isr_wrapper #else b z_irq_spurious #endif /* Current EL with SPx / FIQ */ .align 7 b . /* Current EL with SPx / SError */ .align 7 z_arm64_enter_exc x0, x1, el1 b z_arm64_serror /* Lower EL using AArch64 / Synchronous */ .align 7 z_arm64_enter_exc x0, x1, el0 b z_arm64_sync_exc /* Lower EL using AArch64 / IRQ */ .align 7 z_arm64_enter_exc x0, x1, el0 #ifdef CONFIG_GEN_SW_ISR_TABLE b _isr_wrapper #else b z_irq_spurious #endif /* Lower EL using AArch64 / FIQ */ .align 7 b . /* Lower EL using AArch64 / SError */ .align 7 z_arm64_enter_exc x0, x1, el0 b z_arm64_serror /* Lower EL using AArch32 / Synchronous */ .align 7 b . /* Lower EL using AArch32 / IRQ */ .align 7 b . /* Lower EL using AArch32 / FIQ */ .align 7 b . /* Lower EL using AArch32 / SError */ .align 7 b . 
GTEXT(z_arm64_serror)

/*
 * SError (system error) exception handler.
 * Reports a fatal CPU exception to the kernel; execution continues past the
 * call only if the error was deemed recoverable.
 */
SECTION_FUNC(TEXT, z_arm64_serror)
	mov	x1, sp			/* x1 = exception stack frame (ESF) */
	mov	x0, #0			/* K_ERR_CPU_EXCEPTION */
	bl	z_arm64_fatal_error
	/* Return here only in case of recoverable error */
	b	z_arm64_exit_exc

#ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK
GTEXT(z_arm64_quick_stack_check)
SECTION_FUNC(TEXT, z_arm64_quick_stack_check)
	/*
	 * x0 is SP_EL1
	 * Retrieve the current stack limit
	 */
	get_cpu	x16
	ldr	x17, [x16, #_cpu_offset_to_current_stack_limit]
	/*
	 * If priv sp <= the stack limit, then keep using the safe exception
	 * stack and go to the stack overflow process.
	 * (The comparison result is consumed by the 'ble' below; the
	 * intervening msr/sub instructions do not modify the flags.)
	 */
	cmp	x0, x17
	/* Restore the sp_el1 */
	msr	SPSel, #1		// switch sp to sp_el1
	sub	x0, sp, x0		// x0'' = sp' - x0' = x0
	sub	sp, sp, x0		// sp'' = sp' - x0 = sp
	ble	1f
	/*
	 * If the stack does not overflow, keep using sp_el1, copy the original
	 * x16, x17, x18, lr from sp_el0 (safe_exception_stack) to sp_el1. So
	 * the four registers can be restored directly from sp_el1 without a
	 * stack mode switch.
	 */
	mrs	x18, sp_el0
	ldp	x16, x17, [x18, -(___esf_t_SIZEOF - ___esf_t_x16_x17_OFFSET)]
	stp	x16, x17, [sp, ___esf_t_x16_x17_OFFSET]
	ldp	x16, x17, [x18, -(___esf_t_SIZEOF - ___esf_t_x18_lr_OFFSET)]
	stp	x16, x17, [sp, ___esf_t_x18_lr_OFFSET]
	ret
1:
	/*
	 * If stack overflow, save the current sp and then switch sp to safe
	 * exception stack
	 * x16 is still the current _cpu
	 */
	mrs	x18, sp_el0
	mov	x17, sp
	str	x17, [x16, #_cpu_offset_to_corrupted_sp]
	/*
	 * switch sp to safe exception stack, which means we handle the fatal
	 * error with safe exception stack.
	 */
	sub	sp, x18, ___esf_t_SIZEOF
	ret
#endif

/*
 * Restore volatile registers, LR, SPSR_EL1 and ELR_EL1
 *
 * This is the common exit point for z_arm64_sync_exc() and _isr_wrapper().
 */
GTEXT(z_arm64_exit_exc)
SECTION_FUNC(TEXT, z_arm64_exit_exc)
#ifdef CONFIG_FPU_SHARING
	bl	z_arm64_fpu_exit_exc
	GTEXT(z_arm64_exit_exc_fpu_done)
z_arm64_exit_exc_fpu_done:
#endif
	/* Reload the saved SPSR/ELR pair and re-arm the eret target */
	ldp	x0, x1, [sp, ___esf_t_spsr_elr_OFFSET]
	msr	spsr_el1, x0
	msr	elr_el1, x1

	/* Restore the kernel/user mode flag and decrement exception depth */
	tst	x0, #SPSR_MODE_MASK	/* EL0 == 0 */
	mrs	x0, tpidrro_el0
	mov	x1, #TPIDRROEL0_EXC_UNIT
	/* Set the IN_EL0 flag only when returning to EL0 (csel on 'eq') */
	orr	x2, x0, #TPIDRROEL0_IN_EL0
	csel	x0, x2, x0, eq
	sub	x0, x0, x1
	msr	tpidrro_el0, x0
#ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK
	/*
	 * Condition flags still hold the 'tst' result above (mrs/mov/orr/
	 * csel/sub do not set flags): restore sp_el0 only when returning
	 * to EL0.
	 */
	bne	1f
	ldr	x0, [sp, ___esf_t_sp_el0_OFFSET]
	msr	sp_el0, x0
1:
#endif
	/* Restore the volatile GP registers from the ESF */
	ldp	x0, x1, [sp, ___esf_t_x0_x1_OFFSET]
	ldp	x2, x3, [sp, ___esf_t_x2_x3_OFFSET]
	ldp	x4, x5, [sp, ___esf_t_x4_x5_OFFSET]
	ldp	x6, x7, [sp, ___esf_t_x6_x7_OFFSET]
	ldp	x8, x9, [sp, ___esf_t_x8_x9_OFFSET]
	ldp	x10, x11, [sp, ___esf_t_x10_x11_OFFSET]
	ldp	x12, x13, [sp, ___esf_t_x12_x13_OFFSET]
	ldp	x14, x15, [sp, ___esf_t_x14_x15_OFFSET]
	ldp	x16, x17, [sp, ___esf_t_x16_x17_OFFSET]
	ldp	x18, lr, [sp, ___esf_t_x18_lr_OFFSET]

#ifdef CONFIG_FRAME_POINTER
	ldr	x29, [sp, ___esf_t_fp_OFFSET]
#endif

	/* Pop the whole ESF */
	add	sp, sp, ___esf_t_SIZEOF

	/*
	 * In general in the ELR_EL1 register we can find:
	 *
	 * - The address of ret in z_arm64_call_svc()
	 * - The address of the next instruction at the time of the IRQ when the
	 *   thread was switched out.
	 * - The address of z_thread_entry() for new threads (see thread.c).
	 */
	eret
```
/content/code_sandbox/arch/arm64/core/vector_table.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,222
```unknown
/*
 *
 */

/*
 * ARM64 Cortex-A power management
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/cpu.h>

_ASM_FILE_PROLOGUE

#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
/*
 * void arch_cpu_idle(void)
 *
 * Entered with IRQs masked. Wait for an interrupt (wfi wakes on a pending
 * IRQ even while it is masked), then unmask IRQs so the wakeup interrupt
 * is taken right after this returns.
 */
GTEXT(arch_cpu_idle)
SECTION_FUNC(TEXT, arch_cpu_idle)
#ifdef CONFIG_TRACING
	str	lr, [sp, #-16]!		/* keep sp 16-byte aligned across the call */
	bl	sys_trace_idle
	ldr	lr, [sp], #16
#endif
	dsb	sy			/* complete outstanding memory ops before sleeping */
	wfi
	msr	daifclr, #(DAIFCLR_IRQ_BIT)
	ret
#endif

#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
/*
 * void arch_cpu_atomic_idle(unsigned int key)
 *
 * x0 = key: the DAIF state saved by the matching irq-lock operation.
 * Sleep with IRQs masked, then restore the interrupt mask state that the
 * key encodes.
 */
GTEXT(arch_cpu_atomic_idle)
SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
#ifdef CONFIG_TRACING
	stp	x0, lr, [sp, #-16]!	/* preserve key and lr across the call */
	bl	sys_trace_idle
	ldp	x0, lr, [sp], #16
#endif
	msr	daifset, #(DAIFSET_IRQ_BIT)
	isb
	wfe
	/*
	 * Restore the interrupt state encoded in the key: if the DAIF IRQ
	 * bit is set in the key, interrupts were already masked at lock
	 * time, so leave them masked and skip the unmask.
	 *
	 * BUGFIX: the branch must be 'bne' (bit set -> Z==0 -> skip the
	 * daifclr). The previous 'beq' inverted the condition and skipped
	 * the unmask exactly when interrupts had been enabled, leaving
	 * them permanently masked on return.
	 */
	tst	x0, #(DAIF_IRQ_BIT)
	bne	_irq_disabled
	msr	daifclr, #(DAIFCLR_IRQ_BIT)
_irq_disabled:
	ret
#endif
```
/content/code_sandbox/arch/arm64/core/cpu_idle.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
256
```c /* * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. * * */ #include <zephyr/cache.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/kernel.h> #include <kernel_arch_func.h> #include <kernel_arch_interface.h> #include <kernel_internal.h> #include <zephyr/logging/log.h> #include <zephyr/arch/arm64/cpu.h> #include <zephyr/arch/arm64/lib_helpers.h> #include <zephyr/arch/arm64/mm.h> #include <zephyr/linker/linker-defs.h> #include <zephyr/spinlock.h> #include <zephyr/sys/util.h> #include "mmu.h" LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); static uint64_t xlat_tables[CONFIG_MAX_XLAT_TABLES * Ln_XLAT_NUM_ENTRIES] __aligned(Ln_XLAT_NUM_ENTRIES * sizeof(uint64_t)); static int xlat_use_count[CONFIG_MAX_XLAT_TABLES]; static struct k_spinlock xlat_lock; /* Usage count value range */ #define XLAT_PTE_COUNT_MASK GENMASK(15, 0) #define XLAT_REF_COUNT_UNIT BIT(16) /* Returns a reference to a free table */ static uint64_t *new_table(void) { uint64_t *table; unsigned int i; /* Look for a free table. */ for (i = 0U; i < CONFIG_MAX_XLAT_TABLES; i++) { if (xlat_use_count[i] == 0) { table = &xlat_tables[i * Ln_XLAT_NUM_ENTRIES]; xlat_use_count[i] = XLAT_REF_COUNT_UNIT; MMU_DEBUG("allocating table [%d]%p\n", i, table); return table; } } LOG_ERR("CONFIG_MAX_XLAT_TABLES, too small"); return NULL; } static inline unsigned int table_index(uint64_t *pte) { unsigned int i = (pte - xlat_tables) / Ln_XLAT_NUM_ENTRIES; __ASSERT(i < CONFIG_MAX_XLAT_TABLES, "table %p out of range", pte); return i; } /* Adjusts usage count and returns current count. 
*/ static int table_usage(uint64_t *table, int adjustment) { unsigned int i = table_index(table); int prev_count = xlat_use_count[i]; int new_count = prev_count + adjustment; /* be reasonable not to always create a debug flood */ if ((IS_ENABLED(DUMP_PTE) && adjustment != 0) || new_count == 0) { MMU_DEBUG("table [%d]%p: usage %#x -> %#x\n", i, table, prev_count, new_count); } __ASSERT(new_count >= 0, "table use count underflow"); __ASSERT(new_count == 0 || new_count >= XLAT_REF_COUNT_UNIT, "table in use with no reference to it"); __ASSERT((new_count & XLAT_PTE_COUNT_MASK) <= Ln_XLAT_NUM_ENTRIES, "table PTE count overflow"); xlat_use_count[i] = new_count; return new_count; } static inline void inc_table_ref(uint64_t *table) { table_usage(table, XLAT_REF_COUNT_UNIT); } static inline void dec_table_ref(uint64_t *table) { int ref_unit = XLAT_REF_COUNT_UNIT; table_usage(table, -ref_unit); } static inline bool is_table_unused(uint64_t *table) { return (table_usage(table, 0) & XLAT_PTE_COUNT_MASK) == 0; } static inline bool is_table_single_referenced(uint64_t *table) { return table_usage(table, 0) < (2 * XLAT_REF_COUNT_UNIT); } #ifdef CONFIG_TEST /* Hooks to let test code peek at table states */ int arm64_mmu_nb_free_tables(void) { int count = 0; for (int i = 0; i < CONFIG_MAX_XLAT_TABLES; i++) { if (xlat_use_count[i] == 0) { count++; } } return count; } int arm64_mmu_tables_total_usage(void) { int count = 0; for (int i = 0; i < CONFIG_MAX_XLAT_TABLES; i++) { count += xlat_use_count[i]; } return count; } #endif /* CONFIG_TEST */ static inline bool is_free_desc(uint64_t desc) { return (desc & PTE_DESC_TYPE_MASK) == PTE_INVALID_DESC; } static inline bool is_table_desc(uint64_t desc, unsigned int level) { return level != XLAT_LAST_LEVEL && (desc & PTE_DESC_TYPE_MASK) == PTE_TABLE_DESC; } static inline bool is_block_desc(uint64_t desc) { return (desc & PTE_DESC_TYPE_MASK) == PTE_BLOCK_DESC; } static inline uint64_t *pte_desc_table(uint64_t desc) { uint64_t address = desc & 
PTE_PHYSADDR_MASK; /* tables use a 1:1 physical:virtual mapping */ return (uint64_t *)address; } static inline bool is_desc_block_aligned(uint64_t desc, unsigned int level_size) { bool aligned = (desc & PTE_PHYSADDR_MASK & (level_size - 1)) == 0; if (!aligned) { MMU_DEBUG("misaligned desc 0x%016llx for block size 0x%x\n", desc, level_size); } return aligned; } static inline bool is_desc_superset(uint64_t desc1, uint64_t desc2, unsigned int level) { uint64_t mask = DESC_ATTRS_MASK | GENMASK64(47, LEVEL_TO_VA_SIZE_SHIFT(level)); return (desc1 & mask) == (desc2 & mask); } #if DUMP_PTE static void debug_show_pte(uint64_t *pte, unsigned int level) { MMU_DEBUG("%.*s", level * 2U, ". . . "); MMU_DEBUG("[%d]%p: ", table_index(pte), pte); if (is_free_desc(*pte)) { MMU_DEBUG("---\n"); return; } MMU_DEBUG("0x%016llx ", *pte); if (is_table_desc(*pte, level)) { uint64_t *table = pte_desc_table(*pte); MMU_DEBUG("[Table] [%d]%p\n", table_index(table), table); return; } if (is_block_desc(*pte)) { MMU_DEBUG("[Block] "); } else { MMU_DEBUG("[Page] "); } uint8_t mem_type = (*pte >> 2) & MT_TYPE_MASK; MMU_DEBUG((mem_type == MT_NORMAL) ? "MEM" : ((mem_type == MT_NORMAL_NC) ? "NC" : "DEV")); MMU_DEBUG((*pte & PTE_BLOCK_DESC_AP_RO) ? "-RO" : "-RW"); MMU_DEBUG((*pte & PTE_BLOCK_DESC_NS) ? "-NS" : "-S"); MMU_DEBUG((*pte & PTE_BLOCK_DESC_AP_ELx) ? "-ELx" : "-ELh"); MMU_DEBUG((*pte & PTE_BLOCK_DESC_PXN) ? "-PXN" : "-PX"); MMU_DEBUG((*pte & PTE_BLOCK_DESC_UXN) ? "-UXN" : "-UX"); MMU_DEBUG("\n"); } #else static inline void debug_show_pte(uint64_t *pte, unsigned int level) { } #endif static void set_pte_table_desc(uint64_t *pte, uint64_t *table, unsigned int level) { /* Point pte to new table */ *pte = PTE_TABLE_DESC | (uint64_t)table; debug_show_pte(pte, level); } static void set_pte_block_desc(uint64_t *pte, uint64_t desc, unsigned int level) { if (desc) { desc |= (level == XLAT_LAST_LEVEL) ? 
PTE_PAGE_DESC : PTE_BLOCK_DESC; } *pte = desc; debug_show_pte(pte, level); } static uint64_t *expand_to_table(uint64_t *pte, unsigned int level) { uint64_t *table; __ASSERT(level < XLAT_LAST_LEVEL, "can't expand last level"); table = new_table(); if (!table) { return NULL; } if (!is_free_desc(*pte)) { /* * If entry at current level was already populated * then we need to reflect that in the new table. */ uint64_t desc = *pte; unsigned int i, stride_shift; MMU_DEBUG("expanding PTE 0x%016llx into table [%d]%p\n", desc, table_index(table), table); __ASSERT(is_block_desc(desc), ""); if (level + 1 == XLAT_LAST_LEVEL) { desc |= PTE_PAGE_DESC; } stride_shift = LEVEL_TO_VA_SIZE_SHIFT(level + 1); for (i = 0U; i < Ln_XLAT_NUM_ENTRIES; i++) { table[i] = desc | (i << stride_shift); } table_usage(table, Ln_XLAT_NUM_ENTRIES); } else { /* * Adjust usage count for parent table's entry * that will no longer be free. */ table_usage(pte, 1); } /* Link the new table in place of the pte it replaces */ set_pte_table_desc(pte, table, level); return table; } static int set_mapping(uint64_t *top_table, uintptr_t virt, size_t size, uint64_t desc, bool may_overwrite) { uint64_t *table = top_table; uint64_t *pte; uint64_t level_size; unsigned int level = BASE_XLAT_LEVEL; while (size) { __ASSERT(level <= XLAT_LAST_LEVEL, "max translation table level exceeded\n"); /* Locate PTE for given virtual address and page table level */ pte = &table[XLAT_TABLE_VA_IDX(virt, level)]; if (is_table_desc(*pte, level)) { /* Move to the next translation table level */ level++; table = pte_desc_table(*pte); continue; } if (!may_overwrite && !is_free_desc(*pte)) { /* the entry is already allocated */ LOG_ERR("entry already in use: " "level %d pte %p *pte 0x%016llx", level, pte, *pte); return -EBUSY; } level_size = 1ULL << LEVEL_TO_VA_SIZE_SHIFT(level); if (is_desc_superset(*pte, desc, level)) { /* This block already covers our range */ level_size -= (virt & (level_size - 1)); if (level_size > size) { level_size = 
size; } goto move_on; } if ((size < level_size) || (virt & (level_size - 1)) || !is_desc_block_aligned(desc, level_size)) { /* Range doesn't fit, create subtable */ table = expand_to_table(pte, level); if (!table) { return -ENOMEM; } level++; continue; } /* Adjust usage count for corresponding table */ if (is_free_desc(*pte)) { table_usage(pte, 1); } /* Create block/page descriptor */ set_pte_block_desc(pte, desc, level); move_on: virt += level_size; desc += level_size; size -= level_size; /* Range is mapped, start again for next range */ table = top_table; level = BASE_XLAT_LEVEL; } return 0; } static void del_mapping(uint64_t *table, uintptr_t virt, size_t size, unsigned int level) { size_t step, level_size = 1ULL << LEVEL_TO_VA_SIZE_SHIFT(level); uint64_t *pte, *subtable; for ( ; size; virt += step, size -= step) { step = level_size - (virt & (level_size - 1)); if (step > size) { step = size; } pte = &table[XLAT_TABLE_VA_IDX(virt, level)]; if (is_free_desc(*pte)) { continue; } if (is_table_desc(*pte, level)) { subtable = pte_desc_table(*pte); del_mapping(subtable, virt, step, level + 1); if (!is_table_unused(subtable)) { continue; } dec_table_ref(subtable); } else { /* * We assume that block mappings will be unmapped * as a whole and not partially. */ __ASSERT(step == level_size, ""); } /* free this entry */ *pte = 0; table_usage(pte, -1); } } #ifdef CONFIG_USERSPACE static uint64_t *dup_table(uint64_t *src_table, unsigned int level) { uint64_t *dst_table = new_table(); int i, usage_count = 0; if (!dst_table) { return NULL; } MMU_DEBUG("dup (level %d) [%d]%p to [%d]%p\n", level, table_index(src_table), src_table, table_index(dst_table), dst_table); for (i = 0; i < Ln_XLAT_NUM_ENTRIES; i++) { /* * After the table duplication, each table can be independently * updated. Thus, entries may become non-global. * To keep the invariants very simple, we thus force the non-global * bit on duplication. Moreover, there is no process to revert this * (e.g. 
in `globalize_table`). Could be improved in future work. */ if (!is_free_desc(src_table[i]) && !is_table_desc(src_table[i], level)) { src_table[i] |= PTE_BLOCK_DESC_NG; } dst_table[i] = src_table[i]; if (is_table_desc(dst_table[i], level)) { inc_table_ref(pte_desc_table(dst_table[i])); } if (!is_free_desc(dst_table[i])) { usage_count++; } } table_usage(dst_table, usage_count); return dst_table; } static int privatize_table(uint64_t *dst_table, uint64_t *src_table, uintptr_t virt, size_t size, unsigned int level) { size_t step, level_size = 1ULL << LEVEL_TO_VA_SIZE_SHIFT(level); unsigned int i; int ret; for ( ; size; virt += step, size -= step) { step = level_size - (virt & (level_size - 1)); if (step > size) { step = size; } i = XLAT_TABLE_VA_IDX(virt, level); if (!is_table_desc(dst_table[i], level) || !is_table_desc(src_table[i], level)) { /* this entry is already private */ continue; } uint64_t *dst_subtable = pte_desc_table(dst_table[i]); uint64_t *src_subtable = pte_desc_table(src_table[i]); if (dst_subtable == src_subtable) { /* need to make a private copy of this table */ dst_subtable = dup_table(src_subtable, level + 1); if (!dst_subtable) { return -ENOMEM; } set_pte_table_desc(&dst_table[i], dst_subtable, level); dec_table_ref(src_subtable); } ret = privatize_table(dst_subtable, src_subtable, virt, step, level + 1); if (ret) { return ret; } } return 0; } /* * Make the given virtual address range private in dst_pt with regards to * src_pt. By "private" this means that corresponding page tables in dst_pt * will be duplicated so not to share the same table(s) with src_pt. * If corresponding page tables in dst_pt are already distinct from src_pt * then nothing is done. This allows for subsequent mapping changes in that * range to affect only dst_pt. 
*/ static int privatize_page_range(struct arm_mmu_ptables *dst_pt, struct arm_mmu_ptables *src_pt, uintptr_t virt_start, size_t size, const char *name) { k_spinlock_key_t key; int ret; MMU_DEBUG("privatize [%s]: virt %lx size %lx\n", name, virt_start, size); key = k_spin_lock(&xlat_lock); ret = privatize_table(dst_pt->base_xlat_table, src_pt->base_xlat_table, virt_start, size, BASE_XLAT_LEVEL); k_spin_unlock(&xlat_lock, key); return ret; } static void discard_table(uint64_t *table, unsigned int level) { unsigned int i; int free_count = 0; for (i = 0U; i < Ln_XLAT_NUM_ENTRIES; i++) { if (is_table_desc(table[i], level)) { uint64_t *subtable = pte_desc_table(table[i]); if (is_table_single_referenced(subtable)) { discard_table(subtable, level + 1); } dec_table_ref(subtable); } if (!is_free_desc(table[i])) { table[i] = 0U; free_count++; } } table_usage(table, -free_count); } static int globalize_table(uint64_t *dst_table, uint64_t *src_table, uintptr_t virt, size_t size, unsigned int level) { size_t step, level_size = 1ULL << LEVEL_TO_VA_SIZE_SHIFT(level); unsigned int i; int ret; for ( ; size; virt += step, size -= step) { step = level_size - (virt & (level_size - 1)); if (step > size) { step = size; } i = XLAT_TABLE_VA_IDX(virt, level); if (dst_table[i] == src_table[i]) { /* already identical to global table */ continue; } if (is_free_desc(src_table[i]) && is_table_desc(dst_table[i], level)) { uint64_t *subtable = pte_desc_table(dst_table[i]); del_mapping(subtable, virt, step, level + 1); if (is_table_unused(subtable)) { /* unreference the empty table */ dst_table[i] = 0; table_usage(dst_table, -1); dec_table_ref(subtable); } continue; } if (step != level_size) { /* boundary falls in the middle of this pte */ __ASSERT(is_table_desc(src_table[i], level), "can't have partial block pte here"); if (!is_table_desc(dst_table[i], level)) { /* we need more fine grained boundaries */ if (!expand_to_table(&dst_table[i], level)) { return -ENOMEM; } } ret = 
globalize_table(pte_desc_table(dst_table[i]), pte_desc_table(src_table[i]), virt, step, level + 1); if (ret) { return ret; } continue; } /* we discard current pte and replace with global one */ uint64_t *old_table = is_table_desc(dst_table[i], level) ? pte_desc_table(dst_table[i]) : NULL; if (is_free_desc(dst_table[i])) { table_usage(dst_table, 1); } if (is_free_desc(src_table[i])) { table_usage(dst_table, -1); } if (is_table_desc(src_table[i], level)) { inc_table_ref(pte_desc_table(src_table[i])); } dst_table[i] = src_table[i]; debug_show_pte(&dst_table[i], level); if (old_table) { /* we can discard the whole branch */ discard_table(old_table, level + 1); dec_table_ref(old_table); } } return 0; } /* * Globalize the given virtual address range in dst_pt from src_pt. We make * it global by sharing as much page table content from src_pt as possible, * including page tables themselves, and corresponding private tables in * dst_pt are then discarded. If page tables in the given range are already * shared then nothing is done. If page table sharing is not possible then * page table entries in dst_pt are synchronized with those from src_pt. */ static int globalize_page_range(struct arm_mmu_ptables *dst_pt, struct arm_mmu_ptables *src_pt, uintptr_t virt_start, size_t size, const char *name) { k_spinlock_key_t key; int ret; MMU_DEBUG("globalize [%s]: virt %lx size %lx\n", name, virt_start, size); key = k_spin_lock(&xlat_lock); ret = globalize_table(dst_pt->base_xlat_table, src_pt->base_xlat_table, virt_start, size, BASE_XLAT_LEVEL); k_spin_unlock(&xlat_lock, key); return ret; } #endif /* CONFIG_USERSPACE */ static uint64_t get_region_desc(uint32_t attrs) { unsigned int mem_type; uint64_t desc = 0U; /* NS bit for security memory access from secure state */ desc |= (attrs & MT_NS) ? 
PTE_BLOCK_DESC_NS : 0; /* * AP bits for EL0 / ELh Data access permission * * AP[2:1] ELh EL0 * +--------------------+ * 00 RW NA * 01 RW RW * 10 RO NA * 11 RO RO */ /* AP bits for Data access permission */ desc |= (attrs & MT_RW) ? PTE_BLOCK_DESC_AP_RW : PTE_BLOCK_DESC_AP_RO; /* Mirror permissions to EL0 */ desc |= (attrs & MT_RW_AP_ELx) ? PTE_BLOCK_DESC_AP_ELx : PTE_BLOCK_DESC_AP_EL_HIGHER; /* the access flag */ desc |= PTE_BLOCK_DESC_AF; /* memory attribute index field */ mem_type = MT_TYPE(attrs); desc |= PTE_BLOCK_DESC_MEMTYPE(mem_type); switch (mem_type) { case MT_DEVICE_nGnRnE: case MT_DEVICE_nGnRE: case MT_DEVICE_GRE: /* Access to Device memory and non-cacheable memory are coherent * for all observers in the system and are treated as * Outer shareable, so, for these 2 types of memory, * it is not strictly needed to set shareability field */ desc |= PTE_BLOCK_DESC_OUTER_SHARE; /* Map device memory as execute-never */ desc |= PTE_BLOCK_DESC_PXN; desc |= PTE_BLOCK_DESC_UXN; break; case MT_NORMAL_NC: case MT_NORMAL: /* Make Normal RW memory as execute never */ if ((attrs & MT_RW) || (attrs & MT_P_EXECUTE_NEVER)) desc |= PTE_BLOCK_DESC_PXN; if (((attrs & MT_RW) && (attrs & MT_RW_AP_ELx)) || (attrs & MT_U_EXECUTE_NEVER)) desc |= PTE_BLOCK_DESC_UXN; if (mem_type == MT_NORMAL) desc |= PTE_BLOCK_DESC_INNER_SHARE; else desc |= PTE_BLOCK_DESC_OUTER_SHARE; } /* non-Global bit */ if (attrs & MT_NG) { desc |= PTE_BLOCK_DESC_NG; } return desc; } static int __add_map(struct arm_mmu_ptables *ptables, const char *name, uintptr_t phys, uintptr_t virt, size_t size, uint32_t attrs) { uint64_t desc = get_region_desc(attrs); bool may_overwrite = !(attrs & MT_NO_OVERWRITE); MMU_DEBUG("mmap [%s]: virt %lx phys %lx size %lx attr %llx %s overwrite\n", name, virt, phys, size, desc, may_overwrite ? 
"may" : "no"); __ASSERT(((virt | phys | size) & (CONFIG_MMU_PAGE_SIZE - 1)) == 0, "address/size are not page aligned\n"); desc |= phys; return set_mapping(ptables->base_xlat_table, virt, size, desc, may_overwrite); } static int add_map(struct arm_mmu_ptables *ptables, const char *name, uintptr_t phys, uintptr_t virt, size_t size, uint32_t attrs) { k_spinlock_key_t key; int ret; key = k_spin_lock(&xlat_lock); ret = __add_map(ptables, name, phys, virt, size, attrs); k_spin_unlock(&xlat_lock, key); return ret; } static void remove_map(struct arm_mmu_ptables *ptables, const char *name, uintptr_t virt, size_t size) { k_spinlock_key_t key; MMU_DEBUG("unmmap [%s]: virt %lx size %lx\n", name, virt, size); __ASSERT(((virt | size) & (CONFIG_MMU_PAGE_SIZE - 1)) == 0, "address/size are not page aligned\n"); key = k_spin_lock(&xlat_lock); del_mapping(ptables->base_xlat_table, virt, size, BASE_XLAT_LEVEL); k_spin_unlock(&xlat_lock, key); } static void invalidate_tlb_all(void) { __asm__ volatile ( "dsb ishst; tlbi vmalle1; dsb ish; isb" : : : "memory"); } /* zephyr execution regions with appropriate attributes */ struct arm_mmu_flat_range { char *name; void *start; void *end; uint32_t attrs; }; static const struct arm_mmu_flat_range mmu_zephyr_ranges[] = { /* Mark the zephyr execution regions (data, bss, noinit, etc.) 
* cacheable, read-write * Note: read-write region is marked execute-never internally */ { .name = "zephyr_data", .start = _image_ram_start, .end = _image_ram_end, .attrs = MT_NORMAL | MT_P_RW_U_NA | MT_DEFAULT_SECURE_STATE }, /* Mark text segment cacheable,read only and executable */ { .name = "zephyr_code", .start = __text_region_start, .end = __text_region_end, .attrs = MT_NORMAL | MT_P_RX_U_RX | MT_DEFAULT_SECURE_STATE }, /* Mark rodata segment cacheable, read only and execute-never */ { .name = "zephyr_rodata", .start = __rodata_region_start, .end = __rodata_region_end, .attrs = MT_NORMAL | MT_P_RO_U_RO | MT_DEFAULT_SECURE_STATE }, #ifdef CONFIG_NOCACHE_MEMORY /* Mark nocache segment noncachable, read-write and execute-never */ { .name = "nocache_data", .start = _nocache_ram_start, .end = _nocache_ram_end, .attrs = MT_NORMAL_NC | MT_P_RW_U_RW | MT_DEFAULT_SECURE_STATE }, #endif }; static inline void add_arm_mmu_flat_range(struct arm_mmu_ptables *ptables, const struct arm_mmu_flat_range *range, uint32_t extra_flags) { uintptr_t address = (uintptr_t)range->start; size_t size = (uintptr_t)range->end - address; if (size) { /* MMU not yet active: must use unlocked version */ __add_map(ptables, range->name, address, address, size, range->attrs | extra_flags); } } static inline void add_arm_mmu_region(struct arm_mmu_ptables *ptables, const struct arm_mmu_region *region, uint32_t extra_flags) { if (region->size || region->attrs) { /* MMU not yet active: must use unlocked version */ __add_map(ptables, region->name, region->base_pa, region->base_va, region->size, region->attrs | extra_flags); } } static inline void inv_dcache_after_map_helper(void *virt, size_t size, uint32_t attrs) { /* * DC IVAC instruction requires write access permission to the VA, * otherwise it can generate a permission fault */ if ((attrs & MT_RW) != MT_RW) { return; } if (MT_TYPE(attrs) == MT_NORMAL || MT_TYPE(attrs) == MT_NORMAL_WT) { sys_cache_data_invd_range(virt, size); } } static void 
setup_page_tables(struct arm_mmu_ptables *ptables) { unsigned int index; const struct arm_mmu_flat_range *range; const struct arm_mmu_region *region; uintptr_t max_va = 0, max_pa = 0; MMU_DEBUG("xlat tables:\n"); for (index = 0U; index < CONFIG_MAX_XLAT_TABLES; index++) MMU_DEBUG("%d: %p\n", index, xlat_tables + index * Ln_XLAT_NUM_ENTRIES); for (index = 0U; index < mmu_config.num_regions; index++) { region = &mmu_config.mmu_regions[index]; max_va = MAX(max_va, region->base_va + region->size); max_pa = MAX(max_pa, region->base_pa + region->size); } __ASSERT(max_va <= (1ULL << CONFIG_ARM64_VA_BITS), "Maximum VA not supported\n"); __ASSERT(max_pa <= (1ULL << CONFIG_ARM64_PA_BITS), "Maximum PA not supported\n"); /* setup translation table for zephyr execution regions */ for (index = 0U; index < ARRAY_SIZE(mmu_zephyr_ranges); index++) { range = &mmu_zephyr_ranges[index]; add_arm_mmu_flat_range(ptables, range, 0); } /* * Create translation tables for user provided platform regions. * Those must not conflict with our default mapping. 
*/ for (index = 0U; index < mmu_config.num_regions; index++) { region = &mmu_config.mmu_regions[index]; add_arm_mmu_region(ptables, region, MT_NO_OVERWRITE); } invalidate_tlb_all(); for (index = 0U; index < ARRAY_SIZE(mmu_zephyr_ranges); index++) { size_t size; range = &mmu_zephyr_ranges[index]; size = POINTER_TO_UINT(range->end) - POINTER_TO_UINT(range->start); inv_dcache_after_map_helper(range->start, size, range->attrs); } for (index = 0U; index < mmu_config.num_regions; index++) { region = &mmu_config.mmu_regions[index]; inv_dcache_after_map_helper(UINT_TO_POINTER(region->base_va), region->size, region->attrs); } } /* Translation table control register settings */ static uint64_t get_tcr(int el) { uint64_t tcr; uint64_t va_bits = CONFIG_ARM64_VA_BITS; uint64_t tcr_ps_bits; tcr_ps_bits = TCR_PS_BITS; if (el == 1) { tcr = (tcr_ps_bits << TCR_EL1_IPS_SHIFT); /* * TCR_EL1.EPD1: Disable translation table walk for addresses * that are translated using TTBR1_EL1. */ tcr |= TCR_EPD1_DISABLE; } else { tcr = (tcr_ps_bits << TCR_EL3_PS_SHIFT); } tcr |= TCR_T0SZ(va_bits); /* * Translation table walk is cacheable, inner/outer WBWA and * inner shareable. Due to Cortex-A57 erratum #822227 we must * set TG1[1] = 4KB. 
*/ tcr |= TCR_TG1_4K | TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA; return tcr; } static void enable_mmu_el1(struct arm_mmu_ptables *ptables, unsigned int flags) { ARG_UNUSED(flags); uint64_t val; /* Set MAIR, TCR and TBBR registers */ write_mair_el1(MEMORY_ATTRIBUTES); write_tcr_el1(get_tcr(1)); write_ttbr0_el1((uint64_t)ptables->base_xlat_table); /* Ensure these changes are seen before MMU is enabled */ barrier_isync_fence_full(); /* Enable the MMU and data cache */ val = read_sctlr_el1(); write_sctlr_el1(val | SCTLR_M_BIT | SCTLR_C_BIT); /* Ensure the MMU enable takes effect immediately */ barrier_isync_fence_full(); MMU_DEBUG("MMU enabled with dcache\n"); } /* ARM MMU Driver Initial Setup */ static struct arm_mmu_ptables kernel_ptables; #ifdef CONFIG_USERSPACE static sys_slist_t domain_list; #endif /* * @brief MMU default configuration * * This function provides the default configuration mechanism for the Memory * Management Unit (MMU). */ void z_arm64_mm_init(bool is_primary_core) { unsigned int flags = 0U; __ASSERT(CONFIG_MMU_PAGE_SIZE == KB(4), "Only 4K page size is supported\n"); __ASSERT(GET_EL(read_currentel()) == MODE_EL1, "Exception level not EL1, MMU not enabled!\n"); /* Ensure that MMU is already not enabled */ __ASSERT((read_sctlr_el1() & SCTLR_M_BIT) == 0, "MMU is already enabled\n"); /* * Only booting core setup up the page tables. 
*/ if (is_primary_core) { kernel_ptables.base_xlat_table = new_table(); setup_page_tables(&kernel_ptables); } /* currently only EL1 is supported */ enable_mmu_el1(&kernel_ptables, flags); } static void sync_domains(uintptr_t virt, size_t size, const char *name) { #ifdef CONFIG_USERSPACE sys_snode_t *node; struct arch_mem_domain *domain; struct arm_mmu_ptables *domain_ptables; k_spinlock_key_t key; int ret; key = k_spin_lock(&z_mem_domain_lock); SYS_SLIST_FOR_EACH_NODE(&domain_list, node) { domain = CONTAINER_OF(node, struct arch_mem_domain, node); domain_ptables = &domain->ptables; ret = globalize_page_range(domain_ptables, &kernel_ptables, virt, size, name); if (ret) { LOG_ERR("globalize_page_range() returned %d", ret); } } k_spin_unlock(&z_mem_domain_lock, key); #endif } static int __arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags) { struct arm_mmu_ptables *ptables; uint32_t entry_flags = MT_DEFAULT_SECURE_STATE | MT_P_RX_U_NA | MT_NO_OVERWRITE; /* Always map in the kernel page tables */ ptables = &kernel_ptables; /* Translate flags argument into HW-recognized entry flags. 
*/ switch (flags & K_MEM_CACHE_MASK) { /* * K_MEM_CACHE_NONE, K_MEM_ARM_DEVICE_nGnRnE => MT_DEVICE_nGnRnE * (Device memory nGnRnE) * K_MEM_ARM_DEVICE_nGnRE => MT_DEVICE_nGnRE * (Device memory nGnRE) * K_MEM_ARM_DEVICE_GRE => MT_DEVICE_GRE * (Device memory GRE) * K_MEM_ARM_NORMAL_NC => MT_NORMAL_NC * (Normal memory Non-cacheable) * K_MEM_CACHE_WB => MT_NORMAL * (Normal memory Outer WB + Inner WB) * K_MEM_CACHE_WT => MT_NORMAL_WT * (Normal memory Outer WT + Inner WT) */ case K_MEM_CACHE_NONE: /* K_MEM_CACHE_NONE equal to K_MEM_ARM_DEVICE_nGnRnE */ /* case K_MEM_ARM_DEVICE_nGnRnE: */ entry_flags |= MT_DEVICE_nGnRnE; break; case K_MEM_ARM_DEVICE_nGnRE: entry_flags |= MT_DEVICE_nGnRE; break; case K_MEM_ARM_DEVICE_GRE: entry_flags |= MT_DEVICE_GRE; break; case K_MEM_ARM_NORMAL_NC: entry_flags |= MT_NORMAL_NC; break; case K_MEM_CACHE_WT: entry_flags |= MT_NORMAL_WT; break; case K_MEM_CACHE_WB: entry_flags |= MT_NORMAL; break; default: return -ENOTSUP; } if ((flags & K_MEM_PERM_RW) != 0U) { entry_flags |= MT_RW; } if ((flags & K_MEM_PERM_EXEC) == 0U) { entry_flags |= MT_P_EXECUTE_NEVER; } if ((flags & K_MEM_PERM_USER) != 0U) { entry_flags |= MT_RW_AP_ELx; } return add_map(ptables, "generic", phys, (uintptr_t)virt, size, entry_flags); } void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags) { int ret = __arch_mem_map(virt, phys, size, flags); if (ret) { LOG_ERR("__arch_mem_map() returned %d", ret); k_panic(); } else { uint32_t mem_flags = flags & K_MEM_CACHE_MASK; sync_domains((uintptr_t)virt, size, "mem_map"); invalidate_tlb_all(); switch (mem_flags) { case K_MEM_CACHE_WB: case K_MEM_CACHE_WT: mem_flags = (mem_flags == K_MEM_CACHE_WB) ? MT_NORMAL : MT_NORMAL_WT; mem_flags |= (flags & K_MEM_PERM_RW) ? 
MT_RW : 0; inv_dcache_after_map_helper(virt, size, mem_flags); default: break; } } } void arch_mem_unmap(void *addr, size_t size) { remove_map(&kernel_ptables, "generic", (uintptr_t)addr, size); sync_domains((uintptr_t)addr, size, "mem_unmap"); invalidate_tlb_all(); } int arch_page_phys_get(void *virt, uintptr_t *phys) { uint64_t par; int key; key = arch_irq_lock(); __asm__ volatile ("at S1E1R, %0" : : "r" (virt)); barrier_isync_fence_full(); par = read_par_el1(); arch_irq_unlock(key); if (par & BIT(0)) { return -EFAULT; } if (phys) { *phys = par & GENMASK64(47, 12); } return 0; } size_t arch_virt_region_align(uintptr_t phys, size_t size) { size_t alignment = CONFIG_MMU_PAGE_SIZE; size_t level_size; int level; for (level = XLAT_LAST_LEVEL; level >= BASE_XLAT_LEVEL; level--) { level_size = 1 << LEVEL_TO_VA_SIZE_SHIFT(level); if (size < level_size) { break; } if ((phys & (level_size - 1))) { break; } alignment = level_size; } return alignment; } #ifdef CONFIG_USERSPACE static uint16_t next_asid = 1; static uint16_t get_asid(uint64_t ttbr0) { return ttbr0 >> TTBR_ASID_SHIFT; } static void z_arm64_swap_ptables(struct k_thread *incoming); int arch_mem_domain_max_partitions_get(void) { return CONFIG_MAX_DOMAIN_PARTITIONS; } int arch_mem_domain_init(struct k_mem_domain *domain) { struct arm_mmu_ptables *domain_ptables = &domain->arch.ptables; k_spinlock_key_t key; uint16_t asid; MMU_DEBUG("%s\n", __func__); key = k_spin_lock(&xlat_lock); /* * Pick a new ASID. We use round-robin * Note: `next_asid` is an uint16_t and `VM_ASID_BITS` could * be up to 16, hence `next_asid` might overflow to 0 below. 
*/ asid = next_asid++; if ((next_asid >= (1UL << VM_ASID_BITS)) || (next_asid == 0)) { next_asid = 1; } domain_ptables->base_xlat_table = dup_table(kernel_ptables.base_xlat_table, BASE_XLAT_LEVEL); k_spin_unlock(&xlat_lock, key); if (!domain_ptables->base_xlat_table) { return -ENOMEM; } domain_ptables->ttbr0 = (((uint64_t)asid) << TTBR_ASID_SHIFT) | ((uint64_t)(uintptr_t)domain_ptables->base_xlat_table); sys_slist_append(&domain_list, &domain->arch.node); return 0; } static int private_map(struct arm_mmu_ptables *ptables, const char *name, uintptr_t phys, uintptr_t virt, size_t size, uint32_t attrs) { int ret; ret = privatize_page_range(ptables, &kernel_ptables, virt, size, name); __ASSERT(ret == 0, "privatize_page_range() returned %d", ret); ret = add_map(ptables, name, phys, virt, size, attrs | MT_NG); __ASSERT(ret == 0, "add_map() returned %d", ret); invalidate_tlb_all(); inv_dcache_after_map_helper(UINT_TO_POINTER(virt), size, attrs); return ret; } static int reset_map(struct arm_mmu_ptables *ptables, const char *name, uintptr_t addr, size_t size) { int ret; ret = globalize_page_range(ptables, &kernel_ptables, addr, size, name); __ASSERT(ret == 0, "globalize_page_range() returned %d", ret); invalidate_tlb_all(); return ret; } int arch_mem_domain_partition_add(struct k_mem_domain *domain, uint32_t partition_id) { struct arm_mmu_ptables *domain_ptables = &domain->arch.ptables; struct k_mem_partition *ptn = &domain->partitions[partition_id]; return private_map(domain_ptables, "partition", ptn->start, ptn->start, ptn->size, ptn->attr.attrs | MT_NORMAL); } int arch_mem_domain_partition_remove(struct k_mem_domain *domain, uint32_t partition_id) { struct arm_mmu_ptables *domain_ptables = &domain->arch.ptables; struct k_mem_partition *ptn = &domain->partitions[partition_id]; return reset_map(domain_ptables, "partition removal", ptn->start, ptn->size); } static int map_thread_stack(struct k_thread *thread, struct arm_mmu_ptables *ptables) { return private_map(ptables, 
"thread_stack", thread->stack_info.start, thread->stack_info.start, thread->stack_info.size, MT_P_RW_U_RW | MT_NORMAL); } int arch_mem_domain_thread_add(struct k_thread *thread) { struct arm_mmu_ptables *old_ptables, *domain_ptables; struct k_mem_domain *domain; bool is_user, is_migration; int ret = 0; domain = thread->mem_domain_info.mem_domain; domain_ptables = &domain->arch.ptables; old_ptables = thread->arch.ptables; is_user = (thread->base.user_options & K_USER) != 0; is_migration = (old_ptables != NULL) && is_user; if (is_migration) { ret = map_thread_stack(thread, domain_ptables); } thread->arch.ptables = domain_ptables; if (thread == _current) { z_arm64_swap_ptables(thread); } else { #ifdef CONFIG_SMP /* the thread could be running on another CPU right now */ z_arm64_mem_cfg_ipi(); #endif } if (is_migration) { ret = reset_map(old_ptables, __func__, thread->stack_info.start, thread->stack_info.size); } return ret; } int arch_mem_domain_thread_remove(struct k_thread *thread) { struct arm_mmu_ptables *domain_ptables; struct k_mem_domain *domain; domain = thread->mem_domain_info.mem_domain; domain_ptables = &domain->arch.ptables; if ((thread->base.user_options & K_USER) == 0) { return 0; } if ((thread->base.thread_state & _THREAD_DEAD) == 0) { return 0; } return reset_map(domain_ptables, __func__, thread->stack_info.start, thread->stack_info.size); } static void z_arm64_swap_ptables(struct k_thread *incoming) { struct arm_mmu_ptables *ptables = incoming->arch.ptables; uint64_t curr_ttbr0 = read_ttbr0_el1(); uint64_t new_ttbr0 = ptables->ttbr0; if (curr_ttbr0 == new_ttbr0) { return; /* Already the right tables */ } MMU_DEBUG("TTBR0 switch from %#llx to %#llx\n", curr_ttbr0, new_ttbr0); z_arm64_set_ttbr0(new_ttbr0); if (get_asid(curr_ttbr0) == get_asid(new_ttbr0)) { invalidate_tlb_all(); } } void z_arm64_thread_mem_domains_init(struct k_thread *incoming) { struct arm_mmu_ptables *ptables; if ((incoming->base.user_options & K_USER) == 0) return; ptables = 
incoming->arch.ptables; /* Map the thread stack */ map_thread_stack(incoming, ptables); z_arm64_swap_ptables(incoming); } void z_arm64_swap_mem_domains(struct k_thread *incoming) { z_arm64_swap_ptables(incoming); } #endif /* CONFIG_USERSPACE */ ```
/content/code_sandbox/arch/arm64/core/mmu.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
10,139
```unknown # ARM64 core configuration options config CPU_CORTEX_A bool select CPU_CORTEX select HAS_FLASH_LOAD_OFFSET select SCHED_IPI_SUPPORTED if SMP select CPU_HAS_FPU select ARCH_HAS_SINGLE_THREAD_SUPPORT select CPU_HAS_DCACHE select CPU_HAS_ICACHE imply FPU imply FPU_SHARING help This option signifies the use of a CPU of the Cortex-A family. config CPU_AARCH64_CORTEX_R bool select CPU_CORTEX select HAS_FLASH_LOAD_OFFSET select CPU_HAS_DCACHE select CPU_HAS_ICACHE select ARCH_HAS_STACK_PROTECTION select CPU_HAS_FPU imply FPU imply FPU_SHARING help This option signifies the use of a CPU of the Cortex-R 64-bit family. config CPU_CORTEX_A53 bool select CPU_CORTEX_A select ARMV8_A help This option signifies the use of a Cortex-A53 CPU config CPU_CORTEX_A55 bool select CPU_CORTEX_A select ARMV8_A help This option signifies the use of a Cortex-A55 CPU config CPU_CORTEX_A57 bool select CPU_CORTEX_A select ARMV8_A help This option signifies the use of a Cortex-A57 CPU config CPU_CORTEX_A72 bool select CPU_CORTEX_A select ARMV8_A help This option signifies the use of a Cortex-A72 CPU config CPU_CORTEX_A76 bool select CPU_CORTEX_A select ARMV8_A help This option signifies the use of a Cortex-A76 CPU config CPU_CORTEX_A76_A55 bool select CPU_CORTEX_A select ARMV8_A help This option signifies the use of a Cortex-A76 and A55 big little CPU cluster config CPU_CORTEX_R82 bool select CPU_AARCH64_CORTEX_R select ARMV8_R help This option signifies the use of a Cortex-R82 CPU config HAS_ARM_SMCCC bool help Include support for the Secure Monitor Call (SMC) and Hypervisor Call (HVC) instructions on Armv7 and above architectures. 
config NUM_IRQS int config MAIN_STACK_SIZE default 4096 config IDLE_STACK_SIZE default 4096 config ISR_STACK_SIZE default 4096 config TEST_EXTRA_STACK_SIZE default 2048 config SYSTEM_WORKQUEUE_STACK_SIZE default 4096 config CMSIS_THREAD_MAX_STACK_SIZE default 4096 config CMSIS_V2_THREAD_MAX_STACK_SIZE default 4096 config CMSIS_V2_THREAD_DYNAMIC_STACK_SIZE default 4096 config IPM_CONSOLE_STACK_SIZE default 2048 config AARCH64_IMAGE_HEADER bool "Add image header" default y if ARM_MMU || ARM_MPU help This option enables standard ARM64 boot image header used by Linux and understood by loaders such as u-boot on Xen xl tool. config PRIVILEGED_STACK_SIZE default 4096 config KOBJECT_TEXT_AREA default 512 if TEST config WAIT_AT_RESET_VECTOR bool "Wait at reset vector" default n help Spin at reset vector waiting for debugger to attach and resume execution config ARM64_SAFE_EXCEPTION_STACK bool "To enable the safe exception stack" help The safe exception stack is used for checking whether the kernel stack overflows during the exception happens from EL1. This stack is not used for user stack overflow checking, because kernel stack support the checking work. config ARM64_ENABLE_FRAME_POINTER bool depends on OVERRIDE_FRAME_POINTER_DEFAULT && !OMIT_FRAME_POINTER depends on !FRAME_POINTER select DEPRECATED help Deprecated. Use CONFIG_FRAME_POINTER instead. Hidden option to simplify access to OVERRIDE_FRAME_POINTER_DEFAULT and OMIT_FRAME_POINTER. It is automatically enabled when the frame pointer unwinding is enabled. config ARM64_EXCEPTION_STACK_TRACE bool default y depends on FRAME_POINTER help Internal config to enable runtime stack traces on fatal exceptions. config ARM64_SAFE_EXCEPTION_STACK_SIZE int "The stack size of the safe exception stack" default 4096 depends on ARM64_SAFE_EXCEPTION_STACK help The stack size of the safe exception stack. The safe exception stack requires to be enough to do the stack overflow check. 
config ARM64_FALLBACK_ON_RESERVED_CORES bool "To enable fallback on reserved cores" help Give the ability to define more cores in the device tree than required via CONFIG_MP_MAX_NUM_CPUS. The extra cores in the device tree become reserved. If there is an issue powering on a core during boot then that core will be skipped and the next core in the device tree will be used. config ARM64_STACK_PROTECTION bool default y if HW_STACK_PROTECTION depends on ARM_MPU select THREAD_STACK_INFO select ARM64_SAFE_EXCEPTION_STACK help This option leverages the MMU or MPU to cause a system fatal error if the bounds of the current process stack are overflowed. This is done by preceding all stack areas with a fixed guard region. if CPU_CORTEX_A config ARMV8_A_NS bool "ARMv8-A Normal World (Non-Secure world of Trustzone)" help This option signifies that Zephyr is entered in TrustZone Non-Secure state config ARMV8_A bool select ATOMIC_OPERATIONS_BUILTIN select CPU_HAS_MMU select ARCH_HAS_USERSPACE if ARM_MMU select ARCH_HAS_NOCACHE_MEMORY_SUPPORT if ARM_MMU help This option signifies the use of an ARMv8-A processor implementation. From path_to_url The Armv8-A architecture introduces the ability to use 64-bit and 32-bit Execution states, known as AArch64 and AArch32 respectively. The AArch64 Execution state supports the A64 instruction set, holds addresses in 64-bit registers and allows instructions in the base instruction set to use 64-bit registers for their processing. The AArch32 Execution state is a 32-bit Execution state that preserves backwards compatibility with the Armv7-A architecture and enhances that profile so that it can support some features included in the AArch64 state. It supports the T32 and A32 instruction sets. 
rsource "xen/Kconfig" endif # CPU_CORTEX_A if CPU_AARCH64_CORTEX_R config ARMV8_R bool select ATOMIC_OPERATIONS_BUILTIN select SCHED_IPI_SUPPORTED if SMP select ARCH_HAS_USERSPACE if ARM_MPU help This option signifies the use of an ARMv8-R processor implementation. From path_to_url The Armv8-R architecture targets at the Real-time profile. It introduces virtualization at the highest security level while retaining the Protected Memory System Architecture (PMSA) based on a Memory Protection Unit (MPU). It supports the A32 and T32 instruction sets. rsource "cortex_r/Kconfig" endif # CPU_AARCH64_CORTEX_R if CPU_CORTEX_A || CPU_AARCH64_CORTEX_R config GEN_ISR_TABLES default y config GEN_IRQ_VECTOR_TABLE default n config ARM_MMU bool "ARM MMU Support" default n if CPU_AARCH64_CORTEX_R default y select MMU select SRAM_REGION_PERMISSIONS select ARCH_MEM_DOMAIN_SYNCHRONOUS_API if USERSPACE select ARCH_MEM_DOMAIN_DATA if USERSPACE help Memory Management Unit support. config XIP select AARCH64_IMAGE_HEADER config ARM64_SET_VMPIDR_EL2 bool "Set VMPIDR_EL2 at EL2 stage" help VMPIDR_EL2 holds the value of the Virtualization Multiprocessor ID. This is the value returned by EL1 reads of MPIDR_EL1. This register may already be set by bootloader at the EL2 stage, if not, Zephyr should set it. if ARM_MMU config MMU_PAGE_SIZE default 0x1000 choice ARM64_VA_BITS prompt "Virtual address space size" default ARM64_VA_BITS_32 help Allows choosing one of multiple possible virtual address space sizes. The level of translation table is determined by a combination of page size and virtual address space size. 
config ARM64_VA_BITS_32 bool "32-bit" config ARM64_VA_BITS_36 bool "36-bit" config ARM64_VA_BITS_40 bool "40-bit" config ARM64_VA_BITS_42 bool "42-bit" config ARM64_VA_BITS_48 bool "48-bit" endchoice config ARM64_VA_BITS int default 32 if ARM64_VA_BITS_32 default 36 if ARM64_VA_BITS_36 default 40 if ARM64_VA_BITS_40 default 42 if ARM64_VA_BITS_42 default 48 if ARM64_VA_BITS_48 choice ARM64_PA_BITS prompt "Physical address space size" default ARM64_PA_BITS_32 help Choose the maximum physical address range that the kernel will support. config ARM64_PA_BITS_32 bool "32-bit" config ARM64_PA_BITS_36 bool "36-bit" config ARM64_PA_BITS_40 bool "40-bit" config ARM64_PA_BITS_42 bool "42-bit" config ARM64_PA_BITS_48 bool "48-bit" endchoice config ARM64_PA_BITS int default 32 if ARM64_PA_BITS_32 default 36 if ARM64_PA_BITS_36 default 40 if ARM64_PA_BITS_40 default 42 if ARM64_PA_BITS_42 default 48 if ARM64_PA_BITS_48 config MAX_XLAT_TABLES int "Maximum numbers of translation tables" default 20 if USERSPACE && (ARM64_VA_BITS >= 40) default 16 if USERSPACE default 12 if (ARM64_VA_BITS >= 40) default 8 help This option specifies the maximum numbers of translation tables. Based on this, translation tables are allocated at compile time and used at runtime as needed. If the runtime need exceeds preallocated numbers of translation tables, it will result in assert. Number of translation tables required is decided based on how many discrete memory regions (both normal and device memory) are present on given platform and how much granularity is required while assigning attributes to these memory regions. endif # ARM_MMU endif # CPU_CORTEX_A || CPU_AARCH64_CORTEX_R ```
/content/code_sandbox/arch/arm64/core/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,313
```c /* * Organisation (CSIRO) ABN 41 687 119 230. * */ #include <zephyr/arch/common/semihost.h> long semihost_exec(enum semihost_instr instr, void *args) { register unsigned long w0 __asm__ ("w0") = instr; register void *x1 __asm__ ("x1") = args; register long ret __asm__ ("x0"); __asm__ volatile ("hlt 0xf000" : "=r" (ret) : "r" (w0), "r" (x1) : "memory"); return ret; } ```
/content/code_sandbox/arch/arm64/core/semihost.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
132
```c /* * * */ /** * @file * @brief codes required for AArch64 multicore and Zephyr smp support */ #include <zephyr/cache.h> #include <zephyr/device.h> #include <zephyr/devicetree.h> #include <zephyr/kernel.h> #include <zephyr/kernel_structs.h> #include <ksched.h> #include <ipi.h> #include <zephyr/init.h> #include <zephyr/arch/arm64/mm.h> #include <zephyr/arch/cpu.h> #include <zephyr/drivers/interrupt_controller/gic.h> #include <zephyr/drivers/pm_cpu_ops.h> #include <zephyr/arch/arch_interface.h> #include <zephyr/sys/barrier.h> #include <zephyr/irq.h> #include "boot.h" #define INV_MPID UINT64_MAX #define SGI_SCHED_IPI 0 #define SGI_MMCFG_IPI 1 #define SGI_FPU_IPI 2 struct boot_params { uint64_t mpid; char *sp; uint8_t voting[CONFIG_MP_MAX_NUM_CPUS]; arch_cpustart_t fn; void *arg; int cpu_num; }; /* Offsets used in reset.S */ BUILD_ASSERT(offsetof(struct boot_params, mpid) == BOOT_PARAM_MPID_OFFSET); BUILD_ASSERT(offsetof(struct boot_params, sp) == BOOT_PARAM_SP_OFFSET); BUILD_ASSERT(offsetof(struct boot_params, voting) == BOOT_PARAM_VOTING_OFFSET); volatile struct boot_params __aligned(L1_CACHE_BYTES) arm64_cpu_boot_params = { .mpid = -1, }; const uint64_t cpu_node_list[] = { DT_FOREACH_CHILD_STATUS_OKAY_SEP(DT_PATH(cpus), DT_REG_ADDR, (,)) }; /* cpu_map saves the maping of core id and mpid */ static uint64_t cpu_map[CONFIG_MP_MAX_NUM_CPUS] = { [0 ... 
(CONFIG_MP_MAX_NUM_CPUS - 1)] = INV_MPID }; extern void z_arm64_mm_init(bool is_primary_core); /* Called from Zephyr initialization */ void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz, arch_cpustart_t fn, void *arg) { int cpu_count; static int i; uint64_t cpu_mpid = 0; uint64_t master_core_mpid; /* Now it is on master core */ __ASSERT(arch_curr_cpu()->id == 0, ""); master_core_mpid = MPIDR_TO_CORE(GET_MPIDR()); cpu_count = ARRAY_SIZE(cpu_node_list); #ifdef CONFIG_ARM64_FALLBACK_ON_RESERVED_CORES __ASSERT(cpu_count >= CONFIG_MP_MAX_NUM_CPUS, "The count of CPU Core nodes in dts is not greater or equal to CONFIG_MP_MAX_NUM_CPUS\n"); #else __ASSERT(cpu_count == CONFIG_MP_MAX_NUM_CPUS, "The count of CPU Cores nodes in dts is not equal to CONFIG_MP_MAX_NUM_CPUS\n"); #endif arm64_cpu_boot_params.sp = K_KERNEL_STACK_BUFFER(stack) + sz; arm64_cpu_boot_params.fn = fn; arm64_cpu_boot_params.arg = arg; arm64_cpu_boot_params.cpu_num = cpu_num; for (; i < cpu_count; i++) { if (cpu_node_list[i] == master_core_mpid) { continue; } cpu_mpid = cpu_node_list[i]; barrier_dsync_fence_full(); /* store mpid last as this is our synchronization point */ arm64_cpu_boot_params.mpid = cpu_mpid; sys_cache_data_flush_range((void *)&arm64_cpu_boot_params, sizeof(arm64_cpu_boot_params)); if (pm_cpu_on(cpu_mpid, (uint64_t)&__start)) { printk("Failed to boot secondary CPU core %d (MPID:%#llx)\n", cpu_num, cpu_mpid); #ifdef CONFIG_ARM64_FALLBACK_ON_RESERVED_CORES printk("Falling back on reserved cores\n"); continue; #else k_panic(); #endif } break; } if (i++ == cpu_count) { printk("Can't find CPU Core %d from dts and failed to boot it\n", cpu_num); k_panic(); } /* Wait secondary cores up, see arch_secondary_cpu_init */ while (arm64_cpu_boot_params.fn) { wfe(); } cpu_map[cpu_num] = cpu_mpid; printk("Secondary CPU core %d (MPID:%#llx) is up\n", cpu_num, cpu_mpid); } /* the C entry of secondary cores */ void arch_secondary_cpu_init(int cpu_num) { cpu_num = arm64_cpu_boot_params.cpu_num; 
arch_cpustart_t fn; void *arg; __ASSERT(arm64_cpu_boot_params.mpid == MPIDR_TO_CORE(GET_MPIDR()), ""); /* Initialize tpidrro_el0 with our struct _cpu instance address */ write_tpidrro_el0((uintptr_t)&_kernel.cpus[cpu_num]); z_arm64_mm_init(false); #ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK z_arm64_safe_exception_stack_init(); #endif #ifdef CONFIG_SMP arm_gic_secondary_init(); irq_enable(SGI_SCHED_IPI); #ifdef CONFIG_USERSPACE irq_enable(SGI_MMCFG_IPI); #endif #ifdef CONFIG_FPU_SHARING irq_enable(SGI_FPU_IPI); #endif #endif fn = arm64_cpu_boot_params.fn; arg = arm64_cpu_boot_params.arg; barrier_dsync_fence_full(); /* * Secondary core clears .fn to announce its presence. * Primary core is polling for this. We no longer own * arm64_cpu_boot_params afterwards. */ arm64_cpu_boot_params.fn = NULL; barrier_dsync_fence_full(); sev(); fn(arg); } #ifdef CONFIG_SMP static void send_ipi(unsigned int ipi, uint32_t cpu_bitmap) { uint64_t mpidr = MPIDR_TO_CORE(GET_MPIDR()); /* * Send SGI to all cores except itself */ unsigned int num_cpus = arch_num_cpus(); for (int i = 0; i < num_cpus; i++) { if ((cpu_bitmap & BIT(i)) == 0) { continue; } uint64_t target_mpidr = cpu_map[i]; uint8_t aff0; if (mpidr == target_mpidr || target_mpidr == INV_MPID) { continue; } aff0 = MPIDR_AFFLVL(target_mpidr, 0); gic_raise_sgi(ipi, target_mpidr, 1 << aff0); } } void sched_ipi_handler(const void *unused) { ARG_UNUSED(unused); z_sched_ipi(); } void arch_sched_broadcast_ipi(void) { send_ipi(SGI_SCHED_IPI, IPI_ALL_CPUS_MASK); } void arch_sched_directed_ipi(uint32_t cpu_bitmap) { send_ipi(SGI_SCHED_IPI, cpu_bitmap); } #ifdef CONFIG_USERSPACE void mem_cfg_ipi_handler(const void *unused) { ARG_UNUSED(unused); unsigned int key = arch_irq_lock(); /* * Make sure a domain switch by another CPU is effective on this CPU. * This is a no-op if the page table is already the right one. * Lock irq to prevent the interrupt during mem region switch. 
*/ z_arm64_swap_mem_domains(_current); arch_irq_unlock(key); } void z_arm64_mem_cfg_ipi(void) { send_ipi(SGI_MMCFG_IPI, IPI_ALL_CPUS_MASK); } #endif #ifdef CONFIG_FPU_SHARING void flush_fpu_ipi_handler(const void *unused) { ARG_UNUSED(unused); disable_irq(); arch_flush_local_fpu(); /* no need to re-enable IRQs here */ } void arch_flush_fpu_ipi(unsigned int cpu) { const uint64_t mpidr = cpu_map[cpu]; uint8_t aff0; if (mpidr == INV_MPID) { return; } aff0 = MPIDR_AFFLVL(mpidr, 0); gic_raise_sgi(SGI_FPU_IPI, mpidr, 1 << aff0); } /* * Make sure there is no pending FPU flush request for this CPU while * waiting for a contended spinlock to become available. This prevents * a deadlock when the lock we need is already taken by another CPU * that also wants its FPU content to be reinstated while such content * is still live in this CPU's FPU. */ void arch_spin_relax(void) { if (arm_gic_irq_is_pending(SGI_FPU_IPI)) { arm_gic_irq_clear_pending(SGI_FPU_IPI); /* * We may not be in IRQ context here hence cannot use * arch_flush_local_fpu() directly. */ arch_float_disable(_current_cpu->arch.fpu_owner); } } #endif int arch_smp_init(void) { cpu_map[0] = MPIDR_TO_CORE(GET_MPIDR()); /* * SGI0 is use for sched ipi, this might be changed to use Kconfig * option */ IRQ_CONNECT(SGI_SCHED_IPI, IRQ_DEFAULT_PRIORITY, sched_ipi_handler, NULL, 0); irq_enable(SGI_SCHED_IPI); #ifdef CONFIG_USERSPACE IRQ_CONNECT(SGI_MMCFG_IPI, IRQ_DEFAULT_PRIORITY, mem_cfg_ipi_handler, NULL, 0); irq_enable(SGI_MMCFG_IPI); #endif #ifdef CONFIG_FPU_SHARING IRQ_CONNECT(SGI_FPU_IPI, IRQ_DEFAULT_PRIORITY, flush_fpu_ipi_handler, NULL, 0); irq_enable(SGI_FPU_IPI); #endif return 0; } #endif ```
/content/code_sandbox/arch/arm64/core/smp.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,124
```unknown /* * */ #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #include <zephyr/arch/cpu.h> #include <zephyr/offsets.h> #include "boot.h" #include "macro_priv.inc" _ASM_FILE_PROLOGUE /* * Platform specific pre-C init code * * Note: - Stack is not yet available * - x23, x24 and x25 must be preserved */ WTEXT(z_arm64_el3_plat_prep_c) SECTION_FUNC(TEXT,z_arm64_el3_plat_prep_c) ret WTEXT(z_arm64_el2_plat_prep_c) SECTION_FUNC(TEXT,z_arm64_el2_plat_prep_c) ret WTEXT(z_arm64_el1_plat_prep_c) SECTION_FUNC(TEXT,z_arm64_el1_plat_prep_c) ret /* * Set the minimum necessary to safely call C code */ GTEXT(__reset_prep_c) SECTION_SUBSEC_FUNC(TEXT,_reset_section,__reset_prep_c) /* return address: x23 */ mov x23, lr switch_el x0, 3f, 2f, 1f 3: #if !defined(CONFIG_ARMV8_R) /* Reinitialize SCTLR from scratch in EL3 */ ldr w0, =(SCTLR_EL3_RES1 | SCTLR_I_BIT | SCTLR_SA_BIT) msr sctlr_el3, x0 isb /* Custom plat prep_c init */ bl z_arm64_el3_plat_prep_c /* Set SP_EL1 */ msr sp_el1, x24 b out #endif /* CONFIG_ARMV8_R */ 2: /* Disable alignment fault checking */ mrs x0, sctlr_el2 bic x0, x0, SCTLR_A_BIT msr sctlr_el2, x0 isb /* Custom plat prep_c init */ bl z_arm64_el2_plat_prep_c /* Set SP_EL1 */ msr sp_el1, x24 b out 1: /* Disable alignment fault checking */ mrs x0, sctlr_el1 bic x0, x0, SCTLR_A_BIT msr sctlr_el1, x0 isb /* Custom plat prep_c init */ bl z_arm64_el1_plat_prep_c /* Set SP_EL1. We cannot use sp_el1 at EL1 */ msr SPSel, #1 mov sp, x24 out: isb /* Select SP_EL0 */ msr SPSel, #0 /* Initialize stack */ mov sp, x24 /* fp = NULL */ mov fp, xzr ret x23 /* * Reset vector * * Ran when the system comes out of reset. The processor is in thread mode with * privileged level. At this point, neither SP_EL0 nor SP_ELx point to a valid * area in SRAM. 
*/ GTEXT(__reset) SECTION_SUBSEC_FUNC(TEXT,_reset_section,__reset) GTEXT(__start) SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start) #ifdef CONFIG_WAIT_AT_RESET_VECTOR resetwait: wfe b resetwait #endif /* Mask all exceptions */ msr DAIFSet, #0xf #if CONFIG_MP_MAX_NUM_CPUS > 1 /* * Deal with multi core booting simultaneously to race for being the primary core. * Use voting lock[1] with reasonable but minimal requirements on the memory system * to make sure only one core wins at last. * * [1] kernel.org/doc/html/next/arch/arm/vlocks.html */ ldr x0, =arm64_cpu_boot_params /* * Get the "logic" id defined by cpu_node_list statically for voting lock self-identify. * It is worth noting that this is NOT the final logic id (arch_curr_cpu()->id) */ get_cpu_logic_id x1, x2, x3, x4 //x1: MPID, x2: logic id add x4, x0, #BOOT_PARAM_VOTING_OFFSET /* signal our desire to vote */ mov w5, #1 strb w5, [x4, x2] ldr x3, [x0, #BOOT_PARAM_MPID_OFFSET] cmn x3, #1 beq 1f /* some core already won, release */ strb wzr, [x4, x2] b secondary_core /* suggest current core then release */ 1: str x1, [x0, #BOOT_PARAM_MPID_OFFSET] strb wzr, [x4, x2] dmb ish /* then wait until every core else is done voting */ mov x5, #0 2: ldrb w3, [x4, x5] tst w3, #255 /* wait */ bne 2b add x5, x5, #1 cmp x5, #CONFIG_MP_MAX_NUM_CPUS bne 2b /* check if current core won */ dmb ish ldr x3, [x0, #BOOT_PARAM_MPID_OFFSET] cmp x3, x1 beq primary_core /* fallthrough secondary */ /* loop until our turn comes */ secondary_core: dmb ish ldr x2, [x0, #BOOT_PARAM_MPID_OFFSET] cmp x1, x2 bne secondary_core /* we can now load our stack pointer value and move on */ ldr x24, [x0, #BOOT_PARAM_SP_OFFSET] ldr x25, =z_arm64_secondary_prep_c b boot primary_core: #endif /* load primary stack and entry point */ ldr x24, =(z_interrupt_stacks + __z_interrupt_stack_SIZEOF) ldr x25, =z_prep_c boot: /* Prepare for calling C code */ bl __reset_prep_c /* * Initialize the interrupt stack with 0xaa so stack utilization * can be measured. 
This needs to be done before using the stack * so that we don't clobber any data. */ #ifdef CONFIG_INIT_STACKS mov_imm x0, CONFIG_ISR_STACK_SIZE sub x0, sp, x0 sub x9, sp, #8 mov x10, 0xaaaaaaaaaaaaaaaa stack_init_loop: cmp x0, x9 beq stack_init_done str x10, [x0], #8 b stack_init_loop stack_init_done: #endif /* Platform hook for highest EL */ bl z_arm64_el_highest_init switch_el: switch_el x0, 3f, 2f, 1f 3: #if !defined(CONFIG_ARMV8_R) /* EL3 init */ bl z_arm64_el3_init /* Get next EL */ adr x0, switch_el bl z_arm64_el3_get_next_el eret #endif /* CONFIG_ARMV8_R */ 2: /* EL2 init */ bl z_arm64_el2_init /* Move to EL1 with all exceptions masked */ mov_imm x0, (SPSR_DAIF_MASK | SPSR_MODE_EL1T) msr spsr_el2, x0 adr x0, 1f msr elr_el2, x0 eret 1: /* EL1 init */ bl z_arm64_el1_init /* We want to use SP_ELx from now on */ msr SPSel, #1 /* Enable SError interrupts */ msr DAIFClr, #(DAIFCLR_ABT_BIT) isb ret x25 /* either z_prep_c or z_arm64_secondary_prep_c */ ```
/content/code_sandbox/arch/arm64/core/reset.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,689
```c /* * */ /** * @file * @brief ARM64 Cortex-A interrupt management */ #include <zephyr/kernel.h> #include <zephyr/arch/cpu.h> #include <zephyr/tracing/tracing.h> #include <zephyr/irq.h> #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #include <zephyr/sw_isr_table.h> #include <zephyr/drivers/interrupt_controller/gic.h> void z_arm64_fatal_error(unsigned int reason, struct arch_esf *esf); #if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER) /* * The default interrupt controller for AArch64 is the ARM Generic Interrupt * Controller (GIC) and therefore the architecture interrupt control functions * are mapped to the GIC driver interface. * * When a custom interrupt controller is used (i.e. * CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER is enabled), the architecture * interrupt control functions are mapped to the SoC layer in * `include/arch/arm64/irq.h`. */ void arch_irq_enable(unsigned int irq) { arm_gic_irq_enable(irq); } void arch_irq_disable(unsigned int irq) { arm_gic_irq_disable(irq); } int arch_irq_is_enabled(unsigned int irq) { return arm_gic_irq_is_enabled(irq); } void z_arm64_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags) { arm_gic_irq_set_priority(irq, prio, flags); } #endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */ #ifdef CONFIG_DYNAMIC_INTERRUPTS int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, void (*routine)(const void *parameter), const void *parameter, uint32_t flags) { z_isr_install(irq, routine, parameter); z_arm64_irq_priority_set(irq, priority, flags); return irq; } #endif void z_irq_spurious(const void *unused) { ARG_UNUSED(unused); z_arm64_fatal_error(K_ERR_SPURIOUS_IRQ, NULL); } ```
/content/code_sandbox/arch/arm64/core/irq_manage.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
422
```c /* * */ /** * @file * @brief ARM64 kernel structure member offset definition file * * This module is responsible for the generation of the absolute symbols whose * value represents the member offsets for various ARM kernel structures. * * All of the absolute symbols defined by this module will be present in the * final kernel ELF image (due to the linker's reference to the _OffsetAbsSyms * symbol). * * INTERNAL * It is NOT necessary to define the offset for every member of a structure. * Typically, only those members that are accessed by assembly language routines * are defined; however, it doesn't hurt to define all fields for the sake of * completeness. */ #ifndef _ARM_OFFSETS_INC_ #define _ARM_OFFSETS_INC_ #include <gen_offset.h> #include <zephyr/kernel.h> #include <kernel_arch_data.h> #include <kernel_offsets.h> GEN_OFFSET_SYM(_thread_arch_t, exception_depth); GEN_NAMED_OFFSET_SYM(_callee_saved_t, x19, x19_x20); GEN_NAMED_OFFSET_SYM(_callee_saved_t, x21, x21_x22); GEN_NAMED_OFFSET_SYM(_callee_saved_t, x23, x23_x24); GEN_NAMED_OFFSET_SYM(_callee_saved_t, x25, x25_x26); GEN_NAMED_OFFSET_SYM(_callee_saved_t, x27, x27_x28); GEN_NAMED_OFFSET_SYM(_callee_saved_t, x29, x29_sp_el0); GEN_NAMED_OFFSET_SYM(_callee_saved_t, sp_elx, sp_elx_lr); #ifdef CONFIG_FRAME_POINTER GEN_NAMED_OFFSET_SYM(_esf_t, fp, fp); #endif GEN_NAMED_OFFSET_SYM(_esf_t, spsr, spsr_elr); GEN_NAMED_OFFSET_SYM(_esf_t, x18, x18_lr); GEN_NAMED_OFFSET_SYM(_esf_t, x16, x16_x17); GEN_NAMED_OFFSET_SYM(_esf_t, x14, x14_x15); GEN_NAMED_OFFSET_SYM(_esf_t, x12, x12_x13); GEN_NAMED_OFFSET_SYM(_esf_t, x10, x10_x11); GEN_NAMED_OFFSET_SYM(_esf_t, x8, x8_x9); GEN_NAMED_OFFSET_SYM(_esf_t, x6, x6_x7); GEN_NAMED_OFFSET_SYM(_esf_t, x4, x4_x5); GEN_NAMED_OFFSET_SYM(_esf_t, x2, x2_x3); GEN_NAMED_OFFSET_SYM(_esf_t, x0, x0_x1); GEN_ABSOLUTE_SYM(___esf_t_SIZEOF, sizeof(_esf_t)); #ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK GEN_OFFSET_SYM(_cpu_arch_t, safe_exception_stack); GEN_OFFSET_SYM(_cpu_arch_t, current_stack_limit); 
GEN_OFFSET_SYM(_cpu_arch_t, corrupted_sp); GEN_OFFSET_SYM(_thread_arch_t, stack_limit); GEN_NAMED_OFFSET_SYM(_esf_t, sp, sp_el0); #endif #ifdef CONFIG_HAS_ARM_SMCCC #include <zephyr/arch/arm64/arm-smccc.h> GEN_NAMED_OFFSET_SYM(arm_smccc_res_t, a0, a0_a1); GEN_NAMED_OFFSET_SYM(arm_smccc_res_t, a2, a2_a3); GEN_NAMED_OFFSET_SYM(arm_smccc_res_t, a4, a4_a5); GEN_NAMED_OFFSET_SYM(arm_smccc_res_t, a6, a6_a7); #endif /* CONFIG_HAS_ARM_SMCCC */ GEN_ABS_SYM_END #endif /* _ARM_OFFSETS_INC_ */ ```
/content/code_sandbox/arch/arm64/core/offsets/offsets.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
754
```unknown /* */ #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #include <zephyr/xen/public/arch-arm.h> #include <zephyr/xen/public/xen.h> #define HYPERCALL(hypercall) \ GTEXT(HYPERVISOR_##hypercall); \ SECTION_FUNC(TEXT, HYPERVISOR_##hypercall) \ mov x16, #__HYPERVISOR_##hypercall; \ hvc XEN_HYPERCALL_TAG; \ ret; _ASM_FILE_PROLOGUE HYPERCALL(console_io); HYPERCALL(grant_table_op); HYPERCALL(sched_op); HYPERCALL(event_channel_op); HYPERCALL(hvm_op); HYPERCALL(memory_op); #ifdef CONFIG_XEN_DOM0 HYPERCALL(domctl); #endif ```
/content/code_sandbox/arch/arm64/core/xen/hypercall.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
178
```c /* * */ #include <zephyr/arch/arm64/hypercall.h> #include <zephyr/xen/events.h> #include <zephyr/xen/generic.h> #include <zephyr/xen/public/xen.h> #include <zephyr/xen/public/memory.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/kernel.h> #include <zephyr/kernel/thread.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(xen_enlighten); /* * During Xen Enlighten initialization we need to have allocated memory page, * where hypervisor shared_info will be mapped. k_aligned_alloc() is not * available on PRE_KERNEL_1 stage, so we will use statically allocated buffer, * which will be casted to 'struct shared_info'. It is needed to initialize Xen * event channels as soon as possible after start. */ static uint8_t shared_info_buf[XEN_PAGE_SIZE] __aligned(XEN_PAGE_SIZE); /* Remains NULL until mapping will be finished by Xen */ shared_info_t *HYPERVISOR_shared_info; static int xen_map_shared_info(const shared_info_t *shared_page) { struct xen_add_to_physmap xatp; xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = (((xen_pfn_t) shared_page) >> XEN_PAGE_SHIFT); return HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp); } static int xen_enlighten_init(void) { int ret = 0; shared_info_t *info = (shared_info_t *) shared_info_buf; ret = xen_map_shared_info(info); if (ret) { LOG_ERR("%s: failed to map for Xen shared page, ret = %d\n", __func__, ret); return ret; } /* Set value for globally visible pointer */ HYPERVISOR_shared_info = info; ret = xen_events_init(); if (ret) { LOG_ERR("%s: failed init Xen event channels, ret = %d\n", __func__, ret); return ret; } return 0; } SYS_INIT(xen_enlighten_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEVICE); ```
/content/code_sandbox/arch/arm64/core/xen/enlighten.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
496
```unknown # Xen hypervisor configuration options config XEN bool default y depends on ARMV8_A depends on DT_HAS_XEN_XEN_ENABLED help Enables support of Xen hypervisor on arm64 platform. Get enabled when board device tree contains "hypervisor" node with "xen,xen" compatible enabled. config XEN_DOM0 bool "Zephyr as Xen Domain 0" depends on XEN help Built binary will be used as Xen privileged domain (Domain 0). config XEN_DOM0LESS bool "Zephyr for Xen Dom0less setup" depends on XEN && !XEN_DOM0 help Configures Zephyr as DomU, that can be started on Dom0less setup. ```
/content/code_sandbox/arch/arm64/core/xen/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
170
```c /* * */ /** * @file * @brief Kernel fatal error handler for ARM64 Cortex-A * * This module provides the z_arm64_fatal_error() routine for ARM64 Cortex-A * CPUs and z_arm64_do_kernel_oops() routine to manage software-generated fatal * exceptions */ #include <zephyr/debug/symtab.h> #include <zephyr/drivers/pm_cpu_ops.h> #include <zephyr/arch/common/exc_handle.h> #include <zephyr/kernel.h> #include <zephyr/logging/log.h> #include <zephyr/sys/poweroff.h> #include <kernel_arch_func.h> LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); #ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK K_KERNEL_PINNED_STACK_ARRAY_DEFINE(z_arm64_safe_exception_stacks, CONFIG_MP_MAX_NUM_CPUS, CONFIG_ARM64_SAFE_EXCEPTION_STACK_SIZE); void z_arm64_safe_exception_stack_init(void) { int cpu_id; char *safe_exc_sp; cpu_id = arch_curr_cpu()->id; safe_exc_sp = K_KERNEL_STACK_BUFFER(z_arm64_safe_exception_stacks[cpu_id]) + CONFIG_ARM64_SAFE_EXCEPTION_STACK_SIZE; arch_curr_cpu()->arch.safe_exception_stack = (uint64_t)safe_exc_sp; write_sp_el0((uint64_t)safe_exc_sp); arch_curr_cpu()->arch.current_stack_limit = 0UL; arch_curr_cpu()->arch.corrupted_sp = 0UL; } #endif #ifdef CONFIG_USERSPACE Z_EXC_DECLARE(z_arm64_user_string_nlen); static const struct z_exc_handle exceptions[] = { Z_EXC_HANDLE(z_arm64_user_string_nlen), }; #endif /* CONFIG_USERSPACE */ #ifdef CONFIG_EXCEPTION_DEBUG static void dump_esr(uint64_t esr, bool *dump_far) { const char *err; switch (GET_ESR_EC(esr)) { case 0b000000: /* 0x00 */ err = "Unknown reason"; break; case 0b000001: /* 0x01 */ err = "Trapped WFI or WFE instruction execution"; break; case 0b000011: /* 0x03 */ err = "Trapped MCR or MRC access with (coproc==0b1111) that " "is not reported using EC 0b000000"; break; case 0b000100: /* 0x04 */ err = "Trapped MCRR or MRRC access with (coproc==0b1111) " "that is not reported using EC 0b000000"; break; case 0b000101: /* 0x05 */ err = "Trapped MCR or MRC access with (coproc==0b1110)"; break; case 0b000110: /* 0x06 */ err = "Trapped LDC or 
STC access"; break; case 0b000111: /* 0x07 */ err = "Trapped access to SVE, Advanced SIMD, or " "floating-point functionality"; break; case 0b001100: /* 0x0c */ err = "Trapped MRRC access with (coproc==0b1110)"; break; case 0b001101: /* 0x0d */ err = "Branch Target Exception"; break; case 0b001110: /* 0x0e */ err = "Illegal Execution state"; break; case 0b010001: /* 0x11 */ err = "SVC instruction execution in AArch32 state"; break; case 0b011000: /* 0x18 */ err = "Trapped MSR, MRS or System instruction execution in " "AArch64 state, that is not reported using EC " "0b000000, 0b000001 or 0b000111"; break; case 0b011001: /* 0x19 */ err = "Trapped access to SVE functionality"; break; case 0b100000: /* 0x20 */ *dump_far = true; err = "Instruction Abort from a lower Exception level, that " "might be using AArch32 or AArch64"; break; case 0b100001: /* 0x21 */ *dump_far = true; err = "Instruction Abort taken without a change in Exception " "level."; break; case 0b100010: /* 0x22 */ *dump_far = true; err = "PC alignment fault exception."; break; case 0b100100: /* 0x24 */ *dump_far = true; err = "Data Abort from a lower Exception level, that might " "be using AArch32 or AArch64"; break; case 0b100101: /* 0x25 */ *dump_far = true; err = "Data Abort taken without a change in Exception level"; break; case 0b100110: /* 0x26 */ err = "SP alignment fault exception"; break; case 0b101000: /* 0x28 */ err = "Trapped floating-point exception taken from AArch32 " "state"; break; case 0b101100: /* 0x2c */ err = "Trapped floating-point exception taken from AArch64 " "state."; break; case 0b101111: /* 0x2f */ err = "SError interrupt"; break; case 0b110000: /* 0x30 */ err = "Breakpoint exception from a lower Exception level, " "that might be using AArch32 or AArch64"; break; case 0b110001: /* 0x31 */ err = "Breakpoint exception taken without a change in " "Exception level"; break; case 0b110010: /* 0x32 */ err = "Software Step exception from a lower Exception level, " "that might be using 
AArch32 or AArch64"; break; case 0b110011: /* 0x33 */ err = "Software Step exception taken without a change in " "Exception level"; break; case 0b110100: /* 0x34 */ *dump_far = true; err = "Watchpoint exception from a lower Exception level, " "that might be using AArch32 or AArch64"; break; case 0b110101: /* 0x35 */ *dump_far = true; err = "Watchpoint exception taken without a change in " "Exception level."; break; case 0b111000: /* 0x38 */ err = "BKPT instruction execution in AArch32 state"; break; case 0b111100: /* 0x3c */ err = "BRK instruction execution in AArch64 state."; break; default: err = "Unknown"; } LOG_ERR("ESR_ELn: 0x%016llx", esr); LOG_ERR(" EC: 0x%llx (%s)", GET_ESR_EC(esr), err); LOG_ERR(" IL: 0x%llx", GET_ESR_IL(esr)); LOG_ERR(" ISS: 0x%llx", GET_ESR_ISS(esr)); } static void esf_dump(const struct arch_esf *esf) { LOG_ERR("x0: 0x%016llx x1: 0x%016llx", esf->x0, esf->x1); LOG_ERR("x2: 0x%016llx x3: 0x%016llx", esf->x2, esf->x3); LOG_ERR("x4: 0x%016llx x5: 0x%016llx", esf->x4, esf->x5); LOG_ERR("x6: 0x%016llx x7: 0x%016llx", esf->x6, esf->x7); LOG_ERR("x8: 0x%016llx x9: 0x%016llx", esf->x8, esf->x9); LOG_ERR("x10: 0x%016llx x11: 0x%016llx", esf->x10, esf->x11); LOG_ERR("x12: 0x%016llx x13: 0x%016llx", esf->x12, esf->x13); LOG_ERR("x14: 0x%016llx x15: 0x%016llx", esf->x14, esf->x15); LOG_ERR("x16: 0x%016llx x17: 0x%016llx", esf->x16, esf->x17); LOG_ERR("x18: 0x%016llx lr: 0x%016llx", esf->x18, esf->lr); } #ifdef CONFIG_EXCEPTION_STACK_TRACE static void esf_unwind(const struct arch_esf *esf) { /* * For GCC: * * ^ +-----------------+ * | | | * | | | * | | | * | | | * | | function stack | * | | | * | | | * | | | * | | | * | +-----------------+ * | | LR | * | +-----------------+ * | | previous FP | <---+ FP * + +-----------------+ */ uint64_t *fp = (uint64_t *) esf->fp; unsigned int count = 0; uint64_t lr; LOG_ERR(""); for (int i = 0; (fp != NULL) && (i < CONFIG_EXCEPTION_STACK_TRACE_MAX_FRAMES); i++) { lr = fp[1]; #ifdef CONFIG_SYMTAB uint32_t offset = 
0; const char *name = symtab_find_symbol_name(lr, &offset); LOG_ERR("backtrace %2d: fp: 0x%016llx lr: 0x%016llx [%s+0x%x]", count++, (uint64_t) fp, lr, name, offset); #else LOG_ERR("backtrace %2d: fp: 0x%016llx lr: 0x%016llx", count++, (uint64_t) fp, lr); #endif fp = (uint64_t *) fp[0]; } LOG_ERR(""); } #endif #endif /* CONFIG_EXCEPTION_DEBUG */ #ifdef CONFIG_ARM64_STACK_PROTECTION static bool z_arm64_stack_corruption_check(struct arch_esf *esf, uint64_t esr, uint64_t far) { uint64_t sp, sp_limit, guard_start; /* 0x25 means data abort from current EL */ if (GET_ESR_EC(esr) == 0x25) { sp_limit = arch_curr_cpu()->arch.current_stack_limit; guard_start = sp_limit - Z_ARM64_STACK_GUARD_SIZE; sp = arch_curr_cpu()->arch.corrupted_sp; if ((sp != 0 && sp <= sp_limit) || (guard_start <= far && far <= sp_limit)) { #ifdef CONFIG_FPU_SHARING /* * We are in exception stack, and now we are sure the stack does overflow, * so flush the fpu context to its owner, and then set no fpu trap to avoid * a new nested exception triggered by FPU accessing (var_args). 
*/ arch_flush_local_fpu(); write_cpacr_el1(read_cpacr_el1() | CPACR_EL1_FPEN_NOTRAP); #endif arch_curr_cpu()->arch.corrupted_sp = 0UL; LOG_ERR("STACK OVERFLOW FROM KERNEL, SP: 0x%llx OR FAR: 0x%llx INVALID," " SP LIMIT: 0x%llx", sp, far, sp_limit); return true; } } #ifdef CONFIG_USERSPACE else if ((_current->base.user_options & K_USER) != 0 && GET_ESR_EC(esr) == 0x24) { sp_limit = (uint64_t)_current->stack_info.start; guard_start = sp_limit - Z_ARM64_STACK_GUARD_SIZE; sp = esf->sp; if (sp <= sp_limit || (guard_start <= far && far <= sp_limit)) { LOG_ERR("STACK OVERFLOW FROM USERSPACE, SP: 0x%llx OR FAR: 0x%llx INVALID," " SP LIMIT: 0x%llx", sp, far, sp_limit); return true; } } #endif return false; } #endif static bool is_recoverable(struct arch_esf *esf, uint64_t esr, uint64_t far, uint64_t elr) { if (!esf) return false; #ifdef CONFIG_USERSPACE for (int i = 0; i < ARRAY_SIZE(exceptions); i++) { /* Mask out instruction mode */ uint64_t start = (uint64_t)exceptions[i].start; uint64_t end = (uint64_t)exceptions[i].end; if (esf->elr >= start && esf->elr < end) { esf->elr = (uint64_t)(exceptions[i].fixup); return true; } } #endif return false; } void z_arm64_fatal_error(unsigned int reason, struct arch_esf *esf) { uint64_t esr = 0; uint64_t elr = 0; uint64_t far = 0; uint64_t el; if (reason != K_ERR_SPURIOUS_IRQ) { el = read_currentel(); switch (GET_EL(el)) { case MODE_EL1: esr = read_esr_el1(); far = read_far_el1(); elr = read_elr_el1(); break; #if !defined(CONFIG_ARMV8_R) case MODE_EL3: esr = read_esr_el3(); far = read_far_el3(); elr = read_elr_el3(); break; #endif /* CONFIG_ARMV8_R */ } #ifdef CONFIG_ARM64_STACK_PROTECTION if (z_arm64_stack_corruption_check(esf, esr, far)) { reason = K_ERR_STACK_CHK_FAIL; } #endif if (GET_EL(el) != MODE_EL0) { #ifdef CONFIG_EXCEPTION_DEBUG bool dump_far = false; LOG_ERR("ELR_ELn: 0x%016llx", elr); dump_esr(esr, &dump_far); if (dump_far) LOG_ERR("FAR_ELn: 0x%016llx", far); LOG_ERR("TPIDRRO: 0x%016llx", read_tpidrro_el0()); #endif /* 
CONFIG_EXCEPTION_DEBUG */ if (is_recoverable(esf, esr, far, elr) && reason != K_ERR_STACK_CHK_FAIL) { return; } } } #ifdef CONFIG_EXCEPTION_DEBUG if (esf != NULL) { esf_dump(esf); } #ifdef CONFIG_EXCEPTION_STACK_TRACE esf_unwind(esf); #endif /* CONFIG_EXCEPTION_STACK_TRACE */ #endif /* CONFIG_EXCEPTION_DEBUG */ z_fatal_error(reason, esf); CODE_UNREACHABLE; } /** * @brief Handle a software-generated fatal exception * (e.g. kernel oops, panic, etc.). * * @param esf exception frame */ void z_arm64_do_kernel_oops(struct arch_esf *esf) { /* x8 holds the exception reason */ unsigned int reason = esf->x8; #if defined(CONFIG_USERSPACE) /* * User mode is only allowed to induce oopses and stack check * failures via software-triggered system fatal exceptions. */ if (((_current->base.user_options & K_USER) != 0) && reason != K_ERR_STACK_CHK_FAIL) { reason = K_ERR_KERNEL_OOPS; } #endif z_arm64_fatal_error(reason, esf); } #ifdef CONFIG_USERSPACE FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr) { z_arm64_fatal_error(K_ERR_KERNEL_OOPS, ssf_ptr); CODE_UNREACHABLE; } #endif #if defined(CONFIG_PM_CPU_OPS_PSCI) FUNC_NORETURN void arch_system_halt(unsigned int reason) { ARG_UNUSED(reason); (void)arch_irq_lock(); #ifdef CONFIG_POWEROFF sys_poweroff(); #endif /* CONFIG_POWEROFF */ for (;;) { /* Spin endlessly as fallback */ } } #endif ```
/content/code_sandbox/arch/arm64/core/fatal.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,695
```unknown # Memory Protection Unit (MPU) configuration options if CPU_HAS_MPU config ARM_MPU bool "ARM MPU Support" select THREAD_STACK_INFO select MPU select SRAM_REGION_PERMISSIONS select ARCH_MEM_DOMAIN_SYNCHRONOUS_API if USERSPACE default y help MPU implements Memory Protection Unit. Notes: The ARMv8-R MPU architecture requires a power-of-two alignment of MPU region base address and size(64 bytes aligned). The ARMv8-R MPU requires the active MPU regions be non-overlapping. As a result of this, the ARMv8-R MPU needs to fully partition the memory map when programming dynamic memory regions (e.g. PRIV stack guard, user thread stack, and application memory domains), if the system requires PRIV access policy different from the access policy of the ARMv8-R background memory map. The application developer may enforce full PRIV (kernel) memory partition by enabling the CONFIG_MPU_GAP_FILLING option. By not enforcing full partition, MPU may leave part of kernel SRAM area covered only by the default ARMv8-R memory map. This is fine for User Mode, since the background ARM map does not allow nPRIV access at all. However, since the background map policy allows instruction fetches by privileged code, forcing this Kconfig option off prevents the system from directly triggering MemManage exceptions upon accidental attempts to execute code from SRAM in XIP builds. Since this does not compromise User Mode, we make the skipping of full partitioning the default behavior for the ARMv8-R MPU driver. config ARM_MPU_REGION_MIN_ALIGN_AND_SIZE int default 64 if ARM_MPU default 4 help Minimum size (and alignment) of an ARM MPU region. Use this symbol to guarantee minimum size and alignment of MPU regions. A minimum 4-byte alignment is enforced in ARM builds without support for Memory Protection. if ARM_MPU config MPU_ALLOW_FLASH_WRITE bool "Add MPU access to write to flash" help Enable this to allow MPU RWX access to flash memory endif # ARM_MPU endif # CPU_HAS_MPU ```
/content/code_sandbox/arch/arm64/core/cortex_r/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
480
```objective-c /* * */ /** * @file * @brief Private kernel definitions (ARM64) * * This file contains private kernel function definitions and various * other definitions for the ARM Cortex-A processor architecture family. * * This file is also included by assembly language files which must #define * _ASMLANGUAGE before including this header file. Note that kernel * assembly source files obtains structure offset values via "absolute symbols" * in the offsets.o module. */ #ifndef ZEPHYR_ARCH_ARM64_INCLUDE_KERNEL_ARCH_FUNC_H_ #define ZEPHYR_ARCH_ARM64_INCLUDE_KERNEL_ARCH_FUNC_H_ #include <kernel_arch_data.h> #ifdef __cplusplus extern "C" { #endif #ifndef _ASMLANGUAGE static ALWAYS_INLINE void arch_kernel_init(void) { } static inline void arch_switch(void *switch_to, void **switched_from) { extern void z_arm64_context_switch(struct k_thread *new, struct k_thread *old); struct k_thread *new = switch_to; struct k_thread *old = CONTAINER_OF(switched_from, struct k_thread, switch_handle); z_arm64_context_switch(new, old); } extern void z_arm64_fatal_error(unsigned int reason, struct arch_esf *esf); extern void z_arm64_set_ttbr0(uint64_t ttbr0); extern void z_arm64_mem_cfg_ipi(void); #ifdef CONFIG_FPU_SHARING void arch_flush_local_fpu(void); void arch_flush_fpu_ipi(unsigned int cpu); #endif #ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK void z_arm64_safe_exception_stack_init(void); #endif #endif /* _ASMLANGUAGE */ #ifdef __cplusplus } #endif #endif /* ZEPHYR_ARCH_ARM64_INCLUDE_KERNEL_ARCH_FUNC_H_ */ ```
/content/code_sandbox/arch/arm64/include/kernel_arch_func.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
361
```objective-c /* * */ /** * @file * @brief Exception/interrupt context helpers for Cortex-A CPUs * * Exception/interrupt context helpers. */ #ifndef ZEPHYR_ARCH_ARM64_INCLUDE_EXCEPTION_H_ #define ZEPHYR_ARCH_ARM64_INCLUDE_EXCEPTION_H_ #include <zephyr/arch/cpu.h> #ifdef _ASMLANGUAGE /* nothing */ #else #ifdef __cplusplus extern "C" { #endif static ALWAYS_INLINE bool arch_is_in_isr(void) { return arch_curr_cpu()->nested != 0U; } #ifdef __cplusplus } #endif #endif /* _ASMLANGUAGE */ #endif /* ZEPHYR_ARCH_ARM64_INCLUDE_EXCEPTION_H_ */ ```
/content/code_sandbox/arch/arm64/include/exception.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
137
```objective-c /* * */ /** * @file * @brief Private kernel definitions (ARM) * * This file contains private kernel structures definitions and various * other definitions for the ARM Cortex-A/R/M processor architecture family. * * This file is also included by assembly language files which must #define * _ASMLANGUAGE before including this header file. Note that kernel * assembly source files obtains structure offset values via "absolute symbols" * in the offsets.o module. */ #ifndef ZEPHYR_ARCH_ARM64_INCLUDE_KERNEL_ARCH_DATA_H_ #define ZEPHYR_ARCH_ARM64_INCLUDE_KERNEL_ARCH_DATA_H_ #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #include <zephyr/arch/cpu.h> #include <exception.h> #ifndef _ASMLANGUAGE #include <zephyr/kernel.h> #include <zephyr/types.h> #include <zephyr/sys/dlist.h> #include <zephyr/sys/atomic.h> #ifdef __cplusplus extern "C" { #endif typedef struct arch_esf _esf_t; typedef struct __basic_sf _basic_sf_t; #ifdef __cplusplus } #endif #endif /* _ASMLANGUAGE */ #endif /* ZEPHYR_ARCH_ARM64_INCLUDE_KERNEL_ARCH_DATA_H_ */ ```
/content/code_sandbox/arch/arm64/include/kernel_arch_data.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
262
```objective-c /* * */ #ifndef ZEPHYR_ARCH_ARM64_INCLUDE_OFFSETS_SHORT_ARCH_H_ #define ZEPHYR_ARCH_ARM64_INCLUDE_OFFSETS_SHORT_ARCH_H_ #include <zephyr/offsets.h> #define _thread_offset_to_exception_depth \ (___thread_t_arch_OFFSET + ___thread_arch_t_exception_depth_OFFSET) #define _thread_offset_to_callee_saved_x19_x20 \ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_x19_x20_OFFSET) #define _thread_offset_to_callee_saved_x21_x22 \ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_x21_x22_OFFSET) #define _thread_offset_to_callee_saved_x23_x24 \ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_x23_x24_OFFSET) #define _thread_offset_to_callee_saved_x25_x26 \ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_x25_x26_OFFSET) #define _thread_offset_to_callee_saved_x27_x28 \ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_x27_x28_OFFSET) #define _thread_offset_to_callee_saved_x29_sp_el0 \ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_x29_sp_el0_OFFSET) #define _thread_offset_to_callee_saved_sp_elx_lr \ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_sp_elx_lr_OFFSET) #ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK #define _cpu_offset_to_safe_exception_stack \ (___cpu_t_arch_OFFSET + ___cpu_arch_t_safe_exception_stack_OFFSET) #define _cpu_offset_to_current_stack_limit \ (___cpu_t_arch_OFFSET + ___cpu_arch_t_current_stack_limit_OFFSET) #define _cpu_offset_to_corrupted_sp \ (___cpu_t_arch_OFFSET + ___cpu_arch_t_corrupted_sp_OFFSET) #define _thread_offset_to_stack_limit \ (___thread_t_arch_OFFSET + ___thread_arch_t_stack_limit_OFFSET) #endif #endif /* ZEPHYR_ARCH_ARM64_INCLUDE_OFFSETS_SHORT_ARCH_H_ */ ```
/content/code_sandbox/arch/arm64/include/offsets_short_arch.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
418