text
stringlengths
9
39.2M
dir
stringlengths
25
226
lang
stringclasses
163 values
created_date
timestamp[s]
updated_date
timestamp[s]
repo_name
stringclasses
751 values
repo_full_name
stringclasses
752 values
star
int64
1.01k
183k
len_tokens
int64
1
18.5M
```c /* * */ /** * @file * @brief New thread creation for ARM Cortex-A and Cortex-R * * Core thread related primitives for the ARM Cortex-A and * Cortex-R processor architecture. */ #include <zephyr/kernel.h> #include <zephyr/llext/symbol.h> #include <ksched.h> #include <zephyr/sys/barrier.h> #include <stdbool.h> #include <cmsis_core.h> #if (MPU_GUARD_ALIGN_AND_SIZE_FLOAT > MPU_GUARD_ALIGN_AND_SIZE) #define FP_GUARD_EXTRA_SIZE (MPU_GUARD_ALIGN_AND_SIZE_FLOAT - \ MPU_GUARD_ALIGN_AND_SIZE) #else #define FP_GUARD_EXTRA_SIZE 0 #endif #ifndef EXC_RETURN_FTYPE /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ #define EXC_RETURN_FTYPE (0x00000010UL) #endif /* Default last octet of EXC_RETURN, for threads that have not run yet. * The full EXC_RETURN value will be e.g. 0xFFFFFFBC. */ #define DEFAULT_EXC_RETURN 0xFD; /* An initial context, to be "restored" by z_arm_pendsv(), is put at the other * end of the stack, and thus reusable by the stack when not needed anymore. * * The initial context is an exception stack frame (ESF) since exiting the * PendSV exception will want to pop an ESF. Interestingly, even if the lsb of * an instruction address to jump to must always be set since the CPU always * runs in thumb mode, the ESF expects the real address of the instruction, * with the lsb *not* set (instructions are always aligned on 16 bit * halfwords). Since the compiler automatically sets the lsb of function * addresses, we have to unset it manually before storing it in the 'pc' field * of the ESF. 
*/ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, char *stack_ptr, k_thread_entry_t entry, void *p1, void *p2, void *p3) { struct __basic_sf *iframe; #ifdef CONFIG_MPU_STACK_GUARD #if defined(CONFIG_USERSPACE) if (z_stack_is_user_capable(stack)) { /* Guard area is carved-out of the buffer instead of reserved * for stacks that can host user threads */ thread->stack_info.start += MPU_GUARD_ALIGN_AND_SIZE; thread->stack_info.size -= MPU_GUARD_ALIGN_AND_SIZE; } #endif /* CONFIG_USERSPACE */ #if FP_GUARD_EXTRA_SIZE > 0 if ((thread->base.user_options & K_FP_REGS) != 0) { /* Larger guard needed due to lazy stacking of FP regs may * overshoot the guard area without writing anything. We * carve it out of the stack buffer as-needed instead of * unconditionally reserving it. */ thread->stack_info.start += FP_GUARD_EXTRA_SIZE; thread->stack_info.size -= FP_GUARD_EXTRA_SIZE; } #endif /* FP_GUARD_EXTRA_SIZE */ #endif /* CONFIG_MPU_STACK_GUARD */ iframe = Z_STACK_PTR_TO_FRAME(struct __basic_sf, stack_ptr); #if defined(CONFIG_USERSPACE) if ((thread->base.user_options & K_USER) != 0) { iframe->pc = (uint32_t)arch_user_mode_enter; } else { iframe->pc = (uint32_t)z_thread_entry; } #else iframe->pc = (uint32_t)z_thread_entry; #endif iframe->a1 = (uint32_t)entry; iframe->a2 = (uint32_t)p1; iframe->a3 = (uint32_t)p2; iframe->a4 = (uint32_t)p3; iframe->xpsr = A_BIT | MODE_SYS; #if defined(CONFIG_BIG_ENDIAN) iframe->xpsr |= E_BIT; #endif /* CONFIG_BIG_ENDIAN */ #if defined(CONFIG_COMPILER_ISA_THUMB2) iframe->xpsr |= T_BIT; #endif /* CONFIG_COMPILER_ISA_THUMB2 */ #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) iframe = (struct __basic_sf *) ((uintptr_t)iframe - sizeof(struct __fpu_sf)); memset(iframe, 0, sizeof(struct __fpu_sf)); #endif thread->callee_saved.psp = (uint32_t)iframe; thread->arch.basepri = 0; #if defined(CONFIG_ARM_STORE_EXC_RETURN) || defined(CONFIG_USERSPACE) thread->arch.mode = 0; #if defined(CONFIG_ARM_STORE_EXC_RETURN) 
thread->arch.mode_exc_return = DEFAULT_EXC_RETURN; #endif #if FP_GUARD_EXTRA_SIZE > 0 if ((thread->base.user_options & K_FP_REGS) != 0) { thread->arch.mode |= Z_ARM_MODE_MPU_GUARD_FLOAT_Msk; } #endif #if defined(CONFIG_USERSPACE) thread->arch.priv_stack_start = 0; #endif #endif /* * initial values in all other registers/thread entries are * irrelevant. */ #if defined(CONFIG_USE_SWITCH) extern void z_arm_cortex_ar_exit_exc(void); thread->switch_handle = thread; /* thread birth happens through the exception return path */ thread->arch.exception_depth = 1; thread->callee_saved.lr = (uint32_t)z_arm_cortex_ar_exit_exc; #endif } #if defined(CONFIG_MPU_STACK_GUARD) && defined(CONFIG_FPU) \ && defined(CONFIG_FPU_SHARING) static inline void z_arm_thread_stack_info_adjust(struct k_thread *thread, bool use_large_guard) { if (use_large_guard) { /* Switch to use a large MPU guard if not already. */ if ((thread->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) == 0) { /* Default guard size is used. Update required. */ thread->arch.mode |= Z_ARM_MODE_MPU_GUARD_FLOAT_Msk; #if defined(CONFIG_USERSPACE) if (thread->arch.priv_stack_start) { /* User thread */ thread->arch.priv_stack_start += FP_GUARD_EXTRA_SIZE; } else #endif /* CONFIG_USERSPACE */ { /* Privileged thread */ thread->stack_info.start += FP_GUARD_EXTRA_SIZE; thread->stack_info.size -= FP_GUARD_EXTRA_SIZE; } } } else { /* Switch to use the default MPU guard size if not already. */ if ((thread->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) { /* Large guard size is used. Update required. 
*/ thread->arch.mode &= ~Z_ARM_MODE_MPU_GUARD_FLOAT_Msk; #if defined(CONFIG_USERSPACE) if (thread->arch.priv_stack_start) { /* User thread */ thread->arch.priv_stack_start -= FP_GUARD_EXTRA_SIZE; } else #endif /* CONFIG_USERSPACE */ { /* Privileged thread */ thread->stack_info.start -= FP_GUARD_EXTRA_SIZE; thread->stack_info.size += FP_GUARD_EXTRA_SIZE; } } } } #endif #ifdef CONFIG_USERSPACE FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, void *p1, void *p2, void *p3) { /* Set up privileged stack before entering user mode */ _current->arch.priv_stack_start = (uint32_t)z_priv_stack_find(_current->stack_obj); #if defined(CONFIG_MPU_STACK_GUARD) #if defined(CONFIG_THREAD_STACK_INFO) /* We're dropping to user mode which means the guard area is no * longer used here, it instead is moved to the privilege stack * to catch stack overflows there. Un-do the calculations done * which accounted for memory borrowed from the thread stack. */ #if FP_GUARD_EXTRA_SIZE > 0 if ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) { _current->stack_info.start -= FP_GUARD_EXTRA_SIZE; _current->stack_info.size += FP_GUARD_EXTRA_SIZE; } #endif /* FP_GUARD_EXTRA_SIZE */ _current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE; _current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE; #endif /* CONFIG_THREAD_STACK_INFO */ /* Stack guard area reserved at the bottom of the thread's * privileged stack. Adjust the available (writable) stack * buffer area accordingly. */ #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) _current->arch.priv_stack_start += ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ? 
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE; #else _current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE; #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */ #endif /* CONFIG_MPU_STACK_GUARD */ #if defined(CONFIG_CPU_AARCH32_CORTEX_R) _current->arch.priv_stack_end = _current->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE; #endif z_arm_userspace_enter(user_entry, p1, p2, p3, (uint32_t)_current->stack_info.start, _current->stack_info.size - _current->stack_info.delta); CODE_UNREACHABLE; } bool z_arm_thread_is_in_user_mode(void) { uint32_t value; /* * For Cortex-R, the mode (lower 5) bits will be 0x10 for user mode. */ value = __get_CPSR(); return ((value & CPSR_M_Msk) == CPSR_M_USR); } EXPORT_SYMBOL(z_arm_thread_is_in_user_mode); #endif #if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE) #define IS_MPU_GUARD_VIOLATION(guard_start, guard_len, fault_addr, stack_ptr) \ ((fault_addr != -EINVAL) ? \ ((fault_addr >= guard_start) && \ (fault_addr < (guard_start + guard_len)) && \ (stack_ptr < (guard_start + guard_len))) \ : \ (stack_ptr < (guard_start + guard_len))) /** * @brief Assess occurrence of current thread's stack corruption * * This function performs an assessment whether a memory fault (on a * given memory address) is the result of stack memory corruption of * the current thread. * * Thread stack corruption for supervisor threads or user threads in * privilege mode (when User Space is supported) is reported upon an * attempt to access the stack guard area (if MPU Stack Guard feature * is supported). Additionally the current PSP (process stack pointer) * must be pointing inside or below the guard area. * * Thread stack corruption for user threads in user mode is reported, * if the current PSP is pointing below the start of the current * thread's stack. 
* * Notes: * - we assume a fully descending stack, * - we assume a stacking error has occurred, * - the function shall be called when handling MemManage and Bus fault, * and only if a Stacking error has been reported. * * If stack corruption is detected, the function returns the lowest * allowed address where the Stack Pointer can safely point to, to * prevent from errors when un-stacking the corrupted stack frame * upon exception return. * * @param fault_addr memory address on which memory access violation * has been reported. It can be invalid (-EINVAL), * if only Stacking error has been reported. * @param psp current address the PSP points to * * @return The lowest allowed stack frame pointer, if error is a * thread stack corruption, otherwise return 0. */ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp) { #if defined(CONFIG_MULTITHREADING) const struct k_thread *thread = _current; if (thread == NULL) { return 0; } #endif #if (defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)) && \ defined(CONFIG_MPU_STACK_GUARD) uint32_t guard_len = ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ? MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE; #else /* If MPU_STACK_GUARD is not enabled, the guard length is * effectively zero. Stack overflows may be detected only * for user threads in nPRIV mode. 
*/ uint32_t guard_len = MPU_GUARD_ALIGN_AND_SIZE; #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */ #if defined(CONFIG_USERSPACE) if (thread->arch.priv_stack_start) { /* User thread */ if (z_arm_thread_is_in_user_mode() == false) { /* User thread in privilege mode */ if (IS_MPU_GUARD_VIOLATION( thread->arch.priv_stack_start - guard_len, guard_len, fault_addr, psp)) { /* Thread's privilege stack corruption */ return thread->arch.priv_stack_start; } } else { if (psp < (uint32_t)thread->stack_obj) { /* Thread's user stack corruption */ return (uint32_t)thread->stack_obj; } } } else { /* Supervisor thread */ if (IS_MPU_GUARD_VIOLATION(thread->stack_info.start - guard_len, guard_len, fault_addr, psp)) { /* Supervisor thread stack corruption */ return thread->stack_info.start; } } #else /* CONFIG_USERSPACE */ #if defined(CONFIG_MULTITHREADING) if (IS_MPU_GUARD_VIOLATION(thread->stack_info.start - guard_len, guard_len, fault_addr, psp)) { /* Thread stack corruption */ return thread->stack_info.start; } #else if (IS_MPU_GUARD_VIOLATION((uint32_t)z_main_stack, guard_len, fault_addr, psp)) { /* Thread stack corruption */ return (uint32_t)K_THREAD_STACK_BUFFER(z_main_stack); } #endif #endif /* CONFIG_USERSPACE */ return 0; } #endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */ #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) int arch_float_disable(struct k_thread *thread) { if (thread != _current) { return -EINVAL; } if (arch_is_in_isr()) { return -EINVAL; } /* Disable all floating point capabilities for the thread */ /* K_FP_REG flag is used in SWAP and stack check fail. Locking * interrupts here prevents a possible context-switch or MPU * fault to take an outdated thread user_options flag into * account. */ int key = arch_irq_lock(); thread->base.user_options &= ~K_FP_REGS; __set_FPEXC(0); /* No need to add an ISB barrier after setting the CONTROL * register; arch_irq_unlock() already adds one. 
*/ arch_irq_unlock(key); return 0; } int arch_float_enable(struct k_thread *thread, unsigned int options) { /* This is not supported in Cortex-A and Cortex-R */ return -ENOTSUP; } #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */ ```
/content/code_sandbox/arch/arm/core/cortex_a_r/thread.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,343
```c /* * ARMv7 MMU support * * This implementation supports the Short-descriptor translation * table format. The standard page size is 4 kB, 1 MB sections * are only used for mapping the code and data of the Zephyr image. * Secure mode and PL1 is always assumed. LPAE and PXN extensions * as well as TEX remapping are not supported. The AP[2:1] plus * Access flag permissions model is used, as the AP[2:0] model is * deprecated. As the AP[2:1] model can only disable write access, * the read permission flag is always implied. * * Reference documentation: * ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition, * ARM document ID DDI0406C Rev. d, March 2018 * */ #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/kernel.h> #include <zephyr/linker/linker-defs.h> #include <zephyr/logging/log.h> #include <zephyr/sys/__assert.h> #include <zephyr/sys/util.h> #include <zephyr/kernel/mm.h> #include <zephyr/sys/barrier.h> #include <cmsis_core.h> #include <zephyr/arch/arm/mmu/arm_mmu.h> #include "arm_mmu_priv.h" LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); /* Level 1 page table: always required, must be 16k-aligned */ static struct arm_mmu_l1_page_table l1_page_table __aligned(KB(16)) = {0}; /* * Array of level 2 page tables with 4k granularity: * each table covers a range of 1 MB, the number of L2 tables * is configurable. */ static struct arm_mmu_l2_page_table l2_page_tables[CONFIG_ARM_MMU_NUM_L2_TABLES] __aligned(KB(1)) = {0}; /* * For each level 2 page table, a separate dataset tracks * if the respective table is in use, if so, to which 1 MB * virtual address range it is assigned, and how many entries, * each mapping a 4 kB page, it currently contains. 
*/ static struct arm_mmu_l2_page_table_status l2_page_tables_status[CONFIG_ARM_MMU_NUM_L2_TABLES] = {0}; /* Available L2 tables count & next free index for an L2 table request */ static uint32_t arm_mmu_l2_tables_free = CONFIG_ARM_MMU_NUM_L2_TABLES; static uint32_t arm_mmu_l2_next_free_table; /* * Static definition of all code & data memory regions of the * current Zephyr image. This information must be available & * processed upon MMU initialization. */ static const struct arm_mmu_flat_range mmu_zephyr_ranges[] = { /* * Mark the zephyr execution regions (data, bss, noinit, etc.) * cacheable, read / write and non-executable */ { .name = "zephyr_data", .start = (uint32_t)_image_ram_start, .end = (uint32_t)_image_ram_end, .attrs = MT_NORMAL | MATTR_SHARED | MPERM_R | MPERM_W | MATTR_CACHE_OUTER_WB_WA | MATTR_CACHE_INNER_WB_WA}, /* Mark text segment cacheable, read only and executable */ { .name = "zephyr_code", .start = (uint32_t)__text_region_start, .end = (uint32_t)__text_region_end, .attrs = MT_NORMAL | MATTR_SHARED | /* The code needs to have write permission in order for * software breakpoints (which modify instructions) to work */ #if defined(CONFIG_GDBSTUB) MPERM_R | MPERM_X | MPERM_W | #else MPERM_R | MPERM_X | #endif MATTR_CACHE_OUTER_WB_nWA | MATTR_CACHE_INNER_WB_nWA | MATTR_MAY_MAP_L1_SECTION}, /* Mark rodata segment cacheable, read only and non-executable */ { .name = "zephyr_rodata", .start = (uint32_t)__rodata_region_start, .end = (uint32_t)__rodata_region_end, .attrs = MT_NORMAL | MATTR_SHARED | MPERM_R | MATTR_CACHE_OUTER_WB_nWA | MATTR_CACHE_INNER_WB_nWA | MATTR_MAY_MAP_L1_SECTION}, #ifdef CONFIG_NOCACHE_MEMORY /* Mark nocache segment read / write and non-executable */ { .name = "nocache", .start = (uint32_t)_nocache_ram_start, .end = (uint32_t)_nocache_ram_end, .attrs = MT_STRONGLY_ORDERED | MPERM_R | MPERM_W}, #endif }; static void arm_mmu_l2_map_page(uint32_t va, uint32_t pa, struct arm_mmu_perms_attrs perms_attrs); /** * @brief Invalidates the 
TLB * Helper function which invalidates the entire TLB. This action * is performed whenever the MMU is (re-)enabled or changes to the * page tables are made at run-time, as the TLB might contain entries * which are no longer valid once the changes are applied. */ static void invalidate_tlb_all(void) { __set_TLBIALL(0); /* 0 = opc2 = invalidate entire TLB */ barrier_dsync_fence_full(); barrier_isync_fence_full(); } /** * @brief Returns a free level 2 page table * Initializes and returns the next free L2 page table whenever * a page is to be mapped in a 1 MB virtual address range that * is not yet covered by a level 2 page table. * * @param va 32-bit virtual address to be mapped. * @retval pointer to the L2 table now assigned to the 1 MB * address range the target virtual address is in. */ static struct arm_mmu_l2_page_table *arm_mmu_assign_l2_table(uint32_t va) { struct arm_mmu_l2_page_table *l2_page_table; __ASSERT(arm_mmu_l2_tables_free > 0, "Cannot set up L2 page table for VA 0x%08X: " "no more free L2 page tables available\n", va); __ASSERT(l2_page_tables_status[arm_mmu_l2_next_free_table].entries == 0, "Cannot set up L2 page table for VA 0x%08X: " "expected empty L2 table at index [%u], but the " "entries value is %u\n", va, arm_mmu_l2_next_free_table, l2_page_tables_status[arm_mmu_l2_next_free_table].entries); /* * Store in the status dataset of the L2 table to be returned * which 1 MB virtual address range it is being assigned to. * Set the current page table entry count to 0. */ l2_page_tables_status[arm_mmu_l2_next_free_table].l1_index = ((va >> ARM_MMU_PTE_L1_INDEX_PA_SHIFT) & ARM_MMU_PTE_L1_INDEX_MASK); l2_page_tables_status[arm_mmu_l2_next_free_table].entries = 0; l2_page_table = &l2_page_tables[arm_mmu_l2_next_free_table]; /* * Decrement the available L2 page table count. As long as at * least one more L2 table is available afterwards, update the * L2 next free table index. 
If we're about to return the last * available L2 table, calculating a next free table index is * impossible. */ --arm_mmu_l2_tables_free; if (arm_mmu_l2_tables_free > 0) { do { arm_mmu_l2_next_free_table = (arm_mmu_l2_next_free_table + 1) % CONFIG_ARM_MMU_NUM_L2_TABLES; } while (l2_page_tables_status[arm_mmu_l2_next_free_table].entries != 0); } return l2_page_table; } /** * @brief Releases a level 2 page table * Releases a level 2 page table, marking it as no longer in use. * From that point on, it can be re-used for mappings in another * 1 MB virtual address range. This function is called whenever * it is determined during an unmap call at run-time that the page * table entry count in the respective page table has reached 0. * * @param l2_page_table Pointer to L2 page table to be released. */ static void arm_mmu_release_l2_table(struct arm_mmu_l2_page_table *l2_page_table) { uint32_t l2_page_table_index = ARM_MMU_L2_PT_INDEX(l2_page_table); l2_page_tables_status[l2_page_table_index].l1_index = 0; if (arm_mmu_l2_tables_free == 0) { arm_mmu_l2_next_free_table = l2_page_table_index; } ++arm_mmu_l2_tables_free; } /** * @brief Increments the page table entry counter of a L2 page table * Increments the page table entry counter of a level 2 page table. * Contains a check to ensure that no attempts are made to set up * more page table entries than the table can hold. * * @param l2_page_table Pointer to the L2 page table whose entry * counter shall be incremented. 
*/ static void arm_mmu_inc_l2_table_entries(struct arm_mmu_l2_page_table *l2_page_table) { uint32_t l2_page_table_index = ARM_MMU_L2_PT_INDEX(l2_page_table); __ASSERT(l2_page_tables_status[l2_page_table_index].entries < ARM_MMU_PT_L2_NUM_ENTRIES, "Cannot increment entry count of the L2 page table at index " "[%u] / addr %p / ref L1[%u]: maximum entry count already reached", l2_page_table_index, l2_page_table, l2_page_tables_status[l2_page_table_index].l1_index); ++l2_page_tables_status[l2_page_table_index].entries; } /** * @brief Decrements the page table entry counter of a L2 page table * Decrements the page table entry counter of a level 2 page table. * Contains a check to ensure that no attempts are made to remove * entries from the respective table that aren't actually there. * * @param l2_page_table Pointer to the L2 page table whose entry * counter shall be decremented. */ static void arm_mmu_dec_l2_table_entries(struct arm_mmu_l2_page_table *l2_page_table) { uint32_t l2_page_table_index = ARM_MMU_L2_PT_INDEX(l2_page_table); __ASSERT(l2_page_tables_status[l2_page_table_index].entries > 0, "Cannot decrement entry count of the L2 page table at index " "[%u] / addr %p / ref L1[%u]: entry count is already zero", l2_page_table_index, l2_page_table, l2_page_tables_status[l2_page_table_index].l1_index); if (--l2_page_tables_status[l2_page_table_index].entries == 0) { arm_mmu_release_l2_table(l2_page_table); } } /** * @brief Converts memory attributes and permissions to MMU format * Converts memory attributes and permissions as used in the boot- * time memory mapping configuration data array (MT_..., MATTR_..., * MPERM_...) to the equivalent bit (field) values used in the MMU's * L1 and L2 page table entries. Contains plausibility checks. * * @param attrs type/attribute/permissions flags word obtained from * an entry of the mmu_config mapping data array. 
* @retval A struct containing the information from the input flags * word converted to the bits / bit fields used in L1 and * L2 page table entries. */ static struct arm_mmu_perms_attrs arm_mmu_convert_attr_flags(uint32_t attrs) { struct arm_mmu_perms_attrs perms_attrs = {0}; __ASSERT(((attrs & MT_MASK) > 0), "Cannot convert attrs word to PTE control bits: no " "memory type specified"); __ASSERT(!((attrs & MPERM_W) && !(attrs & MPERM_R)), "attrs must not define write permission without read " "permission"); __ASSERT(!((attrs & MPERM_W) && (attrs & MPERM_X)), "attrs must not define executable memory with write " "permission"); /* * The translation of the memory type / permissions / attributes * flags in the attrs word to the TEX, C, B, S and AP bits of the * target PTE is based on the reference manual: * TEX, C, B, S: Table B3-10, chap. B3.8.2, p. B3-1363f. * AP : Table B3-6, chap. B3.7.1, p. B3-1353. * Device / strongly ordered memory is always assigned to a domain * other than that used for normal memory. Assuming that userspace * support utilizing the MMU is eventually implemented, a single * modification of the DACR register when entering/leaving unprivi- * leged mode could be used in order to enable/disable all device * memory access without having to modify any PTs/PTEs. */ if (attrs & MT_STRONGLY_ORDERED) { /* Strongly ordered is always shareable, S bit is ignored */ perms_attrs.tex = 0; perms_attrs.cacheable = 0; perms_attrs.bufferable = 0; perms_attrs.shared = 0; perms_attrs.domain = ARM_MMU_DOMAIN_DEVICE; } else if (attrs & MT_DEVICE) { /* * Shareability of device memory is determined by TEX, C, B. * The S bit is ignored. C is always 0 for device memory. */ perms_attrs.shared = 0; perms_attrs.cacheable = 0; perms_attrs.domain = ARM_MMU_DOMAIN_DEVICE; if (attrs & MATTR_SHARED) { perms_attrs.tex = 0; perms_attrs.bufferable = 1; } else { perms_attrs.tex = 2; perms_attrs.bufferable = 0; } } else if (attrs & MT_NORMAL) { /* * TEX[2] is always 1. 
TEX[1:0] contain the outer cache attri- * butes encoding, C and B contain the inner cache attributes * encoding. */ perms_attrs.tex |= ARM_MMU_TEX2_CACHEABLE_MEMORY; perms_attrs.domain = ARM_MMU_DOMAIN_OS; /* For normal memory, shareability depends on the S bit */ if (attrs & MATTR_SHARED) { perms_attrs.shared = 1; } if (attrs & MATTR_CACHE_OUTER_WB_WA) { perms_attrs.tex |= ARM_MMU_TEX_CACHE_ATTRS_WB_WA; } else if (attrs & MATTR_CACHE_OUTER_WT_nWA) { perms_attrs.tex |= ARM_MMU_TEX_CACHE_ATTRS_WT_nWA; } else if (attrs & MATTR_CACHE_OUTER_WB_nWA) { perms_attrs.tex |= ARM_MMU_TEX_CACHE_ATTRS_WB_nWA; } if (attrs & MATTR_CACHE_INNER_WB_WA) { perms_attrs.cacheable = ARM_MMU_C_CACHE_ATTRS_WB_WA; perms_attrs.bufferable = ARM_MMU_B_CACHE_ATTRS_WB_WA; } else if (attrs & MATTR_CACHE_INNER_WT_nWA) { perms_attrs.cacheable = ARM_MMU_C_CACHE_ATTRS_WT_nWA; perms_attrs.bufferable = ARM_MMU_B_CACHE_ATTRS_WT_nWA; } else if (attrs & MATTR_CACHE_INNER_WB_nWA) { perms_attrs.cacheable = ARM_MMU_C_CACHE_ATTRS_WB_nWA; perms_attrs.bufferable = ARM_MMU_B_CACHE_ATTRS_WB_nWA; } } if (attrs & MATTR_NON_SECURE) { perms_attrs.non_sec = 1; } if (attrs & MATTR_NON_GLOBAL) { perms_attrs.not_global = 1; } /* * Up next is the consideration of the case that a PTE shall be configured * for a page that shall not be accessible at all (e.g. guard pages), and * therefore has neither read nor write permissions. In the AP[2:1] access * permission specification model, the only way to indicate this is to * actually mask out the PTE's identifier bits, as otherwise, read permission * is always granted for any valid PTE, it can't be revoked explicitly, * unlike the write permission. 
*/ if (!((attrs & MPERM_R) || (attrs & MPERM_W))) { perms_attrs.id_mask = 0x0; } else { perms_attrs.id_mask = 0x3; } if (!(attrs & MPERM_W)) { perms_attrs.acc_perms |= ARM_MMU_PERMS_AP2_DISABLE_WR; } if (attrs & MPERM_UNPRIVILEGED) { perms_attrs.acc_perms |= ARM_MMU_PERMS_AP1_ENABLE_PL0; } if (!(attrs & MPERM_X)) { perms_attrs.exec_never = 1; } return perms_attrs; } /** * @brief Maps a 1 MB memory range via a level 1 page table entry * Maps a 1 MB memory range using a level 1 page table entry of type * 'section'. This type of entry saves a level 2 page table, but has * two pre-conditions: the memory area to be mapped must contain at * least 1 MB of contiguous memory, starting at an address with suit- * able alignment. This mapping method should only be used for map- * pings for which it is unlikely that the attributes of those mappings * will mappings will change at run-time (e.g. code sections will al- * ways be read-only and executable). Should the case occur that the * permissions or attributes of a subset of a 1 MB section entry shall * be re-configured at run-time, a L1 section entry will be broken * down into 4k segments using a L2 table with identical attributes * before any modifications are performed for the subset of the affec- * ted 1 MB range. This comes with an undeterministic performance * penalty at the time of re-configuration, therefore, any mappings * for which L1 section entries are a valid option, shall be marked in * their declaration with the MATTR_MAY_MAP_L1_SECTION flag. * * @param va 32-bit target virtual address to be mapped. * @param pa 32-bit physical address to be mapped. * @param perms_attrs Permission and attribute bits in the format * used in the MMU's L1 page table entries. 
*/ static void arm_mmu_l1_map_section(uint32_t va, uint32_t pa, struct arm_mmu_perms_attrs perms_attrs) { uint32_t l1_index = (va >> ARM_MMU_PTE_L1_INDEX_PA_SHIFT) & ARM_MMU_PTE_L1_INDEX_MASK; __ASSERT(l1_page_table.entries[l1_index].undefined.id == ARM_MMU_PTE_ID_INVALID, "Unexpected non-zero L1 PTE ID %u for VA 0x%08X / PA 0x%08X", l1_page_table.entries[l1_index].undefined.id, va, pa); l1_page_table.entries[l1_index].l1_section_1m.id = (ARM_MMU_PTE_ID_SECTION & perms_attrs.id_mask); l1_page_table.entries[l1_index].l1_section_1m.bufferable = perms_attrs.bufferable; l1_page_table.entries[l1_index].l1_section_1m.cacheable = perms_attrs.cacheable; l1_page_table.entries[l1_index].l1_section_1m.exec_never = perms_attrs.exec_never; l1_page_table.entries[l1_index].l1_section_1m.domain = perms_attrs.domain; l1_page_table.entries[l1_index].l1_section_1m.impl_def = 0; l1_page_table.entries[l1_index].l1_section_1m.acc_perms10 = ((perms_attrs.acc_perms & 0x1) << 1) | 0x1; l1_page_table.entries[l1_index].l1_section_1m.tex = perms_attrs.tex; l1_page_table.entries[l1_index].l1_section_1m.acc_perms2 = (perms_attrs.acc_perms >> 1) & 0x1; l1_page_table.entries[l1_index].l1_section_1m.shared = perms_attrs.shared; l1_page_table.entries[l1_index].l1_section_1m.not_global = perms_attrs.not_global; l1_page_table.entries[l1_index].l1_section_1m.zero = 0; l1_page_table.entries[l1_index].l1_section_1m.non_sec = perms_attrs.non_sec; l1_page_table.entries[l1_index].l1_section_1m.base_address = (pa >> ARM_MMU_PTE_L1_INDEX_PA_SHIFT); } /** * @brief Converts a L1 1 MB section mapping to a full L2 table * When this function is called, something has happened that shouldn't * happen for the sake of run-time performance and determinism: the * attributes and/or permissions of a subset of a 1 MB memory range * currently represented by a level 1 page table entry of type 'section' * shall be modified so that they differ from the rest of the 1 MB * range's attributes/permissions. 
Therefore, the single L1 page table
 * entry has to be broken down to the full 256 4k-wide entries of a
 * L2 page table with identical properties so that afterwards, the
 * modification of the subset can be performed with a 4k granularity.
 * The risk at this point is that all L2 tables are already in use,
 * which will result in an assertion failure in the first contained
 * #arm_mmu_l2_map_page() call.
 * @warning While the conversion is being performed, interrupts are
 *          locked globally and the MMU is disabled (the required
 *          Zephyr code & data are still accessible in this state as
 *          those are identity mapped). Expect non-deterministic
 *          behaviour / interrupt latencies while the conversion is
 *          in progress!
 *
 * @param va 32-bit virtual address within the 1 MB range that shall
 *           be converted from L1 1 MB section mapping to L2 4 kB page
 *           mappings.
 * @param l2_page_table Pointer to an empty L2 page table allocated
 *                      for the purpose of replacing the L1 section
 *                      mapping.
 */
static void arm_mmu_remap_l1_section_to_l2_table(uint32_t va,
						 struct arm_mmu_l2_page_table *l2_page_table)
{
	struct arm_mmu_perms_attrs perms_attrs = {0};
	uint32_t l1_index = (va >> ARM_MMU_PTE_L1_INDEX_PA_SHIFT) &
			    ARM_MMU_PTE_L1_INDEX_MASK;
	uint32_t rem_size = MB(1);
	uint32_t reg_val;
	int lock_key;

	/*
	 * Extract the permissions and attributes from the current 1 MB section entry.
	 * This data will be carried over to the resulting L2 page table.
	 */
	perms_attrs.acc_perms = (l1_page_table.entries[l1_index].l1_section_1m.acc_perms2 << 1) |
				((l1_page_table.entries[l1_index].l1_section_1m.acc_perms10 >> 1) &
				0x1);
	perms_attrs.bufferable = l1_page_table.entries[l1_index].l1_section_1m.bufferable;
	perms_attrs.cacheable = l1_page_table.entries[l1_index].l1_section_1m.cacheable;
	perms_attrs.domain = l1_page_table.entries[l1_index].l1_section_1m.domain;
	/* ID bits 00 mark an entry with neither R nor W permission (AP[2:1] model) */
	perms_attrs.id_mask = (l1_page_table.entries[l1_index].l1_section_1m.id ==
			      ARM_MMU_PTE_ID_INVALID) ? 0x0 : 0x3;
	perms_attrs.not_global = l1_page_table.entries[l1_index].l1_section_1m.not_global;
	perms_attrs.non_sec = l1_page_table.entries[l1_index].l1_section_1m.non_sec;
	perms_attrs.shared = l1_page_table.entries[l1_index].l1_section_1m.shared;
	perms_attrs.tex = l1_page_table.entries[l1_index].l1_section_1m.tex;
	perms_attrs.exec_never = l1_page_table.entries[l1_index].l1_section_1m.exec_never;

	/*
	 * Disable interrupts - no interrupts shall occur before the L2 table has
	 * been set up in place of the former L1 section entry.
	 */
	lock_key = arch_irq_lock();

	/*
	 * Disable the MMU. The L1 PTE array and the L2 PT array may actually be
	 * covered by the L1 PTE we're about to replace, so access to this data
	 * must remain functional during the entire remap process. Yet, the only
	 * memory areas for which L1 1 MB section entries are even considered are
	 * those belonging to the Zephyr image. Those areas are *always* identity
	 * mapped, so the MMU can be turned off and the relevant data will still
	 * be available.
	 */
	reg_val = __get_SCTLR();
	__set_SCTLR(reg_val & (~ARM_MMU_SCTLR_MMU_ENABLE_BIT));

	/*
	 * Clear the entire L1 PTE & re-configure it as a L2 PT reference
	 * -> already sets the correct values for: zero0, zero1, impl_def.
	 */
	l1_page_table.entries[l1_index].word = 0;
	l1_page_table.entries[l1_index].l2_page_table_ref.id = ARM_MMU_PTE_ID_L2_PT;
	l1_page_table.entries[l1_index].l2_page_table_ref.domain = perms_attrs.domain;
	l1_page_table.entries[l1_index].l2_page_table_ref.non_sec = perms_attrs.non_sec;
	l1_page_table.entries[l1_index].l2_page_table_ref.l2_page_table_address =
		(((uint32_t)l2_page_table >> ARM_MMU_PT_L2_ADDR_SHIFT) &
		ARM_MMU_PT_L2_ADDR_MASK);

	/* Align the target VA to the base address of the section we're converting */
	va &= ~(MB(1) - 1);
	/* Re-create the section's full 1 MB range as 256 identically-configured 4 kB pages */
	while (rem_size > 0) {
		arm_mmu_l2_map_page(va, va, perms_attrs);
		rem_size -= KB(4);
		va += KB(4);
	}

	/* Remap complete, re-enable the MMU, unlock the interrupts. */
	invalidate_tlb_all();
	__set_SCTLR(reg_val);
	arch_irq_unlock(lock_key);
}

/**
 * @brief Maps a 4 kB memory page using a L2 page table entry
 * Maps a single 4 kB page of memory from the specified physical
 * address to the specified virtual address, using the provided
 * attributes and permissions which have already been converted
 * from the system's format provided to arch_mem_map() to the
 * bits / bit masks used in the L2 page table entry.
 *
 * @param va 32-bit target virtual address.
 * @param pa 32-bit physical address.
 * @param perms_attrs Permission and attribute bits in the format
 *                    used in the MMU's L2 page table entries.
 */
static void arm_mmu_l2_map_page(uint32_t va, uint32_t pa,
				struct arm_mmu_perms_attrs perms_attrs)
{
	struct arm_mmu_l2_page_table *l2_page_table = NULL;
	uint32_t l1_index = (va >> ARM_MMU_PTE_L1_INDEX_PA_SHIFT) &
			    ARM_MMU_PTE_L1_INDEX_MASK;
	uint32_t l2_index = (va >> ARM_MMU_PTE_L2_INDEX_PA_SHIFT) &
			    ARM_MMU_PTE_L2_INDEX_MASK;

	/*
	 * Use the calculated L1 index in order to determine if a L2 page
	 * table is required in order to complete the current mapping.
	 * -> See below for an explanation of the possible scenarios.
	 */
	if (l1_page_table.entries[l1_index].undefined.id == ARM_MMU_PTE_ID_INVALID ||
	    (l1_page_table.entries[l1_index].undefined.id & ARM_MMU_PTE_ID_SECTION) != 0) {
		l2_page_table = arm_mmu_assign_l2_table(pa);
		__ASSERT(l2_page_table != NULL,
			 "Unexpected L2 page table NULL pointer for VA 0x%08X", va);
	}

	/*
	 * Check what is currently present at the corresponding L1 table entry.
	 * The following scenarios are possible:
	 * 1) The L1 PTE's ID bits are zero, as is the rest of the entry.
	 *    In this case, the L1 PTE is currently unused. A new L2 PT to
	 *    refer to in this entry has already been allocated above.
	 * 2) The L1 PTE's ID bits indicate a L2 PT reference entry (01).
	 *    The corresponding L2 PT's address will be resolved using this
	 *    entry.
	 * 3) The L1 PTE's ID bits may or may not be zero, and the rest of
	 *    the descriptor contains some non-zero data. This always indicates
	 *    an existing 1 MB section entry in this place. Checking only the
	 *    ID bits wouldn't be enough, as the only way to indicate a section
	 *    with neither R nor W permissions is to set the ID bits to 00 in
	 *    the AP[2:1] permissions model. As we're now about to map a single
	 *    page overlapping with the 1 MB section, the section has to be
	 *    converted into a L2 table. Afterwards, the current page mapping
	 *    can be added/modified.
	 */
	if (l1_page_table.entries[l1_index].word == 0) {
		/* The matching L1 PT entry is currently unused */
		l1_page_table.entries[l1_index].l2_page_table_ref.id = ARM_MMU_PTE_ID_L2_PT;
		l1_page_table.entries[l1_index].l2_page_table_ref.zero0 = 0;
		l1_page_table.entries[l1_index].l2_page_table_ref.zero1 = 0;
		l1_page_table.entries[l1_index].l2_page_table_ref.impl_def = 0;
		/* TODO: domain is forced to 0 here instead of being carried over */
		l1_page_table.entries[l1_index].l2_page_table_ref.domain = 0;
		l1_page_table.entries[l1_index].l2_page_table_ref.non_sec =
			perms_attrs.non_sec;
		l1_page_table.entries[l1_index].l2_page_table_ref.l2_page_table_address =
			(((uint32_t)l2_page_table >> ARM_MMU_PT_L2_ADDR_SHIFT) &
			ARM_MMU_PT_L2_ADDR_MASK);
	} else if (l1_page_table.entries[l1_index].undefined.id == ARM_MMU_PTE_ID_L2_PT) {
		/* The matching L1 PT entry already points to a L2 PT */
		l2_page_table = (struct arm_mmu_l2_page_table *)
				((l1_page_table.entries[l1_index].word &
				(ARM_MMU_PT_L2_ADDR_MASK << ARM_MMU_PT_L2_ADDR_SHIFT)));
		/*
		 * The only configuration bit contained in the L2 PT entry is the
		 * NS bit. Set it according to the attributes passed to this function,
		 * warn if there is a mismatch between the current page's NS attribute
		 * value and the value currently contained in the L2 PT entry.
		 */
		if (l1_page_table.entries[l1_index].l2_page_table_ref.non_sec !=
		    perms_attrs.non_sec) {
			LOG_WRN("NS bit mismatch in L2 PT reference at L1 index [%u], "
				"re-configuring from %u to %u",
				l1_index,
				l1_page_table.entries[l1_index].l2_page_table_ref.non_sec,
				perms_attrs.non_sec);
			l1_page_table.entries[l1_index].l2_page_table_ref.non_sec =
				perms_attrs.non_sec;
		}
	} else if (l1_page_table.entries[l1_index].undefined.reserved != 0) {
		/*
		 * The matching L1 PT entry currently holds a 1 MB section entry
		 * in order to save a L2 table (as it's neither completely blank
		 * nor a L2 PT reference), but now we have to map an overlapping
		 * 4 kB page, so the section entry must be converted to a L2 table
		 * first before the individual L2 entry for the page to be mapped is
		 * accessed. A blank L2 PT has already been assigned above.
		 */
		arm_mmu_remap_l1_section_to_l2_table(va, l2_page_table);
	}

	/*
	 * If the matching L2 PTE is blank, increment the number of used entries
	 * in the L2 table. If the L2 PTE already contains some data, we're re-
	 * placing the entry's data instead, the used entry count remains unchanged.
	 * Once again, checking the ID bits might be misleading if the PTE declares
	 * a page which has neither R nor W permissions.
	 */
	if (l2_page_table->entries[l2_index].word == 0) {
		arm_mmu_inc_l2_table_entries(l2_page_table);
	}

	l2_page_table->entries[l2_index].l2_page_4k.id =
		(ARM_MMU_PTE_ID_SMALL_PAGE & perms_attrs.id_mask);
	l2_page_table->entries[l2_index].l2_page_4k.id |= perms_attrs.exec_never; /* XN in [0] */
	l2_page_table->entries[l2_index].l2_page_4k.bufferable = perms_attrs.bufferable;
	l2_page_table->entries[l2_index].l2_page_4k.cacheable = perms_attrs.cacheable;
	l2_page_table->entries[l2_index].l2_page_4k.acc_perms10 =
		((perms_attrs.acc_perms & 0x1) << 1) | 0x1;
	l2_page_table->entries[l2_index].l2_page_4k.tex = perms_attrs.tex;
	l2_page_table->entries[l2_index].l2_page_4k.acc_perms2 =
		((perms_attrs.acc_perms >> 1) & 0x1);
	l2_page_table->entries[l2_index].l2_page_4k.shared = perms_attrs.shared;
	l2_page_table->entries[l2_index].l2_page_4k.not_global = perms_attrs.not_global;
	l2_page_table->entries[l2_index].l2_page_4k.pa_base =
		((pa >> ARM_MMU_PTE_L2_SMALL_PAGE_ADDR_SHIFT) &
		ARM_MMU_PTE_L2_SMALL_PAGE_ADDR_MASK);
}

/**
 * @brief Unmaps a 4 kB memory page by clearing its L2 page table entry
 * Unmaps a single 4 kB page of memory from the specified virtual
 * address by clearing its respective L2 page table entry.
 *
 * @param va 32-bit virtual address to be unmapped.
 */
static void arm_mmu_l2_unmap_page(uint32_t va)
{
	struct arm_mmu_l2_page_table *l2_page_table;
	uint32_t l1_index = (va >> ARM_MMU_PTE_L1_INDEX_PA_SHIFT) &
			    ARM_MMU_PTE_L1_INDEX_MASK;
	uint32_t l2_index = (va >> ARM_MMU_PTE_L2_INDEX_PA_SHIFT) &
			    ARM_MMU_PTE_L2_INDEX_MASK;

	if (l1_page_table.entries[l1_index].undefined.id != ARM_MMU_PTE_ID_L2_PT) {
		/*
		 * No L2 PT currently exists for the given VA - this should be
		 * tolerated without an error, just as in the case that while
		 * a L2 PT exists, the corresponding PTE is blank - see explanation
		 * below, the same applies here.
		 */
		return;
	}

	l2_page_table = (struct arm_mmu_l2_page_table *)
			((l1_page_table.entries[l1_index].word &
			(ARM_MMU_PT_L2_ADDR_MASK << ARM_MMU_PT_L2_ADDR_SHIFT)));

	if (l2_page_table->entries[l2_index].word == 0) {
		/*
		 * We're supposed to unmap a page at the given VA, but there currently
		 * isn't anything mapped at this address, the L2 PTE is blank.
		 * -> This is normal if a memory area is being mapped via k_mem_map,
		 * which contains two calls to arch_mem_unmap (which effectively end up
		 * here) in order to unmap the leading and trailing guard pages.
		 * Therefore, it has to be expected that unmap calls are made for unmapped
		 * memory which hasn't been in use before.
		 * -> Just return, don't decrement the entry counter of the corresponding
		 * L2 page table, as we're not actually clearing any PTEs.
		 */
		return;
	}

	if ((l2_page_table->entries[l2_index].undefined.id & ARM_MMU_PTE_ID_SMALL_PAGE) !=
	    ARM_MMU_PTE_ID_SMALL_PAGE) {
		LOG_ERR("Cannot unmap virtual memory at 0x%08X: invalid "
			"page table entry type in level 2 page table at "
			"L1 index [%u], L2 index [%u]", va, l1_index, l2_index);
		return;
	}

	l2_page_table->entries[l2_index].word = 0;

	arm_mmu_dec_l2_table_entries(l2_page_table);
}

/**
 * @brief MMU boot-time initialization function
 * Initializes the MMU at boot time. Sets up the page tables and
 * applies any specified memory mappings for either the different
 * sections of the Zephyr binary image, or for device memory as
 * specified at the SoC level.
 *
 * @retval Always 0, errors are handled by assertions.
 */
int z_arm_mmu_init(void)
{
	uint32_t mem_range;
	uint32_t pa;
	uint32_t va;
	uint32_t attrs;
	uint32_t pt_attrs = 0;
	uint32_t rem_size;
	uint32_t reg_val = 0;
	struct arm_mmu_perms_attrs perms_attrs;

	__ASSERT(KB(4) == CONFIG_MMU_PAGE_SIZE,
		 "MMU_PAGE_SIZE value %u is invalid, only 4 kB pages are supported\n",
		 CONFIG_MMU_PAGE_SIZE);

	/* Set up the memory regions pre-defined by the image */
	for (mem_range = 0; mem_range < ARRAY_SIZE(mmu_zephyr_ranges); mem_range++) {
		pa = mmu_zephyr_ranges[mem_range].start;
		rem_size = mmu_zephyr_ranges[mem_range].end - pa;
		attrs = mmu_zephyr_ranges[mem_range].attrs;
		perms_attrs = arm_mmu_convert_attr_flags(attrs);

		/*
		 * Check if the L1 page table is within the region currently
		 * being mapped. If so, store the permissions and attributes
		 * of the current section. This information is required when
		 * writing to the TTBR0 register.
		 */
		if (((uint32_t)&l1_page_table >= pa) &&
		    ((uint32_t)&l1_page_table < (pa + rem_size))) {
			pt_attrs = attrs;
		}

		while (rem_size > 0) {
			if (rem_size >= MB(1) && (pa & 0xFFFFF) == 0 &&
			    (attrs & MATTR_MAY_MAP_L1_SECTION)) {
				/*
				 * Remaining area size > 1 MB & matching alignment
				 * -> map a 1 MB section instead of individual 4 kB
				 * pages with identical configuration.
				 */
				arm_mmu_l1_map_section(pa, pa, perms_attrs);
				rem_size -= MB(1);
				pa += MB(1);
			} else {
				arm_mmu_l2_map_page(pa, pa, perms_attrs);
				rem_size -= (rem_size >= KB(4)) ? KB(4) : rem_size;
				pa += KB(4);
			}
		}
	}

	/* Set up the memory regions defined at the SoC level */
	for (mem_range = 0; mem_range < mmu_config.num_regions; mem_range++) {
		pa = (uint32_t)(mmu_config.mmu_regions[mem_range].base_pa);
		va = (uint32_t)(mmu_config.mmu_regions[mem_range].base_va);
		rem_size = (uint32_t)(mmu_config.mmu_regions[mem_range].size);
		attrs = mmu_config.mmu_regions[mem_range].attrs;
		perms_attrs = arm_mmu_convert_attr_flags(attrs);

		while (rem_size > 0) {
			arm_mmu_l2_map_page(va, pa, perms_attrs);
			rem_size -= (rem_size >= KB(4)) ? KB(4) : rem_size;
			va += KB(4);
			pa += KB(4);
		}
	}

	/* Clear TTBR1 */
	__asm__ __volatile__("mcr p15, 0, %0, c2, c0, 1" : : "r"(reg_val));

	/* Write TTBCR: EAE, security not yet relevant, N[2:0] = 0 */
	__asm__ __volatile__("mcr p15, 0, %0, c2, c0, 2" : : "r"(reg_val));

	/* Write TTBR0 */
	reg_val = ((uint32_t)&l1_page_table.entries[0] & ~0x3FFF);

	/*
	 * Set IRGN, RGN, S in TTBR0 based on the configuration of the
	 * memory area the actual page tables are located in.
	 */
	if (pt_attrs & MATTR_SHARED) {
		reg_val |= ARM_MMU_TTBR_SHAREABLE_BIT;
	}

	if (pt_attrs & MATTR_CACHE_OUTER_WB_WA) {
		reg_val |= (ARM_MMU_TTBR_RGN_OUTER_WB_WA_CACHEABLE <<
			    ARM_MMU_TTBR_RGN_SHIFT);
	} else if (pt_attrs & MATTR_CACHE_OUTER_WT_nWA) {
		reg_val |= (ARM_MMU_TTBR_RGN_OUTER_WT_CACHEABLE <<
			    ARM_MMU_TTBR_RGN_SHIFT);
	} else if (pt_attrs & MATTR_CACHE_OUTER_WB_nWA) {
		reg_val |= (ARM_MMU_TTBR_RGN_OUTER_WB_nWA_CACHEABLE <<
			    ARM_MMU_TTBR_RGN_SHIFT);
	}

	if (pt_attrs & MATTR_CACHE_INNER_WB_WA) {
		reg_val |= ARM_MMU_TTBR_IRGN0_BIT_MP_EXT_ONLY;
	} else if (pt_attrs & MATTR_CACHE_INNER_WT_nWA) {
		reg_val |= ARM_MMU_TTBR_IRGN1_BIT_MP_EXT_ONLY;
	} else if (pt_attrs & MATTR_CACHE_INNER_WB_nWA) {
		reg_val |= ARM_MMU_TTBR_IRGN0_BIT_MP_EXT_ONLY;
		reg_val |= ARM_MMU_TTBR_IRGN1_BIT_MP_EXT_ONLY;
	}

	__set_TTBR0(reg_val);

	/* Write DACR -> all domains to client = 01b. */
	reg_val = ARM_MMU_DACR_ALL_DOMAINS_CLIENT;
	__set_DACR(reg_val);

	invalidate_tlb_all();

	/* Enable the MMU and Cache in SCTLR */
	reg_val  = __get_SCTLR();
	reg_val |= ARM_MMU_SCTLR_AFE_BIT;
	reg_val |= ARM_MMU_SCTLR_ICACHE_ENABLE_BIT;
	reg_val |= ARM_MMU_SCTLR_DCACHE_ENABLE_BIT;
	reg_val |= ARM_MMU_SCTLR_MMU_ENABLE_BIT;
	__set_SCTLR(reg_val);

	return 0;
}

/**
 * @brief ARMv7-specific implementation of memory mapping at run-time
 * Maps memory according to the parameters provided by the caller
 * at run-time.
 *
 * @param virt 32-bit target virtual address.
 * @param phys 32-bit physical address.
 * @param size Size (in bytes) of the memory area to map.
 * @param flags Memory attributes & permissions. Comp. K_MEM_...
 *              flags in kernel/mm.h.
 * @retval 0 on success, -EINVAL if an invalid parameter is detected.
 */
static int __arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
{
	uint32_t va = (uint32_t)virt;
	uint32_t pa = (uint32_t)phys;
	uint32_t rem_size = (uint32_t)size;
	uint32_t conv_flags = MPERM_R;
	struct arm_mmu_perms_attrs perms_attrs;
	int key;

	if (size == 0) {
		LOG_ERR("Cannot map physical memory at 0x%08X: invalid "
			"zero size", (uint32_t)phys);
		return -EINVAL;
	}

	/* Translate the K_MEM_* cache flags into the internal MATTR/MT format */
	switch (flags & K_MEM_CACHE_MASK) {

	case K_MEM_CACHE_NONE:
	default:
		conv_flags |= MT_DEVICE;
		break;
	case K_MEM_CACHE_WB:
		conv_flags |= MT_NORMAL;
		conv_flags |= MATTR_SHARED;
		if (flags & K_MEM_PERM_RW) {
			conv_flags |= MATTR_CACHE_OUTER_WB_WA;
			conv_flags |= MATTR_CACHE_INNER_WB_WA;
		} else {
			conv_flags |= MATTR_CACHE_OUTER_WB_nWA;
			conv_flags |= MATTR_CACHE_INNER_WB_nWA;
		}
		break;
	case K_MEM_CACHE_WT:
		conv_flags |= MT_NORMAL;
		conv_flags |= MATTR_SHARED;
		conv_flags |= MATTR_CACHE_OUTER_WT_nWA;
		conv_flags |= MATTR_CACHE_INNER_WT_nWA;
		break;
	}

	if (flags & K_MEM_PERM_RW) {
		conv_flags |= MPERM_W;
	}
	if (flags & K_MEM_PERM_EXEC) {
		conv_flags |= MPERM_X;
	}

	perms_attrs = arm_mmu_convert_attr_flags(conv_flags);

	key = arch_irq_lock();

	while (rem_size > 0) {
		arm_mmu_l2_map_page(va, pa, perms_attrs);
		rem_size -= (rem_size >= KB(4)) ? KB(4) : rem_size;
		va += KB(4);
		pa += KB(4);
	}

	arch_irq_unlock(key);

	return 0;
}

/**
 * @brief Arch-specific wrapper function for memory mapping at run-time
 * Maps memory according to the parameters provided by the caller
 * at run-time. This function wraps the ARMv7 MMU specific implementation
 * #__arch_mem_map() for the upper layers of the memory management.
 * If the map operation fails, a kernel panic will be triggered.
 *
 * @param virt 32-bit target virtual address.
 * @param phys 32-bit physical address.
 * @param size Size (in bytes) of the memory area to map.
 * @param flags Memory attributes & permissions. Comp. K_MEM_...
 *              flags in kernel/mm.h.
 */
void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
{
	int ret = __arch_mem_map(virt, phys, size, flags);

	if (ret) {
		LOG_ERR("__arch_mem_map() returned %d", ret);
		k_panic();
	} else {
		invalidate_tlb_all();
	}
}

/**
 * @brief ARMv7-specific implementation of memory unmapping at run-time
 * Unmaps memory according to the parameters provided by the caller
 * at run-time.
 *
 * @param addr 32-bit virtual address to unmap.
 * @param size Size (in bytes) of the memory area to unmap.
 * @retval 0 on success, -EINVAL if an invalid parameter is detected.
 */
static int __arch_mem_unmap(void *addr, size_t size)
{
	uint32_t va = (uint32_t)addr;
	uint32_t rem_size = (uint32_t)size;
	int key;

	if (addr == NULL) {
		LOG_ERR("Cannot unmap virtual memory: invalid NULL pointer");
		return -EINVAL;
	}

	if (size == 0) {
		LOG_ERR("Cannot unmap virtual memory at 0x%08X: invalid "
			"zero size", (uint32_t)addr);
		return -EINVAL;
	}

	key = arch_irq_lock();

	while (rem_size > 0) {
		arm_mmu_l2_unmap_page(va);
		rem_size -= (rem_size >= KB(4)) ? KB(4) : rem_size;
		va += KB(4);
	}

	arch_irq_unlock(key);

	return 0;
}

/**
 * @brief Arch-specific wrapper function for memory unmapping at run-time
 * Unmaps memory according to the parameters provided by the caller
 * at run-time. This function wraps the ARMv7 MMU specific implementation
 * #__arch_mem_unmap() for the upper layers of the memory management.
 *
 * @param addr 32-bit virtual address to unmap.
 * @param size Size (in bytes) of the memory area to unmap.
 */
void arch_mem_unmap(void *addr, size_t size)
{
	int ret = __arch_mem_unmap(addr, size);

	if (ret) {
		LOG_ERR("__arch_mem_unmap() returned %d", ret);
	} else {
		invalidate_tlb_all();
	}
}

/**
 * @brief Arch-specific virtual-to-physical address resolver function
 * ARMv7 MMU specific implementation of a function that resolves the
 * physical address corresponding to the given virtual address.
 *
 * @param virt 32-bit target virtual address to resolve.
 * @param phys Pointer to a variable to which the resolved physical
 *             address will be written. May be NULL if this information
 *             is not actually required by the caller.
 * @retval 0 if the physical address corresponding to the specified
 *         virtual address could be resolved successfully, -EFAULT
 *         if the specified virtual address is not currently mapped.
 */
int arch_page_phys_get(void *virt, uintptr_t *phys)
{
	uint32_t l1_index = ((uint32_t)virt >> ARM_MMU_PTE_L1_INDEX_PA_SHIFT) &
			    ARM_MMU_PTE_L1_INDEX_MASK;
	uint32_t l2_index = ((uint32_t)virt >> ARM_MMU_PTE_L2_INDEX_PA_SHIFT) &
			    ARM_MMU_PTE_L2_INDEX_MASK;
	struct arm_mmu_l2_page_table *l2_page_table;

	uint32_t pa_resolved = 0;
	uint32_t l2_pt_resolved;

	int rc = 0;
	int key;

	key = arch_irq_lock();

	if (l1_page_table.entries[l1_index].undefined.id == ARM_MMU_PTE_ID_SECTION) {
		/*
		 * If the virtual address points to a level 1 PTE whose ID bits
		 * identify it as a 1 MB section entry rather than a level 2 PT
		 * entry, the given VA belongs to a memory region used by the
		 * Zephyr image itself - it is only for those static regions that
		 * L1 Section entries are used to save L2 tables if a sufficient-
		 * ly large block of memory is specified. The memory regions be-
		 * longing to the Zephyr image are identity mapped -> just return
		 * the value of the VA as the value of the PA.
		 */
		pa_resolved = (uint32_t)virt;
	} else if (l1_page_table.entries[l1_index].undefined.id == ARM_MMU_PTE_ID_L2_PT) {
		/*
		 * The VA points to a level 1 PTE which re-directs to a level 2
		 * PT. -> Assemble the level 2 PT pointer and resolve the PA for
		 * the specified VA from there.
		 */
		l2_pt_resolved =
			l1_page_table.entries[l1_index].l2_page_table_ref.l2_page_table_address;
		l2_pt_resolved <<= ARM_MMU_PT_L2_ADDR_SHIFT;
		l2_page_table = (struct arm_mmu_l2_page_table *)l2_pt_resolved;

		/*
		 * Check if the PTE for the specified VA is actually in use before
		 * assembling & returning the corresponding PA. k_mem_unmap will
		 * call this function for the leading & trailing guard pages when
		 * unmapping a VA. As those guard pages were explicitly unmapped
		 * when the VA was originally mapped, their L2 PTEs will be empty.
		 * In that case, the return code of this function must not be 0.
		 */
		if (l2_page_table->entries[l2_index].word == 0) {
			rc = -EFAULT;
		}

		pa_resolved = l2_page_table->entries[l2_index].l2_page_4k.pa_base;
		pa_resolved <<= ARM_MMU_PTE_L2_SMALL_PAGE_ADDR_SHIFT;
		pa_resolved |= ((uint32_t)virt & ARM_MMU_ADDR_BELOW_PAGE_GRAN_MASK);
	} else {
		/* The level 1 PTE is invalid -> the specified VA is not mapped */
		rc = -EFAULT;
	}

	arch_irq_unlock(key);

	if (phys) {
		*phys = (uintptr_t)pa_resolved;
	}
	return rc;
}
```
/content/code_sandbox/arch/arm/core/mmu/arm_mmu.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
11,881
```c
/*
 *
 */

/**
 * @file
 * @brief ARM Cortex-A and Cortex-R System Control Block interface
 */

#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sys/util.h>

/**
 *
 * @brief Reset the system
 *
 * This routine resets the processor.
 *
 * Default implementation: declared __weak so that an SoC or board can
 * provide its own reset routine; this fallback is a no-op and ignores
 * the requested reboot 'type'.
 */
void __weak sys_arch_reboot(int type)
{
	ARG_UNUSED(type);
}
```
/content/code_sandbox/arch/arm/core/cortex_a_r/reboot.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
86
```sourcepawn
/*
 */

#ifndef _MACRO_PRIV_INC_
#define _MACRO_PRIV_INC_

#include <zephyr/arch/arm/cortex_a_r/tpidruro.h>

/*
 * Get CPU id
 */
.macro get_cpu_id rreg0
	/* Read MPIDR register */
	mrc p15, 0, \rreg0, c0, c0, 5
	/* Keep only affinity bits [23:0] of MPIDR */
	ubfx	\rreg0, \rreg0, #0, #24
.endm

/*
 * Get CPU logic id by looking up cpu_node_list
 * returns
 *   reg0: MPID
 *   reg1: logic id (0 ~ CONFIG_MP_MAX_NUM_CPUS - 1)
 * clobbers: reg0, reg1, reg2, reg3
 */
.macro get_cpu_logic_id reg0, reg1, reg2, reg3
	get_cpu_id \reg0
	ldr	\reg3, =cpu_node_list
	mov	\reg1, #0
	/* Linear scan of the 32-bit MPID entries in cpu_node_list */
1:	ldr	\reg2, [\reg3, \reg1, lsl #2]
	cmp	\reg2, \reg0
	beq	2f
	add	\reg1, \reg1, #1
	cmp	\reg1, #CONFIG_MP_MAX_NUM_CPUS
	bne	1b
	/* MPID not found in the list: park this CPU in an endless loop */
	b	.
2:
.endm

.macro get_cpu rreg0
	/*
	 * Get CPU pointer (struct _cpu address stashed in TPIDRURO).
	 */
	mrc p15, 0, \rreg0, c13, c0, 3
	and \rreg0, #TPIDRURO_CURR_CPU
.endm

.macro z_arm_cortex_ar_enter_exc
	/*
	 * Store r0-r3, r12, lr into the stack to construct an exception
	 * stack frame.
	 */
	srsdb sp!, #MODE_SYS
	cps #MODE_SYS
	push {r0-r3, r12, lr}

	/* TODO: EXTRA_EXCEPTION_INFO */
	mov r0, sp

	/* increment exception depth */
	get_cpu r2
	ldrb r1, [r2, #_cpu_offset_to_exc_depth]
	add r1, r1, #1
	strb r1, [r2, #_cpu_offset_to_exc_depth]
.endm

#endif /* _MACRO_PRIV_INC_ */
```
/content/code_sandbox/arch/arm/core/cortex_a_r/macro_priv.inc
sourcepawn
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
499
```c
/*
 *
 */

#include "zephyr/kernel/thread_stack.h"
#include <zephyr/kernel.h>
#include <cortex_a_r/stack.h>
#include <string.h>

#include <kernel_internal.h>

/* Per-CPU stacks for the ARM exception modes (FIQ, abort, undefined, SVC, SYS) */
K_KERNEL_STACK_ARRAY_DEFINE(z_arm_fiq_stack, CONFIG_MP_MAX_NUM_CPUS, CONFIG_ARMV7_FIQ_STACK_SIZE);
K_KERNEL_STACK_ARRAY_DEFINE(z_arm_abort_stack, CONFIG_MP_MAX_NUM_CPUS,
			    CONFIG_ARMV7_EXCEPTION_STACK_SIZE);
K_KERNEL_STACK_ARRAY_DEFINE(z_arm_undef_stack, CONFIG_MP_MAX_NUM_CPUS,
			    CONFIG_ARMV7_EXCEPTION_STACK_SIZE);
K_KERNEL_STACK_ARRAY_DEFINE(z_arm_svc_stack, CONFIG_MP_MAX_NUM_CPUS, CONFIG_ARMV7_SVC_STACK_SIZE);
K_KERNEL_STACK_ARRAY_DEFINE(z_arm_sys_stack, CONFIG_MP_MAX_NUM_CPUS, CONFIG_ARMV7_SYS_STACK_SIZE);

#if defined(CONFIG_INIT_STACKS)
/*
 * Paint the exception-mode stacks and the CPU 0 interrupt stack with the
 * 0xAA fill pattern used for stack usage analysis (CONFIG_INIT_STACKS).
 *
 * NOTE(review): each memset length is a single CONFIG_ARMV7_*_STACK_SIZE,
 * i.e. only the first element of each per-CPU stack array is painted.
 * With CONFIG_MP_MAX_NUM_CPUS > 1 the secondary CPUs' exception stacks
 * are left unpainted - confirm whether that is intentional.
 */
void z_arm_init_stacks(void)
{
	memset(z_arm_fiq_stack, 0xAA, CONFIG_ARMV7_FIQ_STACK_SIZE);
	memset(z_arm_svc_stack, 0xAA, CONFIG_ARMV7_SVC_STACK_SIZE);
	memset(z_arm_abort_stack, 0xAA, CONFIG_ARMV7_EXCEPTION_STACK_SIZE);
	memset(z_arm_undef_stack, 0xAA, CONFIG_ARMV7_EXCEPTION_STACK_SIZE);
	memset(K_KERNEL_STACK_BUFFER(z_interrupt_stacks[0]), 0xAA,
	       K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[0]));
}
#endif
```
/content/code_sandbox/arch/arm/core/cortex_a_r/stacks.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
306
```unknown
/*
 *
 */

/**
 * @file
 * @brief Exception handlers for ARM Cortex-A and Cortex-R
 *
 * This file implements the exception handlers (undefined instruction, prefetch
 * abort and data abort) for ARM Cortex-A and Cortex-R processors.
 *
 * All exception handlers save the exception stack frame into the exception
 * mode stack rather than the system mode stack, in order to ensure predictable
 * exception behaviour (i.e. an arbitrary thread stack overflow cannot cause
 * exception handling and thereby subsequent total system failure).
 *
 * In case the exception is due to a fatal (unrecoverable) fault, the fault
 * handler is responsible for invoking the architecture fatal exception handler
 * (z_arm_fatal_error) which invokes the kernel fatal exception handler
 * (z_fatal_error) that either locks up the system or aborts the current thread
 * depending on the application exception handler implementation.
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include "macro_priv.inc"

_ASM_FILE_PROLOGUE

#if defined(CONFIG_FPU_SHARING)
GTEXT(z_arm_fault_undef_instruction_fp)
#endif
GTEXT(z_arm_fault_undef_instruction)
GTEXT(z_arm_fault_prefetch)
GTEXT(z_arm_fault_data)

GTEXT(z_arm_undef_instruction)
GTEXT(z_arm_prefetch_abort)
GTEXT(z_arm_data_abort)

#ifndef CONFIG_USE_SWITCH

.macro exception_entry mode
	/*
	 * Store r0-r3, r12, lr, lr_und and spsr_und into the stack to
	 * construct an exception stack frame.
	 */
	srsdb sp!, #\mode
	stmfd sp, {r0-r3, r12, lr}^
	sub sp, #24

	/* Save the volatile FP context on top of the basic frame */
#if defined(CONFIG_FPU_SHARING)
	sub sp, #___fpu_t_SIZEOF
	vmrs r1, fpexc
	mov r0, #FPEXC_EN
	vmsr fpexc, r0
	vmrs r0, fpscr
	mov r2, sp
	vstmia r2!, {s0-s15}
#ifdef CONFIG_VFP_FEATURE_REGS_S64_D32
	vstmia r2!, {d16-d31}
#endif
	stm r2, {r0, r1}
#endif

#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
	/* Pointer to extra esf info */
	sub sp, #___extra_esf_info_t_SIZEOF
	mov r0, #0
	str r0, [sp, #4]
	str r0, [sp, #8]

	sub r1, sp, #___callee_saved_t_SIZEOF
	str r1, [sp]

	/* Capture the SYS-mode callee-saved registers, then return to \mode */
	cps #MODE_SYS
	stm r1, {r4-r11, sp}
	cps #\mode

	mov r0, sp
	mov sp, r1
#else
	mov r0, sp
#endif

	/* Increment exception nesting count */
	get_cpu r2
	ldr r1, [r2, #___cpu_t_nested_OFFSET]
	add r1, r1, #1
	str r1, [r2, #___cpu_t_nested_OFFSET]
.endm

.macro exception_exit
	/* Exit exception */
#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
	add sp, #___extra_esf_info_t_SIZEOF
	add sp, #___callee_saved_t_SIZEOF
#endif
.endm

/**
 * @brief Undefined instruction exception handler
 *
 * An undefined instruction (UNDEF) exception is generated when an undefined
 * instruction, or a VFP instruction when the VFP is not enabled, is
 * encountered.
 */
SECTION_SUBSEC_FUNC(TEXT, __exc, z_arm_undef_instruction)
	/*
	 * The undefined instruction address is offset by 2 if the previous
	 * mode is Thumb; otherwise, it is offset by 4.
	 */
	push {r0}
	mrs r0, spsr
	tst r0, #T_BIT
	subeq lr, #4 /* ARM (!T_BIT) */
	subne lr, #2 /* Thumb (T_BIT) */
	pop {r0}

	/*
	 * Store r0-r3, r12, lr, lr_und and spsr_und into the stack to
	 * construct an exception stack frame.
	 */
	srsdb sp!, #MODE_UND
	stmfd sp, {r0-r3, r12, lr}^
	sub sp, #24

	/* Increment exception nesting count */
	get_cpu r2
	ldr r1, [r2, #___cpu_t_nested_OFFSET]
	add r1, r1, #1
	str r1, [r2, #___cpu_t_nested_OFFSET]

#if defined(CONFIG_FPU_SHARING)
	sub sp, #___fpu_t_SIZEOF
	/* If the UNDEF was a recoverable FP access, exit immediately (r0 == 0) */
	bl z_arm_fault_undef_instruction_fp
	cmp r0, #0
	beq z_arm_exc_exit
	vmrs r1, fpexc
	mov r0, #FPEXC_EN
	vmsr fpexc, r0
	vmrs r0, fpscr
	mov r2, sp
	vstmia r2!, {s0-s15}
#ifdef CONFIG_VFP_FEATURE_REGS_S64_D32
	vstmia r2!, {d16-d31}
#endif
	stm r2, {r0, r1}
#endif

#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
	/* Pointer to extra esf info */
	sub sp, #___extra_esf_info_t_SIZEOF
	mov r0, #0
	str r0, [sp, #4]
	str r0, [sp, #8]

	sub r1, sp, #___callee_saved_t_SIZEOF
	str r1, [sp]

	cps #MODE_SYS
	stm r1, {r4-r11, sp}
	cps #MODE_UND

	mov r0, sp
	mov sp, r1
#else
	mov r0, sp
#endif

	bl z_arm_fault_undef_instruction
	exception_exit

	b z_arm_exc_exit

/**
 * @brief Prefetch abort exception handler
 *
 * A prefetch abort (PABT) exception is generated when the processor marks the
 * prefetched instruction as invalid and the instruction is executed.
 */
SECTION_SUBSEC_FUNC(TEXT, __exc, z_arm_prefetch_abort)
	/*
	 * The faulting instruction address is always offset by 4 for the
	 * prefetch abort exceptions.
	 */
	sub lr, #4

	exception_entry MODE_ABT
	bl z_arm_fault_prefetch
	exception_exit

	b z_arm_exc_exit

#if defined(CONFIG_FPU_SHARING)
#define FPU_SF_SIZE ___fpu_t_SIZEOF
#else
#define FPU_SF_SIZE 0
#endif

/**
 * @brief Data abort exception handler
 *
 * A data abort (DABT) exception is generated when an error occurs on a data
 * memory access. This exception can be either synchronous or asynchronous,
 * depending on the type of fault that caused it.
 */
SECTION_SUBSEC_FUNC(TEXT, __exc, z_arm_data_abort)
	/*
	 * The faulting instruction address is always offset by 8 for the data
	 * abort exceptions.
	 */
	sub lr, #8

	exception_entry MODE_ABT
	bl z_arm_fault_data

	/*
	 * If z_arm_fault_data returns false, then we recovered from
	 * the error. It may have updated $pc, so copy $pc back to
	 * the true esf from the one passed to z_arm_fault_data.
	 */
	cmp r0, #0
	ldreq r1, [sp, #24 + FPU_SF_SIZE]
	exception_exit
	streq r1, [sp, #24 + FPU_SF_SIZE]

	b z_arm_exc_exit

#else

/**
 * @brief Undefined instruction exception handler
 *
 * An undefined instruction (UNDEF) exception is generated when an undefined
 * instruction, or a VFP instruction when the VFP is not enabled, is
 * encountered.
 */
SECTION_SUBSEC_FUNC(TEXT, __exc, z_arm_undef_instruction)
	/*
	 * The undefined instruction address is offset by 2 if the previous
	 * mode is Thumb; otherwise, it is offset by 4.
	 */
	push {r0}
	mrs r0, spsr
	tst r0, #T_BIT
	subeq lr, #4 /* ARM (!T_BIT) */
	subne lr, #2 /* Thumb (T_BIT) */
	pop {r0}

	z_arm_cortex_ar_enter_exc
	bl z_arm_fault_undef_instruction
	b z_arm_cortex_ar_exit_exc

/**
 * @brief Prefetch abort exception handler
 *
 * A prefetch abort (PABT) exception is generated when the processor marks the
 * prefetched instruction as invalid and the instruction is executed.
 */
SECTION_SUBSEC_FUNC(TEXT, __exc, z_arm_prefetch_abort)
	/*
	 * The faulting instruction address is always offset by 4 for the
	 * prefetch abort exceptions.
	 */
	sub lr, #4

	z_arm_cortex_ar_enter_exc
	bl z_arm_fault_prefetch
	b z_arm_cortex_ar_exit_exc

/**
 * @brief Data abort exception handler
 *
 * A data abort (DABT) exception is generated when an error occurs on a data
 * memory access. This exception can be either synchronous or asynchronous,
 * depending on the type of fault that caused it.
 */
SECTION_SUBSEC_FUNC(TEXT, __exc, z_arm_data_abort)
	/* The faulting instruction address is always offset by 8 for data aborts */
	sub lr, #8

	z_arm_cortex_ar_enter_exc
	bl z_arm_fault_data
	b z_arm_cortex_ar_exit_exc

#endif
```
/content/code_sandbox/arch/arm/core/cortex_a_r/exc.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,032
```c
/*
 *
 */

/**
 * @file
 * @brief Full C support initialization
 *
 *
 * Initialization of full C support: zero the .bss, copy the .data if XIP,
 * call z_cstart().
 *
 * Stack is available in this module, but not the global data/bss until their
 * initialization is performed.
 */

#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/sys/barrier.h>
#include <zephyr/arch/arm/cortex_a_r/lib_helpers.h>

#if defined(CONFIG_ARMV7_R) || defined(CONFIG_ARMV7_A)
#include <cortex_a_r/stack.h>
#endif

#if defined(__GNUC__)
/*
 * GCC can detect if memcpy is passed a NULL argument, however one of
 * the cases of relocate_vector_table() it is valid to pass NULL, so we
 * suppress the warning for this case. We need to do this before
 * string.h is included to get the declaration of memcpy.
 */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wnonnull"
#endif
#include <string.h>

#if defined(CONFIG_SW_VECTOR_RELAY) || defined(CONFIG_SW_VECTOR_RELAY_CLIENT)
Z_GENERIC_SECTION(.vt_pointer_section) __attribute__((used))
void *_vector_table_pointer;
#endif

#ifdef CONFIG_ARM_MPU
extern void z_arm_mpu_init(void);
extern void z_arm_configure_static_mpu_regions(void);
#elif defined(CONFIG_ARM_AARCH32_MMU)
extern int z_arm_mmu_init(void);
#endif

#if defined(CONFIG_AARCH32_ARMV8_R)
#define VECTOR_ADDRESS ((uintptr_t)_vector_start)

/* ARMv8-R: point VBAR at the image's vector table instead of copying it */
static inline void relocate_vector_table(void)
{
	write_sctlr(read_sctlr() & ~HIVECS);
	write_vbar(VECTOR_ADDRESS & VBAR_MASK);
	barrier_isync_fence_full();
}
#else
#define VECTOR_ADDRESS 0

/* Copy the vector table to address 0 (or publish it via the relay pointer) */
void __weak relocate_vector_table(void)
{
#if defined(CONFIG_XIP) && (CONFIG_FLASH_BASE_ADDRESS != 0) || \
    !defined(CONFIG_XIP) && (CONFIG_SRAM_BASE_ADDRESS != 0)
	write_sctlr(read_sctlr() & ~HIVECS);
	size_t vector_size = (size_t)_vector_end - (size_t)_vector_start;
	/* VECTOR_ADDRESS is 0 here - this is the valid NULL memcpy case noted above */
	(void)memcpy(VECTOR_ADDRESS, _vector_start, vector_size);
#elif defined(CONFIG_SW_VECTOR_RELAY) || defined(CONFIG_SW_VECTOR_RELAY_CLIENT)
	_vector_table_pointer = _vector_start;
#endif
}

#if defined(__GNUC__)
/*
 * NOTE(review): this 'pop' is only compiled in the non-ARMv8-R branch; when
 * CONFIG_AARCH32_ARMV8_R is set, the 'push' above is never popped and
 * -Wnonnull stays suppressed for the remainder of this translation unit -
 * confirm whether that is intentional.
 */
#pragma GCC diagnostic pop
#endif

#endif /* CONFIG_AARCH32_ARMV8_R */

#if defined(CONFIG_CPU_HAS_FPU)

static inline void z_arm_floating_point_init(void)
{
#if defined(CONFIG_FPU)
	uint32_t reg_val = 0;

	/*
	 * CPACR : Coprocessor Access Control Register -> CP15 1/0/2
	 * comp. ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition,
	 * chap. B4.1.40
	 *
	 * Must be accessed in >= PL1!
	 * [23..22] = CP11 access control bits,
	 * [21..20] = CP10 access control bits.
	 *  11b = Full access as defined for the respective CP,
	 *  10b = UNDEFINED,
	 *  01b = Access at PL1 only,
	 *  00b = No access.
	 */
	reg_val = __get_CPACR();
	/* Enable PL1 access to CP10, CP11 */
	reg_val |= (CPACR_CP10(CPACR_FA) | CPACR_CP11(CPACR_FA));
	__set_CPACR(reg_val);
	barrier_isync_fence_full();

#if !defined(CONFIG_FPU_SHARING)
	/*
	 * FPEXC: Floating-Point Exception Control register
	 * comp. ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition,
	 * chap. B6.1.38
	 *
	 * Must be accessed in >= PL1!
	 * [31] EX bit = determines which registers comprise the current state
	 *               of the FPU. The effects of setting this bit to 1 are
	 *               subarchitecture defined. If EX=0, the following
	 *               registers contain the complete current state
	 *               information of the FPU and must therefore be saved
	 *               during a context switch:
	 *               * D0-D15
	 *               * D16-D31 if implemented
	 *               * FPSCR
	 *               * FPEXC.
	 * [30] EN bit = Advanced SIMD/Floating Point Extensions enable bit.
	 * [29..00]    = Subarchitecture defined -> not relevant here.
	 */
	__set_FPEXC(FPEXC_EN);
#endif
#endif
}

#endif /* CONFIG_CPU_HAS_FPU */

extern FUNC_NORETURN void z_cstart(void);

/**
 *
 * @brief Prepare to and run C code
 *
 * This routine prepares for the execution of and runs C code.
 *
 */
void z_prep_c(void)
{
	/* Initialize tpidruro with our struct _cpu instance address */
	write_tpidruro((uintptr_t)&_kernel.cpus[0]);

	relocate_vector_table();
#if defined(CONFIG_CPU_HAS_FPU)
	z_arm_floating_point_init();
#endif
	z_bss_zero();
	z_data_copy();
#if ((defined(CONFIG_ARMV7_R) || defined(CONFIG_ARMV7_A)) && defined(CONFIG_INIT_STACKS))
	z_arm_init_stacks();
#endif
	z_arm_interrupt_init();
#ifdef CONFIG_ARM_MPU
	z_arm_mpu_init();
	z_arm_configure_static_mpu_regions();
#elif defined(CONFIG_ARM_AARCH32_MMU)
	z_arm_mmu_init();
#endif
	z_cstart();

	CODE_UNREACHABLE;
}
```
/content/code_sandbox/arch/arm/core/cortex_a_r/prep_c.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,224
```c
/*
 *
 */

#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <errno.h>

/* The 'key' actually represents the BASEPRI register
 * prior to disabling interrupts via the BASEPRI mechanism.
 *
 * arch_swap() itself does not do much.
 */

/**
 * @brief Initiate a cooperative context switch
 *
 * Stashes the caller's interrupt-lock @p key and pre-loads -EAGAIN as the
 * default swap return value, then issues a supervisor call via
 * z_arm_cortex_r_svc() (SVC #_SVC_CALL_CONTEXT_SWITCH, handled in
 * swap_helper.S) which performs the actual context switch.  Execution
 * resumes past the SVC only once this thread is switched back in.
 *
 * @param key interrupt lock key returned by irq_lock()
 *
 * @return the value a peer stored in arch.swap_return_value for this
 *         thread (-EAGAIN if nothing overwrote the default)
 */
int arch_swap(unsigned int key)
{
	/* store off key and return value */
	_current->arch.basepri = key;
	_current->arch.swap_return_value = -EAGAIN;

	/* SVC traps to the context-switch path; see z_arm_svc */
	z_arm_cortex_r_svc();
	irq_unlock(key);

	/* Context switch is performed here. Returning implies the
	 * thread has been context-switched-in again.
	 */
	return _current->arch.swap_return_value;
}
```
/content/code_sandbox/arch/arm/core/cortex_a_r/swap.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
143
```unknown /* * */ /** * @file * @brief ARM Cortex-A and Cortex-R wrapper for ISRs with parameter * * Wrapper installed in vector table for handling dynamic interrupts that accept * a parameter. */ /* * Tell armclang that stack alignment are ensured. */ .eabi_attribute Tag_ABI_align_preserved, 1 #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #include <offsets_short.h> #include <zephyr/arch/cpu.h> #include <zephyr/sw_isr_table.h> #include "macro_priv.inc" _ASM_FILE_PROLOGUE GDATA(_sw_isr_table) GTEXT(_isr_wrapper) GTEXT(z_arm_int_exit) #ifndef CONFIG_USE_SWITCH /** * * @brief Wrapper around ISRs when inserted in software ISR table * * When inserted in the vector table, _isr_wrapper() demuxes the ISR table * using the running interrupt number as the index, and invokes the registered * ISR with its corresponding argument. When returning from the ISR, it * determines if a context switch needs to happen (see documentation for * z_arm_pendsv()) and pends the PendSV exception if so: the latter will * perform the context switch itself. * */ SECTION_FUNC(TEXT, _isr_wrapper) #if defined(CONFIG_USERSPACE) /* See comment below about svc stack usage */ cps #MODE_SVC push {r0} /* Determine if interrupted thread was in user context */ cps #MODE_IRQ mrs r0, spsr and r0, #MODE_MASK cmp r0, #MODE_USR bne isr_system_thread get_cpu r0 ldr r0, [r0, #___cpu_t_current_OFFSET] /* Save away user stack pointer */ cps #MODE_SYS str sp, [r0, #_thread_offset_to_sp_usr] /* sp_usr */ /* Switch to privileged stack */ ldr sp, [r0, #_thread_offset_to_priv_stack_end] /* priv stack end */ isr_system_thread: cps #MODE_SVC pop {r0} cps #MODE_IRQ #endif /* * Save away r0-r3, r12 and lr_irq for the previous context to the * process stack since they are clobbered here. Also, save away lr * and spsr_irq since we may swap processes and return to a different * thread. */ sub lr, lr, #4 srsdb #MODE_SYS! 
cps #MODE_SYS push {r0-r3, r12, lr} #if defined(CONFIG_FPU_SHARING) sub sp, sp, #___fpu_t_SIZEOF /* * Note that this handler was entered with the VFP unit enabled. * The undefined instruction handler uses this to know that it * needs to save the current floating context. */ vmrs r0, fpexc str r0, [sp, #___fpu_t_SIZEOF - 4] tst r0, #FPEXC_EN beq _vfp_not_enabled vmrs r0, fpscr str r0, [sp, #___fpu_t_SIZEOF - 8] /* Disable VFP */ mov r0, #0 vmsr fpexc, r0 _vfp_not_enabled: /* * Mark where to store the floating context for the undefined * instruction handler */ get_cpu r2 ldr r0, [r2, #___cpu_t_fp_ctx_OFFSET] cmp r0, #0 streq sp, [r2, #___cpu_t_fp_ctx_OFFSET] #endif /* CONFIG_FPU_SHARING */ /* * Use SVC mode stack for predictable interrupt behaviour; running ISRs * in the SYS/USR mode stack (i.e. interrupted thread stack) leaves the * ISR stack usage at the mercy of the interrupted thread and this can * be prone to stack overflows if any of the ISRs and/or preemptible * threads have high stack usage. * * When userspace is enabled, this also prevents leaking privileged * information to the user mode. */ cps #MODE_SVC /* * Preserve lr_svc which may contain the branch return address of the * interrupted context in case of a nested interrupt. This value will * be restored prior to exiting the interrupt in z_arm_int_exit. */ push {lr} /* Align stack at double-word boundary */ and r3, sp, #4 sub sp, sp, r3 push {r2, r3} /* Increment interrupt nesting count */ get_cpu r2 ldr r0, [r2, #___cpu_t_nested_OFFSET] add r0, r0, #1 str r0, [r2, #___cpu_t_nested_OFFSET] #ifdef CONFIG_TRACING_ISR bl sys_trace_isr_enter #endif #ifdef CONFIG_PM /* * All interrupts are disabled when handling idle wakeup. For tickless * idle, this ensures that the calculation and programming of the * device for the next timer deadline is not interrupted. For * non-tickless idle, this ensures that the clearing of the kernel idle * state is not interrupted. 
In each case, pm_system_resume * is called with interrupts disabled. */ /* is this a wakeup from idle ? */ ldr r2, =_kernel /* requested idle duration, in ticks */ ldr r0, [r2, #_kernel_offset_to_idle] cmp r0, #0 beq _idle_state_cleared movs r1, #0 /* clear kernel idle state */ str r1, [r2, #_kernel_offset_to_idle] bl pm_system_resume _idle_state_cleared: #endif /* CONFIG_PM */ /* Get active IRQ number from the interrupt controller */ #if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER) bl arm_gic_get_active #else bl z_soc_irq_get_active #endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */ push {r0, r1} lsl r0, r0, #3 /* table is 8-byte wide */ /* * Enable interrupts to allow nesting. * * Note that interrupts are disabled up to this point on the ARM * architecture variants other than the Cortex-M. It is also important * to note that most interrupt controllers require that the nested * interrupts are handled after the active interrupt is acknowledged; * this is be done through the `get_active` interrupt controller * interface function. */ cpsie i /* * Skip calling the isr if it is a spurious interrupt. 
*/ mov r1, #CONFIG_NUM_IRQS lsl r1, r1, #3 cmp r0, r1 bge spurious_continue ldr r1, =_sw_isr_table add r1, r1, r0 /* table entry: ISRs must have their MSB set to stay * in thumb mode */ ldm r1!,{r0,r3} /* arg in r0, ISR in r3 */ blx r3 /* call ISR */ spurious_continue: /* Signal end-of-interrupt */ pop {r0, r1} #if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER) bl arm_gic_eoi #else bl z_soc_irq_eoi #endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */ #ifdef CONFIG_TRACING_ISR bl sys_trace_isr_exit #endif /* Use 'bx' instead of 'b' because 'bx' can jump further, and use * 'bx' instead of 'blx' because exception return is done in * z_arm_int_exit() */ ldr r1, =z_arm_int_exit bx r1 #else /** * * @brief Wrapper around ISRs when inserted in software ISR table * * When inserted in the vector table, _isr_wrapper() demuxes the ISR table * using the running interrupt number as the index, and invokes the registered * ISR with its corresponding argument. When returning from the ISR, it * determines if a context switch needs to happen and invoke the arch_switch * function if so. * */ SECTION_FUNC(TEXT, _isr_wrapper) sub lr, #4 z_arm_cortex_ar_enter_exc /* Increment interrupt nesting count */ get_cpu r2 ldr r0, [r2, #___cpu_t_nested_OFFSET] add r0, #1 str r0, [r2, #___cpu_t_nested_OFFSET] /* If not nested: switch to IRQ stack and save current sp on it. */ cmp r0, #1 bhi 1f mov r0, sp cps #MODE_IRQ push {r0} 1: #ifdef CONFIG_TRACING_ISR bl sys_trace_isr_enter #endif /* CONFIG_TRACING_ISR */ #ifdef CONFIG_PM /* * All interrupts are disabled when handling idle wakeup. For tickless * idle, this ensures that the calculation and programming of the * device for the next timer deadline is not interrupted. For * non-tickless idle, this ensures that the clearing of the kernel idle * state is not interrupted. In each case, pm_system_resume * is called with interrupts disabled. */ /* is this a wakeup from idle ? 
*/ ldr r2, =_kernel /* requested idle duration, in ticks */ ldr r0, [r2, #_kernel_offset_to_idle] cmp r0, #0 beq _idle_state_cleared movs r1, #0 /* clear kernel idle state */ str r1, [r2, #_kernel_offset_to_idle] bl pm_system_resume _idle_state_cleared: #endif /* CONFIG_PM */ /* Get active IRQ number from the interrupt controller */ #if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER) bl arm_gic_get_active #else bl z_soc_irq_get_active #endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */ push {r0, r1} lsl r0, r0, #3 /* table is 8-byte wide */ /* * Skip calling the isr if it is a spurious interrupt. */ mov r1, #CONFIG_NUM_IRQS lsl r1, r1, #3 cmp r0, r1 bge spurious_continue ldr r1, =_sw_isr_table add r1, r1, r0 /* table entry: ISRs must have their MSB set to stay * in thumb mode */ ldm r1!,{r0,r3} /* arg in r0, ISR in r3 */ /* * Enable and disable interrupts again to allow nested in exception handlers. */ cpsie i blx r3 /* call ISR */ cpsid i spurious_continue: /* Signal end-of-interrupt */ pop {r0, r1} #if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER) bl arm_gic_eoi #else bl z_soc_irq_eoi #endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */ #ifdef CONFIG_TRACING_ISR bl sys_trace_isr_exit #endif GTEXT(z_arm_cortex_ar_irq_done) z_arm_cortex_ar_irq_done: /* Decrement interrupt nesting count */ get_cpu r2 ldr r0, [r2, #___cpu_t_nested_OFFSET] sub r0, r0, #1 str r0, [r2, #___cpu_t_nested_OFFSET] /* Do not context switch if exiting a nested interrupt */ cmp r0, #0 /* Note that this function is only called from `z_arm_svc`, * while handling irq_offload, with below modes set: * ``` * if (cpu interrupts are nested) * mode=MODE_SYS * else * mode=MODE_IRQ * ``` */ bhi __EXIT_INT /* retrieve pointer to the current thread */ pop {r0} cps #MODE_SYS mov sp, r0 ldr r1, [r2, #___cpu_t_current_OFFSET] push {r1} mov r0, #0 bl z_get_next_switch_handle pop {r1} cmp r0, #0 beq __EXIT_INT /* * Switch thread * r0: new thread * r1: old thread */ bl z_arm_context_switch __EXIT_INT: 
#ifdef CONFIG_STACK_SENTINEL bl z_check_stack_sentinel #endif /* CONFIG_STACK_SENTINEL */ b z_arm_cortex_ar_exit_exc #endif ```
/content/code_sandbox/arch/arm/core/cortex_a_r/isr_wrapper.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,734
```objective-c
/*
 *
 */

/**
 * @file
 * @brief Definitions for the boot vector table
 *
 * Definitions for the boot vector table.
 *
 * System exception handler names all have the same format:
 *
 *   __<exception name with underscores>
 *
 * No other symbol has the same format, so they are easy to spot.
 */

#ifndef _VECTOR_TABLE__H_
#define _VECTOR_TABLE__H_

#ifdef _ASMLANGUAGE

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/sys/util.h>

/* Symbols referenced by the assembly vector table (vector_table.S):
 * boot entry, table storage, and the exception/IRQ handlers.
 */
GTEXT(__start)
GDATA(_vector_table)

GTEXT(z_arm_nmi)
GTEXT(z_arm_undef_instruction)
GTEXT(z_arm_svc)
GTEXT(z_arm_prefetch_abort)
GTEXT(z_arm_data_abort)
GTEXT(z_arm_pendsv)
GTEXT(z_arm_reserved)

GTEXT(z_prep_c)
GTEXT(_isr_wrapper)

#else /* _ASMLANGUAGE */

#ifdef __cplusplus
extern "C" {
#endif

/* C-visible handle to the vector table placed by the linker */
extern void *_vector_table[];

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* _VECTOR_TABLE__H_ */
```
/content/code_sandbox/arch/arm/core/cortex_a_r/vector_table.h
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
239
```unknown
/*
 *
 */

/**
 * @file
 * @brief Populated vector table in ROM
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include "vector_table.h"
#include "offsets_short.h"
#include "macro_priv.inc"

_ASM_FILE_PROLOGUE

/* Classic ARM (A/R-profile) vector table: each slot is a
 * `ldr pc, =<handler>` so the handler may live anywhere in the
 * address space (a literal-pool load, not a relative branch).
 */
SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,_vector_table)
	ldr pc, =z_arm_reset             /* offset 0 */
	ldr pc, =z_arm_undef_instruction /* undef instruction offset 4 */
	ldr pc, =z_arm_svc               /* svc offset 8 */
	ldr pc, =z_arm_prefetch_abort    /* prefetch abort offset 0xc */
	ldr pc, =z_arm_data_abort        /* data abort offset 0x10 */
	nop                              /* offset 0x14 */
#ifdef CONFIG_GEN_SW_ISR_TABLE
	ldr pc, =_isr_wrapper            /* IRQ offset 0x18 */
#else
	ldr pc, =z_irq_spurious
#endif
#ifndef CONFIG_USE_SWITCH
	ldr pc, =z_arm_nmi               /* FIQ offset 0x1c */
#else
	ldr pc, =z_irq_spurious
#endif

#ifdef CONFIG_USE_SWITCH
GTEXT(z_arm_cortex_ar_exit_exc)
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_cortex_ar_exit_exc)
	/* Note:
	 * This function is expected to be *always* called with
	 * processor mode set to MODE_SYS.
	 */

	/* decrement exception depth */
	get_cpu r2
	ldrb r1, [r2, #_cpu_offset_to_exc_depth]
	sub r1, r1, #1
	strb r1, [r2, #_cpu_offset_to_exc_depth]

	/*
	 * Restore r0-r3, r12, lr, lr_und and spsr_und from the exception stack
	 * and return to the current thread.
	 */
	pop {r0-r3, r12, lr}
	rfeia sp!
#endif
```
/content/code_sandbox/arch/arm/core/cortex_a_r/vector_table.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
436
```unknown /* * */ /** * @file * @brief ARM Cortex-A and Cortex-R power management * */ #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #if defined(CONFIG_ARM_ON_EXIT_CPU_IDLE) #include <soc_cpu_idle.h> #endif _ASM_FILE_PROLOGUE GTEXT(arch_cpu_idle) GTEXT(arch_cpu_atomic_idle) .macro _sleep_if_allowed wait_instruction #if defined(CONFIG_ARM_ON_ENTER_CPU_IDLE_HOOK) push {r0, lr} bl z_arm_on_enter_cpu_idle /* Skip the wait instruction if on_enter_cpu_idle() returns false. */ cmp r0, #0 beq _skip_\@ #endif /* CONFIG_ARM_ON_ENTER_CPU_IDLE_HOOK */ /* * Wait for all memory transactions to complete before entering low * power state. */ dsb \wait_instruction #if defined(CONFIG_ARM_ON_EXIT_CPU_IDLE) /* Inline the macro provided by SoC-specific code */ SOC_ON_EXIT_CPU_IDLE #endif /* CONFIG_ARM_ON_EXIT_CPU_IDLE */ #if defined(CONFIG_ARM_ON_ENTER_CPU_IDLE_HOOK) _skip_\@: pop {r0, lr} #endif /* CONFIG_ARM_ON_ENTER_CPU_IDLE_HOOK */ .endm #ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE SECTION_FUNC(TEXT, arch_cpu_idle) #ifdef CONFIG_TRACING push {r0, lr} bl sys_trace_idle pop {r0, lr} #endif /* CONFIG_TRACING */ /* Enter low power state */ _sleep_if_allowed wfi /* * Clear PRIMASK and flush instruction buffer to immediately service * the wake-up interrupt. */ cpsie i isb bx lr #endif #ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE SECTION_FUNC(TEXT, arch_cpu_atomic_idle) #ifdef CONFIG_TRACING push {r0, lr} bl sys_trace_idle pop {r0, lr} #endif /* CONFIG_TRACING */ /* * Lock PRIMASK while sleeping: wfe will still get interrupted by * incoming interrupts but the CPU will not service them right away. */ cpsid i /* r0: interrupt mask from caller */ /* No BASEPRI, call wfe directly */ _sleep_if_allowed wfe cmp r0, #0 bne _irq_disabled cpsie i _irq_disabled: bx lr #endif ```
/content/code_sandbox/arch/arm/core/cortex_a_r/cpu_idle.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
510
```unknown /* * */ /** * @file * @brief Thread context switching for ARM Cortex-A and Cortex-R * * This module implements the routines necessary for thread context switching * on ARM Cortex-A and Cortex-R CPUs. */ #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #include <offsets_short.h> #include <zephyr/arch/cpu.h> #include <zephyr/syscall.h> #include <zephyr/kernel.h> #include "macro_priv.inc" _ASM_FILE_PROLOGUE GTEXT(z_arm_svc) GTEXT(z_arm_do_swap) GTEXT(z_do_kernel_oops) #if defined(CONFIG_USERSPACE) GTEXT(z_arm_do_syscall) #endif GDATA(_kernel) /** * * @brief Routine to handle context switches * * For Cortex-R, this function is directly called either by z_arm_{exc,int}_exit * in case of preemption, or z_arm_svc in case of cooperative switching. */ SECTION_FUNC(TEXT, z_arm_do_swap) #ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING /* Register the context switch */ push {r0, lr} bl z_thread_mark_switched_out pop {r0, lr} #endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */ /* load current _cpu into r1 and current k_thread into r2 */ get_cpu r1 ldr r2, [r1, #___cpu_t_current_OFFSET] #if defined(CONFIG_ARM_STORE_EXC_RETURN) /* Store LSB of LR (EXC_RETURN) to the thread's 'mode' word. */ strb lr, [r2, #_thread_offset_to_mode_exc_return] #endif /* addr of callee-saved regs in thread in r0 */ ldr r0, =_thread_offset_to_callee_saved add r0, r2 /* Store rest of process context */ cps #MODE_SYS stm r0, {r4-r11, sp} cps #MODE_SVC #if defined(CONFIG_FPU_SHARING) ldrb r0, [r2, #_thread_offset_to_user_options] tst r0, #K_FP_REGS /* _current->base.user_options & K_FP_REGS */ beq out_fp_inactive mov ip, #FPEXC_EN vmsr fpexc, ip /* * If the float context pointer is not null, then the VFP has not been * used since this thread has used it. Consequently, the caller-saved * float registers have not been saved away, so write them to the * exception stack frame. 
*/ ldr r0, [r1, #___cpu_t_fp_ctx_OFFSET] cmp r0, #0 beq out_store_thread_context vstmia r0!, {s0-s15} #ifdef CONFIG_VFP_FEATURE_REGS_S64_D32 vstmia r0!, {d16-d31} #endif vmrs r3, fpscr stm r0, {r3, ip} out_store_thread_context: /* Store s16-s31 to thread context */ add r0, r2, #_thread_offset_to_preempt_float vstmia r0, {s16-s31} mov ip, #0 vmsr fpexc, ip out_fp_inactive: /* * The floating context has now been saved to the exception stack * frame, so zero out the global pointer to note this. */ mov r0, #0 str r0, [r1, #___cpu_t_fp_ctx_OFFSET] #endif /* CONFIG_FPU_SHARING */ /* fetch the thread to run from the ready queue cache */ ldr r3, =_kernel ldr r2, [r3, #_kernel_offset_to_ready_q_cache] str r2, [r1, #___cpu_t_current_OFFSET] #if defined(CONFIG_THREAD_LOCAL_STORAGE) /* Grab the TLS pointer */ ldr r4, =_thread_offset_to_tls adds r4, r2, r4 ldr r0, [r4] /* Store TLS pointer in the "Process ID" register. * TPIDRURW is used as a base pointer to all * thread variables with offsets added by toolchain. */ mcr 15, 0, r0, c13, c0, 2 #endif #if defined(CONFIG_ARM_STORE_EXC_RETURN) /* Restore EXC_RETURN value. 
*/ ldrsb lr, [r2, #_thread_offset_to_mode_exc_return] #endif /* Restore previous interrupt disable state (irq_lock key) * (We clear the arch.basepri field after restoring state) */ ldr r0, [r2, #_thread_offset_to_basepri] movs r3, #0 str r3, [r2, #_thread_offset_to_basepri] /* addr of callee-saved regs in thread in r0 */ ldr r0, =_thread_offset_to_callee_saved add r0, r2 /* restore r4-r11 and sp for incoming thread */ cps #MODE_SYS ldm r0, {r4-r11, sp} cps #MODE_SVC #if defined(CONFIG_FPU_SHARING) ldrb r0, [r2, #_thread_offset_to_user_options] tst r0, #K_FP_REGS /* _current->base.user_options & K_FP_REGS */ beq in_fp_inactive mov r3, #FPEXC_EN vmsr fpexc, r3 /* Restore s16-s31 from thread context */ add r0, r2, #_thread_offset_to_preempt_float vldmia r0, {s16-s31} mov r3, #0 vmsr fpexc, r3 in_fp_inactive: #endif /* CONFIG_FPU_SHARING */ #if defined (CONFIG_ARM_MPU) /* r2 contains k_thread */ mov r0, r2 /* Re-program dynamic memory map */ push {r0, lr} bl z_arm_configure_dynamic_mpu_regions pop {r0, lr} #endif #ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING /* Register the context switch */ push {r0, lr} bl z_thread_mark_switched_in pop {r0, lr} #endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */ /* * Cortex-R: return to the caller (z_arm_{exc,int}_exit, or z_arm_svc) */ bx lr #if defined(CONFIG_FPU_SHARING) #define FPU_SF_SIZE ___fpu_t_SIZEOF #else #define FPU_SF_SIZE 0 #endif /** * * @brief Service call handler * * The service call (svc) is used in the following occasions: * - Cooperative context switching * - IRQ offloading * - Kernel run-time exceptions * */ SECTION_FUNC(TEXT, z_arm_svc) #if defined(CONFIG_USERSPACE) /* Determine if incoming thread was in user context */ push {r0} mrs r0, spsr and r0, #MODE_MASK cmp r0, #MODE_USR bne svc_system_thread get_cpu r0 ldr r0, [r0, #___cpu_t_current_OFFSET] /* Save away user stack pointer */ cps #MODE_SYS str sp, [r0, #_thread_offset_to_sp_usr] /* sp_usr */ /* Switch to privileged stack */ ldr sp, [r0, 
#_thread_offset_to_priv_stack_end] /* priv stack end */ cps #MODE_SVC svc_system_thread: pop {r0} #endif /* * Switch to system mode to store r0-r3 to the process stack pointer. * Save r12 and the lr as we could be swapping in another process and * returning to a different location. */ srsdb #MODE_SYS! cps #MODE_SYS push {r0-r3, r12, lr} #if defined(CONFIG_FPU_SHARING) sub sp, sp, #___fpu_t_SIZEOF /* * Note that this handler was entered with the VFP unit enabled. * The undefined instruction handler uses this to know that it * needs to save the current floating context. */ vmrs r0, fpexc str r0, [sp, #___fpu_t_SIZEOF - 4] tst r0, #FPEXC_EN beq _vfp_not_enabled vmrs r0, fpscr str r0, [sp, #___fpu_t_SIZEOF - 8] /* Disable VFP */ mov r0, #0 vmsr fpexc, r0 _vfp_not_enabled: /* * Mark where to store the floating context for the undefined * instruction handler */ get_cpu r2 ldr r0, [r2, #___cpu_t_fp_ctx_OFFSET] cmp r0, #0 streq sp, [r2, #___cpu_t_fp_ctx_OFFSET] #endif /* CONFIG_FPU_SHARING */ mov ip, sp cps #MODE_SVC /* * Store lr_svc to the SVC mode stack. This value will be restored prior to * exiting the SVC call in z_arm_int_exit. 
*/ push {lr} /* Align stack at double-word boundary */ /* TODO: Question, why push {r2, r3} here */ and r3, sp, #4 sub sp, sp, r3 push {r2, r3} /* Increment interrupt nesting count */ get_cpu r2 ldr r0, [r2, #___cpu_t_nested_OFFSET] add r0, r0, #1 str r0, [r2, #___cpu_t_nested_OFFSET] /* Get SVC number */ mrs r0, spsr tst r0, #0x20 ldreq r1, [lr, #-4] biceq r1, #0xff000000 beq demux ldr r1, [lr, #-2] and r1, #0xff /* * grab service call number: * 0: context switch * 1: irq_offload (if configured) * 2: kernel panic or oops (software generated fatal exception) * 3: system calls for memory protection */ demux: #if defined(CONFIG_USERSPACE) cmp r1, #_SVC_CALL_SYSTEM_CALL beq _do_syscall #endif cmp r1, #_SVC_CALL_CONTEXT_SWITCH beq _context_switch cmp r1, #_SVC_CALL_RUNTIME_EXCEPT beq _oops #if CONFIG_IRQ_OFFLOAD blx z_irq_do_offload /* call C routine which executes the offload */ /* exception return is done in z_arm_int_exit() */ b z_arm_int_exit #endif _context_switch: /* handler mode exit, to PendSV */ bl z_arm_do_swap b z_arm_int_exit _oops: /* * Pass the exception frame to z_do_kernel_oops. r0 contains the * exception reason. */ cps #MODE_SYS mov r0, sp cps #MODE_SVC bl z_do_kernel_oops b z_arm_int_exit #if defined(CONFIG_USERSPACE) /* * System call will setup a jump to the _do_arm_syscall function * running in system mode when returning from the exception. * * There is some trickery involved here because we have to preserve * the original PC value so that we can return back to the caller of * the SVC. 
* * On SVC exception, the USER/SYSTEM stack looks like the following: * { possible FPU space } - r0 - r1 - r2 - r3 - r12 - LR - PC - SPSR * * Registers look like: * r0 - arg1 * r1 - arg2 * r2 - arg3 * r3 - arg4 * r4 - arg5 * r5 - arg6 * r6 - call_id * r8 - saved link register */ _do_syscall: /* grab address of LR from stack frame */ ldr r8, [ip, #(FPU_SF_SIZE + ___basic_sf_t_pc_OFFSET)] /* Make the exception return to system state */ ldr r1, [ip, #(FPU_SF_SIZE + ___basic_sf_t_xpsr_OFFSET)] /* If leaving thumb mode, set the return address to thumb mode */ tst r1, #T_BIT orrne r8, #1 bic r1, #(MODE_MASK | T_BIT) orr r1, r1, #MODE_SYS str r1, [ip, #(FPU_SF_SIZE + ___basic_sf_t_xpsr_OFFSET)] /* * Store the address of z_arm_do_syscall for the exit so the exception * return goes there in system state. */ ldr r1, =z_arm_do_syscall str r1, [ip, #(FPU_SF_SIZE + ___basic_sf_t_pc_OFFSET)] /* validate syscall limit, only set priv mode if valid */ ldr ip, =K_SYSCALL_LIMIT cmp r6, ip blo valid_syscall_id /* bad syscall id. Set arg0 to bad id and set call_id to SYSCALL_BAD */ cps #MODE_SYS str r6, [sp] cps #MODE_SVC ldr r6, =K_SYSCALL_BAD valid_syscall_id: get_cpu r0 ldr r0, [r0, #___cpu_t_current_OFFSET] ldr r1, [r0, #_thread_offset_to_mode] bic r1, #1 /* Store (privileged) mode in thread's mode state variable */ str r1, [r0, #_thread_offset_to_mode] dsb /* ISB is not strictly necessary here (stack pointer is not being * touched), but it's recommended to avoid executing pre-fetched * instructions with the previous privilege. */ isb /* Return to _arm_do_syscall in system state. */ b z_arm_int_exit #endif GTEXT(z_arm_cortex_r_svc) SECTION_FUNC(TEXT, z_arm_cortex_r_svc) svc #_SVC_CALL_CONTEXT_SWITCH bx lr ```
/content/code_sandbox/arch/arm/core/cortex_a_r/swap_helper.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,216
```c /* */ #include <zephyr/kernel/thread_stack.h> #include <zephyr/kernel.h> #include <zephyr/arch/arm/cortex_a_r/lib_helpers.h> #include <zephyr/drivers/interrupt_controller/gic.h> #include <ipi.h> #include "boot.h" #include "zephyr/cache.h" #include "zephyr/kernel/thread_stack.h" #include "zephyr/toolchain/gcc.h" #define INV_MPID UINT32_MAX #define SGI_SCHED_IPI 0 #define SGI_MMCFG_IPI 1 #define SGI_FPU_IPI 2 K_KERNEL_PINNED_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS, CONFIG_ISR_STACK_SIZE); K_KERNEL_STACK_ARRAY_DECLARE(z_arm_fiq_stack, CONFIG_MP_MAX_NUM_CPUS, CONFIG_ARMV7_FIQ_STACK_SIZE); K_KERNEL_STACK_ARRAY_DECLARE(z_arm_abort_stack, CONFIG_MP_MAX_NUM_CPUS, CONFIG_ARMV7_EXCEPTION_STACK_SIZE); K_KERNEL_STACK_ARRAY_DECLARE(z_arm_undef_stack, CONFIG_MP_MAX_NUM_CPUS, CONFIG_ARMV7_EXCEPTION_STACK_SIZE); K_KERNEL_STACK_ARRAY_DECLARE(z_arm_svc_stack, CONFIG_MP_MAX_NUM_CPUS, CONFIG_ARMV7_SVC_STACK_SIZE); K_KERNEL_STACK_ARRAY_DECLARE(z_arm_sys_stack, CONFIG_MP_MAX_NUM_CPUS, CONFIG_ARMV7_SVC_STACK_SIZE); struct boot_params { uint32_t mpid; char *irq_sp; char *fiq_sp; char *abt_sp; char *udf_sp; char *svc_sp; char *sys_sp; uint8_t voting[CONFIG_MP_MAX_NUM_CPUS]; arch_cpustart_t fn; void *arg; int cpu_num; }; /* Offsets used in reset.S */ BUILD_ASSERT(offsetof(struct boot_params, mpid) == BOOT_PARAM_MPID_OFFSET); BUILD_ASSERT(offsetof(struct boot_params, irq_sp) == BOOT_PARAM_IRQ_SP_OFFSET); BUILD_ASSERT(offsetof(struct boot_params, fiq_sp) == BOOT_PARAM_FIQ_SP_OFFSET); BUILD_ASSERT(offsetof(struct boot_params, abt_sp) == BOOT_PARAM_ABT_SP_OFFSET); BUILD_ASSERT(offsetof(struct boot_params, udf_sp) == BOOT_PARAM_UDF_SP_OFFSET); BUILD_ASSERT(offsetof(struct boot_params, svc_sp) == BOOT_PARAM_SVC_SP_OFFSET); BUILD_ASSERT(offsetof(struct boot_params, sys_sp) == BOOT_PARAM_SYS_SP_OFFSET); BUILD_ASSERT(offsetof(struct boot_params, voting) == BOOT_PARAM_VOTING_OFFSET); volatile struct boot_params arm_cpu_boot_params = { .mpid = -1, .irq_sp = (char 
*)(z_interrupt_stacks + CONFIG_ISR_STACK_SIZE), .fiq_sp = (char *)(z_arm_fiq_stack + CONFIG_ARMV7_FIQ_STACK_SIZE), .abt_sp = (char *)(z_arm_abort_stack + CONFIG_ARMV7_EXCEPTION_STACK_SIZE), .udf_sp = (char *)(z_arm_undef_stack + CONFIG_ARMV7_EXCEPTION_STACK_SIZE), .svc_sp = (char *)(z_arm_svc_stack + CONFIG_ARMV7_SVC_STACK_SIZE), .sys_sp = (char *)(z_arm_sys_stack + CONFIG_ARMV7_SYS_STACK_SIZE), }; const uint32_t cpu_node_list[] = { DT_FOREACH_CHILD_STATUS_OKAY_SEP(DT_PATH(cpus), DT_REG_ADDR, (,))}; /* cpu_map saves the maping of core id and mpid */ static uint32_t cpu_map[CONFIG_MP_MAX_NUM_CPUS] = { [0 ... (CONFIG_MP_MAX_NUM_CPUS - 1)] = INV_MPID }; #ifdef CONFIG_ARM_MPU extern void z_arm_mpu_init(void); extern void z_arm_configure_static_mpu_regions(void); #elif defined(CONFIG_ARM_AARCH32_MMU) extern int z_arm_mmu_init(void); #endif /* Called from Zephyr initialization */ void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz, arch_cpustart_t fn, void *arg) { int cpu_count, i, j; uint32_t cpu_mpid = 0; uint32_t master_core_mpid; /* Now it is on master core */ __ASSERT(arch_curr_cpu()->id == 0, ""); master_core_mpid = MPIDR_TO_CORE(GET_MPIDR()); cpu_count = ARRAY_SIZE(cpu_node_list); __ASSERT(cpu_count == CONFIG_MP_MAX_NUM_CPUS, "The count of CPU Cores nodes in dts is not equal to CONFIG_MP_MAX_NUM_CPUS\n"); for (i = 0, j = 0; i < cpu_count; i++) { if (cpu_node_list[i] == master_core_mpid) { continue; } if (j == cpu_num - 1) { cpu_mpid = cpu_node_list[i]; break; } j++; } if (i == cpu_count) { printk("Can't find CPU Core %d from dts and failed to boot it\n", cpu_num); return; } /* Pass stack address to secondary core */ arm_cpu_boot_params.irq_sp = K_KERNEL_STACK_BUFFER(stack) + sz; arm_cpu_boot_params.fiq_sp = K_KERNEL_STACK_BUFFER(z_arm_fiq_stack[cpu_num]) + CONFIG_ARMV7_FIQ_STACK_SIZE; arm_cpu_boot_params.abt_sp = K_KERNEL_STACK_BUFFER(z_arm_abort_stack[cpu_num]) + CONFIG_ARMV7_EXCEPTION_STACK_SIZE; arm_cpu_boot_params.udf_sp = 
K_KERNEL_STACK_BUFFER(z_arm_undef_stack[cpu_num]) + CONFIG_ARMV7_EXCEPTION_STACK_SIZE; arm_cpu_boot_params.svc_sp = K_KERNEL_STACK_BUFFER(z_arm_svc_stack[cpu_num]) + CONFIG_ARMV7_SVC_STACK_SIZE; arm_cpu_boot_params.sys_sp = K_KERNEL_STACK_BUFFER(z_arm_sys_stack[cpu_num]) + CONFIG_ARMV7_SYS_STACK_SIZE; arm_cpu_boot_params.fn = fn; arm_cpu_boot_params.arg = arg; arm_cpu_boot_params.cpu_num = cpu_num; /* store mpid last as this is our synchronization point */ arm_cpu_boot_params.mpid = cpu_mpid; barrier_dsync_fence_full(); sys_cache_data_invd_range( (void *)&arm_cpu_boot_params, sizeof(arm_cpu_boot_params)); /*! TODO: Support PSCI * \todo Support PSCI */ /* Wait secondary cores up, see arch_secondary_cpu_init */ while (arm_cpu_boot_params.fn) { wfe(); } cpu_map[cpu_num] = cpu_mpid; printk("Secondary CPU core %d (MPID:%#x) is up\n", cpu_num, cpu_mpid); } /* the C entry of secondary cores */ void arch_secondary_cpu_init(void) { int cpu_num = arm_cpu_boot_params.cpu_num; arch_cpustart_t fn; void *arg; __ASSERT(arm_cpu_boot_params.mpid == MPIDR_TO_CORE(GET_MPIDR()), ""); /* Initialize tpidrro_el0 with our struct _cpu instance address */ write_tpidruro((uintptr_t)&_kernel.cpus[cpu_num]); #ifdef CONFIG_ARM_MPU /*! TODO: Unify mpu and mmu initialization function * \todo Unify mpu and mmu initialization function */ z_arm_mpu_init(); z_arm_configure_static_mpu_regions(); #elif defined(CONFIG_ARM_AARCH32_MMU) z_arm_mmu_init(); #endif #ifdef CONFIG_SMP arm_gic_secondary_init(); irq_enable(SGI_SCHED_IPI); /*! TODO: FPU irq * \todo FPU irq */ #endif fn = arm_cpu_boot_params.fn; arg = arm_cpu_boot_params.arg; barrier_dsync_fence_full(); /* * Secondary core clears .fn to announce its presence. * Primary core is polling for this. We no longer own * arm_cpu_boot_params afterwards. 
*/ arm_cpu_boot_params.fn = NULL; barrier_dsync_fence_full(); sev(); fn(arg); } #ifdef CONFIG_SMP static void send_ipi(unsigned int ipi, uint32_t cpu_bitmap) { uint32_t mpidr = MPIDR_TO_CORE(GET_MPIDR()); /* * Send SGI to all cores except itself */ unsigned int num_cpus = arch_num_cpus(); for (int i = 0; i < num_cpus; i++) { if ((cpu_bitmap & BIT(i)) == 0) { continue; } uint32_t target_mpidr = cpu_map[i]; uint8_t aff0; if (mpidr == target_mpidr || mpidr == INV_MPID) { continue; } aff0 = MPIDR_AFFLVL(target_mpidr, 0); gic_raise_sgi(ipi, (uint64_t)target_mpidr, 1 << aff0); } } void sched_ipi_handler(const void *unused) { ARG_UNUSED(unused); z_sched_ipi(); } void arch_sched_broadcast_ipi(void) { send_ipi(SGI_SCHED_IPI, IPI_ALL_CPUS_MASK); } void arch_sched_directed_ipi(uint32_t cpu_bitmap) { send_ipi(SGI_SCHED_IPI, cpu_bitmap); } int arch_smp_init(void) { cpu_map[0] = MPIDR_TO_CORE(GET_MPIDR()); /* * SGI0 is use for sched ipi, this might be changed to use Kconfig * option */ IRQ_CONNECT(SGI_SCHED_IPI, IRQ_DEFAULT_PRIORITY, sched_ipi_handler, NULL, 0); irq_enable(SGI_SCHED_IPI); return 0; } #endif ```
/content/code_sandbox/arch/arm/core/cortex_a_r/smp.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,061
```unknown # ARM Cortex-A and Cortex-R platform configuration options # # NOTE: We have the specific core implementations first and outside of the # if CPU_AARCH32_CORTEX_A / if CPU_AARCH32_CORTEX_R block so that SoCs can # select which core they are using without having to select all the options # related to that core. Everything else is captured inside the if # CPU_AARCH32_CORTEX_A / if CPU_AARCH32_CORTEX_R blocks so they are not # exposed if one selects a different ARM Cortex Family (Cortex-M). config CPU_CORTEX_A9 bool select CPU_AARCH32_CORTEX_A select ARMV7_A help This option signifies the use of a Cortex-A9 CPU. if CPU_AARCH32_CORTEX_A config ARMV7_A bool select ATOMIC_OPERATIONS_BUILTIN select ISA_ARM config ARMV7_EXCEPTION_STACK_SIZE int "Undefined Instruction and Abort stack size (in bytes)" default 256 help This option specifies the size of the stack used by the undefined instruction and data abort exception handlers. config ARMV7_FIQ_STACK_SIZE int "FIQ stack size (in bytes)" default 256 help This option specifies the size of the stack used by the FIQ handler. config ARMV7_SVC_STACK_SIZE int "SVC stack size (in bytes)" default 512 help This option specifies the size of the stack used by the SVC handler. config ARMV7_SYS_STACK_SIZE int "SYS stack size (in bytes)" default 1024 help This option specifies the size of the stack used by the system mode. 
config RUNTIME_NMI
	default y

config GEN_ISR_TABLES
	default y

config GEN_IRQ_VECTOR_TABLE
	default n

config DCACHE_LINE_SIZE
	default 32

config ICACHE_LINE_SIZE
	default 32

endif # CPU_AARCH32_CORTEX_A

config CPU_CORTEX_R4
	bool
	select CPU_AARCH32_CORTEX_R
	select ARMV7_R
	select ARMV7_R_FP if CPU_HAS_FPU
	help
	  This option signifies the use of a Cortex-R4 CPU.

config CPU_CORTEX_R5
	bool
	select CPU_AARCH32_CORTEX_R
	select ARMV7_R
	select ARMV7_R_FP if CPU_HAS_FPU
	help
	  This option signifies the use of a Cortex-R5 CPU.

config CPU_CORTEX_R7
	bool
	select CPU_AARCH32_CORTEX_R
	select ARMV7_R
	select ARMV7_R_FP if CPU_HAS_FPU
	help
	  This option signifies the use of a Cortex-R7 CPU.

config CPU_CORTEX_R52
	bool
	select CPU_AARCH32_CORTEX_R
	select AARCH32_ARMV8_R
	select CPU_HAS_ICACHE
	select CPU_HAS_DCACHE
	select VFP_SP_D16 if !USE_SWITCH
	help
	  This option signifies the use of a Cortex-R52 CPU.

if CPU_AARCH32_CORTEX_R

config ARMV7_R
	bool
	select ATOMIC_OPERATIONS_BUILTIN
	select ISA_ARM
	select ISA_THUMB2
	help
	  This option signifies the use of an ARMv7-R processor
	  implementation.

	  From the Arm architecture documentation:
	  The Armv7-R architecture implements a traditional Arm architecture with
	  multiple modes and supports a Protected Memory System Architecture
	  (PMSA) based on a Memory Protection Unit (MPU). It supports the Arm (32)
	  and Thumb (T32) instruction sets.

config ARMV7_R_FP
	bool
	depends on ARMV7_R
	help
	  This option signifies the use of an ARMv7-R processor
	  implementation supporting the Floating-Point Extension.

config AARCH32_ARMV8_R
	bool
	select ATOMIC_OPERATIONS_BUILTIN
	select SCHED_IPI_SUPPORTED if SMP
	select ARCH_HAS_DIRECTED_IPIS
	help
	  This option signifies the use of an ARMv8-R AArch32 processor
	  implementation.

	  From the Arm architecture documentation:
	  The Armv8-R architecture targets the Real-time profile. It introduces
	  virtualization at the highest security level while retaining the
	  Protected Memory System Architecture (PMSA) based on a Memory
	  Protection Unit (MPU). It supports the A32 and T32 instruction sets.
config ARMV7_EXCEPTION_STACK_SIZE int "Undefined Instruction and Abort stack size (in bytes)" default 256 help This option specifies the size of the stack used by the undefined instruction and data abort exception handlers. config ARMV7_FIQ_STACK_SIZE int "FIQ stack size (in bytes)" default 256 help This option specifies the size of the stack used by the FIQ handler. config ARMV7_SVC_STACK_SIZE int "SVC stack size (in bytes)" default 512 help This option specifies the size of the stack used by the SVC handler. config ARMV7_SYS_STACK_SIZE int "SYS stack size (in bytes)" default 1024 help This option specifies the size of the stack used by the system mode. config RUNTIME_NMI default y config GEN_ISR_TABLES default y config GEN_IRQ_VECTOR_TABLE default n config DISABLE_TCM_ECC bool "Disable ECC on TCM" help This option disables ECC checks on Tightly Coupled Memory. config DCACHE_LINE_SIZE default 64 if CPU_CORTEX_R52 default 32 config ICACHE_LINE_SIZE default 64 if CPU_CORTEX_R52 default 32 endif # CPU_AARCH32_CORTEX_R config TEST_EXTRA_STACK_SIZE default 1024 if SMP ```
/content/code_sandbox/arch/arm/core/cortex_a_r/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,205
```c
/*
 * Organisation (CSIRO) ABN 41 687 119 230.
 *
 */

#include <zephyr/sys/util_macro.h>
#include <zephyr/arch/common/semihost.h>

#if !(defined(CONFIG_ISA_ARM) || defined(CONFIG_ISA_THUMB2))
#error Unsupported ISA
#endif

/*
 * Execute a semihosting operation on the debug host.
 *
 * @param instr semihosting operation number, passed in r0 per the ABI
 * @param args  pointer to the operation's parameter block, passed in r1
 * @return the host's result value, returned in r0
 *
 * The SVC immediate selects the semihosting trap: 0xAB in Thumb (T32)
 * state, 0x123456 in Arm (A32) state. The explicit r0/r1 register binding
 * is required by the semihosting calling convention; "memory" is clobbered
 * because the host may read or write the args block.
 */
long semihost_exec(enum semihost_instr instr, void *args)
{
	register unsigned long r0 __asm__ ("r0") = instr;
	register void *r1 __asm__ ("r1") = args;
	register long ret __asm__ ("r0");

	if (IS_ENABLED(CONFIG_ISA_THUMB2)) {
		__asm__ __volatile__ ("svc 0xab"
				      : "=r" (ret) : "r" (r0), "r" (r1) : "memory");
	} else {
		__asm__ __volatile__ ("svc 0x123456"
				      : "=r" (ret) : "r" (r0), "r" (r1) : "memory");
	}
	return ret;
}
```
/content/code_sandbox/arch/arm/core/cortex_a_r/semihost.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
227
```unknown
/*
 *
 */

/**
 * @file
 * @brief ARM Cortex-A and Cortex-R exception/interrupt exit API
 *
 * Provides functions for performing kernel handling when exiting exceptions,
 * or interrupts that are installed directly in the vector table (i.e. that are
 * not wrapped around by _isr_wrapper()).
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include "macro_priv.inc"

_ASM_FILE_PROLOGUE

GTEXT(z_arm_exc_exit)
GTEXT(z_arm_int_exit)
GTEXT(z_arm_do_swap)
GDATA(_kernel)

/*
 * Fix up the saved return state when CONFIG_USERSPACE is enabled and, when
 * returning to a user-mode thread, switch back to the thread's user stack.
 * Clobbers r0-r1 (restored from the copied state before returning).
 */
.macro userspace_exc_exit
#if defined(CONFIG_USERSPACE)
	cps #MODE_SVC
	sub sp, #8
	push {r0-r1}
	/*
	 * Copy return state from sys/usr state onto the svc stack.
	 * We have to put $sp_usr back into $sp since we switched to
	 * the privileged stack on exception entry. The return state
	 * is on the privileged stack so it needs to be copied to the
	 * svc stack since we cannot trust the usr stack.
	 */
	cps #MODE_SYS
	pop {r0-r1}
	cps #MODE_SVC
	str r0, [sp, #8]
	str r1, [sp, #12]

	/* Only switch the stacks if returning to a user thread */
	and r1, #MODE_MASK
	cmp r1, #MODE_USR
	bne system_thread_exit\@

	/* Restore user stack pointer */
	get_cpu r0
	ldr r0, [r0, #___cpu_t_current_OFFSET]
	cps #MODE_SYS
	ldr sp, [r0, #_thread_offset_to_sp_usr] /* sp_usr */
	cps #MODE_SVC
system_thread_exit\@:
	pop {r0-r1}
#endif
.endm

/*
 * Restore (or discard) the floating-point context saved on the exception
 * stack frame and pop that frame. Expects r0 = remaining nesting count.
 */
.macro fpu_exc_exit
#if defined(CONFIG_FPU_SHARING)
	/*
	 * If the floating point context pointer is null, then a context was
	 * saved so restore the float context from the exception stack frame.
	 */
	get_cpu r2
	ldr r1, [r2, #___cpu_t_fp_ctx_OFFSET]
	cmp r1, #0
	beq vfp_restore\@

	/*
	 * If leaving the last interrupt context, remove the floating point
	 * context pointer.
	 */
	cmp r0, #0
	moveq r1, #0
	streq r1, [r2, #___cpu_t_fp_ctx_OFFSET]
	b vfp_exit\@

vfp_restore\@:
	add r3, sp, #___fpu_sf_t_fpscr_OFFSET
	ldm r3, {r1, r2}
	tst r2, #FPEXC_EN
	beq vfp_exit\@
	vmsr fpexc, r2
	vmsr fpscr, r1
	mov r3, sp
	vldmia r3!, {s0-s15}
#ifdef CONFIG_VFP_FEATURE_REGS_S64_D32
	vldmia r3!, {d16-d31}
#endif

vfp_exit\@:
	/* Leave the VFP disabled when leaving */
	mov r1, #0
	vmsr fpexc, r1

	add sp, sp, #___fpu_t_SIZEOF
#endif
.endm

/**
 * @brief Kernel housekeeping when exiting interrupt handler installed directly
 * in the vector table
 *
 * Kernel allows installing interrupt handlers (ISRs) directly into the vector
 * table to get the lowest interrupt latency possible. This allows the ISR to
 * be invoked directly without going through a software interrupt table.
 * However, upon exiting the ISR, some kernel work must still be performed,
 * namely possible context switching. While ISRs connected in the software
 * interrupt table do this automatically via a wrapper, ISRs connected directly
 * in the vector table must invoke z_arm_int_exit() as the *very last* action
 * before returning.
 *
 * e.g.
 *
 * void myISR(void)
 * {
 *	printk("in %s\n", __FUNCTION__);
 *	doStuff();
 *	z_arm_int_exit();
 * }
 */
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_int_exit)

#ifdef CONFIG_STACK_SENTINEL
	bl z_check_stack_sentinel
#endif /* CONFIG_STACK_SENTINEL */

	/* Disable nested interrupts while exiting, this should happen
	 * before context switch also, to ensure interrupts are disabled.
	 */
	cpsid i

#ifdef CONFIG_PREEMPT_ENABLED
	/* Do not context switch if exiting a nested interrupt */
	get_cpu r3
	ldr r0, [r3, #___cpu_t_nested_OFFSET]
	cmp r0, #1
	bhi __EXIT_INT

	/* Swap only when the cached next-ready thread differs from current */
	ldr r1, [r3, #___cpu_t_current_OFFSET]
	ldr r2, =_kernel
	ldr r0, [r2, #_kernel_offset_to_ready_q_cache]
	cmp r0, r1
	blne z_arm_do_swap

__EXIT_INT:
#endif /* CONFIG_PREEMPT_ENABLED */

	/* Decrement interrupt nesting count */
	get_cpu r2
	ldr r0, [r2, #___cpu_t_nested_OFFSET]
	sub r0, r0, #1
	str r0, [r2, #___cpu_t_nested_OFFSET]

	/* Restore previous stack pointer */
	pop {r2, r3}
	add sp, sp, r3

	/*
	 * Restore lr_svc stored into the SVC mode stack by the mode entry
	 * function. This ensures that the return address of the interrupted
	 * context is preserved in case of interrupt nesting.
	 */
	pop {lr}

	/*
	 * Restore r0-r3, r12 and lr_irq stored into the process stack by the
	 * mode entry function. These registers are saved by _isr_wrapper for
	 * IRQ mode and z_arm_svc for SVC mode.
	 *
	 * r0-r3 are either the values from the thread before it was switched
	 * out or they are the args to _new_thread for a new thread.
	 */
	cps #MODE_SYS

#if defined(CONFIG_FPU_SHARING)
	fpu_exc_exit
#endif

	pop {r0-r3, r12, lr}
	userspace_exc_exit
	rfeia sp!

/**
 * @brief Kernel housekeeping when exiting exception handler
 *
 * The exception exit routine performs appropriate housekeeping tasks depending
 * on the mode of exit:
 *
 * If exiting a nested or non-fatal exception, the exit routine restores the
 * saved exception stack frame to resume the excepted context.
 *
 * If exiting a non-nested fatal exception, the exit routine, assuming that the
 * current faulting thread is aborted, discards the saved exception stack
 * frame containing the aborted thread context and switches to the next
 * scheduled thread.
 *
 * void z_arm_exc_exit(bool fatal)
 *
 * @param fatal True if exiting from a fatal fault; otherwise, false
 */
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_exc_exit)

	/* Do not context switch if exiting a nested exception */
	get_cpu r3
	ldr r1, [r3, #___cpu_t_nested_OFFSET]
	cmp r1, #1
	bhi __EXIT_EXC

	/* If the fault is not fatal, return to the current thread context */
	cmp r0, #0
	beq __EXIT_EXC

	/*
	 * If the fault is fatal, the current thread must have been aborted by
	 * the exception handler. Clean up the exception stack frame and switch
	 * to the next scheduled thread.
	 */

	/* Clean up exception stack frame */
#if defined(CONFIG_FPU_SHARING)
	add sp, sp, #___fpu_t_SIZEOF
#endif
	add sp, #32

	/*
	 * Switch in the next scheduled thread.
	 *
	 * Note that z_arm_do_swap must be called in the SVC mode because it
	 * switches to the SVC mode during context switch and returns to the
	 * caller using lr_svc.
	 */
	cps #MODE_SVC
	bl z_arm_do_swap

	/* Decrement exception nesting count */
	get_cpu r3
	ldr r0, [r3, #___cpu_t_nested_OFFSET]
	sub r0, r0, #1
	str r0, [r3, #___cpu_t_nested_OFFSET]

	/* Return to the switched thread */
	cps #MODE_SYS

#if defined(CONFIG_FPU_SHARING)
	fpu_exc_exit
#endif

	pop {r0-r3, r12, lr}
	userspace_exc_exit
	rfeia sp!

__EXIT_EXC:

	/* Decrement exception nesting count */
	ldr r0, [r3, #___cpu_t_nested_OFFSET]
	sub r0, r0, #1
	str r0, [r3, #___cpu_t_nested_OFFSET]

#if defined(CONFIG_FPU_SHARING)
	add sp, sp, #___fpu_t_SIZEOF
#endif

	/*
	 * Restore r0-r3, r12, lr, lr_und and spsr_und from the exception stack
	 * and return to the current thread.
	 */
	ldmia sp, {r0-r3, r12, lr}^
	add sp, #24
	rfeia sp!
```
/content/code_sandbox/arch/arm/core/cortex_a_r/exc_exit.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,984
```c
/*
 *
 */

/**
 * @file
 * @brief ARM Cortex-A and Cortex-R interrupt management
 *
 *
 * Interrupt management: enabling/disabling and dynamic ISR
 * connecting/replacing. SW_ISR_TABLE_DYNAMIC has to be enabled for
 * connecting ISRs at runtime.
 */

#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/drivers/interrupt_controller/gic.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/barrier.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/sw_isr_table.h>
#include <zephyr/irq.h>
#include <zephyr/tracing/tracing.h>
#include <zephyr/pm/pm.h>

extern void z_arm_reserved(void);

/*
 * For Cortex-A and Cortex-R cores, the default interrupt controller is the ARM
 * Generic Interrupt Controller (GIC) and therefore the architecture interrupt
 * control functions are mapped to the GIC driver interface.
 *
 * When a custom interrupt controller is used (i.e.
 * CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER is enabled), the architecture
 * interrupt control functions are mapped to the SoC layer in
 * `include/arch/arm/irq.h`.
 */
#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
/* Enable interrupt @irq at the GIC. */
void arch_irq_enable(unsigned int irq)
{
	arm_gic_irq_enable(irq);
}

/* Disable interrupt @irq at the GIC. */
void arch_irq_disable(unsigned int irq)
{
	arm_gic_irq_disable(irq);
}

/* Return non-zero if interrupt @irq is enabled at the GIC. */
int arch_irq_is_enabled(unsigned int irq)
{
	return arm_gic_irq_is_enabled(irq);
}

/**
 * @internal
 *
 * @brief Set an interrupt's priority
 *
 * The priority is verified if ASSERT_ON is enabled. The maximum number
 * of priority levels is a little complex, as there are some hardware
 * priority levels which are reserved: three for various types of exceptions,
 * and possibly one additional to support zero latency interrupts.
 */
void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
{
	arm_gic_irq_set_priority(irq, prio, flags);
}
#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */

void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf);

/**
 *
 * @brief Spurious interrupt handler
 *
 * Installed in all _sw_isr_table slots at boot time. Throws an error if
 * called.
 *
 */
void z_irq_spurious(const void *unused)
{
	ARG_UNUSED(unused);

	z_arm_fatal_error(K_ERR_SPURIOUS_IRQ, NULL);
}

#ifdef CONFIG_PM
/* Direct-ISR PM hook: resume the PM subsystem if the CPU was idling. */
void _arch_isr_direct_pm(void)
{
	unsigned int key;

	/* irq_lock() does what we want for this CPU */
	key = irq_lock();

	if (_kernel.idle) {
		_kernel.idle = 0;
		pm_system_resume();
	}

	irq_unlock(key);
}
#endif

#ifdef CONFIG_DYNAMIC_INTERRUPTS
#ifdef CONFIG_GEN_ISR_TABLES
/*
 * Install @routine/@parameter for @irq at runtime and set its priority.
 * Returns the IRQ number (the "vector" as seen by the caller).
 */
int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
			     void (*routine)(const void *parameter),
			     const void *parameter, uint32_t flags)
{
	z_isr_install(irq, routine, parameter);
	z_arm_irq_priority_set(irq, priority, flags);
	return irq;
}
#endif /* CONFIG_GEN_ISR_TABLES */

#ifdef CONFIG_DYNAMIC_DIRECT_INTERRUPTS
/*
 * Dispatch the currently-active interrupt through _sw_isr_table.
 *
 * NOTE(review): __get_IPSR() is a CMSIS Cortex-M intrinsic (exception
 * number, with external IRQs starting at 16); confirm its semantics for
 * this Cortex-A/R port.
 */
static inline void z_arm_irq_dynamic_direct_isr_dispatch(void)
{
	uint32_t irq = __get_IPSR() - 16;

	if (irq < IRQ_TABLE_SIZE) {
		struct _isr_table_entry *isr_entry = &_sw_isr_table[irq];

		isr_entry->isr(isr_entry->arg);
	}
}

/* Direct-ISR trampoline that allows rescheduling on exit (returns 1). */
ISR_DIRECT_DECLARE(z_arm_irq_direct_dynamic_dispatch_reschedule)
{
	z_arm_irq_dynamic_direct_isr_dispatch();
	return 1;
}

/* Direct-ISR trampoline that suppresses rescheduling on exit (returns 0). */
ISR_DIRECT_DECLARE(z_arm_irq_direct_dynamic_dispatch_no_reschedule)
{
	z_arm_irq_dynamic_direct_isr_dispatch();
	return 0;
}
#endif /* CONFIG_DYNAMIC_DIRECT_INTERRUPTS */

#endif /* CONFIG_DYNAMIC_INTERRUPTS */
```
/content/code_sandbox/arch/arm/core/cortex_a_r/irq_manage.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
827
```unknown
/*
 *
 */

#include <zephyr/toolchain.h>

_ASM_FILE_PROLOGUE

GTEXT(__aeabi_read_tp)

/*
 * AEABI helper that returns the current thread pointer in r0 by reading
 * the CP15 software thread ID register (c13, c0, 2).
 */
SECTION_FUNC(text, __aeabi_read_tp)
	/*
	 * TPIDRURW will be used as a base pointer pointing to the TLS area.
	 */
	mrc 15, 0, r0, c13, c0, 2
	bx lr
```
/content/code_sandbox/arch/arm/core/cortex_a_r/__aeabi_read_tp.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
85
```unknown
/*
 *
 */

/**
 * @file
 * @brief Reset handler
 *
 * Reset handler that prepares the system for running C code.
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/cpu.h>
#include <offsets_short.h>
#include <cortex_a_r/tcm.h>
#include "vector_table.h"
#include "boot.h"
#include "macro_priv.inc"

_ASM_FILE_PROLOGUE

GTEXT(z_arm_reset)
GDATA(z_interrupt_stacks)
GDATA(z_arm_svc_stack)
GDATA(z_arm_sys_stack)
GDATA(z_arm_fiq_stack)
GDATA(z_arm_abort_stack)
GDATA(z_arm_undef_stack)
#if defined(CONFIG_PLATFORM_SPECIFIC_INIT)
GTEXT(z_arm_platform_init)
#endif

/**
 *
 * @brief Reset vector
 *
 * Ran when the system comes out of reset. The processor is in Supervisor mode
 * and interrupts are disabled. The processor architectural registers are in
 * an indeterminate state.
 *
 * When these steps are completed, jump to z_prep_c(), which will finish
 * setting up the system for running C code.
 *
 */
SECTION_SUBSEC_FUNC(TEXT, _reset_section, z_arm_reset)
SECTION_SUBSEC_FUNC(TEXT, _reset_section, __start)

#if defined(CONFIG_AARCH32_ARMV8_R)
	/* Check if we are starting in HYP mode */
	mrs r0, cpsr
	and r0, r0, #MODE_MASK
	cmp r0, #MODE_HYP
	bne EL1_Reset_Handler

	/* Init HSCTLR see Armv8-R AArch32 architecture profile */
	ldr r0, =(HSCTLR_RES1 | SCTLR_I_BIT | SCTLR_C_BIT)
	mcr p15, 4, r0, c1, c0, 0

	/* Init HACTLR: Enable EL1 access to all IMP DEF registers */
	ldr r0, =HACTLR_INIT
	mcr p15, 4, r0, c1, c0, 1

	/* Go to SVC mode */
	mrs r0, cpsr
	bic r0, #MODE_MASK
	orr r0, #MODE_SVC
	msr spsr_cxsf, r0

	ldr r0, =EL1_Reset_Handler
	msr elr_hyp, r0
	dsb
	isb
	eret

EL1_Reset_Handler:
#endif

#if defined(CONFIG_DCLS)
	/*
	 * Initialise CPU registers to a defined state if the processor is
	 * configured as Dual-redundant Core Lock-step (DCLS). This is required
	 * for state convergence of the two parallel executing cores.
	 */

	/* Common and SVC mode registers */
	mov r0, #0
	mov r1, #0
	mov r2, #0
	mov r3, #0
	mov r4, #0
	mov r5, #0
	mov r6, #0
	mov r7, #0
	mov r8, #0
	mov r9, #0
	mov r10, #0
	mov r11, #0
	mov r12, #0
	mov r13, #0         /* r13_svc */
	mov r14, #0         /* r14_svc */
	mrs r0, cpsr
	msr spsr_cxsf, r0   /* spsr_svc */

	/* FIQ mode registers */
	cps #MODE_FIQ
	mov r8, #0          /* r8_fiq */
	mov r9, #0          /* r9_fiq */
	mov r10, #0         /* r10_fiq */
	mov r11, #0         /* r11_fiq */
	mov r12, #0         /* r12_fiq */
	mov r13, #0         /* r13_fiq */
	mov r14, #0         /* r14_fiq */
	mrs r0, cpsr
	msr spsr_cxsf, r0   /* spsr_fiq */

	/* IRQ mode registers */
	cps #MODE_IRQ
	mov r13, #0         /* r13_irq */
	mov r14, #0         /* r14_irq */
	mrs r0, cpsr
	msr spsr_cxsf, r0   /* spsr_irq */

	/* ABT mode registers */
	cps #MODE_ABT
	mov r13, #0         /* r13_abt */
	mov r14, #0         /* r14_abt */
	mrs r0, cpsr
	msr spsr_cxsf, r0   /* spsr_abt */

	/* UND mode registers */
	cps #MODE_UND
	mov r13, #0         /* r13_und */
	mov r14, #0         /* r14_und */
	mrs r0, cpsr
	msr spsr_cxsf, r0   /* spsr_und */

	/* SYS mode registers */
	cps #MODE_SYS
	mov r13, #0         /* r13_sys */
	mov r14, #0         /* r14_sys */

#if defined(CONFIG_FPU) && defined(CONFIG_CPU_HAS_VFP)
	/*
	 * Initialise FPU registers to a defined state.
	 */

	/* Allow VFP coprocessor access */
	mrc p15, 0, r0, c1, c0, 2
	orr r0, r0, #(CPACR_CP10(CPACR_FA) | CPACR_CP11(CPACR_FA))
	mcr p15, 0, r0, c1, c0, 2

	/* Enable VFP */
	mov r0, #FPEXC_EN
	vmsr fpexc, r0

	/* Initialise VFP registers (r1 is still zero from the code above) */
	fmdrr d0, r1, r1
	fmdrr d1, r1, r1
	fmdrr d2, r1, r1
	fmdrr d3, r1, r1
	fmdrr d4, r1, r1
	fmdrr d5, r1, r1
	fmdrr d6, r1, r1
	fmdrr d7, r1, r1
	fmdrr d8, r1, r1
	fmdrr d9, r1, r1
	fmdrr d10, r1, r1
	fmdrr d11, r1, r1
	fmdrr d12, r1, r1
	fmdrr d13, r1, r1
	fmdrr d14, r1, r1
	fmdrr d15, r1, r1
#if defined(CONFIG_VFP_FEATURE_REGS_S64_D32)
	fmdrr d16, r1, r1
	fmdrr d17, r1, r1
	fmdrr d18, r1, r1
	fmdrr d19, r1, r1
	fmdrr d20, r1, r1
	fmdrr d21, r1, r1
	fmdrr d22, r1, r1
	fmdrr d23, r1, r1
	fmdrr d24, r1, r1
	fmdrr d25, r1, r1
	fmdrr d26, r1, r1
	fmdrr d27, r1, r1
	fmdrr d28, r1, r1
	fmdrr d29, r1, r1
	fmdrr d30, r1, r1
	fmdrr d31, r1, r1
#endif /* CONFIG_VFP_FEATURE_REGS_S64_D32 */

	vmsr fpscr, r1
	vmsr fpexc, r1
#endif /* CONFIG_FPU && CONFIG_CPU_HAS_VFP */

#endif /* CONFIG_DCLS */

	ldr r0, =arm_cpu_boot_params

#if CONFIG_MP_MAX_NUM_CPUS > 1
	/*
	 * This code uses voting locks, like arch/arm64/core/reset.S, to determine primary CPU.
	 */

	/*
	 * Get the "logic" id defined by cpu_node_list statically for voting lock self-identify.
	 * It is worth noting that this is NOT the final logic id (arch_curr_cpu()->id)
	 */
	get_cpu_logic_id r1, r2, r3, r4		// r1: MPID, r2: logic id

	add r4, r0, #BOOT_PARAM_VOTING_OFFSET

	/* signal our desire to vote */
	mov r5, #1
	strb r5, [r4, r2]
	ldr r3, [r0, #BOOT_PARAM_MPID_OFFSET]
	cmn r3, #1
	beq 1f

	/* some core already won, release */
	mov r7, #0
	strb r7, [r4, r2]
	b _secondary_core

	/* suggest current core then release */
	/* NOTE(review): on this path r7 has not been written since reset
	 * (it is only guaranteed zero under CONFIG_DCLS); confirm the byte
	 * stored to the voting slot below is intended to come from r7.
	 */
1:	str r1, [r0, #BOOT_PARAM_MPID_OFFSET]
	strb r7, [r4, r2]
	dmb

	/* then wait until every other core is done voting */
	mov r5, #0
2:	ldrb r3, [r4, r5]
	tst r3, #255
	/* wait */
	bne 2b
	add r5, r5, #1
	cmp r5, #CONFIG_MP_MAX_NUM_CPUS
	bne 2b

	/* check if current core won */
	dmb
	ldr r3, [r0, #BOOT_PARAM_MPID_OFFSET]
	cmp r3, r1
	beq _primary_core
	/* fallthrough secondary */

	/* loop until our turn comes */
_secondary_core:
	dmb
	ldr r2, [r0, #BOOT_PARAM_MPID_OFFSET]
	cmp r1, r2
	bne _secondary_core

	/* we can now load our stack pointer values and move on */
	ldr r4, =arch_secondary_cpu_init
	ldr r5, [r0, #BOOT_PARAM_FIQ_SP_OFFSET]
	ldr r6, [r0, #BOOT_PARAM_IRQ_SP_OFFSET]
	ldr r7, [r0, #BOOT_PARAM_ABT_SP_OFFSET]
	ldr r8, [r0, #BOOT_PARAM_UDF_SP_OFFSET]
	ldr r9, [r0, #BOOT_PARAM_SVC_SP_OFFSET]
	ldr r10, [r0, #BOOT_PARAM_SYS_SP_OFFSET]
	b 2f

_primary_core:
#endif

	ldr r4, =z_prep_c
	ldr r5, =(z_arm_fiq_stack + CONFIG_ARMV7_FIQ_STACK_SIZE)
	ldr r6, =(z_interrupt_stacks + CONFIG_ISR_STACK_SIZE)
	ldr r7, =(z_arm_abort_stack + CONFIG_ARMV7_EXCEPTION_STACK_SIZE)
	ldr r8, =(z_arm_undef_stack + CONFIG_ARMV7_EXCEPTION_STACK_SIZE)
	ldr r9, =(z_arm_svc_stack + CONFIG_ARMV7_SVC_STACK_SIZE)
	ldr r10, =(z_arm_sys_stack + CONFIG_ARMV7_SYS_STACK_SIZE)

2:
	/*
	 * Configure stack.
	 */

	/* FIQ mode stack */
	msr CPSR_c, #(MODE_FIQ | I_BIT | F_BIT)
	mov sp, r5

	/* IRQ mode stack */
	msr CPSR_c, #(MODE_IRQ | I_BIT | F_BIT)
	mov sp, r6

	/* ABT mode stack */
	msr CPSR_c, #(MODE_ABT | I_BIT | F_BIT)
	mov sp, r7

	/* UND mode stack */
	msr CPSR_c, #(MODE_UND | I_BIT | F_BIT)
	mov sp, r8

	/* SVC mode stack */
	msr CPSR_c, #(MODE_SVC | I_BIT | F_BIT)
	mov sp, r9

	/* SYS mode stack */
	msr CPSR_c, #(MODE_SYS | I_BIT | F_BIT)
	mov sp, r10

#if defined(CONFIG_PLATFORM_SPECIFIC_INIT)
	/* Execute platform-specific initialisation if applicable */
	bl z_arm_platform_init
#endif

#if defined(CONFIG_WDOG_INIT)
	/* board-specific watchdog initialization is necessary */
	bl z_arm_watchdog_init
#endif

#if defined(CONFIG_DISABLE_TCM_ECC)
	bl z_arm_tcm_disable_ecc
#endif

	bx r4
```
/content/code_sandbox/arch/arm/core/cortex_a_r/reset.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,798
```objective-c
/*
 *
 */

#ifndef ZEPHYR_ARCH_ARM_CORE_AARCH32_MPU_ARM_MPU_V7_INTERNAL_H_
#define ZEPHYR_ARCH_ARM_CORE_AARCH32_MPU_ARM_MPU_V7_INTERNAL_H_

#include <zephyr/sys/math_extras.h>

#include <arm_mpu_internal.h>

#define LOG_LEVEL CONFIG_MPU_LOG_LEVEL
#include <zephyr/logging/log.h>

/* Global MPU configuration at system initialization. */
static void mpu_init(void)
{
	/* No specific configuration at init for ARMv7-M MPU. */
}

/* This internal function performs MPU region initialization.
 *
 * Note:
 *   The caller must provide a valid region index.
 */
static void region_init(const uint32_t index,
	const struct arm_mpu_region *region_conf)
{
	/* Select the region you want to access */
	set_region_number(index);
	/* Configure the region */
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
	/*
	 * Clear size register, which disables the entry. It cannot be
	 * enabled as we reconfigure it.
	 */
	set_region_size(0);
	set_region_base_address(region_conf->base & MPU_RBAR_ADDR_Msk);
	set_region_attributes(region_conf->attr.rasr);
	set_region_size(region_conf->size | MPU_RASR_ENABLE_Msk);
#else
	MPU->RBAR = (region_conf->base & MPU_RBAR_ADDR_Msk)
				| MPU_RBAR_VALID_Msk | index;
	MPU->RASR = region_conf->attr.rasr | MPU_RASR_ENABLE_Msk;
	LOG_DBG("[%d] 0x%08x 0x%08x",
		index, region_conf->base, region_conf->attr.rasr);
#endif
}

/* @brief Partition sanity check
 *
 * This internal function performs run-time sanity check for
 * MPU region start address and size.
 *
 * @param part Pointer to the data structure holding the partition
 *             information (must be valid).
 */
static int mpu_partition_is_valid(const struct z_arm_mpu_partition *part)
{
	/* Partition size must be power-of-two,
	 * and greater or equal to the minimum
	 * MPU region size. Start address of the
	 * partition must align with size.
	 */
	int partition_is_valid =
		((part->size & (part->size - 1U)) == 0U)
		&&
		(part->size >= CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE)
		&&
		((part->start & (part->size - 1U)) == 0U);

	return partition_is_valid;
}

/**
 * This internal function converts the region size to
 * the SIZE field value of MPU_RASR.
 *
 * Note: If size is not a power-of-two, it is rounded-up to the next
 * power-of-two value, and the returned SIZE field value corresponds
 * to that power-of-two value.
 */
static inline uint32_t size_to_mpu_rasr_size(uint32_t size)
{
	/* The minimal supported region size is 32 bytes */
	if (size <= 32U) {
		return REGION_32B;
	}

	/*
	 * A size value greater than 2^31 could not be handled by
	 * round_up_to_next_power_of_two() properly. We handle
	 * it separately here.
	 */
	if (size > (1UL << 31)) {
		return REGION_4G;
	}

	/* RASR.SIZE value n encodes a region of 2^(n+1) bytes, hence
	 * ceil(log2(size)) - 1 (the -2 + 1 below), e.g. size 32 -> 4.
	 */
	return ((32 - __builtin_clz(size - 1U) - 2 + 1) << MPU_RASR_SIZE_Pos) &
		MPU_RASR_SIZE_Msk;
}

/**
 * This internal function is utilized by the MPU driver to combine a given
 * region attribute configuration and size and fill-in a driver-specific
 * structure with the correct MPU region configuration.
 */
static inline void get_region_attr_from_mpu_partition_info(
	arm_mpu_region_attr_t *p_attr,
	const k_mem_partition_attr_t *attr, uint32_t base, uint32_t size)
{
	/* in ARMv7-M MPU the base address is not required
	 * to determine region attributes
	 */
	(void) base;

#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
	/* On Cortex-R the size is carried separately, not encoded in RASR */
	(void) size;

	p_attr->rasr = attr->rasr_attr;
#else
	p_attr->rasr = attr->rasr_attr | size_to_mpu_rasr_size(size);
#endif
}

#if defined(CONFIG_USERSPACE)

/**
 * This internal function returns the minimum HW MPU region index
 * that may hold the configuration of a dynamic memory region.
 *
 * Trivial for ARMv7-M MPU, where dynamic memory areas are programmed
 * in MPU regions indices right after the static regions.
 */
static inline int get_dyn_region_min_index(void)
{
	return static_regions_num;
}

/* Only a single bit is set for all user accessible permissions.
 * In ARMv7-M MPU this is bit AP[1].
 */
#define MPU_USER_READ_ACCESSIBLE_Msk (P_RW_U_RO & P_RW_U_RW & P_RO_U_RO & RO)

/**
 * This internal function checks if the region is user accessible or not.
 *
 * Note:
 *   The caller must provide a valid region number.
 */
static inline int is_user_accessible_region(uint32_t r_index, int write)
{
	uint32_t r_ap = get_region_ap(r_index);

	if (write != 0) {
		return r_ap == P_RW_U_RW;
	}

	return r_ap & MPU_USER_READ_ACCESSIBLE_Msk;
}

/**
 * This internal function validates whether a given memory buffer
 * is user accessible or not.
 */
static inline int mpu_buffer_validate(const void *addr, size_t size, int write)
{
	int32_t r_index;
	int rc = -EPERM;

	int key = arch_irq_lock();

	/* Iterate all mpu regions in reversed order */
	for (r_index = get_num_regions() - 1U; r_index >= 0; r_index--) {
		if (!is_enabled_region(r_index) ||
		    !is_in_region(r_index, (uint32_t)addr, size)) {
			continue;
		}

		/* For ARM MPU, higher region number takes priority.
		 * Since we iterate all mpu regions in reversed order, so
		 * we can stop the iteration immediately once we find the
		 * matched region that grants permission or denies access.
		 */
		if (is_user_accessible_region(r_index, write)) {
			rc = 0;
		} else {
			rc = -EPERM;
		}
		break;
	}

	arch_irq_unlock(key);
	return rc;
}

#endif /* CONFIG_USERSPACE */

static int mpu_configure_region(const uint8_t index,
	const struct z_arm_mpu_partition *new_region);

static int mpu_configure_regions(const struct z_arm_mpu_partition
	regions[], uint8_t regions_num, uint8_t start_reg_index,
	bool do_sanity_check);

/* This internal function programs the static MPU regions.
 *
 * It returns the number of MPU region indices configured.
 *
 * Note:
 * If the static MPU regions configuration has not been successfully
 * performed, the error signal is propagated to the caller of the function.
 */
static int mpu_configure_static_mpu_regions(const struct z_arm_mpu_partition
	static_regions[], const uint8_t regions_num,
	const uint32_t background_area_base,
	const uint32_t background_area_end)
{
	int mpu_reg_index = static_regions_num;

	/* In ARMv7-M architecture the static regions are
	 * programmed on top of SRAM region configuration.
	 */
	ARG_UNUSED(background_area_base);
	ARG_UNUSED(background_area_end);

	mpu_reg_index = mpu_configure_regions(static_regions,
		regions_num, mpu_reg_index, true);

	static_regions_num = mpu_reg_index;

	return mpu_reg_index;
}

/* This internal function programs the dynamic MPU regions.
 *
 * It returns the number of MPU region indices configured.
 *
 * Note:
 * If the dynamic MPU regions configuration has not been successfully
 * performed, the error signal is propagated to the caller of the function.
 */
static int mpu_configure_dynamic_mpu_regions(const struct z_arm_mpu_partition
	dynamic_regions[], uint8_t regions_num)
{
	int mpu_reg_index = static_regions_num;

	/* In ARMv7-M architecture the dynamic regions are
	 * programmed on top of existing SRAM region configuration.
	 */

	mpu_reg_index = mpu_configure_regions(dynamic_regions,
		regions_num, mpu_reg_index, false);

	if (mpu_reg_index != -EINVAL) {

		/* Disable the non-programmed MPU regions. */
		for (int i = mpu_reg_index; i < get_num_regions(); i++) {
			ARM_MPU_ClrRegion(i);
		}
	}

	return mpu_reg_index;
}

/* Disable (clear) the MPU region selected by @rnr. */
static inline void mpu_clear_region(uint32_t rnr)
{
	ARM_MPU_ClrRegion(rnr);
}

#endif /* ZEPHYR_ARCH_ARM_CORE_AARCH32_MPU_ARM_MPU_V7_INTERNAL_H_ */
```
/content/code_sandbox/arch/arm/core/mpu/arm_mpu_v7_internal.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,900
```c
/*
 *
 */

#include <zephyr/sys/slist.h>
#include <zephyr/arch/arm/mpu/arm_mpu.h>

#include <zephyr/arch/arm/cortex_m/arm_mpu_mem_cfg.h>

/*
 * Default static MPU regions: one covering flash, one covering SRAM.
 * The ARMv8-M attribute macros take the base address and byte size
 * directly; the non-v8 variants take a pre-computed encoded region size
 * (REGION_FLASH_SIZE / REGION_SRAM_SIZE from arm_mpu_mem_cfg.h).
 */
static const struct arm_mpu_region mpu_regions[] = {
	/* Region 0 */
	MPU_REGION_ENTRY("FLASH_0",
			 CONFIG_FLASH_BASE_ADDRESS,
#if defined(CONFIG_ARMV8_M_BASELINE) || defined(CONFIG_ARMV8_M_MAINLINE)
			 REGION_FLASH_ATTR(CONFIG_FLASH_BASE_ADDRESS, \
				 CONFIG_FLASH_SIZE * 1024)),
#else
			 REGION_FLASH_ATTR(REGION_FLASH_SIZE)),
#endif
	/* Region 1 */
	MPU_REGION_ENTRY("SRAM_0",
			 CONFIG_SRAM_BASE_ADDRESS,
#if defined(CONFIG_ARMV8_M_BASELINE) || defined(CONFIG_ARMV8_M_MAINLINE)
			 REGION_RAM_ATTR(CONFIG_SRAM_BASE_ADDRESS, \
				 CONFIG_SRAM_SIZE * 1024)),
#else
			 REGION_RAM_ATTR(REGION_SRAM_SIZE)),
#endif
};

/* Exported MPU configuration consumed by the ARM MPU driver. */
const struct arm_mpu_config mpu_config = {
	.num_regions = ARRAY_SIZE(mpu_regions),
	.mpu_regions = mpu_regions,
};
```
/content/code_sandbox/arch/arm/core/mpu/arm_mpu_regions.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
243
```objective-c /* * */ #ifndef ZEPHYR_ARCH_ARM_CORE_AARCH32_MPU_ARM_MPU_V8_INTERNAL_H_ #define ZEPHYR_ARCH_ARM_CORE_AARCH32_MPU_ARM_MPU_V8_INTERNAL_H_ #include <cortex_m/cmse.h> #define LOG_LEVEL CONFIG_MPU_LOG_LEVEL #include <zephyr/logging/log.h> #include <zephyr/sys/math_extras.h> #include <zephyr/sys/barrier.h> /** * @brief internal structure holding information of * memory areas where dynamic MPU programming * is allowed. */ struct dynamic_region_info { int index; struct arm_mpu_region region_conf; }; /** * Global array, holding the MPU region index of * the memory region inside which dynamic memory * regions may be configured. */ static struct dynamic_region_info dyn_reg_info[MPU_DYNAMIC_REGION_AREAS_NUM]; #if defined(CONFIG_CPU_CORTEX_M23) || defined(CONFIG_CPU_CORTEX_M33) || \ defined(CONFIG_CPU_CORTEX_M55) || defined(CONFIG_CPU_CORTEX_M85) static inline void mpu_set_mair0(uint32_t mair0) { MPU->MAIR0 = mair0; } static inline void mpu_set_rnr(uint32_t rnr) { MPU->RNR = rnr; } static inline void mpu_set_rbar(uint32_t rbar) { MPU->RBAR = rbar; } static inline uint32_t mpu_get_rbar(void) { return MPU->RBAR; } static inline void mpu_set_rlar(uint32_t rlar) { MPU->RLAR = rlar; } static inline uint32_t mpu_get_rlar(void) { return MPU->RLAR; } static inline uint8_t mpu_get_num_regions(void) { uint32_t type = MPU->TYPE; type = (type & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos; return (uint8_t)type; } static inline void mpu_clear_region(uint32_t rnr) { ARM_MPU_ClrRegion(rnr); } #elif defined(CONFIG_AARCH32_ARMV8_R) static inline void mpu_set_mair0(uint32_t mair0) { write_mair0(mair0); barrier_dsync_fence_full(); barrier_isync_fence_full(); } static inline void mpu_set_rnr(uint32_t rnr) { write_prselr(rnr); barrier_dsync_fence_full(); } static inline void mpu_set_rbar(uint32_t rbar) { write_prbar(rbar); barrier_dsync_fence_full(); barrier_isync_fence_full(); } static inline uint32_t mpu_get_rbar(void) { return read_prbar(); } static inline void 
mpu_set_rlar(uint32_t rlar) { write_prlar(rlar); barrier_dsync_fence_full(); barrier_isync_fence_full(); } static inline uint32_t mpu_get_rlar(void) { return read_prlar(); } static inline uint8_t mpu_get_num_regions(void) { uint32_t type = read_mpuir(); type = (type >> MPU_IR_REGION_Pos) & MPU_IR_REGION_Msk; return (uint8_t)type; } static inline void mpu_clear_region(uint32_t rnr) { mpu_set_rnr(rnr); mpu_set_rbar(0); mpu_set_rlar(0); } #else #error "Unsupported ARM CPU" #endif /* Global MPU configuration at system initialization. */ static void mpu_init(void) { /* Configure the cache-ability attributes for all the * different types of memory regions. */ mpu_set_mair0(MPU_MAIR_ATTRS); } static void mpu_set_region(uint32_t rnr, uint32_t rbar, uint32_t rlar) { mpu_set_rnr(rnr); mpu_set_rbar(rbar); mpu_set_rlar(rlar); } /* This internal function performs MPU region initialization. * * Note: * The caller must provide a valid region index. */ static void region_init(const uint32_t index, const struct arm_mpu_region *region_conf) { mpu_set_region( /* RNR */ index, /* RBAR */ (region_conf->base & MPU_RBAR_BASE_Msk) | (region_conf->attr.rbar & (MPU_RBAR_XN_Msk | MPU_RBAR_AP_Msk | MPU_RBAR_SH_Msk)), /* RLAR */ (region_conf->attr.r_limit & MPU_RLAR_LIMIT_Msk) | ((region_conf->attr.mair_idx << MPU_RLAR_AttrIndx_Pos) & MPU_RLAR_AttrIndx_Msk) | MPU_RLAR_EN_Msk ); LOG_DBG("[%d] 0x%08x 0x%08x 0x%08x 0x%08x", index, region_conf->base, region_conf->attr.rbar, region_conf->attr.mair_idx, region_conf->attr.r_limit); } /* @brief Partition sanity check * * This internal function performs run-time sanity check for * MPU region start address and size. * * @param part Pointer to the data structure holding the partition * information (must be valid). * */ static int mpu_partition_is_valid(const struct z_arm_mpu_partition *part) { /* Partition size must be a multiple of the minimum MPU region * size. Start address of the partition must align with the * minimum MPU region size. 
*/ int partition_is_valid = (part->size >= CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE) && ((part->size & (~(CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE - 1))) == part->size) && ((part->start & (CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE - 1)) == 0U); return partition_is_valid; } /** * This internal function returns the MPU region, in which a * buffer, specified by its start address and size, lies. If * a valid MPU region cannot be derived the function returns * -EINVAL. * * Note that, for the function to work properly, the ARM MPU * needs to be enabled. * */ #if defined(CONFIG_AARCH32_ARMV8_R) static inline int get_region_index(uint32_t start, uint32_t size) { uint32_t limit = (start + size - 1) & MPU_RLAR_LIMIT_Msk; for (uint8_t idx = 0; idx < mpu_get_num_regions(); idx++) { mpu_set_rnr(idx); if (start >= (mpu_get_rbar() & MPU_RBAR_BASE_Msk) && limit <= (mpu_get_rlar() & MPU_RLAR_LIMIT_Msk)) { return idx; } } return -EINVAL; } #else static inline int get_region_index(uint32_t start, uint32_t size) { uint32_t region_start_addr = arm_cmse_mpu_region_get(start); uint32_t region_end_addr = arm_cmse_mpu_region_get(start + size - 1); /* MPU regions are contiguous so return the region number, * if both start and end address are in the same region. 
*/ if (region_start_addr == region_end_addr) { return region_start_addr; } return -EINVAL; } #endif static inline uint32_t mpu_region_get_base(const uint32_t index) { mpu_set_rnr(index); return mpu_get_rbar() & MPU_RBAR_BASE_Msk; } static inline void mpu_region_set_base(const uint32_t index, const uint32_t base) { mpu_set_rnr(index); mpu_set_rbar((mpu_get_rbar() & (~MPU_RBAR_BASE_Msk)) | (base & MPU_RBAR_BASE_Msk)); } static inline uint32_t mpu_region_get_last_addr(const uint32_t index) { mpu_set_rnr(index); return (mpu_get_rlar() & MPU_RLAR_LIMIT_Msk) | (~MPU_RLAR_LIMIT_Msk); } static inline void mpu_region_set_limit(const uint32_t index, const uint32_t limit) { mpu_set_rnr(index); mpu_set_rlar((mpu_get_rlar() & (~MPU_RLAR_LIMIT_Msk)) | (limit & MPU_RLAR_LIMIT_Msk)); } static inline void mpu_region_get_access_attr(const uint32_t index, arm_mpu_region_attr_t *attr) { mpu_set_rnr(index); attr->rbar = mpu_get_rbar() & (MPU_RBAR_XN_Msk | MPU_RBAR_AP_Msk | MPU_RBAR_SH_Msk); attr->mair_idx = (mpu_get_rlar() & MPU_RLAR_AttrIndx_Msk) >> MPU_RLAR_AttrIndx_Pos; } static inline void mpu_region_get_conf(const uint32_t index, struct arm_mpu_region *region_conf) { mpu_set_rnr(index); /* Region attribution: * - Cache-ability * - Share-ability * - Access Permissions */ mpu_region_get_access_attr(index, &region_conf->attr); /* Region base address */ region_conf->base = mpu_get_rbar() & MPU_RBAR_BASE_Msk; /* Region limit address */ region_conf->attr.r_limit = mpu_get_rlar() & MPU_RLAR_LIMIT_Msk; } /** * This internal function is utilized by the MPU driver to combine a given * region attribute configuration and size and fill-in a driver-specific * structure with the correct MPU region configuration. 
*/ static inline void get_region_attr_from_mpu_partition_info( arm_mpu_region_attr_t *p_attr, const k_mem_partition_attr_t *attr, uint32_t base, uint32_t size) { p_attr->rbar = attr->rbar & (MPU_RBAR_XN_Msk | MPU_RBAR_AP_Msk | MPU_RBAR_SH_Msk); p_attr->mair_idx = attr->mair_idx; p_attr->r_limit = REGION_LIMIT_ADDR(base, size); } #if defined(CONFIG_USERSPACE) /** * This internal function returns the minimum HW MPU region index * that may hold the configuration of a dynamic memory region. * * Browse through the memory areas marked for dynamic MPU programming, * pick the one with the minimum MPU region index. Return that index. * * The function is optimized for the (most common) use-case of a single * marked area for dynamic memory regions. */ static inline int get_dyn_region_min_index(void) { int dyn_reg_min_index = dyn_reg_info[0].index; #if MPU_DYNAMIC_REGION_AREAS_NUM > 1 for (int i = 1; i < MPU_DYNAMIC_REGION_AREAS_NUM; i++) { if ((dyn_reg_info[i].index != -EINVAL) && (dyn_reg_info[i].index < dyn_reg_min_index) ) { dyn_reg_min_index = dyn_reg_info[i].index; } } #endif return dyn_reg_min_index; } static inline uint32_t mpu_region_get_size(uint32_t index) { return mpu_region_get_last_addr(index) + 1 - mpu_region_get_base(index); } /** * This internal function checks if region is enabled or not. * * Note: * The caller must provide a valid region number. */ static inline int is_enabled_region(uint32_t index) { mpu_set_rnr(index); return (mpu_get_rlar() & MPU_RLAR_EN_Msk) ? 1 : 0; } #if defined(CONFIG_AARCH32_ARMV8_R) /** * This internal function checks if the given buffer is in the region. * * Note: * The caller must provide a valid region number. */ static inline int is_in_region(uint32_t rnr, uint32_t start, uint32_t size) { uint32_t r_addr_start; uint32_t r_addr_end; uint32_t end; r_addr_start = mpu_region_get_base(rnr); r_addr_end = mpu_region_get_last_addr(rnr); size = size == 0U ? 
0U : size - 1U; if (u32_add_overflow(start, size, &end)) { return 0; } if ((start >= r_addr_start) && (end <= r_addr_end)) { return 1; } return 0; } static inline int is_user_accessible_region(uint32_t rnr, int write) { uint32_t r_ap; mpu_set_rnr(rnr); r_ap = (mpu_get_rbar() & MPU_RBAR_AP_Msk) >> MPU_RBAR_AP_Pos; if (write != 0) { return r_ap == P_RW_U_RW; } return ((r_ap == P_RW_U_RW) || (r_ap == P_RO_U_RO)); } /** * This internal function validates whether a given memory buffer * is user accessible or not. */ static inline int mpu_buffer_validate(const void *addr, size_t size, int write) { int32_t rnr; int rc = -EPERM; int key = arch_irq_lock(); /* Iterate all mpu regions in reversed order */ for (rnr = 0; rnr < mpu_get_num_regions(); rnr++) { if (!is_enabled_region(rnr) || !is_in_region(rnr, (uint32_t)addr, size)) { continue; } if (is_user_accessible_region(rnr, write)) { rc = 0; } } arch_irq_unlock(key); return rc; } #else /** * This internal function validates whether a given memory buffer * is user accessible or not. * * Note: [Doc. number: ARM-ECM-0359818] * "Some SAU, IDAU, and MPU configurations block the efficient implementation * of an address range check. The CMSE intrinsic operates under the assumption * that the configuration of the SAU, IDAU, and MPU is constrained as follows: * - An object is allocated in a single MPU/SAU/IDAU region. * - A stack is allocated in a single region. * * These points imply that the memory buffer does not span across multiple MPU, * SAU, or IDAU regions." * * MPU regions are configurable, however, some platforms might have fixed-size * SAU or IDAU regions. So, even if a buffer is allocated inside a single MPU * region, it might span across multiple SAU/IDAU regions, which will make the * TT-based address range check fail. * * Therefore, the function performs a second check, which is based on MPU only, * in case the fast address range check fails. 
* */ static inline int mpu_buffer_validate(const void *addr, size_t size, int write) { uint32_t _addr = (uint32_t)addr; uint32_t _size = (uint32_t)size; if (write) { if (arm_cmse_addr_range_readwrite_ok(_addr, _size, 1)) { return 0; } } else { if (arm_cmse_addr_range_read_ok(_addr, _size, 1)) { return 0; } } #if defined(CONFIG_CPU_HAS_TEE) /* * Validation failure may be due to SAU/IDAU presence. * We re-check user accessibility based on MPU only. */ int32_t r_index_base = arm_cmse_mpu_region_get(_addr); int32_t r_index_last = arm_cmse_mpu_region_get(_addr + _size - 1); if ((r_index_base != -EINVAL) && (r_index_base == r_index_last)) { /* Valid MPU region, check permissions on base address only. */ if (write) { if (arm_cmse_addr_readwrite_ok(_addr, 1)) { return 0; } } else { if (arm_cmse_addr_read_ok(_addr, 1)) { return 0; } } } #endif /* CONFIG_CPU_HAS_TEE */ return -EPERM; } #endif /* CONFIG_AARCH32_ARMV8_R */ #endif /* CONFIG_USERSPACE */ static int region_allocate_and_init(const uint8_t index, const struct arm_mpu_region *region_conf); static int mpu_configure_region(const uint8_t index, const struct z_arm_mpu_partition *new_region); #if !defined(CONFIG_MPU_GAP_FILLING) static int mpu_configure_regions(const struct z_arm_mpu_partition regions[], uint8_t regions_num, uint8_t start_reg_index, bool do_sanity_check); #endif /* This internal function programs a set of given MPU regions * over a background memory area, optionally performing a * sanity check of the memory regions to be programmed. * * The function performs a full partition of the background memory * area, effectively, leaving no space in this area uncovered by MPU. */ static int mpu_configure_regions_and_partition(const struct z_arm_mpu_partition regions[], uint8_t regions_num, uint8_t start_reg_index, bool do_sanity_check) { int i; int reg_index = start_reg_index; for (i = 0; i < regions_num; i++) { if (regions[i].size == 0U) { continue; } /* Non-empty region. 
*/ if (do_sanity_check && (!mpu_partition_is_valid(&regions[i]))) { LOG_ERR("Partition %u: sanity check failed.", i); return -EINVAL; } /* Derive the index of the underlying MPU region, * inside which the new region will be configured. */ int u_reg_index = get_region_index(regions[i].start, regions[i].size); if ((u_reg_index == -EINVAL) || (u_reg_index > (reg_index - 1))) { LOG_ERR("Invalid underlying region index %u", u_reg_index); return -EINVAL; } /* * The new memory region is to be placed inside the underlying * region, possibly splitting the underlying region into two. */ uint32_t u_reg_base = mpu_region_get_base(u_reg_index); uint32_t u_reg_last = mpu_region_get_last_addr(u_reg_index); uint32_t reg_last = regions[i].start + regions[i].size - 1; if ((regions[i].start == u_reg_base) && (reg_last == u_reg_last)) { /* The new region overlaps entirely with the * underlying region. In this case we simply * update the partition attributes of the * underlying region with those of the new * region. */ mpu_configure_region(u_reg_index, &regions[i]); } else if (regions[i].start == u_reg_base) { /* The new region starts exactly at the start of the * underlying region; the start of the underlying * region needs to be set to the end of the new region. */ mpu_region_set_base(u_reg_index, regions[i].start + regions[i].size); reg_index = mpu_configure_region(reg_index, &regions[i]); if (reg_index == -EINVAL) { return reg_index; } reg_index++; } else if (reg_last == u_reg_last) { /* The new region ends exactly at the end of the * underlying region; the end of the underlying * region needs to be set to the start of the * new region. */ mpu_region_set_limit(u_reg_index, regions[i].start - 1); reg_index = mpu_configure_region(reg_index, &regions[i]); if (reg_index == -EINVAL) { return reg_index; } reg_index++; } else { /* The new regions lies strictly inside the * underlying region, which needs to split * into two regions. 
*/ mpu_region_set_limit(u_reg_index, regions[i].start - 1); reg_index = mpu_configure_region(reg_index, &regions[i]); if (reg_index == -EINVAL) { return reg_index; } reg_index++; /* The additional region shall have the same * access attributes as the initial underlying * region. */ struct arm_mpu_region fill_region; mpu_region_get_access_attr(u_reg_index, &fill_region.attr); fill_region.base = regions[i].start + regions[i].size; fill_region.attr.r_limit = REGION_LIMIT_ADDR((regions[i].start + regions[i].size), (u_reg_last - reg_last)); reg_index = region_allocate_and_init(reg_index, (const struct arm_mpu_region *) &fill_region); if (reg_index == -EINVAL) { return reg_index; } reg_index++; } } return reg_index; } /* This internal function programs the static MPU regions. * * It returns the number of MPU region indices configured. * * Note: * If the static MPU regions configuration has not been successfully * performed, the error signal is propagated to the caller of the function. */ static int mpu_configure_static_mpu_regions(const struct z_arm_mpu_partition static_regions[], const uint8_t regions_num, const uint32_t background_area_base, const uint32_t background_area_end) { int mpu_reg_index = static_regions_num; /* In ARMv8-M architecture the static regions are programmed on SRAM, * forming a full partition of the background area, specified by the * given boundaries. */ ARG_UNUSED(background_area_base); ARG_UNUSED(background_area_end); mpu_reg_index = mpu_configure_regions_and_partition(static_regions, regions_num, mpu_reg_index, true); static_regions_num = mpu_reg_index; return mpu_reg_index; } /* This internal function marks and stores the configuration of memory areas * where dynamic region programming is allowed. Return zero on success, or * -EINVAL on error. 
*/ static int mpu_mark_areas_for_dynamic_regions( const struct z_arm_mpu_partition dyn_region_areas[], const uint8_t dyn_region_areas_num) { /* In ARMv8-M architecture we need to store the index values * and the default configuration of the MPU regions, inside * which dynamic memory regions may be programmed at run-time. */ for (int i = 0; i < dyn_region_areas_num; i++) { if (dyn_region_areas[i].size == 0U) { continue; } /* Non-empty area */ /* Retrieve HW MPU region index */ dyn_reg_info[i].index = get_region_index(dyn_region_areas[i].start, dyn_region_areas[i].size); if (dyn_reg_info[i].index == -EINVAL) { return -EINVAL; } if (dyn_reg_info[i].index >= static_regions_num) { return -EINVAL; } /* Store default configuration */ mpu_region_get_conf(dyn_reg_info[i].index, &dyn_reg_info[i].region_conf); } return 0; } /** * Get the number of supported MPU regions. */ static inline uint8_t get_num_regions(void) { return mpu_get_num_regions(); } /* This internal function programs the dynamic MPU regions. * * It returns the number of MPU region indices configured. * * Note: * If the dynamic MPU regions configuration has not been successfully * performed, the error signal is propagated to the caller of the function. */ static int mpu_configure_dynamic_mpu_regions(const struct z_arm_mpu_partition dynamic_regions[], uint8_t regions_num) { int mpu_reg_index = static_regions_num; /* Disable all MPU regions except for the static ones. */ for (int i = mpu_reg_index; i < get_num_regions(); i++) { mpu_clear_region(i); } #if defined(CONFIG_MPU_GAP_FILLING) /* Reset MPU regions inside which dynamic memory regions may * be programmed. */ for (int i = 0; i < MPU_DYNAMIC_REGION_AREAS_NUM; i++) { region_init(dyn_reg_info[i].index, &dyn_reg_info[i].region_conf); } /* In ARMv8-M architecture the dynamic regions are programmed on SRAM, * forming a full partition of the background area, specified by the * given boundaries. 
*/ mpu_reg_index = mpu_configure_regions_and_partition(dynamic_regions, regions_num, mpu_reg_index, true); #else /* We are going to skip the full partition of the background areas. * So we can disable MPU regions inside which dynamic memory regions * may be programmed. */ for (int i = 0; i < MPU_DYNAMIC_REGION_AREAS_NUM; i++) { mpu_clear_region(dyn_reg_info[i].index); } /* The dynamic regions are now programmed on top of * existing SRAM region configuration. */ mpu_reg_index = mpu_configure_regions(dynamic_regions, regions_num, mpu_reg_index, true); #endif /* CONFIG_MPU_GAP_FILLING */ return mpu_reg_index; } #endif /* ZEPHYR_ARCH_ARM_CORE_AARCH32_MPU_ARM_MPU_V8_INTERNAL_H_ */ ```
/content/code_sandbox/arch/arm/core/mpu/arm_mpu_v8_internal.h
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,474
```c /* * */ #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/kernel.h> #include <zephyr/kernel_structs.h> #include "arm_core_mpu_dev.h" #include <zephyr/linker/linker-defs.h> #define LOG_LEVEL CONFIG_MPU_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(mpu); extern void arm_core_mpu_enable(void); extern void arm_core_mpu_disable(void); /* * Maximum number of dynamic memory partitions that may be supplied to the MPU * driver for programming during run-time. Note that the actual number of the * available MPU regions for dynamic programming depends on the number of the * static MPU regions currently being programmed, and the total number of HW- * available MPU regions. This macro is only used internally in function * z_arm_configure_dynamic_mpu_regions(), to reserve sufficient area for the * array of dynamic regions passed to the underlying driver. */ #if defined(CONFIG_USERSPACE) #define _MAX_DYNAMIC_MPU_REGIONS_NUM \ CONFIG_MAX_DOMAIN_PARTITIONS + /* User thread stack */ 1 + \ (IS_ENABLED(CONFIG_MPU_STACK_GUARD) ? 1 : 0) #else #define _MAX_DYNAMIC_MPU_REGIONS_NUM \ (IS_ENABLED(CONFIG_MPU_STACK_GUARD) ? 1 : 0) #endif /* CONFIG_USERSPACE */ /* Convenience macros to denote the start address and the size of the system * memory area, where dynamic memory regions may be programmed at run-time. 
*/ #if defined(CONFIG_USERSPACE) #define _MPU_DYNAMIC_REGIONS_AREA_START ((uint32_t)&_app_smem_start) #else #define _MPU_DYNAMIC_REGIONS_AREA_START ((uint32_t)&__kernel_ram_start) #endif /* CONFIG_USERSPACE */ #define _MPU_DYNAMIC_REGIONS_AREA_SIZE ((uint32_t)&__kernel_ram_end - \ _MPU_DYNAMIC_REGIONS_AREA_START) #if !defined(CONFIG_MULTITHREADING) && defined(CONFIG_MPU_STACK_GUARD) K_THREAD_STACK_DECLARE(z_main_stack, CONFIG_MAIN_STACK_SIZE); #endif #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) \ && defined(CONFIG_MPU_STACK_GUARD) uint32_t z_arm_mpu_stack_guard_and_fpu_adjust(struct k_thread *thread); #endif #if defined(CONFIG_CODE_DATA_RELOCATION_SRAM) extern char __ram_text_reloc_start[]; extern char __ram_text_reloc_size[]; #endif static const struct z_arm_mpu_partition static_regions[] = { #if defined(CONFIG_COVERAGE_GCOV) && defined(CONFIG_USERSPACE) { /* GCOV code coverage accounting area. Needs User permissions * to function */ .start = (uint32_t)&__gcov_bss_start, .size = (uint32_t)&__gcov_bss_size, .attr = K_MEM_PARTITION_P_RW_U_RW, }, #endif /* CONFIG_COVERAGE_GCOV && CONFIG_USERSPACE */ #if defined(CONFIG_NOCACHE_MEMORY) { /* Special non-cacheable RAM area */ .start = (uint32_t)&_nocache_ram_start, .size = (uint32_t)&_nocache_ram_size, .attr = K_MEM_PARTITION_P_RW_U_NA_NOCACHE, }, #endif /* CONFIG_NOCACHE_MEMORY */ #if defined(CONFIG_ARCH_HAS_RAMFUNC_SUPPORT) { /* Special RAM area for program text */ .start = (uint32_t)&__ramfunc_start, .size = (uint32_t)&__ramfunc_size, .attr = K_MEM_PARTITION_P_RX_U_RX, }, #endif /* CONFIG_ARCH_HAS_RAMFUNC_SUPPORT */ #if defined(CONFIG_CODE_DATA_RELOCATION_SRAM) { /* RAM area for relocated text */ .start = (uint32_t)&__ram_text_reloc_start, .size = (uint32_t)&__ram_text_reloc_size, .attr = K_MEM_PARTITION_P_RX_U_RX, }, #endif /* CONFIG_CODE_DATA_RELOCATION_SRAM */ #if !defined(CONFIG_MULTITHREADING) && defined(CONFIG_MPU_STACK_GUARD) /* Main stack MPU guard to detect overflow. 
* Note: * FPU_SHARING and USERSPACE are not supported features * under CONFIG_MULTITHREADING=n, so the MPU guard (if * exists) is reserved aside of CONFIG_MAIN_STACK_SIZE * and there is no requirement for larger guard area (FP * context is not stacked). */ { .start = (uint32_t)z_main_stack, .size = (uint32_t)MPU_GUARD_ALIGN_AND_SIZE, .attr = K_MEM_PARTITION_P_RO_U_NA, }, #endif /* !CONFIG_MULTITHREADING && CONFIG_MPU_STACK_GUARD */ }; /** * @brief Use the HW-specific MPU driver to program * the static MPU regions. * * Program the static MPU regions using the HW-specific MPU driver. The * function is meant to be invoked only once upon system initialization. * * If the function attempts to configure a number of regions beyond the * MPU HW limitations, the system behavior will be undefined. * * For some MPU architectures, such as the unmodified ARMv8-M MPU, * the function must execute with MPU enabled. */ void z_arm_configure_static_mpu_regions(void) { /* Configure the static MPU regions within firmware SRAM boundaries. * Start address of the image is given by _image_ram_start. The end * of the firmware SRAM area is marked by __kernel_ram_end, taking * into account the unused SRAM area, as well. */ #ifdef CONFIG_AARCH32_ARMV8_R arm_core_mpu_disable(); #endif arm_core_mpu_configure_static_mpu_regions(static_regions, ARRAY_SIZE(static_regions), (uint32_t)&_image_ram_start, (uint32_t)&__kernel_ram_end); #ifdef CONFIG_AARCH32_ARMV8_R arm_core_mpu_enable(); #endif #if defined(CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS) && \ defined(CONFIG_MULTITHREADING) /* Define a constant array of z_arm_mpu_partition objects that holds the * boundaries of the areas, inside which dynamic region programming * is allowed. The information is passed to the underlying driver at * initialization. 
*/ const struct z_arm_mpu_partition dyn_region_areas[] = { { .start = _MPU_DYNAMIC_REGIONS_AREA_START, .size = _MPU_DYNAMIC_REGIONS_AREA_SIZE, } }; arm_core_mpu_mark_areas_for_dynamic_regions(dyn_region_areas, ARRAY_SIZE(dyn_region_areas)); #endif /* CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS */ } /** * @brief Use the HW-specific MPU driver to program * the dynamic MPU regions. * * Program the dynamic MPU regions using the HW-specific MPU * driver. This function is meant to be invoked every time the * memory map is to be re-programmed, e.g during thread context * switch, entering user mode, reconfiguring memory domain, etc. * * For some MPU architectures, such as the unmodified ARMv8-M MPU, * the function must execute with MPU enabled. * * This function is not inherently thread-safe, but the memory domain * spinlock needs to be held anyway. */ void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread) { /* Define an array of z_arm_mpu_partition objects to hold the configuration * of the respective dynamic MPU regions to be programmed for * the given thread. The array of partitions (along with its * actual size) will be supplied to the underlying MPU driver. * * The drivers of what regions get configured are CONFIG_USERSPACE, * CONFIG_MPU_STACK_GUARD, and K_USER/supervisor threads. * * If CONFIG_USERSPACE is defined and the thread is a member of any * memory domain then any partitions defined within that domain get a * defined region. * * If CONFIG_USERSPACE is defined and the thread is a user thread * (K_USER) the usermode thread stack is defined a region. * * IF CONFIG_MPU_STACK_GUARD is defined the thread is a supervisor * thread, the stack guard will be defined in front of the * thread->stack_info.start. On a K_USER thread, the guard is defined * in front of the privilege mode stack, thread->arch.priv_stack_start. 
*/ static struct z_arm_mpu_partition dynamic_regions[_MAX_DYNAMIC_MPU_REGIONS_NUM]; uint8_t region_num = 0U; #if defined(CONFIG_USERSPACE) /* Memory domain */ LOG_DBG("configure thread %p's domain", thread); struct k_mem_domain *mem_domain = thread->mem_domain_info.mem_domain; if (mem_domain) { LOG_DBG("configure domain: %p", mem_domain); uint32_t num_partitions = mem_domain->num_partitions; struct k_mem_partition *partition; int i; LOG_DBG("configure domain: %p", mem_domain); for (i = 0; i < CONFIG_MAX_DOMAIN_PARTITIONS; i++) { partition = &mem_domain->partitions[i]; if (partition->size == 0) { /* Zero size indicates a non-existing * memory partition. */ continue; } LOG_DBG("set region 0x%lx 0x%x", partition->start, partition->size); __ASSERT(region_num < _MAX_DYNAMIC_MPU_REGIONS_NUM, "Out-of-bounds error for dynamic region map."); dynamic_regions[region_num].start = partition->start; dynamic_regions[region_num].size = partition->size; dynamic_regions[region_num].attr = partition->attr; region_num++; num_partitions--; if (num_partitions == 0U) { break; } } } /* Thread user stack */ LOG_DBG("configure user thread %p's context", thread); if (thread->arch.priv_stack_start) { /* K_USER thread stack needs a region */ uintptr_t base = (uintptr_t)thread->stack_obj; size_t size = thread->stack_info.size + (thread->stack_info.start - base); __ASSERT(region_num < _MAX_DYNAMIC_MPU_REGIONS_NUM, "Out-of-bounds error for dynamic region map."); dynamic_regions[region_num].start = base; dynamic_regions[region_num].size = size; dynamic_regions[region_num].attr = K_MEM_PARTITION_P_RW_U_RW; region_num++; } #endif /* CONFIG_USERSPACE */ #if defined(CONFIG_MPU_STACK_GUARD) /* Define a stack guard region for either the thread stack or the * supervisor/privilege mode stack depending on the type of thread * being mapped. 
*/ /* Privileged stack guard */ uintptr_t guard_start; size_t guard_size = MPU_GUARD_ALIGN_AND_SIZE; #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) guard_size = z_arm_mpu_stack_guard_and_fpu_adjust(thread); #endif #if defined(CONFIG_USERSPACE) if (thread->arch.priv_stack_start) { /* A K_USER thread has the stack guard protecting the privilege * stack and not on the usermode stack because the user mode * stack already has its own defined memory region. */ guard_start = thread->arch.priv_stack_start - guard_size; __ASSERT((uintptr_t)&z_priv_stacks_ram_start <= guard_start, "Guard start: (0x%lx) below privilege stacks boundary: (%p)", guard_start, z_priv_stacks_ram_start); } else #endif /* CONFIG_USERSPACE */ { /* A supervisor thread only has the normal thread stack to * protect with a stack guard. */ guard_start = thread->stack_info.start - guard_size; #ifdef CONFIG_USERSPACE __ASSERT((uintptr_t)thread->stack_obj == guard_start, "Guard start (0x%lx) not beginning at stack object (%p)\n", guard_start, thread->stack_obj); #endif /* CONFIG_USERSPACE */ } __ASSERT(region_num < _MAX_DYNAMIC_MPU_REGIONS_NUM, "Out-of-bounds error for dynamic region map."); dynamic_regions[region_num].start = guard_start; dynamic_regions[region_num].size = guard_size; dynamic_regions[region_num].attr = K_MEM_PARTITION_P_RO_U_NA; region_num++; #endif /* CONFIG_MPU_STACK_GUARD */ /* Configure the dynamic MPU regions */ #ifdef CONFIG_AARCH32_ARMV8_R arm_core_mpu_disable(); #endif arm_core_mpu_configure_dynamic_mpu_regions(dynamic_regions, region_num); #ifdef CONFIG_AARCH32_ARMV8_R arm_core_mpu_enable(); #endif } #if defined(CONFIG_USERSPACE) int arch_mem_domain_max_partitions_get(void) { int available_regions = arm_core_mpu_get_max_available_dyn_regions(); available_regions -= ARM_CORE_MPU_NUM_MPU_REGIONS_FOR_THREAD_STACK; if (IS_ENABLED(CONFIG_MPU_STACK_GUARD)) { available_regions -= ARM_CORE_MPU_NUM_MPU_REGIONS_FOR_MPU_STACK_GUARD; } return 
ARM_CORE_MPU_MAX_DOMAIN_PARTITIONS_GET(available_regions); } int arch_buffer_validate(const void *addr, size_t size, int write) { return arm_core_mpu_buffer_validate(addr, size, write); } #endif /* CONFIG_USERSPACE */ ```
/content/code_sandbox/arch/arm/core/mpu/arm_core_mpu.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,923
```c /* * */ #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/kernel.h> #include <soc.h> #include "arm_core_mpu_dev.h" #include <zephyr/sys/__assert.h> #include <zephyr/sys/math_extras.h> #include <zephyr/sys/barrier.h> #include <zephyr/linker/linker-defs.h> #include <zephyr/mem_mgmt/mem_attr.h> #include <zephyr/dt-bindings/memory-attr/memory-attr-arm.h> #define LOG_LEVEL CONFIG_MPU_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_DECLARE(mpu); #define NODE_HAS_PROP_AND_OR(node_id, prop) \ DT_NODE_HAS_PROP(node_id, prop) || BUILD_ASSERT((DT_FOREACH_STATUS_OKAY_NODE_VARGS( NODE_HAS_PROP_AND_OR, zephyr_memory_region_mpu) false) == false, "`zephyr,memory-region-mpu` was deprecated in favor of `zephyr,memory-attr`"); /* * Global status variable holding the number of HW MPU region indices, which * have been reserved by the MPU driver to program the static (fixed) memory * regions. */ static uint8_t static_regions_num; /* Global MPU configuration at system initialization. */ static void mpu_init(void) { #if defined(CONFIG_SOC_FAMILY_KINETIS) /* Enable clock for the Memory Protection Unit (MPU). */ CLOCK_EnableClock(kCLOCK_Sysmpu0); #endif } /** * Get the number of supported MPU regions. */ static inline uint8_t get_num_regions(void) { return FSL_FEATURE_SYSMPU_DESCRIPTOR_COUNT; } /* @brief Partition sanity check * * This internal function performs run-time sanity check for * MPU region start address and size. * * @param part Pointer to the data structure holding the partition * information (must be valid). */ static int mpu_partition_is_valid(const struct z_arm_mpu_partition *part) { /* Partition size must be a multiple of the minimum MPU region * size. Start address of the partition must align with the * minimum MPU region size. 
*/ int partition_is_valid = (part->size != 0U) && ((part->size & (~(CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE - 1))) == part->size) && ((part->start & (CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE - 1)) == 0U); return partition_is_valid; } /* This internal function performs MPU region initialization. * * Note: * The caller must provide a valid region index. */ static void region_init(const uint32_t index, const struct nxp_mpu_region *region_conf) { uint32_t region_base = region_conf->base; uint32_t region_end = region_conf->end; uint32_t region_attr = region_conf->attr.attr; if (index == 0U) { /* The MPU does not allow writes from the core to affect the * RGD0 start or end addresses nor the permissions associated * with the debugger; it can only write the permission fields * associated with the other masters. These protections * guarantee that the debugger always has access to the entire * address space. */ __ASSERT(region_base == SYSMPU->WORD[index][0], "Region %d base address got 0x%08x expected 0x%08x", index, region_base, (uint32_t)SYSMPU->WORD[index][0]); __ASSERT(region_end == SYSMPU->WORD[index][1], "Region %d end address got 0x%08x expected 0x%08x", index, region_end, (uint32_t)SYSMPU->WORD[index][1]); /* Changes to the RGD0_WORD2 alterable fields should be done * via a write to RGDAAC0. */ SYSMPU->RGDAAC[index] = region_attr; } else { SYSMPU->WORD[index][0] = region_base; SYSMPU->WORD[index][1] = region_end; SYSMPU->WORD[index][2] = region_attr; SYSMPU->WORD[index][3] = SYSMPU_WORD_VLD_MASK; } LOG_DBG("[%02d] 0x%08x 0x%08x 0x%08x 0x%08x", index, (uint32_t)SYSMPU->WORD[index][0], (uint32_t)SYSMPU->WORD[index][1], (uint32_t)SYSMPU->WORD[index][2], (uint32_t)SYSMPU->WORD[index][3]); } static int region_allocate_and_init(const uint8_t index, const struct nxp_mpu_region *region_conf) { /* Attempt to allocate new region index. */ if (index > (get_num_regions() - 1)) { /* No available MPU region index. 
*/ LOG_ERR("Failed to allocate new MPU region %u\n", index); return -EINVAL; } LOG_DBG("Program MPU region at index 0x%x", index); /* Program region */ region_init(index, region_conf); return index; } #define _BUILD_REGION_CONF(reg, _ATTR) \ (struct nxp_mpu_region) { .name = (reg).dt_name, \ .base = (reg).dt_addr, \ .end = (reg).dt_addr + (reg).dt_size, \ .attr = _ATTR, \ } /* This internal function programs the MPU regions defined in the DT when using * the `zephyr,memory-attr = <( DT_MEM_ARM(...) )>` property. */ static int mpu_configure_regions_from_dt(uint8_t *reg_index) { const struct mem_attr_region_t *region; size_t num_regions; num_regions = mem_attr_get_regions(&region); for (size_t idx = 0; idx < num_regions; idx++) { struct nxp_mpu_region region_conf; switch (DT_MEM_ARM_GET(region[idx].dt_attr)) { case DT_MEM_ARM_MPU_RAM: region_conf = _BUILD_REGION_CONF(region[idx], REGION_RAM_ATTR); break; #ifdef REGION_FLASH_ATTR case DT_MEM_ARM_MPU_FLASH: region_conf = _BUILD_REGION_CONF(region[idx], REGION_FLASH_ATTR); break; #endif #ifdef REGION_IO_ATTR case DT_MEM_ARM_MPU_IO: region_conf = _BUILD_REGION_CONF(region[idx], REGION_IO_ATTR); break; #endif default: /* Either the specified `ATTR_MPU_*` attribute does not * exists or the `REGION_*_ATTR` macro is not defined * for that attribute. */ LOG_ERR("Invalid attribute for the region\n"); return -EINVAL; } if (region_allocate_and_init((*reg_index), (const struct nxp_mpu_region *) &region_conf) < 0) { return -EINVAL; } (*reg_index)++; } return 0; } /** * This internal function is utilized by the MPU driver to combine a given * region attribute configuration and size and fill-in a driver-specific * structure with the correct MPU region attribute configuration. 
*/ static inline void get_region_attr_from_mpu_partition_info( nxp_mpu_region_attr_t *p_attr, const k_mem_partition_attr_t *attr, uint32_t base, uint32_t size) { /* in NXP MPU the base address and size are not required * to determine region attributes */ (void) base; (void) size; p_attr->attr = attr->ap_attr; } /* This internal function programs an MPU region * of a given configuration at a given MPU index. */ static int mpu_configure_region(const uint8_t index, const struct z_arm_mpu_partition *new_region) { struct nxp_mpu_region region_conf; LOG_DBG("Configure MPU region at index 0x%x", index); /* Populate internal NXP MPU region configuration structure. */ region_conf.base = new_region->start; region_conf.end = (new_region->start + new_region->size - 1); get_region_attr_from_mpu_partition_info(&region_conf.attr, &new_region->attr, new_region->start, new_region->size); /* Allocate and program region */ return region_allocate_and_init(index, (const struct nxp_mpu_region *)&region_conf); } #if defined(CONFIG_MPU_STACK_GUARD) /* This internal function partitions the SRAM MPU region */ static int mpu_sram_partitioning(uint8_t index, const struct z_arm_mpu_partition *p_region) { /* * The NXP MPU manages the permissions of the overlapping regions * doing the logical OR in between them, hence they can't be used * for stack/stack guard protection. For this reason we need to * perform a partitioning of the SRAM area in such a way that the * guard region does not overlap with the (background) SRAM regions * holding the default SRAM access permission configuration. * In other words, the SRAM is split in two different regions. */ /* * SRAM partitioning needs to be performed in a strict order. * First, we program a new MPU region with the default SRAM * access permissions for the SRAM area _after_ the stack * guard. Note that the permissions are stored in the global * array: * 'mpu_config.mpu_regions[]', on 'sram_region' index. 
*/ struct nxp_mpu_region added_sram_region; added_sram_region.base = p_region->start + p_region->size; added_sram_region.end = mpu_config.mpu_regions[mpu_config.sram_region].end; added_sram_region.attr.attr = mpu_config.mpu_regions[mpu_config.sram_region].attr.attr; if (region_allocate_and_init(index, (const struct nxp_mpu_region *)&added_sram_region) < 0) { return -EINVAL; } /* Increment, as an additional region index has been consumed. */ index++; /* Second, adjust the original SRAM region to end at the beginning * of the stack guard. */ struct nxp_mpu_region adjusted_sram_region; adjusted_sram_region.base = mpu_config.mpu_regions[mpu_config.sram_region].base; adjusted_sram_region.end = p_region->start - 1; adjusted_sram_region.attr.attr = mpu_config.mpu_regions[mpu_config.sram_region].attr.attr; region_init(mpu_config.sram_region, (const struct nxp_mpu_region *)&adjusted_sram_region); return index; } #endif /* CONFIG_MPU_STACK_GUARD */ /* This internal function programs a set of given MPU regions * over a background memory area, optionally performing a * sanity check of the memory regions to be programmed. */ static int mpu_configure_regions(const struct z_arm_mpu_partition regions[], uint8_t regions_num, uint8_t start_reg_index, bool do_sanity_check) { int i; int reg_index = start_reg_index; for (i = 0; i < regions_num; i++) { if (regions[i].size == 0U) { continue; } /* Non-empty region. */ if (do_sanity_check && (!mpu_partition_is_valid(&regions[i]))) { LOG_ERR("Partition %u: sanity check failed.", i); return -EINVAL; } #if defined(CONFIG_MPU_STACK_GUARD) if (regions[i].attr.ap_attr == MPU_REGION_SU_RX) { unsigned int key; /* Attempt to configure an MPU Stack Guard region; this * will require splitting of the underlying SRAM region * into two SRAM regions, leaving out the guard area to * be programmed afterwards. 
*/ key = irq_lock(); reg_index = mpu_sram_partitioning(reg_index, &regions[i]); irq_unlock(key); } #endif /* CONFIG_MPU_STACK_GUARD */ if (reg_index == -EINVAL) { return reg_index; } reg_index = mpu_configure_region(reg_index, &regions[i]); if (reg_index == -EINVAL) { return reg_index; } /* Increment number of programmed MPU indices. */ reg_index++; } return reg_index; } /* This internal function programs the static MPU regions. * * It returns the number of MPU region indices configured. * * Note: * If the static MPU regions configuration has not been successfully * performed, the error signal is propagated to the caller of the function. */ static int mpu_configure_static_mpu_regions( const struct z_arm_mpu_partition static_regions[], const uint8_t regions_num, const uint32_t background_area_base, const uint32_t background_area_end) { int mpu_reg_index = static_regions_num; /* In NXP MPU architecture the static regions are * programmed on top of SRAM region configuration. */ ARG_UNUSED(background_area_base); ARG_UNUSED(background_area_end); mpu_reg_index = mpu_configure_regions(static_regions, regions_num, mpu_reg_index, true); static_regions_num = mpu_reg_index; return mpu_reg_index; } /* This internal function programs the dynamic MPU regions. * * It returns the number of MPU region indices configured. * * Note: * If the dynamic MPU regions configuration has not been successfully * performed, the error signal is propagated to the caller of the function. */ static int mpu_configure_dynamic_mpu_regions( const struct z_arm_mpu_partition dynamic_regions[], uint8_t regions_num) { unsigned int key; /* * Programming the NXP MPU has to be done with care to avoid race * conditions that will cause memory faults. The NXP MPU is composed * of a number of memory region descriptors. The number of descriptors * varies depending on the SOC. Each descriptor has a start addr, end * addr, attribute, and valid. 
When the MPU is enabled, access to * memory space is checked for access protection errors through an * OR operation of all of the valid MPU descriptors. * * Writing the start/end/attribute descriptor register will clear the * valid bit for that descriptor. This presents a problem because if * the current program stack is in that region or if an ISR occurs * that switches state and uses that region a memory fault will be * triggered. Note that local variable access can also cause stack * accesses while programming these registers depending on the compiler * optimization level. * * To avoid the race condition a temporary descriptor is set to enable * access to all of memory before the call to mpu_configure_regions() * to configure the dynamic memory regions. After, the temporary * descriptor is invalidated if the mpu_configure_regions() didn't * overwrite it. */ key = irq_lock(); /* Use last descriptor region as temporary descriptor */ region_init(get_num_regions()-1, (const struct nxp_mpu_region *) &mpu_config.mpu_regions[mpu_config.sram_region]); /* Now reset the main SRAM region */ region_init(mpu_config.sram_region, (const struct nxp_mpu_region *) &mpu_config.mpu_regions[mpu_config.sram_region]); irq_unlock(key); int mpu_reg_index = static_regions_num; /* In NXP MPU architecture the dynamic regions are * programmed on top of existing SRAM region configuration. */ mpu_reg_index = mpu_configure_regions(dynamic_regions, regions_num, mpu_reg_index, false); if (mpu_reg_index != -EINVAL) { /* Disable the non-programmed MPU regions. 
*/ for (int i = mpu_reg_index; i < get_num_regions(); i++) { LOG_DBG("disable region 0x%x", i); /* Disable region */ SYSMPU->WORD[i][0] = 0; SYSMPU->WORD[i][1] = 0; SYSMPU->WORD[i][2] = 0; SYSMPU->WORD[i][3] = 0; } } return mpu_reg_index; } /* ARM Core MPU Driver API Implementation for NXP MPU */ /** * @brief enable the MPU */ void arm_core_mpu_enable(void) { /* Enable MPU */ SYSMPU->CESR |= SYSMPU_CESR_VLD_MASK; /* Make sure that all the registers are set before proceeding */ barrier_dsync_fence_full(); barrier_isync_fence_full(); } /** * @brief disable the MPU */ void arm_core_mpu_disable(void) { /* Force any outstanding transfers to complete before disabling MPU */ barrier_dmem_fence_full(); /* Disable MPU */ SYSMPU->CESR &= ~SYSMPU_CESR_VLD_MASK; /* Clear MPU error status */ SYSMPU->CESR |= SYSMPU_CESR_SPERR_MASK; } #if defined(CONFIG_USERSPACE) static inline uint32_t mpu_region_get_base(uint32_t r_index) { return SYSMPU->WORD[r_index][0]; } static inline uint32_t mpu_region_get_size(uint32_t r_index) { /* <END> + 1 - <BASE> */ return (SYSMPU->WORD[r_index][1] + 1) - SYSMPU->WORD[r_index][0]; } /** * This internal function checks if region is enabled or not. * * Note: * The caller must provide a valid region number. */ static inline int is_enabled_region(uint32_t r_index) { return SYSMPU->WORD[r_index][3] & SYSMPU_WORD_VLD_MASK; } /** * This internal function checks if the given buffer is in the region. * * Note: * The caller must provide a valid region number. */ static inline int is_in_region(uint32_t r_index, uint32_t start, uint32_t size) { uint32_t r_addr_start; uint32_t r_addr_end; uint32_t end; r_addr_start = SYSMPU->WORD[r_index][0]; r_addr_end = SYSMPU->WORD[r_index][1]; size = size == 0U ? 
0U : size - 1U; if (u32_add_overflow(start, size, &end)) { return 0; } if ((start >= r_addr_start) && (end <= r_addr_end)) { return 1; } return 0; } /** * @brief update configuration of an active memory partition */ void arm_core_mpu_mem_partition_config_update( struct z_arm_mpu_partition *partition, k_mem_partition_attr_t *new_attr) { /* Find the partition. ASSERT if not found. */ uint8_t i; uint8_t reg_index = get_num_regions(); for (i = static_regions_num; i < get_num_regions(); i++) { if (!is_enabled_region(i)) { continue; } uint32_t base = mpu_region_get_base(i); if (base != partition->start) { continue; } uint32_t size = mpu_region_get_size(i); if (size != partition->size) { continue; } /* Region found */ reg_index = i; break; } __ASSERT(reg_index != get_num_regions(), "Memory domain partition not found\n"); /* Modify the permissions */ partition->attr = *new_attr; mpu_configure_region(reg_index, partition); } /** * @brief get the maximum number of available (free) MPU region indices * for configuring dynamic MPU partitions */ int arm_core_mpu_get_max_available_dyn_regions(void) { return get_num_regions() - static_regions_num; } /** * This internal function checks if the region is user accessible or not * * Note: * The caller must provide a valid region number. */ static inline int is_user_accessible_region(uint32_t r_index, int write) { uint32_t r_ap = SYSMPU->WORD[r_index][2]; if (write != 0) { return (r_ap & MPU_REGION_WRITE) == MPU_REGION_WRITE; } return (r_ap & MPU_REGION_READ) == MPU_REGION_READ; } /** * @brief validate the given buffer is user accessible or not */ int arm_core_mpu_buffer_validate(const void *addr, size_t size, int write) { uint8_t r_index; /* Iterate through all MPU regions */ for (r_index = 0U; r_index < get_num_regions(); r_index++) { if (!is_enabled_region(r_index) || !is_in_region(r_index, (uint32_t)addr, size)) { continue; } /* For NXP MPU, priority is given to granting permission over * denying access for overlapping region. 
* So we can stop the iteration immediately once we find the * matched region that grants permission. */ if (is_user_accessible_region(r_index, write)) { return 0; } } return -EPERM; } #endif /* CONFIG_USERSPACE */ /** * @brief configure fixed (static) MPU regions. */ void arm_core_mpu_configure_static_mpu_regions( const struct z_arm_mpu_partition static_regions[], const uint8_t regions_num, const uint32_t background_area_start, const uint32_t background_area_end) { if (mpu_configure_static_mpu_regions(static_regions, regions_num, background_area_start, background_area_end) == -EINVAL) { __ASSERT(0, "Configuring %u static MPU regions failed\n", regions_num); } } /** * @brief configure dynamic MPU regions. */ void arm_core_mpu_configure_dynamic_mpu_regions( const struct z_arm_mpu_partition dynamic_regions[], uint8_t regions_num) { if (mpu_configure_dynamic_mpu_regions(dynamic_regions, regions_num) == -EINVAL) { __ASSERT(0, "Configuring %u dynamic MPU regions failed\n", regions_num); } } /* NXP MPU Driver Initial Setup */ /* * @brief MPU default configuration * * This function provides the default configuration mechanism for the Memory * Protection Unit (MPU). */ int z_arm_mpu_init(void) { uint32_t r_index; if (mpu_config.num_regions > get_num_regions()) { /* Attempt to configure more MPU regions than * what is supported by hardware. As this operation * may be executed during system (pre-kernel) initialization, * we want to ensure we can detect an attempt to * perform invalid configuration. */ __ASSERT(0, "Request to configure: %u regions (supported: %u)\n", mpu_config.num_regions, get_num_regions() ); return -1; } LOG_DBG("total region count: %d", get_num_regions()); arm_core_mpu_disable(); /* Architecture-specific configuration */ mpu_init(); /* Program fixed regions configured at SOC definition. 
*/ for (r_index = 0U; r_index < mpu_config.num_regions; r_index++) { region_init(r_index, &mpu_config.mpu_regions[r_index]); } /* Update the number of programmed MPU regions. */ static_regions_num = mpu_config.num_regions; /* DT-defined MPU regions. */ if (mpu_configure_regions_from_dt(&static_regions_num) == -EINVAL) { __ASSERT(0, "Failed to allocate MPU regions from DT\n"); return -EINVAL; } arm_core_mpu_enable(); return 0; } ```
/content/code_sandbox/arch/arm/core/mpu/nxp_mpu.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,164
```objective-c /* * */ #ifndef ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_M_MPU_ARM_CORE_MPU_DEV_H_ #define ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_M_MPU_ARM_CORE_MPU_DEV_H_ #include <zephyr/types.h> #include <kernel_arch_data.h> #ifdef __cplusplus extern "C" { #endif #if defined(CONFIG_ARM_MPU) struct k_thread; #if defined(CONFIG_USERSPACE) /** * @brief Maximum number of memory domain partitions * * This internal macro returns the maximum number of memory partitions, which * may be defined in a memory domain, given the amount of available HW MPU * regions. * * @param mpu_regions_num the number of available HW MPU regions. */ #if defined(CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS) && \ defined(CONFIG_MPU_GAP_FILLING) /* * For ARM MPU architectures, where the domain partitions cannot be defined * on top of the statically configured memory regions, the maximum number of * memory domain partitions is set to half of the number of available MPU * regions. This ensures that in the worst-case where there are gaps between * the memory partitions of the domain, the desired memory map can still be * programmed using the available number of HW MPU regions. */ #define ARM_CORE_MPU_MAX_DOMAIN_PARTITIONS_GET(mpu_regions_num) \ (mpu_regions_num/2) #else /* * For ARM MPU architectures, where the domain partitions can be defined * on top of the statically configured memory regions, the maximum number * of memory domain partitions is equal to the number of available MPU regions. */ #define ARM_CORE_MPU_MAX_DOMAIN_PARTITIONS_GET(mpu_regions_num) \ (mpu_regions_num) #endif /* CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS */ /** * @brief Maximum number of MPU regions required to configure a * memory region for (user) Thread Stack. 
*/ #if defined(CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS) && \ defined(CONFIG_MPU_GAP_FILLING) /* When dynamic regions may not be defined on top of statically * allocated memory regions, defining a region for a thread stack * requires two additional MPU regions to be configured; one for * defining the thread stack and an additional one for partitioning * the underlying memory area. */ #define ARM_CORE_MPU_NUM_MPU_REGIONS_FOR_THREAD_STACK 2 #else /* When dynamic regions may be defined on top of statically allocated * memory regions, a thread stack area may be configured using a * single MPU region. */ #define ARM_CORE_MPU_NUM_MPU_REGIONS_FOR_THREAD_STACK 1 #endif /* CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS */ /** * @brief Maximum number of MPU regions required to configure a * memory region for a (supervisor) Thread Stack Guard. */ #if (defined(CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS) && \ defined(CONFIG_MPU_GAP_FILLING)) \ || defined(CONFIG_CPU_HAS_NXP_MPU) /* * When dynamic regions may not be defined on top of statically * allocated memory regions, defining a region for a supervisor * thread stack guard requires two additional MPU regions to be * configured; one for defining the stack guard and an additional * one for partitioning the underlying memory area. * * The same is required for the NXP MPU due to its OR-based decision * policy; the MPU stack guard applies more restrictive permissions on * the underlying (SRAM) regions, and, therefore, we need to partition * the underlying SRAM region. */ #define ARM_CORE_MPU_NUM_MPU_REGIONS_FOR_MPU_STACK_GUARD 2 #elif defined(CONFIG_CPU_HAS_ARM_MPU) /* When dynamic regions may be defined on top of statically allocated * memory regions, a supervisor thread stack guard area may be configured * using a single MPU region. 
*/ #define ARM_CORE_MPU_NUM_MPU_REGIONS_FOR_MPU_STACK_GUARD 1 #endif /* CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS || CPU_HAS_NXP_MPU */ #endif /* CONFIG_USERSPACE */ /* ARM Core MPU Driver API */ /* * This API has to be implemented by all the MPU drivers that have * ARM_MPU support. */ /** * @brief configure a set of fixed (static) MPU regions * * Internal API function to configure a set of static MPU memory regions, * within a (background) memory area determined by start and end address. * The total number of HW MPU regions to be programmed depends on the MPU * architecture. * * The function shall be invoked once, upon system initialization. * * @param static_regions an array of pointers to memory partitions * to be programmed * @param regions_num the number of regions to be programmed * @param background_area_start the start address of the background memory area * @param background_area_end the end address of the background memory area * * The function shall assert if the operation cannot be not performed * successfully. Therefore: * - the number of HW MPU regions to be programmed shall not exceed the number * of available MPU indices, * - the size and alignment of the static regions shall comply with the * requirements of the MPU hardware. */ void arm_core_mpu_configure_static_mpu_regions( const struct z_arm_mpu_partition *static_regions, const uint8_t regions_num, const uint32_t background_area_start, const uint32_t background_area_end); #if defined(CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS) /* Number of memory areas, inside which dynamic regions * may be programmed in run-time. 
*/ #define MPU_DYNAMIC_REGION_AREAS_NUM 1 /** * @brief mark a set of memory regions as eligible for dynamic configuration * * Internal API function to configure a set of memory regions, determined * by their start address and size, as memory areas eligible for dynamically * programming MPU regions (such as a supervisor stack overflow guard) at * run-time (for example, thread upon context-switch). * * The function shall be invoked once, upon system initialization. * * @param dyn_region_areas an array of z_arm_mpu_partition objects declaring the * eligible memory areas for dynamic programming * @param dyn_region_areas_num the number of eligible areas for dynamic * programming. * * The function shall assert if the operation cannot be not performed * successfully. Therefore, the requested areas shall correspond to * static memory regions, configured earlier by * arm_core_mpu_configure_static_mpu_regions(). */ void arm_core_mpu_mark_areas_for_dynamic_regions( const struct z_arm_mpu_partition *dyn_region_areas, const uint8_t dyn_region_areas_num); #endif /* CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS */ /** * @brief configure a set of dynamic MPU regions * * Internal API function to configure a set of dynamic MPU memory regions * within a (background) memory area. The total number of HW MPU regions * to be programmed depends on the MPU architecture. * * @param dynamic_regions an array of pointers to memory partitions * to be programmed * @param regions_num the number of regions to be programmed * * The function shall assert if the operation cannot be not performed * successfully. Therefore, the number of HW MPU regions to be programmed shall * not exceed the number of (currently) available MPU indices. 
*/ void arm_core_mpu_configure_dynamic_mpu_regions( const struct z_arm_mpu_partition *dynamic_regions, uint8_t regions_num); #if defined(CONFIG_USERSPACE) /** * @brief update configuration of an active memory partition * * Internal API function to re-configure the access permissions of an * active memory partition, i.e. a partition that has earlier been * configured in the (current) thread context. * * @param partition Pointer to a structure holding the partition information * (must be valid). * @param new_attr New access permissions attribute for the partition. * * The function shall assert if the operation cannot be not performed * successfully (e.g. the given partition can not be found). */ void arm_core_mpu_mem_partition_config_update( struct z_arm_mpu_partition *partition, k_mem_partition_attr_t *new_attr); #endif /* CONFIG_USERSPACE */ /** * @brief configure the base address and size for an MPU region * * @param type MPU region type * @param base base address in RAM * @param size size of the region */ void arm_core_mpu_configure(uint8_t type, uint32_t base, uint32_t size); /** * @brief configure MPU regions for the memory partitions of the memory domain * * @param mem_domain memory domain that thread belongs to */ void arm_core_mpu_configure_mem_domain(struct k_mem_domain *mem_domain); /** * @brief configure MPU regions for a user thread's context * * @param thread thread to configure */ void arm_core_mpu_configure_user_context(struct k_thread *thread); /** * @brief configure MPU region for a single memory partition * * @param part_index memory partition index * @param part memory partition info */ void arm_core_mpu_configure_mem_partition(uint32_t part_index, struct z_arm_mpu_partition *part); /** * @brief Reset MPU region for a single memory partition * * @param part_index memory partition index */ void arm_core_mpu_mem_partition_remove(uint32_t part_index); /** * @brief Get the maximum number of available (free) MPU region indices * for configuring dynamic 
MPU regions. */ int arm_core_mpu_get_max_available_dyn_regions(void); /** * @brief validate the given buffer is user accessible or not * * Note: Validation will always return failure, if the supplied buffer * spans multiple enabled MPU regions (even if these regions all * permit user access). */ int arm_core_mpu_buffer_validate(const void *addr, size_t size, int write); #endif /* CONFIG_ARM_MPU */ #ifdef __cplusplus } #endif #endif /* ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_M_MPU_ARM_CORE_MPU_DEV_H_ */ ```
/content/code_sandbox/arch/arm/core/mpu/arm_core_mpu_dev.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,124
```c /* * */ #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/kernel.h> #include <zephyr/sys/barrier.h> #include "arm_core_mpu_dev.h" #include <zephyr/linker/linker-defs.h> #include <kernel_arch_data.h> #include <zephyr/mem_mgmt/mem_attr.h> #include <zephyr/dt-bindings/memory-attr/memory-attr-arm.h> #define LOG_LEVEL CONFIG_MPU_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_DECLARE(mpu); #if defined(CONFIG_ARMV8_M_BASELINE) || defined(CONFIG_ARMV8_M_MAINLINE) /* The order here is on purpose since ARMv8-M SoCs may define * CONFIG_ARMV6_M_ARMV8_M_BASELINE or CONFIG_ARMV7_M_ARMV8_M_MAINLINE * so we want to check for ARMv8-M first. */ #define MPU_NODEID DT_INST(0, arm_armv8m_mpu) #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) #define MPU_NODEID DT_INST(0, arm_armv7m_mpu) #elif defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) #define MPU_NODEID DT_INST(0, arm_armv6m_mpu) #endif #define NODE_HAS_PROP_AND_OR(node_id, prop) \ DT_NODE_HAS_PROP(node_id, prop) || BUILD_ASSERT((DT_FOREACH_STATUS_OKAY_NODE_VARGS( NODE_HAS_PROP_AND_OR, zephyr_memory_region_mpu) false) == false, "`zephyr,memory-region-mpu` was deprecated in favor of `zephyr,memory-attr`"); /* * Global status variable holding the number of HW MPU region indices, which * have been reserved by the MPU driver to program the static (fixed) memory * regions. */ static uint8_t static_regions_num; /* Include architecture-specific internal headers. 
*/ #if defined(CONFIG_CPU_CORTEX_M0PLUS) || \ defined(CONFIG_CPU_CORTEX_M3) || \ defined(CONFIG_CPU_CORTEX_M4) || \ defined(CONFIG_CPU_CORTEX_M7) || \ defined(CONFIG_ARMV7_R) #include "arm_mpu_v7_internal.h" #elif defined(CONFIG_CPU_CORTEX_M23) || \ defined(CONFIG_CPU_CORTEX_M33) || \ defined(CONFIG_CPU_CORTEX_M55) || \ defined(CONFIG_CPU_CORTEX_M85) || \ defined(CONFIG_AARCH32_ARMV8_R) #include "arm_mpu_v8_internal.h" #else #error "Unsupported ARM CPU" #endif static int region_allocate_and_init(const uint8_t index, const struct arm_mpu_region *region_conf) { /* Attempt to allocate new region index. */ if (index > (get_num_regions() - 1U)) { /* No available MPU region index. */ LOG_ERR("Failed to allocate new MPU region %u\n", index); return -EINVAL; } LOG_DBG("Program MPU region at index 0x%x", index); /* Program region */ region_init(index, region_conf); return index; } #define _BUILD_REGION_CONF(reg, _ATTR) \ (struct arm_mpu_region) ARM_MPU_REGION_INIT((reg).dt_name, \ (reg).dt_addr, \ (reg).dt_size, \ _ATTR) /* This internal function programs the MPU regions defined in the DT when using * the `zephyr,memory-attr = <( DT_MEM_ARM(...) )>` property. 
*/ static int mpu_configure_regions_from_dt(uint8_t *reg_index) { const struct mem_attr_region_t *region; size_t num_regions; num_regions = mem_attr_get_regions(&region); for (size_t idx = 0; idx < num_regions; idx++) { struct arm_mpu_region region_conf; switch (DT_MEM_ARM_GET(region[idx].dt_attr)) { case DT_MEM_ARM_MPU_RAM: region_conf = _BUILD_REGION_CONF(region[idx], REGION_RAM_ATTR); break; #ifdef REGION_RAM_NOCACHE_ATTR case DT_MEM_ARM_MPU_RAM_NOCACHE: region_conf = _BUILD_REGION_CONF(region[idx], REGION_RAM_NOCACHE_ATTR); __ASSERT(!(region[idx].dt_attr & DT_MEM_CACHEABLE), "RAM_NOCACHE with DT_MEM_CACHEABLE attribute\n"); break; #endif #ifdef REGION_FLASH_ATTR case DT_MEM_ARM_MPU_FLASH: region_conf = _BUILD_REGION_CONF(region[idx], REGION_FLASH_ATTR); break; #endif #ifdef REGION_PPB_ATTR case DT_MEM_ARM_MPU_PPB: region_conf = _BUILD_REGION_CONF(region[idx], REGION_PPB_ATTR); break; #endif #ifdef REGION_IO_ATTR case DT_MEM_ARM_MPU_IO: region_conf = _BUILD_REGION_CONF(region[idx], REGION_IO_ATTR); break; #endif #ifdef REGION_EXTMEM_ATTR case DT_MEM_ARM_MPU_EXTMEM: region_conf = _BUILD_REGION_CONF(region[idx], REGION_EXTMEM_ATTR); break; #endif default: /* Attribute other than ARM-specific is set. * This region should not be configured in MPU. */ continue; } #if defined(CONFIG_ARMV7_R) region_conf.size = size_to_mpu_rasr_size(region[idx].dt_size); #endif if (region_allocate_and_init((*reg_index), (const struct arm_mpu_region *) &region_conf) < 0) { return -EINVAL; } (*reg_index)++; } return 0; } /* This internal function programs an MPU region * of a given configuration at a given MPU index. */ static int mpu_configure_region(const uint8_t index, const struct z_arm_mpu_partition *new_region) { struct arm_mpu_region region_conf; LOG_DBG("Configure MPU region at index 0x%x", index); /* Populate internal ARM MPU region configuration structure. 
*/ region_conf.base = new_region->start; #if defined(CONFIG_ARMV7_R) region_conf.size = size_to_mpu_rasr_size(new_region->size); #endif get_region_attr_from_mpu_partition_info(&region_conf.attr, &new_region->attr, new_region->start, new_region->size); /* Allocate and program region */ return region_allocate_and_init(index, (const struct arm_mpu_region *)&region_conf); } #if !defined(CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS) || \ !defined(CONFIG_MPU_GAP_FILLING) /* This internal function programs a set of given MPU regions * over a background memory area, optionally performing a * sanity check of the memory regions to be programmed. */ static int mpu_configure_regions(const struct z_arm_mpu_partition regions[], uint8_t regions_num, uint8_t start_reg_index, bool do_sanity_check) { int i; int reg_index = start_reg_index; for (i = 0; i < regions_num; i++) { if (regions[i].size == 0U) { continue; } /* Non-empty region. */ if (do_sanity_check && (!mpu_partition_is_valid(&regions[i]))) { LOG_ERR("Partition %u: sanity check failed.", i); return -EINVAL; } reg_index = mpu_configure_region(reg_index, &regions[i]); if (reg_index == -EINVAL) { return reg_index; } /* Increment number of programmed MPU indices. 
*/ reg_index++; } return reg_index; } #endif /* ARM Core MPU Driver API Implementation for ARM MPU */ #if defined(CONFIG_CPU_AARCH32_CORTEX_R) /** * @brief enable the MPU by setting bit in SCTRL register */ void arm_core_mpu_enable(void) { uint32_t val; val = __get_SCTLR(); val |= SCTLR_MPU_ENABLE; __set_SCTLR(val); /* Make sure that all the registers are set before proceeding */ barrier_dsync_fence_full(); barrier_isync_fence_full(); } /** * @brief disable the MPU by clearing bit in SCTRL register */ void arm_core_mpu_disable(void) { uint32_t val; /* Force any outstanding transfers to complete before disabling MPU */ barrier_dsync_fence_full(); val = __get_SCTLR(); val &= ~SCTLR_MPU_ENABLE; __set_SCTLR(val); /* Make sure that all the registers are set before proceeding */ barrier_dsync_fence_full(); barrier_isync_fence_full(); } #else /** * @brief enable the MPU */ void arm_core_mpu_enable(void) { /* Enable MPU and use the default memory map as a * background region for privileged software access if desired. */ #if defined(CONFIG_MPU_DISABLE_BACKGROUND_MAP) MPU->CTRL = MPU_CTRL_ENABLE_Msk; #else MPU->CTRL = MPU_CTRL_ENABLE_Msk | MPU_CTRL_PRIVDEFENA_Msk; #endif /* Make sure that all the registers are set before proceeding */ barrier_dsync_fence_full(); barrier_isync_fence_full(); } /** * @brief disable the MPU */ void arm_core_mpu_disable(void) { /* Force any outstanding transfers to complete before disabling MPU */ barrier_dmem_fence_full(); /* Disable MPU */ MPU->CTRL = 0; } #endif #if defined(CONFIG_USERSPACE) /** * @brief update configuration of an active memory partition */ void arm_core_mpu_mem_partition_config_update( struct z_arm_mpu_partition *partition, k_mem_partition_attr_t *new_attr) { /* Find the partition. ASSERT if not found. 
*/ uint8_t i; uint8_t reg_index = get_num_regions(); for (i = get_dyn_region_min_index(); i < get_num_regions(); i++) { if (!is_enabled_region(i)) { continue; } uint32_t base = mpu_region_get_base(i); if (base != partition->start) { continue; } uint32_t size = mpu_region_get_size(i); if (size != partition->size) { continue; } /* Region found */ reg_index = i; break; } __ASSERT(reg_index != get_num_regions(), "Memory domain partition %p size %zu not found\n", (void *)partition->start, partition->size); /* Modify the permissions */ partition->attr = *new_attr; mpu_configure_region(reg_index, partition); } /** * @brief get the maximum number of available (free) MPU region indices * for configuring dynamic MPU partitions */ int arm_core_mpu_get_max_available_dyn_regions(void) { return get_num_regions() - static_regions_num; } /** * @brief validate the given buffer is user accessible or not * * Presumes the background mapping is NOT user accessible. */ int arm_core_mpu_buffer_validate(const void *addr, size_t size, int write) { return mpu_buffer_validate(addr, size, write); } #endif /* CONFIG_USERSPACE */ /** * @brief configure fixed (static) MPU regions. 
*/ void arm_core_mpu_configure_static_mpu_regions(const struct z_arm_mpu_partition *static_regions, const uint8_t regions_num, const uint32_t background_area_start, const uint32_t background_area_end) { if (mpu_configure_static_mpu_regions(static_regions, regions_num, background_area_start, background_area_end) == -EINVAL) { __ASSERT(0, "Configuring %u static MPU regions failed\n", regions_num); } } #if defined(CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS) /** * @brief mark memory areas for dynamic region configuration */ void arm_core_mpu_mark_areas_for_dynamic_regions( const struct z_arm_mpu_partition dyn_region_areas[], const uint8_t dyn_region_areas_num) { if (mpu_mark_areas_for_dynamic_regions(dyn_region_areas, dyn_region_areas_num) == -EINVAL) { __ASSERT(0, "Marking %u areas for dynamic regions failed\n", dyn_region_areas_num); } } #endif /* CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS */ /** * @brief configure dynamic MPU regions. */ void arm_core_mpu_configure_dynamic_mpu_regions(const struct z_arm_mpu_partition *dynamic_regions, uint8_t regions_num) { if (mpu_configure_dynamic_mpu_regions(dynamic_regions, regions_num) == -EINVAL) { __ASSERT(0, "Configuring %u dynamic MPU regions failed\n", regions_num); } } /* ARM MPU Driver Initial Setup */ /* * @brief MPU default configuration * * This function provides the default configuration mechanism for the Memory * Protection Unit (MPU). */ int z_arm_mpu_init(void) { uint32_t r_index; if (mpu_config.num_regions > get_num_regions()) { /* Attempt to configure more MPU regions than * what is supported by hardware. As this operation * is executed during system (pre-kernel) initialization, * we want to ensure we can detect an attempt to * perform invalid configuration. 
*/ __ASSERT(0, "Request to configure: %u regions (supported: %u)\n", mpu_config.num_regions, get_num_regions() ); return -1; } LOG_DBG("total region count: %d", get_num_regions()); arm_core_mpu_disable(); #if defined(CONFIG_NOCACHE_MEMORY) /* Clean and invalidate data cache if it is enabled and * that was not already done at boot */ #if defined(CONFIG_CPU_AARCH32_CORTEX_R) if (__get_SCTLR() & SCTLR_C_Msk) { L1C_CleanInvalidateDCacheAll(); } #else #if !defined(CONFIG_INIT_ARCH_HW_AT_BOOT) if (SCB->CCR & SCB_CCR_DC_Msk) { SCB_CleanInvalidateDCache(); } #endif #endif #endif /* CONFIG_NOCACHE_MEMORY */ /* Architecture-specific configuration */ mpu_init(); /* Program fixed regions configured at SOC definition. */ for (r_index = 0U; r_index < mpu_config.num_regions; r_index++) { region_init(r_index, &mpu_config.mpu_regions[r_index]); } /* Update the number of programmed MPU regions. */ static_regions_num = mpu_config.num_regions; /* DT-defined MPU regions. */ if (mpu_configure_regions_from_dt(&static_regions_num) == -EINVAL) { __ASSERT(0, "Failed to allocate MPU regions from DT\n"); return -EINVAL; } /* Clear all regions before enabling MPU */ for (int i = static_regions_num; i < get_num_regions(); i++) { mpu_clear_region(i); } arm_core_mpu_enable(); /* Program additional fixed flash region for null-pointer * dereferencing detection (debug feature) */ #if defined(CONFIG_NULL_POINTER_EXCEPTION_DETECTION_MPU) #if (defined(CONFIG_ARMV8_M_BASELINE) || defined(CONFIG_ARMV8_M_MAINLINE)) && \ (CONFIG_FLASH_BASE_ADDRESS > CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE) #pragma message "Null-Pointer exception detection cannot be configured on un-mapped flash areas" #else const struct z_arm_mpu_partition unmap_region = { .start = 0x0, .size = CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE, #if defined(CONFIG_ARMV8_M_BASELINE) || defined(CONFIG_ARMV8_M_MAINLINE) /* Overlapping region (with any permissions) * will result in fault generation */ .attr = K_MEM_PARTITION_P_RO_U_NA, 
#else /* Explicit no-access policy */ .attr = K_MEM_PARTITION_P_NA_U_NA, #endif }; /* The flash region for null pointer dereferencing detection shall * comply with the regular MPU partition definition restrictions * (size and alignment). */ _ARCH_MEM_PARTITION_ALIGN_CHECK(0x0, CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE); #if defined(CONFIG_ARMV8_M_BASELINE) || defined(CONFIG_ARMV8_M_MAINLINE) /* ARMv8-M requires that the area: * 0x0 - CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE * is not unmapped (belongs to a valid MPU region already). */ if ((arm_cmse_mpu_region_get(0x0) == -EINVAL) || (arm_cmse_mpu_region_get( CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE - 1) == -EINVAL)) { __ASSERT(0, "Null pointer detection page unmapped\n"); } #endif if (mpu_configure_region(static_regions_num, &unmap_region) == -EINVAL) { __ASSERT(0, "Programming null-pointer detection region failed\n"); return -EINVAL; } static_regions_num++; #endif #endif /* CONFIG_NULL_POINTER_EXCEPTION_DETECTION_MPU */ /* Sanity check for number of regions in Cortex-M0+, M3, and M4. */ #if defined(CONFIG_CPU_CORTEX_M0PLUS) || \ defined(CONFIG_CPU_CORTEX_M3) || \ defined(CONFIG_CPU_CORTEX_M4) __ASSERT( (MPU->TYPE & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos == 8, "Invalid number of MPU regions\n"); #endif /* CORTEX_M0PLUS || CPU_CORTEX_M3 || CPU_CORTEX_M4 */ return 0; } ```
/content/code_sandbox/arch/arm/core/mpu/arm_mpu.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,765
```unknown # Memory Protection Unit (MPU) configuration options if CPU_HAS_MPU config ARM_MPU bool "ARM MPU Support" select MPU select SRAM_REGION_PERMISSIONS select THREAD_STACK_INFO select ARCH_HAS_EXECUTABLE_PAGE_BIT select MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT if !(CPU_HAS_NXP_MPU || ARMV8_M_BASELINE || ARMV8_M_MAINLINE || AARCH32_ARMV8_R) select MPU_REQUIRES_NON_OVERLAPPING_REGIONS if CPU_HAS_ARM_MPU && (ARMV8_M_BASELINE || ARMV8_M_MAINLINE || AARCH32_ARMV8_R) select MPU_GAP_FILLING if AARCH32_ARMV8_R select ARCH_MEM_DOMAIN_SUPPORTS_ISOLATED_STACKS select MEM_DOMAIN_ISOLATED_STACKS help MCU implements Memory Protection Unit. Notes: The ARMv6-M and ARMv7-M MPU architecture requires a power-of-two alignment of MPU region base address and size. The NXP MPU as well as the ARMv8-M MPU do not require MPU regions to have power-of-two alignment for base address and region size. The ARMv8-M MPU requires the active MPU regions be non-overlapping. As a result of this, the ARMv8-M MPU needs to fully partition the memory map when programming dynamic memory regions (e.g. PRIV stack guard, user thread stack, and application memory domains), if the system requires PRIV access policy different from the access policy of the ARMv8-M background memory map. The application developer may enforce full PRIV (kernel) memory partition by enabling the CONFIG_MPU_GAP_FILLING option. By not enforcing full partition, MPU may leave part of kernel SRAM area covered only by the default ARMv8-M memory map. This is fine for User Mode, since the background ARM map does not allow nPRIV access at all. However, since the background map policy allows instruction fetches by privileged code, forcing this Kconfig option off prevents the system from directly triggering MemManage exceptions upon accidental attempts to execute code from SRAM in XIP builds. Since this does not compromise User Mode, we make the skipping of full partitioning the default behavior for the ARMv8-M MPU driver. 
config ARM_MPU_REGION_MIN_ALIGN_AND_SIZE int default 256 if ARM_MPU && ARMV6_M_ARMV8_M_BASELINE && !ARMV8_M_BASELINE default 64 if ARM_MPU && AARCH32_ARMV8_R default 32 if ARM_MPU default 4 help Minimum size (and alignment) of an ARM MPU region. Use this symbol to guarantee minimum size and alignment of MPU regions. A minimum 4-byte alignment is enforced in ARM builds without support for Memory Protection. if ARM_MPU config MPU_STACK_GUARD bool "Thread Stack Guards" help Enable Thread Stack Guards via MPU config MPU_STACK_GUARD_MIN_SIZE_FLOAT int depends on MPU_STACK_GUARD depends on FPU_SHARING default 128 help Minimum size (and alignment when applicable) of an ARM MPU region, which guards the stack of a thread that is using the Floating Point (FP) context. The width of the guard is set to 128, to accommodate the length of a Cortex-M exception stack frame when the floating point context is active. The FP context is only stacked in sharing FP registers mode, therefore, the option is applicable only when FPU_SHARING is selected. config MPU_ALLOW_FLASH_WRITE bool "Add MPU access to write to flash" help Enable this to allow MPU RWX access to flash memory config MPU_DISABLE_BACKGROUND_MAP bool "Disables the default background address map" help Enable this to turn off the default background MPU address map. Your SoC definition should likely provide its own custom MPU regions. config CUSTOM_SECTION_ALIGN bool "Custom Section Align" help MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT(ARMv7-M) sometimes cause memory wasting in linker scripts defined memory sections. Use this symbol to guarantee user custom section align size to avoid more memory used for respect alignment. But that needs carefully configure MPU region and sub-regions(ARMv7-M) to cover this feature. config CUSTOM_SECTION_MIN_ALIGN_SIZE int "Custom Section Align Size" default 32 help Custom align size of memory section in linker scripts. Usually it should consume less alignment memory. 
Although this alignment size is configured by the user, it must still
	  respect the power-of-two requirement when the hardware mandates it.
/content/code_sandbox/arch/arm/core/mpu/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,022
```objective-c * */ #include <zephyr/sys/math_extras.h> /** * Get the number of supported MPU regions. */ static inline uint8_t get_num_regions(void) { uint32_t type = MPU->TYPE; type = (type & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos; return (uint8_t)type; } static inline void set_region_number(uint32_t index) { MPU->RNR = index; } static inline uint32_t mpu_region_get_base(uint32_t index) { MPU->RNR = index; return MPU->RBAR & MPU_RBAR_ADDR_Msk; } /** * This internal function converts the SIZE field value of MPU_RASR * to the region size (in bytes). */ static inline uint32_t mpu_rasr_size_to_size(uint32_t rasr_size) { return 1 << (rasr_size + 1U); } /** * This internal function checks if region is enabled or not. * * Note: * The caller must provide a valid region number. */ static inline int is_enabled_region(uint32_t index) { /* Lock IRQs to ensure RNR value is correct when reading RASR. */ unsigned int key; uint32_t rasr; key = irq_lock(); MPU->RNR = index; rasr = MPU->RASR; irq_unlock(key); return (rasr & MPU_RASR_ENABLE_Msk) ? 1 : 0; } /** * This internal function returns the access permissions of an MPU region * specified by its region index. * * Note: * The caller must provide a valid region number. */ static inline uint32_t get_region_ap(uint32_t r_index) { /* Lock IRQs to ensure RNR value is correct when reading RASR. */ unsigned int key; uint32_t rasr; key = irq_lock(); MPU->RNR = r_index; rasr = MPU->RASR; irq_unlock(key); return (rasr & MPU_RASR_AP_Msk) >> MPU_RASR_AP_Pos; } /** * This internal function checks if the given buffer is in the region. * * Note: * The caller must provide a valid region number. */ static inline int is_in_region(uint32_t r_index, uint32_t start, uint32_t size) { uint32_t r_addr_start; uint32_t r_size_lshift; uint32_t r_addr_end; uint32_t end; /* Lock IRQs to ensure RNR value is correct when reading RBAR, RASR. 
*/ unsigned int key; uint32_t rbar, rasr; key = irq_lock(); MPU->RNR = r_index; rbar = MPU->RBAR; rasr = MPU->RASR; irq_unlock(key); r_addr_start = rbar & MPU_RBAR_ADDR_Msk; r_size_lshift = ((rasr & MPU_RASR_SIZE_Msk) >> MPU_RASR_SIZE_Pos) + 1U; r_addr_end = r_addr_start + (1UL << r_size_lshift) - 1UL; size = size == 0U ? 0U : size - 1U; if (u32_add_overflow(start, size, &end)) { return 0; } if ((start >= r_addr_start) && (end <= r_addr_end)) { return 1; } return 0; } static inline uint32_t mpu_region_get_size(uint32_t index) { MPU->RNR = index; uint32_t rasr_size = (MPU->RASR & MPU_RASR_SIZE_Msk) >> MPU_RASR_SIZE_Pos; return mpu_rasr_size_to_size(rasr_size); } ```
/content/code_sandbox/arch/arm/core/mpu/cortex_m/arm_mpu_internal.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
806
```objective-c /* * */ /** * @file * @brief Private kernel definitions (ARM) * * This file contains private kernel function definitions and various * other definitions for the 32-bit ARM Cortex-A/R/M processor architecture * family. * * This file is also included by assembly language files which must #define * _ASMLANGUAGE before including this header file. Note that kernel * assembly source files obtains structure offset values via "absolute symbols" * in the offsets.o module. */ #ifndef ZEPHYR_ARCH_ARM_INCLUDE_KERNEL_ARCH_FUNC_H_ #define ZEPHYR_ARCH_ARM_INCLUDE_KERNEL_ARCH_FUNC_H_ #include <kernel_arch_data.h> #if defined(CONFIG_CPU_CORTEX_M) #include <cortex_m/kernel_arch_func.h> #else #include <cortex_a_r/kernel_arch_func.h> #endif #endif /* ZEPHYR_ARCH_ARM_INCLUDE_KERNEL_ARCH_FUNC_H_ */ ```
/content/code_sandbox/arch/arm/include/kernel_arch_func.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
183
```objective-c /* * */ /** * @file * @brief Private kernel definitions (ARM) * * This file contains private kernel structures definitions and various * other definitions for the ARM Cortex-A/R/M processor architecture family. * * This file is also included by assembly language files which must #define * _ASMLANGUAGE before including this header file. Note that kernel * assembly source files obtains structure offset values via "absolute symbols" * in the offsets.o module. */ #ifndef ZEPHYR_ARCH_ARM_INCLUDE_KERNEL_ARCH_DATA_H_ #define ZEPHYR_ARCH_ARM_INCLUDE_KERNEL_ARCH_DATA_H_ #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #include <zephyr/arch/cpu.h> #if defined(CONFIG_CPU_CORTEX_M) #include <cortex_m/stack.h> #include <cortex_m/exception.h> #elif defined(CONFIG_CPU_AARCH32_CORTEX_R) || defined(CONFIG_CPU_AARCH32_CORTEX_A) #include <cortex_a_r/stack.h> #include <cortex_a_r/exception.h> #endif #ifndef _ASMLANGUAGE #include <zephyr/kernel.h> #include <zephyr/types.h> #include <zephyr/sys/dlist.h> #include <zephyr/sys/atomic.h> #ifdef __cplusplus extern "C" { #endif typedef struct arch_esf _esf_t; typedef struct __basic_sf _basic_sf_t; #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) typedef struct __fpu_sf _fpu_sf_t; #endif #ifdef CONFIG_ARM_MPU struct z_arm_mpu_partition { uintptr_t start; size_t size; k_mem_partition_attr_t attr; }; #endif #ifdef __cplusplus } #endif #endif /* _ASMLANGUAGE */ #endif /* ZEPHYR_ARCH_ARM_INCLUDE_KERNEL_ARCH_DATA_H_ */ ```
/content/code_sandbox/arch/arm/include/kernel_arch_data.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
384
```objective-c * */ #include <zephyr/sys/math_extras.h> /** * Get the number of supported MPU regions. */ static inline uint8_t get_num_regions(void) { uint32_t type; __asm__ volatile("mrc p15, 0, %0, c0, c0, 4" : "=r" (type) ::); type = (type & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos; return (uint8_t)type; } static inline uint32_t get_region_attributes(void) { uint32_t attr; __asm__ volatile("mrc p15, 0, %0, c6, c1, 4" : "=r" (attr) ::); return attr; } static inline uint32_t get_region_base_address(void) { uint32_t addr; __asm__ volatile("mrc p15, 0, %0, c6, c1, 0" : "=r" (addr) ::); return addr; } static inline uint32_t get_region_size(void) { uint32_t size; __asm__ volatile("mrc p15, 0, %0, c6, c1, 2" : "=r" (size) ::); return size; } static inline void set_region_attributes(uint32_t attr) { __asm__ volatile("mcr p15, 0, %0, c6, c1, 4" :: "r" (attr) :); } static inline void set_region_base_address(uint32_t addr) { __asm__ volatile("mcr p15, 0, %0, c6, c1, 0" :: "r" (addr) :); } static inline void set_region_number(uint32_t index) { __asm__ volatile("mcr p15, 0, %0, c6, c2, 0" :: "r" (index) :); } static inline uint32_t mpu_region_get_base(uint32_t index) { set_region_number(index); return get_region_base_address() & MPU_RBAR_ADDR_Msk; } /** * This internal function converts the SIZE field value of MPU_RASR * to the region size (in bytes). */ static inline uint32_t mpu_rasr_size_to_size(uint32_t rasr_size) { return 1 << (rasr_size + 1U); } static inline void set_region_size(uint32_t size) { __asm__ volatile("mcr p15, 0, %0, c6, c1, 2" :: "r" (size) :); } static inline void ARM_MPU_ClrRegion(uint32_t rnr) { set_region_number(rnr); /* clear size field, which contains enable bit */ set_region_size(0); } /** * This internal function checks if region is enabled or not. * * Note: * The caller must provide a valid region number. */ static inline int is_enabled_region(uint32_t index) { set_region_number(index); return (get_region_size() & MPU_RASR_ENABLE_Msk) ? 
1 : 0; } /** * This internal function returns the access permissions of an MPU region * specified by its region index. * * Note: * The caller must provide a valid region number. */ static inline uint32_t get_region_ap(uint32_t r_index) { set_region_number(r_index); return (get_region_attributes() & MPU_RASR_AP_Msk) >> MPU_RASR_AP_Pos; } /** * This internal function checks if the given buffer is in the region. * * Note: * The caller must provide a valid region number. */ static inline int is_in_region(uint32_t r_index, uint32_t start, uint32_t size) { uint32_t r_addr_start; uint32_t r_size_lshift; uint32_t r_addr_end; uint32_t end; set_region_number(r_index); r_addr_start = get_region_base_address() & MPU_RBAR_ADDR_Msk; r_size_lshift = ((get_region_size() & MPU_RASR_SIZE_Msk) >> MPU_RASR_SIZE_Pos) + 1; r_addr_end = r_addr_start + (1UL << r_size_lshift) - 1; size = size == 0 ? 0 : size - 1; if (u32_add_overflow(start, size, &end)) { return 0; } if ((start >= r_addr_start) && (end <= r_addr_end)) { return 1; } return 0; } static inline uint32_t mpu_region_get_size(uint32_t index) { set_region_number(index); uint32_t rasr_size = (get_region_size() & MPU_RASR_SIZE_Msk) >> MPU_RASR_SIZE_Pos; return mpu_rasr_size_to_size(rasr_size); } ```
/content/code_sandbox/arch/arm/core/mpu/cortex_a_r/arm_mpu_internal.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,036
```objective-c /* * */ #ifndef ZEPHYR_ARCH_ARM_INCLUDE_OFFSETS_SHORT_ARCH_H_ #define ZEPHYR_ARCH_ARM_INCLUDE_OFFSETS_SHORT_ARCH_H_ #include <zephyr/offsets.h> /* kernel */ /* nothing for now */ /* end - kernel */ /* threads */ #define _thread_offset_to_basepri \ (___thread_t_arch_OFFSET + ___thread_arch_t_basepri_OFFSET) #define _thread_offset_to_preempt_float \ (___thread_t_arch_OFFSET + ___thread_arch_t_preempt_float_OFFSET) #if defined(CONFIG_CPU_AARCH32_CORTEX_A) || defined(CONFIG_CPU_AARCH32_CORTEX_R) #define _thread_offset_to_exception_depth \ (___thread_t_arch_OFFSET + ___thread_arch_t_exception_depth_OFFSET) #define _cpu_offset_to_exc_depth \ (___cpu_t_arch_OFFSET + ___cpu_arch_t_exc_depth_OFFSET) #endif #if defined(CONFIG_USERSPACE) || defined(CONFIG_FPU_SHARING) #define _thread_offset_to_mode \ (___thread_t_arch_OFFSET + ___thread_arch_t_mode_OFFSET) #endif #if defined(CONFIG_ARM_STORE_EXC_RETURN) #define _thread_offset_to_mode_exc_return \ (___thread_t_arch_OFFSET + ___thread_arch_t_mode_exc_return_OFFSET) #endif #ifdef CONFIG_USERSPACE #define _thread_offset_to_priv_stack_start \ (___thread_t_arch_OFFSET + ___thread_arch_t_priv_stack_start_OFFSET) #if defined(CONFIG_CPU_AARCH32_CORTEX_R) #define _thread_offset_to_priv_stack_end \ (___thread_t_arch_OFFSET + ___thread_arch_t_priv_stack_end_OFFSET) #define _thread_offset_to_sp_usr \ (___thread_t_arch_OFFSET + ___thread_arch_t_sp_usr_OFFSET) #endif #endif #if defined(CONFIG_THREAD_STACK_INFO) #define _thread_offset_to_stack_info_start \ (___thread_stack_info_t_start_OFFSET + ___thread_t_stack_info_OFFSET) #endif /* end - threads */ #endif /* ZEPHYR_ARCH_ARM_INCLUDE_OFFSETS_SHORT_ARCH_H_ */ ```
/content/code_sandbox/arch/arm/include/offsets_short_arch.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
397
```objective-c /* * */ /** * @file * @brief Private kernel definitions (ARM) * * This file contains private kernel function definitions and various * other definitions for the 32-bit ARM Cortex-M processor architecture * family. * * This file is also included by assembly language files which must #define * _ASMLANGUAGE before including this header file. Note that kernel * assembly source files obtains structure offset values via "absolute symbols" * in the offsets.o module. */ #ifndef ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_M_KERNEL_ARCH_FUNC_H_ #define ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_M_KERNEL_ARCH_FUNC_H_ #ifdef __cplusplus extern "C" { #endif #ifndef _ASMLANGUAGE extern void z_arm_fault_init(void); extern void z_arm_cpu_idle_init(void); #ifdef CONFIG_ARM_MPU extern void z_arm_configure_static_mpu_regions(void); extern void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread); extern int z_arm_mpu_init(void); #endif /* CONFIG_ARM_MPU */ #ifdef CONFIG_ARM_AARCH32_MMU extern int z_arm_mmu_init(void); #endif /* CONFIG_ARM_AARCH32_MMU */ static ALWAYS_INLINE void arch_kernel_init(void) { z_arm_interrupt_stack_setup(); z_arm_exc_setup(); z_arm_fault_init(); z_arm_cpu_idle_init(); z_arm_clear_faults(); #if defined(CONFIG_ARM_MPU) z_arm_mpu_init(); /* Configure static memory map. This will program MPU regions, * to set up access permissions for fixed memory sections, such * as Application Memory or No-Cacheable SRAM area. * * This function is invoked once, upon system initialization. 
*/ z_arm_configure_static_mpu_regions(); #endif /* CONFIG_ARM_MPU */ } static ALWAYS_INLINE void arch_thread_return_value_set(struct k_thread *thread, unsigned int value) { thread->arch.swap_return_value = value; } #if !defined(CONFIG_MULTITHREADING) extern FUNC_NORETURN void z_arm_switch_to_main_no_multithreading( k_thread_entry_t main_func, void *p1, void *p2, void *p3); #define ARCH_SWITCH_TO_MAIN_NO_MULTITHREADING \ z_arm_switch_to_main_no_multithreading #endif /* !CONFIG_MULTITHREADING */ extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry, void *p1, void *p2, void *p3, uint32_t stack_end, uint32_t stack_start); extern void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf); #endif /* _ASMLANGUAGE */ #ifdef __cplusplus } #endif #endif /* ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_M_KERNEL_ARCH_FUNC_H_ */ ```
/content/code_sandbox/arch/arm/include/cortex_m/kernel_arch_func.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
570
```objective-c /* * */ /** * @file * @brief Exception/interrupt context helpers for Cortex-M CPUs * * Exception/interrupt context helpers. */ #ifndef ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_M_EXCEPTION_H_ #define ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_M_EXCEPTION_H_ #include <zephyr/arch/cpu.h> #ifdef _ASMLANGUAGE /* nothing */ #else #include <cmsis_core.h> #include <zephyr/arch/arm/exception.h> #include <zephyr/irq_offload.h> #ifdef __cplusplus extern "C" { #endif #ifdef CONFIG_IRQ_OFFLOAD extern volatile irq_offload_routine_t offload_routine; #endif /* Writes to the AIRCR must be accompanied by a write of the value 0x05FA * to the Vector Key field, otherwise the writes are ignored. */ #define AIRCR_VECT_KEY_PERMIT_WRITE 0x05FAUL /* * The current executing vector is found in the IPSR register. All * IRQs and system exceptions are considered as interrupt context. */ static ALWAYS_INLINE bool arch_is_in_isr(void) { return (__get_IPSR()) ? (true) : (false); } /** * @brief Find out if we were in ISR context * before the current exception occurred. * * A function that determines, based on inspecting the current * ESF, whether the processor was in handler mode before entering * the current exception state (i.e. nested exception) or not. * * Notes: * - The function shall only be called from ISR context. * - We do not use ARM processor state flags to determine * whether we are in a nested exception; we rely on the * RETPSR value stacked on the ESF. Hence, the function * assumes that the ESF stack frame has a valid RETPSR * value. * * @param esf the exception stack frame (cannot be NULL) * @return true if execution state was in handler mode, before * the current exception occurred, otherwise false. */ static ALWAYS_INLINE bool arch_is_in_nested_exception(const struct arch_esf *esf) { return (esf->basic.xpsr & IPSR_ISR_Msk) ? 
(true) : (false); } #if defined(CONFIG_USERSPACE) /** * @brief Is the thread in unprivileged mode * * @param esf the exception stack frame (unused) * @return true if the current thread was in unprivileged mode */ static ALWAYS_INLINE bool z_arm_preempted_thread_in_user_mode(const struct arch_esf *esf) { return z_arm_thread_is_in_user_mode(); } #endif /** * @brief Setup system exceptions * * Set exception priorities to conform with the BASEPRI locking mechanism. * Set PendSV priority to lowest possible. * * Enable fault exceptions. */ static ALWAYS_INLINE void z_arm_exc_setup(void) { /* PendSV is set to lowest priority, regardless of it being used. * This is done as the IRQ is always enabled. */ NVIC_SetPriority(PendSV_IRQn, _EXC_PENDSV_PRIO); #ifdef CONFIG_CPU_CORTEX_M_HAS_BASEPRI /* Note: SVCall IRQ priority level is left to default (0) * for Cortex-M variants without BASEPRI (e.g. ARMv6-M). */ NVIC_SetPriority(SVCall_IRQn, _EXC_SVC_PRIO); #endif #ifdef CONFIG_CPU_CORTEX_M_HAS_PROGRAMMABLE_FAULT_PRIOS NVIC_SetPriority(MemoryManagement_IRQn, _EXC_FAULT_PRIO); NVIC_SetPriority(BusFault_IRQn, _EXC_FAULT_PRIO); NVIC_SetPriority(UsageFault_IRQn, _EXC_FAULT_PRIO); #if defined(CONFIG_CORTEX_M_DEBUG_MONITOR_HOOK) NVIC_SetPriority(DebugMonitor_IRQn, IRQ_PRIO_LOWEST); #elif defined(CONFIG_CPU_CORTEX_M_HAS_DWT) NVIC_SetPriority(DebugMonitor_IRQn, _EXC_FAULT_PRIO); #endif #if defined(CONFIG_ARM_SECURE_FIRMWARE) NVIC_SetPriority(SecureFault_IRQn, _EXC_FAULT_PRIO); #endif /* CONFIG_ARM_SECURE_FIRMWARE */ /* Enable Usage, Mem, & Bus Faults */ SCB->SHCSR |= SCB_SHCSR_USGFAULTENA_Msk | SCB_SHCSR_MEMFAULTENA_Msk | SCB_SHCSR_BUSFAULTENA_Msk; #if defined(CONFIG_ARM_SECURE_FIRMWARE) /* Enable Secure Fault */ SCB->SHCSR |= SCB_SHCSR_SECUREFAULTENA_Msk; /* Clear BFAR before setting BusFaults to target Non-Secure state. 
*/ SCB->BFAR = 0; #endif /* CONFIG_ARM_SECURE_FIRMWARE */ #endif /* CONFIG_CPU_CORTEX_M_HAS_PROGRAMMABLE_FAULT_PRIOS */ #if defined(CONFIG_ARM_SECURE_FIRMWARE) && \ !defined(CONFIG_ARM_SECURE_BUSFAULT_HARDFAULT_NMI) /* Set NMI, Hard, and Bus Faults as Non-Secure. * NMI and Bus Faults targeting the Secure state will * escalate to a SecureFault or SecureHardFault. */ SCB->AIRCR = (SCB->AIRCR & (~(SCB_AIRCR_VECTKEY_Msk))) | SCB_AIRCR_BFHFNMINS_Msk | ((AIRCR_VECT_KEY_PERMIT_WRITE << SCB_AIRCR_VECTKEY_Pos) & SCB_AIRCR_VECTKEY_Msk); /* Note: Fault conditions that would generate a SecureFault * in a PE with the Main Extension instead generate a * SecureHardFault in a PE without the Main Extension. */ #endif /* ARM_SECURE_FIRMWARE && !ARM_SECURE_BUSFAULT_HARDFAULT_NMI */ #if defined(CONFIG_CPU_CORTEX_M_HAS_SYSTICK) && \ !defined(CONFIG_CORTEX_M_SYSTICK) /* SoC implements SysTick, but the system does not use it * as driver for system timing. However, the SysTick IRQ is * always enabled, so we must ensure the interrupt priority * is set to a level lower than the kernel interrupts (for * the assert mechanism to work properly) in case the SysTick * interrupt is accidentally raised. */ NVIC_SetPriority(SysTick_IRQn, _EXC_IRQ_DEFAULT_PRIO); #endif /* CPU_CORTEX_M_HAS_SYSTICK && ! 
CORTEX_M_SYSTICK */ } /** * @brief Clear Fault exceptions * * Clear out exceptions for Mem, Bus, Usage and Hard Faults */ static ALWAYS_INLINE void z_arm_clear_faults(void) { #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) /* Reset all faults */ SCB->CFSR = SCB_CFSR_USGFAULTSR_Msk | SCB_CFSR_MEMFAULTSR_Msk | SCB_CFSR_BUSFAULTSR_Msk; /* Clear all Hard Faults - HFSR is write-one-to-clear */ SCB->HFSR = 0xffffffff; #else #error Unknown ARM architecture #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ } /** * @brief Assess whether a debug monitor event should be treated as an error * * This routine checks the status of a debug_monitor() exception, and * evaluates whether this needs to be considered as a processor error. * * @return true if the DM exception is a processor error, otherwise false */ bool z_arm_debug_monitor_event_error_check(void); #ifdef __cplusplus } #endif #endif /* _ASMLANGUAGE */ #endif /* ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_M_EXCEPTION_H_ */ ```
/content/code_sandbox/arch/arm/include/cortex_m/exception.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,601
```objective-c /* * */ /** * @file * @brief TrustZone API for use in nonsecure firmware * * TrustZone API for Cortex-M CPUs implementing the Security Extension. * The following API can be used by the nonsecure firmware to interact with the * secure firmware. */ #ifndef ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_CORTEX_M_TZ_NS_H_ #define ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_CORTEX_M_TZ_NS_H_ #ifdef _ASMLANGUAGE /* nothing */ #else /** * @brief Macro for "sandwiching" a function call (@p name) between two other * calls * * This macro should be called via @ref __TZ_WRAP_FUNC. * * This macro creates the function body of an "outer" function which behaves * exactly like the wrapped function (@p name), except that the preface function * is called before, and the postface function afterwards. * * @param preface The function to call first. Must have no parameters and no * return value. * @param name The main function, i.e. the function to wrap. This function * will receive the arguments, and its return value will be * returned. * @param postface The function to call last. Must have no parameters and no * return value. * @param store_lr The assembly instruction for storing away the LR value * before the functions are called. This instruction must leave * r0-r3 unmodified. * @param load_lr The assembly instruction for restoring the LR value after * the functions have been called. This instruction must leave * r0-r3 unmodified. 
*/ #define __TZ_WRAP_FUNC_RAW(preface, name, postface, store_lr, load_lr) \ __asm__ volatile( \ ".global "#preface"; .type "#preface", %function"); \ __asm__ volatile( \ ".global "#name"; .type "#name", %function"); \ __asm__ volatile( \ ".global "#postface"; .type "#postface", %function"); \ __asm__ volatile( \ store_lr "\n\t" \ "push {r0-r3}\n\t" \ "bl " #preface "\n\t" \ "pop {r0-r3}\n\t" \ "bl " #name " \n\t" \ "push {r0-r3}\n\t" \ "bl " #postface "\n\t" \ "pop {r0-r3}\n\t" \ load_lr "\n\t" \ ::); /** * @brief Macro for "sandwiching" a function call (@p name) in two other calls * * @pre The wrapped function MUST not pass arguments or return values via * the stack. I.e. the arguments and return values must each fit within 4 * words, after accounting for alignment. * Since nothing is passed on the stack, the stack can safely be used to * store LR. * * Usage example: * * int foo(char *arg); // Implemented elsewhere. * int __attribute__((naked)) foo_wrapped(char *arg) * { * __TZ_WRAP_FUNC(bar, foo, baz) * } * * is equivalent to * * int foo(char *arg); // Implemented elsewhere. * int foo_wrapped(char *arg) * { * bar(); * int res = foo(arg); * baz(); * return res; * } * * @note __attribute__((naked)) is not mandatory, but without it, GCC gives a * warning for functions with a return value. It also reduces flash use. * * See @ref __TZ_WRAP_FUNC_RAW for more information. */ #define __TZ_WRAP_FUNC(preface, name, postface) \ __TZ_WRAP_FUNC_RAW(preface, name, postface, "push {r4, lr}", \ "pop {r4, pc}") #ifdef CONFIG_ARM_FIRMWARE_USES_SECURE_ENTRY_FUNCS /** * @brief Create a thread safe wrapper function for a non-secure entry function * * This locks the scheduler before calling the function by wrapping the NS entry * function in @ref k_sched_lock / @ref k_sched_unlock, using * @ref __TZ_WRAP_FUNC. * * In non-secure code: * * int foo(char *arg); // Declaration of entry function. 
* TZ_THREAD_SAFE_NONSECURE_ENTRY_FUNC(foo_safe, int, foo, char *arg) * * Usage in non-secure code: * * int ret = foo_safe("my arg"); * * If NS entry functions are called without such a wrapper, and a thread switch * happens while execution is in the secure binary, the app will possibly crash * upon returning to the non-secure binary. * * @param ret The return type of the NS entry function. * @param name The desired name of the safe function. This assumes there is a * corresponding NS entry function called nsc_name. * @param ... The rest of the signature of the function. This must be the same * signature as the corresponding NS entry function. */ #define TZ_THREAD_SAFE_NONSECURE_ENTRY_FUNC(name, ret, nsc_name, ...) \ ret __attribute__((naked)) name(__VA_ARGS__) \ { \ __TZ_WRAP_FUNC(k_sched_lock, nsc_name, k_sched_unlock); \ } #endif /* CONFIG_ARM_FIRMWARE_USES_SECURE_ENTRY_FUNCS */ #endif /* _ASMLANGUAGE */ #endif /* ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_CORTEX_M_TZ_NS_H_ */ ```
/content/code_sandbox/arch/arm/include/cortex_m/tz_ns.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,227
```objective-c /* * */ /** * @file * @brief ARM Core CMSE API * * CMSE API for Cortex-M23/M33 CPUs. */ #ifndef ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_CORTEX_M_CMSE_H_ #define ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_CORTEX_M_CMSE_H_ #ifdef _ASMLANGUAGE /* nothing */ #else #include <arm_cmse.h> #include <stdint.h> #ifdef __cplusplus extern "C" { #endif /* * Address information retrieval based on the TT instructions. * * The TT instructions are used to check the access permissions that different * security states and privilege levels have on memory at a specified address */ /** * @brief Get the MPU region number of an address * * Return the non-negative MPU region that the address maps to, * or -EINVAL to indicate that an invalid MPU region was retrieved. * * Note: * Obtained region is valid only if: * - the function is called from privileged mode * - the MPU is implemented and enabled * - the given address matches a single, enabled MPU region * * @param addr The address for which the MPU region is requested * * @return a valid MPU region number or -EINVAL */ int arm_cmse_mpu_region_get(uint32_t addr); /** * @brief Read accessibility of an address * * Evaluates whether a specified memory location can be read according to the * permissions of the current state MPU and the specified operation mode. * * This function shall always return zero: * - if executed from an unprivileged mode, * - if the address matches multiple MPU regions. * * @param addr The address for which the readability is requested * @param force_npriv Instruct to return the readability of the address * for unprivileged access, regardless of whether the current * mode is privileged or unprivileged. * * @return 1 if address is readable, 0 otherwise. 
*/ int arm_cmse_addr_read_ok(uint32_t addr, int force_npriv); /** * @brief Read and Write accessibility of an address * * Evaluates whether a specified memory location can be read/written according * to the permissions of the current state MPU and the specified operation * mode. * * This function shall always return zero: * - if executed from an unprivileged mode, * - if the address matches multiple MPU regions. * * @param addr The address for which the RW ability is requested * @param force_npriv Instruct to return the RW ability of the address * for unprivileged access, regardless of whether the current * mode is privileged or unprivileged. * * @return 1 if address is Read and Writable, 0 otherwise. */ int arm_cmse_addr_readwrite_ok(uint32_t addr, int force_npriv); /** * @brief Read accessibility of an address range * * Evaluates whether a memory address range, specified by its base address * and size, can be read according to the permissions of the current state MPU * and the specified operation mode. * * This function shall always return zero: * - if executed from an unprivileged mode, * - if the address range overlaps with multiple MPU (and/or SAU/IDAU) regions. * * @param addr The base address of an address range, * for which the readability is requested * @param size The size of the address range * @param force_npriv Instruct to return the readability of the address range * for unprivileged access, regardless of whether the current * mode is privileged or unprivileged. * * @return 1 if address range is readable, 0 otherwise. */ int arm_cmse_addr_range_read_ok(uint32_t addr, uint32_t size, int force_npriv); /** * @brief Read and Write accessibility of an address range * * Evaluates whether a memory address range, specified by its base address * and size, can be read/written according to the permissions of the current * state MPU and the specified operation mode. 
* * This function shall always return zero: * - if executed from an unprivileged mode, * - if the address range overlaps with multiple MPU (and/or SAU/IDAU) regions. * * @param addr The base address of an address range, * for which the RW ability is requested * @param size The size of the address range * @param force_npriv Instruct to return the RW ability of the address range * for unprivileged access, regardless of whether the current * mode is privileged or unprivileged. * * @return 1 if address range is Read and Writable, 0 otherwise. */ int arm_cmse_addr_range_readwrite_ok(uint32_t addr, uint32_t size, int force_npriv); /* Required for C99 compilation (required for GCC-8.x version, * where typeof is used instead of __typeof__) */ #ifndef typeof #define typeof __typeof__ #endif /** * @brief Read accessibility of an object * * Evaluates whether a given object can be read according to the * permissions of the current state MPU. * * The macro shall always evaluate to zero if called from an unprivileged mode. * * @param p_obj Pointer to the given object * for which the readability is requested * * @pre Object is allocated in a single MPU (and/or SAU/IDAU) region. * * @return p_obj if object is readable, NULL otherwise. */ #define ARM_CMSE_OBJECT_READ_OK(p_obj) \ cmse_check_pointed_object(p_obj, CMSE_MPU_READ) /** * @brief Read accessibility of an object (nPRIV mode) * * Evaluates whether a given object can be read according to the * permissions of the current state MPU (unprivileged read). * * The macro shall always evaluate to zero if called from an unprivileged mode. * * @param p_obj Pointer to the given object * for which the readability is requested * * @pre Object is allocated in a single MPU (and/or SAU/IDAU) region. * * @return p_obj if object is readable, NULL otherwise. 
*/ #define ARM_CMSE_OBJECT_UNPRIV_READ_OK(p_obj) \ cmse_check_pointed_object(p_obj, CMSE_MPU_UNPRIV | CMSE_MPU_READ) /** * @brief Read and Write accessibility of an object * * Evaluates whether a given object can be read and written * according to the permissions of the current state MPU. * * The macro shall always evaluate to zero if called from an unprivileged mode. * * @param p_obj Pointer to the given object * for which the read and write ability is requested * * @pre Object is allocated in a single MPU (and/or SAU/IDAU) region. * * @return p_obj if object is Read and Writable, NULL otherwise. */ #define ARM_CMSE_OBJECT_READWRITE_OK(p_obj) \ cmse_check_pointed_object(p_obj, CMSE_MPU_READWRITE) /** * @brief Read and Write accessibility of an object (nPRIV mode) * * Evaluates whether a given object can be read and written according * to the permissions of the current state MPU (unprivileged read/write). * * The macro shall always evaluate to zero if called from an unprivileged mode. * * @param p_obj Pointer to the given object * for which the read and write ability is requested * * @pre Object is allocated in a single MPU (and/or SAU/IDAU) region. * * @return p_obj if object is Read and Writable, NULL otherwise. */ #define ARM_CMSE_OBJECT_UNPRIV_READWRITE_OK(p_obj) \ cmse_check_pointed_object(p_obj, CMSE_MPU_UNPRIV | CMSE_MPU_READWRITE) #if defined(CONFIG_ARM_SECURE_FIRMWARE) /** * @brief Get the MPU (Non-Secure) region number of an address * * Return the non-negative MPU (Non-Secure) region that the address maps to, * or -EINVAL to indicate that an invalid MPU region was retrieved. 
* * Note: * Obtained region is valid only if: * - the function is called from Secure state * - the MPU is implemented and enabled * - the given address matches a single, enabled MPU region * * @param addr The address for which the MPU region is requested * * @return a valid MPU region number or -EINVAL */ int arm_cmse_mpu_nonsecure_region_get(uint32_t addr); /** * @brief Get the SAU region number of an address * * Return the non-negative SAU (Non-Secure) region that the address maps to, * or -EINVAL to indicate that an invalid SAU region was retrieved. * * Note: * Obtained region is valid only if: * - the function is called from Secure state * - the SAU is implemented and enabled * - the given address is not exempt from the secure memory attribution * * @param addr The address for which the SAU region is requested * * @return a valid SAU region number or -EINVAL */ int arm_cmse_sau_region_get(uint32_t addr); /** * @brief Get the IDAU region number of an address * * Return the non-negative IDAU (Non-Secure) region that the address maps to, * or -EINVAL to indicate that an invalid IDAU region was retrieved. * * Note: * Obtained region is valid only if: * - the function is called from Secure state * - the IDAU can provide a region number * - the given address is not exempt from the secure memory attribution * * @param addr The address for which the IDAU region is requested * * @return a valid IDAU region number or -EINVAL */ int arm_cmse_idau_region_get(uint32_t addr); /** * @brief Security attribution of an address * * Evaluates whether a specified memory location belongs to a Secure region. * This function shall always return zero if executed from Non-Secure state. * * @param addr The address for which the security attribution is requested * * @return 1 if address is Secure, 0 otherwise. 
*/ int arm_cmse_addr_is_secure(uint32_t addr); /** * @brief Non-Secure Read accessibility of an address * * Evaluates whether a specified memory location can be read from Non-Secure * state according to the permissions of the Non-Secure state MPU and the * specified operation mode. * * This function shall always return zero: * - if executed from Non-Secure state * - if the address matches multiple MPU regions. * * @param addr The address for which the readability is requested * @param force_npriv Instruct to return the readability of the address * for unprivileged access, regardless of whether the current * mode is privileged or unprivileged. * * @return 1 if address is readable from Non-Secure state, 0 otherwise. */ int arm_cmse_addr_nonsecure_read_ok(uint32_t addr, int force_npriv); /** * @brief Non-Secure Read and Write accessibility of an address * * Evaluates whether a specified memory location can be read/written from * Non-Secure state according to the permissions of the Non-Secure state MPU * and the specified operation mode. * * This function shall always return zero: * - if executed from Non-Secure mode, * - if the address matches multiple MPU regions. * * @param addr The address for which the RW ability is requested * @param force_npriv Instruct to return the RW ability of the address * for unprivileged access, regardless of whether the current * mode is privileged or unprivileged. * * @return 1 if address is Read and Writable from Non-Secure state, 0 otherwise */ int arm_cmse_addr_nonsecure_readwrite_ok(uint32_t addr, int force_npriv); /** * @brief Non-Secure Read accessibility of an address range * * Evaluates whether a memory address range, specified by its base address * and size, can be read according to the permissions of the Non-Secure state * MPU and the specified operation mode. * * This function shall always return zero: * - if executed from Non-Secure mode, * - if the address matches multiple MPU (and/or SAU/IDAU) regions. 
* * @param addr The base address of an address range, * for which the readability is requested * @param size The size of the address range * @param force_npriv Instruct to return the readability of the address range * for unprivileged access, regardless of whether the current * mode is privileged or unprivileged. * * @return 1 if address range is readable, 0 otherwise. */ int arm_cmse_addr_range_nonsecure_read_ok(uint32_t addr, uint32_t size, int force_npriv); /** * @brief Non-Secure Read and Write accessibility of an address range * * Evaluates whether a memory address range, specified by its base address * and size, can be read and written according to the permissions of the * Non-Secure state MPU and the specified operation mode. * * This function shall always return zero: * - if executed from Non-Secure mode, * - if the address matches multiple MPU (and/or SAU/IDAU) regions. * * @param addr The base address of an address range, * for which Read and Write ability is requested * @param size The size of the address range * @param force_npriv Instruct to return the readability of the address range * for unprivileged access, regardless of whether the current * mode is privileged or unprivileged. * * @return 1 if address range is readable, 0 otherwise. */ int arm_cmse_addr_range_nonsecure_readwrite_ok(uint32_t addr, uint32_t size, int force_npriv); /** * @brief Non-Secure Read accessibility of an object * * Evaluates whether a given object can be read according to the * permissions of the Non-Secure state MPU. * * The macro shall always evaluate to zero if called from Non-Secure state. * * @param p_obj Pointer to the given object * for which the readability is requested * * @pre Object is allocated in a single MPU region. * * @return p_obj if object is readable from Non-Secure state, NULL otherwise. 
*/ #define ARM_CMSE_OBJECT_NONSECURE_READ_OK(p_obj) \ cmse_check_pointed_object(p_obj, CMSE_NONSECURE | CMSE_MPU_READ) /** * @brief Non-Secure Read accessibility of an object (nPRIV mode) * * Evaluates whether a given object can be read according to the * permissions of the Non-Secure state MPU (unprivileged read). * * The macro shall always evaluate to zero if called from Non-Secure state. * * @param p_obj Pointer to the given object * for which the readability is requested * * @pre Object is allocated in a single MPU region. * * @return p_obj if object is readable from Non-Secure state, NULL otherwise. */ #define ARM_CMSE_OBJECT_NONSECURE_UNPRIV_READ_OK(p_obj) \ cmse_check_pointed_object(p_obj, \ CMSE_NONSECURE | CMSE_MPU_UNPRIV | CMSE_MPU_READ) /** * @brief Non-Secure Read and Write accessibility of an object * * Evaluates whether a given object can be read and written * according to the permissions of the Non-Secure state MPU. * * The macro shall always evaluate to zero if called from Non-Secure state. * * @param p_obj Pointer to the given object * for which the read and write ability is requested * * @pre Object is allocated in a single MPU region. * * @return p_obj if object is Non-Secure Read and Writable, NULL otherwise. */ #define ARM_CMSE_OBJECT_NONSECURE_READWRITE_OK(p_obj) \ cmse_check_pointed_object(p_obj, CMSE_NONSECURE | CMSE_MPU_READWRITE) /** * @brief Non-Secure Read and Write accessibility of an object (nPRIV mode) * * Evaluates whether a given object can be read and written according * to the permissions of the Non-Secure state MPU (unprivileged read/write). * * The macro shall always evaluate to zero if called from Non-Secure state. * * @param p_obj Pointer to the given object * for which the read and write ability is requested * * @pre Object is allocated in a single MPU region. * * @return p_obj if object is Non-Secure Read and Writable, NULL otherwise. 
*/ #define ARM_CMSE_OBJECT_NON_SECURE_UNPRIV_READWRITE_OK(p_obj) \ cmse_check_pointed_object(p_obj, \ CMSE_NONSECURE | CMSE_MPU_UNPRIV | CMSE_MPU_READWRITE) #endif /* CONFIG_ARM_SECURE_FIRMWARE */ #ifdef __cplusplus } #endif #endif /* _ASMLANGUAGE */ #endif /* ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_CORTEX_M_CMSE_H_ */ ```
/content/code_sandbox/arch/arm/include/cortex_m/cmse.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,622
```objective-c /* * */ /** * @file * @brief Stack helpers for Cortex-M CPUs * * Stack helper functions. */ #ifndef ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_CORTEX_M_STACK_H_ #define ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_CORTEX_M_STACK_H_ #ifdef _ASMLANGUAGE /* nothing */ #else #include <cmsis_core.h> #ifdef __cplusplus extern "C" { #endif K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS, CONFIG_ISR_STACK_SIZE); /** * * @brief Setup interrupt stack * * On Cortex-M, the interrupt stack is registered in the MSP (main stack * pointer) register, and switched to automatically when taking an exception. * */ static ALWAYS_INLINE void z_arm_interrupt_stack_setup(void) { uint32_t msp = (uint32_t)(K_KERNEL_STACK_BUFFER(z_interrupt_stacks[0])) + K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[0]); __set_MSP(msp); #if defined(CONFIG_BUILTIN_STACK_GUARD) #if defined(CONFIG_CPU_CORTEX_M_HAS_SPLIM) __set_MSPLIM((uint32_t)z_interrupt_stacks[0]); #else #error "Built-in MSP limit checks not supported by HW" #endif #endif /* CONFIG_BUILTIN_STACK_GUARD */ #if defined(CONFIG_STACK_ALIGN_DOUBLE_WORD) /* Enforce double-word stack alignment on exception entry * for Cortex-M3 and Cortex-M4 (ARMv7-M) MCUs. For the rest * of ARM Cortex-M processors this setting is enforced by * default and it is not configurable. */ #if defined(CONFIG_CPU_CORTEX_M3) || defined(CONFIG_CPU_CORTEX_M4) SCB->CCR |= SCB_CCR_STKALIGN_Msk; #endif #endif /* CONFIG_STACK_ALIGN_DOUBLE_WORD */ } #ifdef __cplusplus } #endif #endif /* _ASMLANGUAGE */ #endif /* ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_CORTEX_M_STACK_H_ */ ```
/content/code_sandbox/arch/arm/include/cortex_m/stack.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
419
```objective-c /* * */ /** * @file * @brief DWT utility functions for Cortex-M CPUs * */ #ifndef ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_CORTEX_M_DWT_H_ #define ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_CORTEX_M_DWT_H_ #ifdef _ASMLANGUAGE /* nothing */ #else #include <cmsis_core.h> #include <zephyr/sys/__assert.h> #ifdef __cplusplus extern "C" { #endif #if defined(CONFIG_CORTEX_M_DWT) /* Define DWT LSR masks which are currently not defined by the CMSIS V5.1.2. * (LSR register is defined but not its bitfields). * Reuse ITM LSR mask as it is the same offset than DWT LSR one. */ #if !defined DWT_LSR_Present_Msk #define DWT_LSR_Present_Msk ITM_LSR_Present_Msk #endif #if !defined DWT_LSR_Access_Msk #define DWT_LSR_Access_Msk ITM_LSR_Access_Msk #endif static inline void dwt_access(bool ena) { #if defined(CONFIG_CPU_CORTEX_M7) /* * In case of Cortex M7, we need to check the optional presence of * Lock Access Register (LAR) which is indicated in Lock Status * Register (LSR). When present, a special access token must be written * to unlock DWT registers. */ uint32_t lsr = DWT->LSR; if ((lsr & DWT_LSR_Present_Msk) != 0) { if (ena) { if ((lsr & DWT_LSR_Access_Msk) != 0) { /* Access is locked. unlock it */ DWT->LAR = 0xC5ACCE55; } } else { if ((lsr & DWT_LSR_Access_Msk) == 0) { /* Access is unlocked. Lock it */ DWT->LAR = 0; } } } #else /* CONFIG_CPU_CORTEX_M7 */ ARG_UNUSED(ena); #endif /* CONFIG_CPU_CORTEX_M7 */ } /** * @brief Enable DWT * * This routine enables the DWT unit. * * @return 0 */ static inline int z_arm_dwt_init(void) { /* Enable tracing */ CoreDebug->DEMCR |= CoreDebug_DEMCR_TRCENA_Msk; /* Unlock DWT access if any */ dwt_access(true); return 0; } /** * @brief Initialize and Enable the DWT cycle counter * * This routine enables the cycle counter and initializes its value to zero. 
* * @return 0 */ static inline int z_arm_dwt_init_cycle_counter(void) { /* Clear and enable the cycle counter */ DWT->CYCCNT = 0; DWT->CTRL |= DWT_CTRL_CYCCNTENA_Msk; /* Assert that the cycle counter is indeed implemented. * The field is called NOCYCCNT. So 1 means there is no cycle counter. */ __ASSERT((DWT->CTRL & DWT_CTRL_NOCYCCNT_Msk) == 0, "DWT implements no cycle counter. " "Cannot be used for cycle counting\n"); return 0; } /** * @brief Return the current value of the cycle counter * * This routine returns the current value of the DWT Cycle Counter (DWT.CYCCNT) * * @return the cycle counter value */ static inline uint32_t z_arm_dwt_get_cycles(void) { return DWT->CYCCNT; } /** * @brief Reset and start the DWT cycle counter * * This routine starts the cycle counter and resets its value to zero. */ static inline void z_arm_dwt_cycle_count_start(void) { DWT->CYCCNT = 0; DWT->CTRL |= DWT_CTRL_CYCCNTENA_Msk; } /** * @brief Enable the debug monitor handler * * This routine enables the DebugMonitor handler to service * data watchpoint events coming from DWT. The routine sets * the DebugMonitor exception priority to highest possible. */ static inline void z_arm_dwt_enable_debug_monitor(void) { /* * In case the CPU is left in Debug mode, the behavior will be * unpredictable if the DebugMonitor exception is triggered. We * assert that the CPU is in normal mode. */ __ASSERT((CoreDebug->DHCSR & CoreDebug_DHCSR_C_DEBUGEN_Msk) == 0, "Cannot enable DBM when CPU is in Debug mode\n"); #if defined(CONFIG_ARMV8_M_SE) && !defined(CONFIG_ARM_NONSECURE_FIRMWARE) /* * By design, the DebugMonitor exception is only employed * for null-pointer dereferencing detection, and enabling * that feature is not supported in Non-Secure builds. So * when enabling the DebugMonitor exception, assert that * it is not targeting the Non Secure domain. 
*/ __ASSERT((CoreDebug->DEMCR & DCB_DEMCR_SDME_Msk) != 0, "DebugMonitor targets Non-Secure\n"); #endif /* The DebugMonitor handler priority is set already * to the highest value (_EXC_FAULT_PRIO) during * system initialization. */ /* Enable debug monitor exception triggered on debug events */ CoreDebug->DEMCR |= CoreDebug_DEMCR_MON_EN_Msk; } #endif /* CONFIG_CORTEX_M_DWT */ #ifdef __cplusplus } #endif #endif /* _ASMLANGUAGE */ #endif /* ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_CORTEX_M_DWT_H_ */ ```
/content/code_sandbox/arch/arm/include/cortex_m/dwt.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,213
```objective-c /* * */ /** * @file * @brief Private kernel definitions (ARM) * * This file contains private kernel function definitions and various * other definitions for the 32-bit ARM Cortex-A/R processor architecture * family. * * This file is also included by assembly language files which must #define * _ASMLANGUAGE before including this header file. Note that kernel * assembly source files obtains structure offset values via "absolute symbols" * in the offsets.o module. */ #ifndef ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_A_R_KERNEL_ARCH_FUNC_H_ #define ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_A_R_KERNEL_ARCH_FUNC_H_ #ifdef __cplusplus extern "C" { #endif #ifndef _ASMLANGUAGE static ALWAYS_INLINE void arch_kernel_init(void) { } #ifndef CONFIG_USE_SWITCH static ALWAYS_INLINE void arch_thread_return_value_set(struct k_thread *thread, unsigned int value) { thread->arch.swap_return_value = value; } #else static ALWAYS_INLINE void arch_switch(void *switch_to, void **switched_from) { extern void z_arm_context_switch(struct k_thread *new, struct k_thread *old); struct k_thread *new = switch_to; struct k_thread *old = CONTAINER_OF(switched_from, struct k_thread, switch_handle); z_arm_context_switch(new, old); } #endif extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry, void *p1, void *p2, void *p3, uint32_t stack_end, uint32_t stack_start); extern void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf); #endif /* _ASMLANGUAGE */ #ifdef __cplusplus } #endif #endif /* ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_A_R_KERNEL_ARCH_FUNC_H_ */ ```
/content/code_sandbox/arch/arm/include/cortex_a_r/kernel_arch_func.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
380
```objective-c /* * */ /** * @file * @brief Management for Tightly Coupled Memory * */ #ifndef ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_CORTEX_A_R_TCM_H_ #define ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_CORTEX_A_R_TCM_H_ #ifdef _ASMLANGUAGE /* nothing */ #else #ifdef __cplusplus extern "C" { #endif /** * * @brief Disable ECC on Tightly Coupled Memory Banks * * Notes: * * This function shall only be called in Privileged mode. * */ void z_arm_tcm_disable_ecc(void); #ifdef __cplusplus } #endif #endif /* _ASMLANGUAGE */ #endif /* ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_CORTEX_A_R_TCM_H_ */ ```
/content/code_sandbox/arch/arm/include/cortex_a_r/tcm.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
161
```objective-c /* * */ /** * @file * @brief Exception/interrupt context helpers for Cortex-A and Cortex-R CPUs * * Exception/interrupt context helpers. */ #ifndef ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_A_R_EXCEPTION_H_ #define ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_A_R_EXCEPTION_H_ #include <zephyr/arch/cpu.h> #ifdef _ASMLANGUAGE /* nothing */ #else #include <zephyr/irq_offload.h> #ifdef __cplusplus extern "C" { #endif #ifdef CONFIG_IRQ_OFFLOAD extern volatile irq_offload_routine_t offload_routine; #endif /* Check the CPSR mode bits to see if we are in IRQ or FIQ mode */ static ALWAYS_INLINE bool arch_is_in_isr(void) { return (arch_curr_cpu()->nested != 0U); } static ALWAYS_INLINE bool arch_is_in_nested_exception(const struct arch_esf *esf) { return (arch_curr_cpu()->arch.exc_depth > 1U) ? (true) : (false); } #if defined(CONFIG_USERSPACE) /* * This function is used by privileged code to determine if the thread * associated with the stack frame is in user mode. */ static ALWAYS_INLINE bool z_arm_preempted_thread_in_user_mode(const struct arch_esf *esf) { return ((esf->basic.xpsr & CPSR_M_Msk) == CPSR_M_USR); } #endif #ifndef CONFIG_USE_SWITCH extern void z_arm_cortex_r_svc(void); #endif #ifdef __cplusplus } #endif #endif /* _ASMLANGUAGE */ #endif /* ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_A_R_EXCEPTION_H_ */ ```
/content/code_sandbox/arch/arm/include/cortex_a_r/exception.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
341
```objective-c /* * */ /** * @file * @brief Stack helpers for Cortex-A and Cortex-R CPUs * * Stack helper functions. */ #ifndef ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_CORTEX_A_R_STACK_H_ #define ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_CORTEX_A_R_STACK_H_ #ifdef __cplusplus extern "C" { #endif #ifdef _ASMLANGUAGE /* nothing */ #else extern void z_arm_init_stacks(void); #endif /* _ASMLANGUAGE */ #ifdef __cplusplus } #endif #endif /* ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_CORTEX_A_R_STACK_H_ */ ```
/content/code_sandbox/arch/arm/include/cortex_a_r/stack.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
130
```unknown menu "Nios II Options" depends on NIOS2 config ARCH string default "nios2" menu "Nios II Gen 2 Processor Options" config CPU_NIOS2_GEN2 bool default y select BUILD_OUTPUT_HEX select ARCH_HAS_EXTRA_EXCEPTION_INFO help This option signifies the use of a Nios II Gen 2 CPU endmenu menu "Nios II Family Options" config GEN_ISR_TABLES default y config GEN_IRQ_VECTOR_TABLE default n config NUM_IRQS int default 32 config HAS_MUL_INSTRUCTION bool config HAS_DIV_INSTRUCTION bool config HAS_MULX_INSTRUCTION bool config INCLUDE_RESET_VECTOR bool "Include Reset vector" default y help Include the reset vector stub, which enables instruction/data caches and then jumps to __start. This code is typically located at the very beginning of flash memory. You may need to omit this if using the nios2-download tool since it refuses to load data anywhere other than RAM. config EXTRA_EXCEPTION_INFO bool "Extra exception debug information" help Have exceptions print additional useful debugging information in human-readable form, at the expense of code size. For example, the cause code for an exception will be supplemented by a string describing what that cause code means. choice prompt "Global Pointer options" default GP_GLOBAL config GP_NONE bool "No global pointer" help Do not use global pointer relative offsets at all config GP_LOCAL bool "Local data global pointer references" help Use global pointer relative offsets for small globals declared in the same C file as the code that uses it. config GP_GLOBAL bool "Global data global pointer references" help Use global pointer relative offsets for small globals declared anywhere in the executable. Note that if any small globals that are put in alternate sections they must be declared in headers with proper __attribute__((section)) or the linker will error out. config GP_ALL_DATA bool "All data global pointer references" help Use GP relative access for all data in the program, not just small data. 
Use this if your board has 64K or less of RAM. endchoice endmenu endmenu ```
/content/code_sandbox/arch/nios2/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
488
```c /* * */ #include <zephyr/arch/cpu.h> #include <zephyr/sys/__assert.h> /** * Flush the entire instruction cache and pipeline. * * You will need to call this function if the application writes new program * text to memory, such as a boot copier or runtime synthesis of code. If the * new text was written with instructions that do not bypass cache memories, * this should immediately be followed by an invocation of * z_nios2_dcache_flush_all() so that cached instruction data is committed to * RAM. * * See Chapter 9 of the Nios II Gen 2 Software Developer's Handbook for more * information on cache considerations. */ #if ALT_CPU_ICACHE_SIZE > 0 void z_nios2_icache_flush_all(void) { uint32_t i; for (i = 0U; i < ALT_CPU_ICACHE_SIZE; i += ALT_CPU_ICACHE_LINE_SIZE) { z_nios2_icache_flush(i); } /* Get rid of any stale instructions in the pipeline */ z_nios2_pipeline_flush(); } #endif /** * Flush the entire data cache. * * This will be typically needed after writing new program text to memory * after flushing the instruction cache. * * The Nios II does not support hardware cache coherency for multi-master * or multi-processor systems and software coherency must be implemented * when communicating with shared memory. If support for this is introduced * in Zephyr additional APIs for flushing ranges of the data cache will need * to be implemented. * * See Chapter 9 of the Nios II Gen 2 Software Developer's Handbook for more * information on cache considerations. */ #if ALT_CPU_DCACHE_SIZE > 0 void z_nios2_dcache_flush_all(void) { uint32_t i; for (i = 0U; i < ALT_CPU_DCACHE_SIZE; i += ALT_CPU_DCACHE_LINE_SIZE) { z_nios2_dcache_flush(i); } } #endif /* * z_nios2_dcache_flush_no_writeback() is called to flush the data cache for a * memory region of length "len" bytes, starting at address "start". * * Any dirty lines in the data cache are NOT written back to memory. * Make sure you really want this behavior. 
If you aren't 100% sure, * use the z_nios2_dcache_flush() routine instead. */ #if ALT_CPU_DCACHE_SIZE > 0 void z_nios2_dcache_flush_no_writeback(void *start, uint32_t len) { uint8_t *i; uint8_t *end = ((char *) start) + len; for (i = start; i < end; i += ALT_CPU_DCACHE_LINE_SIZE) { __asm__ volatile ("initda (%0)" :: "r" (i)); } /* * For an unaligned flush request, we've got one more line left. * Note that this is dependent on ALT_CPU_DCACHE_LINE_SIZE to be a * multiple of 2 (which it always is). */ if (((uint32_t) start) & (ALT_CPU_DCACHE_LINE_SIZE - 1)) { __asm__ volatile ("initda (%0)" :: "r" (i)); } } #endif ```
/content/code_sandbox/arch/nios2/core/cache.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
704
```c /* * */ #include <zephyr/kernel.h> #include <ksched.h> /* forward declaration to asm function to adjust setup the arguments * to z_thread_entry() since this arch puts the first four arguments * in r4-r7 and not on the stack */ void z_thread_entry_wrapper(k_thread_entry_t, void *, void *, void *); struct init_stack_frame { /* top of the stack / most recently pushed */ /* Used by z_thread_entry_wrapper. pulls these off the stack and * into argument registers before calling z_thread_entry() */ k_thread_entry_t entry_point; void *arg1; void *arg2; void *arg3; /* least recently pushed */ }; void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, char *stack_ptr, k_thread_entry_t entry, void *arg1, void *arg2, void *arg3) { struct init_stack_frame *iframe; /* Initial stack frame data, stored at the base of the stack */ iframe = Z_STACK_PTR_TO_FRAME(struct init_stack_frame, stack_ptr); /* Setup the initial stack frame */ iframe->entry_point = entry; iframe->arg1 = arg1; iframe->arg2 = arg2; iframe->arg3 = arg3; thread->callee_saved.sp = (uint32_t)iframe; thread->callee_saved.ra = (uint32_t)z_thread_entry_wrapper; thread->callee_saved.key = NIOS2_STATUS_PIE_MSK; /* Leave the rest of thread->callee_saved junk */ } ```
/content/code_sandbox/arch/nios2/core/thread.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
331
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/kernel_structs.h> #include <zephyr/irq_offload.h> volatile irq_offload_routine_t _offload_routine; static volatile const void *offload_param; /* Called by _enter_irq if it was passed 0 for ipending. * Just in case the offload routine itself generates an unhandled * exception, clear the offload_routine global before executing. */ void z_irq_do_offload(void) { irq_offload_routine_t tmp; if (!_offload_routine) { return; } tmp = _offload_routine; _offload_routine = NULL; tmp((const void *)offload_param); } void arch_irq_offload(irq_offload_routine_t routine, const void *parameter) { unsigned int key; key = irq_lock(); _offload_routine = routine; offload_param = parameter; __asm__ volatile ("trap"); irq_unlock(key); } ```
/content/code_sandbox/arch/nios2/core/irq_offload.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
214
```unknown /* * */ #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> /* exports */ GTEXT(__start) GTEXT(__reset) /* imports */ GTEXT(z_prep_c) GTEXT(z_interrupt_stacks) /* Allow use of r1/at (the assembler temporary register) in this * code, normally reserved for internal assembler use */ .set noat #if CONFIG_INCLUDE_RESET_VECTOR /* * Reset vector entry point into the system. Placed into special 'reset' * section so that the linker puts this at ALT_CPU_RESET_ADDR defined in * system.h * * This code can be at most 0x20 bytes, since the exception vector for Nios II * is usually configured to be 0x20 past the reset vector. */ SECTION_FUNC(reset, __reset) #if ALT_CPU_ICACHE_SIZE > 0 /* Aside from the instruction cache line associated with the reset * vector, the contents of the cache memories are indeterminate after * reset. To ensure cache coherency after reset, the reset handler * located at the reset vector must immediately initialize the * instruction cache. Next, either the reset handler or a subsequent * routine should proceed to initialize the data cache. * * The cache memory sizes are *always* a power of 2. */ #if ALT_CPU_ICACHE_SIZE > 0x8000 movhi r2, %hi(ALT_CPU_ICACHE_SIZE) #else movui r2, ALT_CPU_ICACHE_SIZE #endif 0: /* If ECC present, need to execute initd for each word address * to ensure ECC parity bits in data RAM get initialized */ #ifdef ALT_CPU_ECC_PRESENT subi r2, r2, 4 #else subi r2, r2, ALT_CPU_ICACHE_LINE_SIZE #endif initi r2 bgt r2, zero, 0b #endif /* ALT_CPU_ICACHE_SIZE > 0 */ /* Done all we need to do here, jump to __text_start */ movhi r1, %hi(__start) ori r1, r1, %lo(__start) jmp r1 #endif /* CONFIG_INCLUDE_RESET_VECTOR */ /* Remainder of asm-land initialization code before we can jump into * the C domain */ SECTION_FUNC(TEXT, __start) /* TODO if shadow register sets enabled, ensure we are in set 0 * GH-1821 */ /* Initialize the data cache if booting from bare metal. 
If * we're not booting from our reset vector, either by a bootloader * or JTAG, assume caches already initialized. */ #if ALT_CPU_DCACHE_SIZE > 0 && defined(CONFIG_INCLUDE_RESET_VECTOR) /* Per documentation data cache size is always a power of two. */ #if ALT_CPU_DCACHE_SIZE > 0x8000 movhi r2, %hi(ALT_CPU_DCACHE_SIZE) #else movui r2, ALT_CPU_DCACHE_SIZE #endif 0: /* If ECC present, need to execute initd for each word address * to ensure ECC parity bits in data RAM get initialized */ #ifdef ALT_CPU_ECC_PRESENT subi r2, r2, 4 #else subi r2, r2, ALT_CPU_DCACHE_LINE_SIZE #endif initd 0(r2) bgt r2, zero, 0b #endif /* ALT_CPU_DCACHE_SIZE && defined(CONFIG_INCLUDE_RESET_VECTOR) */ #ifdef CONFIG_INIT_STACKS /* Pre-populate all bytes in z_interrupt_stacks with 0xAA * init.c enforces that the z_interrupt_stacks pointer * and CONFIG_ISR_STACK_SIZE are a multiple of ARCH_STACK_PTR_ALIGN (4) */ movhi r1, %hi(z_interrupt_stacks) ori r1, r1, %lo(z_interrupt_stacks) movhi r2, %hi(CONFIG_ISR_STACK_SIZE) ori r2, r2, %lo(CONFIG_ISR_STACK_SIZE) /* Put constant 0xaaaaaaaa in r3 */ movhi r3, 0xaaaa ori r3, r3, 0xaaaa 1: /* Loop through the z_interrupt_stacks treating it as an array of * uint32_t, setting each element to r3 */ stw r3, (r1) subi r2, r2, 4 addi r1, r1, 4 blt r0, r2, 1b #endif /* Set up the initial stack pointer to the interrupt stack, safe * to use this as the CPU boots up with interrupts disabled and we * don't turn them on until much later, when the kernel is on * the main stack */ movhi sp, %hi(z_interrupt_stacks) ori sp, sp, %lo(z_interrupt_stacks) addi sp, sp, CONFIG_ISR_STACK_SIZE #if defined(CONFIG_GP_LOCAL) || defined(CONFIG_GP_GLOBAL) || \ defined(CONFIG_GP_ALL_DATA) /* Initialize global pointer with the linker variable we set */ movhi gp, %hi(_gp) ori gp, gp, %lo(_gp) #endif /* TODO if shadow register sets enabled, interate through them to set * up. Need to clear r0, write gp, set the exception stack pointer * GH-1821 */ /* Jump into C domain. 
z_prep_c zeroes BSS, copies rw data into RAM, * and then enters z_cstart */ call z_prep_c ```
/content/code_sandbox/arch/nios2/core/crt0.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,201
```c /* * */ /** * @file * @brief Full C support initialization * * * Initialization of full C support: zero the .bss, copy the .data if XIP, * call z_cstart(). * * Stack is available in this module, but not the global data/bss until their * initialization is performed. */ #include <zephyr/types.h> #include <zephyr/toolchain.h> #include <zephyr/linker/linker-defs.h> #include <zephyr/kernel_structs.h> #include <kernel_internal.h> /** * @brief Prepare to and run C code * * This routine prepares for the execution of and runs C code. */ void z_prep_c(void) { z_bss_zero(); z_data_copy(); /* In most XIP scenarios we copy the exception code into RAM, so need * to flush instruction cache. */ #ifdef CONFIG_XIP z_nios2_icache_flush_all(); #if ALT_CPU_ICACHE_SIZE > 0 /* Only need to flush the data cache here if there actually is an * instruction cache, so that the cached instruction data written is * actually committed. */ z_nios2_dcache_flush_all(); #endif #endif z_cstart(); CODE_UNREACHABLE; } ```
/content/code_sandbox/arch/nios2/core/prep_c.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
272
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/kernel_structs.h> #ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE void arch_cpu_idle(void) { /* Do nothing but unconditionally unlock interrupts and return to the * caller. This CPU does not have any kind of power saving instruction. */ irq_unlock(NIOS2_STATUS_PIE_MSK); } #endif #ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE void arch_cpu_atomic_idle(unsigned int key) { /* Do nothing but restore IRQ state. This CPU does not have any * kind of power saving instruction. */ irq_unlock(key); } #endif ```
/content/code_sandbox/arch/nios2/core/cpu_idle.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
136
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/sys_clock.h> #include <zephyr/timing/timing.h> #include "altera_avalon_timer_regs.h" #define NIOS2_SUBTRACT_CLOCK_CYCLES(val) \ ((IORD_ALTERA_AVALON_TIMER_PERIODH(TIMER_0_BASE) << 16 | \ (IORD_ALTERA_AVALON_TIMER_PERIODL(TIMER_0_BASE))) - \ ((uint32_t)val)) #define TIMING_INFO_OS_GET_TIME() \ (NIOS2_SUBTRACT_CLOCK_CYCLES( \ ((uint32_t)IORD_ALTERA_AVALON_TIMER_SNAPH(TIMER_0_BASE) \ << 16) | \ ((uint32_t)IORD_ALTERA_AVALON_TIMER_SNAPL(TIMER_0_BASE)))) void arch_timing_init(void) { } void arch_timing_start(void) { } void arch_timing_stop(void) { } timing_t arch_timing_counter_get(void) { IOWR_ALTERA_AVALON_TIMER_SNAPL(TIMER_0_BASE, 10); return TIMING_INFO_OS_GET_TIME(); } uint64_t arch_timing_cycles_get(volatile timing_t *const start, volatile timing_t *const end) { return (*end - *start); } uint64_t arch_timing_freq_get(void) { return sys_clock_hw_cycles_per_sec(); } uint64_t arch_timing_cycles_to_ns(uint64_t cycles) { return k_cyc_to_ns_floor64(cycles); } uint64_t arch_timing_cycles_to_ns_avg(uint64_t cycles, uint32_t count) { return arch_timing_cycles_to_ns(cycles) / count; } uint32_t arch_timing_freq_get_mhz(void) { return (uint32_t)(arch_timing_freq_get() / 1000000U); } ```
/content/code_sandbox/arch/nios2/core/timing.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
399
```unknown /* * */ #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #include <offsets_short.h> /* exports */ GTEXT(arch_swap) GTEXT(z_thread_entry_wrapper) /* imports */ GTEXT(_k_neg_eagain) /* unsigned int arch_swap(unsigned int key) * * Always called with interrupts locked */ SECTION_FUNC(exception.other, arch_swap) #if defined(CONFIG_INSTRUMENT_THREAD_SWITCHING) /* Need to preserve r4 as it has the function argument. */ addi sp, sp, -12 stw ra, 8(sp) stw fp, 4(sp) stw r4, 0(sp) call z_thread_mark_switched_out ldw r4, 0(sp) ldw fp, 4(sp) ldw ra, 8(sp) addi sp, sp, 12 #endif /* Get a reference to _kernel in r10 */ movhi r10, %hi(_kernel) ori r10, r10, %lo(_kernel) /* Get the pointer to kernel->current */ ldw r11, _kernel_offset_to_current(r10) /* Store all the callee saved registers. We either got here via * an exception or from a cooperative invocation of arch_swap() from C * domain, so all the caller-saved registers have already been * saved by the exception asm or the calling C code already. 
*/ stw r16, _thread_offset_to_r16(r11) stw r17, _thread_offset_to_r17(r11) stw r18, _thread_offset_to_r18(r11) stw r19, _thread_offset_to_r19(r11) stw r20, _thread_offset_to_r20(r11) stw r21, _thread_offset_to_r21(r11) stw r22, _thread_offset_to_r22(r11) stw r23, _thread_offset_to_r23(r11) stw r28, _thread_offset_to_r28(r11) stw ra, _thread_offset_to_ra(r11) stw sp, _thread_offset_to_sp(r11) /* r4 has the 'key' argument which is the result of irq_lock() * before this was called */ stw r4, _thread_offset_to_key(r11) /* Populate default return value */ movhi r5, %hi(_k_neg_eagain) ori r5, r5, %lo(_k_neg_eagain) ldw r4, (r5) stw r4, _thread_offset_to_retval(r11) /* get cached thread to run */ ldw r2, _kernel_offset_to_ready_q_cache(r10) /* At this point r2 points to the next thread to be swapped in */ /* the thread to be swapped in is now the current thread */ stw r2, _kernel_offset_to_current(r10) /* Restore callee-saved registers and switch to the incoming * thread's stack */ ldw r16, _thread_offset_to_r16(r2) ldw r17, _thread_offset_to_r17(r2) ldw r18, _thread_offset_to_r18(r2) ldw r19, _thread_offset_to_r19(r2) ldw r20, _thread_offset_to_r20(r2) ldw r21, _thread_offset_to_r21(r2) ldw r22, _thread_offset_to_r22(r2) ldw r23, _thread_offset_to_r23(r2) ldw r28, _thread_offset_to_r28(r2) ldw ra, _thread_offset_to_ra(r2) ldw sp, _thread_offset_to_sp(r2) /* We need to irq_unlock(current->coopReg.key); * key was supplied as argument to arch_swap(). Fetch it. */ ldw r3, _thread_offset_to_key(r2) /* * Load return value into r2 (return value register). -EAGAIN unless * someone previously called arch_thread_return_value_set(). Do this * before we potentially unlock interrupts. 
*/ ldw r2, _thread_offset_to_retval(r2) /* Now do irq_unlock(current->coopReg.key) */ #if (ALT_CPU_NUM_OF_SHADOW_REG_SETS > 0) || \ (defined ALT_CPU_EIC_PRESENT) || \ (defined ALT_CPU_MMU_PRESENT) || \ (defined ALT_CPU_MPU_PRESENT) andi r3, r3, NIOS2_STATUS_PIE_MSK beq r3, zero, no_unlock rdctl r3, status ori r3, r3, NIOS2_STATUS_PIE_MSK wrctl status, r3 no_unlock: #else wrctl status, r3 #endif #if defined(CONFIG_INSTRUMENT_THREAD_SWITCHING) /* Also need to preserve r2, r3 as return values */ addi sp, sp, -20 stw ra, 16(sp) stw fp, 12(sp) stw r4, 8(sp) stw r3, 4(sp) stw r2, 0(sp) call z_thread_mark_switched_in ldw r2, 0(sp) ldw r3, 4(sp) ldw r4, 8(sp) ldw fp, 12(sp) ldw ra, 16(sp) addi sp, sp, 20 #endif ret /* void z_thread_entry_wrapper(void) */ SECTION_FUNC(TEXT, z_thread_entry_wrapper) /* This all corresponds to struct init_stack_frame defined in * thread.c. We need to take this stuff off the stack and put * it in the appropriate registers */ /* Can't return from here, just put NULL in ra */ movi ra, 0 /* Calling convention has first 4 arguments in registers r4-r7. */ ldw r4, 0(sp) ldw r5, 4(sp) ldw r6, 8(sp) ldw r7, 12(sp) /* pop all the stuff that we just loaded into registers */ addi sp, sp, 16 call z_thread_entry ```
/content/code_sandbox/arch/nios2/core/swap.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,373
```unknown /* * */ #include <zephyr/toolchain.h> GTEXT(__start) ```
/content/code_sandbox/arch/nios2/core/reset.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
18
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/arch/cpu.h> #include <zephyr/kernel_structs.h> #include <inttypes.h> #include <zephyr/logging/log.h> LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); FUNC_NORETURN void z_nios2_fatal_error(unsigned int reason, const struct arch_esf *esf) { #if CONFIG_EXCEPTION_DEBUG if (esf != NULL) { /* Subtract 4 from EA since we added 4 earlier so that the * faulting instruction isn't retried. * * TODO: Only caller-saved registers get saved upon exception * entry. We may want to introduce a config option to save and * dump all registers, at the expense of some stack space. */ LOG_ERR("Faulting instruction: 0x%08x", esf->instr - 4); LOG_ERR(" r1: 0x%08x r2: 0x%08x r3: 0x%08x r4: 0x%08x", esf->r1, esf->r2, esf->r3, esf->r4); LOG_ERR(" r5: 0x%08x r6: 0x%08x r7: 0x%08x r8: 0x%08x", esf->r5, esf->r6, esf->r7, esf->r8); LOG_ERR(" r9: 0x%08x r10: 0x%08x r11: 0x%08x r12: 0x%08x", esf->r9, esf->r10, esf->r11, esf->r12); LOG_ERR(" r13: 0x%08x r14: 0x%08x r15: 0x%08x ra: 0x%08x", esf->r13, esf->r14, esf->r15, esf->ra); LOG_ERR("estatus: %08x", esf->estatus); } #endif /* CONFIG_EXCEPTION_DEBUG */ z_fatal_error(reason, esf); CODE_UNREACHABLE; } #if defined(CONFIG_EXTRA_EXCEPTION_INFO) && \ (defined(CONFIG_PRINTK) || defined(CONFIG_LOG)) \ && defined(ALT_CPU_HAS_EXTRA_EXCEPTION_INFO) static char *cause_str(uint32_t cause_code) { switch (cause_code) { case 0: return "reset"; case 1: return "processor-only reset request"; case 2: return "interrupt"; case 3: return "trap"; case 4: return "unimplemented instruction"; case 5: return "illegal instruction"; case 6: return "misaligned data address"; case 7: return "misaligned destination address"; case 8: return "division error"; case 9: return "supervisor-only instruction address"; case 10: return "supervisor-only instruction"; case 11: return "supervisor-only data address"; case 12: return "TLB miss"; case 13: return "TLB permission violation (execute)"; case 14: return "TLB permission 
violation (read)"; case 15: return "TLB permission violation (write)"; case 16: return "MPU region violation (instruction)"; case 17: return "MPU region violation (data)"; case 18: return "ECC TLB error"; case 19: return "ECC fetch error (instruction)"; case 20: return "ECC register file error"; case 21: return "ECC data error"; case 22: return "ECC data cache writeback error"; case 23: return "bus instruction fetch error"; case 24: return "bus data region violation"; default: return "unknown"; } } #endif FUNC_NORETURN void _Fault(const struct arch_esf *esf) { #if defined(CONFIG_PRINTK) || defined(CONFIG_LOG) /* Unfortunately, completely unavailable on Nios II/e cores */ #ifdef ALT_CPU_HAS_EXTRA_EXCEPTION_INFO uint32_t exc_reg, badaddr_reg, eccftl; enum nios2_exception_cause cause; exc_reg = z_nios2_creg_read(NIOS2_CR_EXCEPTION); /* Bit 31 indicates potentially fatal ECC error */ eccftl = (exc_reg & NIOS2_EXCEPTION_REG_ECCFTL_MASK) != 0U; /* Bits 2-6 contain the cause code */ cause = (exc_reg & NIOS2_EXCEPTION_REG_CAUSE_MASK) >> NIOS2_EXCEPTION_REG_CAUSE_OFST; LOG_ERR("Exception cause: %d ECCFTL: 0x%x", cause, eccftl); #if CONFIG_EXTRA_EXCEPTION_INFO LOG_ERR("reason: %s", cause_str(cause)); #endif if (BIT(cause) & NIOS2_BADADDR_CAUSE_MASK) { badaddr_reg = z_nios2_creg_read(NIOS2_CR_BADADDR); LOG_ERR("Badaddr: 0x%x", badaddr_reg); } #endif /* ALT_CPU_HAS_EXTRA_EXCEPTION_INFO */ #endif /* CONFIG_PRINTK || CONFIG_LOG */ z_nios2_fatal_error(K_ERR_CPU_EXCEPTION, esf); } #ifdef ALT_CPU_HAS_DEBUG_STUB FUNC_NORETURN void arch_system_halt(unsigned int reason) { ARG_UNUSED(reason); z_nios2_break(); CODE_UNREACHABLE; } #endif ```
/content/code_sandbox/arch/nios2/core/fatal.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,213
```unknown /* * */ #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #include <offsets_short.h> /* exports */ GTEXT(_exception) /* import */ GTEXT(_Fault) GTEXT(arch_swap) #ifdef CONFIG_IRQ_OFFLOAD GTEXT(z_irq_do_offload) GTEXT(_offload_routine) #endif /* Allows use of r1/at register, otherwise reserved for assembler use */ .set noat /* Placed into special 'exception' section so that the linker can put this code * at ALT_CPU_EXCEPTION_ADDR defined in system.h * * This is the common entry point for processor exceptions and interrupts from * the Internal Interrupt Controller (IIC). * * If the External (EIC) controller is in use, then we will never get here on * behalf of an interrupt, instead the EIC driver will have set up a vector * table and the processor will jump directly into the appropriate table * entry. */ SECTION_FUNC(exception.entry, _exception) /* Reserve thread stack space for saving context */ subi sp, sp, __struct_arch_esf_SIZEOF /* Preserve all caller-saved registers onto the thread's stack */ stw ra, __struct_arch_esf_ra_OFFSET(sp) stw r1, __struct_arch_esf_r1_OFFSET(sp) stw r2, __struct_arch_esf_r2_OFFSET(sp) stw r3, __struct_arch_esf_r3_OFFSET(sp) stw r4, __struct_arch_esf_r4_OFFSET(sp) stw r5, __struct_arch_esf_r5_OFFSET(sp) stw r6, __struct_arch_esf_r6_OFFSET(sp) stw r7, __struct_arch_esf_r7_OFFSET(sp) stw r8, __struct_arch_esf_r8_OFFSET(sp) stw r9, __struct_arch_esf_r9_OFFSET(sp) stw r10, __struct_arch_esf_r10_OFFSET(sp) stw r11, __struct_arch_esf_r11_OFFSET(sp) stw r12, __struct_arch_esf_r12_OFFSET(sp) stw r13, __struct_arch_esf_r13_OFFSET(sp) stw r14, __struct_arch_esf_r14_OFFSET(sp) stw r15, __struct_arch_esf_r15_OFFSET(sp) /* Store value of estatus control register */ rdctl et, estatus stw et, __struct_arch_esf_estatus_OFFSET(sp) /* ea-4 is the address of the instruction when the exception happened, * put this in the stack frame as well */ addi r15, ea, -4 stw r15, __struct_arch_esf_instr_OFFSET(sp) /* Figure out whether we 
are here because of an interrupt or an * exception. If an interrupt, switch stacks and enter IRQ handling * code. If an exception, remain on current stack and enter exception * handing code. From the CPU manual, ipending must be nonzero and * estatis.PIE must be enabled for this to be considered an interrupt. * * Stick ipending in r4 since it will be an arg for _enter_irq */ rdctl r4, ipending beq r4, zero, not_interrupt /* We stashed estatus in et earlier */ andi r15, et, 1 beq r15, zero, not_interrupt is_interrupt: /* If we get here, this is an interrupt */ /* Grab a reference to _kernel in r10 so we can determine the * current irq stack pointer */ movhi r10, %hi(_kernel) ori r10, r10, %lo(_kernel) /* Stash a copy of thread's sp in r12 so that we can put it on the IRQ * stack */ mov r12, sp /* Switch to interrupt stack */ ldw sp, _kernel_offset_to_irq_stack(r10) /* Store thread stack pointer onto IRQ stack */ addi sp, sp, -4 stw r12, 0(sp) on_irq_stack: /* Enter C interrupt handling code. Value of ipending will be the * function parameter since we put it in r4 */ call _enter_irq /* Interrupt handler finished and the interrupt should be serviced * now, the appropriate bits in ipending should be cleared */ /* Get a reference to _kernel again in r10 */ movhi r10, %hi(_kernel) ori r10, r10, %lo(_kernel) #ifdef CONFIG_PREEMPT_ENABLED ldw r11, _kernel_offset_to_current(r10) /* Determine whether the exception of the ISR requires context * switch */ /* Call into the kernel to see if a scheduling decision is necessary */ ldw r2, _kernel_offset_to_ready_q_cache(r10) beq r2, r11, no_reschedule /* * A context reschedule is required: keep the volatile registers of * the interrupted thread on the context's stack. Utilize * the existing arch_swap() primitive to save the remaining * thread's registers (including floating point) and perform * a switch to the new thread. */ /* We put the thread stack pointer on top of the IRQ stack before * we switched stacks. 
Restore it to go back to thread stack */ ldw sp, 0(sp) /* Argument to Swap() is estatus since that's the state of the * status register before the exception happened. When coming * out of the context switch we need this info to restore * IRQ lock state. We put this value in et earlier. */ mov r4, et call arch_swap jmpi _exception_exit #else jmpi no_reschedule #endif /* CONFIG_PREEMPT_ENABLED */ not_interrupt: /* Since this wasn't an interrupt we're not going to restart the * faulting instruction. * * We earlier put ea - 4 in the stack frame, replace it with just ea */ stw ea, __struct_arch_esf_instr_OFFSET(sp) #ifdef CONFIG_IRQ_OFFLOAD /* Check the contents of _offload_routine. If non-NULL, jump into * the interrupt code anyway. */ movhi r10, %hi(_offload_routine) ori r10, r10, %lo(_offload_routine) ldw r11, (r10) bne r11, zero, is_interrupt #endif _exception_enter_fault: /* If we get here, the exception wasn't in interrupt or an * invocation of irq_oflload(). Let _Fault() handle it in * C domain */ mov r4, sp call _Fault jmpi _exception_exit no_reschedule: /* We put the thread stack pointer on top of the IRQ stack before * we switched stacks. Restore it to go back to thread stack */ ldw sp, 0(sp) /* Fall through */ _exception_exit: /* We are on the thread stack. Restore all saved registers * and return to the interrupted context */ /* Return address from the exception */ ldw ea, __struct_arch_esf_instr_OFFSET(sp) /* Restore estatus * XXX is this right??? 
*/ ldw r5, __struct_arch_esf_estatus_OFFSET(sp) wrctl estatus, r5 /* Restore caller-saved registers */ ldw ra, __struct_arch_esf_ra_OFFSET(sp) ldw r1, __struct_arch_esf_r1_OFFSET(sp) ldw r2, __struct_arch_esf_r2_OFFSET(sp) ldw r3, __struct_arch_esf_r3_OFFSET(sp) ldw r4, __struct_arch_esf_r4_OFFSET(sp) ldw r5, __struct_arch_esf_r5_OFFSET(sp) ldw r6, __struct_arch_esf_r6_OFFSET(sp) ldw r7, __struct_arch_esf_r7_OFFSET(sp) ldw r8, __struct_arch_esf_r8_OFFSET(sp) ldw r9, __struct_arch_esf_r9_OFFSET(sp) ldw r10, __struct_arch_esf_r10_OFFSET(sp) ldw r11, __struct_arch_esf_r11_OFFSET(sp) ldw r12, __struct_arch_esf_r12_OFFSET(sp) ldw r13, __struct_arch_esf_r13_OFFSET(sp) ldw r14, __struct_arch_esf_r14_OFFSET(sp) ldw r15, __struct_arch_esf_r15_OFFSET(sp) /* Put the stack pointer back where it was when we entered * exception state */ addi sp, sp, __struct_arch_esf_SIZEOF /* All done, copy estatus into status and transfer to ea */ eret ```
/content/code_sandbox/arch/nios2/core/exception.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,956
```c /* * */ /** * @file * @brief Nios II C-domain interrupt management code for use with Internal * Interrupt Controller (IIC) */ #include <zephyr/kernel.h> #include <zephyr/kernel_structs.h> #include <zephyr/arch/cpu.h> #include <zephyr/irq.h> #include <zephyr/sw_isr_table.h> #include <ksched.h> #include <kswap.h> #include <zephyr/tracing/tracing.h> #include <zephyr/logging/log.h> LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); FUNC_NORETURN void z_irq_spurious(const void *unused) { ARG_UNUSED(unused); LOG_ERR("Spurious interrupt detected! ipending: %x", z_nios2_creg_read(NIOS2_CR_IPENDING)); z_nios2_fatal_error(K_ERR_SPURIOUS_IRQ, NULL); } void arch_irq_enable(unsigned int irq) { uint32_t ienable; unsigned int key; key = irq_lock(); ienable = z_nios2_creg_read(NIOS2_CR_IENABLE); ienable |= BIT(irq); z_nios2_creg_write(NIOS2_CR_IENABLE, ienable); irq_unlock(key); }; void arch_irq_disable(unsigned int irq) { uint32_t ienable; unsigned int key; key = irq_lock(); ienable = z_nios2_creg_read(NIOS2_CR_IENABLE); ienable &= ~BIT(irq); z_nios2_creg_write(NIOS2_CR_IENABLE, ienable); irq_unlock(key); }; int arch_irq_is_enabled(unsigned int irq) { uint32_t ienable; ienable = z_nios2_creg_read(NIOS2_CR_IENABLE); return ienable & BIT(irq); } /** * @brief Interrupt demux function * * Given a bitfield of pending interrupts, execute the appropriate handler * * @param ipending Bitfield of interrupts */ void _enter_irq(uint32_t ipending) { int index; _kernel.cpus[0].nested++; #ifdef CONFIG_IRQ_OFFLOAD z_irq_do_offload(); #endif while (ipending) { struct _isr_table_entry *ite; #ifdef CONFIG_TRACING_ISR sys_trace_isr_enter(); #endif index = find_lsb_set(ipending) - 1; ipending &= ~BIT(index); ite = &_sw_isr_table[index]; ite->isr(ite->arg); #ifdef CONFIG_TRACING_ISR sys_trace_isr_exit(); #endif } _kernel.cpus[0].nested--; #ifdef CONFIG_STACK_SENTINEL z_check_stack_sentinel(); #endif } #ifdef CONFIG_DYNAMIC_INTERRUPTS int arch_irq_connect_dynamic(unsigned int irq, unsigned int 
priority, void (*routine)(const void *parameter), const void *parameter, uint32_t flags) { ARG_UNUSED(flags); ARG_UNUSED(priority); z_isr_install(irq, routine, parameter); return irq; } #endif /* CONFIG_DYNAMIC_INTERRUPTS */ ```
/content/code_sandbox/arch/nios2/core/irq_manage.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
636
```c /* * */ /** * @file * @brief Nios II kernel structure member offset definition file * * This module is responsible for the generation of the absolute symbols whose * value represents the member offsets for various Nios II kernel * structures. * * All of the absolute symbols defined by this module will be present in the * final kernel ELF image (due to the linker's reference to the _OffsetAbsSyms * symbol). * * INTERNAL * It is NOT necessary to define the offset for every member of a structure. * Typically, only those members that are accessed by assembly language routines * are defined; however, it doesn't hurt to define all fields for the sake of * completeness. */ #include <zephyr/kernel.h> #include <kernel_arch_data.h> #include <gen_offset.h> #include <kernel_offsets.h> /* struct coop member offsets */ GEN_OFFSET_SYM(_callee_saved_t, r16); GEN_OFFSET_SYM(_callee_saved_t, r17); GEN_OFFSET_SYM(_callee_saved_t, r18); GEN_OFFSET_SYM(_callee_saved_t, r19); GEN_OFFSET_SYM(_callee_saved_t, r20); GEN_OFFSET_SYM(_callee_saved_t, r21); GEN_OFFSET_SYM(_callee_saved_t, r22); GEN_OFFSET_SYM(_callee_saved_t, r23); GEN_OFFSET_SYM(_callee_saved_t, r28); GEN_OFFSET_SYM(_callee_saved_t, ra); GEN_OFFSET_SYM(_callee_saved_t, sp); GEN_OFFSET_SYM(_callee_saved_t, key); GEN_OFFSET_SYM(_callee_saved_t, retval); GEN_OFFSET_STRUCT(arch_esf, ra); GEN_OFFSET_STRUCT(arch_esf, r1); GEN_OFFSET_STRUCT(arch_esf, r2); GEN_OFFSET_STRUCT(arch_esf, r3); GEN_OFFSET_STRUCT(arch_esf, r4); GEN_OFFSET_STRUCT(arch_esf, r5); GEN_OFFSET_STRUCT(arch_esf, r6); GEN_OFFSET_STRUCT(arch_esf, r7); GEN_OFFSET_STRUCT(arch_esf, r8); GEN_OFFSET_STRUCT(arch_esf, r9); GEN_OFFSET_STRUCT(arch_esf, r10); GEN_OFFSET_STRUCT(arch_esf, r11); GEN_OFFSET_STRUCT(arch_esf, r12); GEN_OFFSET_STRUCT(arch_esf, r13); GEN_OFFSET_STRUCT(arch_esf, r14); GEN_OFFSET_STRUCT(arch_esf, r15); GEN_OFFSET_STRUCT(arch_esf, estatus); GEN_OFFSET_STRUCT(arch_esf, instr); GEN_ABSOLUTE_SYM(__struct_arch_esf_SIZEOF, sizeof(struct arch_esf)); 
GEN_ABS_SYM_END ```
/content/code_sandbox/arch/nios2/core/offsets/offsets.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
541
```objective-c /* * */ /** * @file * @brief Private kernel definitions * * This file contains private kernel function/macro definitions and various * other definitions for the Nios II processor architecture. * * This file is also included by assembly language files which must #define * _ASMLANGUAGE before including this header file. Note that kernel * assembly source files obtains structure offset values via "absolute * symbols" in the offsets.o module. */ #ifndef ZEPHYR_ARCH_NIOS2_INCLUDE_KERNEL_ARCH_FUNC_H_ #define ZEPHYR_ARCH_NIOS2_INCLUDE_KERNEL_ARCH_FUNC_H_ #include <kernel_arch_data.h> #ifdef __cplusplus extern "C" { #endif #ifndef _ASMLANGUAGE static ALWAYS_INLINE void arch_kernel_init(void) { } static ALWAYS_INLINE void arch_thread_return_value_set(struct k_thread *thread, unsigned int value) { thread->callee_saved.retval = value; } FUNC_NORETURN void z_nios2_fatal_error(unsigned int reason, const struct arch_esf *esf); static inline bool arch_is_in_isr(void) { return _kernel.cpus[0].nested != 0U; } #ifdef CONFIG_IRQ_OFFLOAD void z_irq_do_offload(void); #endif #if ALT_CPU_ICACHE_SIZE > 0 void z_nios2_icache_flush_all(void); #else #define z_nios2_icache_flush_all() do { } while (false) #endif #if ALT_CPU_DCACHE_SIZE > 0 void z_nios2_dcache_flush_all(void); void z_nios2_dcache_flush_no_writeback(void *start, uint32_t len); #else #define z_nios2_dcache_flush_all() do { } while (false) #define z_nios2_dcache_flush_no_writeback(x, y) do { } while (false) #endif #endif /* _ASMLANGUAGE */ #ifdef __cplusplus } #endif #endif /* ZEPHYR_ARCH_NIOS2_INCLUDE_KERNEL_ARCH_FUNC_H_ */ ```
/content/code_sandbox/arch/nios2/include/kernel_arch_func.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
414
```objective-c /* * */ /** * @file * @brief Private kernel definitions * * This file contains private kernel structures definitions and various * other definitions for the Nios II processor architecture. * * This file is also included by assembly language files which must #define * _ASMLANGUAGE before including this header file. Note that kernel * assembly source files obtains structure offset values via "absolute * symbols" in the offsets.o module. */ #ifndef ZEPHYR_ARCH_NIOS2_INCLUDE_KERNEL_ARCH_DATA_H_ #define ZEPHYR_ARCH_NIOS2_INCLUDE_KERNEL_ARCH_DATA_H_ #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #include <zephyr/arch/cpu.h> #ifndef _ASMLANGUAGE #include <zephyr/kernel.h> #include <zephyr/types.h> #include <zephyr/sys/util.h> #include <zephyr/sys/dlist.h> #ifdef __cplusplus extern "C" { #endif #ifdef __cplusplus } #endif #endif /* _ASMLANGUAGE */ #endif /* ZEPHYR_ARCH_NIOS2_INCLUDE_KERNEL_ARCH_DATA_H_ */ ```
/content/code_sandbox/arch/nios2/include/kernel_arch_data.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
235
```objective-c /* * */ #ifndef ZEPHYR_ARCH_NIOS2_INCLUDE_OFFSETS_SHORT_ARCH_H_ #define ZEPHYR_ARCH_NIOS2_INCLUDE_OFFSETS_SHORT_ARCH_H_ #include <zephyr/offsets.h> /* kernel */ /* nothing for now */ /* end - kernel */ /* threads */ #define _thread_offset_to_r16 \ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_r16_OFFSET) #define _thread_offset_to_r17 \ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_r17_OFFSET) #define _thread_offset_to_r18 \ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_r18_OFFSET) #define _thread_offset_to_r19 \ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_r19_OFFSET) #define _thread_offset_to_r20 \ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_r20_OFFSET) #define _thread_offset_to_r21 \ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_r21_OFFSET) #define _thread_offset_to_r22 \ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_r22_OFFSET) #define _thread_offset_to_r23 \ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_r23_OFFSET) #define _thread_offset_to_r28 \ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_r28_OFFSET) #define _thread_offset_to_ra \ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_ra_OFFSET) #define _thread_offset_to_sp \ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_sp_OFFSET) #define _thread_offset_to_key \ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_key_OFFSET) #define _thread_offset_to_retval \ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_retval_OFFSET) /* end - threads */ #endif /* ZEPHYR_ARCH_NIOS2_INCLUDE_OFFSETS_SHORT_ARCH_H_ */ ```
/content/code_sandbox/arch/nios2/include/offsets_short_arch.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
404
```c /* * */ #include "sw_isr_common.h" #include <zephyr/sw_isr_table.h> #include <zephyr/irq.h> #include <zephyr/sys/__assert.h> void __weak z_isr_install(unsigned int irq, void (*routine)(const void *), const void *param) { unsigned int table_idx; /* * Do not assert on the IRQ enable status for ARM GIC since the SGI * type interrupts are always enabled and attempting to install an ISR * for them will cause the assertion to fail. */ #ifndef CONFIG_GIC __ASSERT(!irq_is_enabled(irq), "IRQ %d is enabled", irq); #endif /* !CONFIG_GIC */ table_idx = z_get_sw_isr_table_idx(irq); /* If dynamic IRQs are enabled, then the _sw_isr_table is in RAM and * can be modified */ _sw_isr_table[table_idx].arg = param; _sw_isr_table[table_idx].isr = routine; } /* Some architectures don't/can't interpret flags or priority and have * no more processing to do than this. Provide a generic fallback. */ int __weak arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, void (*routine)(const void *), const void *parameter, uint32_t flags) { ARG_UNUSED(flags); ARG_UNUSED(priority); z_isr_install(irq, routine, parameter); return irq; } ```
/content/code_sandbox/arch/common/dynamic_isr.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
306
```c /* * */ #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #include <zephyr/sw_isr_table.h> #include <zephyr/arch/cpu.h> /* There is an additional member at the end populated by the linker script * which indicates the number of interrupts specified */ struct int_list_header { uint32_t table_size; uint32_t offset; #if defined(CONFIG_ISR_TABLES_LOCAL_DECLARATION) uint32_t swi_table_entry_size; uint32_t shared_isr_table_entry_size; uint32_t shared_isr_client_num_offset; #endif /* defined(CONFIG_ISR_TABLES_LOCAL_DECLARATION) */ }; /* These values are not included in the resulting binary, but instead form the * header of the initList section, which is used by gen_isr_tables.py to create * the vector and sw isr tables, */ Z_GENERIC_SECTION(.irq_info) __used struct int_list_header _iheader = { .table_size = IRQ_TABLE_SIZE, .offset = CONFIG_GEN_IRQ_START_VECTOR, #if defined(CONFIG_ISR_TABLES_LOCAL_DECLARATION) .swi_table_entry_size = sizeof(struct _isr_table_entry), #if defined(CONFIG_SHARED_INTERRUPTS) .shared_isr_table_entry_size = sizeof(struct z_shared_isr_table_entry), .shared_isr_client_num_offset = offsetof(struct z_shared_isr_table_entry, client_num), #endif /* defined(CONFIG_SHARED_INTERRUPTS) */ #endif /* defined(CONFIG_ISR_TABLES_LOCAL_DECLARATION) */ }; /* These are placeholder tables. They will be replaced by the real tables * generated by gen_isr_tables.py. * * z_irq_spurious is used as a placeholder value to ensure that it is not * optimized out in the first linker pass. The first linker pass must contain * the same symbols as the second linker pass for the code generation to work. */ /* Some arches don't use a vector table, they have a common exception entry * point for all interrupts. Don't generate a table in this case. 
*/ #ifdef CONFIG_GEN_IRQ_VECTOR_TABLE /* When both the IRQ vector table and the software ISR table are used, populate * the IRQ vector table with the common software ISR by default, such that all * indirect interrupt vectors are handled using the software ISR table; * otherwise, populate the IRQ vector table with z_irq_spurious so that all * un-connected IRQ vectors end up in the spurious IRQ handler. */ #ifdef CONFIG_GEN_SW_ISR_TABLE #define IRQ_VECTOR_TABLE_DEFAULT_ISR _isr_wrapper #else #define IRQ_VECTOR_TABLE_DEFAULT_ISR z_irq_spurious #endif /* CONFIG_GEN_SW_ISR_TABLE */ #ifdef CONFIG_IRQ_VECTOR_TABLE_JUMP_BY_CODE /* Assembly code for a jump instruction. Must be set by the architecture. */ #ifndef ARCH_IRQ_VECTOR_JUMP_CODE #error "ARCH_IRQ_VECTOR_JUMP_CODE not defined" #endif #define BUILD_VECTOR(n, _) __asm(ARCH_IRQ_VECTOR_JUMP_CODE(IRQ_VECTOR_TABLE_DEFAULT_ISR)) /* The IRQ vector table contains the jump opcodes towards the vector routine */ void __irq_vector_table __attribute__((naked)) _irq_vector_table(void) { LISTIFY(CONFIG_NUM_IRQS, BUILD_VECTOR, (;)); }; #else /* The IRQ vector table is an array of vector addresses */ uintptr_t __irq_vector_table _irq_vector_table[IRQ_TABLE_SIZE] = { [0 ...(IRQ_TABLE_SIZE - 1)] = (uintptr_t)&IRQ_VECTOR_TABLE_DEFAULT_ISR, }; #endif /* CONFIG_IRQ_VECTOR_TABLE_JUMP_BY_CODE */ #endif /* CONFIG_GEN_IRQ_VECTOR_TABLE */ /* If there are no interrupts at all, or all interrupts are of the 'direct' * type and bypass the _sw_isr_table, then do not generate one. */ #ifdef CONFIG_GEN_SW_ISR_TABLE struct _isr_table_entry __sw_isr_table _sw_isr_table[IRQ_TABLE_SIZE] = { [0 ...(IRQ_TABLE_SIZE - 1)] = {(const void *)0x42, &z_irq_spurious}, }; #endif #ifdef CONFIG_SHARED_INTERRUPTS struct z_shared_isr_table_entry __shared_sw_isr_table z_shared_sw_isr_table[IRQ_TABLE_SIZE] = { }; #endif ```
/content/code_sandbox/arch/common/isr_tables.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
841
```c /* * */ #include "sw_isr_common.h" #include <zephyr/sw_isr_table.h> #include <zephyr/irq.h> #include <zephyr/sys/__assert.h> /* * Common code for arches that use software ISR tables (CONFIG_GEN_ISR_TABLES) */ unsigned int __weak z_get_sw_isr_table_idx(unsigned int irq) { unsigned int table_idx = irq - CONFIG_GEN_IRQ_START_VECTOR; __ASSERT_NO_MSG(table_idx < IRQ_TABLE_SIZE); return table_idx; } ```
/content/code_sandbox/arch/common/sw_isr_common.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
105
```c /* * */ #include "sw_isr_common.h" #include <zephyr/sw_isr_table.h> #include <zephyr/spinlock.h> /* an interrupt line can be considered shared only if there's * at least 2 clients using it. As such, enforce the fact that * the maximum number of allowed clients should be at least 2. */ BUILD_ASSERT(CONFIG_SHARED_IRQ_MAX_NUM_CLIENTS >= 2, "maximum number of clients should be at least 2"); void z_shared_isr(const void *data) { size_t i; const struct z_shared_isr_table_entry *entry; const struct _isr_table_entry *client; entry = data; for (i = 0; i < entry->client_num; i++) { client = &entry->clients[i]; if (client->isr) { client->isr(client->arg); } } } #ifdef CONFIG_DYNAMIC_INTERRUPTS static struct k_spinlock lock; void z_isr_install(unsigned int irq, void (*routine)(const void *), const void *param) { struct z_shared_isr_table_entry *shared_entry; struct _isr_table_entry *entry; struct _isr_table_entry *client; unsigned int table_idx; int i; k_spinlock_key_t key; table_idx = z_get_sw_isr_table_idx(irq); /* check for out of bounds table index */ if (table_idx >= IRQ_TABLE_SIZE) { return; } shared_entry = &z_shared_sw_isr_table[table_idx]; entry = &_sw_isr_table[table_idx]; key = k_spin_lock(&lock); /* have we reached the client limit? */ __ASSERT(shared_entry->client_num < CONFIG_SHARED_IRQ_MAX_NUM_CLIENTS, "reached maximum number of clients"); if (entry->isr == z_irq_spurious) { /* this is the first time a ISR/arg pair is registered * for INTID => no need to share it. */ entry->isr = routine; entry->arg = param; k_spin_unlock(&lock, key); return; } else if (entry->isr != z_shared_isr) { /* INTID is being used by another ISR/arg pair. * Push back the ISR/arg pair registered in _sw_isr_table * to the list of clients and hijack the pair stored in * _sw_isr_table with our own z_shared_isr/shared_entry pair. 
*/ shared_entry->clients[shared_entry->client_num].isr = entry->isr; shared_entry->clients[shared_entry->client_num].arg = entry->arg; shared_entry->client_num++; entry->isr = z_shared_isr; entry->arg = shared_entry; } /* don't register the same ISR/arg pair multiple times */ for (i = 0; i < shared_entry->client_num; i++) { client = &shared_entry->clients[i]; __ASSERT(client->isr != routine && client->arg != param, "trying to register duplicate ISR/arg pair"); } shared_entry->clients[shared_entry->client_num].isr = routine; shared_entry->clients[shared_entry->client_num].arg = param; shared_entry->client_num++; k_spin_unlock(&lock, key); } static void swap_client_data(struct _isr_table_entry *a, struct _isr_table_entry *b) { struct _isr_table_entry tmp; tmp.arg = a->arg; tmp.isr = a->isr; a->arg = b->arg; a->isr = b->isr; b->arg = tmp.arg; b->isr = tmp.isr; } static void shared_irq_remove_client(struct z_shared_isr_table_entry *shared_entry, int client_idx, unsigned int table_idx) { int i; shared_entry->clients[client_idx].isr = NULL; shared_entry->clients[client_idx].arg = NULL; /* push back the removed client to the end of the client list */ for (i = client_idx; i <= (int)shared_entry->client_num - 2; i++) { swap_client_data(&shared_entry->clients[i], &shared_entry->clients[i + 1]); } shared_entry->client_num--; /* "unshare" interrupt if there will be a single client left */ if (shared_entry->client_num == 1) { _sw_isr_table[table_idx].isr = shared_entry->clients[0].isr; _sw_isr_table[table_idx].arg = shared_entry->clients[0].arg; shared_entry->clients[0].isr = NULL; shared_entry->clients[0].arg = NULL; shared_entry->client_num--; } } int __weak arch_irq_disconnect_dynamic(unsigned int irq, unsigned int priority, void (*routine)(const void *parameter), const void *parameter, uint32_t flags) { ARG_UNUSED(priority); ARG_UNUSED(flags); return z_isr_uninstall(irq, routine, parameter); } int z_isr_uninstall(unsigned int irq, void (*routine)(const void *), const void 
*parameter) { struct z_shared_isr_table_entry *shared_entry; struct _isr_table_entry *entry; struct _isr_table_entry *client; unsigned int table_idx; size_t i; k_spinlock_key_t key; table_idx = z_get_sw_isr_table_idx(irq); /* check for out of bounds table index */ if (table_idx >= IRQ_TABLE_SIZE) { return -EINVAL; } shared_entry = &z_shared_sw_isr_table[table_idx]; entry = &_sw_isr_table[table_idx]; key = k_spin_lock(&lock); /* note: it's important that we remove the ISR/arg pair even if * the IRQ line is not being shared because z_isr_install() will * not overwrite it unless the _sw_isr_table entry for the given * IRQ line contains the default pair which is z_irq_spurious/NULL. */ if (!shared_entry->client_num) { if (entry->isr == routine && entry->arg == parameter) { entry->isr = z_irq_spurious; entry->arg = NULL; } goto out_unlock; } for (i = 0; i < shared_entry->client_num; i++) { client = &shared_entry->clients[i]; if (client->isr == routine && client->arg == parameter) { /* note: this is the only match we're going to get */ shared_irq_remove_client(shared_entry, i, table_idx); goto out_unlock; } } out_unlock: k_spin_unlock(&lock, key); return 0; } #endif /* CONFIG_DYNAMIC_INTERRUPTS */ ```
/content/code_sandbox/arch/common/shared_irq.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,456
```c /* * */ #include <zephyr/device.h> #include <zephyr/irq.h> #include <zephyr/sw_isr_table.h> #include <zephyr/sys/__assert.h> #include <zephyr/sys/util.h> BUILD_ASSERT((CONFIG_NUM_2ND_LEVEL_AGGREGATORS * CONFIG_MAX_IRQ_PER_AGGREGATOR) <= BIT(CONFIG_2ND_LEVEL_INTERRUPT_BITS), "L2 bits not enough to cover the number of L2 IRQs"); /** * @brief Get the aggregator that's responsible for the given irq * * @param irq IRQ number to query * * @return Aggregator entry, NULL if irq is level 1 or not found. */ static const struct _irq_parent_entry *get_intc_entry_for_irq(unsigned int irq) { const unsigned int level = irq_get_level(irq); /* 1st level aggregator is not registered */ if (level == 1) { return NULL; } const unsigned int intc_irq = irq_get_intc_irq(irq); /* Find an aggregator entry that matches the level & intc_irq */ STRUCT_SECTION_FOREACH_ALTERNATE(intc_table, _irq_parent_entry, intc) { if ((intc->level == level) && (intc->irq == intc_irq)) { return intc; } } return NULL; } const struct device *z_get_sw_isr_device_from_irq(unsigned int irq) { const struct _irq_parent_entry *intc = get_intc_entry_for_irq(irq); __ASSERT(intc != NULL, "can't find an aggregator to handle irq(%X)", irq); return intc != NULL ? 
intc->dev : NULL; } unsigned int z_get_sw_isr_irq_from_device(const struct device *dev) { /* Get the IRQN for the aggregator */ STRUCT_SECTION_FOREACH_ALTERNATE(intc_table, _irq_parent_entry, intc) { if (intc->dev == dev) { return intc->irq; } } __ASSERT(false, "dev(%p) not found", dev); return 0; } unsigned int z_get_sw_isr_table_idx(unsigned int irq) { unsigned int table_idx, local_irq; const struct _irq_parent_entry *intc = get_intc_entry_for_irq(irq); const unsigned int level = irq_get_level(irq); if (intc != NULL) { local_irq = irq_from_level(irq, level); __ASSERT_NO_MSG(local_irq < CONFIG_MAX_IRQ_PER_AGGREGATOR); table_idx = intc->offset + local_irq; } else { /* irq level must be 1 if no intc entry */ __ASSERT(level == 1, "can't find an aggregator to handle irq(%X)", irq); table_idx = irq; } table_idx -= CONFIG_GEN_IRQ_START_VECTOR; __ASSERT_NO_MSG(table_idx < IRQ_TABLE_SIZE); return table_idx; } ```
/content/code_sandbox/arch/common/multilevel_irq.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
625
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/sys_clock.h> #include <zephyr/timing/timing.h> void arch_timing_init(void) { } void arch_timing_start(void) { } void arch_timing_stop(void) { } timing_t arch_timing_counter_get(void) { return k_cycle_get_32(); } uint64_t arch_timing_cycles_get(volatile timing_t *const start, volatile timing_t *const end) { return (*end - *start); } uint64_t arch_timing_freq_get(void) { return sys_clock_hw_cycles_per_sec(); } uint64_t arch_timing_cycles_to_ns(uint64_t cycles) { return k_cyc_to_ns_floor64(cycles); } uint64_t arch_timing_cycles_to_ns_avg(uint64_t cycles, uint32_t count) { return arch_timing_cycles_to_ns(cycles) / count; } uint32_t arch_timing_freq_get_mhz(void) { return (uint32_t)(arch_timing_freq_get() / 1000000U); } ```
/content/code_sandbox/arch/common/timing.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
214
```linker script /* * */ /* * LLVM LLD fills empty spaces (created using ALIGN() or moving the location * counter) in executable segments with TrapInstr pattern, e.g. for ARM the * TrapInstr pattern is 0xd4d4d4d4. GNU LD fills empty spaces with 0x00 * pattern. * * We may want to have some section (e.g. rom_start) filled with 0x00, * e.g. because MCU can interpret the pattern as a configuration data. */ FILL(0x00); ```
/content/code_sandbox/arch/common/fill_with_zeros.ld
linker script
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
116
```linker script /* * */ /* * To provide correct value, this file must be the first file included in * snippets-rom-start.ld. This variable is used in rom_start_offset.ld */ HIDDEN(__rom_start_address = .); ```
/content/code_sandbox/arch/common/rom_start_address.ld
linker script
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
48
```linker script /* * */ /* Copied from linker.ld */ /* Non-cached region of RAM */ SECTION_DATA_PROLOGUE(_NOCACHE_SECTION_NAME,(NOLOAD),) { #if defined(CONFIG_MMU) MMU_ALIGN; #else MPU_ALIGN(_nocache_ram_size); #endif _nocache_ram_start = .; *(.nocache) *(".nocache.*") #include <snippets-nocache-section.ld> #if defined(CONFIG_MMU) MMU_ALIGN; #else MPU_ALIGN(_nocache_ram_size); #endif _nocache_ram_end = .; } GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION) _nocache_ram_size = _nocache_ram_end - _nocache_ram_start; ```
/content/code_sandbox/arch/common/nocache.ld
linker script
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
155
```linker script /* * */ /* Copied from linker.ld */ SECTION_DATA_PROLOGUE(.ramfunc,,) { MPU_ALIGN(__ramfunc_size); __ramfunc_start = .; *(.ramfunc) *(".ramfunc.*") #include <snippets-ramfunc-section.ld> MPU_ALIGN(__ramfunc_size); __ramfunc_end = .; } GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION) __ramfunc_size = __ramfunc_end - __ramfunc_start; __ramfunc_load_start = LOADADDR(.ramfunc); ```
/content/code_sandbox/arch/common/ramfunc.ld
linker script
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
120
```unknown # Common architecture configuration options config SEMIHOST bool "Semihosting support for ARM and RISC-V targets" depends on ARM || ARM64 || RISCV help Semihosting is a mechanism that enables code running on an ARM or RISC-V target to communicate and use the Input/Output facilities on a host computer that is running a debugger. Additional information can be found in: path_to_url path_to_url This option is compatible with hardware and with QEMU, through the (automatic) use of the -semihosting-config switch when invoking it. config LEGACY_MULTI_LEVEL_TABLE_GENERATION bool "Auto generates the multi-level interrupt LUT (deprecated)" default y select DEPRECATED depends on MULTI_LEVEL_INTERRUPTS depends on !PLIC depends on !NXP_IRQSTEER depends on !RV32M1_INTMUX depends on !CAVS_ICTL depends on !DW_ICTL_ACE depends on !DW_ICTL help A make-shift Kconfig to continue generating the multi-level interrupt LUT with the legacy way using DT macros. config ISR_TABLE_SHELL bool "Shell command to dump the ISR tables" depends on GEN_SW_ISR_TABLE depends on SHELL help This option enables a shell command to dump the ISR tables. ```
/content/code_sandbox/arch/common/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
296
```c /* * Organisation (CSIRO) ABN 41 687 119 230. * */ #include <string.h> #include <zephyr/kernel.h> #include <zephyr/arch/common/semihost.h> struct semihost_poll_in_args { long zero; } __packed; struct semihost_open_args { const char *path; long mode; long path_len; } __packed; struct semihost_close_args { long fd; } __packed; struct semihost_flen_args { long fd; } __packed; struct semihost_seek_args { long fd; long offset; } __packed; struct semihost_read_args { long fd; char *buf; long len; } __packed; struct semihost_write_args { long fd; const char *buf; long len; } __packed; char semihost_poll_in(void) { struct semihost_poll_in_args args = { .zero = 0 }; return (char)semihost_exec(SEMIHOST_READC, &args); } void semihost_poll_out(char c) { /* WRITEC takes a pointer directly to the character */ (void)semihost_exec(SEMIHOST_WRITEC, &c); } long semihost_open(const char *path, long mode) { struct semihost_open_args args = { .path = path, .mode = mode, .path_len = strlen(path) }; return semihost_exec(SEMIHOST_OPEN, &args); } long semihost_close(long fd) { struct semihost_close_args args = { .fd = fd }; return semihost_exec(SEMIHOST_CLOSE, &args); } long semihost_flen(long fd) { struct semihost_flen_args args = { .fd = fd }; return semihost_exec(SEMIHOST_FLEN, &args); } long semihost_seek(long fd, long offset) { struct semihost_seek_args args = { .fd = fd, .offset = offset }; return semihost_exec(SEMIHOST_SEEK, &args); } long semihost_read(long fd, void *buf, long len) { struct semihost_read_args args = { .fd = fd, .buf = buf, .len = len }; long ret; ret = semihost_exec(SEMIHOST_READ, &args); /* EOF condition */ if (ret == len) { ret = -EIO; } /* All bytes read */ else if (ret == 0) { ret = len; } return ret; } long semihost_write(long fd, const void *buf, long len) { struct semihost_write_args args = { .fd = fd, .buf = buf, .len = len }; return semihost_exec(SEMIHOST_WRITE, &args); } ```
/content/code_sandbox/arch/common/semihost.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
589
```linker script /* * */ /* * The line below this comment is equivalent to '. = CONFIG_ROM_START_OFFSET' * as interpreted by GNU LD, but also compatible with LLVM LLD. * * Simple assignment doesn't work for LLVM LLD, because the dot inside section * is absolute, so assigning offset here results in moving location counter * backwards. * * We can't use '. += CONFIG_ROM_START_OFFSET' here because there might be some * other files included before this file. * * Symbol __rom_start_address is defined in rom_start_address.ld */ . += CONFIG_ROM_START_OFFSET - (. - __rom_start_address); . = ALIGN(4); ```
/content/code_sandbox/arch/common/rom_start_offset.ld
linker script
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
135
```c /* * */ #include <zephyr/sw_isr_table.h> #include <zephyr/sys/util.h> /** * @file * @brief This file houses the deprecated legacy macros-generated multi-level interrupt lookup * table code, compiled when `CONFIG_LEGACY_MULTI_LEVEL_TABLE_GENERATION` is enabled. */ /* * Insert code if the node_id is an interrupt controller */ #define Z_IF_DT_IS_INTC(node_id, code) \ IF_ENABLED(DT_NODE_HAS_PROP(node_id, interrupt_controller), (code)) /* * Expands to node_id if its IRQN is equal to `_irq`, nothing otherwise * This only works for `_irq` between 0 & 4095, see `IS_EQ` */ #define Z_IF_DT_INTC_IRQN_EQ(node_id, _irq) IF_ENABLED(IS_EQ(DT_IRQ(node_id, irq), _irq), (node_id)) /* * Expands to node_id if it's an interrupt controller & its IRQN is `irq`, or nothing otherwise */ #define Z_DT_INTC_GET_IRQN(node_id, _irq) \ Z_IF_DT_IS_INTC(node_id, Z_IF_DT_INTC_IRQN_EQ(node_id, _irq)) /** * Loop through child of "/soc" and get root interrupt controllers with `_irq` as IRQN, * this assumes only one device has the IRQN * @param _irq irq number * @return node_id(s) that has the `_irq` number, or empty if none of them has the `_irq` */ #define INTC_DT_IRQN_GET(_irq) \ DT_FOREACH_CHILD_STATUS_OKAY_VARGS(DT_PATH(soc), Z_DT_INTC_GET_IRQN, _irq) #define INIT_IRQ_PARENT_OFFSET_2ND(n, d, i, o) \ IRQ_PARENT_ENTRY_DEFINE(intc_l2_##n, DEVICE_DT_GET_OR_NULL(d), i, o, 2) #define IRQ_INDEX_TO_OFFSET(i, base) (base + i * CONFIG_MAX_IRQ_PER_AGGREGATOR) #define CAT_2ND_LVL_LIST(i, base) \ INIT_IRQ_PARENT_OFFSET_2ND(i, INTC_DT_IRQN_GET(CONFIG_2ND_LVL_INTR_0##i##_OFFSET), \ CONFIG_2ND_LVL_INTR_0##i##_OFFSET, \ IRQ_INDEX_TO_OFFSET(i, base)) LISTIFY(CONFIG_NUM_2ND_LEVEL_AGGREGATORS, CAT_2ND_LVL_LIST, (;), CONFIG_2ND_LVL_ISR_TBL_OFFSET); #ifdef CONFIG_3RD_LEVEL_INTERRUPTS BUILD_ASSERT((CONFIG_NUM_3RD_LEVEL_AGGREGATORS * CONFIG_MAX_IRQ_PER_AGGREGATOR) <= BIT(CONFIG_3RD_LEVEL_INTERRUPT_BITS), "L3 bits not enough to cover the number of L3 IRQs"); #define INIT_IRQ_PARENT_OFFSET_3RD(n, d, i, o) \ 
IRQ_PARENT_ENTRY_DEFINE(intc_l3_##n, DEVICE_DT_GET_OR_NULL(d), i, o, 3) #define CAT_3RD_LVL_LIST(i, base) \ INIT_IRQ_PARENT_OFFSET_3RD(i, INTC_DT_IRQN_GET(CONFIG_3RD_LVL_INTR_0##i##_OFFSET), \ CONFIG_3RD_LVL_INTR_0##i##_OFFSET, \ IRQ_INDEX_TO_OFFSET(i, base)) LISTIFY(CONFIG_NUM_3RD_LEVEL_AGGREGATORS, CAT_3RD_LVL_LIST, (;), CONFIG_3RD_LVL_ISR_TBL_OFFSET); #endif /* CONFIG_3RD_LEVEL_INTERRUPTS */ ```
/content/code_sandbox/arch/common/multilevel_irq_legacy.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
738
```objective-c /* * */ /** * @file * @brief Private header for the software-managed ISR table's functions */ #ifndef ZEPHYR_ARCH_COMMON_INCLUDE_SW_ISR_COMMON_H_ #define ZEPHYR_ARCH_COMMON_INCLUDE_SW_ISR_COMMON_H_ #if !defined(_ASMLANGUAGE) #include <zephyr/device.h> #ifdef __cplusplus extern "C" { #endif /** * @brief Helper function used to compute the index in _sw_isr_table * based on passed IRQ. * * @param irq IRQ number in its zephyr format * * @return corresponding index in _sw_isr_table */ unsigned int z_get_sw_isr_table_idx(unsigned int irq); /** * @brief Helper function used to get the parent interrupt controller device based on passed IRQ. * * @param irq IRQ number in its zephyr format * * @return corresponding interrupt controller device in _sw_isr_table */ const struct device *z_get_sw_isr_device_from_irq(unsigned int irq); /** * @brief Helper function used to get the IRQN of the passed in parent interrupt * controller device. * * @param dev parent interrupt controller device * * @return IRQN of the interrupt controller */ unsigned int z_get_sw_isr_irq_from_device(const struct device *dev); #ifdef __cplusplus } #endif #endif /* _ASMLANGUAGE */ #endif /* ZEPHYR_ARCH_COMMON_INCLUDE_SW_ISR_COMMON_H_ */ ```
/content/code_sandbox/arch/common/include/sw_isr_common.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
292
```c /* * */ #include <zephyr/debug/symtab.h> #include <zephyr/shell/shell.h> #include <zephyr/sw_isr_table.h> static void dump_isr_table_entry(const struct shell *sh, int idx, struct _isr_table_entry *entry) { if ((entry->isr == z_irq_spurious) || (entry->isr == NULL)) { return; } #ifdef CONFIG_SYMTAB const char *name = symtab_find_symbol_name((uintptr_t)entry->isr, NULL); shell_print(sh, "%4d: %s(%p)", idx, name, entry->arg); #else shell_print(sh, "%4d: %p(%p)", idx, entry->isr, entry->arg); #endif /* CONFIG_SYMTAB */ } static int cmd_sw_isr_table(const struct shell *sh, size_t argc, char **argv) { shell_print(sh, "_sw_isr_table[%d]\n", IRQ_TABLE_SIZE); for (int idx = 0; idx < IRQ_TABLE_SIZE; idx++) { dump_isr_table_entry(sh, idx, &_sw_isr_table[idx]); } return 0; } #ifdef CONFIG_SHARED_INTERRUPTS static int cmd_shared_sw_isr_table(const struct shell *sh, size_t argc, char **argv) { shell_print(sh, "z_shared_sw_isr_table[%d][%d]\n", IRQ_TABLE_SIZE, CONFIG_SHARED_IRQ_MAX_NUM_CLIENTS); for (int idx = 0; idx < IRQ_TABLE_SIZE; idx++) { for (int c = 0; c < z_shared_sw_isr_table[idx].client_num; c++) { dump_isr_table_entry(sh, idx, &z_shared_sw_isr_table[idx].clients[c]); } } return 0; } #endif /* CONFIG_SHARED_INTERRUPTS */ SHELL_STATIC_SUBCMD_SET_CREATE(isr_table_cmds, SHELL_CMD_ARG(sw_isr_table, NULL, "Dump _sw_isr_table.\n" "Usage: isr_table sw_isr_table", cmd_sw_isr_table, 1, 0), #ifdef CONFIG_SHARED_INTERRUPTS SHELL_CMD_ARG(shared_sw_isr_table, NULL, "Dump z_shared_sw_isr_table.\n" "Usage: isr_table shared_sw_isr_table", cmd_shared_sw_isr_table, 1, 0), #endif /* CONFIG_SHARED_INTERRUPTS */ SHELL_SUBCMD_SET_END); SHELL_CMD_ARG_REGISTER(isr_table, &isr_table_cmds, "ISR tables shell command", NULL, 0, 0); ```
/content/code_sandbox/arch/common/isr_tables_shell.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
543
```cmake # For Aarch64, multilib is not an actively pursued solution for most Linux # distributions. Userspace is (generally) either 32-bit or 64-bit but not # both. # @Intent: Call a script to get userspace wordsize for comparison with CONFIG_64BIT execute_process( COMMAND ${PYTHON_EXECUTABLE} ${ZEPHYR_BASE}/scripts/build/user_wordsize.py OUTPUT_VARIABLE WORDSIZE OUTPUT_STRIP_TRAILING_WHITESPACE ) if (CONFIG_64BIT) if (${WORDSIZE} STREQUAL "32") message(FATAL_ERROR "CONFIG_64BIT=y but this Aarch64 machine has a 32-bit userspace.\n" "If you were targeting native_sim/native/64, target native_sim instead.\n" "Otherwise, be sure to define CONFIG_64BIT appropriately.\n" ) endif() zephyr_compile_options(-fPIC) else () if (${WORDSIZE} STREQUAL "64") message(FATAL_ERROR "CONFIG_64BIT=n but this Aarch64 machine has a 64-bit userspace.\n" "If you were targeting native_sim, target native_sim/native/64 instead.\n" "Otherwise, be sure to define CONFIG_64BIT appropriately.\n" ) endif() endif () ```
/content/code_sandbox/arch/posix/Linux.aarch64.cmake
cmake
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
294
```unknown # General configuration options menu "POSIX (native) Options" depends on ARCH_POSIX config ARCH default "posix" config ARCH_POSIX_RECOMMENDED_STACK_SIZE int default 44 if 64BIT && STACK_SENTINEL default 40 if 64BIT default 28 if STACK_SENTINEL default 24 help In bytes, stack size for Zephyr threads meant only for the POSIX architecture. (In this architecture only part of the thread status is kept in the Zephyr thread stack, the real stack is the native underlying pthread stack. Therefore the allocated stack can be limited to this size) config ARCH_POSIX_LIBFUZZER bool "Build fuzz test target" help Build as an LLVM libfuzzer target. Requires support from the toolchain (currently only clang works, and only on native_{posix,sim}[//64]), and should normally be used in concert with some of CONFIG_ASAN/UBSAN/MSAN for validation. The application needs to implement the LLVMFuzzerTestOneInput() entry point, which runs in the host environment "outside" the OS. See Zephyr documentation and sample and path_to_url for more information. config ARCH_POSIX_TRAP_ON_FATAL bool "Raise a SIGTRAP on fatal error" help Raise a SIGTRAP signal on fatal error before exiting. This automatically suspends the target if a debugger is attached. endmenu ```
/content/code_sandbox/arch/posix/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
326
```c /* * */ /** * @file CPU power management code for POSIX * * This module provides: * * An implementation of the architecture-specific * arch_cpu_idle() primitive required by the kernel idle loop component. * It can be called within an implementation of _pm_save_idle(), * which is provided for the kernel by the platform. * * An implementation of arch_cpu_atomic_idle(), which * atomically re-enables interrupts and enters low power mode. * * A weak stub for sys_arch_reboot(), which does nothing */ #include "posix_core.h" #include "posix_board_if.h" #include <zephyr/arch/posix/posix_soc_if.h> #include <zephyr/tracing/tracing.h> #if !defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT) #error "The POSIX architecture needs a custom busy_wait implementation. \ CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT must be selected" /* Each POSIX arch board (or SOC) must provide an implementation of * arch_busy_wait() */ #endif void arch_cpu_idle(void) { sys_trace_idle(); posix_irq_full_unlock(); posix_halt_cpu(); } void arch_cpu_atomic_idle(unsigned int key) { sys_trace_idle(); posix_atomic_halt_cpu(key); } #if defined(CONFIG_REBOOT) /** * @brief Stub for sys_arch_reboot * * Does nothing */ void __weak sys_arch_reboot(int type) { posix_print_warning("%s called with type %d. Exiting\n", __func__, type); posix_exit(1); } #endif /* CONFIG_REBOOT */ ```
/content/code_sandbox/arch/posix/core/cpuhalt.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
321
```c /* * */ /* * Interfacing between the POSIX arch and the Native Simulator (nsi) CPU thread emulator * * This posix architecture "bottom" will be used when building with the native simulator. */ #include "nct_if.h" static void *te_state; /* * Initialize the posix architecture */ void posix_arch_init(void) { extern void posix_arch_thread_entry(void *pa_thread_status); te_state = nct_init(posix_arch_thread_entry); } /* * Clear the state of the POSIX architecture * free whatever memory it may have allocated, etc. */ void posix_arch_clean_up(void) { nct_clean_up(te_state); } void posix_swap(int next_allowed_thread_nbr, int this_th_nbr) { (void) this_th_nbr; nct_swap_threads(te_state, next_allowed_thread_nbr); } void posix_main_thread_start(int next_allowed_thread_nbr) { nct_first_thread_start(te_state, next_allowed_thread_nbr); } int posix_new_thread(void *payload) { return nct_new_thread(te_state, payload); } void posix_abort_thread(int thread_idx) { nct_abort_thread(te_state, thread_idx); } int posix_arch_get_unique_thread_id(int thread_idx) { return nct_get_unique_thread_id(te_state, thread_idx); } ```
/content/code_sandbox/arch/posix/core/posix_core_nsi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
263
```c /* * */ #include <signal.h> void nsi_raise_sigtrap(void) { raise(SIGTRAP); } ```
/content/code_sandbox/arch/posix/core/fatal_trap.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
24
```c /* * */ /** * @file * @brief Thread support primitives * * This module provides core thread related primitives for the POSIX * architecture */ #include <zephyr/toolchain.h> #include <zephyr/kernel_structs.h> #include <ksched.h> #include "posix_core.h" #include <zephyr/arch/posix/posix_soc_if.h> #ifdef CONFIG_TRACING #include <zephyr/tracing/tracing_macros.h> #include <zephyr/tracing/tracing.h> #endif /* Note that in this arch we cheat quite a bit: we use as stack a normal * pthreads stack and therefore we ignore the stack size */ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, char *stack_ptr, k_thread_entry_t entry, void *p1, void *p2, void *p3) { posix_thread_status_t *thread_status; /* We store it in the same place where normal archs store the * "initial stack frame" */ thread_status = Z_STACK_PTR_TO_FRAME(posix_thread_status_t, stack_ptr); /* z_thread_entry() arguments */ thread_status->entry_point = entry; thread_status->arg1 = p1; thread_status->arg2 = p2; thread_status->arg3 = p3; #if defined(CONFIG_ARCH_HAS_THREAD_ABORT) thread_status->aborted = 0; #endif thread->callee_saved.thread_status = thread_status; thread_status->thread_idx = posix_new_thread((void *)thread_status); } void posix_arch_thread_entry(void *pa_thread_status) { posix_thread_status_t *ptr = pa_thread_status; posix_irq_full_unlock(); z_thread_entry(ptr->entry_point, ptr->arg1, ptr->arg2, ptr->arg3); } #if defined(CONFIG_ARCH_HAS_THREAD_ABORT) void z_impl_k_thread_abort(k_tid_t thread) { unsigned int key; int thread_idx; SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread); posix_thread_status_t *tstatus = (posix_thread_status_t *) thread->callee_saved.thread_status; thread_idx = tstatus->thread_idx; key = irq_lock(); if (_current == thread) { if (tstatus->aborted == 0) { /* LCOV_EXCL_BR_LINE */ tstatus->aborted = 1; } else { posix_print_warning(/* LCOV_EXCL_LINE */ "POSIX arch: The kernel is trying to abort and swap " "out of an already aborted thread %i. 
This " "should NOT have happened\n", thread_idx); } posix_abort_thread(thread_idx); } z_thread_abort(thread); if (tstatus->aborted == 0) { PC_DEBUG("%s aborting now [%i] %i\n", __func__, posix_arch_get_unique_thread_id(thread_idx), thread_idx); tstatus->aborted = 1; posix_abort_thread(thread_idx); } else { PC_DEBUG("%s ignoring re_abort of [%i] " "%i\n", __func__, posix_arch_get_unique_thread_id(thread_idx), thread_idx); } /* The abort handler might have altered the ready queue. */ z_reschedule_irqlock(key); SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread); } #endif ```
/content/code_sandbox/arch/posix/core/thread.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
729
```c /* * */ #include <zephyr/arch/posix/posix_soc_if.h> #include "board_irq.h" #ifdef CONFIG_IRQ_OFFLOAD #include <zephyr/irq_offload.h> void arch_irq_offload(irq_offload_routine_t routine, const void *parameter) { posix_irq_offload(routine, parameter); } #endif void arch_irq_enable(unsigned int irq) { posix_irq_enable(irq); } void arch_irq_disable(unsigned int irq) { posix_irq_disable(irq); } int arch_irq_is_enabled(unsigned int irq) { return posix_irq_is_enabled(irq); } #ifdef CONFIG_DYNAMIC_INTERRUPTS /** * Configure a dynamic interrupt. * * Use this instead of IRQ_CONNECT() if arguments cannot be known at build time. * * @param irq IRQ line number * @param priority Interrupt priority * @param routine Interrupt service routine * @param parameter ISR parameter * @param flags Arch-specific IRQ configuration flags * * @return The vector assigned to this interrupt */ int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, void (*routine)(const void *parameter), const void *parameter, uint32_t flags) { posix_isr_declare(irq, (int)flags, routine, parameter); posix_irq_priority_set(irq, priority, flags); return irq; } #endif /* CONFIG_DYNAMIC_INTERRUPTS */ ```
/content/code_sandbox/arch/posix/core/irq.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
286
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/arch/cpu.h> #include <zephyr/kernel_structs.h> #include <zephyr/sys/printk.h> #include <inttypes.h> #include <zephyr/logging/log_ctrl.h> #include <zephyr/arch/posix/posix_soc_if.h> extern void nsi_raise_sigtrap(void); FUNC_NORETURN void arch_system_halt(unsigned int reason) { ARG_UNUSED(reason); if (IS_ENABLED(CONFIG_ARCH_POSIX_TRAP_ON_FATAL)) { nsi_raise_sigtrap(); } posix_print_error_and_exit("Exiting due to fatal error\n"); CODE_UNREACHABLE; /* LCOV_EXCL_LINE */ } ```
/content/code_sandbox/arch/posix/core/fatal.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
152
```c /* * */ /** * @file * @brief Kernel swapper code for POSIX * * This module implements the arch_swap() routine for the POSIX architecture. * */ #include <zephyr/kernel.h> #include <zephyr/kernel_structs.h> #include "posix_core.h" #include <zephyr/irq.h> #include "kswap.h" #include <zephyr/pm/pm.h> int arch_swap(unsigned int key) { /* * struct k_thread * _current is the currently running thread * struct k_thread * _kernel.ready_q.cache contains the next thread to * run (cannot be NULL) * * Here a "real" arch would save all processor registers, stack pointer * and so forth. But we do not need to do so because we use posix * threads => those are all nicely kept by the native OS kernel */ #if CONFIG_INSTRUMENT_THREAD_SWITCHING z_thread_mark_switched_out(); #endif _current->callee_saved.key = key; _current->callee_saved.retval = -EAGAIN; /* retval may be modified with a call to * arch_thread_return_value_set() */ posix_thread_status_t *ready_thread_ptr = (posix_thread_status_t *) _kernel.ready_q.cache->callee_saved.thread_status; posix_thread_status_t *this_thread_ptr = (posix_thread_status_t *) _current->callee_saved.thread_status; _current = _kernel.ready_q.cache; #if CONFIG_INSTRUMENT_THREAD_SWITCHING z_thread_mark_switched_in(); #endif /* * Here a "real" arch would load all processor registers for the thread * to run. In this arch case, we just block this thread until allowed * to run later, and signal to whomever is allowed to run to * continue. */ posix_swap(ready_thread_ptr->thread_idx, this_thread_ptr->thread_idx); /* When we continue, _kernel->current points back to this thread */ irq_unlock(_current->callee_saved.key); return _current->callee_saved.retval; } #ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN /* This is just a version of arch_swap() in which we do not save anything * about the current thread. * * Note that we will never come back to this thread: posix_main_thread_start() * does never return. 
*/ void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr, k_thread_entry_t _main) { ARG_UNUSED(stack_ptr); ARG_UNUSED(_main); posix_thread_status_t *ready_thread_ptr = (posix_thread_status_t *) _kernel.ready_q.cache->callee_saved.thread_status; #ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING z_thread_mark_switched_out(); #endif _current = _kernel.ready_q.cache; #ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING z_thread_mark_switched_in(); #endif posix_main_thread_start(ready_thread_ptr->thread_idx); } /* LCOV_EXCL_LINE */ #endif #ifdef CONFIG_PM /** * If the kernel is in idle mode, take it out */ void posix_irq_check_idle_exit(void) { if (_kernel.idle) { _kernel.idle = 0; pm_system_resume(); } } #endif ```
/content/code_sandbox/arch/posix/core/swap.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
699
```c
/*
 *
 */

/**
 * @file
 * @brief Kernel structure member offset definition file
 *
 * This module is responsible for the generation of the absolute symbols whose
 * value represents the member offsets for various kernel structures.
 *
 * All of the absolute symbols defined by this module will be present in the
 * final kernel ELF image (due to the linker's reference to the _OffsetAbsSyms
 * symbol).
 *
 * INTERNAL
 * It is NOT necessary to define the offset for every member of a structure.
 * Typically, only those members that are accessed by assembly language routines
 * are defined; however, it doesn't hurt to define all fields for the sake of
 * completeness.
 */

#include <zephyr/kernel.h>
#include <kernel_arch_data.h>
#include <gen_offset.h>
#include <kernel_offsets.h>

/* No arch-specific offsets are defined here: only the common kernel offsets
 * pulled in through kernel_offsets.h are generated.
 */
GEN_ABS_SYM_END
```
/content/code_sandbox/arch/posix/core/offsets/offsets.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
176
```c /* * */ /* * This module exist to provide a basic compatibility shim * from Native simulator components into the POSIX architecture. * * It is a transitional component, intended to facilitate * the migration towards the Native simulator. */ #include <zephyr/arch/posix/posix_trace.h> #include <zephyr/toolchain.h> #include "posix_board_if.h" void nsi_print_error_and_exit(const char *format, ...) { va_list variable_args; va_start(variable_args, format); posix_vprint_error_and_exit(format, variable_args); va_end(variable_args); } void nsi_print_warning(const char *format, ...) { va_list variable_args; va_start(variable_args, format); posix_vprint_warning(format, variable_args); va_end(variable_args); } void nsi_print_trace(const char *format, ...) { va_list variable_args; va_start(variable_args, format); posix_vprint_trace(format, variable_args); va_end(variable_args); } FUNC_NORETURN void nsi_exit(int exit_code) { posix_exit(exit_code); CODE_UNREACHABLE; } ```
/content/code_sandbox/arch/posix/core/nsi_compat/nsi_compat.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
227
```objective-c
/*
 *
 */

#ifndef ARCH_POSIX_CORE_NSI_COMPAT_H
#define ARCH_POSIX_CORE_NSI_COMPAT_H

#include "nsi_tracing.h"
#include "nsi_safe_call.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Terminate the executable with the given exit code (does not return;
 * see the FUNC_NORETURN definition in nsi_compat.c).
 */
void nsi_exit(int exit_code);

#ifdef __cplusplus
}
#endif

#endif /* ARCH_POSIX_CORE_NSI_COMPAT_H */
```
/content/code_sandbox/arch/posix/core/nsi_compat/nsi_compat.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
77
```objective-c /* * */ /* This file is only meant to be included by kernel_structs.h */ #ifndef ZEPHYR_ARCH_POSIX_INCLUDE_KERNEL_ARCH_FUNC_H_ #define ZEPHYR_ARCH_POSIX_INCLUDE_KERNEL_ARCH_FUNC_H_ #include <kernel_arch_data.h> #ifndef _ASMLANGUAGE #ifdef __cplusplus extern "C" { #endif static inline void arch_kernel_init(void) { /* Nothing to be done */ } static ALWAYS_INLINE void arch_thread_return_value_set(struct k_thread *thread, unsigned int value) { thread->callee_saved.retval = value; } #ifdef __cplusplus } #endif static inline bool arch_is_in_isr(void) { return _kernel.cpus[0].nested != 0U; } #endif /* _ASMLANGUAGE */ #endif /* ZEPHYR_ARCH_POSIX_INCLUDE_KERNEL_ARCH_FUNC_H_ */ ```
/content/code_sandbox/arch/posix/include/kernel_arch_func.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
174
```objective-c
/*
 *
 */

/**
 * @file
 * @brief Private kernel definitions (POSIX)
 *
 */

/* Intentionally empty (guard only): the POSIX arch keeps no private kernel
 * arch data of its own.
 */

#ifndef ZEPHYR_ARCH_POSIX_INCLUDE_KERNEL_ARCH_DATA_H_
#define ZEPHYR_ARCH_POSIX_INCLUDE_KERNEL_ARCH_DATA_H_

#endif /* ZEPHYR_ARCH_POSIX_INCLUDE_KERNEL_ARCH_DATA_H_ */
```
/content/code_sandbox/arch/posix/include/kernel_arch_data.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
65
```objective-c
/*
 *
 */

#ifndef ZEPHYR_ARCH_POSIX_INCLUDE_POSIX_CORE_H_
#define ZEPHYR_ARCH_POSIX_INCLUDE_POSIX_CORE_H_

#include <zephyr/kernel.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Per-thread status kept by the POSIX arch core; swap.c reaches it through
 * the thread's callee_saved.thread_status pointer.
 */
typedef struct {
	/* Thread entry function and its three arguments */
	k_thread_entry_t entry_point;
	void *arg1;
	void *arg2;
	void *arg3;

	/* Index identifying this thread to the arch core
	 * (passed to posix_swap()/posix_abort_thread())
	 */
	int thread_idx;

#if defined(CONFIG_ARCH_HAS_THREAD_ABORT)
	/* The kernel may indicate that a thread has been aborted several */
	/* times */
	int aborted;
#endif

	/*
	 * Note: If more elements are added to this structure, remember to
	 * update ARCH_POSIX_RECOMMENDED_STACK_SIZE in the configuration.
	 *
	 * Currently there are 4 pointers + 2 ints, on a 32-bit native posix
	 * implementation this will result in 24 bytes ( 4*4 + 2*4).
	 * For a 64-bit implementation the recommended stack size will be
	 * 40 bytes ( 4*8 + 2*4 ).
	 */
} posix_thread_status_t;

/* Arch-core entry points implemented in the POSIX arch core/board code */
void posix_irq_check_idle_exit(void);
void posix_arch_init(void);
void posix_arch_clean_up(void);
void posix_swap(int next_allowed_thread_nbr, int this_th_nbr);
void posix_main_thread_start(int next_allowed_thread_nbr);
int posix_new_thread(void *payload);
void posix_abort_thread(int thread_idx);
int posix_arch_get_unique_thread_id(int thread_idx);

/* Compile-time switch for the arch core's own debug tracing (off by default) */
#ifndef POSIX_ARCH_DEBUG_PRINTS
#define POSIX_ARCH_DEBUG_PRINTS 0
#endif

#if POSIX_ARCH_DEBUG_PRINTS
#define PC_DEBUG(fmt, ...) posix_print_trace("POSIX arch core:" fmt, __VA_ARGS__)
#else
#define PC_DEBUG(...)
#endif

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_ARCH_POSIX_INCLUDE_POSIX_CORE_H_ */
```
/content/code_sandbox/arch/posix/include/posix_core.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
377
```objective-c
/*
 *
 */

#ifndef ZEPHYR_ARCH_POSIX_INCLUDE_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_POSIX_INCLUDE_OFFSETS_SHORT_ARCH_H_

#include <zephyr/offsets.h>

/* Intentionally empty sections: the POSIX arch defines no arch-specific
 * short offset macros.
 */

/* kernel */

/* end - kernel */

/* threads */

/* end - threads */

#endif /* ZEPHYR_ARCH_POSIX_INCLUDE_OFFSETS_SHORT_ARCH_H_ */
```
/content/code_sandbox/arch/posix/include/offsets_short_arch.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
77
```objective-c
/*
 *
 */

/*
 * Undefine all system-specific macros defined internally, by the compiler.
 * Run 'gcc -dM -E - < /dev/null | sort' to get full list of internally
 * defined macros.
 *
 * NOTE(review): presumably this prevents Zephyr/application code built for
 * the POSIX arch from taking host-OS-specific (#ifdef __linux__ etc.) code
 * paths — confirm against how the build injects this header.
 */

#undef __gnu_linux__
#undef __linux
#undef __linux__
#undef linux
#undef __unix
#undef __unix__
#undef unix
```
/content/code_sandbox/arch/posix/include/undef_system_defines.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
76
```objective-c /* Intentionally empty. Open question: why do the Intel and ARM arches carry two versions of this header? Worth consolidating. */ ```
/content/code_sandbox/arch/posix/include/asm_inline_gcc.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
21
```objective-c
/* Inline assembler kernel functions and macros */

/*
 *
 */

#ifndef ZEPHYR_ARCH_POSIX_INCLUDE_ASM_INLINE_H_
#define ZEPHYR_ARCH_POSIX_INCLUDE_ASM_INLINE_H_

/* Guard against this POSIX-arch-only header being picked up by another arch */
#if !defined(CONFIG_ARCH_POSIX)
#error The arch/posix/include/asm_inline.h is only for the POSIX architecture
#endif

#if defined(__GNUC__)
#include <asm_inline_gcc.h> /* The empty one.. */
#include <zephyr/arch/posix/asm_inline_gcc.h>
#else
#include <asm_inline_other.h>
#endif /* __GNUC__ */

#endif /* ZEPHYR_ARCH_POSIX_INCLUDE_ASM_INLINE_H_ */
```
/content/code_sandbox/arch/posix/include/asm_inline.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
129
```objective-c
/*
 *
 */

#ifndef ZEPHYR_ARCH_POSIX_INCLUDE_POSIX_ARCH_INTERNAL_H_
#define ZEPHYR_ARCH_POSIX_INCLUDE_POSIX_ARCH_INTERNAL_H_

#include <zephyr/toolchain.h>

/* Wrap a host call that "cannot fail": if its return value is non-zero,
 * abort with a message quoting the failing expression (see Note1 below).
 */
#define PC_SAFE_CALL(a) pc_safe_call(a, #a)

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Helper behind PC_SAFE_CALL: @a test is the wrapped call's return value,
 * @a test_str the stringified call used in the error message.
 *
 * NOTE(review): posix_print_error_and_exit() is not declared by this header;
 * it presumably comes from posix_trace.h in the including translation unit —
 * confirm.
 */
static inline void pc_safe_call(int test, const char *test_str)
{
	/* LCOV_EXCL_START */ /* See Note1 */
	if (unlikely(test)) {
		posix_print_error_and_exit("POSIX arch: Error on: %s\n",
					   test_str);
	}
	/* LCOV_EXCL_STOP */
}

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_ARCH_POSIX_INCLUDE_POSIX_ARCH_INTERNAL_H_ */

/*
 * Note 1:
 *
 * All checks for the host pthreads functions which are wrapped by PC_SAFE_CALL
 * are meant to never fail, and therefore will not be covered.
 */
```
/content/code_sandbox/arch/posix/include/posix_arch_internal.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
202
```unknown
menu "SPARC Options"
	depends on SPARC

config ARCH
	default "sparc"

config SPARC_NWIN
	int "Number of register windows"
	default 8
	help
	  Number of implemented register windows.

# ISR tables: SPARC uses a generated software ISR table, not a generated
# hardware IRQ vector table.
config GEN_ISR_TABLES
	default y

config GEN_IRQ_VECTOR_TABLE
	default n

config GEN_SW_ISR_TABLE
	default y

config NUM_IRQS
	int
	default 32

config SPARC_CASA
	bool "CASA instructions"
	help
	  Use CASA atomic instructions. Defined by SPARC V9 and available in
	  some LEON processors.

# The SPARC V8 ABI allocates a stack frame of minimum 96 byte for each SAVE
# instruction so we bump the kernel default values.
config MAIN_STACK_SIZE
	default 4096 if COVERAGE_GCOV
	default 2048

config IDLE_STACK_SIZE
	default 1024

config ISR_STACK_SIZE
	default 4096

config TEST_EXTRA_STACK_SIZE
	default 4096 if COVERAGE_GCOV
	default 2048

config IPM_CONSOLE_STACK_SIZE
	default 4096 if COVERAGE_GCOV
	default 1024

config NET_TX_STACK_SIZE
	default 2048

config NET_RX_STACK_SIZE
	default 2048

endmenu
```
/content/code_sandbox/arch/sparc/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
266
```objective-c
/*
 *
 */

/*
 * Header to be able to compile the Zephyr kernel on top of a POSIX OS via the
 * POSIX ARCH
 *
 * This file is only used in the POSIX ARCH, and not in any other architecture
 *
 * Most users will be normally unaware of this file existence, unless they have
 * a link issue in which their POSIX functions calls are reported in errors (as
 * zap_<original_func_name>).
 * If you do see a link error telling you that zap_something is undefined, it is
 * likely that you forgot to select the corresponding Zephyr POSIX API.
 *
 * This header is included automatically when targeting some POSIX ARCH boards
 * (for ex. native_posix).
 * It will be included in _all_ Zephyr and application source files
 * (it is passed with the option "-include" to the compiler call)
 *
 * A few files (those which need to access the host OS APIs) will set
 * NO_POSIX_CHEATS to avoid including this file. These are typically only
 * the POSIX arch private files and some of the drivers meant only for the POSIX
 * architecture.
 * No file which is meant to run in an embedded target should set
 * NO_POSIX_CHEATS
 */

#if !defined(ZEPHYR_ARCH_POSIX_INCLUDE_POSIX_CHEATS_H_) && !defined(NO_POSIX_CHEATS)
#define ZEPHYR_ARCH_POSIX_INCLUDE_POSIX_CHEATS_H_

/*
 * Normally main() is the main entry point of a C executable.
 * When compiling for native_posix, the Zephyr "application" is not the actual
 * entry point of the executable but something the Zephyr OS calls during
 * boot.
 * Therefore we need to rename this application main something else, so
 * we free the function name "main" for its normal purpose
 */
#ifndef main
#define main(...) _posix_zephyr_main(__VA_ARGS__)
#endif

#if defined(__cplusplus)
/* To be able to define main() in C++ code we need to have its prototype
 * defined somewhere visibly. Otherwise name mangling will prevent the linker
 * from finding it. Zephyr assumes an int main(void) prototype and therefore
 * this will be the prototype after renaming:
 */
extern "C" int _posix_zephyr_main(void);
#endif

#ifdef CONFIG_POSIX_API

/*
 * The defines below in this header exist only to enable the Zephyr POSIX API
 * (include/posix/), and applications using it, to be compiled on top of
 * native_posix.
 *
 * Without this header, both the Zephyr POSIX API functions and the equivalent
 * host OS functions would have the same name. This would result in the linker
 * not picking the correct ones.
 *
 * Renaming these functions allows the linker to distinguish
 * which calls are meant for the Zephyr POSIX API (zap_something), and
 * which are meant for the host OS.
 *
 * The zap_ prefix should be understood as an attempt to namespace them
 * into something which is unlikely to collide with other real functions
 * (Any unlikely string would have done)
 *
 * If you want to link an external library together with Zephyr code for the
 * native_posix target, where that external library calls into the Zephyr
 * POSIX API, you may want to include this header when compiling that library,
 * or rename the calls to match the ones in the defines below.
 */

/* Condition variables */
#define pthread_cond_init(...) zap_pthread_cond_init(__VA_ARGS__)
#define pthread_cond_destroy(...) zap_pthread_cond_destroy(__VA_ARGS__)
#define pthread_cond_signal(...) zap_pthread_cond_signal(__VA_ARGS__)
#define pthread_cond_broadcast(...) zap_pthread_cond_broadcast(__VA_ARGS__)
#define pthread_cond_wait(...) zap_pthread_cond_wait(__VA_ARGS__)
#define pthread_cond_timedwait(...) zap_pthread_cond_timedwait(__VA_ARGS__)
#define pthread_condattr_init(...) zap_pthread_condattr_init(__VA_ARGS__)
#define pthread_condattr_destroy(...) zap_pthread_condattr_destroy(__VA_ARGS__)

/* Semaphore */
#define sem_destroy(...) zap_sem_destroy(__VA_ARGS__)
#define sem_getvalue(...) zap_sem_getvalue(__VA_ARGS__)
#define sem_init(...) zap_sem_init(__VA_ARGS__)
#define sem_post(...) zap_sem_post(__VA_ARGS__)
#define sem_timedwait(...) zap_sem_timedwait(__VA_ARGS__)
#define sem_trywait(...) zap_sem_trywait(__VA_ARGS__)
#define sem_wait(...) zap_sem_wait(__VA_ARGS__)

/* Mutex */
#define pthread_mutex_init(...) zap_pthread_mutex_init(__VA_ARGS__)
#define pthread_mutex_destroy(...) zap_pthread_mutex_destroy(__VA_ARGS__)
#define pthread_mutex_lock(...) zap_pthread_mutex_lock(__VA_ARGS__)
#define pthread_mutex_timedlock(...) zap_pthread_mutex_timedlock(__VA_ARGS__)
#define pthread_mutex_trylock(...) zap_pthread_mutex_trylock(__VA_ARGS__)
#define pthread_mutex_unlock(...) zap_pthread_mutex_unlock(__VA_ARGS__)
#define pthread_mutexattr_init(...) zap_pthread_mutexattr_init(__VA_ARGS__)
#define pthread_mutexattr_destroy(...) \
	zap_pthread_mutexattr_destroy(__VA_ARGS__)

/* Barrier */
#define pthread_barrier_wait(...) zap_pthread_barrier_wait(__VA_ARGS__)
#define pthread_barrier_init(...) zap_pthread_barrier_init(__VA_ARGS__)
#define pthread_barrier_destroy(...) zap_pthread_barrier_destroy(__VA_ARGS__)
#define pthread_barrierattr_init(...) zap_pthread_barrierattr_init(__VA_ARGS__)
#define pthread_barrierattr_destroy(...) \
	zap_pthread_barrierattr_destroy(__VA_ARGS__)

/* Pthread */
#define pthread_attr_init(...) zap_pthread_attr_init(__VA_ARGS__)
#define pthread_attr_destroy(...) zap_pthread_attr_destroy(__VA_ARGS__)
#define pthread_attr_getschedparam(...) \
	zap_pthread_attr_getschedparam(__VA_ARGS__)
#define pthread_attr_getstack(...) zap_pthread_attr_getstack(__VA_ARGS__)
#define pthread_attr_getstacksize(...) \
	zap_pthread_attr_getstacksize(__VA_ARGS__)
#define pthread_equal(...) zap_pthread_equal(__VA_ARGS__)
#define pthread_self(...) zap_pthread_self(__VA_ARGS__)
#define pthread_getschedparam(...) zap_pthread_getschedparam(__VA_ARGS__)
#define pthread_once(...) zap_pthread_once(__VA_ARGS__)
#define pthread_exit(...) zap_pthread_exit(__VA_ARGS__)
#define pthread_join(...) zap_pthread_join(__VA_ARGS__)
#define pthread_detach(...) zap_pthread_detach(__VA_ARGS__)
#define pthread_cancel(...) zap_pthread_cancel(__VA_ARGS__)
#define pthread_attr_getdetachstate(...) \
	zap_pthread_attr_getdetachstate(__VA_ARGS__)
#define pthread_attr_setdetachstate(...) \
	zap_pthread_attr_setdetachstate(__VA_ARGS__)
#define pthread_attr_setschedparam(...) \
	zap_pthread_attr_setschedparam(__VA_ARGS__)
#define pthread_attr_setschedpolicy(...)\
	zap_pthread_attr_setschedpolicy(__VA_ARGS__)
#define pthread_attr_getschedpolicy(...)\
	zap_pthread_attr_getschedpolicy(__VA_ARGS__)
#define pthread_attr_setstack(...) zap_pthread_attr_setstack(__VA_ARGS__)
#define pthread_create(...) zap_pthread_create(__VA_ARGS__)
#define pthread_setcancelstate(...) zap_pthread_setcancelstate(__VA_ARGS__)
#define pthread_setschedparam(...) zap_pthread_setschedparam(__VA_ARGS__)

/* Scheduler */
#define sched_yield(...) zap_sched_yield(__VA_ARGS__)
#define sched_get_priority_min(...) zap_sched_get_priority_min(__VA_ARGS__)
#define sched_get_priority_max(...) zap_sched_get_priority_max(__VA_ARGS__)
#define sched_getparam(...) zap_sched_getparam(__VA_ARGS__)
#define sched_getscheduler(...) zap_sched_getscheduler(__VA_ARGS__)

/* Sleep */
#define sleep(...) zap_sleep(__VA_ARGS__)
#define usleep(...) zap_usleep(__VA_ARGS__)

/* Clock */
#define clock_gettime(...) zap_clock_gettime(__VA_ARGS__)
#define clock_settime(...) zap_clock_settime(__VA_ARGS__)
/* Note: gettimeofday intentionally maps to the clock_-prefixed Zephyr-side
 * implementation name.
 */
#define gettimeofday(...) zap_clock_gettimeofday(__VA_ARGS__)

/* Timer */
#define timer_create(...) zap_timer_create(__VA_ARGS__)
#define timer_delete(...) zap_timer_delete(__VA_ARGS__)
#define timer_gettime(...) zap_timer_gettime(__VA_ARGS__)
#define timer_settime(...) zap_timer_settime(__VA_ARGS__)

/* Read/Write lock */
#define pthread_rwlock_destroy(...) zap_pthread_rwlock_destroy(__VA_ARGS__)
#define pthread_rwlock_init(...) zap_pthread_rwlock_init(__VA_ARGS__)
#define pthread_rwlock_rdlock(...) zap_pthread_rwlock_rdlock(__VA_ARGS__)
#define pthread_rwlock_unlock(...) zap_pthread_rwlock_unlock(__VA_ARGS__)
#define pthread_rwlock_wrlock(...) zap_pthread_rwlock_wrlock(__VA_ARGS__)
#define pthread_rwlockattr_init(...) zap_pthread_rwlockattr_init(__VA_ARGS__)
#define pthread_rwlock_timedrdlock(...)\
	zap_pthread_rwlock_timedrdlock(__VA_ARGS__)
#define pthread_rwlock_timedwrlock(...)\
	zap_pthread_rwlock_timedwrlock(__VA_ARGS__)
#define pthread_rwlock_tryrdlock(...)\
	zap_pthread_rwlock_tryrdlock(__VA_ARGS__)
#define pthread_rwlock_trywrlock(...)\
	zap_pthread_rwlock_trywrlock(__VA_ARGS__)
#define pthread_rwlockattr_destroy(...)\
	zap_pthread_rwlockattr_destroy(__VA_ARGS__)

/* Pthread key */
#define pthread_key_create(...) zap_pthread_key_create(__VA_ARGS__)
#define pthread_key_delete(...) zap_pthread_key_delete(__VA_ARGS__)
#define pthread_setspecific(...) zap_pthread_setspecific(__VA_ARGS__)
#define pthread_getspecific(...) zap_pthread_getspecific(__VA_ARGS__)

/* message queue */
#define mq_open(...) zap_mq_open(__VA_ARGS__)
#define mq_close(...) zap_mq_close(__VA_ARGS__)
#define mq_unlink(...) zap_mq_unlink(__VA_ARGS__)
#define mq_getattr(...) zap_mq_getattr(__VA_ARGS__)
#define mq_receive(...) zap_mq_receive(__VA_ARGS__)
#define mq_send(...) zap_mq_send(__VA_ARGS__)
#define mq_setattr(...) zap_mq_setattr(__VA_ARGS__)
#define mq_timedreceive(...) zap_mq_timedreceive(__VA_ARGS__)
#define mq_timedsend(...) zap_mq_timedsend(__VA_ARGS__)

/* File system */
/* Note: unlike the ones above, these are object-like macros (no parameter
 * list), so they rename every occurrence of the identifier, including e.g.
 * "struct stat".
 */
#define open zap_open
#define close zap_close
#define write zap_write
#define read zap_read
#define lseek zap_lseek
#define opendir zap_opendir
#define closedir zap_closedir
#define readdir zap_readdir
#define rename zap_rename
#define unlink zap_unlink
#define stat zap_stat
#define mkdir zap_mkdir

/* eventfd */
#define eventfd zap_eventfd
#define eventfd_read zap_eventfd_read
#define eventfd_write zap_eventfd_write

#endif /* CONFIG_POSIX_API */

#endif /* ZEPHYR_ARCH_POSIX_INCLUDE_POSIX_CHEATS_H_ */
```
/content/code_sandbox/arch/posix/include/posix_cheats.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,316
```c
/*
 *
 */

#include <zephyr/kernel.h>
#include <ksched.h>

void z_thread_entry_wrapper(k_thread_entry_t thread,
			    void *arg1,
			    void *arg2,
			    void *arg3);

/*
 * Frame used by _thread_entry_wrapper
 *
 * Allocate a 16 register window save area at bottom of the stack. This is
 * required if we need to taken a trap (interrupt) in the thread entry wrapper.
 */
struct init_stack_frame {
	uint32_t window_save_area[16];
};

#if defined(CONFIG_FPU_SHARING)
#define USER_FP_MASK K_FP_REGS
#else
#define USER_FP_MASK 0
#endif

/*
 * Set up the initial register context of a new thread so that the first
 * switch into it runs z_thread_entry_wrapper with (entry, p1, p2, p3)
 * available in the i0..i3 registers.
 */
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3)
{
	struct init_stack_frame *iframe;

	/* Initial stack frame data, stored at base of the stack */
	iframe = Z_STACK_PTR_TO_FRAME(struct init_stack_frame, stack_ptr);

	/* Entry point and arguments, where the entry wrapper expects them */
	thread->callee_saved.i0 = (uint32_t) entry;
	thread->callee_saved.i1 = (uint32_t) p1;
	thread->callee_saved.i2 = (uint32_t) p2;
	thread->callee_saved.i3 = (uint32_t) p3;
	thread->callee_saved.i6 = 0; /* frame pointer */
	thread->callee_saved.o6 = (uint32_t) iframe; /* stack pointer */
	/* -8 so that the SPARC return sequence (branch to %o7 + 8) lands
	 * exactly at z_thread_entry_wrapper — TODO confirm against the
	 * switch code.
	 */
	thread->callee_saved.o7 = (uint32_t) z_thread_entry_wrapper - 8;

	/* PSR bits: presumably supervisor mode, previous-supervisor, and
	 * traps enabled — confirm against the SPARC V8 PSR definition.
	 */
	thread->callee_saved.psr = PSR_S | PSR_PS | PSR_ET;

	if (IS_ENABLED(CONFIG_FPU_SHARING)) {
		/* Selected threads can use the FPU */
		if (thread->base.user_options & USER_FP_MASK) {
			thread->callee_saved.psr |= PSR_EF;
		}
	} else if (IS_ENABLED(CONFIG_FPU)) {
		/* Any thread can use the FPU */
		thread->callee_saved.psr |= PSR_EF;
	}

	thread->switch_handle = thread;
}

/* Return the switch handle of the next thread to run, recording the
 * outgoing thread in @a old_thread.
 */
void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
{
	*old_thread = _current;

	return z_get_next_switch_handle(*old_thread);
}

#if defined(CONFIG_FPU_SHARING)
/* Runtime FPU enable/disable is not supported here: FPU access is fixed at
 * thread creation time via PSR_EF (see arch_new_thread() above).
 */
int arch_float_disable(struct k_thread *thread)
{
	return -ENOTSUP;
}

int arch_float_enable(struct k_thread *thread, unsigned int options)
{
	return -ENOTSUP;
}
#endif /* CONFIG_FPU_SHARING */
```
/content/code_sandbox/arch/sparc/core/thread.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
546
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/kernel_structs.h> #include <zephyr/irq.h> #include <zephyr/irq_offload.h> volatile irq_offload_routine_t _offload_routine; static volatile const void *offload_param; void z_irq_do_offload(void) { irq_offload_routine_t tmp; if (!_offload_routine) { return; } tmp = _offload_routine; _offload_routine = NULL; tmp((const void *)offload_param); } void arch_irq_offload(irq_offload_routine_t routine, const void *parameter) { unsigned int key; key = irq_lock(); _offload_routine = routine; offload_param = parameter; /* Generate irq offload trap */ __asm__ volatile ("ta 13"); irq_unlock(key); } ```
/content/code_sandbox/arch/sparc/core/irq_offload.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
189