text stringlengths 9 39.2M | dir stringlengths 25 226 | lang stringclasses 163 values | created_date timestamp[s] | updated_date timestamp[s] | repo_name stringclasses 751 values | repo_full_name stringclasses 752 values | star int64 1.01k 183k | len_tokens int64 1 18.5M |
|---|---|---|---|---|---|---|---|---|
```objective-c
/* arcv2_irq_unit.h - ARCv2 Interrupt Unit device driver */
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARC_V2_ARCV2_IRQ_UNIT_H_
#define ZEPHYR_INCLUDE_ARCH_ARC_V2_ARCV2_IRQ_UNIT_H_
#ifdef __cplusplus
extern "C" {
#endif
/* configuration flags for interrupt unit */
#define _ARC_V2_INT_PRIO_MASK 0xf
#define _ARC_V2_INT_DISABLE 0
#define _ARC_V2_INT_ENABLE 1
#define _ARC_V2_INT_LEVEL 0
#define _ARC_V2_INT_PULSE 1
#ifndef _ASMLANGUAGE
/*
* NOTE:
*
* All APIs provided by this file are protected with INTERRUPTS LOCKED. The
* APIs themselves are writing the IRQ_SELECT, selecting which IRQ's registers
* it wants to write to, then write to them: THIS IS NOT AN ATOMIC OPERATION.
*
* Locking the interrupts inside of the APIs are some kind of self-protection
* to guarantee the correctness of operation if the callers don't lock
* the interrupt.
*
*/
/**
 * @brief Enable or disable an interrupt line
 *
 * Writes @p enable into the IRQ_ENABLE register of the interrupt selected
 * through IRQ_SELECT. The select/write pair is not atomic, so interrupts
 * are locked for the duration.
 *
 * @param irq IRQ line number
 * @param enable 1 to enable, 0 to disable
 */
static ALWAYS_INLINE
void z_arc_v2_irq_unit_irq_enable_set(int irq, unsigned char enable)
{
	unsigned int lock_key = arch_irq_lock();

	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_ENABLE, enable);

	arch_irq_unlock(lock_key);
}
/**
 * @brief Enable interrupt
 *
 * Enables the specified interrupt by writing _ARC_V2_INT_ENABLE to its
 * IRQ_ENABLE register via z_arc_v2_irq_unit_irq_enable_set().
 *
 * @param irq IRQ line number
 */
static ALWAYS_INLINE
void z_arc_v2_irq_unit_int_enable(int irq)
{
	z_arc_v2_irq_unit_irq_enable_set(irq, _ARC_V2_INT_ENABLE);
}
/**
 * @brief Disable interrupt
 *
 * Disables the specified interrupt by writing _ARC_V2_INT_DISABLE to its
 * IRQ_ENABLE register via z_arc_v2_irq_unit_irq_enable_set().
 *
 * @param irq IRQ line number
 */
static ALWAYS_INLINE
void z_arc_v2_irq_unit_int_disable(int irq)
{
	z_arc_v2_irq_unit_irq_enable_set(irq, _ARC_V2_INT_DISABLE);
}
/**
 * @brief Poll the enable status of interrupt
 *
 * Reads back bit 0 of the IRQ_ENABLE register for the selected interrupt.
 * Interrupts are locked around the non-atomic select/read pair.
 *
 * @param irq IRQ line number
 *
 * @return true if enabled, false if disabled
 */
static ALWAYS_INLINE
bool z_arc_v2_irq_unit_int_enabled(int irq)
{
	bool enabled;
	unsigned int lock_key = arch_irq_lock();

	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
	enabled = (z_arc_v2_aux_reg_read(_ARC_V2_IRQ_ENABLE) & 0x1) != 0;
	arch_irq_unlock(lock_key);

	return enabled;
}
/**
 * @brief Set interrupt priority
 *
 * Set the priority of the specified interrupt. Interrupts are locked
 * around the non-atomic IRQ_SELECT/IRQ_PRIORITY register pair.
 *
 * @param irq IRQ line number
 * @param prio priority level to program into IRQ_PRIORITY
 */
static ALWAYS_INLINE
void z_arc_v2_irq_unit_prio_set(int irq, unsigned char prio)
{
	unsigned int key = arch_irq_lock();

	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
#if defined(CONFIG_ARC_SECURE_FIRMWARE)
	/* Read-modify-write: replace only the priority field so the
	 * secure-state bits of IRQ_PRIORITY are preserved.
	 */
	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_PRIORITY,
	(z_arc_v2_aux_reg_read(_ARC_V2_IRQ_PRIORITY) & (~_ARC_V2_INT_PRIO_MASK))
	| prio);
#else
	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_PRIORITY, prio);
#endif
	arch_irq_unlock(key);
}
#if defined(CONFIG_ARC_SECURE_FIRMWARE)
/**
 * @brief Configure the secure state of interrupt
 *
 * Configure the secure state of the specified interrupt: sets or clears
 * the _ARC_V2_IRQ_PRIORITY_SECURE bit in the IRQ_PRIORITY register.
 *
 * NOTE(review): "uinit" in the function name looks like a typo for
 * "unit", but the name is part of the API used by callers, so it is
 * left unchanged here.
 *
 * @param irq IRQ line number
 * @param secure true to mark the interrupt secure; false to clear the
 *               secure bit (keeps only the _ARC_V2_INT_PRIO_MASK bits)
 */
static ALWAYS_INLINE
void z_arc_v2_irq_uinit_secure_set(int irq, bool secure)
{
	unsigned int key = arch_irq_lock();

	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
	if (secure) {
		z_arc_v2_aux_reg_write(_ARC_V2_IRQ_PRIORITY,
		z_arc_v2_aux_reg_read(_ARC_V2_IRQ_PRIORITY) |
		_ARC_V2_IRQ_PRIORITY_SECURE);
	} else {
		/* Clearing the secure bit also discards any bits outside the
		 * priority mask.
		 */
		z_arc_v2_aux_reg_write(_ARC_V2_IRQ_PRIORITY,
		z_arc_v2_aux_reg_read(_ARC_V2_IRQ_PRIORITY) &
		_ARC_V2_INT_PRIO_MASK);
	}
	arch_irq_unlock(key);
}
#endif
/**
 * @brief Set interrupt sensitivity
 *
 * Programs the IRQ_TRIGGER register of the selected interrupt with
 * either _ARC_V2_INT_LEVEL or _ARC_V2_INT_PULSE. Level interrupts stay
 * asserted until the handler clears them at the peripheral; pulse
 * interrupts self-clear as the handler is entered.
 *
 * @param irq IRQ line number
 * @param s sensitivity (_ARC_V2_INT_LEVEL or _ARC_V2_INT_PULSE)
 */
static ALWAYS_INLINE
void z_arc_v2_irq_unit_sensitivity_set(int irq, int s)
{
	unsigned int lock_key = arch_irq_lock();

	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_TRIGGER, s);

	arch_irq_unlock(lock_key);
}
/*
 * @brief Check whether processor in interrupt/exception state
 *
 * Check whether processor in interrupt/exception state
 *
 * @return true when an exception is active (STATUS32.AE set) or when any
 * of the low 16 bits of AUX_IRQ_ACT are set; false otherwise
 */
static ALWAYS_INLINE
bool z_arc_v2_irq_unit_is_in_isr(void)
{
	uint32_t act = z_arc_v2_aux_reg_read(_ARC_V2_AUX_IRQ_ACT);

	/* in exception ? AE flags an active exception */
	if (z_arc_v2_aux_reg_read(_ARC_V2_STATUS32) & _ARC_V2_STATUS32_AE) {
		return true;
	}
	/* low 16 bits of AUX_IRQ_ACT record active interrupt levels */
	return ((act & 0xffff) != 0U);
}
/**
 * @brief Sets an IRQ line to level/pulse trigger
 *
 * Sets IRQ line @p irq to trigger an interrupt on the level or the edge
 * of the signal. Valid values for @p trigger are _ARC_V2_INT_LEVEL and
 * _ARC_V2_INT_PULSE. Interrupts are locked around the non-atomic
 * select/write pair.
 *
 * @param irq IRQ line number
 * @param trigger trigger type to program into IRQ_TRIGGER
 */
static ALWAYS_INLINE
void z_arc_v2_irq_unit_trigger_set(int irq, unsigned int trigger)
{
	unsigned int lock_key = arch_irq_lock();

	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_TRIGGER, trigger);

	arch_irq_unlock(lock_key);
}
/**
 * @brief Returns an IRQ line trigger type
 *
 * Reads back the IRQ_TRIGGER register of IRQ line @p irq. The returned
 * value is _ARC_V2_INT_LEVEL or _ARC_V2_INT_PULSE. Interrupts are locked
 * around the non-atomic select/read pair.
 *
 * @param irq IRQ line number
 *
 * @return trigger state
 */
static ALWAYS_INLINE
unsigned int z_arc_v2_irq_unit_trigger_get(int irq)
{
	unsigned int trigger;
	unsigned int lock_key = arch_irq_lock();

	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
	trigger = z_arc_v2_aux_reg_read(_ARC_V2_IRQ_TRIGGER);
	arch_irq_unlock(lock_key);

	return trigger;
}
/**
 * @brief Send EOI signal to interrupt unit
 *
 * This routine sends an EOI (End Of Interrupt) signal to the interrupt
 * unit to clear a pulse-triggered interrupt: it writes 1 to the
 * IRQ_PULSE_CANCEL register of the selected interrupt. Interrupts are
 * locked around the non-atomic select/write pair.
 *
 * @param irq IRQ line number
 */
static ALWAYS_INLINE
void z_arc_v2_irq_unit_int_eoi(int irq)
{
	unsigned int key = arch_irq_lock();

	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_PULSE_CANCEL, 1);
	arch_irq_unlock(key);
}
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_ARC_V2_ARCV2_IRQ_UNIT_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arc/v2/arcv2_irq_unit.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,553 |
```linker script
/*
*
*/
#if DT_NODE_HAS_PROP(DT_INST(0, arc_xccm), reg) && \
(DT_REG_SIZE(DT_INST(0, arc_xccm)) > 0)
#define XCCM_START DT_REG_ADDR(DT_INST(0, arc_xccm))
#define XCCM_SIZE DT_REG_SIZE(DT_INST(0, arc_xccm))
#endif
#if DT_NODE_HAS_PROP(DT_INST(0, arc_yccm), reg) && \
(DT_REG_SIZE(DT_INST(0, arc_yccm)) > 0)
#define YCCM_START DT_REG_ADDR(DT_INST(0, arc_yccm))
#define YCCM_SIZE DT_REG_SIZE(DT_INST(0, arc_yccm))
#endif
``` | /content/code_sandbox/include/zephyr/arch/arc/v2/xy_mem.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 172 |
```objective-c
/*
*
*/
/**
* @file
* @brief ARCv2 public interrupt handling
*
* ARCv2 kernel interrupt handling interface. Included by arc/arch.h.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARC_V2_IRQ_H_
#define ZEPHYR_INCLUDE_ARCH_ARC_V2_IRQ_H_
#include <zephyr/arch/arc/v2/aux_regs.h>
#include <zephyr/toolchain/common.h>
#include <zephyr/irq.h>
#include <zephyr/sys/util.h>
#include <zephyr/sw_isr_table.h>
#ifdef __cplusplus
extern "C" {
#endif
#define ARC_MP_PRIMARY_CPU_ID 0
#ifndef _ASMLANGUAGE
extern void z_arc_firq_stack_set(void);
extern void arch_irq_enable(unsigned int irq);
extern void arch_irq_disable(unsigned int irq);
extern int arch_irq_is_enabled(unsigned int irq);
#ifdef CONFIG_TRACING_ISR
extern void sys_trace_isr_enter(void);
extern void sys_trace_isr_exit(void);
#endif
extern void z_irq_priority_set(unsigned int irq, unsigned int prio,
uint32_t flags);
/* Z_ISR_DECLARE will populate the .intList section with the interrupt's
* parameters, which will then be used by gen_irq_tables.py to create
* the vector table and the software ISR table. This is all done at
* build-time.
*
* We additionally set the priority in the interrupt controller at
* runtime.
*/
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
{ \
Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
z_irq_priority_set(irq_p, priority_p, flags_p); \
}
/**
* Configure a 'direct' static interrupt.
*
* When firq has no separate stack(CONFIG_ARC_FIRQ_STACK=N), it's not safe
* to call C ISR handlers because sp will be switched to bank1's sp which
* is undefined value.
* So for this case, the priority cannot be set to 0 but next level 1
*
* When firq has separate stack (CONFIG_ARC_FIRQ_STACK=y) but at the same
* time stack checking is enabled (CONFIG_ARC_STACK_CHECKING=y)
* the stack checking can raise stack check exception as sp is switched to
* firq's stack (bank1's sp). So for this case, the priority cannot be set
* to 0 but next level 1.
*
* Note that for the above cases, if application still wants to use firq by
* setting priority to 0. Application can call z_irq_priority_set again.
* Then it's left to application to handle the details of firq
*
* See include/irq.h for details.
* All arguments must be computable at build time.
*/
#define ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
{ \
Z_ISR_DECLARE_DIRECT(irq_p, ISR_FLAG_DIRECT, isr_p); \
BUILD_ASSERT(priority_p || !IS_ENABLED(CONFIG_ARC_FIRQ) || \
(IS_ENABLED(CONFIG_ARC_FIRQ_STACK) && \
!IS_ENABLED(CONFIG_ARC_STACK_CHECKING)), \
"irq priority cannot be set to 0 when CONFIG_ARC_FIRQ_STACK" \
"is not configured or CONFIG_ARC_FIRQ_STACK " \
"and CONFIG_ARC_STACK_CHECKING are configured together"); \
z_irq_priority_set(irq_p, priority_p, flags_p); \
}
/* Direct-ISR prologue hook: emits the tracing "ISR enter" event when
 * CONFIG_TRACING_ISR is enabled; otherwise compiles to nothing.
 */
static inline void arch_isr_direct_header(void)
{
#ifdef CONFIG_TRACING_ISR
	sys_trace_isr_enter();
#endif
}
/* Direct-ISR epilogue hook: clears a pending software-generated interrupt
 * (when the current interrupt cause matches AUX_IRQ_HINT) and emits the
 * tracing "ISR exit" event when CONFIG_TRACING_ISR is enabled.
 *
 * @param maybe_swap unused: scheduling cannot be done from a direct ISR
 *                   (see ARCH_ISR_DIRECT_DECLARE below), so no swap is
 *                   ever performed here.
 */
static inline void arch_isr_direct_footer(int maybe_swap)
{
	/* Explicitly discard the parameter to silence -Wunused-parameter;
	 * it exists only to match the generic ISR_DIRECT_FOOTER contract.
	 */
	(void)maybe_swap;

	/* clear SW generated interrupt */
	if (z_arc_v2_aux_reg_read(_ARC_V2_ICAUSE) ==
	    z_arc_v2_aux_reg_read(_ARC_V2_AUX_IRQ_HINT)) {
		z_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_HINT, 0);
	}
#ifdef CONFIG_TRACING_ISR
	sys_trace_isr_exit();
#endif
}
#define ARCH_ISR_DIRECT_HEADER() arch_isr_direct_header()
extern void arch_isr_direct_header(void);
#define ARCH_ISR_DIRECT_FOOTER(swap) arch_isr_direct_footer(swap)
#if defined(__CCAC__)
#define _ARC_DIRECT_ISR_FUNC_ATTRIBUTE __interrupt__
#else
#define _ARC_DIRECT_ISR_FUNC_ATTRIBUTE interrupt("ilink")
#endif
/*
* Scheduling can not be done in direct isr. If required, please use kernel
* aware interrupt handling
*/
#define ARCH_ISR_DIRECT_DECLARE(name) \
static inline int name##_body(void); \
__attribute__ ((_ARC_DIRECT_ISR_FUNC_ATTRIBUTE))void name(void) \
{ \
ISR_DIRECT_HEADER(); \
name##_body(); \
ISR_DIRECT_FOOTER(0); \
} \
static inline int name##_body(void)
/**
*
* @brief Disable all interrupts on the local CPU
*
* This routine disables interrupts. It can be called from either interrupt or
* thread level. This routine returns an architecture-dependent
* lock-out key representing the "interrupt disable state" prior to the call;
* this key can be passed to irq_unlock() to re-enable interrupts.
*
* The lock-out key should only be used as the argument to the
* irq_unlock() API. It should never be used to manually re-enable
* interrupts or to inspect or manipulate the contents of the source register.
*
* This function can be called recursively: it will return a key to return the
* state of interrupt locking to the previous level.
*
* WARNINGS
* Invoking a kernel routine with interrupts locked may result in
* interrupts being re-enabled for an unspecified period of time. If the
* called routine blocks, interrupts will be re-enabled while another
* thread executes, or while the system is idle.
*
* The "interrupt disable state" is an attribute of a thread. Thus, if a
* thread disables interrupts and subsequently invokes a kernel
* routine that causes the calling thread to block, the interrupt
* disable state will be restored when the thread is later rescheduled
* for execution.
*
* @return An architecture-dependent lock-out key representing the
* "interrupt disable state" prior to the call.
*/
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
	unsigned int key;

	/* "clri" disables interrupts and returns the previous interrupt
	 * enable state in @c key; "memory" keeps the compiler from moving
	 * memory accesses across the lock.
	 */
	__asm__ volatile("clri %0" : "=r"(key):: "memory");
	return key;
}
/**
 * @brief Restore the interrupt state saved by arch_irq_lock()
 *
 * @param key lock-out key returned by a matching arch_irq_lock() call
 */
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
	/* "seti" restores the interrupt enable state encoded in the key */
	__asm__ volatile("seti %0" : : "ir"(key) : "memory");
}
/**
 * @brief Test whether a lock key represents the "interrupts enabled" state
 *
 * ARC irq lock uses instruction "clri r0", where
 * r0 == {26'd0, 1'b1, STATUS32.IE, STATUS32.E[3:0]}; bit 4 records the
 * IE (Interrupt Enable) flag at the time of locking.
 *
 * @param key lock-out key returned by arch_irq_lock()
 *
 * @return true if interrupts were enabled when the key was captured
 */
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
	return (key & 0x10) != 0U;
}
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_ARC_V2_IRQ_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arc/v2/irq.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,491 |
```objective-c
/* asm_inline_gcc.h - ARC inline assembler and macros for public functions */
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARC_V2_ASM_INLINE_GCC_H_
#define ZEPHYR_INCLUDE_ARCH_ARC_V2_ASM_INLINE_GCC_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief read timestamp register (CPU frequency)
*/
extern uint64_t z_tsc_read(void);
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARC_V2_ASM_INLINE_GCC_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arc/v2/asm_inline_gcc.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 140 |
```objective-c
/*
*
*/
/**
* @file
* @brief ARCv2 public kernel miscellaneous
*
* ARC-specific kernel miscellaneous interface. Included by arc/arch.h.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARC_V2_MISC_H_
#define ZEPHYR_INCLUDE_ARCH_ARC_V2_MISC_H_
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _ASMLANGUAGE
extern unsigned int z_arc_cpu_sleep_mode;
extern uint32_t sys_clock_cycle_get_32(void);
static inline uint32_t arch_k_cycle_get_32(void)
{
return sys_clock_cycle_get_32();
}
extern uint64_t sys_clock_cycle_get_64(void);
static inline uint64_t arch_k_cycle_get_64(void)
{
return sys_clock_cycle_get_64();
}
#endif
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_ARC_V2_MISC_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arc/v2/misc.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 183 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARC_V2_SYS_IO_H_
#define ZEPHYR_INCLUDE_ARCH_ARC_V2_SYS_IO_H_
#ifndef _ASMLANGUAGE
#include <zephyr/toolchain.h>
#include <zephyr/sys/sys_io.h>
#include <zephyr/arch/arc/v2/aux_regs.h>
#include <zephyr/types.h>
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Implementation of sys_io.h's documented functions */
/* Write an 8-bit value to "port". NOTE(review): on ARC these sys_io
 * "ports" map onto auxiliary registers via z_arc_v2_aux_reg_write.
 */
static ALWAYS_INLINE
void sys_out8(uint8_t data, io_port_t port)
{
	z_arc_v2_aux_reg_write(port, data);
}
/* Read an 8-bit value from "port": reads the full 32-bit auxiliary
 * register and keeps only the low byte.
 */
static ALWAYS_INLINE
uint8_t sys_in8(io_port_t port)
{
	return (uint8_t)(z_arc_v2_aux_reg_read(port) & 0x000000ff);
}
/* Write a 16-bit value to "port" (an ARC auxiliary register). */
static ALWAYS_INLINE
void sys_out16(uint16_t data, io_port_t port)
{
	z_arc_v2_aux_reg_write(port, data);
}
/* Read a 16-bit value from "port": reads the full 32-bit auxiliary
 * register and keeps only the low half-word.
 */
static ALWAYS_INLINE
uint16_t sys_in16(io_port_t port)
{
	return (uint16_t)(z_arc_v2_aux_reg_read(port) & 0x0000ffff);
}
/* Write a 32-bit value to "port" (an ARC auxiliary register). */
static ALWAYS_INLINE
void sys_out32(uint32_t data, io_port_t port)
{
	z_arc_v2_aux_reg_write(port, data);
}
/* Read a 32-bit value from "port" (an ARC auxiliary register). */
static ALWAYS_INLINE
uint32_t sys_in32(io_port_t port)
{
	return z_arc_v2_aux_reg_read(port);
}
/* Set bit "bit" of auxiliary register "port" with a non-atomic
 * lr/bset/sr read-modify-write sequence.
 *
 * NOTE(review): "reg" is declared as an *input* operand yet is written by
 * the asm (it holds the loaded value). This works only while the compiler
 * does not reuse that register; consider an output/clobber operand —
 * flagged, not changed here.
 */
static ALWAYS_INLINE
void sys_io_set_bit(io_port_t port, unsigned int bit)
{
	uint32_t reg = 0; /* scratch register for the loaded value */

	__asm__ volatile("lr %1, [%0]\n"
			 "bset %1, %1, %2\n"
			 "sr %1, [%0];\n\t"
			 :
			 : "ir" (port),
			 "r" (reg), "ir" (bit)
			 : "memory", "cc");
}
/* Clear bit "bit" of auxiliary register "port" with a non-atomic
 * lr/bclr/sr read-modify-write sequence.
 *
 * NOTE(review): as in sys_io_set_bit, "reg" is an input operand that the
 * asm actually writes; relies on the compiler not reusing that register —
 * flagged, not changed here.
 */
static ALWAYS_INLINE
void sys_io_clear_bit(io_port_t port, unsigned int bit)
{
	uint32_t reg = 0; /* scratch register for the loaded value */

	__asm__ volatile("lr %1, [%0]\n"
			 "bclr %1, %1, %2\n"
			 "sr %1, [%0];\n\t"
			 :
			 : "ir" (port),
			 "r" (reg), "ir" (bit)
			 : "memory", "cc");
}
/* Test bit "bit" of auxiliary register "port".
 *
 * The asm loads the register ("lr"), tests the bit ("btst", which updates
 * the Z flag), then reads STATUS32 so the Z flag can be inspected in C.
 *
 * NOTE(review): "reg" is an input operand written by the asm (scratch for
 * the loaded value) — same caveat as sys_io_set_bit; flagged, not changed.
 *
 * @return 1 if the bit is set, 0 otherwise
 */
static ALWAYS_INLINE
int sys_io_test_bit(io_port_t port, unsigned int bit)
{
	uint32_t status = _ARC_V2_STATUS32; /* aux address of STATUS32 */
	uint32_t reg = 0; /* scratch for the loaded register value */
	uint32_t ret;

	__asm__ volatile("lr %2, [%1]\n"
			 "btst %2, %3\n"
			 "lr %0, [%4];\n\t"
			 : "=r" (ret)
			 : "ir" (port),
			 "r" (reg), "ir" (bit), "i" (status)
			 : "memory", "cc");

	/* Z set means the tested bit was zero */
	return !(ret & _ARC_V2_STATUS32_Z);
}
/* Read the previous state of bit "bit" in "port", then set it.
 * Not atomic: the test and the set are two separate register accesses.
 *
 * @return 1 if the bit was previously set, 0 otherwise
 */
static ALWAYS_INLINE
int sys_io_test_and_set_bit(io_port_t port, unsigned int bit)
{
	int previous = sys_io_test_bit(port, bit);

	sys_io_set_bit(port, bit);
	return previous;
}
/* Read the previous state of bit "bit" in "port", then clear it.
 * Not atomic: the test and the clear are two separate register accesses.
 *
 * @return 1 if the bit was previously set, 0 otherwise
 */
static ALWAYS_INLINE
int sys_io_test_and_clear_bit(io_port_t port, unsigned int bit)
{
	int previous = sys_io_test_bit(port, bit);

	sys_io_clear_bit(port, bit);
	return previous;
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARC_V2_SYS_IO_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arc/v2/sys_io.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 781 |
```objective-c
/* asm_inline.h - ARC inline assembler and macros for public functions */
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARC_V2_ASM_INLINE_H_
#define ZEPHYR_INCLUDE_ARCH_ARC_V2_ASM_INLINE_H_
/*
* The file must not be included directly
* Include kernel.h instead
*/
#if defined(__GNUC__)
#include <zephyr/arch/arc/v2/asm_inline_gcc.h>
#else
#error "you need to provide an asm_inline.h for your compiler"
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_ARC_V2_ASM_INLINE_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arc/v2/asm_inline.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 122 |
```objective-c
/*
*
*/
/**
* @file
* @brief ARCv2 public error handling
*
* ARC-specific kernel error handling interface. Included by arc/arch.h.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARC_V2_ERROR_H_
#define ZEPHYR_INCLUDE_ARCH_ARC_V2_ERROR_H_
#include <zephyr/arch/arc/syscall.h>
#include <zephyr/arch/arc/v2/exception.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* use trap_s to raise a SW exception
*/
#define ARCH_EXCEPT(reason_p) do { \
__asm__ volatile ( \
"mov %%r0, %[reason]\n\t" \
"trap_s %[id]\n\t" \
: \
: [reason] "i" (reason_p), \
[id] "i" (_TRAP_S_CALL_RUNTIME_EXCEPT) \
: "memory"); \
CODE_UNREACHABLE; /* LCOV_EXCL_LINE */ \
} while (false)
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_ARC_V2_ERROR_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arc/v2/error.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 250 |
```objective-c
/*
*
*/
/**
* @file
* @brief ARCv2 auxiliary registers definitions
*
*
* Definitions for auxiliary registers.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARC_V2_AUX_REGS_H_
#define ZEPHYR_INCLUDE_ARCH_ARC_V2_AUX_REGS_H_
#include <zephyr/sys/util_macro.h>
#define _ARC_V2_LP_START 0x002
#define _ARC_V2_LP_END 0x003
#define _ARC_V2_IDENTITY 0x004
#define _ARC_V2_SEC_STAT 0x09
#define _ARC_V2_STATUS32 0x00a
#define _ARC_V2_STATUS32_P0 0x00b
#define _ARC_V2_USER_SP 0x00d
#define _ARC_V2_AUX_IRQ_CTRL 0x00e
#define _ARC_V2_IC_IVIC 0x010
#define _ARC_V2_IC_CTRL 0x011
#define _ARC_V2_IC_LIL 0x013
#define _ARC_V2_IC_IVIL 0x019
#define _ARC_V2_IC_DATA 0x01d
#define _ARC_V2_TMR0_COUNT 0x021
#define _ARC_V2_TMR0_CONTROL 0x022
#define _ARC_V2_TMR0_LIMIT 0x023
#define _ARC_V2_IRQ_VECT_BASE 0x025
#define _ARC_V2_IRQ_VECT_BASE_S 0x26
#define _ARC_V2_KERNEL_SP 0x38
#define _ARC_V2_SEC_U_SP 0x39
#define _ARC_V2_SEC_K_SP 0x3a
#define _ARC_V2_AUX_IRQ_ACT 0x043
#define _ARC_V2_DC_IVDC 0x047
#define _ARC_V2_DC_CTRL 0x048
#define _ARC_V2_DC_LDL 0x049
#define _ARC_V2_DC_IVDL 0x04a
#define _ARC_V2_DC_FLSH 0x04b
#define _ARC_V2_DC_FLDL 0x04c
#define _ARC_V2_EA_BUILD 0x065
#define _ARC_V2_VECBASE_AC_BUILD 0x068
#define _ARC_V2_FP_BUILD 0x06b
#define _ARC_V2_DPFP_BUILD 0x06c
#define _ARC_V2_MPU_BUILD 0x06d
#define _ARC_V2_RF_BUILD 0x06e
#define _ARC_V2_MMU_BUILD 0x06f
#define _ARC_V2_VECBASE_BUILD 0x071
#define _ARC_V2_D_CACHE_BUILD 0x072
#define _ARC_V2_DCCM_BUILD 0x074
#define _ARC_V2_TIMER_BUILD 0x075
#define _ARC_V2_AP_BUILD 0x076
#define _ARC_V2_I_CACHE_BUILD 0x077
#define _ARC_V2_ICCM_BUILD 0x078
#define _ARC_V2_MULTIPLY_BUILD 0x07b
#define _ARC_V2_SWAP_BUILD 0x07c
#define _ARC_V2_NORM_BUILD 0x07d
#define _ARC_V2_MINMAX_BUILD 0x07e
#define _ARC_V2_BARREL_BUILD 0x07f
#define _ARC_V2_ISA_CONFIG 0x0c1
#define _ARC_V2_SEP_BUILD 0x0c7
#define _ARC_V2_LPB_BUILD 0x0e9
#define _ARC_V2_LPB_CTRL 0x488
#define _ARC_V2_IRQ_BUILD 0x0f3
#define _ARC_V2_PCT_BUILD 0x0f5
#define _ARC_V2_CC_BUILD 0x0f6
#define _ARC_V2_TMR1_COUNT 0x100
#define _ARC_V2_TMR1_CONTROL 0x101
#define _ARC_V2_TMR1_LIMIT 0x102
#define _ARC_V2_S_TMR0_COUNT 0x106
#define _ARC_V2_S_TMR0_CONTROL 0x107
#define _ARC_V2_S_TMR0_LIMIT 0x108
#define _ARC_V2_S_TMR1_COUNT 0x109
#define _ARC_V2_S_TMR1_CONTROL 0x10a
#define _ARC_V2_S_TMR1_LIMIT 0x10b
#define _ARC_V2_IRQ_PRIO_PEND 0x200
#define _ARC_V2_AUX_IRQ_HINT 0x201
#define _ARC_V2_IRQ_PRIORITY 0x206
#define _ARC_V2_USTACK_TOP 0x260
#define _ARC_V2_USTACK_BASE 0x261
#define _ARC_V2_S_USTACK_TOP 0x262
#define _ARC_V2_S_USTACK_BASE 0x263
#define _ARC_V2_KSTACK_TOP 0x264
#define _ARC_V2_KSTACK_BASE 0x265
#define _ARC_V2_S_KSTACK_TOP 0x266
#define _ARC_V2_S_KSTACK_BASE 0x267
#define _ARC_V2_NSC_TABLE_TOP 0x268
#define _ARC_V2_NSC_TABLE_BASE 0x269
#define _ARC_V2_JLI_BASE 0x290
#define _ARC_V2_LDI_BASE 0x291
#define _ARC_V2_EI_BASE 0x292
#define _ARC_V2_ERET 0x400
#define _ARC_V2_ERSTATUS 0x402
#define _ARC_V2_ECR 0x403
#define _ARC_V2_EFA 0x404
#define _ARC_V2_ERSEC_STAT 0x406
#define _ARC_V2_ICAUSE 0x40a
#define _ARC_V2_IRQ_SELECT 0x40b
#define _ARC_V2_IRQ_ENABLE 0x40c
#define _ARC_V2_IRQ_TRIGGER 0x40d
#define _ARC_V2_IRQ_STATUS 0x40f
#define _ARC_V2_IRQ_PULSE_CANCEL 0x415
#define _ARC_V2_IRQ_PENDING 0x416
#define _ARC_V2_FPU_CTRL 0x300
#define _ARC_V2_FPU_STATUS 0x301
#define _ARC_V2_FPU_DPFP1L 0x302
#define _ARC_V2_FPU_DPFP1H 0x303
#define _ARC_V2_FPU_DPFP2L 0x304
#define _ARC_V2_FPU_DPFP2H 0x305
#define _ARC_V2_MPU_EN 0x409
#define _ARC_V2_MPU_RDB0 0x422
#define _ARC_V2_MPU_RDP0 0x423
#define _ARC_V2_MPU_INDEX 0x448
#define _ARC_V2_MPU_RSTART 0x449
#define _ARC_V2_MPU_REND 0x44A
#define _ARC_V2_MPU_RPER 0x44B
#define _ARC_V2_MPU_PROBE 0x44C
#define _ARC_V2_ACC0_GHI 0x583
#define _ARC_V2_ACC0_HI 0x582
#define _ARC_V2_ACC0_GLO 0x581
#define _ARC_V2_ACC0_LO 0x580
#define _ARC_V2_DSP_BUILD 0x7A
#define _ARC_V2_DSP_CTRL 0x59f
#define _ARC_V2_DSP_BFLY0 0x598
#define _ARC_V2_DSP_FFT_CTRL 0x59e
#define _ARC_V2_AGU_BUILD 0xcc
#define _ARC_V2_AGU_AP0 0x5c0
#define _ARC_V2_AGU_AP1 0x5c1
#define _ARC_V2_AGU_AP2 0x5c2
#define _ARC_V2_AGU_AP3 0x5c3
#define _ARC_V2_AGU_AP4 0x5c4
#define _ARC_V2_AGU_AP5 0x5c5
#define _ARC_V2_AGU_AP6 0x5c6
#define _ARC_V2_AGU_AP7 0x5c7
#define _ARC_V2_AGU_AP8 0x5c8
#define _ARC_V2_AGU_AP9 0x5c9
#define _ARC_V2_AGU_AP10 0x5ca
#define _ARC_V2_AGU_AP11 0x5cb
#define _ARC_V2_AGU_OS0 0x5d0
#define _ARC_V2_AGU_OS1 0x5d1
#define _ARC_V2_AGU_OS2 0x5d2
#define _ARC_V2_AGU_OS3 0x5d3
#define _ARC_V2_AGU_OS4 0x5d4
#define _ARC_V2_AGU_OS5 0x5d5
#define _ARC_V2_AGU_OS6 0x5d6
#define _ARC_V2_AGU_OS7 0x5d7
#define _ARC_V2_AGU_MOD0 0x5e0
#define _ARC_V2_AGU_MOD1 0x5e1
#define _ARC_V2_AGU_MOD2 0x5e2
#define _ARC_V2_AGU_MOD3 0x5e3
#define _ARC_V2_AGU_MOD4 0x5e4
#define _ARC_V2_AGU_MOD5 0x5e5
#define _ARC_V2_AGU_MOD6 0x5e6
#define _ARC_V2_AGU_MOD7 0x5e7
#define _ARC_V2_AGU_MOD8 0x5e8
#define _ARC_V2_AGU_MOD9 0x5e9
#define _ARC_V2_AGU_MOD10 0x5ea
#define _ARC_V2_AGU_MOD11 0x5eb
#define _ARC_V2_AGU_MOD12 0x5ec
#define _ARC_V2_AGU_MOD13 0x5ed
#define _ARC_V2_AGU_MOD14 0x5ee
#define _ARC_V2_AGU_MOD15 0x5ef
#define _ARC_V2_AGU_MOD16 0x5f0
#define _ARC_V2_AGU_MOD17 0x5f1
#define _ARC_V2_AGU_MOD18 0x5f2
#define _ARC_V2_AGU_MOD19 0x5f3
#define _ARC_V2_AGU_MOD20 0x5f4
#define _ARC_V2_AGU_MOD21 0x5f5
#define _ARC_V2_AGU_MOD22 0x5f6
#define _ARC_V2_AGU_MOD23 0x5f7
#define _ARC_HW_PF_BUILD 0xf70
#define _ARC_HW_PF_CTRL 0x4f
/* _ARC_HW_PF_CTRL bits */
#define _ARC_HW_PF_CTRL_ENABLE BIT(0)
/* STATUS32/STATUS32_P0 bits */
#define _ARC_V2_STATUS32_H (1 << 0)
#define Z_ARC_V2_STATUS32_E(x) ((x) << 1)
#define _ARC_V2_STATUS32_AE_BIT 5
#define _ARC_V2_STATUS32_AE (1 << _ARC_V2_STATUS32_AE_BIT)
#define _ARC_V2_STATUS32_DE (1 << 6)
#define _ARC_V2_STATUS32_U_BIT 7
#define _ARC_V2_STATUS32_U (1 << _ARC_V2_STATUS32_U_BIT)
#define _ARC_V2_STATUS32_V (1 << 8)
#define _ARC_V2_STATUS32_C (1 << 9)
#define _ARC_V2_STATUS32_N (1 << 10)
#define _ARC_V2_STATUS32_Z (1 << 11)
#define _ARC_V2_STATUS32_L (1 << 12)
#define _ARC_V2_STATUS32_DZ_BIT 13
#define _ARC_V2_STATUS32_DZ (1 << _ARC_V2_STATUS32_DZ_BIT)
#define _ARC_V2_STATUS32_SC_BIT 14
#define _ARC_V2_STATUS32_SC (1 << _ARC_V2_STATUS32_SC_BIT)
#define _ARC_V2_STATUS32_ES (1 << 15)
#define _ARC_V2_STATUS32_RB(x) ((x) << 16)
#define _ARC_V2_STATUS32_AD_BIT 19
#define _ARC_V2_STATUS32_AD (1 << _ARC_V2_STATUS32_AD_BIT)
#define _ARC_V2_STATUS32_US_BIT 20
#define _ARC_V2_STATUS32_US (1 << _ARC_V2_STATUS32_US_BIT)
#define _ARC_V2_STATUS32_S_BIT 21
/* Fix: shift by the S bit position (21). Previously this reused
 * _ARC_V2_STATUS32_US_BIT (20), making _ARC_V2_STATUS32_S identical to
 * _ARC_V2_STATUS32_US and contradicting _ARC_V2_STATUS32_S_BIT above.
 */
#define _ARC_V2_STATUS32_S (1 << _ARC_V2_STATUS32_S_BIT)
#define _ARC_V2_STATUS32_IE (1 << 31)
/* SEC_STAT bits */
#define _ARC_V2_SEC_STAT_SSC_BIT 0
#define _ARC_V2_SEC_STAT_SSC (1 << _ARC_V2_SEC_STAT_SSC_BIT)
#define _ARC_V2_SEC_STAT_NSRT_BIT 1
#define _ARC_V2_SEC_STAT_NSRT (1 << _ARC_V2_SEC_STAT_NSRT_BIT)
#define _ARC_V2_SEC_STAT_NSRU_BIT 2
#define _ARC_V2_SEC_STAT_NSRU (1 << _ARC_V2_SEC_STAT_NSRU_BIT)
#define _ARC_V2_SEC_STAT_IRM_BIT 3
#define _ARC_V2_SEC_STAT_IRM (1 << _ARC_V2_SEC_STAT_IRM_BIT)
#define _ARC_V2_SEC_STAT_SUE_BIT 4
#define _ARC_V2_SEC_STAT_SUE (1 << _ARC_V2_SEC_STAT_SUE_BIT)
#define _ARC_V2_SEC_STAT_NIC_BIT 5
#define _ARC_V2_SEC_STAT_NIC (1 << _ARC_V2_SEC_STAT_NIC_BIT)
/* interrupt related bits */
#define _ARC_V2_IRQ_PRIORITY_SECURE 0x100
/* exception cause register masks */
#define Z_ARC_V2_ECR_VECTOR(X) ((X & 0xff0000) >> 16)
#define Z_ARC_V2_ECR_CODE(X) ((X & 0xff00) >> 8)
#define Z_ARC_V2_ECR_PARAMETER(X) (X & 0xff)
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#if defined(__CCAC__)
#define z_arc_v2_aux_reg_read(reg) _lr((volatile uint32_t)reg)
#define z_arc_v2_aux_reg_write(reg, val) \
_sr((unsigned int)val, (volatile uint32_t)reg)
#else /* ! __CCAC__ */
#define z_arc_v2_aux_reg_read(reg) __builtin_arc_lr((volatile uint32_t)reg)
#define z_arc_v2_aux_reg_write(reg, val) \
__builtin_arc_sr((unsigned int)val, (volatile uint32_t)reg)
#endif /* __CCAC__ */
#endif /* _ASMLANGUAGE */
/*
 * @brief Return an identifier for the core executing this code
 *
 * Reads the IDENTITY auxiliary register and extracts a bit-field with
 * "xbfu" using encoded operand 0xe8 — presumably selecting the core
 * number field; confirm the field position against the ARCv2 PRM.
 */
#define z_arc_v2_core_id() \
({ \
unsigned int __ret; \
__asm__ __volatile__("lr %0, [%1]\n" \
"xbfu %0, %0, 0xe8\n" \
: "=r"(__ret) \
: "i"(_ARC_V2_IDENTITY)); \
__ret; \
})
#endif /* ZEPHYR_INCLUDE_ARCH_ARC_V2_AUX_REGS_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arc/v2/aux_regs.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,320 |
```linker script
/*
*
*/
/**
* @brief Common parts of the linker scripts for the ARCv2 targets for
* GNU and MWDT toolchains.
*/
#include <zephyr/linker/sections.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/linker/linker-tool.h>
/* physical address of RAM */
#ifdef CONFIG_HARVARD
#define ROMABLE_REGION ICCM
#define RAMABLE_REGION DCCM
#define ROM_RAM_IN_SAME_REGION 0
#else
#if defined(CONFIG_XIP) && (FLASH_SIZE != 0)
#define ROMABLE_REGION FLASH
#define RAMABLE_REGION SRAM
#define ROM_RAM_IN_SAME_REGION 0
#else
#define ROMABLE_REGION SRAM
#define RAMABLE_REGION SRAM
#define ROM_RAM_IN_SAME_REGION 1
#endif
#endif
#ifdef CONFIG_ARC_MPU_ENABLE
#if CONFIG_ARC_MPU_VER == 2
#define MPU_MIN_SIZE 2048
#elif (CONFIG_ARC_MPU_VER == 3) || (CONFIG_ARC_MPU_VER == 4) || \
(CONFIG_ARC_MPU_VER == 6) || (CONFIG_ARC_MPU_VER == 8)
#define MPU_MIN_SIZE 32
#endif
#define MPU_MIN_SIZE_ALIGN . = ALIGN(MPU_MIN_SIZE);
#if defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
#define MPU_ALIGN(region_size) \
. = ALIGN(MPU_MIN_SIZE); \
. = ALIGN( 1 << LOG2CEIL(region_size))
#else
#define MPU_ALIGN(region_size) \
. = ALIGN(MPU_MIN_SIZE)
#endif
#else
#define MPU_MIN_SIZE_ALIGN
#define MPU_ALIGN(region_size) . = ALIGN(4)
#endif
OUTPUT_ARCH(arc)
ENTRY(CONFIG_KERNEL_ENTRY)
MEMORY {
#ifdef FLASH_START
FLASH (rx) : ORIGIN = FLASH_START, LENGTH = FLASH_SIZE
#endif
#ifdef ICCM_START
ICCM (rwx) : ORIGIN = ICCM_START, LENGTH = ICCM_SIZE
#endif
#ifdef SRAM_START
SRAM (rwx) : ORIGIN = SRAM_START, LENGTH = SRAM_SIZE
#endif
#ifdef DCCM_START
DCCM (rw) : ORIGIN = DCCM_START, LENGTH = DCCM_SIZE
#endif
#ifdef XCCM_START
XCCM (rw) : ORIGIN = XCCM_START, LENGTH = XCCM_SIZE
#endif
#ifdef YCCM_START
YCCM (rw) : ORIGIN = YCCM_START, LENGTH = YCCM_SIZE
#endif
/* Used by and documented in include/linker/intlist.ld */
IDT_LIST (wx) : ORIGIN = 0xFFFFF7FF, LENGTH = 2K
}
SECTIONS {
#include <zephyr/linker/rel-sections.ld>
#ifdef CONFIG_LLEXT
#include <zephyr/linker/llext-sections.ld>
#endif
GROUP_START(ROMABLE_REGION)
SECTION_PROLOGUE(_TEXT_SECTION_NAME,,ALIGN(1024)) {
__rom_region_start = .;
__text_region_start = .;
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-rom-start.ld>
*(.text .text.*)
*(.gnu.linkonce.t.*)
. = ALIGN(4);
#include <zephyr/linker/kobject-text.ld>
} GROUP_LINK_IN(ROMABLE_REGION)
__text_region_end = .;
__rodata_region_start = .;
#include <zephyr/linker/common-rom.ld>
/* Located in generated directory. This file is populated by calling
* zephyr_linker_sources(ROM_SECTIONS ...). Useful for grouping iterable RO structs.
*/
#include <snippets-rom-sections.ld>
#ifdef __MWDT_LINKER_CMD__
SECTION_DATA_PROLOGUE(tdata,,)
{
*(.tls .tls.*);
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#ifdef CONFIG_XIP
/* The "primary copy" of tls should be only in flash on XIP systems */
PROVIDE(_arcmwdt_tls_start = LOADADDR(tdata));
#else
PROVIDE(_arcmwdt_tls_start = ADDR(tdata));
#endif
PROVIDE(_arcmwdt_tls_size = SIZEOF(tdata));
/* TODO: add mwdt specific ROM C++ sections */
#else
#include <zephyr/linker/thread-local-storage.ld>
#include <zephyr/linker/cplusplus-rom.ld>
#endif /* __MWDT_LINKER_CMD__ */
SECTION_PROLOGUE(_RODATA_SECTION_NAME,,) {
*(".rodata")
*(".rodata.*")
*(.gnu.linkonce.r.*)
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-rodata.ld>
#include <zephyr/linker/kobject-rom.ld>
#if defined(CONFIG_CPP) && !defined(CONFIG_STATIC_INIT_GNU) && defined(__MWDT_LINKER_CMD__)
. = ALIGN(4);
_fctors = .;
KEEP(*(.ctors*))
_ectors = .;
#endif /* CONFIG_CPP && !CONFIG_STATIC_INIT_GNU && __MWDT_LINKER_CMD__ */
/* This extra MPU alignment of RAMABLE_REGION is only required if we put ROMABLE_REGION and
* RAMABLE_REGION into the same (continuous) memory - otherwise we can get beginning of the
* RAMABLE_REGION in the end of ROMABLE_REGION MPU aperture.
*/
#if ROM_RAM_IN_SAME_REGION
MPU_ALIGN(ABSOLUTE(.) - __rom_region_start);
#endif
} GROUP_LINK_IN(ROMABLE_REGION)
__rodata_region_end = .;
MPU_ALIGN(__rodata_region_end - __rom_region_start);
__rom_region_end = .;
__rom_region_size = __rom_region_end - __rom_region_start;
GROUP_END(ROMABLE_REGION)
GROUP_START(RAMABLE_REGION)
MPU_MIN_SIZE_ALIGN
_image_ram_start = .;
#include <app_data_alignment.ld>
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-ram-sections.ld>
#if defined(CONFIG_USERSPACE)
#define APP_SHARED_ALIGN MPU_MIN_SIZE_ALIGN
#define SMEM_PARTITION_ALIGN MPU_ALIGN
#include <app_smem.ld>
_app_smem_size = _app_smem_end - _app_smem_start;
_app_smem_rom_start = LOADADDR(_APP_SMEM_SECTION_NAME);
#endif /* CONFIG_USERSPACE */
SECTION_DATA_PROLOGUE(_BSS_SECTION_NAME,(NOLOAD),) {
MPU_MIN_SIZE_ALIGN
/*
* For performance, BSS section is assumed to be 4 byte aligned and
* a multiple of 4 bytes
*/
. = ALIGN(4);
__bss_start = .;
__kernel_ram_start = .;
*(".bss")
*(".bss.*")
*(COMMON)
*(".kernel_bss.*")
/*
* BSP clears this memory in words only and doesn't clear any
* potential left over bytes.
*/
__bss_end = ALIGN(4);
} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
#include <zephyr/linker/common-noinit.ld>
GROUP_START(DATA_REGION)
SECTION_DATA_PROLOGUE(_DATA_SECTION_NAME,,) {
/* when XIP, .text is in ROM, but vector table must be at start of .data */
__data_region_start = .;
__data_start = .;
*(".data")
*(".data.*")
*(".kernel.*")
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-rwdata.ld>
__data_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
__data_size = __data_end - __data_start;
__data_load_start = LOADADDR(_DATA_SECTION_NAME);
__data_region_load_start = LOADADDR(_DATA_SECTION_NAME);
#include <zephyr/linker/common-ram.ld>
#include <zephyr/linker/kobject-data.ld>
#ifdef __MWDT_LINKER_CMD__
/* TODO: add mwdt specific RAM C++ sections */
#else
#include <zephyr/linker/cplusplus-ram.ld>
#endif /* __MWDT_LINKER_CMD__ */
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-data-sections.ld>
__data_region_end = .;
MPU_MIN_SIZE_ALIGN
/* Define linker symbols */
__kernel_ram_end = .;
__kernel_ram_size = __kernel_ram_end - __kernel_ram_start;
#ifdef __MWDT_LINKER_CMD__
/* mwdt requires _fstack, _estack which will be used in _stkchk.
* _stkchk is inserted by mwdt automatically, if _fstack, _estack is not
* set correctly, the brk_s instruction will be called
* here, we use a trick to deceive the compiler.
*/
_fstack = _image_ram_start;
_estack = .;
#endif /* __MWDT_LINKER_CMD__ */
#ifdef CONFIG_ARC_XY_ENABLE
SECTION_PROLOGUE(xdata,,)
{
__xdata_start = .;
*(.Xdata*)
__xdata_end = .;
} GROUP_DATA_LINK_IN(XCCM, RAMABLE_REGION)
SECTION_PROLOGUE(ydata,,)
{
__ydata_start = .;
*(.Ydata*)
__ydata_end = .;
} GROUP_DATA_LINK_IN(YCCM, RAMABLE_REGION)
#endif
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-sections.ld>
#include <zephyr/linker/ram-end.ld>
GROUP_END(RAMABLE_REGION)
#include <zephyr/linker/debug-sections.ld>
SECTION_PROLOGUE(.ARC.attributes, 0,) {
KEEP(*(.ARC.attributes))
KEEP(*(.gnu.attributes))
}
#if !defined(CONFIG_XIP)
/*
* Zephyr uses _estack as a start of heap allocation area.
* Region [_estack .. sram_end] should be defined in MPU.
* Including _image_ram region which is [_image_ram_start .. _image_ram_end]
* we get [_image_ram_start .. sram_end] region to be defined in MPU.
*/
__arc_rw_sram_size = SRAM_START + SRAM_SIZE - _image_ram_start;
#endif
/DISCARD/ : {
#if defined(CONFIG_CPP) && !defined(CONFIG_STATIC_INIT_GNU) && defined(__MWDT_LINKER_CMD__)
*(.dtors*)
*(.fini*)
*(.eh_frame*)
#endif /* CONFIG_CPP && !CONFIG_STATIC_INIT_GNU && __MWDT_LINKER_CMD__ */
*(.note.GNU-stack)
*(.got.plt)
*(.igot.plt)
*(.got)
*(.igot)
}
}
``` | /content/code_sandbox/include/zephyr/arch/arc/v2/linker.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,520 |
```objective-c
/*
*
*/
/**
* @file
* @brief ARCv2 ARC Connect driver
*
* ARCv2 ARC Connect driver interface. Included by arc/arch.h.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARC_V2_ARC_CONNECT_H_
#define ZEPHYR_INCLUDE_ARCH_ARC_V2_ARC_CONNECT_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <zephyr/arch/arc/v2/aux_regs.h>
#ifdef __cplusplus
extern "C" {
#endif
#define _ARC_V2_CONNECT_BCR 0x0d0
#define _ARC_V2_CONNECT_IDU_BCR 0x0d5
#define _ARC_V2_CONNECT_GFRC_BCR 0x0d6
#define _ARC_V2_CONNECT_CMD 0x600
#define _ARC_V2_CONNECT_WDATA 0x601
#define _ARC_V2_CONNECT_READBACK 0x602
#define ARC_CONNECT_CMD_CHECK_CORE_ID 0x0
#define ARC_CONNECT_CMD_INTRPT_GENERATE_IRQ 0x1
#define ARC_CONNECT_CMD_INTRPT_GENERATE_ACK 0x2
#define ARC_CONNECT_CMD_INTRPT_READ_STATUS 0x3
#define ARC_CONNECT_CMD_INTRPT_CHECK_SOURCE 0x4
#define ARC_CONNECT_CMD_SEMA_CLAIM_AND_READ 0x11
#define ARC_CONNECT_CMD_SEMA_RELEASE 0x12
#define ARC_CONNECT_CMD_SEMA_FORCE_RELEASE 0x13
#define ARC_CONNECT_CMD_MSG_SRAM_SET_ADDR 0x21
#define ARC_CONNECT_CMD_MSG_SRAM_READ_ADDR 0x22
#define ARC_CONNECT_CMD_MSG_SRAM_SET_ADDR_OFFSET 0x23
#define ARC_CONNECT_CMD_MSG_SRAM_READ_ADDR_OFFSET 0x24
#define ARC_CONNECT_CMD_MSG_SRAM_WRITE 0x25
#define ARC_CONNECT_CMD_MSG_SRAM_WRITE_INC 0x26
#define ARC_CONNECT_CMD_MSG_SRAM_WRITE_IMM 0x27
#define ARC_CONNECT_CMD_MSG_SRAM_READ 0x28
#define ARC_CONNECT_CMD_MSG_SRAM_READ_INC 0x29
#define ARC_CONNECT_CMD_MSG_SRAM_READ_IMM 0x2a
#define ARC_CONNECT_CMD_MSG_SRAM_SET_ECC_CTRL 0x2b
#define ARC_CONNECT_CMD_MSG_SRAM_READ_ECC_CTRL 0x2c
#define ARC_CONNECT_CMD_DEBUG_RESET 0x31
#define ARC_CONNECT_CMD_DEBUG_HALT 0x32
#define ARC_CONNECT_CMD_DEBUG_RUN 0x33
#define ARC_CONNECT_CMD_DEBUG_SET_MASK 0x34
#define ARC_CONNECT_CMD_DEBUG_READ_MASK 0x35
#define ARC_CONNECT_CMD_DEBUG_SET_SELECT 0x36
#define ARC_CONNECT_CMD_DEBUG_READ_SELECT 0x37
#define ARC_CONNECT_CMD_DEBUG_READ_EN 0x38
#define ARC_CONNECT_CMD_DEBUG_READ_CMD 0x39
#define ARC_CONNECT_CMD_DEBUG_READ_CORE 0x3a
#define ARC_CONNECT_CMD_DEBUG_MASK_SH 0x08 /* if a self-halt occurs, a global halt is triggered */
#define ARC_CONNECT_CMD_DEBUG_MASK_BH 0x04 /* if a breakpoint caused halt occurs, a global halt is triggered */
#define ARC_CONNECT_CMD_DEBUG_MASK_AH 0x02 /* if an actionpoint caused halt occurs, a global halt is triggered */
#define ARC_CONNECT_CMD_DEBUG_MASK_H 0x01 /* whenever the core is halted, a global halt is triggered */
#define ARC_CONNECT_CMD_GFRC_CLEAR 0x41
#define ARC_CONNECT_CMD_GFRC_READ_LO 0x42
#define ARC_CONNECT_CMD_GFRC_READ_HI 0x43
#define ARC_CONNECT_CMD_GFRC_ENABLE 0x44
#define ARC_CONNECT_CMD_GFRC_DISABLE 0x45
#define ARC_CONNECT_CMD_GFRC_READ_DISABLE 0x46
#define ARC_CONNECT_CMD_GFRC_SET_CORE 0x47
#define ARC_CONNECT_CMD_GFRC_READ_CORE 0x48
#define ARC_CONNECT_CMD_GFRC_READ_HALT 0x49
#define ARC_CONNECT_CMD_PDM_SET_PM 0x81
#define ARC_CONNECT_CMD_PDM_READ_PSTATUS 0x82
#define ARC_CONNECT_CMD_PMU_SET_PUCNT 0x51
#define ARC_CONNECT_CMD_PMU_READ_PUCNT 0x52
#define ARC_CONNECT_CMD_PMU_SET_RSTCNT 0x53
#define ARC_CONNECT_CMD_PMU_READ_RSTCNT 0x54
#define ARC_CONNECT_CMD_PMU_SET_PDCNT 0x55
#define ARC_CONNECT_CMD_PMU_READ_PDCNT 0x56
#define ARC_CONNECT_CMD_IDU_ENABLE 0x71
#define ARC_CONNECT_CMD_IDU_DISABLE 0x72
#define ARC_CONNECT_CMD_IDU_READ_ENABLE 0x73
#define ARC_CONNECT_CMD_IDU_SET_MODE 0x74
#define ARC_CONNECT_CMD_IDU_READ_MODE 0x75
#define ARC_CONNECT_CMD_IDU_SET_DEST 0x76
#define ARC_CONNECT_CMD_IDU_READ_DEST 0x77
#define ARC_CONNECT_CMD_IDU_GEN_CIRQ 0x78
#define ARC_CONNECT_CMD_IDU_ACK_CIRQ 0x79
#define ARC_CONNECT_CMD_IDU_CHECK_STATUS 0x7a
#define ARC_CONNECT_CMD_IDU_CHECK_SOURCE 0x7b
#define ARC_CONNECT_CMD_IDU_SET_MASK 0x7c
#define ARC_CONNECT_CMD_IDU_READ_MASK 0x7d
#define ARC_CONNECT_CMD_IDU_CHECK_FIRST 0x7e
/* the start intno of common interrupt managed by IDU */
#define ARC_CONNECT_IDU_IRQ_START 24
#define ARC_CONNECT_INTRPT_TRIGGER_LEVEL 0
#define ARC_CONNECT_INTRPT_TRIGGER_EDGE 1
#define ARC_CONNECT_DISTRI_MODE_ROUND_ROBIN 0
#define ARC_CONNECT_DISTRI_MODE_FIRST_ACK 1
#define ARC_CONNECT_DISTRI_ALL_DEST 2
/*
 * Register layout for the ARC Connect command register
 * (_ARC_V2_CONNECT_CMD): an 8-bit command id plus a 16-bit parameter.
 * The union allows the packed bitfields to be written to the aux
 * register as a single 32-bit value (val).
 */
struct arc_connect_cmd {
	union {
		struct {
#ifdef CONFIG_BIG_ENDIAN
			uint32_t pad:8, param:16, cmd:8;
#else
			uint32_t cmd:8, param:16, pad:8;
#endif
		};
		uint32_t val;
	};
};
/*
 * Bitfield view of the ARC Connect build configuration register
 * (_ARC_V2_CONNECT_BCR): version field, per-feature presence bits
 * (ipi, sem, msg, pw, dbg, gfrc, idu, ...) and the core count.
 */
struct arc_connect_bcr {
	union {
		struct {
#ifdef CONFIG_BIG_ENDIAN
			uint32_t pad4:6, pw_dom:1, pad3:1,
				idu:1, pad2:1, num_cores:6,
				pad:1, gfrc:1, dbg:1, pw:1,
				msg:1, sem:1, ipi:1, slv:1,
				ver:8;
#else
			uint32_t ver:8,
				slv:1, ipi:1, sem:1, msg:1,
				pw:1, dbg:1, gfrc:1, pad:1,
				num_cores:6, pad2:1, idu:1,
				pad3:1, pw_dom:1, pad4:6;
#endif
		};
		uint32_t val;
	};
};
/*
 * Bitfield view of the ARC Connect IDU build configuration register
 * (_ARC_V2_CONNECT_IDU_BCR): version plus the cirqnum field
 * (presumably the number of common interrupts the IDU handles —
 * confirm against the ARC Connect databook).
 */
struct arc_connect_idu_bcr {
	union {
		struct {
#ifdef CONFIG_BIG_ENDIAN
			uint32_t pad:21, cirqnum:3, ver:8;
#else
			uint32_t ver:8, cirqnum:3, pad:21;
#endif
		};
		uint32_t val;
	};
};
static inline void z_arc_connect_cmd(uint32_t cmd, uint32_t param)
{
struct arc_connect_cmd regval;
regval.pad = 0;
regval.cmd = cmd;
regval.param = param;
z_arc_v2_aux_reg_write(_ARC_V2_CONNECT_CMD, regval.val);
}
/*
 * Issue an ARC Connect command that carries a 32-bit data payload.
 * The payload is staged in the WDATA aux register first, then the
 * command itself is issued.
 */
static inline void z_arc_connect_cmd_data(uint32_t cmd, uint32_t param,
					  uint32_t data)
{
	z_arc_v2_aux_reg_write(_ARC_V2_CONNECT_WDATA, data);

	z_arc_connect_cmd(cmd, param);
}
/* Read the ARC Connect READBACK aux register (command result/status). */
static inline uint32_t z_arc_connect_cmd_readback(void)
{
	return z_arc_v2_aux_reg_read(_ARC_V2_CONNECT_READBACK);
}
/* inter-core interrupt related functions */
extern void z_arc_connect_ici_generate(uint32_t core_id);
extern void z_arc_connect_ici_ack(uint32_t core_id);
extern uint32_t z_arc_connect_ici_read_status(uint32_t core_id);
extern uint32_t z_arc_connect_ici_check_src(void);
extern void z_arc_connect_ici_clear(void);
/* inter-core debug related functions */
extern void z_arc_connect_debug_reset(uint32_t core_mask);
extern void z_arc_connect_debug_halt(uint32_t core_mask);
extern void z_arc_connect_debug_run(uint32_t core_mask);
extern void z_arc_connect_debug_mask_set(uint32_t core_mask, uint32_t mask);
extern uint32_t z_arc_connect_debug_mask_read(uint32_t core_mask);
extern void z_arc_connect_debug_select_set(uint32_t core_mask);
extern uint32_t z_arc_connect_debug_select_read(void);
extern uint32_t z_arc_connect_debug_en_read(void);
extern uint32_t z_arc_connect_debug_cmd_read(void);
extern uint32_t z_arc_connect_debug_core_read(void);
/* global free-running counter(gfrc) related functions */
extern void z_arc_connect_gfrc_clear(void);
extern uint64_t z_arc_connect_gfrc_read(void);
extern void z_arc_connect_gfrc_enable(void);
extern void z_arc_connect_gfrc_disable(void);
extern void z_arc_connect_gfrc_core_set(uint32_t core_mask);
extern uint32_t z_arc_connect_gfrc_halt_read(void);
extern uint32_t z_arc_connect_gfrc_core_read(void);
/* interrupt distribute unit related functions */
extern void z_arc_connect_idu_enable(void);
extern void z_arc_connect_idu_disable(void);
extern uint32_t z_arc_connect_idu_read_enable(void);
extern void z_arc_connect_idu_set_mode(uint32_t irq_num,
uint16_t trigger_mode, uint16_t distri_mode);
extern uint32_t z_arc_connect_idu_read_mode(uint32_t irq_num);
extern void z_arc_connect_idu_set_dest(uint32_t irq_num, uint32_t core_mask);
extern uint32_t z_arc_connect_idu_read_dest(uint32_t irq_num);
extern void z_arc_connect_idu_gen_cirq(uint32_t irq_num);
extern void z_arc_connect_idu_ack_cirq(uint32_t irq_num);
extern uint32_t z_arc_connect_idu_check_status(uint32_t irq_num);
extern uint32_t z_arc_connect_idu_check_source(uint32_t irq_num);
extern void z_arc_connect_idu_set_mask(uint32_t irq_num, uint32_t mask);
extern uint32_t z_arc_connect_idu_read_mask(uint32_t irq_num);
extern uint32_t z_arc_connect_idu_check_first(uint32_t irq_num);
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARC_V2_ARC_CONNECT_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arc/v2/arc_connect.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,325 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARC_V2_DSP_ARC_DSP_H_
#define ZEPHYR_INCLUDE_ARCH_ARC_V2_DSP_ARC_DSP_H_
/**
* @brief Disable dsp context preservation
*
* The function is used to disable the preservation of dsp
* and agu context registers for a particular thread.
*
* The @a options parameter indicates which register sets will
* not be used by the specified thread. It is used by ARC only.
*
* @param thread ID of thread.
* @param options register sets options
*
*/
void arc_dsp_disable(struct k_thread *thread, unsigned int options);
/**
* @brief Enable dsp context preservation
*
* The function is used to enable the preservation of dsp
* and agu context registers for a particular thread.
*
* The @a options parameter indicates which register sets will
* be used by the specified thread. It is used by ARC only.
*
* @param thread ID of thread.
* @param options register sets options
*
*/
void arc_dsp_enable(struct k_thread *thread, unsigned int options);
#endif /* ZEPHYR_INCLUDE_ARCH_ARC_V2_DSP_ARC_DSP_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arc/v2/dsp/arc_dsp.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 250 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARC_V2_SJLI_H
#define ZEPHYR_INCLUDE_ARCH_ARC_V2_SJLI_H
#define SJLI_CALL_ARC_SECURE 0
#define ARC_S_CALL_AUX_READ 0
#define ARC_S_CALL_AUX_WRITE 1
#define ARC_S_CALL_IRQ_ALLOC 2
#define ARC_S_CALL_CLRI 3
#define ARC_S_CALL_SETI 4
#define ARC_S_CALL_LIMIT 5
#define ARC_N_IRQ_START_LEVEL ((CONFIG_NUM_IRQ_PRIO_LEVELS + 1) / 2)
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <stdbool.h>
#include <zephyr/arch/arc/v2/aux_regs.h>
#ifdef __cplusplus
extern "C" {
#endif
#define arc_sjli(id) \
(__asm__ volatile("sjli %[sjli_id]\n" :: [sjli_id] "i" (id)))
#ifdef CONFIG_ARC_SECURE_FIRMWARE
typedef uint32_t (*_arc_s_call_handler_t)(uint32_t arg1, uint32_t arg2, uint32_t arg3,
uint32_t arg4, uint32_t arg5, uint32_t arg6);
extern void arc_go_to_normal(uint32_t addr);
extern void _arc_do_secure_call(void);
extern const _arc_s_call_handler_t arc_s_call_table[ARC_S_CALL_LIMIT];
#endif
#ifdef CONFIG_ARC_NORMAL_FIRMWARE
/*
 * Invoke a secure service with six arguments.
 *
 * Arguments are pinned to r0-r5 and the call id to r6, the register
 * layout the secure firmware's sjli gateway expects; the return value
 * comes back in r0 (aliased by `ret`). compiler_barrier() prevents the
 * compiler from reordering memory accesses across the secure call.
 */
static inline uint32_t _arc_s_call_invoke6(uint32_t arg1, uint32_t arg2, uint32_t arg3,
					   uint32_t arg4, uint32_t arg5, uint32_t arg6,
					   uint32_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r1 __asm__("r1") = arg2;
	register uint32_t r2 __asm__("r2") = arg3;
	register uint32_t r3 __asm__("r3") = arg4;
	register uint32_t r4 __asm__("r4") = arg5;
	register uint32_t r5 __asm__("r5") = arg6;
	register uint32_t r6 __asm__("r6") = call_id;

	compiler_barrier();

	__asm__ volatile(
		"sjli %[id]\n"
		: "=r"(ret)
		: [id] "i" (SJLI_CALL_ARC_SECURE),
		"r" (ret), "r" (r1), "r" (r2), "r" (r3),
		"r" (r4), "r" (r5), "r" (r6));

	return ret;
}
/* Invoke a secure service with five arguments (r0-r4 args, r6 call id;
 * result returned in r0). See _arc_s_call_invoke6 for details.
 */
static inline uint32_t _arc_s_call_invoke5(uint32_t arg1, uint32_t arg2, uint32_t arg3,
					   uint32_t arg4, uint32_t arg5, uint32_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r1 __asm__("r1") = arg2;
	register uint32_t r2 __asm__("r2") = arg3;
	register uint32_t r3 __asm__("r3") = arg4;
	register uint32_t r4 __asm__("r4") = arg5;
	register uint32_t r6 __asm__("r6") = call_id;

	compiler_barrier();

	__asm__ volatile(
		"sjli %[id]\n"
		: "=r"(ret)
		: [id] "i" (SJLI_CALL_ARC_SECURE),
		"r" (ret), "r" (r1), "r" (r2), "r" (r3),
		"r" (r4), "r" (r6));

	return ret;
}
/* Invoke a secure service with four arguments (r0-r3 args, r6 call id;
 * result returned in r0). See _arc_s_call_invoke6 for details.
 */
static inline uint32_t _arc_s_call_invoke4(uint32_t arg1, uint32_t arg2, uint32_t arg3,
					   uint32_t arg4, uint32_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r1 __asm__("r1") = arg2;
	register uint32_t r2 __asm__("r2") = arg3;
	register uint32_t r3 __asm__("r3") = arg4;
	register uint32_t r6 __asm__("r6") = call_id;

	compiler_barrier();

	__asm__ volatile(
		"sjli %[id]\n"
		: "=r"(ret)
		: [id] "i" (SJLI_CALL_ARC_SECURE),
		"r" (ret), "r" (r1), "r" (r2), "r" (r3),
		"r" (r6));

	return ret;
}
/* Invoke a secure service with three arguments (r0-r2 args, r6 call id;
 * result returned in r0). See _arc_s_call_invoke6 for details.
 */
static inline uint32_t _arc_s_call_invoke3(uint32_t arg1, uint32_t arg2, uint32_t arg3,
					   uint32_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r1 __asm__("r1") = arg2;
	register uint32_t r2 __asm__("r2") = arg3;
	register uint32_t r6 __asm__("r6") = call_id;

	compiler_barrier();

	__asm__ volatile(
		"sjli %[id]\n"
		: "=r"(ret)
		: [id] "i" (SJLI_CALL_ARC_SECURE),
		"r" (ret), "r" (r1), "r" (r2), "r" (r6));

	return ret;
}
/* Invoke a secure service with two arguments (r0-r1 args, r6 call id;
 * result returned in r0). See _arc_s_call_invoke6 for details.
 */
static inline uint32_t _arc_s_call_invoke2(uint32_t arg1, uint32_t arg2, uint32_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r1 __asm__("r1") = arg2;
	register uint32_t r6 __asm__("r6") = call_id;

	compiler_barrier();

	__asm__ volatile(
		"sjli %[id]\n"
		: "=r"(ret)
		: [id] "i" (SJLI_CALL_ARC_SECURE),
		"r" (ret), "r" (r1), "r" (r6));

	return ret;
}
/* Invoke a secure service with one argument (r0 arg, r6 call id;
 * result returned in r0). See _arc_s_call_invoke6 for details.
 */
static inline uint32_t _arc_s_call_invoke1(uint32_t arg1, uint32_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r6 __asm__("r6") = call_id;

	compiler_barrier();

	__asm__ volatile(
		"sjli %[id]\n"
		: "=r"(ret)
		: [id] "i" (SJLI_CALL_ARC_SECURE),
		"r" (ret), "r" (r6));

	return ret;
}
/*
 * Invoke a secure service with no arguments (only the call id, in r6);
 * the result is returned in r0. See _arc_s_call_invoke6 for details.
 *
 * Fix: `ret` (r0) was passed to the asm statement as an input operand
 * while uninitialized — reading an indeterminate value is undefined
 * behavior in C. Initializing it to 0 gives the "r"(ret) input a
 * defined value; the register/constraint layout is unchanged.
 */
static inline uint32_t _arc_s_call_invoke0(uint32_t call_id)
{
	/* r0 doubles as the first-argument slot and the return slot */
	register uint32_t ret __asm__("r0") = 0;
	register uint32_t r6 __asm__("r6") = call_id;

	compiler_barrier();

	__asm__ volatile(
		"sjli %[id]\n"
		: "=r"(ret)
		: [id] "i" (SJLI_CALL_ARC_SECURE),
		"r" (ret), "r" (r6));

	return ret;
}
/**
 * @brief Tell whether the current execution context is a user context
 *
 * Reads STATUS32 directly via the LR instruction and tests the US bit.
 *
 * NOTE(review): this returns true when the US bit is *clear* — confirm
 * that polarity is the intended one against the ARC SecureShield
 * documentation.
 */
static inline bool _arch_is_user_context(void)
{
	uint32_t status32_val;

	compiler_barrier();

	__asm__ volatile("lr %0, [%[status32]]\n"
			 : "=r"(status32_val)
			 : [status32] "i" (_ARC_V2_STATUS32));

	return (status32_val & _ARC_V2_STATUS32_US) == 0;
}
#endif /* CONFIG_ARC_NORMAL_FIRMWARE */
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARC_V2_SJLI_H */
``` | /content/code_sandbox/include/zephyr/arch/arc/v2/secureshield/arc_secure.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,667 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARC_V2_MPU_ARC_MPU_H_
#define ZEPHYR_INCLUDE_ARCH_ARC_V2_MPU_ARC_MPU_H_
#define AUX_MPU_ATTR_UE 0x008 /* allow user execution */
#define AUX_MPU_ATTR_UW 0x010 /* allow user write */
#define AUX_MPU_ATTR_UR 0x020 /* allow user read */
#define AUX_MPU_ATTR_KE 0x040 /* only allow kernel execution */
#define AUX_MPU_ATTR_KW 0x080 /* only allow kernel write */
#define AUX_MPU_ATTR_KR 0x100 /* only allow kernel read */
#define AUX_MPU_ATTR_S 0x8000 /* secure */
#define AUX_MPU_ATTR_N 0x0000 /* normal */
/*
* a region is dynamic means it can be split into sub regions.
* This attribute is meaningful for ARC MPUv3 which does not support mpu
* entry overlap. For ARC MPUv2, this attribute will be ignored as it
* supports mpu overlap in hardware.
*/
#define REGION_DYNAMIC 0x800 /* dynamic flag */
/* Some helper defines for common regions */
#define REGION_KERNEL_RAM_ATTR \
(AUX_MPU_ATTR_KW | AUX_MPU_ATTR_KR)
#define REGION_KERNEL_ROM_ATTR \
(AUX_MPU_ATTR_KE | AUX_MPU_ATTR_KR)
#define REGION_RAM_ATTR \
(AUX_MPU_ATTR_UW | AUX_MPU_ATTR_UR | \
AUX_MPU_ATTR_KW | AUX_MPU_ATTR_KR)
#define REGION_ROM_ATTR \
(AUX_MPU_ATTR_UE | AUX_MPU_ATTR_UR | \
AUX_MPU_ATTR_KE | AUX_MPU_ATTR_KR)
#define REGION_IO_ATTR \
(AUX_MPU_ATTR_UW | AUX_MPU_ATTR_UR | \
AUX_MPU_ATTR_KW | AUX_MPU_ATTR_KR)
#define REGION_ALL_ATTR \
(AUX_MPU_ATTR_UW | AUX_MPU_ATTR_UR | \
AUX_MPU_ATTR_KW | AUX_MPU_ATTR_KR | \
AUX_MPU_ATTR_KE | AUX_MPU_ATTR_UE)
#define REGION_32B 0x200
#define REGION_64B 0x201
#define REGION_128B 0x202
#define REGION_256B 0x203
#define REGION_512B 0x400
#define REGION_1K 0x401
#define REGION_2K 0x402
#define REGION_4K 0x403
#define REGION_8K 0x600
#define REGION_16K 0x601
#define REGION_32K 0x602
#define REGION_64K 0x603
#define REGION_128K 0x800
#define REGION_256K 0x801
#define REGION_512K 0x802
#define REGION_1M 0x803
#define REGION_2M 0xA00
#define REGION_4M 0xA01
#define REGION_8M 0xA02
#define REGION_16M 0xA03
#define REGION_32M 0xC00
#define REGION_64M 0xC01
#define REGION_128M 0xC02
#define REGION_256M 0xC03
#define REGION_512M 0xE00
#define REGION_1G 0xE01
#define REGION_2G 0xE02
#define REGION_4G 0xE03
/* Region definition data structure */
/* Region definition data structure */
struct arc_mpu_region {
	/* Region Name */
	const char *name;
	/* Region Base Address */
	uint32_t base;
	/* Region Size */
	uint32_t size;
	/* Region Attributes (AUX_MPU_ATTR_* / REGION_* flags) */
	uint32_t attr;
};
/* Convenience initializer for one struct arc_mpu_region entry */
#define MPU_REGION_ENTRY(_name, _base, _size, _attr) \
	{\
		.name = _name, \
		.base = _base, \
		.size = _size, \
		.attr = _attr, \
	}
/* MPU configuration data structure */
/* MPU configuration data structure */
struct arc_mpu_config {
	/* Number of regions */
	uint32_t num_regions;
	/* Regions: array of num_regions entries */
	struct arc_mpu_region *mpu_regions;
};
/* Reference to the MPU configuration */
extern struct arc_mpu_config mpu_config;
#endif /* ZEPHYR_INCLUDE_ARCH_ARC_V2_MPU_ARC_MPU_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arc/v2/mpu/arc_mpu.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 958 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARC_V2_MPU_ARC_CORE_MPU_H_
#define ZEPHYR_INCLUDE_ARCH_ARC_V2_MPU_ARC_CORE_MPU_H_
#ifdef __cplusplus
extern "C" {
#endif
/*
* The defines below represent the region types. The MPU driver is responsible
* to allocate the region accordingly to the type and set the correct
* attributes.
*
* Each MPU is different and has a different set of attributes, hence instead
* of having the attributes at this level the arc_mpu_core defines the intent
* types.
* An intent type (i.e. THREAD_STACK_GUARD) can correspond to a different set
* of operations and attributes for each MPU and it is responsibility of the
* MPU driver to select the correct ones.
*
* The intent based configuration can't fail hence at this level no error
* is returned by the configuration functions.
* If one of the operations corresponding to an intent fails the error has to
* be managed inside the MPU driver and not escalated.
*/
/* Thread Region Intent Type */
#define THREAD_STACK_USER_REGION 0x0
#define THREAD_STACK_REGION 0x1
#define THREAD_APP_DATA_REGION 0x2
#define THREAD_STACK_GUARD_REGION 0x3
#define THREAD_DOMAIN_PARTITION_REGION 0x4
#if defined(CONFIG_ARC_CORE_MPU)
/* ARC Core MPU Driver API */
/*
* This API has to be implemented by all the MPU drivers that have
* ARC_CORE_MPU support.
*/
/**
* @brief enable the MPU
*/
void arc_core_mpu_enable(void);
/**
* @brief disable the MPU
*/
void arc_core_mpu_disable(void);
/**
* @brief configure the thread's mpu regions
*
* @param thread the target thread
*/
void arc_core_mpu_configure_thread(struct k_thread *thread);
/*
* Before configure the MPU regions, MPU should be disabled
*/
/**
* @brief configure the default region
*
* @param region_attr region attribute of default region
*/
void arc_core_mpu_default(uint32_t region_attr);
/**
* @brief configure the MPU region
*
* @param index MPU region index
* @param base base address
* @param size size of region
* @param region_attr region attribute
*/
int arc_core_mpu_region(uint32_t index, uint32_t base, uint32_t size,
uint32_t region_attr);
#endif /* CONFIG_ARC_CORE_MPU */
#if defined(CONFIG_USERSPACE)
void arc_core_mpu_configure_mem_domain(struct k_thread *thread);
void arc_core_mpu_remove_mem_domain(struct k_mem_domain *mem_domain);
void arc_core_mpu_remove_mem_partition(struct k_mem_domain *domain,
uint32_t partition_id);
int arc_core_mpu_get_max_domain_partition_regions(void);
int arc_core_mpu_buffer_validate(const void *addr, size_t size, int write);
#endif
void configure_mpu_thread(struct k_thread *thread);
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_ARC_V2_MPU_ARC_CORE_MPU_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arc/v2/mpu/arc_core_mpu.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 652 |
```objective-c
/*
*
* ALU/Memory instructions pseudo-mnemonics for ARC64 ISA
*/
.macro MOVR, d, s
movl\&$suffix d, s
.endm
.macro LDR, d, s, off
.if $narg == 2
ldl\&$suffix d, [s]
.else
ldl\&$suffix d, [s, off]
.endif
.endm
.macro STR, d, s, off
.if $narg == 2
stl\&$suffix d, [s]
.else
stl\&$suffix d, [s, off]
.endif
.endm
.macro PUSHR, r
pushl r
.endm
.macro POPR, r
popl r
.endm
.macro LRR, d, aux
lrl d, aux
.endm
.macro SRR, d, aux
srl d, aux
.endm
.macro ADDR, d, s, v
addl\&$suffix d, s, v
.endm
.macro ADD2R, d, s, v
add2l\&$suffix d, s, v
.endm
.macro ADD3R, d, s, v
add3l d, s, v
.endm
.macro SUBR, d, s, v
subl d, s, v
.endm
.macro BMSKNR, d, s, v
bmsknl d, s, v
.endm
.macro LSRR, d, s, v
lsrl d, s, v
.endm
.macro ASLR, d, s, v
asll d, s, v
.endm
.macro ANDR, d, s, v
andl d, s, v
.endm
.macro ORR, d, s, v
orl d, s, v
.endm
.macro BRR, d, s, lbl
br\&$suffix\l d, s, lbl
.endm
.macro BREQR, d, s, lbl
breql d, s, lbl
.endm
.macro CMPR, op1, op2
cmpl op1, op2
.endm
``` | /content/code_sandbox/include/zephyr/arch/arc/asm-compat/asm-macro-64-bit-mwdt.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 512 |
```objective-c
/*
*
* ALU/Memory instructions pseudo-mnemonics for ARCv2 and ARC32 ISA
*/
.macro MOVR, d, s
mov\&$suffix d, s
.endm
.macro LDR, d, s, off
.if $narg == 2
ld\&$suffix d, [s]
.else
ld\&$suffix d, [s, off]
.endif
.endm
.macro STR, d, s, off
.if $narg == 2
st\&$suffix d, [s]
.else
st\&$suffix d, [s, off]
.endif
.endm
.macro PUSHR, r
push r
.endm
.macro POPR, r
pop r
.endm
.macro LRR, d, aux
lr d, aux
.endm
.macro SRR, d, aux
sr d, aux
.endm
.macro ADDR, d, s, v
add\&$suffix d, s, v
.endm
.macro ADD2R, d, s, v
add2\&$suffix d, s, v
.endm
.macro ADD3R, d, s, v
add3 d, s, v
.endm
.macro SUBR, d, s, v
sub d, s, v
.endm
.macro BMSKNR, d, s, v
bmskn d, s, v
.endm
.macro LSRR, d, s, v
lsr d, s, v
.endm
.macro ASLR, d, s, v
asl d, s, v
.endm
.macro ANDR, d, s, v
and d, s, v
.endm
.macro ORR, d, s, v
or d, s, v
.endm
.macro BRR, d, s, lbl
br\&$suffix d, s, lbl
.endm
.macro BREQR, d, s, lbl
breq d, s, lbl
.endm
.macro CMPR, op1, op2
cmp op1, op2
.endm
``` | /content/code_sandbox/include/zephyr/arch/arc/asm-compat/asm-macro-32-bit-mwdt.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 499 |
```objective-c
/*
*
* Author: Vineet Gupta <vgupta@synopsys.com>
*
* ALU/Memory instructions pseudo-mnemonics for ARCv2 and ARC32 ISA
*/
.irp cc,,.hi,.nz
.macro MOVR\cc d, s
mov\cc \d, \s
.endm
.endr
.irp aa,,.ab,.as,.aw
.macro LDR\aa d, s, off=0
ld\aa \d, [\s, \off]
.endm
.endr
.irp aa,,.ab,.as,.aw
.macro STR\aa d, s, off=0
; workaround assembler barfing for ST r, [@symb, 0]
.if \off == 0
st\aa \d, [\s]
.else
st\aa \d, [\s, \off]
.endif
.endm
.endr
.macro PUSHR r
push \r
.endm
.macro POPR r
pop \r
.endm
.macro LRR d, aux
lr \d, \aux
.endm
.macro SRR d, aux
sr \d, \aux
.endm
.irp cc,,.nz
.macro ADDR\cc d, s, v
add\cc \d, \s, \v
.endm
.endr
.irp cc,,.nz
.macro ADD2R\cc d, s, v
add2\cc \d, \s, \v
.endm
.endr
.macro ADD3R d, s, v
add3 \d, \s, \v
.endm
.macro SUBR d, s, v
sub \d, \s, \v
.endm
.macro BMSKNR d, s, v
bmskn \d, \s, \v
.endm
.macro LSRR d, s, v
lsr \d, \s, \v
.endm
.macro ASLR d, s, v
asl \d, \s, \v
.endm
.macro ANDR d, s, v
and \d, \s, \v
.endm
.macro ORR, d, s, v
or \d, \s, \v
.endm
.irp cc,ne,eq
.macro BRR\cc d, s, lbl
br\cc \d, \s, \lbl
.endm
.endr
.macro BREQR d, s, lbl
breq \d, \s, \lbl
.endm
.macro CMPR op1, op2
cmp \op1, \op2
.endm
``` | /content/code_sandbox/include/zephyr/arch/arc/asm-compat/asm-macro-32-bit-gnu.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 618 |
```objective-c
/*
*
* Author: Vineet Gupta <vgupta@synopsys.com>
*
* Top level include file providing ISA pseudo-mnemonics for use in assembler
* and inline assembly.
*
* - Helps code reuse across ARC64/ARC32/ARCv2
* e.g. "LDR" maps to 'LD' on 32-bit ISA, 'LDL' on 64-bit ARCv2/ARC64
*
* - Provides emulation with multiple instructions if the case be
* e.g. "DBNZ" implemented using 'SUB' and 'BRNE'
*
* - Looks more complex than it really is: mainly because Kconfig defines
* are not "honored" in inline assembly. So each variant is unconditional
* code in a standalone file with Kconfig based #ifdef'ry here. During the
* build process, the "C" preprocessor runs through this file, leaving
* just the final variant include in code fed to compiler/assembler.
*/
#ifndef __ASM_ARC_ASM_H
#define __ASM_ARC_ASM_H 1
#ifdef _ASMLANGUAGE
#if defined(CONFIG_ISA_ARCV3) && defined(CONFIG_64BIT)
#define ARC_PTR .xword
#define ARC_REGSZ 8
#define ARC_REGSHIFT 3
#if defined(__CCAC__)
#include "asm-macro-64-bit-mwdt.h"
#else
#include "asm-macro-64-bit-gnu.h"
#endif /* defined(__CCAC__) */
#elif defined(CONFIG_ISA_ARCV3) && !defined(CONFIG_64BIT)
#define ARC_PTR .word
#define ARC_REGSZ 4
#define ARC_REGSHIFT 2
#if defined(__CCAC__)
#include "asm-macro-32-bit-mwdt.h"
#else
#include "asm-macro-32-bit-gnu.h"
#endif /* defined(__CCAC__) */
#else
#define ARC_PTR .word
#define ARC_REGSZ 4
#define ARC_REGSHIFT 2
#if defined(__CCAC__)
#include "asm-macro-32-bit-mwdt.h"
#else
#include "asm-macro-32-bit-gnu.h"
#endif /* defined(__CCAC__) */
#endif
#else /* !_ASMLANGUAGE */
#error "asm-compat macroses used not in assembler code!"
#endif /* _ASMLANGUAGE */
#endif
``` | /content/code_sandbox/include/zephyr/arch/arc/asm-compat/assembler.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 494 |
```objective-c
/*
*
* Author: Vineet Gupta <vgupta@synopsys.com>
*
* pseudo-mnemonics for ALU/Memory instructions for ARC64 ISA
*/
.irp cc,,.hi,.nz
.macro MOVR\cc d, s
movl\cc \d, \s
.endm
.endr
.irp aa,,.ab,.as,.aw
.macro LDR\aa d, s, off=0
ldl\aa \d, [\s, \off]
.endm
.endr
.irp aa,.ab,.as,.aw
.macro STR\aa d, s, off=0
; workaround assembler barfing for ST r, [@symb, 0]
.if \off == 0
stl\aa \d, [\s]
.else
stl\aa \d, [\s, \off]
.endif
.endm
.endr
.macro STR d, s, off=0
.if \off == 0
stl \d, [\s]
.else
.if \off > 255
STR.as \d, \s, \off / 8
.else
stl \d, [\s, \off]
.endif
.endif
.endm
.macro PUSHR r
pushl \r
.endm
.macro POPR r
popl \r
.endm
.macro LRR d, aux
lrl \d, \aux
.endm
.macro SRR d, aux
srl \d, \aux
.endm
.irp cc,,.nz
.macro ADDR\cc d, s, v
addl\cc \d, \s, \v
.endm
.endr
.irp cc,,.nz
.macro ADD2R\cc d, s, v
add2l\cc \d, \s, \v
.endm
.endr
.macro ADD3R d, s, v
add3l \d, \s, \v
.endm
.macro SUBR d, s, v
subl \d, \s, \v
.endm
.macro BMSKNR d, s, v
bmsknl \d, \s, \v
.endm
.macro LSRR d, s, v
lsrl \d, \s, \v
.endm
.macro ASLR d, s, v
asll \d, \s, \v
.endm
.macro ANDR d, s, v
andl \d, \s, \v
.endm
.macro ORR, d, s, v
orl \d, \s, \v
.endm
.irp cc,ne,eq
.macro BRR\cc d, s, lbl
br\cc\()l \d, \s, \lbl
.endm
.endr
.macro BREQR d, s, lbl
breql \d, \s, \lbl
.endm
.macro CMPR op1, op2
cmpl \op1, \op2
.endm
``` | /content/code_sandbox/include/zephyr/arch/arc/asm-compat/asm-macro-64-bit-gnu.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 717 |
```yaml
archs:
- name: arc
path: arc
- name: arm
path: arm
- name: arm64
path: arm64
- name: mips
path: mips
- name: nios2
path: nios2
- name: posix
path: posix
- name: riscv
path: riscv
- name: sparc
path: sparc
- name: xtensa
path: xtensa
- name: x86
path: x86
``` | /content/code_sandbox/arch/archs.yml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 131 |
```unknown
source "$(KCONFIG_BINARY_DIR)/arch/Kconfig"
``` | /content/code_sandbox/arch/Kconfig.v2 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 13 |
```objective-c
/*
*/
#ifndef ZEPHYR_INCLUDE_ZBUS_H_
#define ZEPHYR_INCLUDE_ZBUS_H_
#include <string.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/iterable_sections.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Zbus API
* @defgroup zbus_apis Zbus APIs
* @ingroup os_services
* @{
*/
/**
* @brief Type used to represent a channel mutable data.
*
* Every channel has a zbus_channel_data structure associated.
*/
struct zbus_channel_data {
/** Static channel observer list start index. Considering the ITERABLE SECTIONS allocation
* order.
*/
int16_t observers_start_idx;
/** Static channel observer list end index. Considering the ITERABLE SECTIONS allocation
* order.
*/
int16_t observers_end_idx;
/** Access control semaphore. Points to the semaphore used to avoid race conditions
* for accessing the channel.
*/
struct k_sem sem;
#if defined(CONFIG_ZBUS_PRIORITY_BOOST)
/** Highest observer priority. Indicates the priority that the VDED will use to boost the
* notification process avoiding preemptions.
*/
int highest_observer_priority;
#endif /* CONFIG_ZBUS_PRIORITY_BOOST */
#if defined(CONFIG_ZBUS_RUNTIME_OBSERVERS) || defined(__DOXYGEN__)
/** Channel observer list. Represents the channel's observers list, it can be empty
* or have listeners and subscribers mixed in any sequence. It can be changed in runtime.
*/
sys_slist_t observers;
#endif /* CONFIG_ZBUS_RUNTIME_OBSERVERS */
#if defined(CONFIG_ZBUS_MSG_SUBSCRIBER_NET_BUF_POOL_ISOLATION) || defined(__DOXYGEN__)
/** Net buf pool for message subscribers. It can be either the global or a separated one.
*/
struct net_buf_pool *msg_subscriber_pool;
#endif /* ZBUS_MSG_SUBSCRIBER_NET_BUF_POOL_ISOLATION */
};
/**
 * @brief Type used to represent a channel.
 *
 * Every channel has a zbus_channel structure associated used to control the channel
 * access and usage. All fields are const: the descriptor is immutable after
 * definition, and the mutable runtime state lives in the pointed-to
 * zbus_channel_data.
 */
struct zbus_channel {
#if defined(CONFIG_ZBUS_CHANNEL_NAME) || defined(__DOXYGEN__)
/** Channel name. Only compiled in when @kconfig{CONFIG_ZBUS_CHANNEL_NAME} is enabled. */
const char *const name;
#endif
/** Message reference. Represents the message's reference that points to the actual
 * shared memory region.
 */
void *const message;
/** Message size. Represents the channel's message size. */
const size_t message_size;
/** User data available to extend zbus features. The channel must be claimed before
 * using this field.
 */
void *const user_data;
/** Message validator. Stores the reference to the function to check the message
 * validity before actually performing the publishing. No invalid messages can be
 * published. Every message is valid when this field is empty (NULL).
 */
bool (*const validator)(const void *msg, size_t msg_size);
/** Mutable channel data struct. */
struct zbus_channel_data *const data;
};
/**
 * @brief Type used to represent an observer type.
 *
 * An observer can be a listener, a subscriber, or a message subscriber.
 */
enum __packed zbus_observer_type {
/** Listener: notified synchronously through a callback. */
ZBUS_OBSERVER_LISTENER_TYPE,
/** Subscriber: notified asynchronously through a message queue. */
ZBUS_OBSERVER_SUBSCRIBER_TYPE,
/** Message subscriber: receives the message asynchronously through a FIFO. */
ZBUS_OBSERVER_MSG_SUBSCRIBER_TYPE,
};
/**
 * @brief Type used to represent an observer's mutable data.
 *
 * Kept separate from struct zbus_observer so the observer descriptor can be const.
 */
struct zbus_observer_data {
/** Enabled flag. Indicates if observer is receiving notification. */
bool enabled;
#if defined(CONFIG_ZBUS_PRIORITY_BOOST)
/** Subscriber attached thread priority. Used for the VDED priority boost. */
int priority;
#endif /* CONFIG_ZBUS_PRIORITY_BOOST */
};
/**
 * @brief Type used to represent an observer.
 *
 * Every observer has a representation structure containing the relevant information.
 * An observer is a code portion interested in some channel. The observer can be notified
 * synchronously or asynchronously and it is called listener and subscriber respectively.
 * The observer can be enabled or disabled during runtime by changing the enabled boolean
 * field of the structure. The listeners have a callback function that is executed by the
 * bus with the index of the changed channel as argument when the notification is sent.
 * The subscribers have a message queue where the bus enqueues the index of the changed
 * channel when a notification is sent.
 *
 * @see zbus_obs_set_enable function to properly change the observer's enabled field.
 *
 */
struct zbus_observer {
#if defined(CONFIG_ZBUS_OBSERVER_NAME) || defined(__DOXYGEN__)
/** Observer name. Only compiled in when @kconfig{CONFIG_ZBUS_OBSERVER_NAME} is enabled. */
const char *const name;
#endif
/** Type indication. Discriminates which member of the union below is valid. */
enum zbus_observer_type type;
/** Mutable observer data struct. */
struct zbus_observer_data *const data;
union {
/** Observer message queue. It turns the observer into a subscriber. */
struct k_msgq *const queue;
/** Observer callback function. It turns the observer into a listener. */
void (*const callback)(const struct zbus_channel *chan);
#if defined(CONFIG_ZBUS_MSG_SUBSCRIBER) || defined(__DOXYGEN__)
/** Observer message FIFO. It turns the observer into a message subscriber. It only
 * exists if the @kconfig{CONFIG_ZBUS_MSG_SUBSCRIBER} is enabled.
 */
struct k_fifo *const message_fifo;
#endif /* CONFIG_ZBUS_MSG_SUBSCRIBER */
};
};
/** @cond INTERNAL_HIDDEN */
struct zbus_channel_observation_mask {
bool enabled;
};
struct zbus_channel_observation {
const struct zbus_channel *const chan;
const struct zbus_observer *const obs;
};
#ifdef __cplusplus
#define _ZBUS_CPP_EXTERN extern
#else
#define _ZBUS_CPP_EXTERN
#endif /* __cplusplus */
#define ZBUS_MIN_THREAD_PRIORITY (CONFIG_NUM_PREEMPT_PRIORITIES - 1)
#if defined(CONFIG_ZBUS_ASSERT_MOCK)
#define _ZBUS_ASSERT(_cond, _fmt, ...) \
do { \
if (!(_cond)) { \
printk("ZBUS ASSERT: "); \
printk(_fmt, ##__VA_ARGS__); \
printk("\n"); \
return -EFAULT; \
} \
} while (0)
#else
#define _ZBUS_ASSERT(_cond, _fmt, ...) __ASSERT(_cond, _fmt, ##__VA_ARGS__)
#endif
#if defined(CONFIG_ZBUS_CHANNEL_NAME)
#define ZBUS_CHANNEL_NAME_INIT(_name) .name = #_name,
#define _ZBUS_CHAN_NAME(_chan) (_chan)->name
#else
#define ZBUS_CHANNEL_NAME_INIT(_name)
#define _ZBUS_CHAN_NAME(_chan) ""
#endif
#if defined(CONFIG_ZBUS_OBSERVER_NAME)
#define ZBUS_OBSERVER_NAME_INIT(_name) .name = #_name,
#define _ZBUS_OBS_NAME(_obs) (_obs)->name
#else
#define ZBUS_OBSERVER_NAME_INIT(_name)
#define _ZBUS_OBS_NAME(_obs) ""
#endif
#if defined(CONFIG_ZBUS_RUNTIME_OBSERVERS)
#define ZBUS_RUNTIME_OBSERVERS_LIST_DECL(_slist_name) static sys_slist_t _slist_name
#define ZBUS_RUNTIME_OBSERVERS_LIST_INIT(_slist_name) .runtime_observers = &_slist_name,
#else
#define ZBUS_RUNTIME_OBSERVERS_LIST_DECL(_slist_name)
#define ZBUS_RUNTIME_OBSERVERS_LIST_INIT(_slist_name) /* No runtime observers */
#endif
#define _ZBUS_OBS_EXTERN(_name) extern struct zbus_observer _name
#define _ZBUS_CHAN_EXTERN(_name) extern const struct zbus_channel _name
#define ZBUS_REF(_value) &(_value)
#define FOR_EACH_FIXED_ARG_NONEMPTY_TERM(F, sep, fixed_arg, ...) \
COND_CODE_0(/* are there zero non-empty arguments ? */ \
NUM_VA_ARGS_LESS_1( \
LIST_DROP_EMPTY(__VA_ARGS__, _)), /* if so, expand to nothing */ \
(), /* otherwise, expand to: */ \
(FOR_EACH_IDX_FIXED_ARG( \
F, sep, fixed_arg, \
LIST_DROP_EMPTY(__VA_ARGS__)) /* plus a final terminator */ \
__DEBRACKET sep))
#define _ZBUS_OBSERVATION_PREFIX(_idx) \
GET_ARG_N(_idx, 00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, 14, 15, 16, 17, \
18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, \
38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, \
58, 59, 60, 61, 62, 63)
#define _ZBUS_CHAN_OBSERVATION(_idx, _obs, _chan) \
const STRUCT_SECTION_ITERABLE( \
zbus_channel_observation, \
_CONCAT(_chan, _ZBUS_OBSERVATION_PREFIX(UTIL_INC(_idx)))) = {.chan = &_chan, \
.obs = &_obs}; \
STRUCT_SECTION_ITERABLE(zbus_channel_observation_mask, \
_CONCAT(_CONCAT(_chan, _ZBUS_OBSERVATION_PREFIX(UTIL_INC(_idx))), \
_mask)) = {.enabled = false};
#if defined(CONFIG_ZBUS_RUNTIME_OBSERVERS) || defined(__DOXYGEN__)
#define _ZBUS_RUNTIME_OBSERVERS(_name) .observers = &(_CONCAT(_observers_, _name)),
#define _ZBUS_RUNTIME_OBSERVERS_DECL(_name) static sys_slist_t _CONCAT(_observers_, _name);
#else
#define _ZBUS_RUNTIME_OBSERVERS(_name)
#define _ZBUS_RUNTIME_OBSERVERS_DECL(_name)
#endif /* CONFIG_ZBUS_RUNTIME_OBSERVERS */
/** @endcond */
/* clang-format off */
/**
* @brief Add a static channel observation.
*
* This macro initializes a channel observation by receiving the
* channel and the observer.
*
* @param _chan Channel instance.
* @param _obs Observer instance.
* @param _masked Observation state.
* @param _prio Observer notification sequence priority.
*/
#define ZBUS_CHAN_ADD_OBS_WITH_MASK(_chan, _obs, _masked, _prio) \
const STRUCT_SECTION_ITERABLE(zbus_channel_observation, \
_CONCAT(_CONCAT(_chan, zz), _CONCAT(_prio, _obs))) = { \
.chan = &_chan, \
.obs = &_obs, \
}; \
STRUCT_SECTION_ITERABLE(zbus_channel_observation_mask, \
_CONCAT(_CONCAT(_CONCAT(_chan, zz), _CONCAT(_prio, _obs)), \
_mask)) = {.enabled = _masked}
/* clang-format on */
/**
* @brief Add a static channel observation.
*
* This macro initializes a channel observation by receiving the
* channel and the observer.
*
* @param _chan Channel instance.
* @param _obs Observer instance.
* @param _prio Observer notification sequence priority.
*/
#define ZBUS_CHAN_ADD_OBS(_chan, _obs, _prio) ZBUS_CHAN_ADD_OBS_WITH_MASK(_chan, _obs, false, _prio)
/**
* @def ZBUS_OBS_DECLARE
* This macro lists the observers to be used in a file. Internally, it declares the observers with
* the extern statement. Note it is only necessary when the observers are declared outside the file.
*/
#define ZBUS_OBS_DECLARE(...) FOR_EACH_NONEMPTY_TERM(_ZBUS_OBS_EXTERN, (;), __VA_ARGS__)
/**
* @def ZBUS_CHAN_DECLARE
* This macro lists the channels to be used in a file. Internally, it declares the channels with the
* extern statement. Note it is only necessary when the channels are declared outside the file.
*/
#define ZBUS_CHAN_DECLARE(...) FOR_EACH(_ZBUS_CHAN_EXTERN, (;), __VA_ARGS__)
/**
* @def ZBUS_OBSERVERS_EMPTY
* This macro indicates the channel has no observers.
*/
#define ZBUS_OBSERVERS_EMPTY
/**
* @def ZBUS_OBSERVERS
* This macro indicates the channel has listed observers. Note the sequence of observer notification
* will follow the same as listed.
*/
#define ZBUS_OBSERVERS(...) __VA_ARGS__
/* clang-format off */
/**
* @brief Zbus channel definition.
*
* This macro defines a channel.
*
* @param _name The channel's name.
* @param _type The Message type. It must be a struct or union.
* @param _validator The validator function.
* @param _user_data A pointer to the user data.
*
* @see struct zbus_channel
* @param _observers The observers list. The sequence indicates the priority of the observer. The
* first the highest priority.
* @param _init_val The message initialization.
*/
#define ZBUS_CHAN_DEFINE(_name, _type, _validator, _user_data, _observers, _init_val)     \
	static _type _CONCAT(_zbus_message_, _name) = _init_val;                          \
	static struct zbus_channel_data _CONCAT(_zbus_chan_data_, _name) = {              \
		.observers_start_idx = -1,                                                \
		.observers_end_idx = -1,                                                  \
		.sem = Z_SEM_INITIALIZER(_CONCAT(_zbus_chan_data_, _name).sem, 1, 1),     \
		IF_ENABLED(CONFIG_ZBUS_RUNTIME_OBSERVERS, (                               \
			.observers = SYS_SLIST_STATIC_INIT(                               \
				&_CONCAT(_zbus_chan_data_, _name).observers),             \
		))                                                                        \
		IF_ENABLED(CONFIG_ZBUS_PRIORITY_BOOST, (                                  \
			.highest_observer_priority = ZBUS_MIN_THREAD_PRIORITY,            \
		))                                                                        \
	};                                                                                \
	static K_MUTEX_DEFINE(_CONCAT(_zbus_mutex_, _name));                              \
	_ZBUS_CPP_EXTERN const STRUCT_SECTION_ITERABLE(zbus_channel, _name) = {           \
		ZBUS_CHANNEL_NAME_INIT(_name) /* Conditionally compiled name field */     \
		.message = &_CONCAT(_zbus_message_, _name),                               \
		.message_size = sizeof(_type),                                            \
		.user_data = _user_data,                                                  \
		.validator = _validator,                                                  \
		.data = &_CONCAT(_zbus_chan_data_, _name),                                \
		/* BUGFIX: this conditional previously tested the bare token             \
		 * ZBUS_MSG_SUBSCRIBER_NET_BUF_POOL_ISOLATION (missing the CONFIG_       \
		 * prefix). That token is never defined, so IF_ENABLED always expanded   \
		 * to nothing and the isolated msg-subscriber pool was never assigned,   \
		 * even with the Kconfig option enabled.                                 \
		 */                                                                      \
		IF_ENABLED(CONFIG_ZBUS_MSG_SUBSCRIBER_NET_BUF_POOL_ISOLATION, (          \
			.msg_subscriber_pool = &_zbus_msg_subscribers_pool,               \
		))                                                                        \
	};                                                                                \
	/* Extern declaration of observers */                                             \
	ZBUS_OBS_DECLARE(_observers);                                                     \
	/* Create all channel observations from observers list */                         \
	FOR_EACH_FIXED_ARG_NONEMPTY_TERM(_ZBUS_CHAN_OBSERVATION, (;), _name, _observers)
/* clang-format on */
/**
* @brief Initialize a message.
*
* This macro initializes a message by passing the values to initialize the message struct
* or union.
*
 * @param[in] _val Variadic with the initial values. ``ZBUS_MSG_INIT(0)`` means ``{0}``, as
 * ``ZBUS_MSG_INIT(.a=10, .b=30)`` means ``{.a=10, .b=30}``.
*/
#define ZBUS_MSG_INIT(_val, ...) \
{ \
_val, ##__VA_ARGS__ \
}
/* clang-format off */
/**
* @brief Define and initialize a subscriber.
*
* This macro defines an observer of subscriber type. It defines a message queue where the
* subscriber will receive the notification asynchronously, and initialize the ``struct
* zbus_observer`` defining the subscriber.
*
* @param[in] _name The subscriber's name.
* @param[in] _queue_size The notification queue's size.
* @param[in] _enable The subscriber initial enable state.
*/
#define ZBUS_SUBSCRIBER_DEFINE_WITH_ENABLE(_name, _queue_size, _enable) \
K_MSGQ_DEFINE(_zbus_observer_queue_##_name, \
sizeof(const struct zbus_channel *), \
_queue_size, sizeof(const struct zbus_channel *) \
); \
static struct zbus_observer_data _CONCAT(_zbus_obs_data_, _name) = { \
.enabled = _enable, \
IF_ENABLED(CONFIG_ZBUS_PRIORITY_BOOST, ( \
.priority = ZBUS_MIN_THREAD_PRIORITY, \
)) \
}; \
STRUCT_SECTION_ITERABLE(zbus_observer, _name) = { \
ZBUS_OBSERVER_NAME_INIT(_name) /* Name field */ \
.type = ZBUS_OBSERVER_SUBSCRIBER_TYPE, \
.data = &_CONCAT(_zbus_obs_data_, _name), \
.queue = &_zbus_observer_queue_##_name, \
}
/* clang-format on */
/**
* @brief Define and initialize a subscriber.
*
* This macro defines an observer of subscriber type. It defines a message queue where the
* subscriber will receive the notification asynchronously, and initialize the ``struct
* zbus_observer`` defining the subscriber. The subscribers are defined in the enabled
* state with this macro.
*
* @param[in] _name The subscriber's name.
* @param[in] _queue_size The notification queue's size.
*/
#define ZBUS_SUBSCRIBER_DEFINE(_name, _queue_size) \
ZBUS_SUBSCRIBER_DEFINE_WITH_ENABLE(_name, _queue_size, true)
/* clang-format off */
/**
* @brief Define and initialize a listener.
*
* This macro defines an observer of listener type. This macro establishes the callback where the
* listener will be notified synchronously, and initialize the ``struct zbus_observer`` defining the
* listener.
*
* @param[in] _name The listener's name.
* @param[in] _cb The callback function.
* @param[in] _enable The listener initial enable state.
*/
#define ZBUS_LISTENER_DEFINE_WITH_ENABLE(_name, _cb, _enable) \
static struct zbus_observer_data _CONCAT(_zbus_obs_data_, _name) = { \
.enabled = _enable, \
IF_ENABLED(CONFIG_ZBUS_PRIORITY_BOOST, ( \
.priority = ZBUS_MIN_THREAD_PRIORITY, \
)) \
}; \
STRUCT_SECTION_ITERABLE(zbus_observer, _name) = { \
ZBUS_OBSERVER_NAME_INIT(_name) /* Name field */ \
.type = ZBUS_OBSERVER_LISTENER_TYPE, \
.data = &_CONCAT(_zbus_obs_data_, _name), \
.callback = (_cb) \
}
/* clang-format on */
/**
* @brief Define and initialize a listener.
*
* This macro defines an observer of listener type. This macro establishes the callback where the
* listener will be notified synchronously and initialize the ``struct zbus_observer`` defining the
* listener. The listeners are defined in the enabled state with this macro.
*
* @param[in] _name The listener's name.
* @param[in] _cb The callback function.
*/
#define ZBUS_LISTENER_DEFINE(_name, _cb) ZBUS_LISTENER_DEFINE_WITH_ENABLE(_name, _cb, true)
/* clang-format off */
/**
* @brief Define and initialize a message subscriber.
*
* This macro defines an observer of @ref ZBUS_OBSERVER_SUBSCRIBER_TYPE type. It defines a FIFO
* where the subscriber will receive the message asynchronously and initialize the @ref
* zbus_observer defining the subscriber.
*
* @param[in] _name The subscriber's name.
* @param[in] _enable The subscriber's initial state.
*/
#define ZBUS_MSG_SUBSCRIBER_DEFINE_WITH_ENABLE(_name, _enable) \
static K_FIFO_DEFINE(_zbus_observer_fifo_##_name); \
static struct zbus_observer_data _CONCAT(_zbus_obs_data_, _name) = { \
.enabled = _enable, \
IF_ENABLED(CONFIG_ZBUS_PRIORITY_BOOST, ( \
.priority = ZBUS_MIN_THREAD_PRIORITY, \
)) \
}; \
STRUCT_SECTION_ITERABLE(zbus_observer, _name) = { \
ZBUS_OBSERVER_NAME_INIT(_name) /* Name field */ \
.type = ZBUS_OBSERVER_MSG_SUBSCRIBER_TYPE, \
.data = &_CONCAT(_zbus_obs_data_, _name), \
.message_fifo = &_zbus_observer_fifo_##_name, \
}
/* clang-format on */
/**
* @brief Define and initialize an enabled message subscriber.
*
* This macro defines an observer of message subscriber type. It defines a FIFO where the
* subscriber will receive the message asynchronously and initialize the @ref
* zbus_observer defining the subscriber. The message subscribers are defined in the enabled state
* with this macro.
*
* @param[in] _name The subscriber's name.
*/
#define ZBUS_MSG_SUBSCRIBER_DEFINE(_name) ZBUS_MSG_SUBSCRIBER_DEFINE_WITH_ENABLE(_name, true)
/**
*
* @brief Publish to a channel
*
* This routine publishes a message to a channel.
*
* @param chan The channel's reference.
* @param msg Reference to the message where the publish function copies the channel's
* message data from.
* @param timeout Waiting period to publish the channel,
* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @retval 0 Channel published.
* @retval -ENOMSG The message is invalid based on the validator function or some of the
* observers could not receive the notification.
* @retval -EBUSY The channel is busy.
* @retval -EAGAIN Waiting period timed out.
* @retval -EFAULT A parameter is incorrect, the notification could not be sent to one or more
* observer, or the function context is invalid (inside an ISR). The function only returns this
* value when the @kconfig{CONFIG_ZBUS_ASSERT_MOCK} is enabled.
*/
int zbus_chan_pub(const struct zbus_channel *chan, const void *msg, k_timeout_t timeout);
/**
* @brief Read a channel
*
* This routine reads a message from a channel.
*
* @param[in] chan The channel's reference.
* @param[out] msg Reference to the message where the read function copies the channel's
* message data to.
* @param[in] timeout Waiting period to read the channel,
* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @retval 0 Channel read.
* @retval -EBUSY The channel is busy.
* @retval -EAGAIN Waiting period timed out.
* @retval -EFAULT A parameter is incorrect, or the function context is invalid (inside an ISR). The
* function only returns this value when the @kconfig{CONFIG_ZBUS_ASSERT_MOCK} is enabled.
*/
int zbus_chan_read(const struct zbus_channel *chan, void *msg, k_timeout_t timeout);
/**
* @brief Claim a channel
*
* This routine claims a channel. During the claiming period the channel is blocked for publishing,
* reading, notifying or claiming again. Finishing is the only available action.
*
* @warning After calling this routine, the channel cannot be used by other
* thread until the zbus_chan_finish routine is performed.
*
* @warning This routine should only be called once before a zbus_chan_finish.
*
* @param[in] chan The channel's reference.
* @param[in] timeout Waiting period to claim the channel,
* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @retval 0 Channel claimed.
* @retval -EBUSY The channel is busy.
* @retval -EAGAIN Waiting period timed out.
* @retval -EFAULT A parameter is incorrect, or the function context is invalid (inside an ISR). The
* function only returns this value when the @kconfig{CONFIG_ZBUS_ASSERT_MOCK} is enabled.
*/
int zbus_chan_claim(const struct zbus_channel *chan, k_timeout_t timeout);
/**
* @brief Finish a channel claim.
*
* This routine finishes a channel claim. After calling this routine with success, the channel will
* be able to be used by other thread.
*
* @warning This routine must only be used after a zbus_chan_claim.
*
* @param chan The channel's reference.
*
* @retval 0 Channel finished.
* @retval -EFAULT A parameter is incorrect, or the function context is invalid (inside an ISR). The
* function only returns this value when the @kconfig{CONFIG_ZBUS_ASSERT_MOCK} is enabled.
*/
int zbus_chan_finish(const struct zbus_channel *chan);
/**
* @brief Force a channel notification.
*
* This routine forces the event dispatcher to notify the channel's observers even if the message
* has no changes. Note this function could be useful after claiming/finishing actions.
*
* @param chan The channel's reference.
* @param timeout Waiting period to notify the channel,
* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @retval 0 Channel notified.
* @retval -EBUSY The channel's semaphore returned without waiting.
* @retval -EAGAIN Timeout to take the channel's semaphore.
* @retval -ENOMEM There is no more buffer available in the message buffers pool.
* @retval -EFAULT A parameter is incorrect, the notification could not be sent to one or more
* observer, or the function context is invalid (inside an ISR). The function only returns this
* value when the @kconfig{CONFIG_ZBUS_ASSERT_MOCK} is enabled.
*/
int zbus_chan_notify(const struct zbus_channel *chan, k_timeout_t timeout);
#if defined(CONFIG_ZBUS_CHANNEL_NAME) || defined(__DOXYGEN__)
/**
* @brief Get the channel's name.
*
* This routine returns the channel's name reference.
*
* @param chan The channel's reference.
*
* @return Channel's name reference.
*/
static inline const char *zbus_chan_name(const struct zbus_channel *chan)
{
	__ASSERT(chan != NULL, "chan is required");

	/* The name field is only compiled in under CONFIG_ZBUS_CHANNEL_NAME,
	 * as is this accessor.
	 */
	const char *chan_name = chan->name;

	return chan_name;
}
#endif
/**
* @brief Get the reference for a channel message directly.
*
* This routine returns the reference of a channel message.
*
* @warning This function must only be used directly for already locked channels. This
* can be done inside a listener for the receiving channel or after claim a channel.
*
* @param chan The channel's reference.
*
* @return Channel's message reference.
*/
static inline void *zbus_chan_msg(const struct zbus_channel *chan)
{
	__ASSERT(chan != NULL, "chan is required");

	/* Direct (unsynchronized) access: caller must already hold the channel. */
	void *msg_ref = chan->message;

	return msg_ref;
}
/**
* @brief Get a constant reference for a channel message directly.
*
* This routine returns a constant reference of a channel message. This should be used
* inside listeners to access the message directly. In this way zbus prevents the listener of
* changing the notifying channel's message during the notification process.
*
* @warning This function must only be used directly for already locked channels. This
* can be done inside a listener for the receiving channel or after claim a channel.
*
* @param chan The channel's constant reference.
*
* @return A constant channel's message reference.
*/
static inline const void *zbus_chan_const_msg(const struct zbus_channel *chan)
{
	__ASSERT(chan != NULL, "chan is required");

	/* Read-only view of the shared message; intended for use inside listeners. */
	const void *msg_ref = chan->message;

	return msg_ref;
}
/**
* @brief Get the channel's message size.
*
* This routine returns the channel's message size.
*
* @param chan The channel's reference.
*
* @return Channel's message size.
*/
static inline uint16_t zbus_chan_msg_size(const struct zbus_channel *chan)
{
__ASSERT(chan != NULL, "chan is required");
return chan->message_size;
}
/**
* @brief Get the channel's user data.
*
* This routine returns the channel's user data.
*
* @param chan The channel's reference.
*
* @return Channel's user data.
*/
static inline void *zbus_chan_user_data(const struct zbus_channel *chan)
{
	__ASSERT(chan != NULL, "chan is required");

	/* The channel must be claimed before this data is used (see field docs). */
	void *user_data_ref = chan->user_data;

	return user_data_ref;
}
#if defined(CONFIG_ZBUS_MSG_SUBSCRIBER_NET_BUF_POOL_ISOLATION) || defined(__DOXYGEN__)
/**
* @brief Set the channel's msg subscriber `net_buf` pool.
*
* @param chan The channel's reference.
* @param pool The reference to the `net_buf` memory pool.
*/
static inline void zbus_chan_set_msg_sub_pool(const struct zbus_channel *chan,
struct net_buf_pool *pool)
{
__ASSERT(chan != NULL, "chan is required");
__ASSERT(pool != NULL, "pool is required");
chan->data->msg_subscriber_pool = pool;
}
#endif /* CONFIG_ZBUS_MSG_SUBSCRIBER_NET_BUF_POOL_ISOLATION */
#if defined(CONFIG_ZBUS_RUNTIME_OBSERVERS) || defined(__DOXYGEN__)
/**
* @brief Add an observer to a channel.
*
* This routine adds an observer to the channel.
*
* @param chan The channel's reference.
* @param obs The observer's reference to be added.
* @param timeout Waiting period to add an observer,
* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @retval 0 Observer added to the channel.
* @retval -EALREADY The observer is already present in the channel's runtime observers list.
* @retval -ENOMEM Returned without waiting.
* @retval -EAGAIN Waiting period timed out.
* @retval -EINVAL Some parameter is invalid.
*/
int zbus_chan_add_obs(const struct zbus_channel *chan, const struct zbus_observer *obs,
k_timeout_t timeout);
/**
* @brief Remove an observer from a channel.
*
* This routine removes an observer from the channel.
*
* @param chan The channel's reference.
* @param obs The observer's reference to be removed.
* @param timeout Waiting period to remove an observer,
* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @retval 0 Observer removed from the channel.
* @retval -EINVAL Invalid data supplied.
* @retval -EBUSY Returned without waiting.
* @retval -EAGAIN Waiting period timed out.
* @retval -ENODATA no observer found in channel's runtime observer list.
* @retval -ENOMEM Returned without waiting.
*/
int zbus_chan_rm_obs(const struct zbus_channel *chan, const struct zbus_observer *obs,
k_timeout_t timeout);
/** @cond INTERNAL_HIDDEN */
/* Node type used to link an observer into a channel's runtime observers list. */
struct zbus_observer_node {
/* Intrusive list node (sys_slist_t linkage). */
sys_snode_t node;
/* The observer added at runtime. */
const struct zbus_observer *obs;
};
/** @endcond */
#endif /* CONFIG_ZBUS_RUNTIME_OBSERVERS */
/**
* @brief Change the observer state.
*
* This routine changes the observer state. A channel when disabled will not receive
* notifications from the event dispatcher.
*
* @param[in] obs The observer's reference.
* @param[in] enabled State to be. When false the observer stops to receive notifications.
*
* @retval 0 Observer set enable.
* @retval -EFAULT A parameter is incorrect, or the function context is invalid (inside an ISR). The
* function only returns this value when the @kconfig{CONFIG_ZBUS_ASSERT_MOCK} is enabled.
*/
int zbus_obs_set_enable(struct zbus_observer *obs, bool enabled);
/**
* @brief Get the observer state.
*
* This routine retrieves the observer state.
*
* @param[in] obs The observer's reference.
* @param[out] enable The boolean output's reference.
*
* @retval 0 Observer state successfully stored in @p enable.
* @retval -EFAULT A parameter is incorrect (only possible when
* @kconfig{CONFIG_ZBUS_ASSERT_MOCK} is enabled).
*/
static inline int zbus_obs_is_enabled(struct zbus_observer *obs, bool *enable)
{
	_ZBUS_ASSERT(obs != NULL, "obs is required");
	_ZBUS_ASSERT(enable != NULL, "enable is required");

	/* Report the observer's current enabled state through the out-parameter. */
	bool is_enabled = obs->data->enabled;

	*enable = is_enabled;

	return 0;
}
/**
* @brief Mask notifications from a channel to an observer.
*
* The observer can mask notifications from a specific observing channel by calling this function.
*
* @param obs The observer's reference to be added.
* @param chan The channel's reference.
* @param masked The mask state. When the mask is true, the observer will not receive notifications
* from the channel.
*
* @retval 0 Channel notifications masked to the observer.
* @retval -ESRCH No observation found for the related pair chan/obs.
* @retval -EINVAL Some parameter is invalid.
*/
int zbus_obs_set_chan_notification_mask(const struct zbus_observer *obs,
const struct zbus_channel *chan, bool masked);
/**
* @brief Get the notifications masking state from a channel to an observer.
*
* @param obs The observer's reference to be added.
* @param chan The channel's reference.
* @param[out] masked The mask state. When the mask is true, the observer will not receive
* notifications from the channel.
*
* @retval 0 Retrieved the masked state.
* @retval -ESRCH No observation found for the related pair chan/obs.
* @retval -EINVAL Some parameter is invalid.
*/
int zbus_obs_is_chan_notification_masked(const struct zbus_observer *obs,
const struct zbus_channel *chan, bool *masked);
#if defined(CONFIG_ZBUS_OBSERVER_NAME) || defined(__DOXYGEN__)
/**
* @brief Get the observer's name.
*
* This routine returns the observer's name reference.
*
* @param obs The observer's reference.
*
* @return The observer's name reference.
*/
static inline const char *zbus_obs_name(const struct zbus_observer *obs)
{
	__ASSERT(obs != NULL, "obs is required");

	/* The name field (and this accessor) exist only under CONFIG_ZBUS_OBSERVER_NAME. */
	const char *obs_name = obs->name;

	return obs_name;
}
#endif
#if defined(CONFIG_ZBUS_PRIORITY_BOOST) || defined(__DOXYGEN__)
/**
* @brief Set the observer thread priority by attaching it to a thread.
*
* @param[in] obs The observer's reference.
*
* @retval 0 Observer attached to the thread.
* @retval -EFAULT A parameter is incorrect, or the function context is invalid (inside an ISR). The
* function only returns this value when the @kconfig{CONFIG_ZBUS_ASSERT_MOCK} is enabled.
*/
int zbus_obs_attach_to_thread(const struct zbus_observer *obs);
/**
* @brief Clear the observer thread priority by detaching it from a thread.
*
* @param[in] obs The observer's reference.
*
* @retval 0 Observer detached from the thread.
* @retval -EFAULT A parameter is incorrect, or the function context is invalid (inside an ISR). The
* function only returns this value when the @kconfig{CONFIG_ZBUS_ASSERT_MOCK} is enabled.
*/
int zbus_obs_detach_from_thread(const struct zbus_observer *obs);
#endif /* CONFIG_ZBUS_PRIORITY_BOOST */
/**
* @brief Wait for a channel notification.
*
* This routine makes the subscriber to wait a notification. The notification comes as a channel
* reference.
*
* @param[in] sub The subscriber's reference.
* @param[out] chan The notification channel's reference.
* @param[in] timeout Waiting period for a notification arrival,
* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @retval 0 Notification received.
* @retval -ENOMSG Returned without waiting.
* @retval -EAGAIN Waiting period timed out.
* @retval -EINVAL The observer is not a subscriber.
* @retval -EFAULT A parameter is incorrect, or the function context is invalid (inside an ISR). The
* function only returns this value when the @kconfig{CONFIG_ZBUS_ASSERT_MOCK} is enabled.
*/
int zbus_sub_wait(const struct zbus_observer *sub, const struct zbus_channel **chan,
k_timeout_t timeout);
#if defined(CONFIG_ZBUS_MSG_SUBSCRIBER) || defined(__DOXYGEN__)
/**
* @brief Wait for a channel message.
*
* This routine makes the subscriber wait for the new message in case of channel publication.
*
* @param[in] sub The subscriber's reference.
* @param[out] chan The notification channel's reference.
* @param[out] msg A reference to a copy of the published message.
* @param[in] timeout Waiting period for a notification arrival,
* or one of the special values, K_NO_WAIT and K_FOREVER.
*
* @retval 0 Message received.
* @retval -EINVAL The observer is not a subscriber.
* @retval -ENOMSG Could not retrieve the net_buf from the subscriber FIFO.
* @retval -EILSEQ Received an invalid channel reference.
* @retval -EFAULT A parameter is incorrect, or the function context is invalid (inside an ISR). The
* function only returns this value when the @kconfig{CONFIG_ZBUS_ASSERT_MOCK} is enabled.
*/
int zbus_sub_wait_msg(const struct zbus_observer *sub, const struct zbus_channel **chan, void *msg,
k_timeout_t timeout);
#endif /* CONFIG_ZBUS_MSG_SUBSCRIBER */
/**
*
* @brief Iterate over channels.
*
* Enables the developer to iterate over the channels giving to this function an
* iterator_func which is called for each channel. If the iterator_func returns false all
* the iteration stops.
*
* @param[in] iterator_func The function that will be executed on each iteration.
*
* @retval true Iterator executed for all channels.
* @retval false Iterator could not be executed. Some iterate returned false.
*/
bool zbus_iterate_over_channels(bool (*iterator_func)(const struct zbus_channel *chan));
/**
*
* @brief Iterate over channels with user data.
*
* Enables the developer to iterate over the channels giving to this function an
* iterator_func which is called for each channel. If the iterator_func returns false all
* the iteration stops.
*
* @param[in] iterator_func The function that will be executed on each iteration.
* @param[in] user_data The user data that can be passed in the function.
*
* @retval true Iterator executed for all channels.
* @retval false Iterator could not be executed. Some iterate returned false.
*/
bool zbus_iterate_over_channels_with_user_data(
bool (*iterator_func)(const struct zbus_channel *chan, void *user_data), void *user_data);
/**
*
* @brief Iterate over observers.
*
* Enables the developer to iterate over the observers giving to this function an
* iterator_func which is called for each observer. If the iterator_func returns false all
* the iteration stops.
*
* @param[in] iterator_func The function that will be executed on each iteration.
*
* @retval true Iterator executed for all channels.
* @retval false Iterator could not be executed. Some iterate returned false.
*/
bool zbus_iterate_over_observers(bool (*iterator_func)(const struct zbus_observer *obs));
/**
*
* @brief Iterate over observers with user data.
*
* Enables the developer to iterate over the observers giving to this function an
* iterator_func which is called for each observer. If the iterator_func returns false all
* the iteration stops.
*
* @param[in] iterator_func The function that will be executed on each iteration.
* @param[in] user_data The user data that can be passed in the function.
*
* @retval true Iterator executed for all channels.
* @retval false Iterator could not be executed. Some iterate returned false.
*/
bool zbus_iterate_over_observers_with_user_data(
bool (*iterator_func)(const struct zbus_observer *obs, void *user_data), void *user_data);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ZBUS_H_ */
``` | /content/code_sandbox/include/zephyr/zbus/zbus.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 8,490 |
```unknown
# Note: $ARCH might be a glob pattern
source "$(ARCH_DIR)/$(ARCH)/Kconfig"
``` | /content/code_sandbox/arch/Kconfig.v1 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 24 |
```unknown
# General architecture configuration options
# Include these first so that any properties (e.g. defaults) below can be
# overridden (by defining symbols in multiple locations)
source "$(ARCH_DIR)/Kconfig.$(HWM_SCHEME)"
# ToDo: Generate a Kconfig.arch for loading of additional arch in HWMv2.
osource "$(KCONFIG_BINARY_DIR)/Kconfig.arch"
# Architecture symbols
#
# Should be 'select'ed by low-level symbols like SOC_SERIES_* or, lacking that,
# by SOC_*.
config ARC
bool
select ARCH_IS_SET
imply XIP
select ARCH_HAS_THREAD_LOCAL_STORAGE
select ARCH_SUPPORTS_ROM_START
select ARCH_HAS_DIRECTED_IPIS
help
ARC architecture
config ARM
bool
select ARCH_IS_SET
select ARCH_SUPPORTS_COREDUMP if CPU_CORTEX_M
select ARCH_SUPPORTS_COREDUMP_THREADS if CPU_CORTEX_M
# FIXME: current state of the code for all ARM requires this, but
# is really only necessary for Cortex-M with ARM MPU!
select GEN_PRIV_STACKS
select ARCH_HAS_THREAD_LOCAL_STORAGE if CPU_AARCH32_CORTEX_R || CPU_CORTEX_M || CPU_AARCH32_CORTEX_A
select BARRIER_OPERATIONS_ARCH
help
ARM architecture
config ARM64
bool
select ARCH_IS_SET
select 64BIT
select ARCH_SUPPORTS_COREDUMP
select HAS_ARM_SMCCC
select ARCH_HAS_THREAD_LOCAL_STORAGE
select USE_SWITCH
select USE_SWITCH_SUPPORTED
select IRQ_OFFLOAD_NESTED if IRQ_OFFLOAD
select BARRIER_OPERATIONS_ARCH
select ARCH_HAS_DIRECTED_IPIS
help
ARM64 (AArch64) architecture
config MIPS
bool
select ARCH_IS_SET
select ATOMIC_OPERATIONS_C
help
MIPS architecture
config SPARC
bool
select ARCH_IS_SET
select USE_SWITCH
select USE_SWITCH_SUPPORTED
select BIG_ENDIAN
select ATOMIC_OPERATIONS_BUILTIN if SPARC_CASA
select ATOMIC_OPERATIONS_C if !SPARC_CASA
select ARCH_HAS_THREAD_LOCAL_STORAGE
select ARCH_HAS_EXTRA_EXCEPTION_INFO
help
SPARC architecture
config X86
bool
select ARCH_IS_SET
select ATOMIC_OPERATIONS_BUILTIN
select ARCH_SUPPORTS_COREDUMP
select ARCH_SUPPORTS_ROM_START if !X86_64
select CPU_HAS_MMU
select ARCH_MEM_DOMAIN_DATA if USERSPACE && !X86_COMMON_PAGE_TABLE
select ARCH_MEM_DOMAIN_SYNCHRONOUS_API if USERSPACE
select ARCH_HAS_GDBSTUB if !X86_64
select ARCH_HAS_TIMING_FUNCTIONS
select ARCH_HAS_THREAD_LOCAL_STORAGE
select ARCH_HAS_DEMAND_PAGING if !X86_64
select IRQ_OFFLOAD_NESTED if IRQ_OFFLOAD
select NEED_LIBC_MEM_PARTITION if USERSPACE && TIMING_FUNCTIONS \
&& !BOARD_HAS_TIMING_FUNCTIONS \
&& !SOC_HAS_TIMING_FUNCTIONS
select ARCH_HAS_STACK_CANARIES_TLS
select ARCH_SUPPORTS_MEM_MAPPED_STACKS if X86_MMU && !DEMAND_PAGING
help
x86 architecture
config NIOS2
bool
select ARCH_IS_SET
select ATOMIC_OPERATIONS_C
imply XIP
select ARCH_HAS_TIMING_FUNCTIONS
help
Nios II Gen 2 architecture
config RISCV
bool
select ARCH_IS_SET
select ARCH_SUPPORTS_COREDUMP
select ARCH_SUPPORTS_ROM_START if !SOC_FAMILY_ESPRESSIF_ESP32
select ARCH_HAS_CODE_DATA_RELOCATION
select ARCH_HAS_THREAD_LOCAL_STORAGE
select ARCH_HAS_STACKWALK
select IRQ_OFFLOAD_NESTED if IRQ_OFFLOAD
select USE_SWITCH_SUPPORTED
select USE_SWITCH
select SCHED_IPI_SUPPORTED if SMP
select ARCH_HAS_DIRECTED_IPIS
select BARRIER_OPERATIONS_BUILTIN
imply XIP
help
RISCV architecture
config XTENSA
bool
select ARCH_IS_SET
select USE_SWITCH
select USE_SWITCH_SUPPORTED
select IRQ_OFFLOAD_NESTED if IRQ_OFFLOAD
select ARCH_HAS_CODE_DATA_RELOCATION
select ARCH_HAS_TIMING_FUNCTIONS
select ARCH_MEM_DOMAIN_DATA if USERSPACE
select ARCH_HAS_DIRECTED_IPIS
select THREAD_STACK_INFO
help
Xtensa architecture
config ARCH_POSIX
bool
select ARCH_IS_SET
select ATOMIC_OPERATIONS_BUILTIN
select ARCH_HAS_CUSTOM_SWAP_TO_MAIN
select ARCH_HAS_CUSTOM_BUSY_WAIT
select ARCH_HAS_THREAD_ABORT
select NATIVE_BUILD
select HAS_COVERAGE_SUPPORT
select BARRIER_OPERATIONS_BUILTIN
# POSIX arch based targets get their memory cleared on entry by the host OS
select SKIP_BSS_CLEAR
# Override the C standard used for compilation to C 2011
# This is due to some tests using _Static_assert which is a 2011 feature, but
# otherwise relying on compilers supporting it also when set to C99.
# This was in general ok, but with some host compilers and C library versions
# it led to problems. So we override it to 2011 for the native targets.
select REQUIRES_STD_C11
help
POSIX (native) architecture
config ARCH_IS_SET
bool
help
Helper symbol to detect SoCs forgetting to select one of the arch
symbols above. See the top-level CMakeLists.txt.
menu "General Architecture Options"
source "arch/common/Kconfig"
module = ARCH
module-str = arch
source "subsys/logging/Kconfig.template.log_config"
config BIG_ENDIAN
bool
help
This option tells the build system that the target system is big-endian.
Little-endian architecture is the default and should leave this option
unselected. This option is selected by arch/$ARCH/Kconfig,
soc/**/Kconfig, or boards/**/Kconfig and the user should generally avoid
modifying it. The option is used to select linker script OUTPUT_FORMAT
and command line option for gen_isr_tables.py.
config LITTLE_ENDIAN
# Hidden Kconfig option representing the default little-endian architecture
# This is just the opposite of BIG_ENDIAN and is used for non-negated
# conditional compilation
bool
depends on !BIG_ENDIAN
default y
config 64BIT
bool
help
This option tells the build system that the target system is
using a 64-bit address space, meaning that pointer and long types
are 64 bits wide. This option is selected by arch/$ARCH/Kconfig,
soc/**/Kconfig, or boards/**/Kconfig and the user should generally
avoid modifying it.
# Workaround for not being able to have commas in macro arguments
DT_CHOSEN_Z_SRAM := zephyr,sram
config SRAM_SIZE
int "SRAM Size in kB"
default $(dt_chosen_reg_size_int,$(DT_CHOSEN_Z_SRAM),0,K)
help
The SRAM size in kB. The default value comes from /chosen/zephyr,sram in
devicetree. The user should generally avoid changing it via menuconfig or
in configuration files.
config SRAM_BASE_ADDRESS
hex "SRAM Base Address"
default $(dt_chosen_reg_addr_hex,$(DT_CHOSEN_Z_SRAM))
help
The SRAM base address. The default value comes from
/chosen/zephyr,sram in devicetree. The user should generally avoid
changing it via menuconfig or in configuration files.
if ARC || ARM || ARM64 || NIOS2 || X86 || RISCV
# Workaround for not being able to have commas in macro arguments
DT_CHOSEN_Z_FLASH := zephyr,flash
config FLASH_SIZE
int "Flash Size in kB"
default $(dt_chosen_reg_size_int,$(DT_CHOSEN_Z_FLASH),0,K) if (XIP && (ARM ||ARM64)) || !ARM
default 0 if !XIP
help
This option specifies the size of the flash in kB. It is normally set by
the board's defconfig file and the user should generally avoid modifying
it via the menu configuration.
config FLASH_BASE_ADDRESS
hex "Flash Base Address"
default $(dt_chosen_reg_addr_hex,$(DT_CHOSEN_Z_FLASH)) if (XIP && (ARM || ARM64)) || !ARM
default 0 if !XIP
help
This option specifies the base address of the flash on the board. It is
normally set by the board's defconfig file and the user should generally
avoid modifying it via the menu configuration.
endif # ARM || ARM64 || ARC || NIOS2 || X86 || RISCV
if ARCH_HAS_TRUSTED_EXECUTION
config TRUSTED_EXECUTION_SECURE
bool "Trusted Execution: Secure firmware image"
help
Select this option to enable building a Secure firmware
image for a platform that supports Trusted Execution. A
Secure firmware image will execute in Secure state. It may
allow the CPU to execute in Non-Secure (Normal) state.
Therefore, a Secure firmware image shall be able to
configure security attributions of CPU resources (memory
areas, peripherals, interrupts, etc.) as well as to handle
faults, related to security violations. It may optionally
allow certain functions to be called from the Non-Secure
(Normal) domain.
config TRUSTED_EXECUTION_NONSECURE
depends on !TRUSTED_EXECUTION_SECURE
bool "Trusted Execution: Non-Secure firmware image"
help
Select this option to enable building a Non-Secure
firmware image for a platform that supports Trusted
Execution. A Non-Secure firmware image will execute
in Non-Secure (Normal) state. Therefore, it shall not
access CPU resources (memory areas, peripherals,
interrupts etc.) belonging to the Secure domain.
endif # ARCH_HAS_TRUSTED_EXECUTION
config HW_STACK_PROTECTION
bool "Hardware Stack Protection"
depends on ARCH_HAS_STACK_PROTECTION
help
Select this option to enable hardware-based platform features to
catch stack overflows when the system is running in privileged
mode. If CONFIG_USERSPACE is not enabled, the system is always
running in privileged mode.
Note that this does not necessarily prevent corruption and assertions
about the overall system state when a fault is triggered cannot be
made.
config USERSPACE
bool "User mode threads"
depends on ARCH_HAS_USERSPACE
depends on RUNTIME_ERROR_CHECKS
depends on SRAM_REGION_PERMISSIONS
select THREAD_STACK_INFO
select LINKER_USE_NO_RELAX
help
When enabled, threads may be created or dropped down to user mode,
which has significantly restricted permissions and must interact
with the kernel via system calls. See Zephyr documentation for more
details about this feature.
If a user thread overflows its stack, this will be caught and the
kernel itself will be shielded from harm. Enabling this option
may or may not catch stack overflows when the system is in
privileged mode or handling a system call; to ensure these are always
caught, enable CONFIG_HW_STACK_PROTECTION.
config PRIVILEGED_STACK_SIZE
int "Size of privileged stack"
default 2048 if EMUL
default 1024
depends on ARCH_HAS_USERSPACE
help
This option sets the privileged stack region size that will be used
in addition to the user mode thread stack. During normal execution,
this region will be inaccessible from user mode. During system calls,
this region will be utilized by the system call. This value must be
a multiple of the minimum stack alignment.
config KOBJECT_TEXT_AREA
int "Size of kobject text area"
default 512 if COVERAGE_GCOV
default 512 if NO_OPTIMIZATIONS
default 512 if STACK_CANARIES && RISCV
default 256
depends on ARCH_HAS_USERSPACE
help
Size of kernel object text area. Used in linker script.
config KOBJECT_DATA_AREA_RESERVE_EXTRA_PERCENT
int "Reserve extra kobject data area (in percentage)"
default 100
depends on ARCH_HAS_USERSPACE
help
Multiplication factor used to calculate the size of placeholder to
reserve space for kobject metadata hash table. The hash table is
generated via gperf is highly dependent on the absolute addresses of
kobjects which might change between prebuilts. To reserve enough
space for the hash table during final linking passes to keep
kobjects in same place, the size of reserved space is calculated
from the first prebuilt plus additional space calculated with
this percentage (of the kobject data area in first prebuilt).
config KOBJECT_RODATA_AREA_EXTRA_BYTES
int "Reserve extra bytes for kobject rodata area"
default 16
depends on ARCH_HAS_USERSPACE
help
Reserve a few more bytes for the RODATA region for kobject metadata.
This is to account for the uncertainty of tables generated by gperf.
config GEN_PRIV_STACKS
bool
help
Selected if the architecture requires that privilege elevation stacks
be allocated in a separate memory area. This is typical of arches
whose MPUs require regions to be power-of-two aligned/sized.
FIXME: This should be removed and replaced with checks against
CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT, but both ARM and ARC
changes will be necessary for this.
config STACK_GROWS_UP
bool "Stack grows towards higher memory addresses"
help
Select this option if the architecture has upward growing thread
stacks. This is not common.
config NO_UNUSED_STACK_INSPECTION
bool
help
Selected if the architecture will generate a fault if unused stack
memory is examined, which is the region between the current stack
pointer and the deepest available address in the current stack
region.
config MAX_THREAD_BYTES
int "Bytes to use when tracking object thread permissions"
default 2
depends on USERSPACE
help
Every kernel object will have an associated bitfield to store
thread permissions for that object. This controls the size of the
bitfield (in bytes) and imposes a limit on how many threads can
be created in the system.
config DYNAMIC_OBJECTS
bool "Allow kernel objects to be allocated at runtime"
depends on USERSPACE
help
Enabling this option allows for kernel objects to be requested from
the calling thread's resource pool, at a slight cost in performance
due to the supplemental run-time tables required to validate such
objects.
Objects allocated in this way can be freed with a supervisor-only
API call, or when the number of references to that object drops to
zero.
config NOCACHE_MEMORY
bool "Support for uncached memory"
depends on ARCH_HAS_NOCACHE_MEMORY_SUPPORT
help
Add a "nocache" read-write memory section that is configured to
not be cached. This memory section can be used to perform DMA
transfers when cache coherence issues are not optimal or can not
be solved using cache maintenance operations.
config FRAME_POINTER
bool "Compile the kernel with frame pointers"
select OVERRIDE_FRAME_POINTER_DEFAULT
help
Select Y here to gain precise stack traces at the expense of slightly
increased size and decreased speed.
config ARCH_STACKWALK_MAX_FRAMES
int "Max depth for stack walk function"
default 8
depends on ARCH_HAS_STACKWALK
help
Depending on implementation, this can place a hard limit on the depth of the stack
for the stack walk function to examine.
menu "Interrupt Configuration"
config ISR_TABLES_LOCAL_DECLARATION_SUPPORTED
bool
default y
# Userspace is currently not supported
depends on !USERSPACE
# List of currently supported architectures
depends on ARM || ARM64
# List of currently supported toolchains
depends on "$(ZEPHYR_TOOLCHAIN_VARIANT)" = "zephyr" || "$(ZEPHYR_TOOLCHAIN_VARIANT)" = "gnuarmemb"
config ISR_TABLES_LOCAL_DECLARATION
bool "ISR tables created locally and placed by linker [EXPERIMENTAL]"
depends on ISR_TABLES_LOCAL_DECLARATION_SUPPORTED
select EXPERIMENTAL
help
Enable new scheme of interrupt tables generation.
This is totally different generator that would create tables entries locally
where the IRQ_CONNECT macro is called and then use the linker script to position it
in the right place in memory.
The most important advantage of such approach is that the generated interrupt tables
are LTO compatible.
The drawback is that the support on the architecture port is required.
config DYNAMIC_INTERRUPTS
bool "Installation of IRQs at runtime"
help
Enable installation of interrupts at runtime, which will move some
interrupt-related data structures to RAM instead of ROM, and
on some architectures increase code size.
config SHARED_INTERRUPTS
bool "Set this to enable support for shared interrupts"
depends on GEN_SW_ISR_TABLE
select EXPERIMENTAL
help
Set this to enable support for shared interrupts. Use this with
caution as enabling this will increase the image size by a
non-negligible amount.
config SHARED_IRQ_MAX_NUM_CLIENTS
int "Maximum number of clients allowed per shared interrupt"
default 2
depends on SHARED_INTERRUPTS
help
This option controls the maximum number of clients allowed
per shared interrupt. Set this according to your needs.
config GEN_ISR_TABLES
bool "Use generated IRQ tables"
help
This option controls whether a platform uses the gen_isr_tables
script to generate its interrupt tables. This mechanism will create
an appropriate hardware vector table and/or software IRQ table.
config GEN_IRQ_VECTOR_TABLE
bool "Generate an interrupt vector table"
default y
depends on GEN_ISR_TABLES
help
This option controls whether a platform using gen_isr_tables
needs an interrupt vector table created. Only disable this if the
platform does not use a vector table at all, or requires the vector
table to be in a format that is not an array of function pointers
indexed by IRQ line. In the latter case, the vector table must be
supplied by the application or architecture code.
config ARCH_IRQ_VECTOR_TABLE_ALIGN
int "Alignment size of the interrupt vector table"
default 4
depends on GEN_IRQ_VECTOR_TABLE
help
This option controls the alignment size of the generated
_irq_vector_table. Some architectures need the IRQ vector table
to be aligned to an architecture specific size. The default
size is 4.
choice IRQ_VECTOR_TABLE_TYPE
prompt "IRQ vector table type"
depends on GEN_IRQ_VECTOR_TABLE
default IRQ_VECTOR_TABLE_JUMP_BY_CODE if (RISCV && !RISCV_HAS_CLIC)
default IRQ_VECTOR_TABLE_JUMP_BY_ADDRESS
config IRQ_VECTOR_TABLE_JUMP_BY_ADDRESS
bool "Jump by address"
help
The IRQ vector table contains the address of the interrupt handler.
config IRQ_VECTOR_TABLE_JUMP_BY_CODE
bool "Jump by code"
help
The IRQ vector table contains the opcode of a jump instruction to the
interrupt handler address.
endchoice
config GEN_SW_ISR_TABLE
bool "Generate a software ISR table"
default y
depends on GEN_ISR_TABLES
help
This option controls whether a platform using gen_isr_tables
needs a software ISR table created. This is an array of struct
_isr_table_entry containing the interrupt service routine and supplied
parameter.
config ARCH_SW_ISR_TABLE_ALIGN
int "Alignment size of a software ISR table"
default 64 if RISCV_HAS_CLIC
default 4
depends on GEN_SW_ISR_TABLE
help
This option controls alignment size of generated
_sw_isr_table. Some architecture needs a software ISR table
to be aligned to architecture specific size. The default
size is 4.
config GEN_IRQ_START_VECTOR
int
default 0
depends on GEN_ISR_TABLES
help
On some architectures, part of the vector table may be reserved for
system exceptions and is declared separately from the tables
created by gen_isr_tables.py. When creating these tables, this value
will be subtracted from CONFIG_NUM_IRQS to properly size them.
This is a hidden option which needs to be set per architecture and
left alone.
config IRQ_OFFLOAD
bool "IRQ offload"
depends on TEST
help
Enable irq_offload() API which allows functions to be synchronously
run in interrupt context. Only useful for test cases that need
to validate the correctness of kernel objects in IRQ context.
config IRQ_OFFLOAD_NESTED
bool "irq_offload() supports nested IRQs"
depends on IRQ_OFFLOAD
help
When set by the arch layer, indicates that irq_offload() may
legally be called in interrupt context to cause a
synchronous nested interrupt on the current CPU. Not all
hardware is capable.
config EXCEPTION_DEBUG
bool "Unhandled exception debugging"
default y
depends on PRINTK || LOG
help
Install handlers for various CPU exception/trap vectors to
make debugging them easier, at a small expense in code size.
This prints out the specific exception vector and any associated
error codes.
config EXTRA_EXCEPTION_INFO
bool "Collect extra exception info"
depends on ARCH_HAS_EXTRA_EXCEPTION_INFO
help
This option enables the collection of extra information, such as
register state, when a fault occurs. This information can be useful
to collect for post-mortem analysis and debug of issues.
config SIMPLIFIED_EXCEPTION_CODES
bool "Convert arch specific exception codes to K_ERR_CPU_EXCEPTION"
default y if ZTEST
help
The same piece of faulty code (NULL dereference, etc) can result in
a multitude of potential exception codes at the CPU level, depending
upon whether addresses exist, an MPU is configured, the particular
implementation of the CPU or any number of other reasons. Enabling
this option collapses all the architecture specific exception codes
down to the generic K_ERR_CPU_EXCEPTION, which makes testing code
much more portable.
endmenu # Interrupt configuration
config INIT_ARCH_HW_AT_BOOT
bool "Initialize internal architecture state at boot"
depends on ARCH_SUPPORTS_ARCH_HW_INIT
help
This option instructs Zephyr to force the initialization
of the internal architectural state (for example ARCH-level
HW registers and system control blocks) during boot to
the reset values as specified by the corresponding
architecture manual. The option is useful when the Zephyr
firmware image is chain-loaded, for example, by a debugger
or a bootloader, and we need to guarantee that the internal
states of the architecture core blocks are restored to the
reset values (as specified by the architecture).
Note: the functionality is architecture-specific. For the
implementation details refer to each architecture where
this feature is supported.
endmenu
#
# Architecture Capabilities
#
config ARCH_HAS_SINGLE_THREAD_SUPPORT
bool
config ARCH_HAS_TIMING_FUNCTIONS
bool
config ARCH_HAS_TRUSTED_EXECUTION
bool
config ARCH_HAS_STACK_PROTECTION
bool
config ARCH_HAS_USERSPACE
bool
config ARCH_HAS_EXECUTABLE_PAGE_BIT
bool
config ARCH_HAS_NOCACHE_MEMORY_SUPPORT
bool
config ARCH_HAS_RAMFUNC_SUPPORT
bool
config ARCH_HAS_NESTED_EXCEPTION_DETECTION
bool
config ARCH_SUPPORTS_COREDUMP
bool
config ARCH_SUPPORTS_COREDUMP_THREADS
bool
config ARCH_SUPPORTS_ARCH_HW_INIT
bool
config ARCH_SUPPORTS_ROM_START
bool
config ARCH_HAS_EXTRA_EXCEPTION_INFO
bool
config ARCH_HAS_GDBSTUB
bool
config ARCH_HAS_STACKWALK
bool
help
This is selected when the architecture implemented the arch_stack_walk() API.
config ARCH_HAS_COHERENCE
bool
help
When selected, the architecture supports the
arch_mem_coherent() API and can link into incoherent/cached
memory using the ".cached" linker section.
config ARCH_HAS_THREAD_LOCAL_STORAGE
bool
config ARCH_HAS_SUSPEND_TO_RAM
bool
help
When selected, the architecture supports suspend-to-RAM (S2RAM).
config ARCH_HAS_STACK_CANARIES_TLS
bool
config ARCH_SUPPORTS_MEM_MAPPED_STACKS
bool
help
Select when the architecture supports memory mapped stacks.
#
# Other architecture related options
#
config ARCH_HAS_THREAD_ABORT
bool
config ARCH_HAS_CODE_DATA_RELOCATION
bool
help
When selected, the architecture/SoC implements support for
CODE_DATA_RELOCATION in its linker scripts.
#
# Hidden CPU family configs
#
config CPU_HAS_TEE
bool
help
This option is enabled when the CPU has support for Trusted
Execution Environment (e.g. when it has a security attribution
unit).
config CPU_HAS_DCLS
bool
help
This option is enabled when the processor hardware has support for
Dual-redundant Core Lock-step (DCLS) topology.
config CPU_HAS_FPU
bool
help
This option is enabled when the CPU has hardware floating point
unit.
config CPU_HAS_DSP
bool
help
This option is enabled when the CPU has hardware DSP unit.
config CPU_HAS_FPU_DOUBLE_PRECISION
bool
select CPU_HAS_FPU
help
When enabled, this indicates that the CPU has a double floating point
precision unit.
config CPU_HAS_MPU
bool
help
This option is enabled when the CPU has a Memory Protection Unit (MPU).
config CPU_HAS_MMU
bool
help
This hidden option is selected when the CPU has a Memory Management Unit
(MMU).
config ARCH_HAS_DEMAND_PAGING
bool
help
This hidden configuration should be selected by the architecture if
demand paging is supported.
config ARCH_HAS_RESERVED_PAGE_FRAMES
bool
help
This hidden configuration should be selected by the architecture if
certain RAM page frames need to be marked as reserved and never used for
memory mappings. The architecture will need to implement
arch_reserved_pages_update().
config ARCH_HAS_DIRECTED_IPIS
bool
help
This hidden configuration should be selected by the architecture if
it has an implementation for arch_sched_directed_ipi() which allows
for IPIs to be directed to specific CPUs.
config CPU_HAS_DCACHE
bool
help
This hidden configuration should be selected when the CPU has a d-cache.
config CPU_CACHE_INCOHERENT
bool
help
This hidden configuration should be selected when the CPU has
incoherent cache. This applies to intra-CPU multiprocessing
incoherence and makes only sense when MP_NUM_CPUS > 1.
config CPU_HAS_ICACHE
bool
help
This hidden configuration should be selected when the CPU has an i-cache.
config ARCH_MAPS_ALL_RAM
bool
help
This hidden option is selected by the architecture to inform the kernel
that all RAM is mapped at boot, and not just the bounds of the Zephyr image.
If RAM starts at 0x0, the first page must remain un-mapped to catch NULL
pointer dereferences. With this enabled, the kernel will not assume that
virtual memory addresses past the kernel image are available for mappings,
but instead takes into account an entire RAM mapping instead.
This is typically set by architectures which need direct access to all memory.
It is the architecture's responsibility to mark reserved memory regions
as such in arch_reserved_pages_update().
Although the kernel will not disturb this RAM mapping by re-mapping the associated
virtual addresses elsewhere, this is limited to only management of the
virtual address space. The kernel's page frame ontology will not consider
this mapping at all; non-kernel pages will be considered free (unless marked
as reserved) and K_MEM_PAGE_FRAME_MAPPED will not be set.
config DCLS
bool "Processor is configured in DCLS mode"
depends on CPU_HAS_DCLS
default y
help
This option is enabled when the processor hardware is configured in
Dual-redundant Core Lock-step (DCLS) topology. For the processor that
supports DCLS, but is configured in split-lock mode (by default or
changed at flash time), this option should be disabled.
menuconfig MPU
bool "MPU features"
depends on CPU_HAS_MPU
help
This option, when enabled, indicates to the core kernel that an MPU
is enabled.
if MPU
module = MPU
module-str = mpu
source "subsys/logging/Kconfig.template.log_config"
config MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT
bool
help
This option is enabled when the MPU requires a power of two alignment
and size for MPU regions.
config MPU_REQUIRES_NON_OVERLAPPING_REGIONS
bool
help
This option is enabled when the MPU requires the active (i.e. enabled)
MPU regions to be non-overlapping with each other.
config MPU_GAP_FILLING
bool "Force MPU to be filling in background memory regions"
depends on MPU_REQUIRES_NON_OVERLAPPING_REGIONS
default y if !USERSPACE
help
This Kconfig option instructs the MPU driver to enforce
a full kernel SRAM partitioning, when it programs the
dynamic MPU regions (user thread stack, PRIV stack guard
and application memory domains) during context-switch. We
allow this to be a configurable option, in order to be able
to switch the option off and have an increased number of MPU
regions available for application memory domain programming.
Notes:
An increased number of MPU regions should only be required,
when building with USERSPACE support. As a result, when we
build without USERSPACE support, gap filling should always
be required.
When the option is switched off, access to memory areas not
covered by explicit MPU regions is restricted to privileged
code on an ARCH-specific basis. Refer to ARCH-specific
documentation for more information on how this option is
used.
endif # MPU
config SRAM_REGION_PERMISSIONS
bool "Assign appropriate permissions to kernel areas in SRAM"
depends on MMU || MPU
default y
help
This option indicates that memory protection hardware
is present, enabled, and regions have been configured at boot for memory
ranges within the kernel image.
If this option is turned on, certain areas of the kernel image will
have the following access policies applied for all threads, including
supervisor threads:
1) All program text will have read-only, execute memory permission
2) All read-only data will have read-only permission, and execution
disabled if the hardware supports it.
3) All other RAM addresses will have read-write permission, and
execution disabled if the hardware supports it.
Options such as USERSPACE or HW_STACK_PROTECTION may additionally
impose additional policies on the memory map, which may be global
or local to the current running thread.
This option may consume additional memory to satisfy memory protection
hardware alignment constraints.
If this option is disabled, the entire kernel will have default memory
access permissions set, typically read/write/execute. It may be desirable
to turn this off on MMU systems which are using the MMU for demand
paging, do not need memory protection, and would rather not use up
RAM for the alignment between regions.
config CODE_DATA_RELOCATION
bool "Support code/data section relocation"
depends on ARCH_HAS_CODE_DATA_RELOCATION
help
Enable support for relocating .text, data and .bss sections from specified
files and placing them in a chosen memory region. Files to relocate and
the target regions should be specified in CMakeLists.txt using
zephyr_code_relocate().
menu "DSP Options"
config DSP_SHARING
bool "DSP register sharing"
depends on CPU_HAS_DSP
help
This option enables preservation of the hardware DSP registers
across context switches to allow multiple threads to perform concurrent
DSP operations.
endmenu
menu "Floating Point Options"
config FPU
bool "Floating point unit (FPU)"
depends on CPU_HAS_FPU
help
This option enables the hardware Floating Point Unit (FPU), in order to
support using the floating point registers and instructions.
When this option is enabled, by default, threads may use the floating
point registers only in an exclusive manner, and this usually means that
only one thread may perform floating point operations.
If it is necessary for multiple threads to perform concurrent floating
point operations, the "FPU register sharing" option must be enabled to
preserve the floating point registers across context switches.
Note that this option cannot be selected for the platforms that do not
include a hardware floating point unit; the floating point support for
those platforms is dependent on the availability of the toolchain-
provided software floating point library.
config FPU_SHARING
bool "FPU register sharing"
depends on FPU && MULTITHREADING
help
This option enables preservation of the hardware floating point registers
across context switches to allow multiple threads to perform concurrent
floating point operations.
Note that some compiler configurations may activate a floating point
context by generating FP instructions for any thread, and that
context must be preserved when switching such threads in and out.
The developers can still disable the FP sharing mode in their
application projects, and switch to Unshared FP registers mode,
if it is guaranteed that the image code does not generate FP
instructions outside the single thread context that is allowed
to do so.
endmenu
menu "Cache Options"
config DCACHE
bool "Data cache (d-cache) support"
depends on CPU_HAS_DCACHE
default y
help
This option enables the support for the data cache (d-cache).
config ICACHE
bool "Instruction cache (i-cache) support"
depends on CPU_HAS_ICACHE
default y
help
This option enables the support for the instruction cache (i-cache).
config CACHE_DOUBLEMAP
bool "Cache double-mapping support"
depends on CPU_CACHE_INCOHERENT
default y
help
Double-mapping behavior where a pointer can be cheaply converted to
point to the same cached/uncached memory at different locations.
This applies to intra-CPU multiprocessing incoherence and makes only
sense when MP_NUM_CPUS > 1.
config CACHE_MANAGEMENT
bool "Cache management features"
depends on DCACHE || ICACHE
help
This option enables the cache management functions backed by arch or
driver code.
config DCACHE_LINE_SIZE_DETECT
bool "Detect d-cache line size at runtime"
depends on CACHE_MANAGEMENT && DCACHE
help
This option enables querying some architecture-specific hardware for
finding the d-cache line size at the expense of taking more memory and
code and a slightly increased boot time.
If the CPU's d-cache line size is known in advance, disable this option and
manually enter the value for DCACHE_LINE_SIZE or set it in the DT
using the 'd-cache-line-size' property.
config DCACHE_LINE_SIZE
int "d-cache line size"
depends on CACHE_MANAGEMENT && DCACHE && !DCACHE_LINE_SIZE_DETECT
default 0
help
Size in bytes of a CPU d-cache line. If this is set to 0 the value is
obtained from the 'd-cache-line-size' DT property instead if present.
Detect automatically at runtime by selecting DCACHE_LINE_SIZE_DETECT.
config ICACHE_LINE_SIZE_DETECT
bool "Detect i-cache line size at runtime"
depends on CACHE_MANAGEMENT && ICACHE
help
This option enables querying some architecture-specific hardware for
finding the i-cache line size at the expense of taking more memory and
code and a slightly increased boot time.
If the CPU's i-cache line size is known in advance, disable this option and
manually enter the value for ICACHE_LINE_SIZE or set it in the DT
using the 'i-cache-line-size' property.
config ICACHE_LINE_SIZE
int "i-cache line size"
depends on CACHE_MANAGEMENT && ICACHE && !ICACHE_LINE_SIZE_DETECT
default 0
help
Size in bytes of a CPU i-cache line. If this is set to 0 the value is
obtained from the 'i-cache-line-size' DT property instead if present.
Detect automatically at runtime by selecting ICACHE_LINE_SIZE_DETECT.
choice CACHE_TYPE
prompt "Cache type"
depends on CACHE_MANAGEMENT
default ARCH_CACHE
config ARCH_CACHE
bool "Integrated cache controller"
help
Integrated on-core cache controller
config EXTERNAL_CACHE
bool "External cache controller"
help
External cache controller
endchoice
endmenu
config ARCH
string
help
System architecture string.
config TOOLCHAIN_HAS_BUILTIN_FFS
bool
default y if !(64BIT && RISCV)
help
Hidden option to signal that toolchain has __builtin_ffs*().
config ARCH_HAS_CUSTOM_CPU_IDLE
bool
help
This option allows applications to override the default arch idle
implementation with a custom one.
config ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
bool
help
This option allows applications to override the default arch atomic idle
implementation with a custom one.
config ARCH_HAS_CUSTOM_SWAP_TO_MAIN
bool
help
It's possible that an architecture port cannot use _Swap() to swap to
the _main() thread, but instead must do something custom. It must
enable this option in that case.
config ARCH_HAS_CUSTOM_BUSY_WAIT
bool
help
It's possible that an architecture port cannot or does not want to use
the provided k_busy_wait(), but instead must do something custom. It must
enable this option in that case.
``` | /content/code_sandbox/arch/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 8,032 |
```c
/*
*
* based on arch/riscv/core/thread.c
*
*/
#include <zephyr/kernel.h>
extern uint32_t mips_cp0_status_int_mask;
void z_thread_entry(k_thread_entry_t thread,
void *arg1,
void *arg2,
void *arg3);
/*
 * Architecture hook invoked when a new thread is created.
 *
 * Builds the initial exception stack frame (ESF) at the top of the
 * thread's stack so that the first context switch into this thread
 * "returns" into z_thread_entry() with the entry point and its three
 * arguments already loaded in a0-a3.
 */
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3)
{
	struct arch_esf *esf;

	/* Carve an aligned exception frame out of the top of the stack */
	esf = (struct arch_esf *)Z_STACK_PTR_ALIGN(
		Z_STACK_PTR_TO_FRAME(struct arch_esf, stack_ptr)
	);

	/* Arrange z_thread_entry(entry, p1, p2, p3) via argument registers */
	esf->a0 = (unsigned long)entry;
	esf->a1 = (unsigned long)p1;
	esf->a2 = (unsigned long)p2;
	esf->a3 = (unsigned long)p3;

	/* Resume with the default status plus the currently unmasked IRQs */
	esf->status = CP0_STATUS_DEF_RESTORE
		| mips_cp0_status_int_mask;
	esf->epc = (unsigned long)z_thread_entry;

	thread->callee_saved.sp = (unsigned long)esf;
}
``` | /content/code_sandbox/arch/mips/core/thread.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 264 |
```unknown
#
#
# based on arch/riscv/Kconfig
#
#
menu "MIPS Options"
depends on MIPS
config ARCH
string
default "mips"
config GEN_ISR_TABLES
default y
config GEN_IRQ_VECTOR_TABLE
default n
config GEN_SW_ISR_TABLE
default y
config NUM_IRQS
int
# Bump the kernel default stack size values.
config MAIN_STACK_SIZE
default 4096 if COVERAGE_GCOV
default 2048
config IDLE_STACK_SIZE
default 1024
config ISR_STACK_SIZE
default 4096
config TEST_EXTRA_STACK_SIZE
default 4096 if COVERAGE_GCOV
default 2048
config SYSTEM_WORKQUEUE_STACK_SIZE
default 4096
config CMSIS_THREAD_MAX_STACK_SIZE
default 2048
config CMSIS_V2_THREAD_MAX_STACK_SIZE
default 2048
config CMSIS_V2_THREAD_DYNAMIC_STACK_SIZE
default 2048
config IPM_CONSOLE_STACK_SIZE
default 4096 if COVERAGE_GCOV
default 1024
endmenu
``` | /content/code_sandbox/arch/mips/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 225 |
```c
/*
*
* based on arch/riscv/core/irq_offload.c
*
*/
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <zephyr/irq.h>
#include <zephyr/irq_offload.h>
volatile irq_offload_routine_t _offload_routine;
static volatile const void *offload_param;
/*
* Called by z_mips_enter_irq()
*
* Just in case the offload routine itself generates an unhandled
* exception, clear the offload_routine global before executing.
*/
void z_irq_do_offload(void)
{
irq_offload_routine_t tmp;
if (!_offload_routine) {
return;
}
tmp = _offload_routine;
_offload_routine = NULL;
tmp((const void *)offload_param);
}
/*
 * Schedule @routine to run once in interrupt context with @parameter.
 *
 * The routine/parameter pair is latched into file-scope globals and a
 * MIPS 'syscall' instruction is issued to enter the exception handler,
 * which dispatches to z_irq_do_offload(). Interrupts are locked around
 * the sequence so a competing offload cannot clobber the globals.
 */
void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
{
	unsigned int key;

	key = irq_lock();

	/* Order matters: both globals must be valid before the trap fires */
	_offload_routine = routine;
	offload_param = parameter;

	/* Generate irq offload trap */
	__asm__ volatile ("syscall");

	irq_unlock(key);
}
``` | /content/code_sandbox/arch/mips/core/irq_offload.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 247 |
```unknown
/*
*
* based on arch/riscv/core/swap.S
*
*/
#include <zephyr/toolchain.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include <mips/regdef.h>
/*
 * unsigned int arch_swap(unsigned int key)
 *
 * Always called with interrupts locked.
 * The IRQ lock state (key) is passed in register a0.
 */
GTEXT(arch_swap)
SECTION_FUNC(exception.other, arch_swap)
	/* Make a system call to perform context switch */
	syscall

	/*
	 * Execution resumes here when this thread is next rescheduled.
	 * The caller-saved registers (including a0 = key) have been
	 * restored by the exception return path.
	 */
	la k0, _kernel

	/* Get pointer to _kernel.current */
	lw k1, _kernel_offset_to_current(k0)

	/* Load return value of arch_swap function in register v0 */
	lw v0, _thread_offset_to_swap_return_value(k1)

	/*
	 * Unlock irq, following the IRQ lock state held in a0
	 * (the original 'key' argument).
	 */
	mfc0 k0, CP0_STATUS
	or k0, k0, a0
	mtc0 k0, CP0_STATUS
	ehb

	/* Return */
	jr ra
``` | /content/code_sandbox/arch/mips/core/swap.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 256 |
```c
/*
*
*/
/**
* @file
* @brief Full C support initialization
*/
#include <kernel_internal.h>
#include <zephyr/irq.h>
/*
 * Install the exception vector and initialize interrupt state.
 *
 * Copies the assembly vector stub (__isr_vec) to the general exception
 * vector entry (EBASE + 0x180) and clears the software interrupt mask,
 * then switches the CPU from the BOOTROM vector to the RAM vector.
 */
static void interrupt_init(void)
{
	extern char __isr_vec[];
	extern uint32_t mips_cp0_status_int_mask;
	unsigned long ebase;

	irq_lock();

	/* No IRQ lines unmasked yet; arch_irq_enable() populates this */
	mips_cp0_status_int_mask = 0;

	/* NOTE(review): assumes EBASE fixed at KSEG0 base — confirm for SoC */
	ebase = 0x80000000;

	/* 0x180: general exception vector offset; 0x80: stub size in bytes */
	memcpy((void *)(ebase + 0x180), __isr_vec, 0x80);

	/*
	 * Disable boot exception vector in BOOTROM,
	 * use exception vector in RAM.
	 */
	write_c0_status(read_c0_status() & ~(ST0_BEV));
}
/**
 *
 * @brief Prepare to and run C code
 *
 * This routine prepares for the execution of and runs C code.
 * It never returns: z_cstart() hands control to the kernel.
 *
 * @return N/A
 */
void z_prep_c(void)
{
	z_bss_zero();
	interrupt_init();
	z_cstart();
	CODE_UNREACHABLE;
}
``` | /content/code_sandbox/arch/mips/core/prep_c.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 213 |
```c
/*
*
*/
#include <zephyr/irq.h>
#include <zephyr/tracing/tracing.h>
/*
 * Enter low-power idle: emit the trace hook, unlock interrupts with
 * @key, then execute the MIPS 'wait' instruction until an interrupt
 * wakes the CPU.
 */
static ALWAYS_INLINE void mips_idle(unsigned int key)
{
	sys_trace_idle();
	/* unlock interrupts */
	irq_unlock(key);
	/* wait for interrupt */
	__asm__ volatile("wait");
}

#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
void arch_cpu_idle(void)
{
	/* key of 1: leave with interrupts enabled */
	mips_idle(1);
}
#endif

#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
void arch_cpu_atomic_idle(unsigned int key)
{
	mips_idle(key);
}
#endif
``` | /content/code_sandbox/arch/mips/core/cpu_idle.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 116 |
```unknown
/*
*
*/
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <mips/regdef.h>
#include <mips/mipsregs.h>
GTEXT(__initialize)
GTEXT(__stack)
GTEXT(z_prep_c)

/*
 * Remainder of asm-land initialization code before we can jump into
 * the C domain.
 */
SECTION_FUNC(TEXT, __initialize)
	.set noreorder

	/* Clear any pending exception cause state */
	mtc0 zero, CP0_CAUSE
	ehb

	/* Leave error level and keep interrupts disabled until kernel is up */
	mfc0 k0, CP0_STATUS
	li k1, ~(ST0_ERL | ST0_IE)
	and k0, k1
	mtc0 k0, CP0_STATUS
	ehb

#ifdef CONFIG_INIT_STACKS
	/* Pre-populate all bytes in z_interrupt_stacks with 0xAA */
	la t0, z_interrupt_stacks
	li t1, CONFIG_ISR_STACK_SIZE
	add t1, t1, t0
	/* Populate z_interrupt_stacks with 0xaaaaaaaa, one word at a time */
	li t2, 0xaaaaaaaa
aa_loop:
	sw t2, 0(t0)
	addi t0, t0, 4
	blt t0, t1, aa_loop
	nop /* delay slot */
#endif

	/*
	 * Setup stack pointer.
	 */
	la sp, __stack

	/*
	 * Jump into C domain.
	 */
	la v0, z_prep_c
	jal v0
	nop /* delay slot */
``` | /content/code_sandbox/arch/mips/core/reset.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 318 |
```unknown
/*
*
* based on arch/riscv/core/isr.S and arch/nios2/core/exception.S
*
*/
#include <zephyr/toolchain.h>
#include <zephyr/kernel_structs.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include <mips/regdef.h>
#include <mips/mipsregs.h>
/* Shorthand for ESF / thread-struct field offsets generated by offsets.c */
#define ESF_O(FIELD) __struct_arch_esf_##FIELD##_OFFSET
#define THREAD_O(FIELD) _thread_offset_to_##FIELD

/* Convenience macros for loading/storing register states. */

/* Apply 'op' (load or store) to every callee-saved register, addressed
 * relative to the thread struct pointed to by 'reg'.
 */
#define DO_CALLEE_SAVED(op, reg) \
	op s0, THREAD_O(s0)(reg) ;\
	op s1, THREAD_O(s1)(reg) ;\
	op s2, THREAD_O(s2)(reg) ;\
	op s3, THREAD_O(s3)(reg) ;\
	op s4, THREAD_O(s4)(reg) ;\
	op s5, THREAD_O(s5)(reg) ;\
	op s6, THREAD_O(s6)(reg) ;\
	op s7, THREAD_O(s7)(reg) ;\
	op s8, THREAD_O(s8)(reg) ;

#define STORE_CALLEE_SAVED(reg) \
	DO_CALLEE_SAVED(OP_STOREREG, reg)

#define LOAD_CALLEE_SAVED(reg) \
	DO_CALLEE_SAVED(OP_LOADREG, reg)

/* Apply 'op' to every caller-saved register slot of the ESF on the stack */
#define DO_CALLER_SAVED(op) \
	op ra, ESF_O(ra)(sp) ;\
	op gp, ESF_O(gp)(sp) ;\
	op AT, ESF_O(at)(sp) ;\
	op t0, ESF_O(t0)(sp) ;\
	op t1, ESF_O(t1)(sp) ;\
	op t2, ESF_O(t2)(sp) ;\
	op t3, ESF_O(t3)(sp) ;\
	op t4, ESF_O(t4)(sp) ;\
	op t5, ESF_O(t5)(sp) ;\
	op t6, ESF_O(t6)(sp) ;\
	op t7, ESF_O(t7)(sp) ;\
	op t8, ESF_O(t8)(sp) ;\
	op t9, ESF_O(t9)(sp) ;\
	op a0, ESF_O(a0)(sp) ;\
	op a1, ESF_O(a1)(sp) ;\
	op a2, ESF_O(a2)(sp) ;\
	op a3, ESF_O(a3)(sp) ;\
	op v0, ESF_O(v0)(sp) ;\
	op v1, ESF_O(v1)(sp) ;

/* Allocate the ESF on the stack, then spill caller-saved state into it */
#define STORE_CALLER_SAVED() \
	addi sp, sp, -__struct_arch_esf_SIZEOF ;\
	DO_CALLER_SAVED(OP_STOREREG) ;

/* Reload caller-saved state from the ESF, then release it */
#define LOAD_CALLER_SAVED() \
	DO_CALLER_SAVED(OP_LOADREG) ;\
	addi sp, sp, __struct_arch_esf_SIZEOF ;
/* imports */
GTEXT(_Fault)
GTEXT(_k_neg_eagain)
GTEXT(z_thread_mark_switched_in)
/* exports */
GTEXT(__isr_vec)
SECTION_FUNC(exception.entry, __isr_vec)
	/* Trampoline at the exception vector: jump to the real handler */
	la k0, _mips_interrupt
	jr k0

SECTION_FUNC(exception.other, _mips_interrupt)
	.set noat
	/*
	 * Save caller-saved registers on current thread stack.
	 */
	STORE_CALLER_SAVED()

	/* save CP0 registers */
	mfhi t0
	mflo t1
	OP_STOREREG t0, ESF_O(hi)(sp)
	OP_STOREREG t1, ESF_O(lo)(sp)
	mfc0 t0, CP0_EPC
	OP_STOREREG t0, ESF_O(epc)(sp)
	mfc0 t1, CP0_BADVADDR
	OP_STOREREG t1, ESF_O(badvaddr)(sp)
	mfc0 t0, CP0_STATUS
	OP_STOREREG t0, ESF_O(status)(sp)
	mfc0 t1, CP0_CAUSE
	OP_STOREREG t1, ESF_O(cause)(sp)

	/*
	 * Check if exception is the result of an interrupt or not.
	 * k1 = ExcCode field extracted from CP0 Cause (held in t1).
	 */
	li k0, CAUSE_EXP_MASK
	and k1, k0, t1
	srl k1, k1, CAUSE_EXP_SHIFT

	/* ExcCode == 8 (SYSCALL) ? */
	li k0, 8
	beq k0, k1, is_kernel_syscall

	/* a0 = ((cause & status) & CAUSE_IP_MASK) >> CAUSE_IP_SHIFT */
	and t1, t1, t0
	li a0, CAUSE_IP_MASK
	and a0, a0, t1
	srl a0, a0, CAUSE_IP_SHIFT

	/* ExcCode == 0 (INTERRUPT) ? if not, go to unhandled */
	bnez k1, unhandled

	/* cause IP_MASK != 0 ? */
	bnez a0, is_interrupt

unhandled:
	/* Neither a syscall nor a pending interrupt: fatal fault.
	 * Pass the ESF (current sp) to _Fault.
	 */
	move a0, sp
	jal _Fault
	eret

is_kernel_syscall:
	/*
	 * A syscall is the result of an syscall instruction, in which case the
	 * EPC will contain the address of the syscall instruction.
	 * Increment saved EPC by 4 to prevent triggering the same syscall
	 * again upon exiting the ISR.
	 */
	OP_LOADREG k0, ESF_O(epc)(sp)
	addi k0, k0, 4
	OP_STOREREG k0, ESF_O(epc)(sp)

#ifdef CONFIG_IRQ_OFFLOAD
	/*
	 * Determine if the system call is the result of an IRQ offloading.
	 * Done by checking if _offload_routine is not pointing to NULL.
	 * If NULL, jump to reschedule to perform a context-switch, otherwise,
	 * jump to is_interrupt to handle the IRQ offload.
	 */
	la t0, _offload_routine
	OP_LOADREG t1, 0(t0)
	/*
	 * Put 0 into a0: call z_mips_enter_irq() with ipending==0
	 * to prevent spurious interrupt.
	 */
	move a0, zero
	bnez t1, is_interrupt
#endif /* CONFIG_IRQ_OFFLOAD */

	/*
	 * Go to reschedule to handle context-switch
	 */
	j reschedule

is_interrupt:
	/*
	 * Save current thread stack pointer and switch
	 * stack pointer to interrupt stack.
	 */

	/* Save thread stack pointer to temp register k0 */
	move k0, sp

	/* Switch to interrupt stack */
	la k1, _kernel
	OP_LOADREG sp, _kernel_offset_to_irq_stack(k1)

	/*
	 * Save thread stack pointer on interrupt stack
	 */
	addi sp, sp, -16
	OP_STOREREG k0, 0(sp)

on_irq_stack:
	/*
	 * Enter C interrupt handling code. Value of ipending will be the
	 * function parameter since we put it in a0
	 */
	jal z_mips_enter_irq

on_thread_stack:
	/* Restore thread stack pointer */
	OP_LOADREG sp, 0(sp)

#ifdef CONFIG_PREEMPT_ENABLED
	/*
	 * Check if we need to perform a reschedule
	 */

	/* Get pointer to _kernel.current */
	OP_LOADREG t2, _kernel_offset_to_current(k1)

	/*
	 * Check if next thread to schedule is current thread.
	 * If yes do not perform a reschedule
	 */
	OP_LOADREG t3, _kernel_offset_to_ready_q_cache(k1)
	beq t3, t2, no_reschedule
#else
	j no_reschedule
#endif /* CONFIG_PREEMPT_ENABLED */

reschedule:
	/*
	 * Check if the current thread is the same as the thread on the ready Q. If
	 * so, do not reschedule.
	 * Note:
	 * Sometimes this code is execute back-to-back before the target thread
	 * has a chance to run. If this happens, the current thread and the
	 * target thread will be the same.
	 */
	la t0, _kernel
	OP_LOADREG t2, _kernel_offset_to_current(t0)
	OP_LOADREG t3, _kernel_offset_to_ready_q_cache(t0)
	beq t2, t3, no_reschedule

	/* Get reference to _kernel */
	la t0, _kernel

	/* Get pointer to _kernel.current */
	OP_LOADREG t1, _kernel_offset_to_current(t0)

	/*
	 * Save callee-saved registers of current kernel thread
	 * prior to handle context-switching
	 */
	STORE_CALLEE_SAVED(t1)

skip_callee_saved_reg:
	/*
	 * Save stack pointer of current thread and set the default return value
	 * of z_swap to _k_neg_eagain for the thread.
	 */
	OP_STOREREG sp, _thread_offset_to_sp(t1)
	la t2, _k_neg_eagain
	lw t3, 0(t2)
	sw t3, _thread_offset_to_swap_return_value(t1)

	/* Get next thread to schedule. */
	OP_LOADREG t1, _kernel_offset_to_ready_q_cache(t0)

	/*
	 * Set _kernel.current to new thread loaded in t1
	 */
	OP_STOREREG t1, _kernel_offset_to_current(t0)

	/* Switch to new thread stack */
	OP_LOADREG sp, _thread_offset_to_sp(t1)

	/* Restore callee-saved registers of new thread */
	LOAD_CALLEE_SAVED(t1)

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	jal z_thread_mark_switched_in
#endif

	/* fallthrough */

no_reschedule:
	/* restore CP0 */
	OP_LOADREG t1, ESF_O(hi)(sp)
	OP_LOADREG t2, ESF_O(lo)(sp)
	mthi t1
	mtlo t2

	OP_LOADREG k0, ESF_O(epc)(sp)
	mtc0 k0, CP0_EPC
	OP_LOADREG k1, ESF_O(status)(sp)
	mtc0 k1, CP0_STATUS
	ehb

	/* Restore caller-saved registers from thread stack */
	LOAD_CALLER_SAVED()

	/* exit ISR */
	eret
``` | /content/code_sandbox/arch/mips/core/isr.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,269 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
/*
 * Dump the exception stack frame (when available) and escalate to the
 * kernel fatal-error handler. Never returns.
 *
 * @param reason one of the K_ERR_* fatal reason codes
 * @param esf exception stack frame captured at the fault, or NULL
 */
FUNC_NORETURN void z_mips_fatal_error(unsigned int reason,
				      const struct arch_esf *esf)
{
#ifdef CONFIG_EXCEPTION_DEBUG
	if (esf != NULL) {
		/* sp and s8 are not saved in the ESF, hence the blank slots */
		LOG_ERR("$ 0 : (ze) %08lx(at) %08lx(v0) %08lx(v1)\n",
			esf->at, esf->v0, esf->v1);
		LOG_ERR("$ 4 : %08lx(a0) %08lx(a1) %08lx(a2) %08lx(a3)\n",
			esf->a0, esf->a1, esf->a2, esf->a3);
		LOG_ERR("$ 8 : %08lx(t0) %08lx(t1) %08lx(t2) %08lx(t3)\n",
			esf->t0, esf->t1, esf->t2, esf->t3);
		LOG_ERR("$12 : %08lx(t4) %08lx(t5) %08lx(t6) %08lx(t7)\n",
			esf->t4, esf->t5, esf->t6, esf->t7);
		LOG_ERR("...\n");
		LOG_ERR("$24 : %08lx(t8) %08lx(t9)\n",
			esf->t8, esf->t9);
		LOG_ERR("$28 : %08lx(gp) (sp) (s8) %08lx(ra)\n",
			esf->gp, esf->ra);
		LOG_ERR("EPC : %08lx\n", esf->epc);
		LOG_ERR("Status: %08lx\n", esf->status);
		LOG_ERR("Cause : %08lx\n", esf->cause);
		LOG_ERR("BadVA : %08lx\n", esf->badvaddr);
	}
#endif /* CONFIG_EXCEPTION_DEBUG */

	z_fatal_error(reason, esf);
	CODE_UNREACHABLE;
}
/*
 * Map a CP0 Cause ExcCode value to a human-readable description.
 *
 * @param cause ExcCode field (0..31) extracted from the CP0 Cause register
 * @return pointer to a static string; "unknown" for unrecognized codes
 */
static char *cause_str(unsigned long cause)
{
	/* Indexed by ExcCode; unassigned slots stay NULL */
	static const char *const descs[] = {
		[0] = "interrupt pending",
		[1] = "TLB modified",
		[2] = "TLB miss on load or ifetch",
		[3] = "TLB miss on store",
		[4] = "address error on load or ifetch",
		[5] = "address error on store",
		[6] = "bus error on ifetch",
		[7] = "bus error on load or store",
		[8] = "system call",
		[9] = "breakpoint",
		[10] = "reserved instruction",
		[11] = "coprocessor unusable",
		[12] = "arithmetic overflow",
		[13] = "trap instruction",
		[14] = "virtual coherency instruction",
		[15] = "floating point",
		[16] = "iwatch",
		[23] = "dwatch",
		[31] = "virtual coherency data",
	};

	if (cause < sizeof(descs) / sizeof(descs[0]) && descs[cause] != NULL) {
		return (char *)descs[cause];
	}

	return (char *)"unknown";
}
/*
 * General exception entry from assembly (the isr.S 'unhandled' path).
 * Logs the decoded ExcCode and raises a CPU-exception fatal error.
 */
void _Fault(struct arch_esf *esf)
{
	unsigned long cause;

	cause = (read_c0_cause() & CAUSE_EXP_MASK) >> CAUSE_EXP_SHIFT;

	LOG_ERR("");
	LOG_ERR(" cause: %ld, %s", cause, cause_str(cause));

	z_mips_fatal_error(K_ERR_CPU_EXCEPTION, esf);
}
``` | /content/code_sandbox/arch/mips/core/fatal.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 790 |
```c
/*
*
* based on arch/nios2/core/irq_manage.c
*
*/
#include <zephyr/kernel.h>
#include <kswap.h>
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
uint32_t mips_cp0_status_int_mask;
/*
 * Default handler installed on unconnected IRQ lines: log the raw
 * CP0 Cause ExcCode and die with K_ERR_SPURIOUS_IRQ. Never returns.
 */
FUNC_NORETURN void z_irq_spurious(const void *unused)
{
	unsigned long cause;

	ARG_UNUSED(unused);

	cause = (read_c0_cause() & CAUSE_EXP_MASK) >> CAUSE_EXP_SHIFT;

	LOG_ERR("Spurious interrupt detected! CAUSE: %ld", cause);

	z_mips_fatal_error(K_ERR_SPURIOUS_IRQ, NULL);
}
/*
 * Unmask IRQ line @irq in the CP0 Status register and record it in the
 * software mask applied when building new thread contexts.
 */
void arch_irq_enable(unsigned int irq)
{
	uint32_t bit = ST0_IP0 << irq;
	unsigned int key = irq_lock();

	mips_cp0_status_int_mask |= bit;
	write_c0_status(read_c0_status() | bit);

	irq_unlock(key);
}
/*
 * Mask IRQ line @irq in the CP0 Status register and drop it from the
 * software interrupt mask applied to newly created threads.
 *
 * Fix: removed the stray ';' that followed the function's closing
 * brace (a harmless but non-idiomatic empty declaration).
 */
void arch_irq_disable(unsigned int irq)
{
	unsigned int key;
	uint32_t irq_mask;

	key = irq_lock();

	irq_mask = ST0_IP0 << irq;
	mips_cp0_status_int_mask &= ~irq_mask;
	write_c0_status(read_c0_status() & ~irq_mask);

	irq_unlock(key);
}
/*
 * Report whether IRQ line @irq is currently unmasked.
 * Returns the raw Status bit (nonzero) when enabled, 0 otherwise.
 */
int arch_irq_is_enabled(unsigned int irq)
{
	uint32_t bit = ST0_IP0 << irq;

	return read_c0_status() & bit;
}
/*
 * C-level interrupt dispatch, called from the assembly ISR with the
 * pending-interrupt bits in @ipending. Services each pending line in
 * LSB-first order via _sw_isr_table, tracking ISR nesting depth.
 */
void z_mips_enter_irq(uint32_t ipending)
{
	_current_cpu->nested++;

#ifdef CONFIG_IRQ_OFFLOAD
	/* Run a queued irq_offload() routine, if any */
	z_irq_do_offload();
#endif

	while (ipending) {
		int index;
		struct _isr_table_entry *ite;

		if (IS_ENABLED(CONFIG_TRACING_ISR)) {
			sys_trace_isr_enter();
		}

		/* Lowest-numbered pending line is serviced first */
		index = find_lsb_set(ipending) - 1;
		ipending &= ~BIT(index);

		ite = &_sw_isr_table[index];
		ite->isr(ite->arg);

		if (IS_ENABLED(CONFIG_TRACING_ISR)) {
			sys_trace_isr_exit();
		}
	}

	_current_cpu->nested--;

	if (IS_ENABLED(CONFIG_STACK_SENTINEL)) {
		z_check_stack_sentinel();
	}
}
#ifdef CONFIG_DYNAMIC_INTERRUPTS
/*
 * Install @routine/@parameter for @irq at runtime via the software ISR
 * table. @priority and @flags are ignored here — this port exposes no
 * per-line hardware priorities. Returns the IRQ line number.
 */
int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
			     void (*routine)(const void *parameter),
			     const void *parameter, uint32_t flags)
{
	ARG_UNUSED(flags);
	ARG_UNUSED(priority);

	z_isr_install(irq, routine, parameter);

	return irq;
}
#endif /* CONFIG_DYNAMIC_INTERRUPTS */
``` | /content/code_sandbox/arch/mips/core/irq_manage.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 535 |
```objective-c
/*
*
* based on arch/riscv/include/kernel_arch_func.h
*
*/
/**
* @file
* @brief Private kernel definitions
*
* This file contains private kernel function/macro definitions and various
* other definitions for the MIPS processor architecture.
*/
#ifndef ZEPHYR_ARCH_MIPS_INCLUDE_KERNEL_ARCH_FUNC_H_
#define ZEPHYR_ARCH_MIPS_INCLUDE_KERNEL_ARCH_FUNC_H_
#include <kernel_arch_data.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _ASMLANGUAGE

/* Early arch-specific kernel init hook: nothing required on MIPS */
static ALWAYS_INLINE void arch_kernel_init(void)
{
}

/* Record the value arch_swap() should return when @thread resumes */
static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{
	thread->arch.swap_return_value = value;
}

FUNC_NORETURN void z_mips_fatal_error(unsigned int reason,
				      const struct arch_esf *esf);

/* True while executing in interrupt context (ISR nesting count != 0) */
static inline bool arch_is_in_isr(void)
{
	return _current_cpu->nested != 0U;
}

#ifdef CONFIG_IRQ_OFFLOAD
void z_irq_do_offload(void);
#endif

#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_ARCH_MIPS_INCLUDE_KERNEL_ARCH_FUNC_H_ */
``` | /content/code_sandbox/arch/mips/include/kernel_arch_func.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 240 |
```objective-c
/*
*
* based on arch/riscv/include/kernel_arch_data.h
*
*/
/**
* @file
* @brief Private kernel definitions
*
* This file contains private kernel structures definitions and various
* other definitions for the MIPS processor architecture.
*/
#ifndef ZEPHYR_ARCH_MIPS_INCLUDE_KERNEL_ARCH_DATA_H_
#define ZEPHYR_ARCH_MIPS_INCLUDE_KERNEL_ARCH_DATA_H_
#include <zephyr/toolchain.h>
#include <zephyr/arch/cpu.h>
#ifndef _ASMLANGUAGE
#include <zephyr/kernel.h>
#include <zephyr/types.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/dlist.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_ARCH_MIPS_INCLUDE_KERNEL_ARCH_DATA_H_ */
``` | /content/code_sandbox/arch/mips/include/kernel_arch_data.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 180 |
```c
/*
*
* based on arch/riscv/core/offsets/offsets.c
*
*/
#include <kernel_arch_data.h>
#include <gen_offset.h>
#include <kernel_offsets.h>
GEN_OFFSET_SYM(_thread_arch_t, swap_return_value);
GEN_OFFSET_SYM(_callee_saved_t, sp);
GEN_OFFSET_SYM(_callee_saved_t, s0);
GEN_OFFSET_SYM(_callee_saved_t, s1);
GEN_OFFSET_SYM(_callee_saved_t, s2);
GEN_OFFSET_SYM(_callee_saved_t, s3);
GEN_OFFSET_SYM(_callee_saved_t, s4);
GEN_OFFSET_SYM(_callee_saved_t, s5);
GEN_OFFSET_SYM(_callee_saved_t, s6);
GEN_OFFSET_SYM(_callee_saved_t, s7);
GEN_OFFSET_SYM(_callee_saved_t, s8);
GEN_OFFSET_STRUCT(arch_esf, ra);
GEN_OFFSET_STRUCT(arch_esf, gp);
GEN_OFFSET_STRUCT(arch_esf, t0);
GEN_OFFSET_STRUCT(arch_esf, t1);
GEN_OFFSET_STRUCT(arch_esf, t2);
GEN_OFFSET_STRUCT(arch_esf, t3);
GEN_OFFSET_STRUCT(arch_esf, t4);
GEN_OFFSET_STRUCT(arch_esf, t5);
GEN_OFFSET_STRUCT(arch_esf, t6);
GEN_OFFSET_STRUCT(arch_esf, t7);
GEN_OFFSET_STRUCT(arch_esf, t8);
GEN_OFFSET_STRUCT(arch_esf, t9);
GEN_OFFSET_STRUCT(arch_esf, a0);
GEN_OFFSET_STRUCT(arch_esf, a1);
GEN_OFFSET_STRUCT(arch_esf, a2);
GEN_OFFSET_STRUCT(arch_esf, a3);
GEN_OFFSET_STRUCT(arch_esf, v0);
GEN_OFFSET_STRUCT(arch_esf, v1);
GEN_OFFSET_STRUCT(arch_esf, at);
GEN_OFFSET_STRUCT(arch_esf, epc);
GEN_OFFSET_STRUCT(arch_esf, badvaddr);
GEN_OFFSET_STRUCT(arch_esf, hi);
GEN_OFFSET_STRUCT(arch_esf, lo);
GEN_OFFSET_STRUCT(arch_esf, status);
GEN_OFFSET_STRUCT(arch_esf, cause);
GEN_ABSOLUTE_SYM(__struct_arch_esf_SIZEOF, STACK_ROUND_UP(sizeof(struct arch_esf)));
GEN_ABS_SYM_END
``` | /content/code_sandbox/arch/mips/core/offsets/offsets.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 456 |
```objective-c
/*
*
* based on arch/riscv/include/offsets_short_arch.h
*
*/
#ifndef ZEPHYR_ARCH_MIPS_INCLUDE_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_MIPS_INCLUDE_OFFSETS_SHORT_ARCH_H_
#include <zephyr/offsets.h>
#define _thread_offset_to_sp \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_sp_OFFSET)
#define _thread_offset_to_s0 \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s0_OFFSET)
#define _thread_offset_to_s1 \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s1_OFFSET)
#define _thread_offset_to_s2 \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s2_OFFSET)
#define _thread_offset_to_s3 \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s3_OFFSET)
#define _thread_offset_to_s4 \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s4_OFFSET)
#define _thread_offset_to_s5 \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s5_OFFSET)
#define _thread_offset_to_s6 \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s6_OFFSET)
#define _thread_offset_to_s7 \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s7_OFFSET)
#define _thread_offset_to_s8 \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s8_OFFSET)
#define _thread_offset_to_swap_return_value \
(___thread_t_arch_OFFSET + ___thread_arch_t_swap_return_value_OFFSET)
#endif /* ZEPHYR_ARCH_MIPS_INCLUDE_OFFSETS_SHORT_ARCH_H_ */
``` | /content/code_sandbox/arch/mips/include/offsets_short_arch.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 350 |
```objective-c
/*
*
* Macros for MIPS CP0 registers manipulations
* inspired by linux/arch/mips/include/asm/mipsregs.h
*
*/
#ifndef _ZEPHYR_ARCH_MIPS_INCLUDE_MIPS_MIPSREGS_H_
#define _ZEPHYR_ARCH_MIPS_INCLUDE_MIPS_MIPSREGS_H_
#define CP0_BADVADDR $8
#define CP0_COUNT $9
#define CP0_COMPARE $11
#define CP0_STATUS $12
#define CP0_CAUSE $13
#define CP0_EPC $14
/* CP0_STATUS bits */
#define ST0_IE 0x00000001
#define ST0_EXL 0x00000002
#define ST0_ERL 0x00000004
#define ST0_IP0 0x00000100
#define ST0_BEV 0x00400000
/* CP0_CAUSE bits */
#define CAUSE_EXP_MASK 0x0000007c
#define CAUSE_EXP_SHIFT 2
#define CAUSE_IP_MASK 0x0000ff00
#define CAUSE_IP_SHIFT 8
#define _mips_read_32bit_c0_register(reg) \
({ \
uint32_t val; \
__asm__ __volatile__("mfc0\t%0, " STRINGIFY(reg) "\n" \
: "=r" (val)); \
val; \
})
#define _mips_write_32bit_c0_register(reg, val) \
({ \
__asm__ __volatile__("mtc0 %z0, " STRINGIFY(reg) "\n" \
: \
: "Jr" ((uint32_t)(val))); \
})
#define read_c0_status() _mips_read_32bit_c0_register(CP0_STATUS)
#define write_c0_status(val) _mips_write_32bit_c0_register(CP0_STATUS, val)
#define read_c0_cause() _mips_read_32bit_c0_register(CP0_CAUSE)
#endif /* _ZEPHYR_ARCH_MIPS_INCLUDE_MIPS_MIPSREGS_H_ */
``` | /content/code_sandbox/arch/mips/include/mips/mipsregs.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 444 |
```objective-c
/*
*
* Register names for o32 ABI, see [1] for details.
*
* [1] See MIPS Run (The Morgan Kaufmann Series in Computer
* Architecture and Design) 2nd Edition by Dominic Sweetman
*
*/
#ifndef ZEPHYR_ARCH_MIPS_INCLUDE_MIPS_REGDEF_H_
#define ZEPHYR_ARCH_MIPS_INCLUDE_MIPS_REGDEF_H_
/* always 0 */
#define zero $0
/* assembly temporary */
#define AT $1
/* subroutine return values */
#define v0 $2
#define v1 $3
/* arguments */
#define a0 $4
#define a1 $5
#define a2 $6
#define a3 $7
/* temporaries */
#define t0 $8
#define t1 $9
#define t2 $10
#define t3 $11
#define t4 $12
#define t5 $13
#define t6 $14
#define t7 $15
/* subroutine register variables */
#define s0 $16
#define s1 $17
#define s2 $18
#define s3 $19
#define s4 $20
#define s5 $21
#define s6 $22
#define s7 $23
/* temporaries */
#define t8 $24
#define t9 $25
/* interrupt/trap handler scratch registers */
#define k0 $26
#define k1 $27
/* global pointer */
#define gp $28
/* stack pointer */
#define sp $29
/* frame pointer / ninth subroutine register variable */
#define fp $30
#define s8 $30
/* return address */
#define ra $31
#endif /* ZEPHYR_ARCH_MIPS_INCLUDE_MIPS_REGDEF_H_ */
``` | /content/code_sandbox/arch/mips/include/mips/regdef.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 350 |
```c
/*
*
*/
/**
* @file Software interrupts utility code - ARM implementation
*/
#include <zephyr/kernel.h>
#include <zephyr/irq_offload.h>
#include <cmsis_core.h>
volatile irq_offload_routine_t offload_routine;
static const void *offload_param;
/* Called by z_arm_svc: invoke the currently latched offload routine.
 * arch_irq_offload() guarantees offload_routine is set before the SVC fires.
 */
void z_irq_do_offload(void)
{
	offload_routine(offload_param);
}
/*
 * Run @routine with @parameter in handler (exception) context by
 * raising an SVC. The scheduler is locked — rather than interrupts —
 * because the SVC exception itself must remain able to fire.
 */
void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
{
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) && !defined(CONFIG_ARMV8_M_BASELINE) \
	&& defined(CONFIG_ASSERT)
	/* ARMv6-M HardFault if you make a SVC call with interrupts locked.
	 */
	__ASSERT(__get_PRIMASK() == 0U, "irq_offload called with interrupts locked\n");
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE && CONFIG_ASSERT */

	k_sched_lock();
	offload_routine = routine;
	offload_param = parameter;

	/* Trigger the SVC that dispatches to z_irq_do_offload() */
	__asm__ volatile ("svc %[id]"
			  :
			  : [id] "i" (_SVC_CALL_IRQ_OFFLOAD)
			  : "memory");

	/* Clear so the ISR path can tell no offload is pending */
	offload_routine = NULL;
	k_sched_unlock();
}
``` | /content/code_sandbox/arch/arm/core/irq_offload.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 267 |
```linker script
/*
*
*/
#if LINKER_ZEPHYR_FINAL && defined(CONFIG_ISR_TABLES_LOCAL_DECLARATION)
INCLUDE isr_tables_swi.ld
#endif
``` | /content/code_sandbox/arch/arm/core/swi_tables.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 31 |
```linker script
/*
*
*/
#if defined(CONFIG_CPU_CORTEX_M_HAS_VTOR)
/*
* In an MCU with VTOR, the VTOR.TBLOFF is set to the start address of the
* exc_vector_table (i.e. _vector_start) during initialization. Therefore,
* exc_vector_table must respect the alignment requirements of VTOR.TBLOFF
* described below.
*/
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* VTOR bits 0:7 are reserved (RES0). This requires that the base address
* of the vector table is 64-word aligned.
*/
. = ALIGN( 1 << LOG2CEIL(4 * 64) );
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* VTOR bits 0:6 are reserved (RES0). This requires that the base address
* of the vector table is 32-word aligned.
*/
. = ALIGN( 1 << LOG2CEIL(4 * 32) );
#else
#error "Unsupported architecture variant"
#endif
/* When setting TBLOFF in VTOR we must align the offset to the number of
* exception entries in the vector table. The minimum alignment of 32 words
* is sufficient for the 16 ARM Core exceptions and up to 16 HW interrupts.
* For more than 16 HW interrupts, we adjust the alignment by rounding up
* to the next power of two; this restriction guarantees a functional VTOR
* setting in any Cortex-M implementation (might not be required in every
* Cortex-M processor).
*/
. = ALIGN( 1 << LOG2CEIL(4 * (16 + CONFIG_NUM_IRQS)) );
#endif
#ifdef CONFIG_ARM_ZIMAGE_HEADER
/*
* For AArch32 (A/R), VBAR has Bits [4:0] = RES0.
* For AArch32 (M), VTOR has Bits [6:0] = RES0. Thus, vector start address
* should be aligned in such a way so that it satisfies the requirements of
* VBAR and VTOR ie Bits [6:0] = 0.
*/
. = ALIGN( 0x80 );
#endif
_vector_start = .;
KEEP(*(.exc_vector_table))
KEEP(*(".exc_vector_table.*"))
#if LINKER_ZEPHYR_FINAL && defined(CONFIG_ISR_TABLES_LOCAL_DECLARATION)
INCLUDE isr_tables_vt.ld
#else
KEEP(*(.vectors))
#endif
_vector_end = .;
``` | /content/code_sandbox/arch/arm/core/vector_table.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 513 |
```unknown
# ARM architecture configuration options
menu "ARM Options"
depends on ARM
config ARCH
default "arm"
config CPU_CORTEX
bool
help
This option signifies the use of a CPU of the Cortex family.
config ARM_CUSTOM_INTERRUPT_CONTROLLER
bool
help
This option indicates that the ARM CPU is connected to a custom (i.e.
non-GIC or NVIC) interrupt controller.
A number of Cortex-A and Cortex-R cores (Cortex-A5, Cortex-R4/5, ...)
allow interfacing to a custom external interrupt controller and this
option must be selected when such cores are connected to an interrupt
controller that is not the ARM Generic Interrupt Controller (GIC) or
the Cortex-M ARM Nested Vectored Interrupt Controller (NVIC).
When this option is selected, the architecture interrupt control
functions are mapped to the SoC interrupt control interface, which is
implemented at the SoC level.
N.B. Since all Cortex-M cores have a NVIC, if this option is selected it
is assumed that the custom interrupt control interface implementation
assumes responsibility for handling the NVIC.
config ROMSTART_RELOCATION_ROM
bool "Relocate rom_start region"
default n
help
Relocates the rom_start region containing the boot-vector data and
irq vectors to the region specified by configurations:
ROMSTART_REGION_ADDRESS and ROMSTART_REGION_SIZE
This is useful for the Linux Remoteproc framework that uses the elf-loader
such that it is able to load the correct boot-vector (contained in rom_start)
into the correct memory location independent of the chosen zephyr,flash
ROM region.
Most SOCs include an alias for the boot-vector at address 0x00000000
so the default might be supported by the corresponding Linux rproc driver.
If it is not, additional options allow specifying the addresses.
In general this option should be chosen if the zephyr,flash chosen node
is not placed into the boot-vector memory area.
While this aims at generating a correct zephyr.elf file, it has the side
effect of enlarging the bin file. If the zephyr.bin file is used to boot the
secondary core, this option should be disabled.
Example:
on IMX7D, the chosen zephyr,flash can be OCRAM/OCRAM_S/TCM/DDR memories
for code location. But the boot-vector must be placed into OCRAM_S for the
CORTEX-M to boot (alias 0, real 0x00180000/32K available).
if ROMSTART_RELOCATION_ROM
config ROMSTART_REGION_ADDRESS
hex "Base address of the rom_start region"
default 0x00000000
help
Start address of the rom_start region.
This setting can be derived from a DT node reg property or specified directly.
A default value of 0x00000000 might work in most cases as SOCs have an alias
to the right memory region of the boot-vector.
Examples:
-IMX7D the boot-vector is OCRAM_S (0x00180000, aliased at 0x0).
-IMX6SX the boot-vector is TCML (0x007F8000, aliased at 0x0).
-IMX8MQ the boot-vector is TCML (0x007E0000, aliased at 0x0).
-IMX8MN the boot-vector is ITCM (0x007E0000, aliased at 0x0).
Example of DT definition:
$(dt_nodelabel_reg_addr_hex,ocram_s_sys)
config ROMSTART_REGION_SIZE
hex "Size of the rom_start region"
default 1
help
Size of the rom_start region in KB.
Default is 1KB which is enough to store the boot and irq vectors.
This setting can be derived from a DT node reg property or specified directly.
Example for IMX7D that needs the boot-vector into OCRAM_S (0x00180000):
$(dt_nodelabel_reg_size_hex,ocram_s_sys,0,K)
endif
config CODE_DATA_RELOCATION_SRAM
bool "Relocate code/data sections to SRAM"
depends on CPU_CORTEX_M
select CODE_DATA_RELOCATION
help
When selected this will relocate .text, data and .bss sections from
the specified files and places it in SRAM. The files should be specified
in the CMakeList.txt file with a cmake API zephyr_code_relocate(). This
config is used to create an MPU entry for the SRAM space used for code
relocation.
config ARM_ON_ENTER_CPU_IDLE_HOOK
bool
help
Enables a hook (z_arm_on_enter_cpu_idle()) that is called when
the CPU is made idle (by k_cpu_idle() or k_cpu_atomic_idle()).
If needed, this hook can be used to prevent the CPU from actually
entering sleep by skipping the WFE/WFI instruction.
config ARM_ON_ENTER_CPU_IDLE_PREPARE_HOOK
bool
help
Enables a hook (z_arm_on_enter_cpu_idle_prepare()) that is called when
the CPU is made idle (by k_cpu_idle() or k_cpu_atomic_idle()).
If needed, this hook can prepare data to upcoming call to
z_arm_on_enter_cpu_idle(). The z_arm_on_enter_cpu_idle_prepare differs
from z_arm_on_enter_cpu_idle because it is called before interrupts are
disabled.
config ARM_ON_EXIT_CPU_IDLE
bool
help
Enables a possibility to inject SoC-specific code just after WFI/WFE
instructions of the cpu idle implementation.
Enabling this option requires that the SoC provides a soc_cpu_idle.h
header file which defines SOC_ON_EXIT_CPU_IDLE macro guarded by
_ASMLANGUAGE.
The SOC_ON_EXIT_CPU_IDLE macro is expanded just after
WFI/WFE instructions before any memory access is performed. The purpose
of the SOC_ON_EXIT_CPU_IDLE is to perform an action that mitigate issues
observed on some SoCs caused by a memory access following WFI/WFE
instructions.
rsource "core/Kconfig"
rsource "core/Kconfig.vfp"
# General options signifying CPU capabilities of ARM SoCs
config CPU_HAS_ARM_MPU
bool
select CPU_HAS_MPU
help
This option is enabled when the CPU has a Memory Protection Unit (MPU)
in ARM flavor.
config CPU_HAS_NXP_MPU
bool
select CPU_HAS_MPU
help
This option is enabled when the CPU has a Memory Protection Unit (MPU)
in NXP flavor.
config CPU_HAS_CUSTOM_FIXED_SOC_MPU_REGIONS
bool "Custom fixed SoC MPU region definition"
help
If enabled, this option signifies that the SoC will
define and configure its own fixed MPU regions in the
SoC definition. These fixed MPU regions are currently
used to set Flash and SRAM default access policies and
they are programmed at boot time.
config CPU_HAS_ARM_SAU
bool
select CPU_HAS_TEE
help
MCU implements the ARM Security Attribution Unit (SAU).
config CPU_HAS_NRF_IDAU
bool
select CPU_HAS_TEE
help
MCU implements the nRF (vendor-specific) Security Attribution Unit.
(IDAU: "Implementation-Defined Attribution Unit", in accordance with
ARM terminology).
config HAS_SWO
bool
help
When enabled, indicates that SoC has an SWO output
endmenu
``` | /content/code_sandbox/arch/arm/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,626 |
```unknown
# ARM architecture VFP configuration options
# Math coprocessor symbols; these should be selected by the CPU symbol to
# indicate that the CPU core can be configured with the specified
# coprocessor(s).
config CPU_HAS_VFP
bool
select CPU_HAS_FPU
imply FPU
imply FPU_SHARING
help
This option signifies the support for a Vectored Floating-Point (VFP)
coprocessor.
config CPU_HAS_NEON
bool
select CPU_HAS_FPU
help
This option signifies the support for a NEON (Advanced SIMD) coprocessor.
# VFP type symbols; these should be selected by the SoC symbol to specify the
# type of the VFP core instantiated by the SoC.
config VFP_SP_D16
bool
select CPU_HAS_VFP
select VFP_FEATURE_SINGLE_PRECISION
select VFP_FEATURE_REGS_S32_D16
help
This option signifies the use of a VFP floating-point coprocessor
that supports only single-precision operations with 16 double-word
registers.
config VFP_SP_D16_FP16
bool
select CPU_HAS_VFP
select VFP_FEATURE_HALF_PRECISION
select VFP_FEATURE_SINGLE_PRECISION
select VFP_FEATURE_REGS_S32_D16
help
This option signifies the use of a VFP floating-point coprocessor
that supports half- and single-precision operations with 16
double-word registers.
config VFP_SP_D16_FP16_FMAC
bool
select CPU_HAS_VFP
select VFP_FEATURE_HALF_PRECISION
select VFP_FEATURE_SINGLE_PRECISION
select VFP_FEATURE_FMAC
select VFP_FEATURE_REGS_S32_D16
help
This option signifies the use of a VFP floating-point coprocessor
that supports half- and single-precision operations (including fused
multiply-accumulate) with 16 double-word registers.
config VFP_DP_D16
bool
select CPU_HAS_VFP
select VFP_FEATURE_SINGLE_PRECISION
select VFP_FEATURE_DOUBLE_PRECISION
select VFP_FEATURE_REGS_S32_D16
help
This option signifies the use of a VFP floating-point coprocessor
that supports single- and double-precision operations with 16
double-word registers.
config VFP_DP_D16_FP16
bool
select CPU_HAS_VFP
select VFP_FEATURE_HALF_PRECISION
select VFP_FEATURE_SINGLE_PRECISION
select VFP_FEATURE_DOUBLE_PRECISION
select VFP_FEATURE_REGS_S32_D16
help
This option signifies the use of a VFP floating-point coprocessor
that supports half-, single- and double-precision operations with 16
double-word registers.
config VFP_DP_D16_FP16_FMAC
bool
select CPU_HAS_VFP
select VFP_FEATURE_HALF_PRECISION
select VFP_FEATURE_SINGLE_PRECISION
select VFP_FEATURE_DOUBLE_PRECISION
select VFP_FEATURE_FMAC
select VFP_FEATURE_REGS_S32_D16
help
This option signifies the use of a VFP floating-point coprocessor
that supports half-, single- and double-precision operations
(including fused multiply-accumulate) with 16 double-word registers.
config VFP_U_DP_D16_FP16_FMAC
bool
select CPU_HAS_VFP
select VFP_FEATURE_HALF_PRECISION
select VFP_FEATURE_SINGLE_PRECISION
select VFP_FEATURE_DOUBLE_PRECISION
select VFP_FEATURE_FMAC
select VFP_FEATURE_REGS_S32_D16
select VFP_FEATURE_TRAP
help
This option signifies the use of a VFP floating-point coprocessor
that supports half-, single-, double-precision operations (including
fused multiply-accumulate) and floating-point exception trapping with 16
double-word registers.
config VFP_DP_D32_FP16_FMAC
bool
select CPU_HAS_VFP
select VFP_FEATURE_HALF_PRECISION
select VFP_FEATURE_SINGLE_PRECISION
select VFP_FEATURE_DOUBLE_PRECISION
select VFP_FEATURE_FMAC
select VFP_FEATURE_REGS_S64_D32
help
This option signifies the use of a VFP floating-point coprocessor
that supports half-, single- and double-precision operations
(including fused multiply-accumulate) with 32 double-word registers.
config VFP_U_DP_D32_FP16_FMAC
bool
select CPU_HAS_VFP
select VFP_FEATURE_HALF_PRECISION
select VFP_FEATURE_SINGLE_PRECISION
select VFP_FEATURE_DOUBLE_PRECISION
select VFP_FEATURE_FMAC
select VFP_FEATURE_REGS_S64_D32
select VFP_FEATURE_TRAP
help
This option signifies the use of a VFP floating-point coprocessor
that supports half-, single-, double-precision operations (including
fused multiply-accumulate) and floating-point exception trapping with 32
double-word registers.
if CPU_HAS_VFP
# VFP feature symbols; these are the helper symbols used by the floating-point
# support code to resolve the supported VFP features.
config VFP_FEATURE_HALF_PRECISION
bool
help
This option signifies that the VFP coprocessor supports
half-precision operations (half-precision extension).
config VFP_FEATURE_SINGLE_PRECISION
bool
help
This option signifies that the VFP coprocessor supports
single-precision operations.
config VFP_FEATURE_DOUBLE_PRECISION
bool
select CPU_HAS_FPU_DOUBLE_PRECISION
help
This option signifies that the VFP coprocessor supports
double-precision operations.
config VFP_FEATURE_VECTOR
bool
help
This option signifies that the VFP coprocessor supports vector
operations.
config VFP_FEATURE_FMAC
bool
help
This option signifies that the VFP coprocessor supports the fused
multiply-accumulate operations.
config VFP_FEATURE_REGS_S32_D16
bool
help
This option signifies that the VFP coprocessor implements 16
double-precision (32 single-precision) floating-point registers.
config VFP_FEATURE_REGS_S64_D32
bool
help
This option signifies that the VFP coprocessor implements 32
double-precision (64 single-precision) floating-point registers.
config VFP_FEATURE_TRAP
bool
help
This option signifies that the VFP coprocessor supports the trapping
of floating-point exceptions to allow software implementation of
the unsupported VFP instructions.
endif # CPU_HAS_VFP
# Advanced SIMD type symbols; these should be selected by the SoC symbol to
# specify the type of the Advanced SIMD core instantiated by the SoC.
config NEON
bool
select CPU_HAS_NEON
help
This option signifies the use of a NEON Advanced SIMD coprocessor.
``` | /content/code_sandbox/arch/arm/core/Kconfig.vfp | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,425 |
```linker script
/*
*
*/
#if defined(CONFIG_ARM_ZIMAGE_HEADER)
/* Keep the zImage header (and any of its subsections) in the output;
 * __end marks the end of the image as referenced by the header itself.
 * Note: the input-section pattern must be quoted as ".image_header.*";
 * the previous form (.".image_header.*") contained a stray dot and never
 * matched any section.
 */
KEEP(*(.image_header))
KEEP(*(".image_header.*"))
__end = .;
#endif
``` | /content/code_sandbox/arch/arm/core/zimage_header.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 32 |
```c
/*
*
*/
/*
* @file
* @brief Basic C++ destructor module for globals for ARM
*/
#include <zephyr/toolchain.h>
EXTERN_C int __cxa_atexit(void (*destructor)(void *), void *objptr, void *dso);
/**
 * @brief Register destructor for a global object
 *
 * ARM EABI entry point used by the compiler for static object teardown.
 * The EABI passes the object pointer first while the underlying Itanium
 * C++ ABI routine expects the destructor first, so this wrapper simply
 * swaps the first two arguments and forwards to __cxa_atexit().
 *
 * @param objptr global object pointer
 * @param destructor the global object destructor function
 * @param dso Dynamic Shared Object handle for shared libraries
 *
 * @return the result of __cxa_atexit()
 */
int __aeabi_atexit(void *objptr, void (*destructor)(void *), void *dso)
{
	int rc = __cxa_atexit(destructor, objptr, dso);

	return rc;
}
``` | /content/code_sandbox/arch/arm/core/__aeabi_atexit.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 154 |
```unknown
/*
*
*/
/**
* @file
* @brief Default basic NMI handler before the kernel is up
*
* Provide a default handler for NMI before the system is up. The default action
* is to hard hang, sleeping.
*
 * This might be preferable to rebooting when debugging, or because
* rebooting might trigger the exact same problem over and over.
*/
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
_ASM_FILE_PROLOGUE
GTEXT(z_SysNmiOnReset)

/* Default early-boot NMI handler: put the core to sleep with WFI and
 * loop forever, so the failure state can be inspected with a debugger
 * instead of the system resetting.
 */
SECTION_FUNC(TEXT, z_SysNmiOnReset)
	wfi
	b z_SysNmiOnReset
``` | /content/code_sandbox/arch/arm/core/nmi_on_reset.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 142 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <kernel_tls.h>
#include <zephyr/app_memory/app_memdomain.h>
#include <zephyr/sys/util.h>
#ifdef CONFIG_CPU_CORTEX_M
/*
 * Cortex-M does not have the thread ID or process ID register
 * needed to store the TLS pointer at runtime for the toolchain
 * to access thread data, so use a global variable instead.
 */
K_APP_DMEM(z_libc_partition) uintptr_t z_arm_tls_ptr;
#endif
/*
 * Carve the thread's TLS area out of the top of its stack and point the
 * thread's TLS pointer at it.
 *
 * The ARM TLS layout reserves a pair of toolchain pointer slots directly
 * below the TLS pointer, followed by the thread data/bss image.  Zephyr
 * has no use for those slots, but GCC-generated code assumes they exist,
 * so they are skipped when positioning the TLS pointer.
 *
 * Returns the total number of bytes consumed from the stack top.
 */
size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr)
{
	const size_t skip = sizeof(uintptr_t) * 2;
	const size_t data_size = z_tls_data_size();
	char *tls_area = stack_ptr - data_size;

	/* Populate downwards from the stack top: data/bss image first. */
	z_tls_copy(tls_area);

	/* Then account for the two toolchain-reserved pointer slots. */
	tls_area -= skip;

	/* Context switch uses this pointer to locate the TLS area. */
	new_thread->tls = POINTER_TO_UINT(tls_area);

	return data_size + skip;
}
``` | /content/code_sandbox/arch/arm/core/tls.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 360 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <zephyr/arch/arm/gdbstub.h>
#include <zephyr/debug/gdbstub.h>
/* Position of each register in the packet - n-th register in the ctx.registers array needs to be
 * the packet_pos[n]-th byte of the g (read all registers) packet. See struct arm_register_names in
 * GDB file gdb/arm-tdep.c, which defines these positions.
 * (Slots 0-15 are R0-R15; the last entry, 41, is CPSR/SPSR in GDB's layout.)
 */
static const int packet_pos[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 41};

/* Required struct: stub-wide GDB context holding the saved register file
 * and the cause of the current debug exception entry.
 */
static struct gdb_ctx ctx;
/* Return true if a BKPT instruction caused the current entry.
 *
 * Reads the faulting instruction back from memory at the saved PC and
 * matches it against the BKPT encoding for the instruction-set state
 * recorded in the saved SPSR.
 */
static int is_bkpt(unsigned int exc_cause)
{
	int ret = 0;

	if (exc_cause == GDB_EXCEPTION_BREAKPOINT) {
		/* Get the instruction that trapped */
		unsigned int instr = sys_read32(ctx.registers[PC]);
		/* Rebuild the 2-bit instruction-set state from SPSR:
		 * J lands in bit 1, T in bit 0.
		 */
		int ist = ((ctx.registers[SPSR] & BIT(SPSR_J)) >> (SPSR_J - 1)) |
			  ((ctx.registers[SPSR] & BIT(SPSR_T)) >> SPSR_T);

		if (ist == SPSR_ISETSTATE_ARM) {
			/* ARM instruction set state: match the ARM BKPT encoding */
			ret = ((instr & 0xFF00000) == 0x1200000) && ((instr & 0xF0) == 0x70);
		} else if (ist != SPSR_ISETSTATE_JAZELLE) {
			/* Thumb or ThumbEE encoding: BKPT is 0xBExx */
			ret = ((instr & 0xFF00) == 0xBE00);
		}
	}
	return ret;
}
/* Wrapper function to save and restore the execution context around the
 * GDB stub main loop.
 *
 * Saves the interrupted context from the exception stack frame into the
 * stub's GDB context, runs the stub, then writes the (possibly debugger-
 * modified) registers back into the frame for the exception return.
 *
 * @param esf exception stack frame of the interrupted context
 * @param exc_cause GDB exception cause code for this entry
 */
void z_gdb_entry(struct arch_esf *esf, unsigned int exc_cause)
{
	/* Disable the hardware breakpoint in case it was set */
	__asm__ volatile("mcr p14, 0, %0, c0, c0, 5" ::"r"(0x0) :);

	ctx.exception = exc_cause;
	/* save the registers */
	ctx.registers[R0] = esf->basic.r0;
	ctx.registers[R1] = esf->basic.r1;
	ctx.registers[R2] = esf->basic.r2;
	ctx.registers[R3] = esf->basic.r3;
	/* The EXTRA_EXCEPTION_INFO kernel option ensures these regs are set */
	ctx.registers[R4] = esf->extra_info.callee->v1;
	ctx.registers[R5] = esf->extra_info.callee->v2;
	ctx.registers[R6] = esf->extra_info.callee->v3;
	ctx.registers[R7] = esf->extra_info.callee->v4;
	ctx.registers[R8] = esf->extra_info.callee->v5;
	ctx.registers[R9] = esf->extra_info.callee->v6;
	ctx.registers[R10] = esf->extra_info.callee->v7;
	ctx.registers[R11] = esf->extra_info.callee->v8;
	ctx.registers[R13] = esf->extra_info.callee->psp;
	ctx.registers[R12] = esf->basic.r12;
	ctx.registers[LR] = esf->basic.lr;
	ctx.registers[PC] = esf->basic.pc;
	ctx.registers[SPSR] = esf->basic.xpsr;

	/* True if entering after a BKPT instruction */
	const int bkpt_entry = is_bkpt(exc_cause);

	z_gdb_main_loop(&ctx);

	/* The registers part of EXTRA_EXCEPTION_INFO are read-only - the exception
	 * return code does not restore them, thus we don't need to do so here
	 */
	esf->basic.r0 = ctx.registers[R0];
	esf->basic.r1 = ctx.registers[R1];
	esf->basic.r2 = ctx.registers[R2];
	esf->basic.r3 = ctx.registers[R3];
	esf->basic.r12 = ctx.registers[R12];
	esf->basic.lr = ctx.registers[LR];
	esf->basic.pc = ctx.registers[PC];
	esf->basic.xpsr = ctx.registers[SPSR];
	/* TODO: restore regs from extra exc. info */

	if (bkpt_entry) {
		/* Skip over the BKPT instruction, so that the resumed process
		 * won't be affected by it
		 */
		esf->basic.pc += 0x4;
	}
	/* NOTE: the original code re-assigned esf->basic.xpsr here a second
	 * time with the same value; that redundant store has been removed.
	 */
}
/* Initialize the GDB stub: enable monitor-mode debug in the cp14 debug
 * status/control register, then trigger a breakpoint so the debugger
 * attaches at a well-defined point.
 */
void arch_gdb_init(void)
{
	uint32_t reg_val;

	/* Enable the monitor debug mode */
	__asm__ volatile("mrc p14, 0, %0, c0, c2, 2" : "=r"(reg_val)::);
	reg_val |= DBGDSCR_MONITOR_MODE_EN;
	__asm__ volatile("mcr p14, 0, %0, c0, c2, 2" ::"r"(reg_val) :);
	/* Generate the Prefetch abort exception */
	__asm__ volatile("BKPT");
}
/* Resume execution after a stop: intentionally empty, since simply
 * returning from the debug exception continues the interrupted code.
 */
void arch_gdb_continue(void)
{
}
/* Single-step support: program a cp14 hardware breakpoint in address-
 * mismatch mode against the current PC, so execution traps on the very
 * next instruction that is not at that address.
 */
void arch_gdb_step(void)
{
	/* Set the hardware breakpoint */
	uint32_t reg_val = ctx.registers[PC];

	/* set BVR (Breakpoint value register) to PC, make sure it is word aligned */
	reg_val &= ~(0x3);
	__asm__ volatile("mcr p14, 0, %0, c0, c0, 4" ::"r"(reg_val) :);

	reg_val = 0;
	/* Address mismatch: trap on any address other than the one in BVR */
	reg_val |= (DBGDBCR_MEANING_ADDR_MISMATCH & DBGDBCR_MEANING_MASK) << DBGDBCR_MEANING_SHIFT;
	/* Match any other instruction (all byte-address select bits set) */
	reg_val |= (0xF & DBGDBCR_BYTE_ADDR_MASK) << DBGDBCR_BYTE_ADDR_SHIFT;
	/* Breakpoint enable */
	reg_val |= DBGDBCR_BRK_EN_MASK;
	__asm__ volatile("mcr p14, 0, %0, c0, c0, 5" ::"r"(reg_val) :);
}
/* Serialize all tracked registers into a GDB 'g' packet.
 *
 * Registers the stub does not track are reported as unavailable ('x').
 * Returns the fixed packet size on success, 0 on conversion failure.
 */
size_t arch_gdb_reg_readall(struct gdb_ctx *c, uint8_t *buf, size_t buflen)
{
	size_t written = 0;

	/* All other registers are not supported - mark them 'x' */
	memset(buf, 'x', buflen);

	for (int idx = 0; idx < GDB_NUM_REGS; idx++) {
		/* Byte offset of this register inside the packet */
		const int pos = packet_pos[idx] * 8;
		const int r = bin2hex((const uint8_t *)(c->registers + idx), 4,
				      buf + pos, buflen - pos);

		/* Overwrite the terminating character placed by bin2hex */
		buf[pos + 8] = 'x';

		if (r == 0) {
			written = 0;
			break;
		}
		written += r;
	}

	if (written != 0) {
		/* Since we don't support some floating point registers, set
		 * the packet size manually
		 */
		written = GDB_READALL_PACKET_SIZE;
	}
	return written;
}
/* Deserialize a GDB 'G' (write all registers) packet into the context.
 *
 * Each register occupies 8 hex digits in the packet; slots the debugger
 * marks with 'x' are left untouched, as are slots the stub does not track.
 *
 * Fix: the original loop stepped its index in bytes (i += 8) but compared
 * it against packet_pos[] slot positions and multiplied it by 8 again for
 * the hex offset, so only the slot-0 register was ever written correctly.
 * The loop below iterates slot positions and derives the byte offset once.
 *
 * @return total number of bytes written into c->registers, 0 on error
 */
size_t arch_gdb_reg_writeall(struct gdb_ctx *c, uint8_t *hex, size_t hexlen)
{
	int ret = 0;

	/* Walk the packet one 8-hex-digit register slot at a time */
	for (unsigned int pos = 0; (pos + 1) * 8 <= hexlen; pos++) {
		const unsigned int off = pos * 8;

		if (hex[off] == 'x') {
			/* Register marked unavailable by GDB - skip it */
			continue;
		}
		/* Check if the stub supports the register at this position */
		for (unsigned int j = 0; j < GDB_NUM_REGS; j++) {
			if (packet_pos[j] != (int)pos) {
				continue;
			}

			int r = hex2bin(hex + off, 8, (uint8_t *)(c->registers + j), 4);

			if (r == 0) {
				return 0;
			}
			ret += r;
		}
	}
	return ret;
}
/* Serialize a single register for a GDB 'p' packet.
 *
 * Unsupported registers are reported as "xxxxxxxx"; the return value is
 * 4 (bytes read) in that case, non-zero on success, 0 on conversion error.
 */
size_t arch_gdb_reg_readone(struct gdb_ctx *c, uint8_t *buf, size_t buflen, uint32_t regno)
{
	/* Default: four bytes "read" (any non-zero value avoids an error reply) */
	size_t ret = 4;

	/* Pre-fill with 'x' in case the stub does not support the register */
	memset(buf, 'x', 8);

	if (regno == SPSR_REG_IDX) {
		/* SPSR is stored last in the register array; handle it separately */
		ret = bin2hex((uint8_t *)(c->registers + GDB_NUM_REGS - 1), 4, buf, buflen);
	} else {
		/* Find which of our registers corresponds to regno */
		for (int idx = 0; idx < GDB_NUM_REGS; idx++) {
			if (packet_pos[idx] != (int)regno) {
				continue;
			}
			ret = bin2hex((uint8_t *)(c->registers + idx), 4, buf, buflen);
			break;
		}
	}
	return ret;
}
/* Deserialize a single register from a GDB 'P' packet.
 *
 * Only exact 8-hex-digit (4-byte) values are accepted.  Returns the
 * number of bytes written, or 0 for malformed input / unknown register.
 */
size_t arch_gdb_reg_writeone(struct gdb_ctx *c, uint8_t *hex, size_t hexlen, uint32_t regno)
{
	int ret = 0;

	/* A register value must be exactly 8 hex digits */
	if (hexlen != 8) {
		return ret;
	}

	if (regno < (GDB_NUM_REGS - 1)) {
		/* Find the register array index matching this GDB regno */
		for (int idx = 0; idx < GDB_NUM_REGS; idx++) {
			if (packet_pos[idx] != (int)regno) {
				continue;
			}
			ret = hex2bin(hex, hexlen, (uint8_t *)(c->registers + idx), 4);
			break;
		}
	} else if (regno == SPSR_REG_IDX) {
		/* SPSR is stored last in the register array */
		ret = hex2bin(hex, hexlen, (uint8_t *)(c->registers + GDB_NUM_REGS - 1), 4);
	}
	return ret;
}
``` | /content/code_sandbox/arch/arm/core/gdbstub.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,230 |
```unknown
/*
*
*/
#include <zephyr/linker/sections.h>
_ASM_FILE_PROLOGUE
/* Image header placed at the very start of the kernel image so it can be
 * recognized by zImage-aware loaders (see the "zImage" references below).
 */
SECTION_SUBSEC_FUNC(image_header,_image_header_section,_image_header)
#ifdef CONFIG_CPU_CORTEX_M
	/*
	 * setting the _very_ early boot on the main stack allows to use memset
	 * on the interrupt stack when CONFIG_INIT_STACKS is enabled before
	 * switching to the interrupt stack for the rest of the early boot
	 */
	.long z_main_stack + CONFIG_MAIN_STACK_SIZE	// initial SP (Cortex-M vector 0)
	.long z_arm_reset				// reset handler (Cortex-M vector 1)
#else
	b __start // branch to kernel start
	.long 0 // reserved
#endif
	.long 0 // reserved
	.long 0 // reserved
	.long 0 // reserved
	.long 0 // reserved
	.long 0 // reserved
	.long 0 // reserved
	.long 0 // reserved
	.long 0x016f2818 // Magic number
	.long __rom_region_start // start address of zImage
	.long __end // end address of zImage
``` | /content/code_sandbox/arch/arm/core/header.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 234 |
```unknown
/*
* Userspace and service handler hooks
*
*
*
*/
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/syscall.h>
#include <zephyr/arch/arm/exception.h>
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
#include <zephyr/arch/cpu.h>
#endif
_ASM_FILE_PROLOGUE
GTEXT(z_arm_userspace_enter)
GTEXT(z_arm_do_syscall)
GTEXT(arch_user_string_nlen)
GTEXT(z_arm_user_string_nlen_fault_start)
GTEXT(z_arm_user_string_nlen_fault_end)
GTEXT(z_arm_user_string_nlen_fixup)
GDATA(_kernel)
/* Imports */
GDATA(_k_syscall_table)
/**
 *
 * User space entry function
 *
 * This function is the entry point to user mode from privileged execution.
 * The conversion is one way, and threads which transition to user mode do
 * not transition back later, unless they are doing system calls.
 *
 * The function is invoked as:
 * z_arm_userspace_enter(user_entry, p1, p2, p3,
 *                       stack_info.start, stack_info.size);
 */
SECTION_FUNC(TEXT,z_arm_userspace_enter)
	/* move user_entry to lr */
	mov lr, r0

	/* prepare to set stack to privileged stack */
	ldr r0, =_kernel
	ldr r0, [r0, #_kernel_offset_to_current]
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* move p1 to ip */
	mov ip, r1
	ldr r1, =_thread_offset_to_priv_stack_start
	ldr r0, [r0, r1]	/* priv stack ptr */
	ldr r1, =CONFIG_PRIVILEGED_STACK_SIZE
	add r0, r0, r1
	/* Restore p1 from ip */
	mov r1, ip
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	ldr r0, [r0, #_thread_offset_to_priv_stack_start]	/* priv stack ptr */
	ldr ip, =CONFIG_PRIVILEGED_STACK_SIZE
	add r0, r0, ip
#elif defined(CONFIG_CPU_AARCH32_CORTEX_R)
	ldr r0, [r0, #_thread_offset_to_priv_stack_start]	/* priv stack ptr */
	ldr ip, =CONFIG_PRIVILEGED_STACK_SIZE
	add r0, r0, ip

	/* Remember the privileged stack end for later syscall entries */
	ldr ip, =_kernel
	ldr ip, [ip, #_kernel_offset_to_current]
	str r0, [ip, #_thread_offset_to_priv_stack_end]	/* priv stack end */
#endif

	/* store current stack pointer to ip
	 * the current stack pointer is needed to retrieve
	 * stack_info.start and stack_info.size
	 */
	mov ip, sp

#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
	mov sp, r0
#else
	/* set stack to privileged stack
	 *
	 * Note [applies only when CONFIG_BUILTIN_STACK_GUARD is enabled]:
	 * modifying PSP via MSR instruction is not subject to stack limit
	 * checking, so we do not need to clear PSPLIM before setting PSP.
	 * The operation is safe since, by design, the privileged stack is
	 * located in memory higher than the default (user) thread stack.
	 */
	msr PSP, r0
#endif

#if defined(CONFIG_BUILTIN_STACK_GUARD)
	/* At this point the privileged stack is not yet protected by PSPLIM.
	 * Since we have just switched to the top of the privileged stack, we
	 * are safe, as long as the stack can accommodate the maximum exception
	 * stack frame.
	 */

	/* set stack pointer limit to the start of the priv stack */
	ldr r0, =_kernel
	ldr r0, [r0, #_kernel_offset_to_current]
	ldr r0, [r0, #_thread_offset_to_priv_stack_start]	/* priv stack ptr */
	msr PSPLIM, r0
#endif

	/* push args to stack */
	push {r1,r2,r3,lr}
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	mov r1, ip
	push {r0,r1}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
	|| defined(CONFIG_CPU_AARCH32_CORTEX_R)
	push {r0,ip}
#endif

	/* Re-program dynamic memory map.
	 *
	 * Important note:
	 * z_arm_configure_dynamic_mpu_regions() may re-program the MPU Stack Guard
	 * to guard the privilege stack for overflows (if building with option
	 * CONFIG_MPU_STACK_GUARD). There is a risk of actually overflowing the
	 * stack while doing the re-programming. We minimize the risk by placing
	 * this function immediately after we have switched to the privileged stack
	 * so that the whole stack area is available for this critical operation.
	 *
	 * Note that the risk for overflow is higher if using the normal thread
	 * stack, since we do not control how much stack is actually left, when
	 * user invokes z_arm_userspace_enter().
	 */
	ldr r0, =_kernel
	ldr r0, [r0, #_kernel_offset_to_current]
	bl z_arm_configure_dynamic_mpu_regions

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	pop {r0,r3}

	/* load up stack info from user stack */
	ldr r0, [r3]
	ldr r3, [r3, #4]
	mov ip, r3

	push {r0,r3}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
	|| defined(CONFIG_CPU_AARCH32_CORTEX_R)
	pop {r0,ip}

	/* load up stack info from user stack */
	ldr r0, [ip]
	ldr ip, [ip, #4]

	push {r0,ip}
#endif

	/* clear the user stack area to clean out privileged data */
	/* from right past the guard right up to the end */
	mov r2, ip
#ifdef CONFIG_INIT_STACKS
	ldr r1,=0xaaaaaaaa
#else
	eors r1, r1
#endif
	bl memset

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	pop {r0, r1}
	mov ip, r1
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
	|| defined(CONFIG_CPU_AARCH32_CORTEX_R)
	pop {r0,ip}
#endif

	/* r0 contains user stack start, ip contains user stack size */
	add r0, r0, ip   /* calculate top of stack */

	/* pop remaining arguments from stack before switching stacks */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* Use r4 to pop lr, then restore r4 */
	mov ip, r4
	pop {r1,r2,r3,r4}
	mov lr, r4
	mov r4, ip
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
	|| defined(CONFIG_CPU_AARCH32_CORTEX_R)
	pop {r1,r2,r3,lr}
#endif

#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
	/*
	 * set stack to user stack. We are in SYSTEM state, so r13 and r14 are
	 * shared with USER state
	 */
	mov sp, r0
#else
#if defined(CONFIG_BUILTIN_STACK_GUARD)
	/*
	 * Guard the default (user) stack until thread drops privileges.
	 *
	 * Notes:
	 * PSPLIM is configured *before* PSP switches to the default (user) stack.
	 * This is safe, since the user stack is located, by design, in a lower
	 * memory area compared to the privileged stack.
	 *
	 * However, we need to prevent a context-switch to occur, because that
	 * would re-configure PSPLIM to guard the privileged stack; we enforce
	 * a PendSV locking for this purpose.
	 *
	 * Between PSPLIM update and PSP switch, the privileged stack will be
	 * left un-guarded; this is safe, as long as the privileged stack is
	 * large enough to accommodate a maximum exception stack frame.
	 */

	/* Temporarily store current IRQ locking status in ip */
	mrs ip, BASEPRI
	push {r0, ip}

	/* Lock PendSV while reprogramming PSP and PSPLIM */
	mov r0, #_EXC_PENDSV_PRIO_MASK
	msr BASEPRI_MAX, r0
	isb

	/* Set PSPLIM to guard the thread's user stack. */
	ldr r0, =_kernel
	ldr r0, [r0, #_kernel_offset_to_current]
	ldr r0, [r0, #_thread_offset_to_stack_info_start]
	msr PSPLIM, r0

	pop {r0, ip}
#endif

	/* set stack to user stack */
	msr PSP, r0
#endif

#if defined(CONFIG_BUILTIN_STACK_GUARD)
	/* Restore interrupt lock status */
	msr BASEPRI, ip
	isb
#endif

	/* restore r0 */
	mov r0, lr

#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
	/* change processor mode to unprivileged, with all interrupts enabled. */
	msr CPSR_c, #MODE_USR
#else
	/* change processor mode to unprivileged */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	push {r0, r1, r2, r3}
	ldr r0, =_kernel
	ldr r0, [r0, #_kernel_offset_to_current]
	ldr r1, =_thread_offset_to_mode
	ldr r1, [r0, r1]
	movs r2, #1
	orrs r1, r1, r2
	mrs r3, CONTROL
	orrs r3, r3, r2
	mov ip, r3
	/* Store (unprivileged) mode in thread's mode state variable */
	ldr r2, =_thread_offset_to_mode
	str r1, [r0, r2]
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	push {r0, r1}
	ldr r0, =_kernel
	ldr r0, [r0, #_kernel_offset_to_current]
	ldr r1, [r0, #_thread_offset_to_mode]
	orrs r1, r1, #1
	mrs ip, CONTROL
	orrs ip, ip, #1
	/* Store (unprivileged) mode in thread's mode state variable */
	str r1, [r0, #_thread_offset_to_mode]
#endif
	dsb
	msr CONTROL, ip
#endif

	/* ISB is not strictly necessary here (stack pointer is not being
	 * touched), but it's recommended to avoid executing pre-fetched
	 * instructions with the previous privilege.
	 */
	isb
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	pop {r0, r1, r2, r3}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	pop {r0, r1}
#endif

	/* jump to z_thread_entry entry */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	push {r0, r1}
	ldr r0, =z_thread_entry
	mov ip, r0
	pop {r0, r1}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
	|| defined(CONFIG_CPU_AARCH32_CORTEX_R)
	ldr ip, =z_thread_entry
#endif
	bx ip
/**
*
* Userspace system call function
*
* This function is used to do system calls from unprivileged code. This
* function is responsible for the following:
* 1) Fixing up bad syscalls
* 2) Configuring privileged stack and loading up stack arguments
* 3) Dispatching the system call
* 4) Restoring stack and calling back to the caller of the SVC
*
*/
SECTION_FUNC(TEXT, z_arm_do_syscall)
/* Note [when using MPU-based stack guarding]:
 * The function is executing in privileged mode. This implies that we
 * shall not be allowed to use the thread's default unprivileged stack,
 * (i.e push to or pop from it), to avoid a possible stack corruption.
 *
 * Rationale: since we execute in PRIV mode and no MPU guard
 * is guarding the end of the default stack, we won't be able
 * to detect any stack overflows.
 *
 * Note [when using built-in stack limit checking on ARMv8-M]:
 * At this point PSPLIM is already configured to guard the default (user)
 * stack, so pushing to the default thread's stack is safe.
 */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* save current stack pointer (user stack) */
mov ip, sp
/* temporarily push to user stack */
push {r0,r1}
/* setup privileged stack */
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
adds r0, r0, #_thread_offset_to_priv_stack_start
ldr r0, [r0] /* priv stack ptr */
ldr r1, =CONFIG_PRIVILEGED_STACK_SIZE
/* r0 now points at the top (highest address) of the priv stack */
add r0, r1
/* Store current SP and LR at the beginning of the priv stack */
subs r0, #8
mov r1, ip
str r1, [r0, #0]
mov r1, lr
str r1, [r0, #4]
mov ip, r0
/* Restore user stack and original r0, r1 */
pop {r0, r1}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* setup privileged stack */
ldr ip, =_kernel
ldr ip, [ip, #_kernel_offset_to_current]
ldr ip, [ip, #_thread_offset_to_priv_stack_start] /* priv stack ptr */
add ip, #CONFIG_PRIVILEGED_STACK_SIZE
/* Store current SP and LR at the beginning of the priv stack */
subs ip, #8
str sp, [ip, #0]
str lr, [ip, #4]
#elif defined(CONFIG_CPU_AARCH32_CORTEX_R)
/*
 * The SVC handler has already switched to the privileged stack.
 * Store the user SP and LR at the beginning of the priv stack.
 */
ldr ip, =_kernel
ldr ip, [ip, #_kernel_offset_to_current]
ldr ip, [ip, #_thread_offset_to_sp_usr]
push {ip, lr}
#endif
#if !defined(CONFIG_CPU_AARCH32_CORTEX_R)
/* switch to privileged stack */
msr PSP, ip
#endif
/* Note (applies when using stack limit checking):
 * We do not need to lock IRQs after switching PSP to the privileged stack;
 * PSPLIM is guarding the default (user) stack, which, by design, is
 * located at *lower* memory area. Since we switch to the top of the
 * privileged stack we are safe, as long as the stack can accommodate
 * the maximum exception stack frame.
 */
#if defined(CONFIG_BUILTIN_STACK_GUARD)
/* Set stack pointer limit (needed in privileged mode) */
ldr ip, =_kernel
ldr ip, [ip, #_kernel_offset_to_current]
ldr ip, [ip, #_thread_offset_to_priv_stack_start] /* priv stack ptr */
msr PSPLIM, ip
#endif
/*
 * r0-r5 contain arguments
 * r6 contains call_id
 * r8 contains original LR
 */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* On baseline (Thumb-1) only r0-r7 are usable by most instructions, so
 * r0/r1 are parked in ip/lr while r0 is reused for the call_id check.
 */
/* save r0, r1 to ip, lr */
mov ip, r0
mov lr, r1
ldr r0, =K_SYSCALL_BAD
cmp r6, r0
bne valid_syscall
/* BAD SYSCALL path */
/* fixup stack frame on the privileged stack, adding ssf */
mov r1, sp
/* ssf is present in r1 (sp) */
push {r1,lr}
push {r4,r5}
/* restore r0, r1 */
mov r0, ip
mov r1, lr
b dispatch_syscall
valid_syscall:
/* push ssf to privileged stack */
mov r1, sp
push {r1}
/* push args to complete stack frame */
push {r4,r5}
dispatch_syscall:
/* original r0 is saved in ip */
ldr r0, =_k_syscall_table
/* call_id * 4 = byte offset into the table of function pointers */
lsls r6, #2
add r0, r6
ldr r0, [r0] /* load table address */
/* swap ip and r0, restore r1 from lr */
mov r1, ip
mov ip, r0
mov r0, r1
mov r1, lr
/* execute function from dispatch table */
blx ip
/* restore LR
 * r0 holds the return value and needs to be preserved
 */
mov ip, r0
mov r0, sp
ldr r0, [r0,#16]
mov lr, r0
/* Restore r0 */
mov r0, ip
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
|| defined(CONFIG_CPU_AARCH32_CORTEX_R)
ldr ip, =K_SYSCALL_BAD
cmp r6, ip
bne valid_syscall
/* BAD SYSCALL path */
/* fixup stack frame on the privileged stack, adding ssf */
mov ip, sp
push {r4,r5,ip,lr}
b dispatch_syscall
valid_syscall:
/* push args to complete stack frame */
mov ip, sp
push {r4,r5,ip}
dispatch_syscall:
ldr ip, =_k_syscall_table
/* call_id * 4 = byte offset into the table of function pointers */
lsl r6, #2
add ip, r6
ldr ip, [ip] /* load table address */
/* execute function from dispatch table */
blx ip
/* restore LR */
ldr lr, [sp,#16]
#endif
#if defined(CONFIG_BUILTIN_STACK_GUARD)
/*
 * Guard the default (user) stack until thread drops privileges.
 *
 * Notes:
 * PSPLIM is configured *before* PSP switches to the default (user) stack.
 * This is safe, since the user stack is located, by design, in a lower
 * memory area compared to the privileged stack.
 *
 * However, we need to prevent a context-switch to occur, because that
 * would re-configure PSPLIM to guard the privileged stack; we enforce
 * a PendSV locking for this purpose.
 *
 * Between PSPLIM update and PSP switch, the privileged stack will be
 * left un-guarded; this is safe, as long as the privileged stack is
 * large enough to accommodate a maximum exception stack frame.
 */
/* Temporarily store current IRQ locking status in r2 */
mrs r2, BASEPRI
/* Lock PendSV while reprogramming PSP and PSPLIM */
mov r3, #_EXC_PENDSV_PRIO_MASK
msr BASEPRI_MAX, r3
isb
/* Set PSPLIM to guard the thread's user stack. */
ldr r3, =_kernel
ldr r3, [r3, #_kernel_offset_to_current]
ldr r3, [r3, #_thread_offset_to_stack_info_start] /* stack_info.start */
msr PSPLIM, r3
#endif
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* set stack back to unprivileged stack */
mov ip, r0
mov r0, sp
/* saved user SP was stored on the priv stack frame (see above) */
ldr r0, [r0,#12]
msr PSP, r0
/* Restore r0 */
mov r0, ip
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* set stack back to unprivileged stack */
ldr ip, [sp,#12]
msr PSP, ip
#endif
#if defined(CONFIG_BUILTIN_STACK_GUARD)
/* Restore interrupt lock status */
msr BASEPRI, r2
isb
#endif
/* Preserve return value (r0) and r1 across the privilege drop below */
push {r0, r1}
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
push {r2, r3}
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
ldr r2, =_thread_offset_to_mode
ldr r1, [r0, r2]
movs r3, #1
orrs r1, r1, r3
/* Store (unprivileged) mode in thread's mode state variable */
str r1, [r0, r2]
dsb
/* drop privileges by setting bit 0 in CONTROL */
mrs r2, CONTROL
orrs r2, r2, r3
msr CONTROL, r2
pop {r2, r3}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
|| defined(CONFIG_CPU_AARCH32_CORTEX_R)
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
ldr r1, [r0, #_thread_offset_to_mode]
orrs r1, r1, #1
/* Store (unprivileged) mode in thread's mode state variable */
str r1, [r0, #_thread_offset_to_mode]
dsb
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* drop privileges by setting bit 0 in CONTROL */
mrs ip, CONTROL
orrs ip, ip, #1
msr CONTROL, ip
#endif
#endif
/* ISB is not strictly necessary here (stack pointer is not being
 * touched), but it's recommended to avoid executing pre-fetched
 * instructions with the previous privilege.
 */
isb
pop {r0, r1}
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* Zero out volatile (caller-saved) registers so as to not leak state from
 * kernel mode. The C calling convention for the syscall handler will
 * restore the others to original values.
 */
movs r2, #0
movs r3, #0
/*
 * return back to original function that called SVC, add 1 to force thumb
 * mode
 */
/* Save return value temporarily to ip */
mov ip, r0
mov r0, r8
movs r1, #1
orrs r0, r0, r1
/* swap ip, r0 */
mov r1, ip
mov ip, r0
mov r0, r1
movs r1, #0
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* Zero out volatile (caller-saved) registers so as to not leak state from
 * kernel mode. The C calling convention for the syscall handler will
 * restore the others to original values.
 */
mov r1, #0
mov r2, #0
mov r3, #0
/*
 * return back to original function that called SVC, add 1 to force thumb
 * mode
 */
mov ip, r8
orrs ip, ip, #1
#elif defined(CONFIG_CPU_AARCH32_CORTEX_R)
/* Restore user stack pointer */
ldr ip, [sp,#12]
mov sp, ip
/* Zero out volatile (caller-saved) registers so as to not leak state from
 * kernel mode. The C calling convention for the syscall handler will
 * restore the others to original values.
 */
mov r1, #0
mov r2, #0
mov r3, #0
/*
 * return back to original function that called SVC
 */
mov ip, r8
cps #MODE_USR
#endif
bx ip
/*
* size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)
*/
/*
 * Bounded strlen over a possibly-invalid user-space pointer.
 * r0 = string, r1 = maxsize, r2 = err pointer (written 0 on success,
 * left at -1 when the faultable load below traps and the fault handler
 * redirects execution to z_arm_user_string_nlen_fixup).
 */
SECTION_FUNC(TEXT, arch_user_string_nlen)
push {r0, r1, r2, r4, r5, lr}
/* sp+4 is error value, init to -1 */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) \
|| defined(CONFIG_CPU_AARCH32_CORTEX_R)
ldr r3, =-1
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
mov.w r3, #-1
#endif
str r3, [sp, #4]
/* Perform string length calculation */
movs r3, #0 /* r3 is the counter */
strlen_loop:
z_arm_user_string_nlen_fault_start:
/* r0 contains the string. r5 = *(r0 + r3). This could fault.
 * The fault_start/fault_end labels bound the faultable instruction;
 * presumably the fault handler uses them to detect and recover from a
 * bad user pointer — verify against the fault handling code.
 */
ldrb r5, [r0, r3]
z_arm_user_string_nlen_fault_end:
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
cmp r5, #0
beq strlen_done
cmp r3, r1
beq strlen_done
adds r3, #1
b strlen_loop
#else
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
cmp r5, #0
beq strlen_done
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* cbz: single-instruction compare-and-branch (Thumb-2 only) */
cbz r5, strlen_done
#endif
cmp r3, r1
beq.n strlen_done
adds r3, #1
b.n strlen_loop
#endif
strlen_done:
/* Move length calculation from r3 to r0 (return value register) */
mov r0, r3
/* Clear error value since we succeeded */
movs r1, #0
str r1, [sp, #4]
z_arm_user_string_nlen_fixup:
/* Write error value to err pointer parameter */
ldr r1, [sp, #4]
str r1, [r2, #0]
add sp, #12
pop {r4, r5, pc}
``` | /content/code_sandbox/arch/arm/core/userspace.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5,940 |
```c
/*
*
*/
#include <zephyr/llext/elf.h>
#include <zephyr/llext/llext.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/util.h>
LOG_MODULE_REGISTER(elf, CONFIG_LLEXT_LOG_LEVEL);
#define OPCODE2ARMMEM(x) ((uint32_t)(x))
#define OPCODE2THM16MEM(x) ((uint16_t)(x))
#define MEM2ARMOPCODE(x) OPCODE2ARMMEM(x)
#define MEM2THM16OPCODE(x) OPCODE2THM16MEM(x)
#define JUMP_UPPER_BOUNDARY ((int32_t)0xfe000000)
#define JUMP_LOWER_BOUNDARY ((int32_t)0x2000000)
#define PREL31_UPPER_BOUNDARY ((int32_t)0x40000000)
#define PREL31_LOWER_BOUNDARY ((int32_t)-0x40000000)
#define THM_JUMP_UPPER_BOUNDARY ((int32_t)0xff000000)
#define THM_JUMP_LOWER_BOUNDARY ((int32_t)0x01000000)
#define MASK_V4BX_RM_COND 0xf000000f
#define MASK_V4BX_NOT_RM_COND 0x01a0f000
#define MASK_BRANCH_COND GENMASK(31, 28)
#define MASK_BRANCH_101 GENMASK(27, 25)
#define MASK_BRANCH_L BIT(24)
#define MASK_BRANCH_OFFSET GENMASK(23, 0)
#define MASK_MOV_COND GENMASK(31, 28)
#define MASK_MOV_00 GENMASK(27, 26)
#define MASK_MOV_I BIT(25)
#define MASK_MOV_OPCODE GENMASK(24, 21)
#define MASK_MOV_S BIT(20)
#define MASK_MOV_RN GENMASK(19, 16)
#define MASK_MOV_RD GENMASK(15, 12)
#define MASK_MOV_OPERAND2 GENMASK(11, 0)
#define BIT_THM_BW_S 10
#define MASK_THM_BW_11110 GENMASK(15, 11)
#define MASK_THM_BW_S BIT(10)
#define MASK_THM_BW_IMM10 GENMASK(9, 0)
#define BIT_THM_BL_J1 13
#define BIT_THM_BL_J2 11
#define MASK_THM_BL_10 GENMASK(15, 14)
#define MASK_THM_BL_J1 BIT(13)
#define MASK_THM_BL_1 BIT(12)
#define MASK_THM_BL_J2 BIT(11)
#define MASK_THM_BL_IMM11 GENMASK(10, 0)
#define MASK_THM_MOV_11110 GENMASK(15, 11)
#define MASK_THM_MOV_I BIT(10)
#define MASK_THM_MOV_100100 GENMASK(9, 4)
#define MASK_THM_MOV_IMM4 GENMASK(3, 0)
#define MASK_THM_MOV_0 BIT(15)
#define MASK_THM_MOV_IMM3 GENMASK(14, 12)
#define MASK_THM_MOV_RD GENMASK(11, 8)
#define MASK_THM_MOV_IMM8 GENMASK(7, 0)
#define SHIFT_PREL31_SIGN 30
#define SHIFT_BRANCH_OFFSET 2
#define SHIFT_JUMPS_SIGN 25
#define SHIFT_MOV_RD 4
#define SHIFT_MOV_RN 4
#define SHIFT_MOVS_SIGN 15
#define SHIFT_THM_JUMPS_SIGN 24
#define SHIFT_THM_BW_IMM10 12
#define SHIFT_THM_BL_J2 22
#define SHIFT_THM_BL_J1 23
#define SHIFT_THM_MOVS_SIGN 15
#define SHIFT_THM_MOV_I 1
#define SHIFT_THM_MOV_IMM3 4
#define SHIFT_THM_MOV_IMM4 12
/**
 * @brief Decode an R_ARM_PREL31 relocation and compute the new offset.
 *
 * Reads the 31-bit signed place-relative value stored at @p loc,
 * sign-extends it (sign at bit 30), and rebases it onto the symbol
 * address, range-checking the result against the 31-bit signed range.
 *
 * @param reloc_type ELF relocation type (unused here, kept for symmetry)
 * @param loc Address of the word being relocated
 * @param sym_base_addr Address of the target symbol
 * @param sym_name Symbol name (diagnostics only)
 * @param offset Output: computed place-relative offset
 * @return 0 on success, -ENOEXEC if the offset does not fit in 31 bits
 */
static inline int prel31_decode(elf_word reloc_type, uint32_t loc,
				uint32_t sym_base_addr, const char *sym_name, int32_t *offset)
{
	int ret;

	/* Bit 30 is the sign bit of the prel31 field */
	*offset = sign_extend(*(int32_t *)loc, SHIFT_PREL31_SIGN);
	*offset += sym_base_addr - loc;
	if (*offset >= PREL31_UPPER_BOUNDARY || *offset < PREL31_LOWER_BOUNDARY) {
		LOG_ERR("sym '%s': relocation out of range (%#x -> %#x)\n",
			sym_name, loc, sym_base_addr);
		ret = -ENOEXEC;
	} else {
		ret = 0;
	}

	return ret;
}
/**
 * @brief Write back a prel31 offset, preserving bit 31 of the target word.
 *
 * @param loc Address of the word being patched
 * @param offset Offset to store in bits 30:0
 */
static inline void prel31_reloc(uint32_t loc, int32_t *offset)
{
	/* Keep only bit 31, then merge in the 31-bit offset */
	*(uint32_t *)loc &= BIT(31);
	*(uint32_t *)loc |= *offset & GENMASK(30, 0);
}
/**
 * @brief Apply an R_ARM_PREL31 relocation at @p loc.
 *
 * Decodes the current prel31 value, rebases it on @p sym_base_addr and,
 * if it fits in 31 bits, writes the patched word back.
 *
 * @return 0 on success, -ENOEXEC when the relocation is out of range
 */
static int prel31_handler(elf_word reloc_type, uint32_t loc,
			  uint32_t sym_base_addr, const char *sym_name)
{
	int32_t offset;
	int ret = prel31_decode(reloc_type, loc, sym_base_addr, sym_name, &offset);

	if (ret != 0) {
		return ret;
	}

	prel31_reloc(loc, &offset);
	return 0;
}
/**
 * @brief Decode an ARM B/BL relocation (R_ARM_PC24/R_ARM_CALL/R_ARM_JUMP24).
 *
 * Extracts the signed 24-bit word-aligned branch immediate, converts it to
 * a byte offset, rebases it onto the symbol address and range-checks the
 * +/-32 MiB ARM branch range.
 *
 * @param reloc_type ELF relocation type (unused here, kept for symmetry)
 * @param loc Address of the branch instruction being patched
 * @param sym_base_addr Address of the branch target symbol
 * @param sym_name Symbol name (diagnostics only)
 * @param offset Output: computed branch byte offset
 * @return 0 on success, -ENOEXEC if the target is out of branch range
 */
static inline int jumps_decode(elf_word reloc_type, uint32_t loc,
			       uint32_t sym_base_addr, const char *sym_name, int32_t *offset)
{
	int ret;

	*offset = MEM2ARMOPCODE(*(uint32_t *)loc);
	/* imm24 counts words: shift left 2 to get bytes, sign at bit 25 */
	*offset = (*offset & MASK_BRANCH_OFFSET) << SHIFT_BRANCH_OFFSET;
	*offset = sign_extend(*offset, SHIFT_JUMPS_SIGN);

	*offset += sym_base_addr - loc;
	/* Note: JUMP_UPPER_BOUNDARY (0xfe000000) is negative, -32 MiB */
	if (*offset >= JUMP_LOWER_BOUNDARY || *offset <= JUMP_UPPER_BOUNDARY) {
		LOG_ERR("sym '%s': relocation out of range (%#x -> %#x)\n",
			sym_name, loc, sym_base_addr);
		ret = -ENOEXEC;
	} else {
		ret = 0;
	}

	return ret;
}
/**
 * @brief Re-encode an ARM B/BL instruction with the relocated offset.
 *
 * Converts the byte offset back to a word-aligned imm24 and merges it
 * into the instruction, keeping cond, the 101 group and the L bit.
 *
 * @param loc Address of the instruction being patched
 * @param offset Branch byte offset (clobbered: converted in place)
 */
static inline void jumps_reloc(uint32_t loc, int32_t *offset)
{
	*offset >>= SHIFT_BRANCH_OFFSET;
	*offset &= MASK_BRANCH_OFFSET;

	*(uint32_t *)loc &= OPCODE2ARMMEM(MASK_BRANCH_COND|MASK_BRANCH_101|MASK_BRANCH_L);
	*(uint32_t *)loc |= OPCODE2ARMMEM(*offset);
}
/**
 * @brief Apply an ARM B/BL relocation (R_ARM_PC24/R_ARM_CALL/R_ARM_JUMP24).
 *
 * Decodes the branch offset, rebases it on @p sym_base_addr and writes
 * the re-encoded instruction back when the target is in range.
 *
 * @return 0 on success, -ENOEXEC when the relocation is out of range
 */
static int jumps_handler(elf_word reloc_type, uint32_t loc,
			 uint32_t sym_base_addr, const char *sym_name)
{
	int32_t offset;
	int ret = jumps_decode(reloc_type, loc, sym_base_addr, sym_name, &offset);

	if (ret != 0) {
		return ret;
	}

	jumps_reloc(loc, &offset);
	return 0;
}
/**
 * @brief Apply an ARM MOVW/MOVT relocation
 *        (R_ARM_MOVW_ABS_NC / R_ARM_MOVT_ABS / R_ARM_MOVW_PREL_NC /
 *        R_ARM_MOVT_PREL).
 *
 * The 16-bit immediate of a MOVW/MOVT is split between the Rn field
 * (bits 19:16, holding imm bits 15:12) and operand2 (bits 11:0). The
 * immediate is reassembled, rebased and re-encoded in place.
 *
 * @param reloc_type ELF relocation type, selects PREL/MOVT handling
 * @param loc Address of the instruction being patched
 * @param sym_base_addr Address of the target symbol
 * @param sym_name Symbol name (unused)
 */
static void movs_handler(elf_word reloc_type, uint32_t loc,
			 uint32_t sym_base_addr, const char *sym_name)
{
	int32_t offset;
	uint32_t tmp;

	offset = tmp = MEM2ARMOPCODE(*(uint32_t *)loc);
	/* Reassemble the 16-bit immediate: Rn field -> bits 15:12,
	 * operand2 -> bits 11:0; sign at bit 15.
	 */
	offset = ((offset & MASK_MOV_RN) >> SHIFT_MOV_RN) | (offset & MASK_MOV_OPERAND2);
	offset = sign_extend(offset, SHIFT_MOVS_SIGN);

	offset += sym_base_addr;
	if (reloc_type == R_ARM_MOVT_PREL || reloc_type == R_ARM_MOVW_PREL_NC) {
		/* PC-relative variants subtract the place address */
		offset -= loc;
	}
	if (reloc_type == R_ARM_MOVT_ABS || reloc_type == R_ARM_MOVT_PREL) {
		/* MOVT encodes the upper half-word of the value */
		offset >>= 16;
	}

	/* Keep cond/I/opcode/Rd, splice the new immediate back into
	 * the Rn and operand2 fields.
	 */
	tmp &= (MASK_MOV_COND | MASK_MOV_00 | MASK_MOV_I | MASK_MOV_OPCODE | MASK_MOV_RD);
	tmp |= ((offset & MASK_MOV_RD) << SHIFT_MOV_RD) | (offset & MASK_MOV_OPERAND2);

	*(uint32_t *)loc = OPCODE2ARMMEM(tmp);
}
/**
 * @brief Decode a Thumb-2 BL/B.W relocation (R_ARM_THM_CALL/R_ARM_THM_JUMP24).
 *
 * The 25-bit branch offset is scattered across two 16-bit half-words:
 * S and imm10 in the upper half-word; J1, J2 and imm11 in the lower one.
 * Per the Thumb-2 encoding, I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S).
 * The reassembled offset is rebased onto the symbol address and checked
 * against the +/-16 MiB Thumb-2 branch range.
 *
 * @param reloc_type ELF relocation type (unused here, kept for symmetry)
 * @param loc Address of the first (upper) half-word of the instruction
 * @param sym_base_addr Branch target address
 * @param sym_name Symbol name (diagnostics only)
 * @param offset Output: computed branch byte offset
 * @param upper Output: upper half-word of the instruction
 * @param lower Output: lower half-word of the instruction
 * @return 0 on success, -ENOEXEC if the target is out of branch range
 */
static inline int thm_jumps_decode(elf_word reloc_type, uint32_t loc,
				   uint32_t sym_base_addr, const char *sym_name, int32_t *offset,
				   uint32_t *upper, uint32_t *lower)
{
	int ret;
	uint32_t j_one, j_two, sign;

	*upper = MEM2THM16OPCODE(*(uint16_t *)loc);
	*lower = MEM2THM16OPCODE(*(uint16_t *)(loc + 2));

	/* sign is bit10 */
	sign = (*upper >> BIT_THM_BW_S) & 1;
	j_one = (*lower >> BIT_THM_BL_J1) & 1;
	j_two = (*lower >> BIT_THM_BL_J2) & 1;
	/* offset = S:I1:I2:imm10:imm11:0, sign at bit 24 */
	*offset = (sign << SHIFT_THM_JUMPS_SIGN) |
		((~(j_one ^ sign) & 1) << SHIFT_THM_BL_J1) |
		((~(j_two ^ sign) & 1) << SHIFT_THM_BL_J2) |
		((*upper & MASK_THM_BW_IMM10) << SHIFT_THM_BW_IMM10) |
		((*lower & MASK_THM_BL_IMM11) << 1);
	*offset = sign_extend(*offset, SHIFT_THM_JUMPS_SIGN);

	*offset += sym_base_addr - loc;
	/* Note: THM_JUMP_UPPER_BOUNDARY (0xff000000) is negative, -16 MiB */
	if (*offset >= THM_JUMP_LOWER_BOUNDARY || *offset <= THM_JUMP_UPPER_BOUNDARY) {
		LOG_ERR("sym '%s': relocation out of range (%#x -> %#x)\n",
			sym_name, loc, sym_base_addr);
		ret = -ENOEXEC;
	} else {
		ret = 0;
	}

	return ret;
}
/**
 * @brief Re-encode a Thumb-2 BL/B.W with the relocated offset.
 *
 * Splits the byte offset back into S, J1, J2, imm10 and imm11 fields
 * (J1 = NOT(I1) XOR S, J2 = NOT(I2) XOR S) and stores both half-words.
 *
 * @param loc Address of the first (upper) half-word being patched
 * @param offset Relocated branch byte offset
 * @param upper In: current upper half-word; out: patched upper half-word
 * @param lower In: current lower half-word; out: patched lower half-word
 */
static inline void thm_jumps_reloc(uint32_t loc, int32_t *offset,
				   uint32_t *upper, uint32_t *lower)
{
	uint32_t j_one, j_two, sign;

	sign = (*offset >> SHIFT_THM_JUMPS_SIGN) & 1;
	j_one = sign ^ (~(*offset >> SHIFT_THM_BL_J1) & 1);
	j_two = sign ^ (~(*offset >> SHIFT_THM_BL_J2) & 1);
	*upper = (uint16_t)((*upper & MASK_THM_BW_11110) | (sign << BIT_THM_BW_S) |
			((*offset >> SHIFT_THM_BW_IMM10) & MASK_THM_BW_IMM10));
	*lower = (uint16_t)((*lower & (MASK_THM_BL_10|MASK_THM_BL_1)) |
			(j_one << BIT_THM_BL_J1) | (j_two << BIT_THM_BL_J2) |
			((*offset >> 1) & MASK_THM_BL_IMM11));

	*(uint16_t *)loc = OPCODE2THM16MEM(*upper);
	*(uint16_t *)(loc + 2) = OPCODE2THM16MEM(*lower);
}
/**
 * @brief Apply a Thumb-2 BL/B.W relocation (R_ARM_THM_CALL/R_ARM_THM_JUMP24).
 *
 * Decodes the split branch offset, rebases it on @p sym_base_addr and
 * writes both patched half-words back when the target is in range.
 *
 * @return 0 on success, -ENOEXEC when the relocation is out of range
 */
static int thm_jumps_handler(elf_word reloc_type, uint32_t loc,
			     uint32_t sym_base_addr, const char *sym_name)
{
	uint32_t upper, lower;
	int32_t offset;
	int ret = thm_jumps_decode(reloc_type, loc, sym_base_addr, sym_name,
				   &offset, &upper, &lower);

	if (ret != 0) {
		return ret;
	}

	thm_jumps_reloc(loc, &offset, &upper, &lower);
	return 0;
}
/**
 * @brief Apply a Thumb-2 MOVW/MOVT relocation
 *        (R_ARM_THM_MOVW_ABS_NC / R_ARM_THM_MOVT_ABS /
 *        R_ARM_THM_MOVW_PREL_NC / R_ARM_THM_MOVT_PREL).
 *
 * The 16-bit immediate is split across the two half-words as
 * imm4 (upper 3:0), i (upper bit 10), imm3 (lower 14:12) and
 * imm8 (lower 7:0): value = imm4:i:imm3:imm8. It is reassembled,
 * rebased and re-encoded in place.
 *
 * @param reloc_type ELF relocation type, selects PREL/MOVT handling
 * @param loc Address of the first (upper) half-word being patched
 * @param sym_base_addr Address of the target symbol
 * @param sym_name Symbol name (unused)
 */
static void thm_movs_handler(elf_word reloc_type, uint32_t loc,
			     uint32_t sym_base_addr, const char *sym_name)
{
	int32_t offset;
	uint32_t upper, lower;

	upper = MEM2THM16OPCODE(*(uint16_t *)loc);
	lower = MEM2THM16OPCODE(*(uint16_t *)(loc + 2));

	/* MOVT/MOVW instructions encoding in Thumb-2 */
	offset = ((upper & MASK_THM_MOV_IMM4) << SHIFT_THM_MOV_IMM4) |
		((upper & MASK_THM_MOV_I) << SHIFT_THM_MOV_I) |
		((lower & MASK_THM_MOV_IMM3) >> SHIFT_THM_MOV_IMM3) | (lower & MASK_THM_MOV_IMM8);
	offset = sign_extend(offset, SHIFT_THM_MOVS_SIGN);

	offset += sym_base_addr;
	if (reloc_type == R_ARM_THM_MOVT_PREL || reloc_type == R_ARM_THM_MOVW_PREL_NC) {
		/* PC-relative variants subtract the place address */
		offset -= loc;
	}
	if (reloc_type == R_ARM_THM_MOVT_ABS || reloc_type == R_ARM_THM_MOVT_PREL) {
		/* MOVT encodes the upper half-word of the value */
		offset >>= 16;
	}

	/* Keep the opcode bits, splice the immediate fields back in */
	upper = (uint16_t)((upper & (MASK_THM_MOV_11110|MASK_THM_MOV_100100)) |
			((offset & (MASK_THM_MOV_IMM4<<SHIFT_THM_MOV_IMM4)) >> SHIFT_THM_MOV_IMM4) |
			((offset & (MASK_THM_MOV_I<<SHIFT_THM_MOV_I)) >> SHIFT_THM_MOV_I));
	lower = (uint16_t)((lower & (MASK_THM_MOV_0|MASK_THM_MOV_RD)) |
			((offset & (MASK_THM_MOV_IMM3>>SHIFT_THM_MOV_IMM3)) << SHIFT_THM_MOV_IMM3) |
			(offset & MASK_THM_MOV_IMM8));

	*(uint16_t *)loc = OPCODE2THM16MEM(upper);
	*(uint16_t *)(loc + 2) = OPCODE2THM16MEM(lower);
}
/**
 * @brief Architecture specific function for relocating partially linked (static) elf
 *
 * Elf files contain a series of relocations described in a section. These relocation
 * instructions are architecture specific and each architecture supporting extensions
 * must implement this.
 *
 * The relocation codes for arm are documented in the "ELF for the Arm
 * Architecture" ABI specification.
 *
 * Handler functions prefixed by 'thm_' are Thumb-instruction specific.
 * Do NOT mix them with non-Thumb handlers in the switch/case below: they are
 * not intended to work together.
 *
 * @param rel Relocation entry; r_info selects the relocation type
 * @param loc Address of the instruction/datum to patch
 * @param sym_base_addr Resolved address of the target symbol
 * @param sym_name Symbol name, used only for diagnostics
 * @param load_bias Load address bias, applied for R_ARM_RELATIVE
 * @return 0 on success, -ENOEXEC on unknown or out-of-range relocation
 */
int arch_elf_relocate(elf_rela_t *rel, uintptr_t loc, uintptr_t sym_base_addr,
		      const char *sym_name, uintptr_t load_bias)
{
	int ret = 0;
	elf_word reloc_type = ELF32_R_TYPE(rel->r_info);

	LOG_DBG("%d %lx %lx %s", reloc_type, loc, sym_base_addr, sym_name);

	switch (reloc_type) {
	case R_ARM_NONE:
		break;

	case R_ARM_ABS32:
	case R_ARM_TARGET1:
		/* Direct 32-bit: S + A */
		*(uint32_t *)loc += sym_base_addr;
		break;

	case R_ARM_PC24:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
		ret = jumps_handler(reloc_type, loc, sym_base_addr, sym_name);
		break;

	case R_ARM_V4BX:
		/* keep Rm and condition bits */
		*(uint32_t *)loc &= OPCODE2ARMMEM(MASK_V4BX_RM_COND);
		/* remove the rest */
		*(uint32_t *)loc |= OPCODE2ARMMEM(MASK_V4BX_NOT_RM_COND);
		break;

	case R_ARM_PREL31:
		ret = prel31_handler(reloc_type, loc, sym_base_addr, sym_name);
		break;

	case R_ARM_REL32:
		/* PC-relative 32-bit: S + A - P */
		*(uint32_t *)loc += sym_base_addr - loc;
		break;

	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
		movs_handler(reloc_type, loc, sym_base_addr, sym_name);
		break;

	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
		ret = thm_jumps_handler(reloc_type, loc, sym_base_addr, sym_name);
		break;

	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:
		thm_movs_handler(reloc_type, loc, sym_base_addr, sym_name);
		break;

	case R_ARM_RELATIVE:
		/* B(S) + A: rebase by the load bias */
		*(uint32_t *)loc += load_bias;
		break;

	case R_ARM_GLOB_DAT:
	case R_ARM_JUMP_SLOT:
		*(uint32_t *)loc = sym_base_addr;
		break;

	default:
		/* No trailing '\n': the logging subsystem terminates
		 * messages itself; an embedded newline produced a blank
		 * line in the log output.
		 */
		LOG_ERR("unknown relocation: %u", reloc_type);
		ret = -ENOEXEC;
	}

	return ret;
}
``` | /content/code_sandbox/arch/arm/core/elf.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,445 |
```c
/*
*
*/
/**
* @file
* @brief Kernel fatal error handler for ARM Cortex-M and Cortex-R
*
* This module provides the z_arm_fatal_error() routine for ARM Cortex-M
* and Cortex-R CPUs.
*/
#include <zephyr/kernel.h>
#include <kernel_arch_data.h>
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
#ifdef CONFIG_EXCEPTION_DEBUG
/**
 * @brief Dump the exception stack frame (ESF) to the error log.
 *
 * Always prints the basic (hardware-stacked) registers; additionally
 * prints FP registers when FPU sharing is enabled, and the callee-saved
 * registers plus EXC_RETURN when extra exception info is collected.
 *
 * @param esf Exception stack frame to dump (must be non-NULL)
 */
static void esf_dump(const struct arch_esf *esf)
{
	LOG_ERR("r0/a1: 0x%08x r1/a2: 0x%08x r2/a3: 0x%08x",
		esf->basic.a1, esf->basic.a2, esf->basic.a3);
	LOG_ERR("r3/a4: 0x%08x r12/ip: 0x%08x r14/lr: 0x%08x",
		esf->basic.a4, esf->basic.ip, esf->basic.lr);
	LOG_ERR(" xpsr: 0x%08x", esf->basic.xpsr);
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	/* Print single-precision registers four per line */
	for (int i = 0; i < ARRAY_SIZE(esf->fpu.s); i += 4) {
		LOG_ERR("s[%2d]: 0x%08x s[%2d]: 0x%08x"
			" s[%2d]: 0x%08x s[%2d]: 0x%08x",
			i, (uint32_t)esf->fpu.s[i],
			i + 1, (uint32_t)esf->fpu.s[i + 1],
			i + 2, (uint32_t)esf->fpu.s[i + 2],
			i + 3, (uint32_t)esf->fpu.s[i + 3]);
	}
#ifdef CONFIG_VFP_FEATURE_REGS_S64_D32
	/* With 32 double registers, also print the D bank */
	for (int i = 0; i < ARRAY_SIZE(esf->fpu.d); i += 4) {
		LOG_ERR("d[%2d]: 0x%16llx d[%2d]: 0x%16llx"
			" d[%2d]: 0x%16llx d[%2d]: 0x%16llx",
			i, (uint64_t)esf->fpu.d[i],
			i + 1, (uint64_t)esf->fpu.d[i + 1],
			i + 2, (uint64_t)esf->fpu.d[i + 2],
			i + 3, (uint64_t)esf->fpu.d[i + 3]);
	}
#endif
	LOG_ERR("fpscr: 0x%08x", esf->fpu.fpscr);
#endif
#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
	const struct _callee_saved *callee = esf->extra_info.callee;

	/* callee may be NULL, e.g. when extra info was not collected */
	if (callee != NULL) {
		LOG_ERR("r4/v1: 0x%08x r5/v2: 0x%08x r6/v3: 0x%08x",
			callee->v1, callee->v2, callee->v3);
		LOG_ERR("r7/v4: 0x%08x r8/v5: 0x%08x r9/v6: 0x%08x",
			callee->v4, callee->v5, callee->v6);
		LOG_ERR("r10/v7: 0x%08x r11/v8: 0x%08x psp: 0x%08x",
			callee->v7, callee->v8, callee->psp);
	}

	LOG_ERR("EXC_RETURN: 0x%0x", esf->extra_info.exc_return);
#endif /* CONFIG_EXTRA_EXCEPTION_INFO */
	LOG_ERR("Faulting instruction address (r15/pc): 0x%08x",
		esf->basic.pc);
}
#endif /* CONFIG_EXCEPTION_DEBUG */
/**
 * @brief Common ARM (Cortex-M/R) fatal error entry point.
 *
 * Optionally dumps the exception stack frame, logs the unhandled IRQ
 * number for spurious interrupts on Cortex-M, then forwards to the
 * kernel's common fatal error handling (z_fatal_error).
 *
 * @param reason fatal error reason (a K_ERR_* value)
 * @param esf exception stack frame, or NULL if unavailable
 */
void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf)
{
#ifdef CONFIG_EXCEPTION_DEBUG
	if (esf != NULL) {
		esf_dump(esf);
	}
#endif /* CONFIG_EXCEPTION_DEBUG */

	/* LOG the IRQn that was unhandled */
#if defined(CONFIG_CPU_CORTEX_M)
	if (reason == K_ERR_SPURIOUS_IRQ) {
		/* IPSR holds the active exception number; external IRQ
		 * numbering starts at exception 16.
		 */
		uint32_t irqn = __get_IPSR() - 16;

		LOG_ERR("Unhandled IRQn: %d", irqn);
	}
#endif

	z_fatal_error(reason, esf);
}
/**
 * @brief Handle a software-generated fatal exception
 * (e.g. kernel oops, panic, etc.).
 *
 * Notes:
 * - the function is invoked in SVC Handler
 * - if triggered from nPRIV mode, only oops and stack fail error reasons
 *   may be propagated to the fault handling process.
 * - We expect the supplied exception stack frame to always be a valid
 *   frame. That is because, if the ESF cannot be stacked during an SVC,
 *   a processor fault (e.g. stacking error) will be generated, and the
 *   fault handler will be executed instead of the SVC.
 *
 * @param esf exception frame
 * @param callee_regs Callee-saved registers (R4-R11)
 */
void z_do_kernel_oops(const struct arch_esf *esf, _callee_saved_t *callee_regs)
{
#if !(defined(CONFIG_EXTRA_EXCEPTION_INFO) && defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE))
	ARG_UNUSED(callee_regs);
#endif

	/* Stacked R0 holds the exception reason. */
	unsigned int reason = esf->basic.r0;

#if defined(CONFIG_USERSPACE)
	if (z_arm_preempted_thread_in_user_mode(esf)) {
		/*
		 * Exception triggered from user mode.
		 *
		 * User mode is only allowed to induce oopses and stack check
		 * failures via software-triggered system fatal exceptions.
		 */
		if (!((esf->basic.r0 == K_ERR_KERNEL_OOPS) ||
		      (esf->basic.r0 == K_ERR_STACK_CHK_FAIL))) {
			/* Any other reason from user mode is demoted to
			 * a plain kernel oops.
			 */
			reason = K_ERR_KERNEL_OOPS;
		}
	}
#endif /* CONFIG_USERSPACE */

#if !defined(CONFIG_EXTRA_EXCEPTION_INFO)
	z_arm_fatal_error(reason, esf);
#else
	/* Copy the basic part of the ESF so the extra_info field can be
	 * filled in locally without touching the caller's frame.
	 */
	struct arch_esf esf_copy;

	memcpy(&esf_copy, esf, offsetof(struct arch_esf, extra_info));
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	/* extra exception info is collected in callee_reg param
	 * on CONFIG_ARMV7_M_ARMV8_M_MAINLINE
	 */
	esf_copy.extra_info = (struct __extra_esf_info) {
		.callee = callee_regs,
	};
#else
	/* extra exception info is not collected for kernel oops
	 * path today so we make a copy of the ESF and zero out
	 * that information
	 */
	esf_copy.extra_info = (struct __extra_esf_info) { 0 };
#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */

	z_arm_fatal_error(reason, &esf_copy);
#endif /* CONFIG_EXTRA_EXCEPTION_INFO */
}
/**
 * @brief Fatal-error path for a failed system call.
 *
 * Builds a minimal exception stack frame carrying only the faulting PC
 * (taken from the syscall stack frame) and raises a kernel oops.
 *
 * @param ssf_ptr Syscall stack frame pointer (array of stacked words)
 */
FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr)
{
	struct arch_esf oops_esf = { 0 };
	const uint32_t *frame = ssf_ptr;

	/* TODO: Copy the rest of the register set out of ssf_ptr */
	oops_esf.basic.pc = frame[3];

	z_arm_fatal_error(K_ERR_KERNEL_OOPS, &oops_esf);
	CODE_UNREACHABLE;
}
``` | /content/code_sandbox/arch/arm/core/fatal.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,680 |
```unknown
# ARM core configuration options
config CPU_CORTEX_M
bool
select CPU_CORTEX
select ARCH_HAS_CUSTOM_SWAP_TO_MAIN
select HAS_CMSIS_CORE
select HAS_FLASH_LOAD_OFFSET
select ARCH_HAS_SINGLE_THREAD_SUPPORT
select ARCH_HAS_THREAD_ABORT
select ARCH_HAS_TRUSTED_EXECUTION if ARM_TRUSTZONE_M
select ARCH_HAS_STACK_PROTECTION if (ARM_MPU && !ARMV6_M_ARMV8_M_BASELINE) || CPU_CORTEX_M_HAS_SPLIM
select ARCH_HAS_USERSPACE if ARM_MPU
select ARCH_HAS_NOCACHE_MEMORY_SUPPORT if ARM_MPU && CPU_HAS_ARM_MPU && CPU_HAS_DCACHE
select ARCH_HAS_RAMFUNC_SUPPORT
select ARCH_HAS_NESTED_EXCEPTION_DETECTION
select SWAP_NONATOMIC
select ARCH_HAS_EXTRA_EXCEPTION_INFO
select ARCH_HAS_TIMING_FUNCTIONS if CPU_CORTEX_M_HAS_DWT
select ARCH_SUPPORTS_ARCH_HW_INIT
select ARCH_HAS_SUSPEND_TO_RAM
select ARCH_HAS_CODE_DATA_RELOCATION
select ARCH_SUPPORTS_ROM_START
imply XIP
help
This option signifies the use of a CPU of the Cortex-M family.
config CPU_AARCH32_CORTEX_R
bool
select CPU_CORTEX
select HAS_CMSIS_CORE
select ARCH_HAS_NESTED_EXCEPTION_DETECTION
select HAS_FLASH_LOAD_OFFSET
select ARCH_HAS_USERSPACE if ARM_MPU && !USE_SWITCH
select ARCH_HAS_EXTRA_EXCEPTION_INFO if !USE_SWITCH
select ARCH_HAS_CODE_DATA_RELOCATION
select ARCH_HAS_NOCACHE_MEMORY_SUPPORT if ARM_MPU && CPU_HAS_ARM_MPU && CPU_HAS_DCACHE
select ARCH_SUPPORTS_ROM_START
select USE_SWITCH_SUPPORTED
help
This option signifies the use of a CPU of the Cortex-R family.
config ARM_ZIMAGE_HEADER
bool "zImage Header"
depends on CPU_AARCH32_CORTEX_R || CPU_AARCH32_CORTEX_A || CPU_CORTEX_M_HAS_VTOR
help
This option adds a zImage Header.
config CPU_AARCH32_CORTEX_A
bool
select CPU_CORTEX
select CPU_HAS_MMU
select HAS_CMSIS_CORE
select HAS_FLASH_LOAD_OFFSET
select ARCH_HAS_EXTRA_EXCEPTION_INFO if !USE_SWITCH
select ARCH_HAS_NOCACHE_MEMORY_SUPPORT
select USE_SWITCH_SUPPORTED
# GDBSTUB has not yet been tested on Cortex M or R SoCs
select ARCH_HAS_GDBSTUB
# GDB on ARM needs the extra registers
select EXTRA_EXCEPTION_INFO if GDBSTUB
help
This option signifies the use of a CPU of the Cortex-A family.
config GDBSTUB_BUF_SZ
# GDB for ARM expects up to 18 4-byte plus 8 12-byte
# registers - 336 HEX letters
default 350 if GDBSTUB
config ISA_THUMB2
bool
help
From: path_to_url
Thumb-2 technology is the instruction set underlying the ARM Cortex
architecture which provides enhanced levels of performance, energy
efficiency, and code density for a wide range of embedded
applications.
Thumb-2 technology builds on the success of Thumb, the innovative
high code density instruction set for ARM microprocessor cores, to
increase the power of the ARM microprocessor core available to
developers of low cost, high performance systems.
The technology is backwards compatible with existing ARM and Thumb
solutions, while significantly extending the features available to
the Thumb instructions set. This allows more of the application to
benefit from the best in class code density of Thumb.
For performance optimized code Thumb-2 technology uses 31 percent
less memory to reduce system cost, while providing up to 38 percent
higher performance than existing high density code, which can be used
to prolong battery-life or to enrich the product feature set. Thumb-2
technology is featured in the processor, and in all ARMv7
architecture-based processors.
config ISA_ARM
bool
help
From: path_to_url
A32 instructions, known as Arm instructions in pre-Armv8 architectures,
are 32 bits wide, and are aligned on 4-byte boundaries. A32 instructions
are supported by both A-profile and R-profile architectures.
A32 was traditionally used in applications requiring the highest
performance, or for handling hardware exceptions such as interrupts and
processor start-up. Much of its functionality was subsumed into T32 with
the introduction of Thumb-2 technology.
config ASSEMBLER_ISA_THUMB2
bool
default y if ISA_THUMB2 && !ISA_ARM
depends on !ISA_ARM
help
This helper symbol specifies the default target instruction set for
the assembler.
When only the Thumb-2 ISA is supported (i.e. on Cortex-M cores), the
assembler must use the Thumb-2 instruction set.
When both the Thumb-2 and ARM ISAs are supported (i.e. on Cortex-A
and Cortex-R cores), the assembler must use the ARM instruction set
because the architecture assembly code makes use of the ARM
instructions.
config COMPILER_ISA_THUMB2
bool "Compile C/C++ functions using Thumb-2 instruction set"
depends on ISA_THUMB2
default y
help
This option configures the compiler to compile all C/C++ functions
using the Thumb-2 instruction set.
N.B. The scope of this symbol is not necessarily limited to the C and
C++ languages; in fact, this symbol refers to all forms of
"compiled" code.
When an additional natively-compiled language support is added
in the future, this symbol shall also specify the Thumb-2
instruction set for that language.
config NUM_IRQS
int
config STACK_ALIGN_DOUBLE_WORD
bool "Align stacks on double-words (8 octets)"
default y
help
This is needed to conform to AAPCS, the procedure call standard for
the ARM. It wastes stack space. The option also enforces alignment
of stack upon exception entry on Cortex-M3 and Cortex-M4 (ARMv7-M).
Note that for ARMv6-M, ARMv8-M, and Cortex-M7 MCUs stack alignment
on exception entry is enabled by default and it is not configurable.
config RUNTIME_NMI
bool "Attach an NMI handler at runtime"
select REBOOT
help
The kernel provides a simple NMI handler that simply hangs in a tight
loop if triggered. This fills the requirement that there must be an
NMI handler installed when the CPU boots. If a custom handler is
needed, enable this option and attach it via z_arm_nmi_set_handler().
config PLATFORM_SPECIFIC_INIT
bool "Platform (SOC) specific startup hook"
help
The platform specific initialization code (z_arm_platform_init) is
executed at the beginning of the startup code (__start).
config FAULT_DUMP
int "Fault dump level"
default 2
range 0 2
help
Different levels for display information when a fault occurs.
2: The default. Display specific and verbose information. Consumes
the most memory (long strings).
1: Display general and short information. Consumes less memory
(short strings).
0: Off.
config BUILTIN_STACK_GUARD
bool "Thread Stack Guards based on built-in ARM stack limit checking"
depends on CPU_CORTEX_M_HAS_SPLIM
select THREAD_STACK_INFO
help
Enable Thread/Interrupt Stack Guards via built-in Stack Pointer
limit checking. The functionality must be supported by HW.
config ARM_STACK_PROTECTION
bool
default y if HW_STACK_PROTECTION
imply BUILTIN_STACK_GUARD if CPU_CORTEX_M_HAS_SPLIM
select MPU_STACK_GUARD if (!BUILTIN_STACK_GUARD && ARM_MPU)
help
This option enables either:
- The built-in Stack Pointer limit checking, or
- the MPU-based stack guard
to cause a system fatal error
if the bounds of the current process stack are overflowed.
The two stack guard options are mutually exclusive. The
selection of the built-in Stack Pointer limit checking is
prioritized over the MPU-based stack guard. The developer
still has the option to manually select the MPU-based
stack guard, if this is desired.
config ARM_SECURE_FIRMWARE
bool
depends on ARMV8_M_SE
default y if TRUSTED_EXECUTION_SECURE
help
This option indicates that we are building a Zephyr image that
is intended to execute in Secure state. The option is only
applicable to ARMv8-M MCUs that implement the Security Extension.
This option enables Zephyr to include code that executes in
Secure state, as well as to exclude code that is designed to
execute only in Non-secure state.
Code executing in Secure state has access to both the Secure
and Non-Secure resources of the Cortex-M MCU.
Code executing in Non-Secure state may trigger Secure Faults,
if Secure MCU resources are accessed from the Non-Secure state.
Secure Faults may only be handled by code executing in Secure
state.
config ARM_NONSECURE_FIRMWARE
bool
depends on !ARM_SECURE_FIRMWARE
depends on ARMV8_M_SE
default y if TRUSTED_EXECUTION_NONSECURE
help
This option indicates that we are building a Zephyr image that
is intended to execute in Non-Secure state. Execution of this
image is triggered by Secure firmware that executes in Secure
state. The option is only applicable to ARMv8-M MCUs that
implement the Security Extension.
This option enables Zephyr to include code that executes in
Non-Secure state only, as well as to exclude code that is
designed to execute only in Secure state.
Code executing in Non-Secure state has no access to Secure
resources of the Cortex-M MCU, and, therefore, it shall avoid
accessing them.
config ARM_NONSECURE_PREEMPTIBLE_SECURE_CALLS
bool "Allow secure function calls to be preempted"
depends on ARM_NONSECURE_FIRMWARE
help
When enabled, this option indicates that preemptible Zephyr
threads performing secure function calls, are allowed to be
preempted. When disabled, the option indicates that such
threads may not be context-switched-out while doing a Secure
function call.
config ARM_STORE_EXC_RETURN
bool
default y if CPU_CORTEX_M && (FPU_SHARING || ARM_NONSECURE_PREEMPTIBLE_SECURE_CALLS)
help
Store the EXC_RETURN value when switching threads.
This is needed when switching between threads that differ in either
FPU usage or security domain.
choice
prompt "Floating point ABI"
default FP_HARDABI
depends on FPU
config FP_HARDABI
bool "Floating point Hard ABI"
help
This option selects the Floating point ABI in which hardware floating
point instructions are generated and uses FPU-specific calling
conventions.
config FP_SOFTABI
bool "Floating point Soft ABI"
help
This option selects the Floating point ABI in which hardware floating
point instructions are generated but soft-float calling conventions.
endchoice
config FP16
bool "Half-precision floating point support"
default y
help
This option enables the half-precision (16-bit) floating point support
via the `__fp16` (both IEEE and ARM alternative formats) and the
`_Float16` (defined by ISO/IEC TS 18661-3:2015) types.
choice
prompt "FP16 format"
default FP16_IEEE
depends on FP16
config FP16_IEEE
bool "FP16 IEEE format"
help
This option selects the IEEE 754-2008 format for FP16. This format can
represent normalized values in the range of 2^(-14) to 65504. There are
11 bits of significand precision, approximately 3 decimal digits.
config FP16_ALT
bool "FP16 ARM alternative format"
help
This option selects the ARM alternative format for FP16. This
representation is similar to the IEEE 754-2008 format, but does not
support infinities or NaNs. Instead, the range of exponents is extended,
so that this format can represent normalized values in the range of
2^(-14) to 131008.
Please note that Clang doesn't support the ARM alternative format.
endchoice
rsource "cortex_m/Kconfig"
rsource "cortex_a_r/Kconfig"
rsource "mpu/Kconfig"
rsource "mmu/Kconfig"
``` | /content/code_sandbox/arch/arm/core/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,707 |
```c
/*
*
*/
/**
* @file
* @brief NMI handler infrastructure
*
* Provides a boot time handler that simply hangs in a sleep loop, and a run
* time handler that resets the CPU. Also provides a mechanism for hooking a
* custom run time handler.
*/
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sys/printk.h>
#include <zephyr/sys/reboot.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
extern void z_SysNmiOnReset(void);
#if !defined(CONFIG_RUNTIME_NMI)
#define handler z_SysNmiOnReset
#endif
#ifdef CONFIG_RUNTIME_NMI
typedef void (*_NmiHandler_t)(void);
static _NmiHandler_t handler = z_SysNmiOnReset;
/**
 *
 * @brief Install a custom runtime NMI handler
 *
 * Meant to be called by platform code if they want to install a custom NMI
 * handler that reboots. It should be installed after the console is
 * initialized if it is meant to output to the console.
 *
 * @param pHandler Function invoked on NMI; replaces the previous handler
 *                 (default: z_SysNmiOnReset).
 */
void z_arm_nmi_set_handler(void (*pHandler)(void))
{
	handler = pHandler;
}
#endif /* CONFIG_RUNTIME_NMI */
/**
 *
 * @brief Handler installed in the vector table
 *
 * Simply call what is installed in 'static void(*handler)(void)'.
 *
 * With CONFIG_RUNTIME_NMI, 'handler' is a function pointer replaceable via
 * z_arm_nmi_set_handler(); otherwise 'handler' is #define'd directly to
 * z_SysNmiOnReset (see top of file).
 */
void z_arm_nmi(void)
{
	/* Dispatch to the installed handler, then run the arch-specific
	 * interrupt exit sequence.
	 */
	handler();
	z_arm_int_exit();
}
``` | /content/code_sandbox/arch/arm/core/nmi.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 319 |
```c
/*
*
*/
#include <gen_offset.h>
#include "offsets_aarch32.c"
GEN_ABS_SYM_END
``` | /content/code_sandbox/arch/arm/core/offsets/offsets.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 24 |
```c
/*
*
*/
/**
* @file
* @brief Cache manipulation
*
* This module contains functions for manipulation caches.
*/
#include <zephyr/arch/cpu.h>
#include <zephyr/cache.h>
#include <cmsis_core.h>
/* Turn the data cache on. */
void arch_dcache_enable(void)
{
	SCB_EnableDCache();
}

/* Turn the data cache off. */
void arch_dcache_disable(void)
{
	SCB_DisableDCache();
}

/* Clean (write back) the entire data cache. Always succeeds. */
int arch_dcache_flush_all(void)
{
	SCB_CleanDCache();
	return 0;
}

/* Invalidate (discard) the entire data cache. Always succeeds. */
int arch_dcache_invd_all(void)
{
	SCB_InvalidateDCache();
	return 0;
}

/* Clean, then invalidate, the entire data cache. Always succeeds. */
int arch_dcache_flush_and_invd_all(void)
{
	SCB_CleanInvalidateDCache();
	return 0;
}

/* Clean a byte range [start_addr, start_addr + size) of the data cache.
 * NOTE(review): the CMSIS by-address helpers operate on whole cache
 * lines — presumably callers pass line-aligned ranges; confirm.
 */
int arch_dcache_flush_range(void *start_addr, size_t size)
{
	SCB_CleanDCache_by_Addr(start_addr, size);
	return 0;
}

/* Invalidate a byte range of the data cache. */
int arch_dcache_invd_range(void *start_addr, size_t size)
{
	SCB_InvalidateDCache_by_Addr(start_addr, size);
	return 0;
}

/* Clean and invalidate a byte range of the data cache. */
int arch_dcache_flush_and_invd_range(void *start_addr, size_t size)
{
	SCB_CleanInvalidateDCache_by_Addr(start_addr, size);
	return 0;
}
/* Turn the instruction cache on. */
void arch_icache_enable(void)
{
	SCB_EnableICache();
}

/* Turn the instruction cache off. */
void arch_icache_disable(void)
{
	SCB_DisableICache();
}

/* There is no CMSIS SCB helper to clean ("flush") the I-cache;
 * report the operation as unsupported.
 */
int arch_icache_flush_all(void)
{
	return -ENOTSUP;
}

/* Invalidate the entire instruction cache. Always succeeds. */
int arch_icache_invd_all(void)
{
	SCB_InvalidateICache();
	return 0;
}

/* Not supported: see arch_icache_flush_all(). */
int arch_icache_flush_and_invd_all(void)
{
	return -ENOTSUP;
}

/* Not supported: see arch_icache_flush_all(). */
int arch_icache_flush_range(void *start_addr, size_t size)
{
	return -ENOTSUP;
}

/* Invalidate a byte range of the instruction cache. */
int arch_icache_invd_range(void *start_addr, size_t size)
{
	SCB_InvalidateICache_by_Addr(start_addr, size);
	return 0;
}

/* Not supported: see arch_icache_flush_all(). */
int arch_icache_flush_and_invd_range(void *start_addr, size_t size)
{
	return -ENOTSUP;
}
``` | /content/code_sandbox/arch/arm/core/cortex_m/cache.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 434 |
```c
/*
*
*/
/**
* @file
* @brief ARM kernel structure member offset definition file
*
* This module is responsible for the generation of the absolute symbols whose
* value represents the member offsets for various ARM kernel structures.
*
* All of the absolute symbols defined by this module will be present in the
* final kernel ELF image (due to the linker's reference to the _OffsetAbsSyms
* symbol).
*
* INTERNAL
* It is NOT necessary to define the offset for every member of a structure.
* Typically, only those members that are accessed by assembly language routines
* are defined; however, it doesn't hurt to define all fields for the sake of
* completeness.
*/
#ifndef _ARM_OFFSETS_INC_
#define _ARM_OFFSETS_INC_
#include <zephyr/kernel.h>
#include <kernel_arch_data.h>
#include <kernel_offsets.h>
/* Per-thread architecture data accessed by swap/exception assembly code */
GEN_OFFSET_SYM(_thread_arch_t, basepri);
GEN_OFFSET_SYM(_thread_arch_t, swap_return_value);
#if defined(CONFIG_CPU_AARCH32_CORTEX_A) || defined(CONFIG_CPU_AARCH32_CORTEX_R)
GEN_OFFSET_SYM(_thread_arch_t, exception_depth);
GEN_OFFSET_SYM(_cpu_arch_t, exc_depth);
#endif
#if defined(CONFIG_ARM_STORE_EXC_RETURN) || defined(CONFIG_USERSPACE)
GEN_OFFSET_SYM(_thread_arch_t, mode);
#endif
#if defined(CONFIG_ARM_STORE_EXC_RETURN)
GEN_OFFSET_SYM(_thread_arch_t, mode_exc_return);
#endif
#if defined(CONFIG_USERSPACE)
GEN_OFFSET_SYM(_thread_arch_t, priv_stack_start);
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
GEN_OFFSET_SYM(_thread_arch_t, priv_stack_end);
GEN_OFFSET_SYM(_thread_arch_t, sp_usr);
#endif
#endif
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
GEN_OFFSET_SYM(_thread_arch_t, preempt_float);
#endif

/* Exception stack frame member offsets and overall frame sizes */
GEN_OFFSET_SYM(_basic_sf_t, pc);
GEN_OFFSET_SYM(_basic_sf_t, xpsr);
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
GEN_OFFSET_SYM(_fpu_sf_t, fpscr);
GEN_ABSOLUTE_SYM(___fpu_t_SIZEOF, sizeof(_fpu_sf_t));
#endif
GEN_ABSOLUTE_SYM(___esf_t_SIZEOF, sizeof(_esf_t));

/* size of the entire preempt registers structure */
GEN_ABSOLUTE_SYM(___callee_saved_t_SIZEOF, sizeof(struct _callee_saved));
#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
GEN_ABSOLUTE_SYM(___extra_esf_info_t_SIZEOF, sizeof(struct __extra_esf_info));
#endif
#if defined(CONFIG_THREAD_STACK_INFO)
GEN_OFFSET_SYM(_thread_stack_info_t, start);
#endif

/*
 * CPU context for S2RAM
 * (offsets consumed by arch/arm/core/cortex_m/pm_s2ram.S)
 */
#if defined(CONFIG_PM_S2RAM)
GEN_OFFSET_SYM(_cpu_context_t, msp);
GEN_OFFSET_SYM(_cpu_context_t, msplim);
GEN_OFFSET_SYM(_cpu_context_t, psp);
GEN_OFFSET_SYM(_cpu_context_t, psplim);
GEN_OFFSET_SYM(_cpu_context_t, apsr);
GEN_OFFSET_SYM(_cpu_context_t, ipsr);
GEN_OFFSET_SYM(_cpu_context_t, epsr);
GEN_OFFSET_SYM(_cpu_context_t, primask);
GEN_OFFSET_SYM(_cpu_context_t, faultmask);
GEN_OFFSET_SYM(_cpu_context_t, basepri);
GEN_OFFSET_SYM(_cpu_context_t, control);
#endif /* CONFIG_PM_S2RAM */
#endif /* _ARM_OFFSETS_INC_ */
``` | /content/code_sandbox/arch/arm/core/offsets/offsets_aarch32.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 699 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <cmsis_core.h>
#include <zephyr/arch/arm/cortex_m/fpu.h>
/**
* @file @brief Helper functions for saving and restoring the FP context.
*
*/
/**
 * @brief Save the full FP context into @p buffer, if one is active.
 *
 * Acts only when CONTROL.FPCA indicates the current context has used the
 * FPU: stores s0-s31 and FPSCR, marks the buffer valid (ctx_saved), then
 * clears FPCA so no further FP stacking occurs.
 *
 * No-op unless CONFIG_FPU_SHARING is enabled.
 *
 * @param buffer Destination context buffer; must not be NULL.
 */
void z_arm_save_fp_context(struct fpu_ctx_full *buffer)
{
#if defined(CONFIG_FPU_SHARING)
	__ASSERT_NO_MSG(buffer != NULL);

	uint32_t CONTROL = __get_CONTROL();

	if (CONTROL & CONTROL_FPCA_Msk) {
		/* Store caller-saved and callee-saved FP registers. */
		__asm__ volatile(
			"vstmia %0, {s0-s15}\n"
			"vstmia %1, {s16-s31}\n"
			:: "r" (buffer->caller_saved), "r" (buffer->callee_saved) :
		);

		buffer->fpscr = __get_FPSCR();
		buffer->ctx_saved = true;

		/* Disable FPCA so no stacking of FP registers happens in TFM. */
		__set_CONTROL(CONTROL & ~CONTROL_FPCA_Msk);

		/* ISB is recommended after setting CONTROL. It's not needed
		 * here though, since FPCA should have no impact on instruction
		 * fetching.
		 */
	}
#endif
}
/**
 * @brief Restore an FP context previously stored by z_arm_save_fp_context().
 *
 * Acts only if @p buffer was marked valid (ctx_saved). Re-enables
 * CONTROL.FPCA first, then restores FPSCR and s0-s31.
 *
 * No-op unless CONFIG_FPU_SHARING is enabled.
 *
 * @param buffer Source context buffer.
 */
void z_arm_restore_fp_context(const struct fpu_ctx_full *buffer)
{
#if defined(CONFIG_FPU_SHARING)
	if (buffer->ctx_saved) {
		/* Set FPCA first so it is set even if an interrupt happens
		 * during restoration.
		 */
		__set_CONTROL(__get_CONTROL() | CONTROL_FPCA_Msk);

		/* Restore FP state. */
		__set_FPSCR(buffer->fpscr);

		__asm__ volatile(
			"vldmia %0, {s0-s15}\n"
			"vldmia %1, {s16-s31}\n"
			:: "r" (buffer->caller_saved), "r" (buffer->callee_saved) :
		);
	}
#endif
}
``` | /content/code_sandbox/arch/arm/core/cortex_m/fpu.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 437 |
```c
/*
*
*/
/**
* @file
* @brief ARM Cortex-M interrupt initialization
*
*/
#include <zephyr/arch/cpu.h>
#include <cmsis_core.h>
/**
 *
 * @brief Initialize interrupts
 *
 * Ensures all interrupts have their priority set to _EXC_IRQ_DEFAULT_PRIO and
 * not 0, which they have it set to when coming out of reset. This ensures that
 * interrupt locking via BASEPRI works as expected.
 *
 */
void z_arm_interrupt_init(void)
{
	for (int irq = 0; irq < CONFIG_NUM_IRQS; irq++) {
		/* Program every line to the default (non-zero) priority. */
		NVIC_SetPriority((IRQn_Type)irq, _IRQ_PRIO_OFFSET);
	}
}
``` | /content/code_sandbox/arch/arm/core/cortex_m/irq_init.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 144 |
```unknown
/*
*
*/
/**
* @file
* @brief ARM Cortex-M suspend-to-RAM code (S2RAM)
*/
#include <zephyr/toolchain.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/arch/common/pm_s2ram.h>
_ASM_FILE_PROLOGUE
GTEXT(pm_s2ram_mark_set)
GTEXT(pm_s2ram_mark_check_and_clear)
GDATA(_cpu_context)
SECTION_FUNC(TEXT, arch_pm_s2ram_suspend)
	/*
	 * Save the CPU context
	 *
	 * r0: address of the system_off function
	 */
	push {r4-r12, lr}

	/* r1 = &_cpu_context, r2 = scratch for each special register.
	 * Offsets come from offsets_aarch32.c (CONFIG_PM_S2RAM section).
	 */
	ldr r1, =_cpu_context
	mrs r2, msp
	str r2, [r1, #___cpu_context_t_msp_OFFSET]
	mrs r2, msplim
	str r2, [r1, #___cpu_context_t_msplim_OFFSET]
	mrs r2, psp
	str r2, [r1, #___cpu_context_t_psp_OFFSET]
	mrs r2, psplim
	str r2, [r1, #___cpu_context_t_psplim_OFFSET]
	mrs r2, apsr
	str r2, [r1, #___cpu_context_t_apsr_OFFSET]
	mrs r2, ipsr
	str r2, [r1, #___cpu_context_t_ipsr_OFFSET]
	mrs r2, epsr
	str r2, [r1, #___cpu_context_t_epsr_OFFSET]
	mrs r2, primask
	str r2, [r1, #___cpu_context_t_primask_OFFSET]
	mrs r2, faultmask
	str r2, [r1, #___cpu_context_t_faultmask_OFFSET]
	mrs r2, basepri
	str r2, [r1, #___cpu_context_t_basepri_OFFSET]
	mrs r2, control
	str r2, [r1, #___cpu_context_t_control_OFFSET]

	/*
	 * Mark entering suspend to RAM.
	 */
	bl pm_s2ram_mark_set

	/*
	 * Call the system_off function passed as parameter. This should never
	 * return.
	 */
	blx r0

	/*
	 * The system_off function returns here only when the powering off was
	 * not successful (in r0 the return value).
	 */

	/*
	 * Reset the marking of suspend to RAM, return is ignored.
	 * system_off's error code in r0 is preserved across the call.
	 */
	push {r0}
	bl pm_s2ram_mark_check_and_clear
	pop {r0}

	pop {r4-r12, lr}
	bx lr
GTEXT(arch_pm_s2ram_resume)
SECTION_FUNC(TEXT, arch_pm_s2ram_resume)
	/*
	 * Check if reset occurred after suspending to RAM.
	 * pm_s2ram_mark_check_and_clear returns 1 (true) only if the
	 * marker was set, i.e. this boot is an S2RAM wakeup.
	 */
	push {lr}
	bl pm_s2ram_mark_check_and_clear
	cmp r0, #0x1
	pop {lr}
	beq resume
	/* Cold boot: nothing to restore. */
	bx lr

resume:
	/*
	 * Restore the CPU context saved by arch_pm_s2ram_suspend.
	 */
	ldr r0, =_cpu_context
	ldr r1, [r0, #___cpu_context_t_msp_OFFSET]
	msr msp, r1
	ldr r1, [r0, #___cpu_context_t_msplim_OFFSET]
	msr msplim, r1
	ldr r1, [r0, #___cpu_context_t_psp_OFFSET]
	msr psp, r1
	ldr r1, [r0, #___cpu_context_t_psplim_OFFSET]
	msr psplim, r1
	ldr r1, [r0, #___cpu_context_t_apsr_OFFSET]
	msr apsr_nzcvq, r1
	/* NOTE(review): MSR writes to IPSR/EPSR are ignored by the
	 * hardware, so the next two restores are presumably no-ops;
	 * confirm intent.
	 */
	ldr r1, [r0, #___cpu_context_t_ipsr_OFFSET]
	msr ipsr, r1
	ldr r1, [r0, #___cpu_context_t_epsr_OFFSET]
	msr epsr, r1
	ldr r1, [r0, #___cpu_context_t_primask_OFFSET]
	msr primask, r1
	ldr r1, [r0, #___cpu_context_t_faultmask_OFFSET]
	msr faultmask, r1
	ldr r1, [r0, #___cpu_context_t_basepri_OFFSET]
	msr basepri, r1
	ldr r1, [r0, #___cpu_context_t_control_OFFSET]
	msr control, r1
	isb

	/* MSP is restored above, so the registers pushed by
	 * arch_pm_s2ram_suspend can now be popped.
	 */
	pop {r4-r12, lr}

	/*
	 * Set the return value and return
	 */
	mov r0, #0
	bx lr
``` | /content/code_sandbox/arch/arm/core/cortex_m/pm_s2ram.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,001 |
```c
/*
*
*/
/**
* @file
* @brief New thread creation for ARM Cortex-M
*
* Core thread related primitives for the ARM Cortex-M
* processor architecture.
*/
#include <zephyr/kernel.h>
#include <zephyr/llext/symbol.h>
#include <ksched.h>
#include <zephyr/sys/barrier.h>
#include <stdbool.h>
#include <cmsis_core.h>
#if (MPU_GUARD_ALIGN_AND_SIZE_FLOAT > MPU_GUARD_ALIGN_AND_SIZE)
#define FP_GUARD_EXTRA_SIZE (MPU_GUARD_ALIGN_AND_SIZE_FLOAT - \
MPU_GUARD_ALIGN_AND_SIZE)
#else
#define FP_GUARD_EXTRA_SIZE 0
#endif
#ifndef EXC_RETURN_FTYPE
/* bit [4] allocate stack for floating-point context: 0=done 1=skipped */
#define EXC_RETURN_FTYPE (0x00000010UL)
#endif
/* Default last octet of EXC_RETURN, for threads that have not run yet.
* The full EXC_RETURN value will be e.g. 0xFFFFFFBC.
*/
#if defined(CONFIG_ARM_NONSECURE_FIRMWARE)
#define DEFAULT_EXC_RETURN 0xBC;
#else
#define DEFAULT_EXC_RETURN 0xFD;
#endif
#if !defined(CONFIG_MULTITHREADING)
K_THREAD_STACK_DECLARE(z_main_stack, CONFIG_MAIN_STACK_SIZE);
#endif
/* An initial context, to be "restored" by z_arm_pendsv(), is put at the other
* end of the stack, and thus reusable by the stack when not needed anymore.
*
* The initial context is an exception stack frame (ESF) since exiting the
* PendSV exception will want to pop an ESF. Interestingly, even if the lsb of
* an instruction address to jump to must always be set since the CPU always
* runs in thumb mode, the ESF expects the real address of the instruction,
* with the lsb *not* set (instructions are always aligned on 16 bit
* halfwords). Since the compiler automatically sets the lsb of function
* addresses, we have to unset it manually before storing it in the 'pc' field
* of the ESF.
*/
/**
 * @brief Initialize a new thread's stack frame and arch-specific state.
 *
 * Carves the MPU guard area(s) out of the stack buffer where applicable,
 * builds an initial exception stack frame (ESF) at the top of the stack
 * for z_arm_pendsv() to "restore" into, and initializes the thread's
 * arch bookkeeping (basepri, mode, EXC_RETURN, privileged stack).
 */
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3)
{
	struct __basic_sf *iframe;

#ifdef CONFIG_MPU_STACK_GUARD
#if defined(CONFIG_USERSPACE)
	if (z_stack_is_user_capable(stack)) {
		/* Guard area is carved-out of the buffer instead of reserved
		 * for stacks that can host user threads
		 */
		thread->stack_info.start += MPU_GUARD_ALIGN_AND_SIZE;
		thread->stack_info.size -= MPU_GUARD_ALIGN_AND_SIZE;
	}
#endif /* CONFIG_USERSPACE */
#if FP_GUARD_EXTRA_SIZE > 0
	if ((thread->base.user_options & K_FP_REGS) != 0) {
		/* Larger guard needed due to lazy stacking of FP regs may
		 * overshoot the guard area without writing anything. We
		 * carve it out of the stack buffer as-needed instead of
		 * unconditionally reserving it.
		 */
		thread->stack_info.start += FP_GUARD_EXTRA_SIZE;
		thread->stack_info.size -= FP_GUARD_EXTRA_SIZE;
	}
#endif /* FP_GUARD_EXTRA_SIZE */
#endif /* CONFIG_MPU_STACK_GUARD */

	iframe = Z_STACK_PTR_TO_FRAME(struct __basic_sf, stack_ptr);

#if defined(CONFIG_USERSPACE)
	/* User threads start in arch_user_mode_enter(), which drops
	 * privileges before invoking the user entry point.
	 */
	if ((thread->base.user_options & K_USER) != 0) {
		iframe->pc = (uint32_t)arch_user_mode_enter;
	} else {
		iframe->pc = (uint32_t)z_thread_entry;
	}
#else
	iframe->pc = (uint32_t)z_thread_entry;
#endif

	/* The ESF expects the instruction address with bit 0 cleared;
	 * the Thumb bit comes from xPSR.T (set below), not from the PC
	 * (see the note above this function).
	 */
	iframe->pc &= 0xfffffffe;

	iframe->a1 = (uint32_t)entry;
	iframe->a2 = (uint32_t)p1;
	iframe->a3 = (uint32_t)p2;
	iframe->a4 = (uint32_t)p3;

	iframe->xpsr =
		0x01000000UL; /* clear all, thumb bit is 1, even if RO */

	thread->callee_saved.psp = (uint32_t)iframe;
	thread->arch.basepri = 0;

#if defined(CONFIG_ARM_STORE_EXC_RETURN) || defined(CONFIG_USERSPACE)
	thread->arch.mode = 0;
#if defined(CONFIG_ARM_STORE_EXC_RETURN)
	thread->arch.mode_exc_return = DEFAULT_EXC_RETURN;
#endif
#if FP_GUARD_EXTRA_SIZE > 0
	if ((thread->base.user_options & K_FP_REGS) != 0) {
		/* Remember that this thread uses the larger FP-aware guard */
		thread->arch.mode |= Z_ARM_MODE_MPU_GUARD_FLOAT_Msk;
	}
#endif
#if defined(CONFIG_USERSPACE)
	thread->arch.priv_stack_start = 0;
#endif
#endif
	/*
	 * initial values in all other registers/thread entries are
	 * irrelevant.
	 */
}
#if defined(CONFIG_MPU_STACK_GUARD) && defined(CONFIG_FPU) \
&& defined(CONFIG_FPU_SHARING)
/* Switch a thread between the default and the large ("float") MPU stack
 * guard, keeping stack_info / priv_stack_start consistent with the guard
 * size in use. No-op if the requested guard size is already active.
 */
static inline void z_arm_thread_stack_info_adjust(struct k_thread *thread,
	bool use_large_guard)
{
	if (use_large_guard) {
		/* Switch to use a large MPU guard if not already. */
		if ((thread->arch.mode &
			Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) == 0) {
			/* Default guard size is used. Update required. */
			thread->arch.mode |= Z_ARM_MODE_MPU_GUARD_FLOAT_Msk;
#if defined(CONFIG_USERSPACE)
			if (thread->arch.priv_stack_start) {
				/* User thread: guard lives in the privileged
				 * stack, so only its start moves.
				 */
				thread->arch.priv_stack_start +=
					FP_GUARD_EXTRA_SIZE;
			} else
#endif /* CONFIG_USERSPACE */
			{
				/* Privileged thread: guard is carved out of
				 * the thread stack buffer itself.
				 */
				thread->stack_info.start +=
					FP_GUARD_EXTRA_SIZE;
				thread->stack_info.size -=
					FP_GUARD_EXTRA_SIZE;
			}
		}
	} else {
		/* Switch to use the default MPU guard size if not already. */
		if ((thread->arch.mode &
			Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
			/* Large guard size is used. Update required. */
			thread->arch.mode &= ~Z_ARM_MODE_MPU_GUARD_FLOAT_Msk;
#if defined(CONFIG_USERSPACE)
			if (thread->arch.priv_stack_start) {
				/* User thread */
				thread->arch.priv_stack_start -=
					FP_GUARD_EXTRA_SIZE;
			} else
#endif /* CONFIG_USERSPACE */
			{
				/* Privileged thread */
				thread->stack_info.start -=
					FP_GUARD_EXTRA_SIZE;
				thread->stack_info.size +=
					FP_GUARD_EXTRA_SIZE;
			}
		}
	}
}
/*
 * Adjust the MPU stack guard size together with the FPU
 * policy and the stack_info values for the thread that is
 * being switched in.
 *
 * Returns the MPU guard size (in bytes) to program for this thread.
 */
uint32_t z_arm_mpu_stack_guard_and_fpu_adjust(struct k_thread *thread)
{
	if (((thread->base.user_options & K_FP_REGS) != 0) ||
		((thread->arch.mode_exc_return & EXC_RETURN_FTYPE) == 0)) {
		/* The thread has been pre-tagged (at creation or later) with
		 * K_FP_REGS, i.e. it is expected to be using the FPU registers
		 * (if not already). Activate lazy stacking and program a large
		 * MPU guard to safely detect privilege thread stack overflows.
		 *
		 * OR
		 * The thread is not pre-tagged with K_FP_REGS, but it has
		 * generated an FP context. Activate lazy stacking and
		 * program a large MPU guard to detect privilege thread
		 * stack overflows.
		 */
		FPU->FPCCR |= FPU_FPCCR_LSPEN_Msk;
		z_arm_thread_stack_info_adjust(thread, true);

		/* Tag the thread with K_FP_REGS */
		thread->base.user_options |= K_FP_REGS;

		return MPU_GUARD_ALIGN_AND_SIZE_FLOAT;
	}

	/* Thread is not pre-tagged with K_FP_REGS, and it has
	 * not been using the FPU. Since there is no active FPU
	 * context, de-activate lazy stacking and program the
	 * default MPU guard size.
	 */
	FPU->FPCCR &= (~FPU_FPCCR_LSPEN_Msk);
	z_arm_thread_stack_info_adjust(thread, false);

	return MPU_GUARD_ALIGN_AND_SIZE;
}
#endif
#ifdef CONFIG_USERSPACE
/* Drop the current thread from privileged to user mode.
 *
 * Locates the thread's privileged stack, re-adds the MPU guard area that
 * was borrowed from the thread stack (the guard now protects the
 * privileged stack instead), then enters userspace. Never returns.
 */
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
	void *p1, void *p2, void *p3)
{
	/* Set up privileged stack before entering user mode */
	_current->arch.priv_stack_start =
		(uint32_t)z_priv_stack_find(_current->stack_obj);
#if defined(CONFIG_MPU_STACK_GUARD)
#if defined(CONFIG_THREAD_STACK_INFO)
	/* We're dropping to user mode which means the guard area is no
	 * longer used here, it instead is moved to the privilege stack
	 * to catch stack overflows there. Un-do the calculations done
	 * which accounted for memory borrowed from the thread stack.
	 */
#if FP_GUARD_EXTRA_SIZE > 0
	if ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
		_current->stack_info.start -= FP_GUARD_EXTRA_SIZE;
		_current->stack_info.size += FP_GUARD_EXTRA_SIZE;
	}
#endif /* FP_GUARD_EXTRA_SIZE */
	_current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
	_current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_THREAD_STACK_INFO */
	/* Stack guard area reserved at the bottom of the thread's
	 * privileged stack. Adjust the available (writable) stack
	 * buffer area accordingly.
	 */
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	_current->arch.priv_stack_start +=
		((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
		MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else
	_current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
#endif /* CONFIG_MPU_STACK_GUARD */

	z_arm_userspace_enter(user_entry, p1, p2, p3,
			      (uint32_t)_current->stack_info.start,
			      _current->stack_info.size -
			      _current->stack_info.delta);
	CODE_UNREACHABLE;
}
/**
 * @brief Report whether the CPU is currently executing unprivileged code.
 *
 * Reads the CONTROL special register and tests the nPRIV bit.
 *
 * @return true if CONTROL.nPRIV is set (thread runs in user mode).
 */
bool z_arm_thread_is_in_user_mode(void)
{
	const uint32_t control = __get_CONTROL();

	return (control & CONTROL_nPRIV_Msk) == CONTROL_nPRIV_Msk;
}
EXPORT_SYMBOL(z_arm_thread_is_in_user_mode);
#endif
#if defined(CONFIG_BUILTIN_STACK_GUARD)
/*
* @brief Configure ARM built-in stack guard
*
* This function configures per thread stack guards by reprogramming
* the built-in Process Stack Pointer Limit Register (PSPLIM).
* The functionality is meant to be used during context switch.
*
* @param thread thread info data structure.
*/
void configure_builtin_stack_guard(struct k_thread *thread)
{
#if defined(CONFIG_USERSPACE)
	if ((thread->arch.mode & CONTROL_nPRIV_Msk) != 0) {
		/* Only configure stack limit for threads in privileged mode
		 * (i.e supervisor threads or user threads doing system call).
		 * User threads executing in user mode do not require a stack
		 * limit protection.
		 *
		 * A limit of 0 effectively disables the check (a descending
		 * stack can never drop below address 0).
		 */
		__set_PSPLIM(0);
		return;
	}
	/* Only configure PSPLIM to guard the privileged stack area, if
	 * the thread is currently using it, otherwise guard the default
	 * thread stack. Note that the conditional check relies on the
	 * thread privileged stack being allocated in higher memory area
	 * than the default thread stack (ensured by design).
	 */
	uint32_t guard_start =
		((thread->arch.priv_stack_start) &&
			(__get_PSP() >= thread->arch.priv_stack_start)) ?
		(uint32_t)thread->arch.priv_stack_start :
		(uint32_t)thread->stack_obj;

	__ASSERT(thread->stack_info.start == ((uint32_t)thread->stack_obj),
		"stack_info.start does not point to the start of the"
		"thread allocated area.");
#else
	uint32_t guard_start = thread->stack_info.start;
#endif
#if defined(CONFIG_CPU_CORTEX_M_HAS_SPLIM)
	__set_PSPLIM(guard_start);
#else
#error "Built-in PSP limit checks not supported by HW"
#endif
}
#endif /* CONFIG_BUILTIN_STACK_GUARD */
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
#define IS_MPU_GUARD_VIOLATION(guard_start, guard_len, fault_addr, stack_ptr) \
((fault_addr != -EINVAL) ? \
((fault_addr >= guard_start) && \
(fault_addr < (guard_start + guard_len)) && \
(stack_ptr < (guard_start + guard_len))) \
: \
(stack_ptr < (guard_start + guard_len)))
/**
* @brief Assess occurrence of current thread's stack corruption
*
* This function performs an assessment whether a memory fault (on a
* given memory address) is the result of stack memory corruption of
* the current thread.
*
* Thread stack corruption for supervisor threads or user threads in
* privilege mode (when User Space is supported) is reported upon an
* attempt to access the stack guard area (if MPU Stack Guard feature
* is supported). Additionally the current PSP (process stack pointer)
* must be pointing inside or below the guard area.
*
* Thread stack corruption for user threads in user mode is reported,
* if the current PSP is pointing below the start of the current
* thread's stack.
*
* Notes:
* - we assume a fully descending stack,
* - we assume a stacking error has occurred,
* - the function shall be called when handling MemManage and Bus fault,
* and only if a Stacking error has been reported.
*
* If stack corruption is detected, the function returns the lowest
* allowed address where the Stack Pointer can safely point to, to
* prevent from errors when un-stacking the corrupted stack frame
* upon exception return.
*
* @param fault_addr memory address on which memory access violation
* has been reported. It can be invalid (-EINVAL),
* if only Stacking error has been reported.
* @param psp current address the PSP points to
*
* @return The lowest allowed stack frame pointer, if error is a
* thread stack corruption, otherwise return 0.
*/
uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp)
{
#if defined(CONFIG_MULTITHREADING)
	const struct k_thread *thread = _current;

	if (thread == NULL) {
		return 0;
	}
#endif
#if (defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)) && \
	defined(CONFIG_MPU_STACK_GUARD)
	/* Guard size depends on whether the thread uses the larger
	 * FP-aware guard.
	 * NOTE(review): reads _current->arch.mode directly although
	 * 'thread' (== _current) is used elsewhere in this function, and
	 * 'thread' is only declared under CONFIG_MULTITHREADING --
	 * presumably FPU_SHARING implies MULTITHREADING; confirm.
	 */
	uint32_t guard_len =
		((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
		MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else
	/* If MPU_STACK_GUARD is not enabled, the guard length is
	 * effectively zero. Stack overflows may be detected only
	 * for user threads in nPRIV mode.
	 */
	uint32_t guard_len = MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
#if defined(CONFIG_USERSPACE)
	if (thread->arch.priv_stack_start) {
		/* User thread */
		if (z_arm_thread_is_in_user_mode() == false) {
			/* User thread in privilege mode */
			if (IS_MPU_GUARD_VIOLATION(
				thread->arch.priv_stack_start - guard_len,
				guard_len,
				fault_addr, psp)) {
				/* Thread's privilege stack corruption */
				return thread->arch.priv_stack_start;
			}
		} else {
			if (psp < (uint32_t)thread->stack_obj) {
				/* Thread's user stack corruption */
				return (uint32_t)thread->stack_obj;
			}
		}
	} else {
		/* Supervisor thread */
		if (IS_MPU_GUARD_VIOLATION(thread->stack_info.start -
				guard_len,
				guard_len,
				fault_addr, psp)) {
			/* Supervisor thread stack corruption */
			return thread->stack_info.start;
		}
	}
#else /* CONFIG_USERSPACE */
#if defined(CONFIG_MULTITHREADING)
	if (IS_MPU_GUARD_VIOLATION(thread->stack_info.start - guard_len,
			guard_len,
			fault_addr, psp)) {
		/* Thread stack corruption */
		return thread->stack_info.start;
	}
#else
	/* Single-threaded build: only the main stack exists. */
	if (IS_MPU_GUARD_VIOLATION((uint32_t)z_main_stack,
			guard_len,
			fault_addr, psp)) {
		/* Thread stack corruption */
		return (uint32_t)K_THREAD_STACK_BUFFER(z_main_stack);
	}
#endif
#endif /* CONFIG_USERSPACE */
	/* No stack corruption detected. */
	return 0;
}
#endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/**
 * @brief Disable floating-point use for a thread (Cortex-M, FP sharing).
 *
 * Clears the thread's K_FP_REGS option and the CONTROL.FPCA flag so the
 * thread is no longer treated as an FPU user.
 *
 * @param thread Thread to disable FP for; must be the current thread.
 *
 * @retval 0 on success.
 * @retval -EINVAL if @p thread is not the current thread, or if called
 *         from ISR context.
 */
int arch_float_disable(struct k_thread *thread)
{
	if (thread != _current) {
		return -EINVAL;
	}

	if (arch_is_in_isr()) {
		return -EINVAL;
	}

	/* Disable all floating point capabilities for the thread.
	 *
	 * K_FP_REG flag is used in SWAP and stack check fail. Locking
	 * interrupts here prevents a possible context-switch or MPU
	 * fault to take an outdated thread user_options flag into
	 * account.
	 *
	 * Note: the lock key is unsigned, matching arch_irq_lock()'s
	 * return type (was previously stored in a signed int).
	 */
	unsigned int key = arch_irq_lock();

	thread->base.user_options &= ~K_FP_REGS;

	__set_CONTROL(__get_CONTROL() & (~CONTROL_FPCA_Msk));

	/* No need to add an ISB barrier after setting the CONTROL
	 * register; arch_irq_unlock() already adds one.
	 */
	arch_irq_unlock(key);

	return 0;
}
/**
 * @brief Enable floating-point use for a thread.
 *
 * @return -ENOTSUP always; runtime FP enabling is not supported here.
 */
int arch_float_enable(struct k_thread *thread, unsigned int options)
{
	/* This is not supported in Cortex-M */
	return -ENOTSUP;
}
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
/* Internal function for Cortex-M initialization,
 * applicable to either case of running Zephyr
 * with or without multi-threading support.
 *
 * Brings the FPU state (FPSCR, CONTROL.FPCA) to a clean baseline before
 * the first thread/entry function runs.
 */
static void z_arm_prepare_switch_to_main(void)
{
#if defined(CONFIG_FPU)
	/* Initialize the Floating Point Status and Control Register when in
	 * Unshared FP Registers mode (In Shared FP Registers mode, FPSCR is
	 * initialized at thread creation for threads that make use of the FP).
	 */
#if defined(CONFIG_ARMV8_1_M_MAINLINE)
	/*
	 * For ARMv8.1-M with FPU, the FPSCR[18:16] LTPSIZE field must be set
	 * to 0b100 for "Tail predication not applied" as it's reset value
	 */
	__set_FPSCR(4 << FPU_FPDSCR_LTPSIZE_Pos);
#else
	__set_FPSCR(0);
#endif
#if defined(CONFIG_FPU_SHARING)
	/* In Sharing mode clearing FPSCR may set the CONTROL.FPCA flag. */
	__set_CONTROL(__get_CONTROL() & (~(CONTROL_FPCA_Msk)));
	barrier_isync_fence_full();
#endif /* CONFIG_FPU_SHARING */
#endif /* CONFIG_FPU */
}
/* Hand control from the init context to the main thread: set up FP state,
 * TLS, MPU regions and stack limits for main, then switch PSP to main's
 * stack, unlock interrupts and jump to z_thread_entry. Never returns.
 */
void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
	k_thread_entry_t _main)
{
	z_arm_prepare_switch_to_main();

	_current = main_thread;

#if defined(CONFIG_THREAD_LOCAL_STORAGE)
	/* On Cortex-M, TLS uses a global variable as pointer to
	 * the thread local storage area. So this needs to point
	 * to the main thread's TLS area before switching to any
	 * thread for the first time, as the pointer is only set
	 * during context switching.
	 */
	extern uintptr_t z_arm_tls_ptr;

	z_arm_tls_ptr = main_thread->tls;
#endif

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	z_thread_mark_switched_in();
#endif

	/* the ready queue cache already contains the main thread */

#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
	/*
	 * If stack protection is enabled, make sure to set it
	 * before jumping to thread entry function
	 */
	z_arm_configure_dynamic_mpu_regions(main_thread);
#endif

#if defined(CONFIG_BUILTIN_STACK_GUARD)
	/* Set PSPLIM register for built-in stack guarding of main thread. */
#if defined(CONFIG_CPU_CORTEX_M_HAS_SPLIM)
	__set_PSPLIM(main_thread->stack_info.start);
#else
#error "Built-in PSP limit checks not supported by the hardware."
#endif
#endif /* CONFIG_BUILTIN_STACK_GUARD */

	/*
	 * Set PSP to the highest address of the main stack
	 * before enabling interrupts and jumping to main.
	 *
	 * The compiler may store _main on the stack, but this
	 * location is relative to `PSP`.
	 * This assembly block ensures that _main is stored in
	 * a callee saved register before switching stack and continuing
	 * with the thread entry process.
	 *
	 * When calling arch_irq_unlock_outlined, LR is lost which is fine since
	 * we do not intend to return after calling z_thread_entry.
	 */
	__asm__ volatile (
	"mov r4, %0\n"	/* force _main to be stored in a register */
	"msr PSP, %1\n"	/* __set_PSP(stack_ptr) */
	"mov r0, #0\n"	/* arch_irq_unlock(0) */
	"ldr r3, =arch_irq_unlock_outlined\n"
	"blx r3\n"
	"mov r0, r4\n"	/* z_thread_entry(_main, NULL, NULL, NULL) */
	"mov r1, #0\n"
	"mov r2, #0\n"
	"mov r3, #0\n"
	"ldr r4, =z_thread_entry\n"
	"bx r4\n"	/* We dont intend to return, so there is no need to link. */
	: "+r" (_main)
	: "r" (stack_ptr)
	: "r0", "r1", "r2", "r3", "r4", "ip", "lr");

	CODE_UNREACHABLE;
}
/* Out-of-line irq unlock helper so inline assembly can load its address
 * into a register and blx to it.
 */
__used void arch_irq_unlock_outlined(unsigned int key)
{
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	__enable_fault_irq(); /* alters FAULTMASK */
	__enable_irq(); /* alters PRIMASK */
#endif
	arch_irq_unlock(key);
}
/* Out-of-line irq lock helper for use from inline assembly. */
__used unsigned int arch_irq_lock_outlined(void)
{
	return arch_irq_lock();
}
#if !defined(CONFIG_MULTITHREADING)
/* Single-threaded variant: run main_entry() directly on the main stack
 * (no scheduler involved), then spin forever with interrupts locked.
 */
FUNC_NORETURN void z_arm_switch_to_main_no_multithreading(
	k_thread_entry_t main_entry, void *p1, void *p2, void *p3)
{
	z_arm_prepare_switch_to_main();

	/* Set PSP to the highest address of the main stack. */
	char *psp = K_THREAD_STACK_BUFFER(z_main_stack) +
		K_THREAD_STACK_SIZEOF(z_main_stack);

#if defined(CONFIG_BUILTIN_STACK_GUARD)
	char *psplim = (K_THREAD_STACK_BUFFER(z_main_stack));
	/* Clear PSPLIM before setting it to guard the main stack area. */
	__set_PSPLIM(0);
#endif

	/* Store all required input in registers, to be accessible
	 * after stack pointer change. The function is not going
	 * to return, so callee-saved registers do not need to be
	 * stacked.
	 *
	 * The compiler may store _main on the stack, but this
	 * location is relative to `PSP`.
	 * This assembly block ensures that _main is stored in
	 * a callee saved register before switching stack and continuing
	 * with the thread entry process.
	 */
	__asm__ volatile (
#ifdef CONFIG_BUILTIN_STACK_GUARD
	"msr PSPLIM, %[_psplim]\n" /* __set_PSPLIM(_psplim) */
#endif
	"msr PSP, %[_psp]\n" /* __set_PSP(psp) */
	"mov r0, #0\n"
	"ldr r1, =arch_irq_unlock_outlined\n"
	"blx r1\n"
	"mov r0, %[_p1]\n"
	"mov r1, %[_p2]\n"
	"mov r2, %[_p3]\n"
	"blx %[_main_entry]\n" /* main_entry(p1, p2, p3) */
	"ldr r0, =arch_irq_lock_outlined\n"
	"blx r0\n"
	"loop: b loop\n\t" /* while (true); */
	:
	: [_p1]"r" (p1), [_p2]"r" (p2), [_p3]"r" (p3),
	[_psp]"r" (psp), [_main_entry]"r" (main_entry)
#ifdef CONFIG_BUILTIN_STACK_GUARD
	, [_psplim]"r" (psplim)
#endif
	: "r0", "r1", "r2", "ip", "lr"
	);

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
#endif /* !CONFIG_MULTITHREADING */
``` | /content/code_sandbox/arch/arm/core/cortex_m/thread.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5,529 |
```c
/*
*
*/
#include <zephyr/arch/cpu.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/common/pm_s2ram.h>
#define MAGIC (0xDABBAD00)
/**
* CPU context for S2RAM
*/
__noinit _cpu_context_t _cpu_context;
#ifndef CONFIG_PM_S2RAM_CUSTOM_MARKING
/**
* S2RAM Marker
*/
static __noinit uint32_t marker;
/*
 * Write the magic value into the __noinit marker so that, after a reset,
 * pm_s2ram_mark_check_and_clear() can distinguish a resume from
 * suspend-to-RAM from a normal cold boot.
 */
void pm_s2ram_mark_set(void)
{
	marker = MAGIC;
}
/*
 * Check whether the S2RAM marker was set before this reset, consuming it.
 *
 * Returns true exactly once after pm_s2ram_mark_set() has been called
 * (the marker is reset to 0 so a later call reports false). On a cold
 * boot the __noinit marker holds an arbitrary value and the result is
 * false unless it happens to equal MAGIC.
 */
bool pm_s2ram_mark_check_and_clear(void)
{
	const bool resumed = (marker == MAGIC);

	if (resumed) {
		marker = 0;
	}

	return resumed;
}
#endif /* CONFIG_PM_S2RAM_CUSTOM_MARKING */
``` | /content/code_sandbox/arch/arm/core/cortex_m/pm_s2ram.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 159 |
```c
/*
*
*/
#include <string.h>
#include <zephyr/debug/coredump.h>
#define ARCH_HDR_VER 2
uint32_t z_arm_coredump_fault_sp;
struct arm_arch_block {
struct {
uint32_t r0;
uint32_t r1;
uint32_t r2;
uint32_t r3;
uint32_t r12;
uint32_t lr;
uint32_t pc;
uint32_t xpsr;
uint32_t sp;
/* callee registers - optionally collected in V2 */
uint32_t r4;
uint32_t r5;
uint32_t r6;
uint32_t r7;
uint32_t r8;
uint32_t r9;
uint32_t r10;
uint32_t r11;
} r;
} __packed;
/*
* This might be too large for stack space if defined
* inside function. So do it here.
*/
static struct arm_arch_block arch_blk;
/*
 * Emit the Cortex-M architecture block of a coredump: an arch header
 * followed by the register snapshot taken from the exception stack frame
 * (and, when available, the callee-saved registers).
 *
 * @param esf exception stack frame captured at the time of the fatal
 *            error; if NULL, nothing is emitted
 */
void arch_coredump_info_dump(const struct arch_esf *esf)
{
	/* Arch block header: identifies the payload and its size/version */
	struct coredump_arch_hdr_t hdr = {
		.id = COREDUMP_ARCH_HDR_ID,
		.hdr_version = ARCH_HDR_VER,
		.num_bytes = sizeof(arch_blk),
	};
	/* Nothing to process */
	if (esf == NULL) {
		return;
	}
	/* Zero the block so registers absent from the ESF read as 0 */
	(void)memset(&arch_blk, 0, sizeof(arch_blk));
	/*
	 * 17 registers expected by GDB.
	 * Not all are in ESF but the GDB stub
	 * will need to send all 17 as one packet.
	 * The stub will need to send undefined
	 * for registers not presented in coredump.
	 */
	arch_blk.r.r0 = esf->basic.r0;
	arch_blk.r.r1 = esf->basic.r1;
	arch_blk.r.r2 = esf->basic.r2;
	arch_blk.r.r3 = esf->basic.r3;
	arch_blk.r.r12 = esf->basic.ip;
	arch_blk.r.lr = esf->basic.lr;
	arch_blk.r.pc = esf->basic.pc;
	arch_blk.r.xpsr = esf->basic.xpsr;
	/* SP is not in the basic ESF; use the value saved by the fault
	 * handler instead.
	 */
	arch_blk.r.sp = z_arm_coredump_fault_sp;
#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
	/* Callee-saved r4-r11 are only available with extra exception info */
	if (esf->extra_info.callee) {
		arch_blk.r.r4 = esf->extra_info.callee->v1;
		arch_blk.r.r5 = esf->extra_info.callee->v2;
		arch_blk.r.r6 = esf->extra_info.callee->v3;
		arch_blk.r.r7 = esf->extra_info.callee->v4;
		arch_blk.r.r8 = esf->extra_info.callee->v5;
		arch_blk.r.r9 = esf->extra_info.callee->v6;
		arch_blk.r.r10 = esf->extra_info.callee->v7;
		arch_blk.r.r11 = esf->extra_info.callee->v8;
	}
#endif
	/* Send for output: header first, then the register block */
	coredump_buffer_output((uint8_t *)&hdr, sizeof(hdr));
	coredump_buffer_output((uint8_t *)&arch_blk, sizeof(arch_blk));
}
/* Report the coredump target code identifying this arch as ARM Cortex-M. */
uint16_t arch_coredump_tgt_code_get(void)
{
	return COREDUMP_TGT_ARM_CORTEX_M;
}
``` | /content/code_sandbox/arch/arm/core/cortex_m/coredump.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 739 |
```c
/*
*
*/
/**
* @file
* @brief ARM Cortex-M k_thread_abort() routine
*
* The ARM Cortex-M architecture provides its own k_thread_abort() to deal
* with different CPU modes (handler vs thread) when a thread aborts. When its
* entry point returns or when it aborts itself, the CPU is in thread mode and
* must call z_swap() (which triggers a service call), but when in handler
* mode, the CPU must exit handler mode to cause the context switch, and thus
* must queue the PendSV exception.
*/
#include <zephyr/kernel.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <ksched.h>
#include <kswap.h>
#include <zephyr/sys/__assert.h>
/*
 * Abort a thread, handling the Cortex-M-specific case of a thread
 * aborting itself from within an exception: PendSV is pended so the
 * context switch happens on exception return, and any pended SVCall is
 * cleared so it cannot run against the stack of the dying thread.
 *
 * @param thread thread to abort
 */
void z_impl_k_thread_abort(k_tid_t thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread);
	if (_current == thread) {
		if (arch_is_in_isr()) {
			/* ARM is unlike most arches in that this is true
			 * even for non-peripheral interrupts, even though
			 * for these types of faults there is not an implicit
			 * reschedule on the way out. See #21923.
			 *
			 * We have to reschedule since the current thread
			 * should no longer run after we return, so
			 * Trigger PendSV, in case we are in one of the
			 * situations where the isr check is true but there
			 * is not an implicit scheduler invocation.
			 */
			SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
			/* Clear any system calls that may be pending
			 * as they have a higher priority than the PendSV
			 * handler and will check the stack of the thread
			 * being aborted.
			 */
			SCB->SHCSR &= ~SCB_SHCSR_SVCALLPENDED_Msk;
		}
	}
	z_thread_abort(thread);
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread);
}
``` | /content/code_sandbox/arch/arm/core/cortex_m/thread_abort.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 438 |
```linker script
/*
*
*/
#if defined(CONFIG_CPU_CORTEX_M_HAS_VTOR)
/*
* In an MCU with VTOR, the VTOR.TBLOFF is set to the start address of the
* vector_relay_table, when building with support for interrupt relaying.
* Therefore, vector_relay_table must respect the alignment requirements
* of VTOR.TBLOFF described below.
*/
/* VTOR bits 0:6 are reserved (RES0). This requires that the base address
* of the vector table is 32-word aligned.
*/
. = ALIGN( 1 << LOG2CEIL(4 * 32) );
/* When setting TBLOFF in VTOR we must align the offset to the number of
* exception entries in the vector table. The minimum alignment of 32 words
* is sufficient for the 16 ARM Core exceptions and up to 16 HW interrupts.
* For more than 16 HW interrupts, we adjust the alignment by rounding up
* to the next power of two; this restriction guarantees a functional VTOR
* setting in any Cortex-M implementation (might not be required in every
* Cortex-M processor).
*/
. = ALIGN( 1 << LOG2CEIL(4 * (16 + CONFIG_NUM_IRQS)) );
#endif
KEEP(*(.vector_relay_table))
KEEP(*(".vector_relay_table.*"))
#include "vector_table_pad.ld"
``` | /content/code_sandbox/arch/arm/core/cortex_m/relay_vector_table.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 287 |
```linker script
/*
*/
/* Copied from linker.ld */
/* Reserved 4 bytes to save vector table base address */
SECTION_PROLOGUE(.vt_pointer,(NOLOAD),)
{
*(.vt_pointer_section)
*(".vt_pointer_section.*")
} GROUP_LINK_IN(RAMABLE_REGION)
``` | /content/code_sandbox/arch/arm/core/cortex_m/vt_pointer_section.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 62 |
```unknown
/*
*
*/
/**
* @file irq_relay.S
*
* @brief IRQ relay vector table and relay handler for Cortex-M0 or
* Armv8-M baseline SoCs
*
* In certain ARMv6-M and Armv8-M baseline cores the vector table address can
 * not be changed. Once the vector table is occupied by the bootloader, there
* will be no IRQ support in the chainloaded image.
*
 * This code is linked into the bootloader. When an interrupt arrives, the
 * bootloader forwards it to the chainloaded image, which makes DFU possible
 * on those cores.
*
 * Note: currently supports MCUboot only.
* */
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
_ASM_FILE_PROLOGUE
GDATA(_vector_table_pointer)
GDATA(z_main_stack)
SECTION_FUNC(TEXT, __vector_relay_handler)
mrs r0, ipsr;
lsls r0, r0, #0x02;
ldr r1, =_vector_table_pointer;
ldr r1, [r1];
adds r1, r1, r0;
ldr r1, [r1];
	/**
	 * Each IRQ vector entry is 4 bytes wide, so the offset within the
	 * vector table is the exception number times 4 (i.e. r0 << 2).
	 * Since r1 holds the base address of the real vector table,
	 * (r1 + (r0 << 2)) is the address of the real IRQ handler entry,
	 * which was loaded into r1 above and is branched to here.
	 * */
	bx r1;
bx r1;
GTEXT(__vector_relay_handler)
SECTION_FUNC(vector_relay_table, __vector_relay_table)
.word z_main_stack + CONFIG_MAIN_STACK_SIZE
.word z_arm_reset
.word __vector_relay_handler /* nmi */
.word __vector_relay_handler /* hard fault */
.word __vector_relay_handler
.word __vector_relay_handler
.word __vector_relay_handler
.word __vector_relay_handler
.word __vector_relay_handler
.word __vector_relay_handler
.word __vector_relay_handler
.word __vector_relay_handler /* svc */
.word __vector_relay_handler
.word __vector_relay_handler
.word __vector_relay_handler /* pendsv */
.word __vector_relay_handler
/* End of system exception */
.rept CONFIG_NUM_IRQS
.word __vector_relay_handler
.endr
GDATA(__vector_relay_table)
``` | /content/code_sandbox/arch/arm/core/cortex_m/irq_relay.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 542 |
```c
/*
*
*/
/**
* @file
* @brief Common fault handler for ARM Cortex-M
*
* Common fault handler for ARM Cortex-M processors.
*/
#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <inttypes.h>
#include <zephyr/arch/common/exc_handle.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/barrier.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
#if defined(CONFIG_PRINTK) || defined(CONFIG_LOG)
#define PR_EXC(...) LOG_ERR(__VA_ARGS__)
#define STORE_xFAR(reg_var, reg) uint32_t reg_var = (uint32_t)reg
#else
#define PR_EXC(...)
#define STORE_xFAR(reg_var, reg)
#endif /* CONFIG_PRINTK || CONFIG_LOG */
#if (CONFIG_FAULT_DUMP == 2)
#define PR_FAULT_INFO(...) PR_EXC(__VA_ARGS__)
#else
#define PR_FAULT_INFO(...)
#endif
#if defined(CONFIG_ARM_MPU) && defined(CONFIG_CPU_HAS_NXP_MPU)
#define EMN(edr) (((edr) & SYSMPU_EDR_EMN_MASK) >> SYSMPU_EDR_EMN_SHIFT)
#define EACD(edr) (((edr) & SYSMPU_EDR_EACD_MASK) >> SYSMPU_EDR_EACD_SHIFT)
#endif
/* Exception Return (EXC_RETURN) is provided in LR upon exception entry.
* It is used to perform an exception return and to detect possible state
* transition upon exception.
*/
/* Prefix. Indicates that this is an EXC_RETURN value.
* This field reads as 0b11111111.
*/
#define EXC_RETURN_INDICATOR_PREFIX (0xFF << 24)
/* bit[0]: Exception Secure. The security domain the exception was taken to. */
#define EXC_RETURN_EXCEPTION_SECURE_Pos 0
#define EXC_RETURN_EXCEPTION_SECURE_Msk \
BIT(EXC_RETURN_EXCEPTION_SECURE_Pos)
#define EXC_RETURN_EXCEPTION_SECURE_Non_Secure 0
#define EXC_RETURN_EXCEPTION_SECURE_Secure EXC_RETURN_EXCEPTION_SECURE_Msk
/* bit[2]: Stack Pointer selection. */
#define EXC_RETURN_SPSEL_Pos 2
#define EXC_RETURN_SPSEL_Msk BIT(EXC_RETURN_SPSEL_Pos)
#define EXC_RETURN_SPSEL_MAIN 0
#define EXC_RETURN_SPSEL_PROCESS EXC_RETURN_SPSEL_Msk
/* bit[3]: Mode. Indicates the Mode that was stacked from. */
#define EXC_RETURN_MODE_Pos 3
#define EXC_RETURN_MODE_Msk BIT(EXC_RETURN_MODE_Pos)
#define EXC_RETURN_MODE_HANDLER 0
#define EXC_RETURN_MODE_THREAD EXC_RETURN_MODE_Msk
/* bit[4]: Stack frame type. Indicates whether the stack frame is a standard
* integer only stack frame or an extended floating-point stack frame.
*/
#define EXC_RETURN_STACK_FRAME_TYPE_Pos 4
#define EXC_RETURN_STACK_FRAME_TYPE_Msk BIT(EXC_RETURN_STACK_FRAME_TYPE_Pos)
#define EXC_RETURN_STACK_FRAME_TYPE_EXTENDED 0
#define EXC_RETURN_STACK_FRAME_TYPE_STANDARD EXC_RETURN_STACK_FRAME_TYPE_Msk
/* bit[5]: Default callee register stacking. Indicates whether the default
* stacking rules apply, or whether the callee registers are already on the
* stack.
*/
#define EXC_RETURN_CALLEE_STACK_Pos 5
#define EXC_RETURN_CALLEE_STACK_Msk BIT(EXC_RETURN_CALLEE_STACK_Pos)
#define EXC_RETURN_CALLEE_STACK_SKIPPED 0
#define EXC_RETURN_CALLEE_STACK_DEFAULT EXC_RETURN_CALLEE_STACK_Msk
/* bit[6]: Secure or Non-secure stack. Indicates whether a Secure or
* Non-secure stack is used to restore stack frame on exception return.
*/
#define EXC_RETURN_RETURN_STACK_Pos 6
#define EXC_RETURN_RETURN_STACK_Msk BIT(EXC_RETURN_RETURN_STACK_Pos)
#define EXC_RETURN_RETURN_STACK_Non_Secure 0
#define EXC_RETURN_RETURN_STACK_Secure EXC_RETURN_RETURN_STACK_Msk
/* Integrity signature for an ARMv8-M implementation */
#if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
#define INTEGRITY_SIGNATURE_STD 0xFEFA125BUL
#define INTEGRITY_SIGNATURE_EXT 0xFEFA125AUL
#else
#define INTEGRITY_SIGNATURE 0xFEFA125BUL
#endif /* CONFIG_ARMV7_M_ARMV8_M_FP */
/* Size (in words) of the additional state context that is pushed
* to the Secure stack during a Non-Secure exception entry.
*/
#define ADDITIONAL_STATE_CONTEXT_WORDS 10
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* helpers to access memory/bus/usage faults */
#define SCB_CFSR_MEMFAULTSR \
(uint32_t)((SCB->CFSR & SCB_CFSR_MEMFAULTSR_Msk) \
>> SCB_CFSR_MEMFAULTSR_Pos)
#define SCB_CFSR_BUSFAULTSR \
(uint32_t)((SCB->CFSR & SCB_CFSR_BUSFAULTSR_Msk) \
>> SCB_CFSR_BUSFAULTSR_Pos)
#define SCB_CFSR_USGFAULTSR \
(uint32_t)((SCB->CFSR & SCB_CFSR_USGFAULTSR_Msk) \
>> SCB_CFSR_USGFAULTSR_Pos)
#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */
/**
*
* Dump information regarding fault (FAULT_DUMP == 1)
*
* Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 1
* (short form).
*
* eg. (precise bus error escalated to hard fault):
*
* Fault! EXC #3
* HARD FAULT: Escalation (see below)!
* MMFSR: 0x00000000, BFSR: 0x00000082, UFSR: 0x00000000
* BFAR: 0xff001234
*
*
*
* Dump information regarding fault (FAULT_DUMP == 2)
*
* Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 2
* (long form), and return the error code for the kernel to identify the fatal
* error reason.
*
* eg. (precise bus error escalated to hard fault):
*
* ***** HARD FAULT *****
* Fault escalation (see below)
* ***** BUS FAULT *****
* Precise data bus error
* Address: 0xff001234
*
*/
#if (CONFIG_FAULT_DUMP == 1)
/* Print the short-form (CONFIG_FAULT_DUMP == 1) fault summary: the
 * exception number plus, on ARMv7-M/ARMv8-M Mainline, the raw
 * MemManage/Bus/Usage fault status fields (and SecureFault status when
 * built as Secure firmware). @esf is unused in this variant.
 */
static void fault_show(const struct arch_esf *esf, int fault)
{
	PR_EXC("Fault! EXC #%d", fault);
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	PR_EXC("MMFSR: 0x%x, BFSR: 0x%x, UFSR: 0x%x", SCB_CFSR_MEMFAULTSR,
	       SCB_CFSR_BUSFAULTSR, SCB_CFSR_USGFAULTSR);
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
	PR_EXC("SFSR: 0x%x", SAU->SFSR);
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */
}
#else
/* For Dump level 2, detailed information is generated by the
* fault handling functions for individual fault conditions, so this
* function is left empty.
*
* For Dump level 0, no information needs to be generated.
*/
static void fault_show(const struct arch_esf *esf, int fault)
{
	/* Explicitly discard the arguments; nothing is reported at
	 * dump levels 0 and 2 from this function.
	 */
	(void)esf;
	(void)fault;
}
#endif /* FAULT_DUMP == 1 */
#ifdef CONFIG_USERSPACE
Z_EXC_DECLARE(z_arm_user_string_nlen);
static const struct z_exc_handle exceptions[] = {
Z_EXC_HANDLE(z_arm_user_string_nlen)
};
#endif
/* Perform an assessment whether an MPU fault shall be
 * treated as recoverable.
 *
 * A fault is recoverable when the faulting PC lies inside one of the
 * registered z_exc_handle ranges (currently only z_arm_user_string_nlen);
 * in that case the PC is redirected to the corresponding fixup handler.
 *
 * @param esf exception stack frame; esf->basic.pc may be rewritten
 * @param synchronous true if the fault is synchronous to the faulting
 *        instruction (false e.g. for DWT/DebugMonitor-based detection,
 *        where the PC may already point past the faulting instruction)
 *
 * @return true if error is recoverable, otherwise return false.
 */
static bool memory_fault_recoverable(struct arch_esf *esf, bool synchronous)
{
#ifdef CONFIG_USERSPACE
	for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
		/* Mask out instruction mode */
		uint32_t start = (uint32_t)exceptions[i].start & ~0x1U;
		uint32_t end = (uint32_t)exceptions[i].end & ~0x1U;
#if defined(CONFIG_NULL_POINTER_EXCEPTION_DETECTION_DWT)
		/* Non-synchronous exceptions (e.g. DebugMonitor) may have
		 * allowed PC to continue to the next instruction.
		 */
		end += (synchronous) ? 0x0 : 0x4;
#else
		ARG_UNUSED(synchronous);
#endif
		if (esf->basic.pc >= start && esf->basic.pc < end) {
			esf->basic.pc = (uint32_t)(exceptions[i].fixup);
			return true;
		}
	}
#endif
	return false;
}
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* HardFault is used for all fault conditions on ARMv6-M. */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
uint32_t z_check_thread_stack_fail(const uint32_t fault_addr,
const uint32_t psp);
#endif /* CONFIG_MPU_STACK_GUARD || defined(CONFIG_USERSPACE) */
/**
 *
 * @brief Dump MemManage fault information
 *
 * See z_arm_fault_dump() for example.
 *
 * @param esf exception stack frame at the time of the fault
 * @param from_hard_fault non-zero if invoked from the HardFault handler
 *        (i.e. the MemManage fault escalated to HardFault)
 * @param recoverable output: set to whether the fault may be recovered from
 *
 * @return error code to identify the fatal error reason
 */
static uint32_t mem_manage_fault(struct arch_esf *esf, int from_hard_fault,
				 bool *recoverable)
{
	uint32_t reason = K_ERR_ARM_MEM_GENERIC;
	uint32_t mmfar = -EINVAL;
	PR_FAULT_INFO("***** MPU FAULT *****");
	/* The MMFSR status checks below are not mutually exclusive: the
	 * last matching condition determines the reported reason.
	 */
	if ((SCB->CFSR & SCB_CFSR_MSTKERR_Msk) != 0) {
		reason = K_ERR_ARM_MEM_STACKING;
		PR_FAULT_INFO(" Stacking error (context area might be"
			" not valid)");
	}
	if ((SCB->CFSR & SCB_CFSR_MUNSTKERR_Msk) != 0) {
		reason = K_ERR_ARM_MEM_UNSTACKING;
		PR_FAULT_INFO(" Unstacking error");
	}
	if ((SCB->CFSR & SCB_CFSR_DACCVIOL_Msk) != 0) {
		reason = K_ERR_ARM_MEM_DATA_ACCESS;
		PR_FAULT_INFO(" Data Access Violation");
		/* In a fault handler, to determine the true faulting address:
		 * 1. Read and save the MMFAR value.
		 * 2. Read the MMARVALID bit in the MMFSR.
		 * The MMFAR address is valid only if this bit is 1.
		 *
		 * Software must follow this sequence because another higher
		 * priority exception might change the MMFAR value.
		 */
		uint32_t temp = SCB->MMFAR;
		if ((SCB->CFSR & SCB_CFSR_MMARVALID_Msk) != 0) {
			mmfar = temp;
			PR_EXC(" MMFAR Address: 0x%x", mmfar);
			if (from_hard_fault != 0) {
				/* clear SCB_MMAR[VALID] to reset */
				SCB->CFSR &= ~SCB_CFSR_MMARVALID_Msk;
			}
		}
	}
	if ((SCB->CFSR & SCB_CFSR_IACCVIOL_Msk) != 0) {
		reason = K_ERR_ARM_MEM_INSTRUCTION_ACCESS;
		PR_FAULT_INFO(" Instruction Access Violation");
	}
#if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
	if ((SCB->CFSR & SCB_CFSR_MLSPERR_Msk) != 0) {
		reason = K_ERR_ARM_MEM_FP_LAZY_STATE_PRESERVATION;
		PR_FAULT_INFO(
			" Floating-point lazy state preservation error");
	}
#endif /* CONFIG_ARMV7_M_ARMV8_M_FP */
	/* When stack protection is enabled, we need to assess
	 * if the memory violation error is a stack corruption.
	 *
	 * By design, being a Stacking MemManage fault is a necessary
	 * and sufficient condition for a thread stack corruption.
	 * [Cortex-M process stack pointer is always descending and
	 * is never modified by code (except for the context-switch
	 * routine), therefore, a stacking error implies the PSP has
	 * crossed into an area beyond the thread stack.]
	 *
	 * Data Access Violation errors may or may not be caused by
	 * thread stack overflows.
	 */
	if ((SCB->CFSR & SCB_CFSR_MSTKERR_Msk) ||
		(SCB->CFSR & SCB_CFSR_DACCVIOL_Msk)) {
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
		/* MemManage Faults are always banked between security
		 * states. Therefore, we can safely assume the fault
		 * originated from the same security state.
		 *
		 * As we only assess thread stack corruption, we only
		 * process the error further if the stack frame is on
		 * PSP. For always-banked MemManage Fault, this is
		 * equivalent to inspecting the RETTOBASE flag.
		 *
		 * Note:
		 * It is possible that MMFAR address is not written by the
		 * Cortex-M core; this occurs when the stacking error is
		 * not accompanied by a data access violation error (i.e.
		 * when stack overflows due to the exception entry frame
		 * stacking): z_check_thread_stack_fail() shall be able to
		 * handle the case of 'mmfar' holding the -EINVAL value.
		 */
		if (SCB->ICSR & SCB_ICSR_RETTOBASE_Msk) {
			uint32_t min_stack_ptr = z_check_thread_stack_fail(mmfar,
				((uint32_t) &esf[0]));
			if (min_stack_ptr) {
				/* When MemManage Stacking Error has occurred,
				 * the stack context frame might be corrupted
				 * but the stack pointer may have actually
				 * descent below the allowed (thread) stack
				 * area. We may face a problem with un-stacking
				 * the frame, upon the exception return, if we
				 * do not have sufficient access permissions to
				 * read the corrupted stack frame. Therefore,
				 * we manually force the stack pointer to the
				 * lowest allowed position, inside the thread's
				 * stack.
				 *
				 * Note:
				 * The PSP will normally be adjusted in a tail-
				 * chained exception performing context switch,
				 * after aborting the corrupted thread. The
				 * adjustment, here, is required as tail-chain
				 * cannot always be guaranteed.
				 *
				 * The manual adjustment of PSP is safe, as we
				 * will not be re-scheduling this thread again
				 * for execution; thread stack corruption is a
				 * fatal error and a thread that corrupted its
				 * stack needs to be aborted.
				 */
				__set_PSP(min_stack_ptr);
				reason = K_ERR_STACK_CHK_FAIL;
			} else {
				__ASSERT(!(SCB->CFSR & SCB_CFSR_MSTKERR_Msk),
					"Stacking error not a stack fail\n");
			}
		}
#else
	(void)mmfar;
	__ASSERT(!(SCB->CFSR & SCB_CFSR_MSTKERR_Msk),
		"Stacking or Data Access Violation error "
		"without stack guard, user-mode or null-pointer detection\n");
#endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */
	}
	/* When we were handling this fault, we may have triggered a fp
	 * lazy stacking Memory Manage fault. At the time of writing, this
	 * can happen when printing. If that's true, we should clear the
	 * pending flag in addition to the clearing the reason for the fault
	 */
#if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
	if ((SCB->CFSR & SCB_CFSR_MLSPERR_Msk) != 0) {
		SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTPENDED_Msk;
	}
#endif /* CONFIG_ARMV7_M_ARMV8_M_FP */
	/* clear MMFSR sticky bits */
	SCB->CFSR |= SCB_CFSR_MEMFAULTSR_Msk;
	/* Assess whether system shall ignore/recover from this MPU fault. */
	*recoverable = memory_fault_recoverable(esf, true);
	return reason;
}
/**
*
* @brief Dump BusFault information
*
* See z_arm_fault_dump() for example.
*
* @return error code to identify the fatal error reason.
*
*/
static int bus_fault(struct arch_esf *esf, int from_hard_fault, bool *recoverable)
{
uint32_t reason = K_ERR_ARM_BUS_GENERIC;
PR_FAULT_INFO("***** BUS FAULT *****");
if (SCB->CFSR & SCB_CFSR_STKERR_Msk) {
reason = K_ERR_ARM_BUS_STACKING;
PR_FAULT_INFO(" Stacking error");
}
if (SCB->CFSR & SCB_CFSR_UNSTKERR_Msk) {
reason = K_ERR_ARM_BUS_UNSTACKING;
PR_FAULT_INFO(" Unstacking error");
}
if (SCB->CFSR & SCB_CFSR_PRECISERR_Msk) {
reason = K_ERR_ARM_BUS_PRECISE_DATA_BUS;
PR_FAULT_INFO(" Precise data bus error");
/* In a fault handler, to determine the true faulting address:
* 1. Read and save the BFAR value.
* 2. Read the BFARVALID bit in the BFSR.
* The BFAR address is valid only if this bit is 1.
*
* Software must follow this sequence because another
* higher priority exception might change the BFAR value.
*/
STORE_xFAR(bfar, SCB->BFAR);
if ((SCB->CFSR & SCB_CFSR_BFARVALID_Msk) != 0) {
PR_EXC(" BFAR Address: 0x%x", bfar);
if (from_hard_fault != 0) {
/* clear SCB_CFSR_BFAR[VALID] to reset */
SCB->CFSR &= ~SCB_CFSR_BFARVALID_Msk;
}
}
}
if (SCB->CFSR & SCB_CFSR_IMPRECISERR_Msk) {
reason = K_ERR_ARM_BUS_IMPRECISE_DATA_BUS;
PR_FAULT_INFO(" Imprecise data bus error");
}
if ((SCB->CFSR & SCB_CFSR_IBUSERR_Msk) != 0) {
reason = K_ERR_ARM_BUS_INSTRUCTION_BUS;
PR_FAULT_INFO(" Instruction bus error");
#if !defined(CONFIG_ARMV7_M_ARMV8_M_FP)
}
#else
} else if (SCB->CFSR & SCB_CFSR_LSPERR_Msk) {
reason = K_ERR_ARM_BUS_FP_LAZY_STATE_PRESERVATION;
PR_FAULT_INFO(" Floating-point lazy state preservation error");
} else {
;
}
#endif /* !defined(CONFIG_ARMV7_M_ARMV8_M_FP) */
#if defined(CONFIG_ARM_MPU) && defined(CONFIG_CPU_HAS_NXP_MPU)
uint32_t sperr = SYSMPU->CESR & SYSMPU_CESR_SPERR_MASK;
uint32_t mask = BIT(31);
int i;
uint32_t ear = -EINVAL;
if (sperr) {
for (i = 0; i < SYSMPU_EAR_COUNT; i++, mask >>= 1) {
if ((sperr & mask) == 0U) {
continue;
}
STORE_xFAR(edr, SYSMPU->SP[i].EDR);
ear = SYSMPU->SP[i].EAR;
PR_FAULT_INFO(" NXP MPU error, port %d", i);
PR_FAULT_INFO(" Mode: %s, %s Address: 0x%x",
edr & BIT(2) ? "Supervisor" : "User",
edr & BIT(1) ? "Data" : "Instruction",
ear);
PR_FAULT_INFO(
" Type: %s, Master: %d, Regions: 0x%x",
edr & BIT(0) ? "Write" : "Read",
EMN(edr), EACD(edr));
/* When stack protection is enabled, we need to assess
* if the memory violation error is a stack corruption.
*
* By design, being a Stacking Bus fault is a necessary
* and sufficient condition for a stack corruption.
*/
if (SCB->CFSR & SCB_CFSR_STKERR_Msk) {
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
/* Note: we can assume the fault originated
* from the same security state for ARM
* platforms implementing the NXP MPU
* (CONFIG_CPU_HAS_NXP_MPU=y).
*
* As we only assess thread stack corruption,
* we only process the error further, if the
* stack frame is on PSP. For NXP MPU-related
* Bus Faults (banked), this is equivalent to
* inspecting the RETTOBASE flag.
*/
if (SCB->ICSR & SCB_ICSR_RETTOBASE_Msk) {
uint32_t min_stack_ptr =
z_check_thread_stack_fail(ear,
((uint32_t) &esf[0]));
if (min_stack_ptr) {
/* When BusFault Stacking Error
* has occurred, the stack
* context frame might be
* corrupted but the stack
* pointer may have actually
* moved. We may face problems
* with un-stacking the frame,
* upon exception return, if we
* do not have sufficient
* permissions to read the
* corrupted stack frame.
* Therefore, we manually force
* the stack pointer to the
* lowest allowed position.
*
* Note:
* The PSP will normally be
* adjusted in a tail-chained
* exception performing context
* switch, after aborting the
* corrupted thread. Here, the
* adjustment is required as
* tail-chain cannot always be
* guaranteed.
*/
__set_PSP(min_stack_ptr);
reason =
K_ERR_STACK_CHK_FAIL;
break;
}
}
#else
(void)ear;
__ASSERT(0,
"Stacking error without stack guard"
"or User-mode support");
#endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */
}
}
SYSMPU->CESR &= ~sperr;
}
#endif /* defined(CONFIG_ARM_MPU) && defined(CONFIG_CPU_HAS_NXP_MPU) */
/* clear BFSR sticky bits */
SCB->CFSR |= SCB_CFSR_BUSFAULTSR_Msk;
*recoverable = memory_fault_recoverable(esf, true);
return reason;
}
/**
 *
 * @brief Dump UsageFault information
 *
 * See z_arm_fault_dump() for example.
 *
 * @param esf exception stack frame (unused by this handler)
 * @return error code to identify the fatal error reason
 */
static uint32_t usage_fault(const struct arch_esf *esf)
{
	uint32_t reason = K_ERR_ARM_USAGE_GENERIC;
	PR_FAULT_INFO("***** USAGE FAULT *****");
	/* bits are sticky: they stack and must be reset */
	/* Checks are not mutually exclusive: the last matching UFSR bit
	 * determines the reported reason.
	 */
	if ((SCB->CFSR & SCB_CFSR_DIVBYZERO_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_DIV_0;
		PR_FAULT_INFO(" Division by zero");
	}
	if ((SCB->CFSR & SCB_CFSR_UNALIGNED_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_UNALIGNED_ACCESS;
		PR_FAULT_INFO(" Unaligned memory access");
	}
#if defined(CONFIG_ARMV8_M_MAINLINE)
	if ((SCB->CFSR & SCB_CFSR_STKOF_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_STACK_OVERFLOW;
		PR_FAULT_INFO(" Stack overflow (context area not valid)");
#if defined(CONFIG_BUILTIN_STACK_GUARD)
		/* Stack Overflows are always reported as stack corruption
		 * errors. Note that the built-in stack overflow mechanism
		 * prevents the context area to be loaded on the stack upon
		 * UsageFault exception entry. As a result, we cannot rely
		 * on the reported faulty instruction address, to determine
		 * the instruction that triggered the stack overflow.
		 */
		reason = K_ERR_STACK_CHK_FAIL;
#endif /* CONFIG_BUILTIN_STACK_GUARD */
	}
#endif /* CONFIG_ARMV8_M_MAINLINE */
	if ((SCB->CFSR & SCB_CFSR_NOCP_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_NO_COPROCESSOR;
		PR_FAULT_INFO(" No coprocessor instructions");
	}
	if ((SCB->CFSR & SCB_CFSR_INVPC_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_ILLEGAL_EXC_RETURN;
		PR_FAULT_INFO(" Illegal load of EXC_RETURN into PC");
	}
	if ((SCB->CFSR & SCB_CFSR_INVSTATE_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_ILLEGAL_EPSR;
		PR_FAULT_INFO(" Illegal use of the EPSR");
	}
	if ((SCB->CFSR & SCB_CFSR_UNDEFINSTR_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_UNDEFINED_INSTRUCTION;
		PR_FAULT_INFO(" Attempt to execute undefined instruction");
	}
	/* clear UFSR sticky bits */
	SCB->CFSR |= SCB_CFSR_USGFAULTSR_Msk;
	return reason;
}
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
/**
 *
 * @brief Dump SecureFault information
 *
 * See z_arm_fault_dump() for example.
 *
 * @param esf exception stack frame (unused by this handler)
 * @return error code to identify the fatal error reason
 */
static uint32_t secure_fault(const struct arch_esf *esf)
{
	uint32_t reason = K_ERR_ARM_SECURE_GENERIC;
	PR_FAULT_INFO("***** SECURE FAULT *****");
	STORE_xFAR(sfar, SAU->SFAR);
	if ((SAU->SFSR & SAU_SFSR_SFARVALID_Msk) != 0) {
		PR_EXC(" Address: 0x%x", sfar);
	}
	/* bits are sticky: they stack and must be reset */
	/* Unlike the other fault handlers, only the first matching SFSR
	 * bit (in the order below) determines the reported reason.
	 */
	if ((SAU->SFSR & SAU_SFSR_INVEP_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_ENTRY_POINT;
		PR_FAULT_INFO(" Invalid entry point");
	} else if ((SAU->SFSR & SAU_SFSR_INVIS_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_INTEGRITY_SIGNATURE;
		PR_FAULT_INFO(" Invalid integrity signature");
	} else if ((SAU->SFSR & SAU_SFSR_INVER_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_EXCEPTION_RETURN;
		PR_FAULT_INFO(" Invalid exception return");
	} else if ((SAU->SFSR & SAU_SFSR_AUVIOL_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_ATTRIBUTION_UNIT;
		PR_FAULT_INFO(" Attribution unit violation");
	} else if ((SAU->SFSR & SAU_SFSR_INVTRAN_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_TRANSITION;
		PR_FAULT_INFO(" Invalid transition");
	} else if ((SAU->SFSR & SAU_SFSR_LSPERR_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_LAZY_STATE_PRESERVATION;
		PR_FAULT_INFO(" Lazy state preservation");
	} else if ((SAU->SFSR & SAU_SFSR_LSERR_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_LAZY_STATE_ERROR;
		PR_FAULT_INFO(" Lazy state error");
	}
	/* clear SFSR sticky bits (write-one-to-clear, all 8 flags) */
	SAU->SFSR |= 0xFF;
	return reason;
}
#endif /* defined(CONFIG_ARM_SECURE_FIRMWARE) */
/**
 *
 * @brief Dump debug monitor exception information
 *
 * See z_arm_fault_dump() for example.
 *
 * @param esf exception stack frame
 * @param recoverable output: set to whether the event may be recovered from.
 *        Without DWT-based null-pointer detection this is always false.
 *
 */
static void debug_monitor(struct arch_esf *esf, bool *recoverable)
{
	*recoverable = false;
	PR_FAULT_INFO(
		"***** Debug monitor exception *****");
#if defined(CONFIG_NULL_POINTER_EXCEPTION_DETECTION_DWT)
	if (!z_arm_debug_monitor_event_error_check()) {
		/* By default, all debug monitor exceptions that are not
		 * treated as errors by z_arm_debug_event_error_check(),
		 * they are considered as recoverable errors.
		 */
		*recoverable = true;
	} else {
		/* DebugMonitor is asynchronous here, hence synchronous=false */
		*recoverable = memory_fault_recoverable(esf, false);
	}
#endif
}
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
/* Determine whether a HardFault was the escalation of a synchronous
 * `svc #_SVC_CALL_RUNTIME_EXCEPT` instruction, by decoding the 16-bit
 * instruction immediately before the stacked return address.
 *
 * @param esf exception stack frame; esf->basic.pc is dereferenced
 * @return true if the faulting instruction was the runtime-except SVC
 */
static inline bool z_arm_is_synchronous_svc(struct arch_esf *esf)
{
	uint16_t *ret_addr = (uint16_t *)esf->basic.pc;
	/* SVC is a 16-bit instruction. On a synchronous SVC
	 * escalated to Hard Fault, the return address is the
	 * next instruction, i.e. after the SVC.
	 */
#define _SVC_OPCODE 0xDF00
	/* We are about to de-reference the program counter at the
	 * time of fault to determine if it was a SVC
	 * instruction. However, we don't know if the pc itself is
	 * valid -- we could have faulted due to trying to execute a
	 * corrupted function pointer.
	 *
	 * We will temporarily ignore BusFault's so a bad program
	 * counter does not trigger ARM lockup condition.
	 */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) && !defined(CONFIG_ARMV8_M_BASELINE)
	/* Note: ARMv6-M does not support CCR.BFHFNMIGN so this access
	 * could generate a fault if the pc was invalid.
	 */
	uint16_t fault_insn = *(ret_addr - 1);
#else
	/* Open the BFHFNMIGN window; barriers ensure the CCR write takes
	 * effect before the potentially-faulting load, and that the window
	 * is closed again before continuing.
	 */
	SCB->CCR |= SCB_CCR_BFHFNMIGN_Msk;
	barrier_dsync_fence_full();
	barrier_isync_fence_full();
	uint16_t fault_insn = *(ret_addr - 1);
	SCB->CCR &= ~SCB_CCR_BFHFNMIGN_Msk;
	barrier_dsync_fence_full();
	barrier_isync_fence_full();
#endif /* ARMV6_M_ARMV8_M_BASELINE && !ARMV8_M_BASELINE */
	/* High byte selects the SVC opcode, low byte is the SVC immediate */
	if (((fault_insn & 0xff00) == _SVC_OPCODE) &&
	    ((fault_insn & 0x00ff) == _SVC_CALL_RUNTIME_EXCEPT)) {
		return true;
	}
#undef _SVC_OPCODE
	return false;
}
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* Return true if @pc lies within a region that may legitimately contain
 * executable code: the text region, the ramfunc region, or (when the SoC
 * provides it) the ITCM region.
 */
static inline bool z_arm_is_pc_valid(uintptr_t pc)
{
	bool executable =
		/* text region */
		((((uintptr_t)&__text_region_start) <= pc) &&
		 (pc < ((uintptr_t)&__text_region_end))) ||
		/* ramfunc region */
		((((uintptr_t)&__ramfunc_start) <= pc) &&
		 (pc < ((uintptr_t)&__ramfunc_end)));

#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_itcm), okay)
	/* Instruction Tightly-Coupled Memory */
	executable = executable ||
		((((uintptr_t)&__itcm_start) <= pc) &&
		 (pc < ((uintptr_t)&__itcm_end)));
#endif

	return executable;
}
#endif
/**
 *
 * @brief Dump hard fault information
 *
 * See z_arm_fault_dump() for example.
 *
 * On Mainline cores, an escalated fault is dispatched to the matching
 * MemManage/Bus/Usage/Secure fault handler based on the CFSR/SFSR bits.
 *
 * @param esf exception stack frame at the time of the fault
 * @param recoverable output: set to whether the fault may be recovered from
 *
 * @return error code to identify the fatal error reason
 */
static uint32_t hard_fault(struct arch_esf *esf, bool *recoverable)
{
	uint32_t reason = K_ERR_CPU_EXCEPTION;
	PR_FAULT_INFO("***** HARD FAULT *****");
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* Workaround for #18712:
	 * HardFault may be due to escalation, as a result of
	 * an SVC instruction that could not be executed; this
	 * can occur if ARCH_EXCEPT() is called by an ISR,
	 * which executes at priority equal to the SVC handler
	 * priority. We handle the case of Kernel OOPS and Stack
	 * Fail here.
	 */
	if (z_arm_is_pc_valid((uintptr_t)esf->basic.pc) && z_arm_is_synchronous_svc(esf)) {
		PR_EXC("ARCH_EXCEPT with reason %x\n", esf->basic.r0);
		reason = esf->basic.r0;
	}
	*recoverable = memory_fault_recoverable(esf, true);
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	*recoverable = false;
	if ((SCB->HFSR & SCB_HFSR_VECTTBL_Msk) != 0) {
		PR_EXC(" Bus fault on vector table read");
	} else if ((SCB->HFSR & SCB_HFSR_DEBUGEVT_Msk) != 0) {
		PR_EXC(" Debug event");
	} else if ((SCB->HFSR & SCB_HFSR_FORCED_Msk) != 0) {
		PR_EXC(" Fault escalation (see below)");
		if (z_arm_is_synchronous_svc(esf)) {
			PR_EXC("ARCH_EXCEPT with reason %x\n", esf->basic.r0);
			reason = esf->basic.r0;
		} else if ((SCB->CFSR & SCB_CFSR_MEMFAULTSR_Msk) != 0) {
			reason = mem_manage_fault(esf, 1, recoverable);
		} else if ((SCB->CFSR & SCB_CFSR_BUSFAULTSR_Msk) != 0) {
			reason = bus_fault(esf, 1, recoverable);
		} else if ((SCB->CFSR & SCB_CFSR_USGFAULTSR_Msk) != 0) {
			reason = usage_fault(esf);
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
		} else if (SAU->SFSR != 0) {
			reason = secure_fault(esf);
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
		} else {
			__ASSERT(0,
				"Fault escalation without FSR info");
		}
	} else {
		__ASSERT(0,
			"HardFault without HFSR info"
			" Shall never occur");
	}
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
	return reason;
}
/**
 * @brief Report a reserved exception or spurious interrupt
 *
 * Logs vector numbers with no dedicated handler: vectors below 16 are
 * reserved system exceptions, anything above is an unexpected
 * (spurious) external interrupt.
 *
 * See z_arm_fault_dump() for example.
 */
static void reserved_exception(const struct arch_esf *esf, int fault)
{
	ARG_UNUSED(esf);

	const char *label = (fault < 16) ? "Reserved Exception ("
					 : "Spurious interrupt (IRQ ";

	PR_FAULT_INFO("***** %s %d) *****", label, fault - 16);
}
/* Handler function for ARM fault conditions.
 *
 * Dispatches on the active exception (vector) number: HardFault (3)
 * on all cores, plus the configurable fault vectors (4-7, 12) on
 * Mainline cores. Unknown vectors are reported as reserved/spurious.
 * Unless the fault was recoverable, generic fault info is dumped.
 *
 * @return error code identifying the fatal error reason
 */
static uint32_t fault_handle(struct arch_esf *esf, int fault, bool *recoverable)
{
uint32_t reason = K_ERR_CPU_EXCEPTION;
*recoverable = false;
switch (fault) {
case 3:
reason = hard_fault(esf, recoverable);
break;
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* HardFault is raised for all fault conditions on ARMv6-M. */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
case 4:
reason = mem_manage_fault(esf, 0, recoverable);
break;
case 5:
reason = bus_fault(esf, 0, recoverable);
break;
case 6:
reason = usage_fault(esf);
break;
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
case 7:
reason = secure_fault(esf);
break;
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
case 12:
debug_monitor(esf, recoverable);
break;
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
default:
reserved_exception(esf, fault);
break;
}
if ((*recoverable) == false) {
/* Dump generic information about the fault. */
fault_show(esf, fault);
}
return reason;
}
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
#if (CONFIG_FAULT_DUMP == 2)
/**
 * @brief Dump the Secure Stack information for an exception that
 * has occurred in Non-Secure state.
 *
 * @param secure_esf Pointer to the secure stack frame.
 */
static void secure_stack_dump(const struct arch_esf *secure_esf)
{
/*
 * In case a Non-Secure exception interrupted the Secure
 * execution, the Secure state has stacked the additional
 * state context and the top of the stack contains the
 * integrity signature.
 *
 * In case of a Non-Secure function call the top of the
 * stack contains the return address to Secure state.
 */
uint32_t *top_of_sec_stack = (uint32_t *)secure_esf;
uint32_t sec_ret_addr;
#if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
/* With the FP extension, two signature encodings are possible */
if ((*top_of_sec_stack == INTEGRITY_SIGNATURE_STD) ||
(*top_of_sec_stack == INTEGRITY_SIGNATURE_EXT)) {
#else
if (*top_of_sec_stack == INTEGRITY_SIGNATURE) {
#endif /* CONFIG_ARMV7_M_ARMV8_M_FP */
/* Secure state interrupted by a Non-Secure exception.
 * The return address after the additional state
 * context, stacked by the Secure code upon
 * Non-Secure exception entry.
 */
top_of_sec_stack += ADDITIONAL_STATE_CONTEXT_WORDS;
secure_esf = (const struct arch_esf *)top_of_sec_stack;
sec_ret_addr = secure_esf->basic.pc;
} else {
/* Exception during Non-Secure function call.
 * The return address is located on top of stack.
 */
sec_ret_addr = *top_of_sec_stack;
}
PR_FAULT_INFO(" S instruction address: 0x%x", sec_ret_addr);
}
#define SECURE_STACK_DUMP(esf) secure_stack_dump(esf)
#else
/* We do not dump the Secure stack information for lower dump levels. */
#define SECURE_STACK_DUMP(esf)
#endif /* CONFIG_FAULT_DUMP == 2 */
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
/*
* This internal function does the following:
*
* - Retrieves the exception stack frame
* - Evaluates whether to report being in a nested exception
*
* If the ESF is not successfully retrieved, the function signals
* an error by returning NULL.
*
* @return ESF pointer on success, otherwise return NULL
*/
static inline struct arch_esf *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_return,
bool *nested_exc)
{
bool alternative_state_exc = false;
struct arch_esf *ptr_esf = NULL;
*nested_exc = false;
/* Every architecturally valid EXC_RETURN carries the indicator prefix */
if ((exc_return & EXC_RETURN_INDICATOR_PREFIX) !=
EXC_RETURN_INDICATOR_PREFIX) {
/* Invalid EXC_RETURN value. This is a fatal error. */
return NULL;
}
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
if ((exc_return & EXC_RETURN_EXCEPTION_SECURE_Secure) == 0U) {
/* Secure Firmware shall only handle Secure Exceptions.
 * This is a fatal error.
 */
return NULL;
}
if (exc_return & EXC_RETURN_RETURN_STACK_Secure) {
/* Exception entry occurred in Secure stack. */
} else {
/* Exception entry occurred in Non-Secure stack. Therefore,
 * msp/psp point to the Secure stack, however, the actual
 * exception stack frame is located in the Non-Secure stack.
 */
alternative_state_exc = true;
/* Dump the Secure stack before handling the actual fault. */
struct arch_esf *secure_esf;
if (exc_return & EXC_RETURN_SPSEL_PROCESS) {
/* Secure stack pointed by PSP */
secure_esf = (struct arch_esf *)psp;
} else {
/* Secure stack pointed by MSP */
secure_esf = (struct arch_esf *)msp;
*nested_exc = true;
}
SECURE_STACK_DUMP(secure_esf);
/* Handle the actual fault.
 * Extract the correct stack frame from the Non-Secure state
 * and supply it to the fault handling function.
 */
if (exc_return & EXC_RETURN_MODE_THREAD) {
ptr_esf = (struct arch_esf *)__TZ_get_PSP_NS();
} else {
ptr_esf = (struct arch_esf *)__TZ_get_MSP_NS();
}
}
#elif defined(CONFIG_ARM_NONSECURE_FIRMWARE)
if (exc_return & EXC_RETURN_EXCEPTION_SECURE_Secure) {
/* Non-Secure Firmware shall only handle Non-Secure Exceptions.
 * This is a fatal error.
 */
return NULL;
}
if (exc_return & EXC_RETURN_RETURN_STACK_Secure) {
/* Exception entry occurred in Secure stack.
 *
 * Note that Non-Secure firmware cannot inspect the Secure
 * stack to determine the root cause of the fault. Fault
 * inspection will indicate the Non-Secure instruction
 * that performed the branch to the Secure domain.
 */
alternative_state_exc = true;
PR_FAULT_INFO("Exception occurred in Secure State");
if (exc_return & EXC_RETURN_SPSEL_PROCESS) {
/* Non-Secure stack frame on PSP */
ptr_esf = (struct arch_esf *)psp;
} else {
/* Non-Secure stack frame on MSP */
ptr_esf = (struct arch_esf *)msp;
}
} else {
/* Exception entry occurred in Non-Secure stack. */
}
#else
/* The processor has a single execution state.
 * We verify that the Thread mode is using PSP.
 */
if ((exc_return & EXC_RETURN_MODE_THREAD) &&
(!(exc_return & EXC_RETURN_SPSEL_PROCESS))) {
PR_EXC("SPSEL in thread mode does not indicate PSP");
return NULL;
}
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
if (!alternative_state_exc) {
if (exc_return & EXC_RETURN_MODE_THREAD) {
/* Returning to thread mode */
ptr_esf = (struct arch_esf *)psp;
} else {
/* Returning to handler mode: the fault preempted another
 * exception, so report a nested exception.
 */
ptr_esf = (struct arch_esf *)msp;
*nested_exc = true;
}
}
return ptr_esf;
}
/**
*
* @brief ARM Fault handler
*
* This routine is called when fatal error conditions are detected by hardware
* and is responsible for:
* - resetting the processor fault status registers (for the case when the
* error handling policy allows the system to recover from the error),
* - reporting the error information,
* - determining the error reason to be provided as input to the user-
* provided routine, k_sys_fatal_error_handler().
* The k_sys_fatal_error_handler() is invoked once the above operations are
* completed, and is responsible for implementing the error handling policy.
*
* The function needs, first, to determine the exception stack frame.
* Note that the current security state might not be the actual
* state in which the processor was executing, when the exception occurred.
* The actual state may need to be determined by inspecting the EXC_RETURN
* value, which is provided as argument to the Fault handler.
*
* If the exception occurred in the same security state, the stack frame
* will be pointed to by either MSP or PSP depending on the processor
* execution state when the exception occurred. MSP and PSP values are
* provided as arguments to the Fault handler.
*
* @param msp MSP value immediately after the exception occurred
* @param psp PSP value immediately after the exception occurred
* @param exc_return EXC_RETURN value present in LR after exception entry.
* @param callee_regs Callee-saved registers (R4-R11, PSP)
*
*/
void z_arm_fault(uint32_t msp, uint32_t psp, uint32_t exc_return,
_callee_saved_t *callee_regs)
{
uint32_t reason = K_ERR_CPU_EXCEPTION;
/* The active vector number tells us which fault exception this is */
int fault = SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk;
bool recoverable, nested_exc;
struct arch_esf *esf;
/* Create a stack-ed copy of the ESF to be used during
 * the fault handling process.
 */
struct arch_esf esf_copy;
/* Force unlock interrupts */
arch_irq_unlock(0);
/* Retrieve the Exception Stack Frame (ESF) to be supplied
 * as argument to the remainder of the fault handling process.
 */
esf = get_esf(msp, psp, exc_return, &nested_exc);
__ASSERT(esf != NULL,
"ESF could not be retrieved successfully. Shall never occur.");
#ifdef CONFIG_DEBUG_COREDUMP
z_arm_coredump_fault_sp = POINTER_TO_UINT(esf);
#endif
reason = fault_handle(esf, fault, &recoverable);
if (recoverable) {
/* Fault was handled in place; resume the faulting context */
return;
}
/* Copy ESF */
#if !defined(CONFIG_EXTRA_EXCEPTION_INFO)
memcpy(&esf_copy, esf, sizeof(struct arch_esf));
ARG_UNUSED(callee_regs);
#else
/* the extra exception info is not present in the original esf
 * so we only copy the fields before those.
 */
memcpy(&esf_copy, esf, offsetof(struct arch_esf, extra_info));
esf_copy.extra_info = (struct __extra_esf_info) {
.callee = callee_regs,
.exc_return = exc_return,
.msp = msp
};
#endif /* CONFIG_EXTRA_EXCEPTION_INFO */
/* Overwrite stacked IPSR to mark a nested exception,
 * or a return to Thread mode. Note that this may be
 * required, if the retrieved ESF contents are invalid
 * due to, for instance, a stacking error.
 */
if (nested_exc) {
if ((esf_copy.basic.xpsr & IPSR_ISR_Msk) == 0) {
esf_copy.basic.xpsr |= IPSR_ISR_Msk;
}
} else {
esf_copy.basic.xpsr &= ~(IPSR_ISR_Msk);
}
/* Optionally collapse arch-specific reason codes into the generic one */
if (IS_ENABLED(CONFIG_SIMPLIFIED_EXCEPTION_CODES) && (reason >= K_ERR_ARCH_START)) {
reason = K_ERR_CPU_EXCEPTION;
}
z_arm_fatal_error(reason, &esf_copy);
}
/**
*
* @brief Initialization of fault handling
*
* Turns on the desired hardware faults.
*
*/
void z_arm_fault_init(void)
{
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* Nothing to configure here for Baseline cores */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* Trap integer divide-by-zero as a UsageFault */
SCB->CCR |= SCB_CCR_DIV_0_TRP_Msk;
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
#if defined(CONFIG_BUILTIN_STACK_GUARD)
/* If Stack guarding via SP limit checking is enabled, disable
 * SP limit checking inside HardFault and NMI. This is done
 * in order to allow for the desired fault logging to execute
 * properly in all cases.
 *
 * Note that this could allow a Secure Firmware Main Stack
 * to descend into non-secure region during HardFault and
 * NMI exception entry. To prevent from this, non-secure
 * memory regions must be located higher than secure memory
 * regions.
 *
 * For Non-Secure Firmware this could allow the Non-Secure Main
 * Stack to attempt to descend into secure region, in which case a
 * Secure Hard Fault will occur and we can track the fault from there.
 */
SCB->CCR |= SCB_CCR_STKOFHFNMIGN_Msk;
#endif /* CONFIG_BUILTIN_STACK_GUARD */
#ifdef CONFIG_TRAP_UNALIGNED_ACCESS
SCB->CCR |= SCB_CCR_UNALIGN_TRP_Msk;
#else
SCB->CCR &= ~SCB_CCR_UNALIGN_TRP_Msk;
#endif /* CONFIG_TRAP_UNALIGNED_ACCESS */
}
``` | /content/code_sandbox/arch/arm/core/cortex_m/fault.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 10,615 |
```c
/*
*
*/
/**
* @file
* @brief Full C support initialization
*
*
* Initialization of full C support: zero the .bss, copy the .data if XIP,
* call z_cstart().
*
* Stack is available in this module, but not the global data/bss until their
* initialization is performed.
*/
#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/sys/barrier.h>
#if defined(__GNUC__)
/*
* GCC can detect if memcpy is passed a NULL argument, however one of
* the cases of relocate_vector_table() it is valid to pass NULL, so we
* suppress the warning for this case. We need to do this before
* string.h is included to get the declaration of memcpy.
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wnonnull"
#endif
#include <string.h>
#if defined(CONFIG_SW_VECTOR_RELAY) || defined(CONFIG_SW_VECTOR_RELAY_CLIENT)
Z_GENERIC_SECTION(.vt_pointer_section) __attribute__((used))
void *_vector_table_pointer;
#endif
#ifdef CONFIG_CPU_CORTEX_M_HAS_VTOR
#define VECTOR_ADDRESS ((uintptr_t)_vector_start)
/* Point VTOR at the image's vector table; the DSB/ISB pair ensures the
 * VTOR update has taken effect before execution continues.
 */
static inline void relocate_vector_table(void)
{
SCB->VTOR = VECTOR_ADDRESS & SCB_VTOR_TBLOFF_Msk;
barrier_dsync_fence_full();
barrier_isync_fence_full();
}
#else
#define VECTOR_ADDRESS 0
/* No VTOR available: either copy the vector table to the hard-wired
 * vector address 0, or publish it via the SW vector relay pointer.
 */
void __weak relocate_vector_table(void)
{
#if defined(CONFIG_XIP) && (CONFIG_FLASH_BASE_ADDRESS != 0) || \
!defined(CONFIG_XIP) && (CONFIG_SRAM_BASE_ADDRESS != 0)
size_t vector_size = (size_t)_vector_end - (size_t)_vector_start;
/* Destination is address 0 (VECTOR_ADDRESS) by design; the -Wnonnull
 * suppression at the top of this file exists for this call.
 */
(void)memcpy(VECTOR_ADDRESS, _vector_start, vector_size);
#elif defined(CONFIG_SW_VECTOR_RELAY) || defined(CONFIG_SW_VECTOR_RELAY_CLIENT)
_vector_table_pointer = _vector_start;
#endif
}
#if defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
#endif /* CONFIG_CPU_CORTEX_M_HAS_VTOR */
#if defined(CONFIG_CPU_HAS_FPU)
/**
 * @brief Prepare FPU-related registers for kernel start
 *
 * Clears any residual co-processor access state left by pre-boot
 * firmware and, when CONFIG_FPU is enabled, grants CP10/CP11 access,
 * programs the FPCCR state-preservation mode and initializes FPSCR.
 */
static inline void z_arm_floating_point_init(void)
{
/*
 * Upon reset, the Co-Processor Access Control Register is, normally,
 * 0x00000000. However, it might be left un-cleared by firmware running
 * before Zephyr boot.
 */
SCB->CPACR &= (~(CPACR_CP10_Msk | CPACR_CP11_Msk));
#if defined(CONFIG_FPU)
/*
 * Enable CP10 and CP11 Co-Processors to enable access to floating
 * point registers.
 */
#if defined(CONFIG_USERSPACE)
/* Full access */
SCB->CPACR |= CPACR_CP10_FULL_ACCESS | CPACR_CP11_FULL_ACCESS;
#else
/* Privileged access only */
SCB->CPACR |= CPACR_CP10_PRIV_ACCESS | CPACR_CP11_PRIV_ACCESS;
#endif /* CONFIG_USERSPACE */
/*
 * Upon reset, the FPU Context Control Register is 0xC0000000
 * (both Automatic and Lazy state preservation is enabled).
 */
#if defined(CONFIG_MULTITHREADING) && !defined(CONFIG_FPU_SHARING)
/* Unshared FP registers (multithreading) mode. We disable the
 * automatic stacking of FP registers (automatic setting of
 * FPCA bit in the CONTROL register), upon exception entries,
 * as the FP registers are to be used by a single context (and
 * the use of FP registers in ISRs is not supported). This
 * configuration improves interrupt latency and decreases the
 * stack memory requirement for the (single) thread that makes
 * use of the FP co-processor.
 */
FPU->FPCCR &= (~(FPU_FPCCR_ASPEN_Msk | FPU_FPCCR_LSPEN_Msk));
#else
/*
 * FP register sharing (multithreading) mode or single-threading mode.
 *
 * Enable both automatic and lazy state preservation of the FP context.
 * The FPCA bit of the CONTROL register will be automatically set, if
 * the thread uses the floating point registers. Because of lazy state
 * preservation the volatile FP registers will not be stacked upon
 * exception entry, however, the required area in the stack frame will
 * be reserved for them. This configuration improves interrupt latency.
 * The registers will eventually be stacked when the thread is swapped
 * out during context-switch or if an ISR attempts to execute floating
 * point instructions.
 */
FPU->FPCCR = FPU_FPCCR_ASPEN_Msk | FPU_FPCCR_LSPEN_Msk;
#endif /* CONFIG_FPU_SHARING */
/* Make the side-effects of modifying the FPCCR be realized
 * immediately.
 */
barrier_dsync_fence_full();
barrier_isync_fence_full();
/* Initialize the Floating Point Status and Control Register. */
#if defined(CONFIG_ARMV8_1_M_MAINLINE)
/*
 * For ARMv8.1-M with FPU, the FPSCR[18:16] LTPSIZE field must be set
 * to 0b100 for "Tail predication not applied" as it's reset value
 */
__set_FPSCR(4 << FPU_FPDSCR_LTPSIZE_Pos);
#else
__set_FPSCR(0);
#endif
/*
 * Note:
 * The use of the FP register bank is enabled, however the FP context
 * will be activated (FPCA bit on the CONTROL register) in the presence
 * of floating point instructions.
 */
#endif /* CONFIG_FPU */
/*
 * Upon reset, the CONTROL.FPCA bit is, normally, cleared. However,
 * it might be left un-cleared by firmware running before Zephyr boot.
 * We must clear this bit to prevent errors in exception unstacking.
 *
 * Note:
 * In Sharing FP Registers mode CONTROL.FPCA is cleared before switching
 * to main, so it may be skipped here (saving few boot cycles).
 *
 * If CONFIG_INIT_ARCH_HW_AT_BOOT is set, CONTROL is cleared at reset.
 */
#if (!defined(CONFIG_FPU) || !defined(CONFIG_FPU_SHARING)) && \
(!defined(CONFIG_INIT_ARCH_HW_AT_BOOT))
__set_CONTROL(__get_CONTROL() & (~(CONTROL_FPCA_Msk)));
#endif
}
#endif /* CONFIG_CPU_HAS_FPU */
extern FUNC_NORETURN void z_cstart(void);
/**
*
* @brief Prepare to and run C code
*
* This routine prepares for the execution of and runs C code.
*
*/
void z_prep_c(void)
{
/* Install/relocate the vector table first, before anything can fault */
relocate_vector_table();
#if defined(CONFIG_CPU_HAS_FPU)
z_arm_floating_point_init();
#endif
/* Initialize RAM: zero .bss, and copy .data from ROM on XIP images */
z_bss_zero();
z_data_copy();
#if defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
/* Invoke SoC-specific interrupt controller initialization */
z_soc_irq_init();
#else
z_arm_interrupt_init();
#endif /* CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */
/* Hand off to the kernel proper; never returns */
z_cstart();
CODE_UNREACHABLE;
}
``` | /content/code_sandbox/arch/arm/core/cortex_m/prep_c.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,493 |
```linker script
/*
*
*/
/*
* Padding inserted after the (first-stage) vector table, so that the
* Zephyr image does not attempt to use the area which we reserve to
* detect null pointer dereferencing (0x0 - <size>). If the end of the
* vector table section is higher than the upper end of the reserved
* area, we add no padding.
*
* Note that even if the following linker script snippet is included
* multiple times, the padding will only be added at most once, to the
* first stage vector table.
*/
#if defined(CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION)
. = MAX(ABSOLUTE(.), CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE);
#endif /* CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION */
``` | /content/code_sandbox/arch/arm/core/cortex_m/vector_table_pad.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 157 |
```c
/*
*
*/
/**
* @file
* @brief ARM Cortex-M power management
*/
#include <zephyr/kernel.h>
#include <cmsis_core.h>
#if defined(CONFIG_ARM_ON_EXIT_CPU_IDLE)
#include <soc_cpu_idle.h>
#endif
/**
* @brief Initialization of CPU idle
*
* Only called by arch_kernel_init(). Sets SEVONPEND bit once for the system's
* duration.
*/
void z_arm_cpu_idle_init(void)
{
/* Enable SEVONPEND so pending interrupts wake WFE in
 * arch_cpu_atomic_idle(). NOTE(review): this is a plain write, so any
 * other SCR bits (e.g. SLEEPDEEP) set by earlier firmware are cleared
 * here — confirm this is intended.
 */
SCB->SCR = SCB_SCR_SEVONPEND_Msk;
}
/* Code executed immediately after leaving the low-power state: the
 * SoC-supplied SOC_ON_EXIT_CPU_IDLE snippet when enabled, else a no-op.
 */
#if defined(CONFIG_ARM_ON_EXIT_CPU_IDLE)
#define ON_EXIT_IDLE_HOOK SOC_ON_EXIT_CPU_IDLE
#else
#define ON_EXIT_IDLE_HOOK do {} while (false)
#endif
/* Execute the given wait instruction (__WFI/__WFE), optionally gated by
 * the z_arm_on_enter_cpu_idle() hook, which may veto entering low power.
 */
#if defined(CONFIG_ARM_ON_ENTER_CPU_IDLE_HOOK)
#define SLEEP_IF_ALLOWED(wait_instr) do { \
	/* Skip the wait instr if on_enter_cpu_idle returns false */ \
	if (z_arm_on_enter_cpu_idle()) { \
		/* Wait for all memory transaction to complete */ \
		/* before entering low power state. */ \
		__DSB(); \
		wait_instr(); \
		/* Inline the macro provided by SoC-specific code */ \
		ON_EXIT_IDLE_HOOK; \
	} \
} while (false)
#else
#define SLEEP_IF_ALLOWED(wait_instr) do { \
	__DSB(); \
	wait_instr(); \
	ON_EXIT_IDLE_HOOK; \
} while (false)
#endif
#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
/* Default kernel idle implementation: arm a WFI-based sleep and
 * re-enable interrupts on wake-up.
 */
void arch_cpu_idle(void)
{
#if defined(CONFIG_TRACING)
sys_trace_idle();
#endif
#if CONFIG_ARM_ON_ENTER_CPU_IDLE_PREPARE_HOOK
z_arm_on_enter_cpu_idle_prepare();
#endif
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/*
 * PRIMASK is always cleared on ARMv7-M and ARMv8-M (not used
 * for interrupt locking), and configuring BASEPRI to the lowest
 * priority to ensure wake-up will cause interrupts to be serviced
 * before entering low power state.
 *
 * Set PRIMASK before configuring BASEPRI to prevent interruption
 * before wake-up.
 */
__disable_irq();
/*
 * Set wake-up interrupt priority to the lowest and synchronize to
 * ensure that this is visible to the WFI instruction.
 */
__set_BASEPRI(0);
__ISB();
#else
/*
 * For all the other ARM architectures that do not implement BASEPRI,
 * PRIMASK is used as the interrupt locking mechanism, and it is not
 * necessary to set PRIMASK here, as PRIMASK would have already been
 * set by the caller as part of interrupt locking if necessary
 * (i.e. if the caller sets _kernel.idle).
 */
#endif
/* Sleep via WFI unless the SoC enter-idle hook vetoes it */
SLEEP_IF_ALLOWED(__WFI);
__enable_irq();
__ISB();
}
#endif
#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
/* Atomic idle: sleep via WFE with PRIMASK held, then restore the
 * caller's interrupt-lock state from @p key on wake-up.
 */
void arch_cpu_atomic_idle(unsigned int key)
{
#if defined(CONFIG_TRACING)
sys_trace_idle();
#endif
#if CONFIG_ARM_ON_ENTER_CPU_IDLE_PREPARE_HOOK
z_arm_on_enter_cpu_idle_prepare();
#endif
/*
 * Lock PRIMASK while sleeping: wfe will still get interrupted by
 * incoming interrupts but the CPU will not service them right away.
 */
__disable_irq();
/*
 * No need to set SEVONPEND, it's set once in z_arm_cpu_idle_init()
 * and never touched again.
 */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* No BASEPRI, call wfe directly. (SEVONPEND is set in z_arm_cpu_idle_init()) */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* unlock BASEPRI so wfe gets interrupted by incoming interrupts */
__set_BASEPRI(0);
__ISB();
#else
#error Unsupported architecture
#endif
/* Sleep via WFE unless the SoC enter-idle hook vetoes it */
SLEEP_IF_ALLOWED(__WFE);
arch_irq_unlock(key);
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
__enable_irq();
#endif
}
#endif
``` | /content/code_sandbox/arch/arm/core/cortex_m/cpu_idle.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 852 |
```c
/*
*
*/
/**
* @file
* @brief ARM Cortex-M exception/interrupt exit API
*
* Provides functions for performing kernel handling when exiting exceptions or
* interrupts that are installed directly in the vector table (i.e. that are not
* wrapped around by _isr_wrapper()).
*/
#include <zephyr/kernel.h>
#include <kswap.h>
#include <cmsis_core.h>
/**
*
* @brief Kernel housekeeping when exiting interrupt handler installed
* directly in vector table
*
* Kernel allows installing interrupt handlers (ISRs) directly into the vector
* table to get the lowest interrupt latency possible. This allows the ISR to
* be invoked directly without going through a software interrupt table.
* However, upon exiting the ISR, some kernel work must still be performed,
* namely possible context switching. While ISRs connected in the software
* interrupt table do this automatically via a wrapper, ISRs connected directly
* in the vector table must invoke z_arm_int_exit() as the *very last* action
* before returning.
*
* e.g.
*
* void myISR(void)
* {
* printk("in %s\n", __FUNCTION__);
* doStuff();
* z_arm_int_exit();
* }
*
*/
FUNC_ALIAS(z_arm_exc_exit, z_arm_int_exit, void);
/**
*
* @brief Kernel housekeeping when exiting exception handler installed
* directly in vector table
*
* See z_arm_int_exit().
*
*/
Z_GENERIC_SECTION(.text._HandlerModeExit) void z_arm_exc_exit(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
	struct k_thread *running = _kernel.cpus->current;

	/* Pend a PendSV (context switch) when the running thread is
	 * preemptible and the scheduler has cached a different thread
	 * to run next.
	 */
	if ((running->base.prio >= 0) &&
	    (_kernel.ready_q.cache != running)) {
		SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
	}
#endif /* CONFIG_PREEMPT_ENABLED */
#ifdef CONFIG_STACK_SENTINEL
	z_check_stack_sentinel();
#endif /* CONFIG_STACK_SENTINEL */
}
``` | /content/code_sandbox/arch/arm/core/cortex_m/exc_exit.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 445 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <errno.h>
/* The 'key' actually represents the BASEPRI register
* prior to disabling interrupts via the BASEPRI mechanism.
*
* arch_swap() itself does not do much.
*
* It simply stores the intlock key (the BASEPRI value) parameter into
* current->basepri, and then triggers a PendSV exception, which does
* the heavy lifting of context switching.
* This is the only place we have to save BASEPRI since the other paths to
* z_arm_pendsv all come from handling an interrupt, which means we know the
* interrupts were not locked: in that case the BASEPRI value is 0.
*
* Given that arch_swap() is called to effect a cooperative context switch,
* only the caller-saved integer registers need to be saved in the thread of the
* outgoing thread. This is all performed by the hardware, which stores it in
* its exception stack frame, created when handling the z_arm_pendsv exception.
*
* On ARMv6-M, the intlock key is represented by the PRIMASK register,
* as BASEPRI is not available.
*/
int arch_swap(unsigned int key)
{
	/* Preload the default return value and stash the interrupt-lock
	 * key (BASEPRI, or PRIMASK on ARMv6-M) so the PendSV handler can
	 * restore it for the switched-to context.
	 */
	_current->arch.swap_return_value = -EAGAIN;
	_current->arch.basepri = key;

	/* Pend PendSV; it is taken as soon as interrupts are unlocked */
	SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
	irq_unlock(0);

	/* Execution resumes here once this thread is switched back in;
	 * report whatever return value was recorded for it meanwhile.
	 */
	return _current->arch.swap_return_value;
}
/* C portion of PendSV handling: installs the next thread selected by the
 * scheduler and returns the EXC_RETURN value to use on exception return.
 * NOTE(review): presumably invoked from the z_arm_pendsv assembly stub —
 * confirm against the arch assembly sources.
 */
uintptr_t z_arm_pendsv_c(uintptr_t exc_ret)
{
/* Store LSB of LR (EXC_RETURN) to the thread's 'mode' word. */
IF_ENABLED(CONFIG_ARM_STORE_EXC_RETURN,
(_kernel.cpus[0].current->arch.mode_exc_return = (uint8_t)exc_ret;));
/* Protect the kernel state while we play with the thread lists */
uint32_t basepri = arch_irq_lock();
/* fetch the thread to run from the ready queue cache */
struct k_thread *current = _kernel.cpus[0].current = _kernel.ready_q.cache;
/*
 * Clear PendSV so that if another interrupt comes in and
 * decides, with the new kernel state based on the new thread
 * being context-switched in, that it needs to reschedule, it
 * will take, but that previously pended PendSVs do not take,
 * since they were based on the previous kernel state and this
 * has been handled.
 */
SCB->ICSR = SCB_ICSR_PENDSVCLR_Msk;
/* For Cortex-M, store TLS pointer in a global variable,
 * as it lacks the process ID or thread ID register
 * to be used by toolchain to access thread data.
 */
IF_ENABLED(CONFIG_THREAD_LOCAL_STORAGE,
(extern uintptr_t z_arm_tls_ptr; z_arm_tls_ptr = current->tls));
/* Splice the incoming thread's stored EXC_RETURN LSB into exc_ret */
IF_ENABLED(CONFIG_ARM_STORE_EXC_RETURN,
(exc_ret = (exc_ret & 0xFFFFFF00) | current->arch.mode_exc_return));
/* Restore previous interrupt disable state (irq_lock key)
 * (We clear the arch.basepri field after restoring state)
 */
basepri = current->arch.basepri;
current->arch.basepri = 0;
arch_irq_unlock(basepri);
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
/* Re-program dynamic memory map */
z_arm_configure_dynamic_mpu_regions(current);
#endif
/* restore mode */
IF_ENABLED(CONFIG_USERSPACE, ({
CONTROL_Type ctrl = {.w = __get_CONTROL()};
/* Clear nPRIV here; the thread's saved mode bits (OR-ed in below)
 * restore the actual privilege level.
 */
ctrl.b.nPRIV = 0;
/* __set_CONTROL inserts an ISB which is may not be necessary here
 * (stack pointer may not be touched), but it's recommended to avoid
 * executing pre-fetched instructions with the previous privilege.
 */
__set_CONTROL(ctrl.w | current->arch.mode);
}));
return exc_ret;
}
``` | /content/code_sandbox/arch/arm/core/cortex_m/swap.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 917 |
```c
/*
*
*/
/**
* @file
* @brief ARM Cortex-M Timing functions interface based on DWT
*
*/
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/timing/timing.h>
#include <cortex_m/dwt.h>
#include <cmsis_core.h>
#include <zephyr/sys_clock.h>
/**
* @brief Return the current frequency of the cycle counter
*
* This routine returns the current frequency of the DWT Cycle Counter
* in DWT cycles per second (Hz).
*
* @return the cycle counter frequency value
*/
static inline uint64_t z_arm_dwt_freq_get(void)
{
#if defined(CONFIG_SOC_FAMILY_NORDIC_NRF) || \
defined(CONFIG_SOC_SERIES_IMXRT6XX)
/*
 * DWT frequency is taken directly from the
 * System Core clock (CPU) frequency, if the
 * CMSIS SystemCoreClock symbols is available.
 */
SystemCoreClockUpdate();
return SystemCoreClock;
#elif defined(CONFIG_CORTEX_M_SYSTICK)
/* SysTick and DWT both run at CPU frequency,
 * reflected in the system timer HW cycles/sec.
 */
return sys_clock_hw_cycles_per_sec();
#else
/* Calibrate the DWT frequency against the system timer once and
 * cache the result for all subsequent calls.
 */
static uint64_t dwt_frequency;
uint32_t cyc_start, cyc_end;
uint64_t dwt_start, dwt_end;
uint64_t cyc_freq = sys_clock_hw_cycles_per_sec();
uint64_t dcyc, ddwt;
if (!dwt_frequency) {
z_arm_dwt_init();
do {
/* Sample both counters around a 10 ms busy-wait window */
cyc_start = k_cycle_get_32();
dwt_start = z_arm_dwt_get_cycles();
k_busy_wait(10 * USEC_PER_MSEC);
cyc_end = k_cycle_get_32();
dwt_end = z_arm_dwt_get_cycles();
/*
 * cycles are in 32-bit, and delta must be
 * calculated in 32-bit precision. Or it would be
 * wrapping around in 64-bit.
 */
dcyc = (uint32_t)cyc_end - (uint32_t)cyc_start;
ddwt = dwt_end - dwt_start;
} while ((dcyc == 0) || (ddwt == 0));
dwt_frequency = (cyc_freq * ddwt) / dcyc;
}
return dwt_frequency;
#endif /* CONFIG_SOC_FAMILY_NORDIC_NRF || CONFIG_SOC_SERIES_IMXRT6XX */
}
void arch_timing_init(void)
{
/* Bring up the DWT unit and prepare its cycle counter for use */
z_arm_dwt_init();
z_arm_dwt_init_cycle_counter();
}
void arch_timing_start(void)
{
/* Start the DWT cycle counter */
z_arm_dwt_cycle_count_start();
}
void arch_timing_stop(void)
{
/* Clear CYCCNTENA to freeze the DWT cycle counter */
DWT->CTRL &= ~DWT_CTRL_CYCCNTENA_Msk;
}
timing_t arch_timing_counter_get(void)
{
/* Snapshot of the free-running DWT cycle counter */
return (timing_t)z_arm_dwt_get_cycles();
}
/* Number of DWT cycles elapsed between two captured counter values. */
uint64_t arch_timing_cycles_get(volatile timing_t *const start,
				volatile timing_t *const end)
{
	const timing_t first = *start;
	const timing_t last = *end;

	return last - first;
}
uint64_t arch_timing_freq_get(void)
{
/* DWT counter frequency in Hz; see z_arm_dwt_freq_get() */
return z_arm_dwt_freq_get();
}
uint64_t arch_timing_cycles_to_ns(uint64_t cycles)
{
/* Multiply before dividing to keep precision. NOTE(review): the
 * multiplication can overflow uint64_t for cycle counts above roughly
 * 2^64 / NSEC_PER_USEC — acceptable for benchmarking windows, but
 * worth confirming for long-running measurements.
 */
return (cycles) * (NSEC_PER_USEC) / arch_timing_freq_get_mhz();
}
uint64_t arch_timing_cycles_to_ns_avg(uint64_t cycles, uint32_t count)
{
/* Average duration in ns per iteration; count must be non-zero */
return arch_timing_cycles_to_ns(cycles) / count;
}
/* Counter frequency expressed as a truncated integer MHz value. */
uint32_t arch_timing_freq_get_mhz(void)
{
	const uint64_t freq_hz = arch_timing_freq_get();

	return (uint32_t)(freq_hz / 1000000U);
}
``` | /content/code_sandbox/arch/arm/core/cortex_m/timing.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 748 |
```objective-c
/*
*
*/
/**
* @file
* @brief Definitions for the boot vector table
*
*
* Definitions for the boot vector table.
*
* System exception handler names all have the same format:
*
* __<exception name with underscores>
*
* No other symbol has the same format, so they are easy to spot.
*/
#ifndef ZEPHYR_ARCH_ARM_CORE_AARCH32_CORTEX_M_VECTOR_TABLE_H_
#define ZEPHYR_ARCH_ARM_CORE_AARCH32_CORTEX_M_VECTOR_TABLE_H_
#ifdef _ASMLANGUAGE
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/sys/util.h>
GTEXT(__start)
GDATA(_vector_table)
GTEXT(z_arm_reset)
GTEXT(z_arm_nmi)
GTEXT(z_arm_hard_fault)
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
GTEXT(z_arm_svc)
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
GTEXT(z_arm_mpu_fault)
GTEXT(z_arm_bus_fault)
GTEXT(z_arm_usage_fault)
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
GTEXT(z_arm_secure_fault)
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
GTEXT(z_arm_svc)
GTEXT(z_arm_debug_monitor)
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
GTEXT(z_arm_pendsv)
GTEXT(z_arm_exc_spurious)
GTEXT(z_prep_c)
#if defined(CONFIG_GEN_ISR_TABLES)
GTEXT(_isr_wrapper)
#endif /* CONFIG_GEN_ISR_TABLES */
#else /* _ASMLANGUAGE */
#ifdef __cplusplus
extern "C" {
#endif
extern void *_vector_table[];
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_ARCH_ARM_CORE_AARCH32_CORTEX_M_VECTOR_TABLE_H_ */
``` | /content/code_sandbox/arch/arm/core/cortex_m/vector_table.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 387 |
```c
/*
*
*/
/**
* @file
* @brief ARM Cortex-M System Control Block interface
*
*
* Most of the SCB interface consists of simple bit-flipping methods, and is
* implemented as inline functions in scb.h. This module thus contains only data
* definitions and more complex routines, if needed.
*/
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/barrier.h>
#include <cmsis_core.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/cache.h>
#include <zephyr/arch/cache.h>
#if defined(CONFIG_CPU_HAS_NXP_MPU)
#include <fsl_sysmpu.h>
#endif
/**
 *
 * @brief Reset the system
 *
 * Weak default implementation: requests a core reset through the NVIC.
 * SoCs may override it with a platform-specific reboot routine.
 *
 */
void __weak sys_arch_reboot(int type)
{
	/* The reboot type is not meaningful for a plain core reset. */
	(void)type;

	NVIC_SystemReset();
}
#if defined(CONFIG_ARM_MPU)
#if defined(CONFIG_CPU_HAS_ARM_MPU)
/**
 *
 * @brief Clear all MPU region configuration
 *
 * Disables every MPU region implemented by the hardware. The number of
 * supported regions is read from the MPU Type register (DREGION field).
 *
 */
void z_arm_clear_arm_mpu_config(void)
{
	int region_count =
		(MPU->TYPE & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos;

	for (int region = 0; region < region_count; region++) {
		ARM_MPU_ClrRegion(region);
	}
}
#elif CONFIG_CPU_HAS_NXP_MPU
/* Clear all NXP SYSMPU region configuration: disable the MPU, then disable
 * every region descriptor except region 0.
 */
void z_arm_clear_arm_mpu_config(void)
{
	SYSMPU_Enable(SYSMPU, false);

	/* NXP MPU region 0 is reserved for the debugger */
	for (int region = 1; region < FSL_FEATURE_SYSMPU_DESCRIPTOR_COUNT;
	     region++) {
		SYSMPU_RegionEnable(SYSMPU, region, false);
	}
}
#endif /* CONFIG_CPU_HAS_NXP_MPU */
#endif /* CONFIG_ARM_MPU */
#if defined(CONFIG_INIT_ARCH_HW_AT_BOOT)
/**
 *
 * @brief Reset system control blocks and core registers
 *
 * This routine resets Cortex-M system control block
 * components and core registers. It is used when the image may have been
 * entered with hardware state left over from a previous application
 * (e.g. when chain-loaded by a bootloader).
 *
 */
void z_arm_init_arch_hw_at_boot(void)
{
	/* Disable interrupts */
	__disable_irq();

#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	/* Ensure configurable faults are not masked by a stale FAULTMASK */
	__set_FAULTMASK(0);
#endif

	/* Initialize System Control Block components */

#if defined(CONFIG_ARM_MPU)
	/* Clear MPU region configuration */
	z_arm_clear_arm_mpu_config();
#endif /* CONFIG_ARM_MPU */

	/* Disable NVIC interrupts */
	for (uint8_t i = 0; i < ARRAY_SIZE(NVIC->ICER); i++) {
		NVIC->ICER[i] = 0xFFFFFFFF;
	}
	/* Clear pending NVIC interrupts */
	for (uint8_t i = 0; i < ARRAY_SIZE(NVIC->ICPR); i++) {
		NVIC->ICPR[i] = 0xFFFFFFFF;
	}

#if defined(CONFIG_ARCH_CACHE)
#if defined(CONFIG_DCACHE)
	/* Reset D-Cache settings. If the D-Cache was enabled,
	 * SCB_DisableDCache() takes care of cleaning and invalidating it.
	 * If it was already disabled, just call SCB_InvalidateDCache() to
	 * reset it to a known clean state.
	 */
	if (SCB->CCR & SCB_CCR_DC_Msk) {
		/*
		 * Do not use sys_cache_data_disable at this point, but instead
		 * the architecture specific function. This ensures that the
		 * cache is disabled although CONFIG_CACHE_MANAGEMENT might be
		 * disabled.
		 */
		SCB_DisableDCache();
	} else {
		SCB_InvalidateDCache();
	}
#endif /* CONFIG_DCACHE */
#if defined(CONFIG_ICACHE)
	/*
	 * Reset I-Cache settings.
	 * Do not use sys_cache_data_disable at this point, but instead
	 * the architecture specific function. This ensures that the
	 * cache is disabled although CONFIG_CACHE_MANAGEMENT might be
	 * disabled.
	 */
	SCB_DisableICache();
#endif /* CONFIG_ICACHE */
#endif /* CONFIG_ARCH_CACHE */

	/* Restore Interrupts */
	__enable_irq();

	/* Ensure all register writes above have taken effect before boot
	 * continues.
	 */
	barrier_dsync_fence_full();
	barrier_isync_fence_full();
}
#endif /* CONFIG_INIT_ARCH_HW_AT_BOOT */
``` | /content/code_sandbox/arch/arm/core/cortex_m/scb.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 921 |
```unknown
/*
*
*/
/**
* @file
* @brief Populated vector table in ROM
*
* Vector table at the beginning of the image for starting system. The reset
* vector is the system entry point, ie. the first instruction executed.
*
* The table is populated with all the system exception handlers. The NMI vector
* must be populated with a valid handler since it can happen at any time. The
* rest should not be triggered until the kernel is ready to handle them.
*/
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include "vector_table.h"
_ASM_FILE_PROLOGUE
/*
* Tell armclang that stack alignment are ensured.
*/
.eabi_attribute Tag_ABI_align_preserved, 1
GDATA(z_main_stack)
SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,_vector_table)

	/*
	 * Vector 0: initial Main Stack Pointer value.
	 *
	 * setting the _very_ early boot on the main stack allows to use memset
	 * on the interrupt stack when CONFIG_INIT_STACKS is enabled before
	 * switching to the interrupt stack for the rest of the early boot
	 */
	.word z_main_stack + CONFIG_MAIN_STACK_SIZE

	.word z_arm_reset		/* vector 1: Reset */
	.word z_arm_nmi			/* vector 2: NMI */
	.word z_arm_hard_fault		/* vector 3: HardFault */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* Vectors 4-10 are reserved on Baseline implementations */
	.word 0
	.word 0
	.word 0
	.word 0
	.word 0
	.word 0
	.word 0
	.word z_arm_svc			/* vector 11: SVCall */
	.word 0				/* vector 12: reserved */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	.word z_arm_mpu_fault		/* vector 4: MemManage */
	.word z_arm_bus_fault		/* vector 5: BusFault */
	.word z_arm_usage_fault		/* vector 6: UsageFault */
#if defined(CONFIG_ARMV8_M_SE)
	/* Vector 7: SecureFault (ARMv8-M with Security Extension only) */
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
	.word z_arm_secure_fault
#else
	.word z_arm_exc_spurious
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
#else
	.word 0
#endif /* CONFIG_ARMV8_M_SE */
	/* Vectors 8-10 are reserved */
	.word 0
	.word 0
	.word 0
	.word z_arm_svc			/* vector 11: SVCall */
	.word z_arm_debug_monitor	/* vector 12: DebugMonitor */
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
	.word 0				/* vector 13: reserved */
	/* Vector 14: PendSV, the context-switch exception */
#if defined(CONFIG_MULTITHREADING)
	.word z_arm_pendsv
#else
	.word z_arm_exc_spurious
#endif
	/* Vector 15: SysTick */
#if defined(CONFIG_CPU_CORTEX_M_HAS_SYSTICK)
#if defined(CONFIG_SYS_CLOCK_EXISTS) && \
		defined(CONFIG_CORTEX_M_SYSTICK_INSTALL_ISR)
	.word sys_clock_isr
#else
	.word z_arm_exc_spurious
#endif /* CONFIG_SYS_CLOCK_EXISTS && CONFIG_CORTEX_M_SYSTICK_INSTALL_ISR */
#else
	.word 0
#endif /* CONFIG_CPU_CORTEX_M_HAS_SYSTICK */
``` | /content/code_sandbox/arch/arm/core/cortex_m/vector_table.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 613 |
```unknown
/*
*
*/
/**
* @file
* @brief Thread context switching for ARM Cortex-M
*
* This module implements the routines necessary for thread context switching
* on ARM Cortex-M CPUs.
*/
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/syscall.h>
#include <zephyr/kernel.h>
#include <zephyr/arch/arm/cortex_m/cpu.h>
_ASM_FILE_PROLOGUE
GTEXT(z_arm_svc)
GTEXT(z_arm_pendsv)
GTEXT(z_do_kernel_oops)
GTEXT(z_arm_pendsv_c)
#if defined(CONFIG_USERSPACE)
GTEXT(z_arm_do_syscall)
#endif
GDATA(_kernel)
#if defined(CONFIG_THREAD_LOCAL_STORAGE)
GDATA(z_arm_tls_ptr)
#endif
/**
 *
 * @brief PendSV exception handler, handling context switches
 *
 * The PendSV exception is the only execution context in the system that can
 * perform context switching. When an execution context finds out it has to
 * switch contexts, it pends the PendSV exception.
 *
 * When PendSV is pended, the decision that a context switch must happen has
 * already been taken. In other words, when z_arm_pendsv() runs, we *know* we
 * have to swap *something*.
 *
 * For Cortex-M, z_arm_pendsv() is invoked with no arguments.
 */
SECTION_FUNC(TEXT, z_arm_pendsv)

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	/* Register the context switch */
	push {r0, lr}
	bl z_thread_mark_switched_out
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* Baseline Thumb cannot pop directly into LR; bounce through r1 */
	pop {r0, r1}
	mov lr, r1
#else
	pop {r0, lr}
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */

	/* load _kernel into r1 and current k_thread into r2 */
	ldr r1, =_kernel
	ldr r2, [r1, #_kernel_offset_to_current]

#if defined(CONFIG_ARM_STORE_EXC_RETURN)
	/* Store LSB of LR (EXC_RETURN) to the thread's 'mode' word. */
	strb lr, [r2, #_thread_offset_to_mode_exc_return]
#endif

	/* addr of callee-saved regs in thread in r0 */
	ldr r0, =_thread_offset_to_callee_saved
	add r0, r2

	/* save callee-saved + psp in thread */
	mrs ip, PSP

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* Baseline STM cannot access high registers; save in two batches. */
	/* Store current r4-r7 */
	stmea r0!, {r4-r7}
	/* copy r8-r12 into r3-r7 */
	mov r3, r8
	mov r4, r9
	mov r5, r10
	mov r6, r11
	mov r7, ip
	/* store r8-12 */
	stmea r0!, {r3-r7}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	stmia r0, {r4-r11, ip}
#ifdef CONFIG_FPU_SHARING
	/* Assess whether switched-out thread had been using the FP registers. */
	tst lr, #_EXC_RETURN_FTYPE_Msk
	bne out_fp_endif

	/* FP context active: set FP state and store callee-saved registers.
	 * Note: if Lazy FP stacking is enabled, storing the callee-saved
	 * registers will automatically trigger FP state preservation in
	 * the thread's stack. This will also clear the FPCCR.LSPACT flag.
	 */
	add r0, r2, #_thread_offset_to_preempt_float
	vstmia r0, {s16-s31}

out_fp_endif:
	/* At this point FPCCR.LSPACT is guaranteed to be cleared,
	 * regardless of whether the thread has an active FP context.
	 */
#endif /* CONFIG_FPU_SHARING */
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */

	/* Preserve EXC_RETURN in r4 (callee-saved) across the C call and
	 * pass it to z_arm_pendsv_c() as its first argument.
	 */
	mov r4, lr
	mov r0, lr
	bl z_arm_pendsv_c
	mov lr, r4

	/* Reload _kernel and the (possibly new) current thread */
	ldr r1, =_kernel
	ldr r2, [r1, #_kernel_offset_to_current]

#if defined(CONFIG_ARM_STORE_EXC_RETURN)
	/* Restore EXC_RETURN value. */
	mov lr, r0
#endif

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	ldr r4, =_thread_offset_to_callee_saved
	adds r0, r2, r4

	/* restore r4-r12 for new thread */
	/* first restore r8-r12 located after r4-r7 (4*4bytes) */
	adds r0, #16
	ldmia r0!, {r3-r7}
	/* move to correct registers */
	mov r8, r3
	mov r9, r4
	mov r10, r5
	mov r11, r6
	mov ip, r7
	/* restore r4-r7, go back 9*4 bytes to the start of the stored block */
	subs r0, #36
	ldmia r0!, {r4-r7}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
#ifdef CONFIG_FPU_SHARING
	/* Assess whether switched-in thread had been using the FP registers. */
	tst lr, #_EXC_RETURN_FTYPE_Msk
	beq in_fp_active
	/* FP context inactive for swapped-in thread:
	 * - reset FPSCR to 0
	 * - set EXC_RETURN.F_Type (prevents FP frame un-stacking when returning
	 *   from pendSV)
	 */
	movs.n r3, #0
	vmsr fpscr, r3
	b in_fp_endif

in_fp_active:
	/* FP context active:
	 * - clear EXC_RETURN.F_Type
	 * - FPSCR and caller-saved registers will be restored automatically
	 * - restore callee-saved FP registers
	 */
	add r0, r2, #_thread_offset_to_preempt_float
	vldmia r0, {s16-s31}
in_fp_endif:
	/* Clear CONTROL.FPCA that may have been set by FP instructions */
	mrs r3, CONTROL
	bic r3, #_CONTROL_FPCA_Msk
	msr CONTROL, r3
	isb
#endif

	/* load callee-saved + psp from thread */
	add r0, r2, #_thread_offset_to_callee_saved
	ldmia r0, {r4-r11, ip}
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */

	msr PSP, ip

#ifdef CONFIG_BUILTIN_STACK_GUARD
	/* r2 contains k_thread */
	add r0, r2, #0
	push {r2, lr}
	bl configure_builtin_stack_guard
	pop {r2, lr}
#endif /* CONFIG_BUILTIN_STACK_GUARD */

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	/* Register the context switch */
	push {r0, lr}
	bl z_thread_mark_switched_in
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	pop {r0, r1}
	mov lr, r1
#else
	pop {r0, lr}
#endif
#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */

	/*
	 * Cortex-M: return from PendSV exception
	 */
	bx lr
/**
 *
 * @brief Service call handler
 *
 * The service call (svc) is used in the following occasions:
 * - IRQ offloading
 * - Kernel run-time exceptions
 * - System Calls (User mode)
 *
 */
SECTION_FUNC(TEXT, z_arm_svc)
	/* Use EXC_RETURN state to find out if stack frame is on the
	 * MSP or PSP
	 */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	movs r0, #_EXC_RETURN_SPSEL_Msk
	mov r1, lr
	tst r1, r0
	beq _stack_frame_msp
	mrs r0, PSP
	bne _stack_frame_endif
_stack_frame_msp:
	mrs r0, MSP
_stack_frame_endif:
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	tst lr, #_EXC_RETURN_SPSEL_Msk /* did we come from thread mode ? */
	ite eq	/* if zero (equal), came from handler mode */
	mrseq r0, MSP	/* handler mode, stack frame is on MSP */
	mrsne r0, PSP	/* thread mode, stack frame is on PSP */
#endif

	/* Figure out what SVC call number was invoked */
	ldr r1, [r0, #24]	/* grab address of PC from stack frame */
	/* SVC is a two-byte instruction, point to it and read the
	 * SVC number (lower byte of SVC instruction)
	 */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	subs r1, r1, #2
	ldrb r1, [r1]
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	ldrb r1, [r1, #-2]
#endif

	/*
	 * grab service call number:
	 * 0: Unused
	 * 1: irq_offload (if configured)
	 * 2: kernel panic or oops (software generated fatal exception)
	 * 3: System call (if user mode supported)
	 */
#if defined(CONFIG_USERSPACE)
	mrs r2, CONTROL

	cmp r1, #3
	beq _do_syscall

	/*
	 * check that we are privileged before invoking other SVCs
	 * oops if we are unprivileged
	 */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* CONTROL.nPRIV (bit 0) set means the caller was unprivileged */
	movs r3, #0x1
	tst r2, r3
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	tst r2, #0x1
#endif
	bne _oops

#endif /* CONFIG_USERSPACE */

	cmp r1, #2
	beq _oops

#if defined(CONFIG_IRQ_OFFLOAD)
	push {r0, lr}
	bl z_irq_do_offload	/* call C routine which executes the offload */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	pop {r0, r3}
	mov lr, r3
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	pop {r0, lr}
#endif

	/* exception return is done in z_arm_int_exit() */
	ldr r0, =z_arm_int_exit
	bx r0

#endif

_oops:
	push {r0, lr}
#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	/* Build _callee_saved_t. To match the struct
	 * definition we push the psp & then r11-r4
	 */
	mrs r1, PSP
	push {r1, r2}
	push {r4-r11}
	mov r1, sp	/* pointer to _callee_saved_t */
#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */
#endif /* CONFIG_EXTRA_EXCEPTION_INFO */
	bl z_do_kernel_oops
	/* return from SVC exception is done here */
#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	/* We do not need to restore any register state here
	 * because we did not use any callee-saved registers
	 * in this routine. Therefore, we can just reset
	 * the MSP to its value prior to entering the function
	 */
	add sp, #40
#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */
#endif /* CONFIG_EXTRA_EXCEPTION_INFO */
	pop {r0, pc}

#if defined(CONFIG_USERSPACE)
	/*
	 * System call will setup a jump to the z_arm_do_syscall() function
	 * when the SVC returns via the bx lr.
	 *
	 * There is some trickery involved here because we have to preserve
	 * the original PC value so that we can return back to the caller of
	 * the SVC.
	 *
	 * On SVC exception, the stack looks like the following:
	 * r0 - r1 - r2 - r3 - r12 - LR - PC - PSR
	 *
	 * Registers look like:
	 * r0 - arg1
	 * r1 - arg2
	 * r2 - arg3
	 * r3 - arg4
	 * r4 - arg5
	 * r5 - arg6
	 * r6 - call_id
	 * r8 - saved link register
	 */
_do_syscall:
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	movs r3, #24
	ldr r1, [r0, r3]	/* grab address of PC from stack frame */
	mov r8, r1
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	ldr r8, [r0, #24]	/* grab address of PC from stack frame */
#endif
	ldr r1, =z_arm_do_syscall
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	str r1, [r0, r3]	/* overwrite the PC to point to z_arm_do_syscall */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	str r1, [r0, #24]	/* overwrite the PC to point to z_arm_do_syscall */
#endif

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	ldr r3, =K_SYSCALL_LIMIT
	cmp r6, r3
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	/* validate syscall limit */
	ldr ip, =K_SYSCALL_LIMIT
	cmp r6, ip
#endif
	/* The supplied syscall_id must be lower than the limit
	 * (Requires unsigned integer comparison)
	 */
	blo valid_syscall_id

	/* bad syscall id. Set arg1 to bad id and set call_id to SYSCALL_BAD */
	str r6, [r0]
	ldr r6, =K_SYSCALL_BAD

	/* Bad syscalls treated as valid syscalls with ID K_SYSCALL_BAD. */

valid_syscall_id:
	ldr r0, =_kernel
	ldr r0, [r0, #_kernel_offset_to_current]
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* Stash CONTROL (r2) in ip while r2/r3 are used as scratch */
	mov ip, r2
	ldr r1, =_thread_offset_to_mode
	ldr r3, [r0, r1]
	movs r2, #1
	bics r3, r2
	/* Store (privileged) mode in thread's mode state variable */
	str r3, [r0, r1]
	mov r2, ip
	dsb
	/* set mode to privileged, r2 still contains value from CONTROL */
	movs r3, #1
	bics r2, r3
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	ldr r1, [r0, #_thread_offset_to_mode]
	bic r1, #1
	/* Store (privileged) mode in thread's mode state variable */
	str r1, [r0, #_thread_offset_to_mode]
	dsb
	/* set mode to privileged, r2 still contains value from CONTROL */
	bic r2, #1
#endif
	msr CONTROL, r2

	/* ISB is not strictly necessary here (stack pointer is not being
	 * touched), but it's recommended to avoid executing pre-fetched
	 * instructions with the previous privilege.
	 */
	isb

#if defined(CONFIG_BUILTIN_STACK_GUARD)
	/* Thread is now in privileged mode; after returning from SVCall it
	 * will use the default (user) stack before switching to the privileged
	 * stack to execute the system call. We need to protect the user stack
	 * against stack overflows until this stack transition.
	 */
	ldr r1, [r0, #_thread_offset_to_stack_info_start]	/* stack_info.start */
	msr PSPLIM, r1
#endif /* CONFIG_BUILTIN_STACK_GUARD */

	/* return from SVC to the modified LR - z_arm_do_syscall */
	bx lr
#endif /* CONFIG_USERSPACE */
``` | /content/code_sandbox/arch/arm/core/cortex_m/swap_helper.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,638 |
```c
/*
*
*/
/**
* @file
* @brief ARM Cortex-M wrapper for ISRs with parameter
*
* Wrapper installed in vector table for handling dynamic interrupts that accept
* a parameter.
*/
#include <zephyr/kernel.h>
#include <zephyr/irq.h>
#include <zephyr/pm/pm.h>
#include <cmsis_core.h>
/**
 *
 * @brief Wrapper around ISRs when inserted in software ISR table
 *
 * When inserted in the vector table, _isr_wrapper() demuxes the ISR table
 * using the running interrupt number as the index, and invokes the registered
 * ISR with its corresponding argument. When returning from the ISR, it
 * determines if a context switch needs to happen (see documentation for
 * z_arm_pendsv()) and pends the PendSV exception if so: the latter will
 * perform the context switch itself.
 *
 */
void _isr_wrapper(void)
{
#ifdef CONFIG_TRACING_ISR
	sys_trace_isr_enter();
#endif /* CONFIG_TRACING_ISR */

#ifdef CONFIG_PM
	/*
	 * All interrupts are disabled when handling idle wakeup. For tickless
	 * idle, this ensures that the calculation and programming of the
	 * device for the next timer deadline is not interrupted. For
	 * non-tickless idle, this ensures that the clearing of the kernel idle
	 * state is not interrupted. In each case, pm_system_resume
	 * is called with interrupts disabled.
	 */

	/*
	 * Disable interrupts to prevent nesting while exiting idle state. This
	 * is only necessary for the Cortex-M because it is the only ARM
	 * architecture variant that automatically enables interrupts when
	 * entering an ISR.
	 */
	__disable_irq();

	/* is this a wakeup from idle ? */
	/* requested idle duration, in ticks */
	if (_kernel.idle != 0) {
		/* clear kernel idle state */
		_kernel.idle = 0;
		pm_system_resume();
	}
	/* re-enable interrupts */
	__enable_irq();
#endif /* CONFIG_PM */

#if defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
	int32_t irq_number = z_soc_irq_get_active();
#else
	/* _sw_isr_table does not map the exceptions, only the interrupts. */
	int32_t irq_number = __get_IPSR();
#endif
	/* The first 16 vectors are core exceptions; external interrupts start
	 * at vector 16 while _sw_isr_table is indexed from 0.
	 * NOTE(review): the same offset is applied to the custom-controller
	 * path — assumes z_soc_irq_get_active() also returns vector-numbered
	 * IDs; confirm against the SoC implementation.
	 */
	irq_number -= 16;

	struct _isr_table_entry *entry = &_sw_isr_table[irq_number];
	(entry->isr)(entry->arg);

#if defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
	z_soc_irq_eoi(irq_number);
#endif

#ifdef CONFIG_TRACING_ISR
	sys_trace_isr_exit();
#endif /* CONFIG_TRACING_ISR */

	/* Tail-chains into the exception-exit path, which may pend PendSV */
	z_arm_exc_exit();
}
``` | /content/code_sandbox/arch/arm/core/cortex_m/isr_wrapper.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 560 |
```unknown
/*
*
*/
/**
* @file
* @brief Fault handlers for ARM Cortex-M
*
* Fault handlers for ARM Cortex-M processors.
*/
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
_ASM_FILE_PROLOGUE
GTEXT(z_arm_fault)
GTEXT(z_arm_hard_fault)
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* HardFault is used for all fault conditions on ARMv6-M. */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
GTEXT(z_arm_mpu_fault)
GTEXT(z_arm_bus_fault)
GTEXT(z_arm_usage_fault)
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
GTEXT(z_arm_secure_fault)
#endif /* CONFIG_ARM_SECURE_FIRMWARE*/
WTEXT(z_arm_debug_monitor)
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
GTEXT(z_arm_exc_spurious)
/**
 *
 * @brief Fault handler installed in the fault vectors
 *
 * Entry point for the HardFault, MemManageFault, BusFault, UsageFault,
 * SecureFault and Debug Monitor exceptions.
 *
 * The function supplies the values of
 * - the MSP
 * - the PSP
 * - the EXC_RETURN value
 * - callee saved register state (r4-r11, psp)
 * as parameters to the z_arm_fault() C function that will perform the
 * rest of the fault handling:
 * (i.e. z_arm_fault(MSP, PSP, EXC_RETURN, CALLEE_REGS)).
 * Provides these symbols:
 *
 *   z_arm_hard_fault
 *   z_arm_mpu_fault
 *   z_arm_bus_fault
 *   z_arm_usage_fault
 *   z_arm_secure_fault
 *   z_arm_debug_monitor
 *   z_arm_exc_spurious
 */
SECTION_SUBSEC_FUNC(TEXT,__fault,z_arm_hard_fault)
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* HardFault is used for all fault conditions on ARMv6-M. */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* All fault entry points below share the single handler body that follows. */
SECTION_SUBSEC_FUNC(TEXT,__fault,z_arm_mpu_fault)
SECTION_SUBSEC_FUNC(TEXT,__fault,z_arm_bus_fault)
SECTION_SUBSEC_FUNC(TEXT,__fault,z_arm_usage_fault)
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
SECTION_SUBSEC_FUNC(TEXT,__fault,z_arm_secure_fault)
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
SECTION_SUBSEC_FUNC(TEXT,__fault,z_arm_debug_monitor)
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
SECTION_SUBSEC_FUNC(TEXT,__fault,z_arm_exc_spurious)

	/* Gather the arguments for z_arm_fault() */
	mrs r0, MSP
	mrs r1, PSP
	push {r0, lr}

#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
	/* Build _callee_saved_t. To match the struct
	 * definition we push the psp & then r11-r4
	 */
	push { r1, r2 }
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* Baseline cannot push high registers directly; copy them down */
	mov r3, r11
	mov r2, r10
	push {r2, r3}
	mov r3, r9
	mov r2, r8
	push {r2, r3}
	push {r4-r7}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	push {r4-r11}
#endif
	mov r3, sp	/* pointer to _callee_saved_t */
#endif /* CONFIG_EXTRA_EXCEPTION_INFO */

	mov r2, lr	/* EXC_RETURN */
	bl z_arm_fault

#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
	/* We do not need to restore any register state here
	 * because we did not use any callee-saved registers
	 * in this routine. Therefore, we can just reset
	 * the MSP to its value prior to entering the function
	 */
	add sp, #40
#endif

	pop {r0, pc}

	.end
``` | /content/code_sandbox/arch/arm/core/cortex_m/fault_s.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 819 |
```c
/*
*
*/
/**
* @file
* @brief ARM Cortex-M debug monitor functions interface based on DWT
*
*/
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <cortex_m/dwt.h>
/**
 * @brief Assess whether a debug monitor event should be treated as an error
 *
 * This routine checks the status of a debug monitor exception, and
 * evaluates whether this needs to be considered as a processor error.
 *
 * @return true if the DM exception is a processor error, otherwise false
 */
bool z_arm_debug_monitor_event_error_check(void)
{
#if defined(CONFIG_NULL_POINTER_EXCEPTION_DETECTION_DWT)
	/* Assess whether this debug exception was triggered
	 * as a result of a null pointer (R/W) dereferencing.
	 */
	if (SCB->DFSR & SCB_DFSR_DWTTRAP_Msk) {
		/* Debug event generated by the DWT unit */
		if (DWT->FUNCTION0 & DWT_FUNCTION_MATCHED_Msk) {
			/* DWT Comparator match used for */
			printk("Null-pointer exception?\n");
		}
		/* Note: the read of FUNCTION0 above clears the MATCHED flag,
		 * so this second read is expected to observe it as zero.
		 */
		__ASSERT((DWT->FUNCTION0 & DWT_FUNCTION_MATCHED_Msk) == 0,
			"MATCHED flag should have been cleared on read.");

		return true;
	}
	if (SCB->DFSR & SCB_DFSR_BKPT_Msk) {
		/* Treat BKPT events as an error as well (since they
		 * would mean the system would be stuck in an infinite loop).
		 */
		return true;
	}
#endif /* CONFIG_NULL_POINTER_EXCEPTION_DETECTION_DWT */

	/* Any other debug monitor event is not considered an error */
	return false;
}
#if defined(CONFIG_NULL_POINTER_EXCEPTION_DETECTION_DWT)

/* The area (0x0 - <size>) monitored by DWT needs to be a power of 2,
 * so we add a build assert that catches it.
 */
BUILD_ASSERT(!(CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE &
	(CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE - 1)),
	"the size of the partition must be power of 2");

/* Configure the DWT unit to raise a debug monitor exception on any R/W
 * access to the address range [0x0, PAGE_SIZE), catching null-pointer
 * dereferences. Returns 0 on success, -EINVAL if the hardware lacks the
 * required comparators.
 */
static int z_arm_debug_enable_null_pointer_detection(void)
{
	z_arm_dwt_init();
	z_arm_dwt_enable_debug_monitor();

	/* Enable null pointer detection by monitoring R/W access to the
	 * memory area 0x0 - <size> that is (or was intentionally left)
	 * unused by the system.
	 */

#if defined(CONFIG_ARMV8_M_MAINLINE)
	/* ASSERT that we have the comparators needed for the implementation */
	if (((DWT->CTRL & DWT_CTRL_NUMCOMP_Msk) >> DWT_CTRL_NUMCOMP_Pos) < 2) {
		__ASSERT(0, "on board DWT does not support the feature\n");
		return -EINVAL;
	}

	/* Use comparators 0, 1, R/W access check */
	/* COMP0/COMP1 bound the monitored address range (inclusive). */
	DWT->COMP0 = 0;
	DWT->COMP1 = CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE - 1;

	/* NOTE(review): MATCH values 0x4/0x7 are presumed to select the
	 * address-compare / address-range-limit modes of the ARMv8-M DWT —
	 * confirm against the DWT_FUNCTIONn register description.
	 */
	DWT->FUNCTION0 =
		((0x4 << DWT_FUNCTION_MATCH_Pos) & DWT_FUNCTION_MATCH_Msk)
		|
		((0x1 << DWT_FUNCTION_ACTION_Pos) & DWT_FUNCTION_ACTION_Msk)
		|
		((0x0 << DWT_FUNCTION_DATAVSIZE_Pos) & DWT_FUNCTION_DATAVSIZE_Msk)
		;
	DWT->FUNCTION1 =
		((0x7 << DWT_FUNCTION_MATCH_Pos) & DWT_FUNCTION_MATCH_Msk)
		|
		((0x1 << DWT_FUNCTION_ACTION_Pos) & DWT_FUNCTION_ACTION_Msk)
		|
		((0x0 << DWT_FUNCTION_DATAVSIZE_Pos) & DWT_FUNCTION_DATAVSIZE_Msk)
		;
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	/* ASSERT that we have the comparator needed for the implementation */
	if (((DWT->CTRL & DWT_CTRL_NUMCOMP_Msk) >> DWT_CTRL_NUMCOMP_Pos) < 1) {
		__ASSERT(0, "on board DWT does not support the feature\n");
		return -EINVAL;
	}

	/* Use comparator 0, R/W access check */
	DWT->COMP0 = 0;
	DWT->FUNCTION0 = (0x7 << DWT_FUNCTION_FUNCTION_Pos) &
		DWT_FUNCTION_FUNCTION_Msk;

	/* Set mask according to the desired size:
	 * MASK0 = log2(size), i.e. the number of low-order address bits
	 * ignored by the comparison (size is a power of 2, see BUILD_ASSERT).
	 */
	DWT->MASK0 = 32 - __builtin_clzl(
		CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE - 1);
#endif

	return 0;
}

SYS_INIT(z_arm_debug_enable_null_pointer_detection, PRE_KERNEL_1,
	CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);

#endif /* CONFIG_NULL_POINTER_EXCEPTION_DETECTION_DWT */
``` | /content/code_sandbox/arch/arm/core/cortex_m/debug.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 991 |
```c
/*
* Organisation (CSIRO) ABN 41 687 119 230.
*
*/
#include <zephyr/arch/common/semihost.h>
/**
 * @brief Execute a semihosting operation
 *
 * Issues the Cortex-M semihosting breakpoint (BKPT 0xAB) with the operation
 * code in r0 and a pointer to the argument block in r1; the attached host
 * debugger services the request and returns the result in r0.
 *
 * @param instr semihosting operation code
 * @param args pointer to the operation's argument block
 * @return value returned by the host in r0
 */
long semihost_exec(enum semihost_instr instr, void *args)
{
	/* Explicit register binding per the semihosting call convention */
	register unsigned int r0 __asm__ ("r0") = instr;
	register void *r1 __asm__ ("r1") = args;
	register int ret __asm__ ("r0");

	/* "memory" clobber: the host may read/write the argument block */
	__asm__ __volatile__ ("bkpt 0xab"
		: "=r" (ret) : "r" (r0), "r" (r1) : "memory");
	return ret;
}
``` | /content/code_sandbox/arch/arm/core/cortex_m/semihost.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 133 |
```unknown
/*
*
*/
/**
* @file
* @brief Reset handler
*
* Reset handler that prepares the system for running C code.
*/
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/cpu.h>
#include "vector_table.h"
_ASM_FILE_PROLOGUE
GTEXT(z_arm_reset)
GTEXT(z_early_memset)
GDATA(z_interrupt_stacks)
#if defined(CONFIG_DEBUG_THREAD_INFO)
GDATA(z_sys_post_kernel)
#endif
#if defined(CONFIG_PLATFORM_SPECIFIC_INIT)
GTEXT(z_arm_platform_init)
#endif
#if defined(CONFIG_INIT_ARCH_HW_AT_BOOT)
GTEXT(z_arm_init_arch_hw_at_boot)
GDATA(z_main_stack)
#endif
#if defined(CONFIG_PM_S2RAM)
GTEXT(arch_pm_s2ram_resume)
#endif
/**
 *
 * @brief Reset vector
 *
 * Ran when the system comes out of reset, or when the firmware image is chain-
 * loaded by another application (for instance, a bootloader). At minimum, the
 * processor must be in thread mode with privileged level. At this point, the
 * main stack pointer (MSP) should be already pointing to a valid area in SRAM.
 *
 * Locking interrupts prevents anything but NMIs and hard faults from
 * interrupting the CPU. A default NMI handler is already in place in the
 * vector table, and the boot code should not generate hard fault, or we're in
 * deep trouble.
 *
 * We want to use the process stack pointer (PSP) instead of the MSP, since the
 * MSP is to be set up to point to the one-and-only interrupt stack during
 * later boot. That would not be possible if in use for running C code.
 *
 * When these steps are completed, jump to z_prep_c(), which will finish
 * setting up the system for running C code.
 *
 */
SECTION_SUBSEC_FUNC(TEXT,_reset_section,z_arm_reset)

/*
 * The entry point is located at the z_arm_reset symbol, which
 * is fetched by a XIP image playing the role of a bootloader, which jumps to
 * it, not through the reset vector mechanism. Such bootloaders might want to
 * search for a __start symbol instead, so create that alias here.
 */
SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)

#if defined(CONFIG_DEBUG_THREAD_INFO)
	/* Clear z_sys_post_kernel flag for RTOS aware debuggers */
	movs.n r0, #0
	ldr r1, =z_sys_post_kernel
	strb r0, [r1]
#endif /* CONFIG_DEBUG_THREAD_INFO */

#if defined(CONFIG_INIT_ARCH_HW_AT_BOOT)
	/* Reset CONTROL register */
	movs.n r0, #0
	msr CONTROL, r0
	isb
#if defined(CONFIG_CPU_CORTEX_M_HAS_SPLIM)
	/* Clear SPLIM registers */
	movs.n r0, #0
	msr MSPLIM, r0
	msr PSPLIM, r0
#endif /* CONFIG_CPU_CORTEX_M_HAS_SPLIM */
#endif /* CONFIG_INIT_ARCH_HW_AT_BOOT */

#if defined(CONFIG_PM_S2RAM)
	/* On resume from suspend-to-RAM this call does not return here */
	bl arch_pm_s2ram_resume
#endif /* CONFIG_PM_S2RAM */

#if defined(CONFIG_PLATFORM_SPECIFIC_INIT)
	bl z_arm_platform_init
#endif

#if defined(CONFIG_INIT_ARCH_HW_AT_BOOT)
#if defined(CONFIG_CPU_HAS_ARM_MPU)
	/* Disable MPU */
	movs.n r0, #0
	ldr r1, =_SCS_MPU_CTRL
	str r0, [r1]
	dsb
#endif /* CONFIG_CPU_HAS_ARM_MPU */
	/* Point MSP at the top of the main stack before calling C code */
	ldr r0, =z_main_stack + CONFIG_MAIN_STACK_SIZE
	msr msp, r0

	/* Initialize core architecture registers and system blocks */
	bl z_arm_init_arch_hw_at_boot
#endif /* CONFIG_INIT_ARCH_HW_AT_BOOT */

	/* lock interrupts: will get unlocked when switch to main task */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	cpsid i
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	movs.n r0, #_EXC_IRQ_DEFAULT_PRIO
	msr BASEPRI, r0
#else
#error Unknown ARM architecture
#endif

#ifdef CONFIG_WDOG_INIT
	/* board-specific watchdog initialization is necessary */
	bl z_arm_watchdog_init
#endif

/*
 *
 * Note: in all Cortex-M variants the interrupt stack area is at
 * least equal to CONFIG_ISR_STACK_SIZE + MPU_GUARD_ALIGN_AND_SIZE
 * (may be larger due to rounding up for stack pointer aligning
 * purposes but this is sufficient during initialization).
 */

#ifdef CONFIG_INIT_STACKS
	/* Fill the interrupt stack with the 0xaa sentinel pattern */
	ldr r0, =z_interrupt_stacks
	ldr r1, =0xaa
	ldr r2, =CONFIG_ISR_STACK_SIZE + MPU_GUARD_ALIGN_AND_SIZE
	bl z_early_memset
#endif

	/*
	 * Set PSP and use it to boot without using MSP, so that it
	 * gets set to z_interrupt_stacks during initialization.
	 */
	ldr r0, =z_interrupt_stacks
	ldr r1, =CONFIG_ISR_STACK_SIZE + MPU_GUARD_ALIGN_AND_SIZE
	adds r0, r0, r1
	msr PSP, r0
	mrs r0, CONTROL
	movs r1, #2
	orrs r0, r1 /* CONTROL_SPSEL_Msk */
	msr CONTROL, r0

	/*
	 * When changing the stack pointer, software must use an ISB instruction
	 * immediately after the MSR instruction. This ensures that instructions
	 * after the ISB instruction execute using the new stack pointer.
	 */
	isb

	/*
	 * 'bl' jumps the furthest of the branch instructions that are
	 * supported on all platforms. So it is used when jumping to z_prep_c
	 * (even though we do not intend to return).
	 */
	bl z_prep_c
```unknown
# ARM Cortex-M platform configuration options
# NOTE: We have the specific core implementations first and outside of the
# if CPU_CORTEX_M block so that SoCs can select which core they are using
# without having to select all the options related to that core. Everything
# else is captured inside the if CPU_CORTEX_M block so they are not exposed
# if one selects a different ARM Cortex family (Cortex-A or Cortex-R)
config CPU_CORTEX_M0
bool
select CPU_CORTEX_M
select ARMV6_M_ARMV8_M_BASELINE
help
This option signifies the use of a Cortex-M0 CPU
config CPU_CORTEX_M0PLUS
bool
select CPU_CORTEX_M
select ARMV6_M_ARMV8_M_BASELINE
help
This option signifies the use of a Cortex-M0+ CPU
config CPU_CORTEX_M1
bool
select CPU_CORTEX_M
select ARMV6_M_ARMV8_M_BASELINE
help
This option signifies the use of a Cortex-M1 CPU
config CPU_CORTEX_M3
bool
select CPU_CORTEX_M
select ARMV7_M_ARMV8_M_MAINLINE
help
This option signifies the use of a Cortex-M3 CPU
config CPU_CORTEX_M4
bool
select CPU_CORTEX_M
select ARMV7_M_ARMV8_M_MAINLINE
select ARMV7_M_ARMV8_M_FP if CPU_HAS_FPU
help
This option signifies the use of a Cortex-M4 CPU
config CPU_CORTEX_M23
bool
select CPU_CORTEX_M
select ARMV8_M_BASELINE
select ARMV8_M_SE if CPU_HAS_TEE
help
This option signifies the use of a Cortex-M23 CPU
config CPU_CORTEX_M33
bool
select CPU_CORTEX_M
select ARMV8_M_MAINLINE
select ARMV8_M_SE if CPU_HAS_TEE
select ARMV7_M_ARMV8_M_FP if CPU_HAS_FPU
help
This option signifies the use of a Cortex-M33 CPU
config CPU_CORTEX_M55
bool
select CPU_CORTEX_M
select ARMV8_1_M_MAINLINE
select ARMV8_M_SE if CPU_HAS_TEE
select ARMV7_M_ARMV8_M_FP if CPU_HAS_FPU
select CPU_HAS_DCACHE
select CPU_HAS_ICACHE
help
This option signifies the use of a Cortex-M55 CPU
config CPU_CORTEX_M85
bool
select CPU_CORTEX_M
select ARMV8_1_M_MAINLINE
select ARMV8_M_SE if CPU_HAS_TEE
select ARMV7_M_ARMV8_M_FP if CPU_HAS_FPU
select CPU_HAS_DCACHE
select CPU_HAS_ICACHE
help
This option signifies the use of a Cortex-M85 CPU
config CPU_CORTEX_M7
bool
select CPU_CORTEX_M
select ARMV7_M_ARMV8_M_MAINLINE
select ARMV7_M_ARMV8_M_FP if CPU_HAS_FPU
help
This option signifies the use of a Cortex-M7 CPU
if CPU_CORTEX_M
config CPU_CORTEX_M_HAS_SYSTICK
bool
help
This option is enabled when the CPU implements the SysTick timer.
config DCACHE_LINE_SIZE
default 32
config ICACHE_LINE_SIZE
default 32
config CPU_CORTEX_M_HAS_DWT
bool
depends on !CPU_CORTEX_M0 && !CPU_CORTEX_M0PLUS && !CPU_CORTEX_M1
help
This option signifies that the CPU implements the Data Watchpoint and
Trace (DWT) unit specified by the ARMv7-M and above.
While ARMv6-M does define a "DWT" unit, this is significantly different
from the DWT specified by the ARMv7-M and above in terms of both feature
set and register mappings.
config CPU_CORTEX_M_HAS_BASEPRI
bool
depends on ARMV7_M_ARMV8_M_MAINLINE
help
This option signifies the CPU has the BASEPRI register.
The BASEPRI register defines the minimum priority for
exception processing. When BASEPRI is set to a nonzero
value, it prevents the activation of all exceptions with
the same or lower priority level as the BASEPRI value.
Always present in CPUs that implement the ARMv7-M or
ARM8-M Mainline architectures.
config CPU_CORTEX_M_HAS_VTOR
bool
depends on !CPU_CORTEX_M0 && !CPU_CORTEX_M1
help
This option signifies the CPU has the VTOR register.
The VTOR indicates the offset of the vector table base
address from memory address 0x00000000. Always present
in CPUs implementing the ARMv7-M or ARMv8-M architectures.
Optional in CPUs implementing ARMv6-M, ARMv8-M Baseline
architectures (except for Cortex-M0/M1, where it is never
implemented).
config CPU_CORTEX_M_HAS_SPLIM
bool
depends on ARMV8_M_MAINLINE || (ARMV8_M_SE && !ARM_NONSECURE_FIRMWARE)
help
This option signifies the CPU has the MSPLIM, PSPLIM registers.
The stack pointer limit registers, MSPLIM, PSPLIM, limit the
extend to which the Main and Process Stack Pointers, respectively,
can descend. MSPLIM, PSPLIM are always present in ARMv8-M
MCUs that implement the ARMv8-M Main Extension (Mainline).
In an ARMv8-M Mainline implementation with the Security Extension
the MSPLIM, PSPLIM registers have additional Secure instances.
In an ARMv8-M Baseline implementation with the Security Extension
the MSPLIM, PSPLIM registers have only Secure instances.
config CPU_CORTEX_M_HAS_PROGRAMMABLE_FAULT_PRIOS
bool
depends on ARMV7_M_ARMV8_M_MAINLINE
help
This option signifies the CPU may trigger system faults
(other than HardFault) with configurable priority, and,
therefore, it needs to reserve a priority level for them.
config CPU_CORTEX_M0_HAS_VECTOR_TABLE_REMAP
bool
depends on ARMV6_M_ARMV8_M_BASELINE
help
This option signifies the Cortex-M0 has some mechanisms that can map
the vector table to SRAM
config CPU_CORTEX_M_HAS_CMSE
bool
depends on ARMV8_M_BASELINE || ARMV8_M_MAINLINE
help
This option signifies the Cortex-M CPU has the CMSE intrinsics.
config ARMV6_M_ARMV8_M_BASELINE
bool
select ATOMIC_OPERATIONS_C if !ARMV8_M_BASELINE
select ISA_THUMB2
help
This option signifies the use of an ARMv6-M processor
implementation, or the use of an ARMv8-M processor
supporting the Baseline implementation.
Notes:
- A Processing Element (PE) without the Main Extension
is also referred to as a Baseline Implementation. A
Baseline implementation has a subset of the instructions,
registers, and features, of a Mainline implementation.
- ARMv6-M compatibility is provided by all ARMv8-M
implementations.
config ARMV8_M_BASELINE
bool
select ARMV6_M_ARMV8_M_BASELINE
select CPU_CORTEX_M_HAS_CMSE
help
This option signifies the use of an ARMv8-M processor
implementation.
ARMv8-M Baseline includes additional features
not present in the ARMv6-M architecture.
config ARMV7_M_ARMV8_M_MAINLINE
bool
select ATOMIC_OPERATIONS_BUILTIN
select ISA_THUMB2
select CPU_CORTEX_M_HAS_BASEPRI
select CPU_CORTEX_M_HAS_VTOR
select CPU_CORTEX_M_HAS_PROGRAMMABLE_FAULT_PRIOS
select CPU_CORTEX_M_HAS_SYSTICK
help
This option signifies the use of an ARMv7-M processor
implementation, or the use of a backwards-compatible
ARMv8-M processor implementation supporting the Main
Extension.
Notes:
- A Processing Element (PE) with the Main Extension is also
referred to as a Mainline Implementation.
- ARMv7-M compatibility requires the Main Extension.
From the Armv8-M Architecture Reference Manual:
The Main Extension provides backwards compatibility
with ARMv7-M.
config ARMV8_M_MAINLINE
bool
select ARMV7_M_ARMV8_M_MAINLINE
select CPU_CORTEX_M_HAS_SPLIM
select CPU_CORTEX_M_HAS_CMSE
help
This option signifies the use of an ARMv8-M processor
implementation, supporting the Main Extension.
ARMv8-M Main Extension includes additional features
not present in the ARMv7-M architecture.
config ARMV8_1_M_MAINLINE
bool
select ARMV8_M_MAINLINE
help
This option signifies the use of an ARMv8.1-M processor
implementation, supporting the Main Extension.
ARMv8.1-M Main Extension includes additional features
not present in the ARMv8-M architecture.
config ARMV8_M_SE
bool
depends on ARMV8_M_BASELINE || ARMV8_M_MAINLINE
select CPU_CORTEX_M_HAS_SPLIM if !ARM_NONSECURE_FIRMWARE
help
This option signifies the use of an ARMv8-M processor
implementation (Baseline or Mainline) supporting the
Security Extensions.
config ARMV7_M_ARMV8_M_FP
bool
depends on ARMV7_M_ARMV8_M_MAINLINE && !CPU_CORTEX_M3
imply FPU_SHARING
help
This option signifies the use of an ARMv7-M processor
implementation, or the use of an ARMv8-M processor
implementation supporting the Floating-Point Extension.
config ARMV8_M_DSP
bool
depends on ARMV8_M_MAINLINE
help
This option signifies the use of an ARMv8-M processor
implementation supporting the DSP Extension.
config ARMV8_1_M_MVEI
bool
depends on ARMV8_1_M_MAINLINE
depends on ARMV8_M_DSP
help
This option signifies the use of an ARMv8.1-M processor implementation
supporting the M-Profile Vector Extension (MVE) integer instruction set.
config ARMV8_1_M_MVEF
bool
depends on ARMV8_1_M_MVEI
help
This option signifies the use of an ARMv8.1-M processor implementation
supporting the M-Profile Vector Extension (MVE) floating-point
instruction set.
config ARMV8_1_M_PMU
bool
help
This option is enabled when the CPU implements ARMv8-M Performance
Monitoring Unit (PMU).
config ARMV8_M_PMU_EVENTCNT
int "Number of event counters in the Performance Monitoring Unit"
depends on ARMV8_1_M_PMU
range 2 8
help
The number of event counters implemented.
menu "ARM Cortex-M0/M0+/M1/M3/M4/M7/M23/M33/M55 options"
depends on ARMV6_M_ARMV8_M_BASELINE || ARMV7_M_ARMV8_M_MAINLINE
config GEN_ISR_TABLES
default y
config ZERO_LATENCY_IRQS
bool "Zero-latency interrupts"
depends on CPU_CORTEX_M_HAS_BASEPRI
help
The kernel may reserve some of the highest interrupts priorities in
the system for its own use. These interrupts will not be masked
by interrupt locking.
When connecting interrupts the kernel will offset all interrupts
to lower priority than those reserved by the kernel.
Zero-latency interrupt can be used to set up an interrupt at the
highest interrupt priority which will not be blocked by interrupt
locking.
Since Zero-latency ISRs will run in the same priority or possibly at
higher priority than the rest of the kernel they cannot use any
kernel functionality.
config ZERO_LATENCY_LEVELS
int "Number of interrupt priority levels reserved for zero latency"
depends on ZERO_LATENCY_IRQS
range 1 $(UINT8_MAX)
help
The amount of interrupt priority levels reserved for zero latency
interrupts. Increase this value to reserve more than one priority
level for zero latency interrupts.
config DYNAMIC_DIRECT_INTERRUPTS
bool "Support for dynamic direct interrupts"
depends on DYNAMIC_INTERRUPTS
help
Direct interrupts are designed for performance-critical interrupt
handling and do not go through all of the common interrupt handling
code. This option enables the installation of interrupt service
routines for direct interrupts at runtime.
Note: this requires enabling support for dynamic interrupts in the
kernel.
config SW_VECTOR_RELAY
bool "Software Vector Relay"
help
When building a bootloader firmware this option adds a
vector table relay handler and a vector relay table, to
relay interrupts based on a vector table pointer.
This is only required but not limited to Cortex-M Baseline CPUs
with no hardware vector table relocation mechanisms (e.g. VTOR).
config SW_VECTOR_RELAY_CLIENT
bool "Software Vector Relay (client)"
default y if BOOTLOADER_MCUBOOT && !CPU_CORTEX_M0_HAS_VECTOR_TABLE_REMAP
depends on !CPU_CORTEX_M_HAS_VTOR
help
Another image has enabled SW_VECTOR_RELAY, and will be forwarding
exceptions and HW interrupts to this image. Enable this option to make
sure the vector table pointer in RAM is set properly by the image upon
initialization.
config CORTEX_M_DWT
bool "Data Watchpoint and Trace (DWT)"
depends on CPU_CORTEX_M_HAS_DWT
default y if TIMING_FUNCTIONS
help
Enable and use the Data Watchpoint and Trace (DWT) unit for
timing functions.
config CORTEX_M_DEBUG_MONITOR_HOOK
bool "Debug monitor interrupt for debugging"
depends on !ARMV6_M_ARMV8_M_BASELINE
help
Enable this option to configure debug monitor exception to low priority
for debugging purposes.
# NOTE(review): the start of this comment appears to have been lost; it
# evidently refers to FPU sharing being enabled, which may increase ESF
# stacking requirements for threads.
config TEST_EXTRA_STACK_SIZE
default 512 if TEST_ARM_CORTEX_M && FPU_SHARING
config TRAP_UNALIGNED_ACCESS
bool "Unaligned access trap"
depends on !ARMV6_M_ARMV8_M_BASELINE
help
If enabled, the CPU generates a UsageFault exception when executing a
halfword or word access.
endmenu
# Implement the null pointer detection using either the Data Watchpoint and
# Trace Unit and the Debug Monitor Exception, or the Memory Protection Unit.
choice NULL_POINTER_EXCEPTION_DETECTION
bool "Null-pointer exception"
# Disable this until the referenced upstream issue is fixed
# default NULL_POINTER_EXCEPTION_DETECTION_DWT if TEST_ARM_CORTEX_M && !ARM_NONSECURE_FIRMWARE && CPU_CORTEX_M_HAS_DWT
default NULL_POINTER_EXCEPTION_DETECTION_MPU if TEST_ARM_CORTEX_M && !ARM_NONSECURE_FIRMWARE && ARM_MPU && !CPU_CORTEX_M_HAS_DWT
default NULL_POINTER_EXCEPTION_DETECTION_NONE
help
There are 2 implementations available, one based
on DWT and the other based on MPU. Use this choice
symbol to select one of the options. By default the
feature is disabled. In the test suite the feature
is enabled and the DWT-based solution is preferred.
config NULL_POINTER_EXCEPTION_DETECTION_NONE
bool "No null pointer exception detection"
help
Null pointer exception detection feature is not
enabled.
config NULL_POINTER_EXCEPTION_DETECTION_DWT
bool "Use DWT for null pointer exception detection"
depends on CPU_CORTEX_M_HAS_DWT
depends on !TRUSTED_EXECUTION_NONSECURE
select CORTEX_M_DWT
select CORTEX_M_NULL_POINTER_EXCEPTION
help
Null pointer dereference detection implemented
using the DWT unit functionality.
Notes:
- Not enabled for Non-Secure FW images, where
null-pointer dereferencing is likely caught as
a SecureFault.
- Requires DWT functionality in the Cortex-M SoC
implementation (1 comparator for ARMv7-M, 2 comparators
for ARMv8-M).
- Requires the Cortex-M core be in normal mode.
config NULL_POINTER_EXCEPTION_DETECTION_MPU
bool "Use MPU for null pointer exception detection"
depends on !TRUSTED_EXECUTION_NONSECURE
depends on ARM_MPU
select CORTEX_M_NULL_POINTER_EXCEPTION
help
Null pointer dereference detection implemented
using MPU functionality.
Notes:
- Mutually exclusive to the DWT-based solution
- Not enabled for Non-Secure FW images, where
null-pointer dereferencing is likely caught as
a SecureFault.
- Requires MPU functionality to be present and
enabled. The implementation consumes 1 MPU region.
- In ARMv8-M, explicit null-pointer dereference
detection with MPU requires, additionally, that
the area: [0x0,
CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE)
is not unmapped (covered by an MPU region already).
If it is unmapped null-pointer dereferencing may
still be indirectly detected (e.g. via a precise
Bus access fault), but this is not guaranteed. A
build-time message warns the user of this scenario.
endchoice
config CORTEX_M_NULL_POINTER_EXCEPTION
bool
help
Enable and use the null pointer exception option.
This is a debug feature in Cortex-M, allowing for
detecting null pointer dereferencing (raising a
CPU fault). Supporting the feature results in an
increased code footprint, determined by option
CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE.
Note: this hidden option is selected by the choice
symbols corresponding to the DWT-based or to the
MPU-based solution.
if CORTEX_M_NULL_POINTER_EXCEPTION
config CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE
hex "Size of paged unmapped to implement null pointer detection"
default 0x400
help
Size of the page reserved for detecting null pointer
dereferencing. Must be a power of two. A large value
offers enhanced detection performance to the cost of
wasting a large flash area that code may not use.
endif # CORTEX_M_NULL_POINTER_EXCEPTION
rsource "tz/Kconfig"
endif # CPU_CORTEX_M
``` | /content/code_sandbox/arch/arm/core/cortex_m/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,945 |
```unknown
/*
*
*/
#include <zephyr/toolchain.h>
_ASM_FILE_PROLOGUE
GTEXT(__aeabi_read_tp)
GDATA(z_arm_tls_ptr)
/* Grab the TLS pointer and store in R0.
* According to the Run-Time ABI for the Arm Architecture section 5.3.5, this
* function may only clobber r0, ip, lr & CPSR.
*
* This can only be guaranteed by either implementing a naked C function with
* inline assembly, or plain assembly.
*/
SECTION_FUNC(TEXT, __aeabi_read_tp)
	/* Load the address of the kernel-maintained TLS pointer variable,
	 * then dereference it so r0 holds the current thread's TLS base.
	 * Only r0 is clobbered, satisfying the AEABI constraint quoted in
	 * the comment above.
	 */
	ldr r0, =z_arm_tls_ptr
	ldr r0, [r0]
	bx lr
``` | /content/code_sandbox/arch/arm/core/cortex_m/__aeabi_read_tp.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 141 |
```c
/*
*
*/
/**
* @file
* @brief ARM Cortex-M interrupt management
*
*
* Interrupt management: enabling/disabling and dynamic ISR
* connecting/replacing. SW_ISR_TABLE_DYNAMIC has to be enabled for
* connecting ISRs at runtime.
*/
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <cmsis_core.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/barrier.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/sw_isr_table.h>
#include <zephyr/irq.h>
#include <zephyr/tracing/tracing.h>
#include <zephyr/pm/pm.h>
extern void z_arm_reserved(void);
#define NUM_IRQS_PER_REG 32
#define REG_FROM_IRQ(irq) (irq / NUM_IRQS_PER_REG)
#define BIT_FROM_IRQ(irq) (irq % NUM_IRQS_PER_REG)
#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
/* Enable the given IRQ line in the NVIC. */
void arch_irq_enable(unsigned int irq)
{
	NVIC_EnableIRQ((IRQn_Type)irq);
}
/* Disable the given IRQ line in the NVIC. */
void arch_irq_disable(unsigned int irq)
{
	NVIC_DisableIRQ((IRQn_Type)irq);
}
/* Return non-zero if the given IRQ line is enabled, by reading its bit
 * directly from the NVIC ISER (Interrupt Set-Enable) register bank.
 */
int arch_irq_is_enabled(unsigned int irq)
{
	return NVIC->ISER[REG_FROM_IRQ(irq)] & BIT(BIT_FROM_IRQ(irq));
}
/**
* @internal
*
* @brief Set an interrupt's priority
*
* The priority is verified if ASSERT_ON is enabled. The maximum number
* of priority levels is a little complex, as there are some hardware
* priority levels which are reserved.
*/
void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
{
	/* The kernel may reserve some of the highest priority levels.
	 * So we offset the requested priority level with the number
	 * of priority levels reserved by the kernel.
	 */
	/* If we have zero latency interrupts, those interrupts will
	 * run at a priority level which is not masked by irq_lock().
	 * Our policy is to express priority levels with special properties
	 * via flags
	 */
	if (IS_ENABLED(CONFIG_ZERO_LATENCY_IRQS) && (flags & IRQ_ZERO_LATENCY)) {
		if (ZERO_LATENCY_LEVELS == 1) {
			/* Only one zero-latency level exists: force the
			 * fixed zero-latency priority regardless of the
			 * caller-supplied value.
			 */
			prio = _EXC_ZERO_LATENCY_IRQS_PRIO;
		} else {
			/* Use caller supplied prio level as-is */
		}
	} else {
		/* Regular interrupt: shift past the kernel-reserved levels. */
		prio += _IRQ_PRIO_OFFSET;
	}
	/* The last priority level is also used by PendSV exception, but
	 * allow other interrupts to use the same level, even if it ends up
	 * affecting performance (can still be useful on systems with a
	 * reduced set of priorities, like Cortex-M0/M0+).
	 */
	__ASSERT(prio <= (BIT(NUM_IRQ_PRIO_BITS) - 1),
		 "invalid priority %d for %d irq! values must be less than %lu\n",
		 prio - _IRQ_PRIO_OFFSET, irq,
		 BIT(NUM_IRQ_PRIO_BITS) - (_IRQ_PRIO_OFFSET));
	NVIC_SetPriority((IRQn_Type)irq, prio);
}
#endif /* !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER) */
void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf);
/**
*
* @brief Spurious interrupt handler
*
* Installed in all _sw_isr_table slots at boot time. Throws an error if
* called.
*
*/
void z_irq_spurious(const void *unused)
{
	ARG_UNUSED(unused);
	/* Any spurious interrupt is a fatal error: this handler should
	 * never run once the ISR table is populated with real handlers.
	 */
	z_arm_fatal_error(K_ERR_SPURIOUS_IRQ, NULL);
}
#ifdef CONFIG_PM
/* Power-management hook for direct ISRs: with all interrupts locked,
 * clear the kernel idle flag (if set) and let the PM subsystem complete
 * the resume path.
 */
void _arch_isr_direct_pm(void)
{
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	unsigned int key;
	/* irq_lock() does what we want for this CPU */
	key = irq_lock();
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	/* Lock all interrupts. irq_lock() will on this CPU only disable those
	 * lower than BASEPRI, which is not what we want. See comments in
	 * arch/arm/core/cortex_m/isr_wrapper.c
	 */
	__asm__ volatile("cpsid i" : : : "memory");
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
	/* The kernel was idle when this interrupt fired: finish the
	 * PM resume sequence exactly once.
	 */
	if (_kernel.idle) {
		_kernel.idle = 0;
		pm_system_resume();
	}
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	irq_unlock(key);
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	__asm__ volatile("cpsie i" : : : "memory");
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
}
#endif
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
/**
*
* @brief Set the target security state for the given IRQ
*
* Function sets the security state (Secure or Non-Secure) targeted
* by the given irq. It requires ARMv8-M MCU.
* It is only compiled if ARM_SECURE_FIRMWARE is defined.
* It should only be called while in Secure state, otherwise, a write attempt
* to NVIC.ITNS register is write-ignored(WI), as the ITNS register is not
* banked between security states and, therefore, has no Non-Secure instance.
*
* It shall return the resulting target state of the given IRQ, indicating
* whether the operation has been performed successfully.
*
* @param irq IRQ line
* @param irq_target_state the desired IRQ target state
*
* @return The resulting target state of the given IRQ
*/
irq_target_state_t irq_target_state_set(unsigned int irq,
					irq_target_state_t irq_target_state)
{
	uint32_t itns;

	/* NVIC_ClearTargetState() routes the IRQ to the Secure state,
	 * NVIC_SetTargetState() routes it to the Non-Secure state; both
	 * return the resulting ITNS bit for the line.
	 */
	itns = (irq_target_state == IRQ_TARGET_STATE_SECURE)
		       ? NVIC_ClearTargetState(irq)
		       : NVIC_SetTargetState(irq);

	/* A set ITNS bit means the IRQ now targets the Non-Secure state. */
	return itns ? IRQ_TARGET_STATE_NON_SECURE : IRQ_TARGET_STATE_SECURE;
}
/**
*
* @brief Determine whether the given IRQ targets the Secure state
*
* Function determines whether the given irq targets the Secure state
* or not (i.e. targets the Non-Secure state). It requires ARMv8-M MCU.
* It is only compiled if ARM_SECURE_FIRMWARE is defined.
* It should only be called while in Secure state, otherwise, a read attempt
* to NVIC.ITNS register is read-as-zero(RAZ), as the ITNS register is not
* banked between security states and, therefore, has no Non-Secure instance.
*
* @param irq IRQ line
*
* @return 1 if target state is Secure, 0 otherwise.
*/
int irq_target_state_is_secure(unsigned int irq)
{
	/* ITNS bit clear (0) means the IRQ targets the Secure state. */
	return NVIC_GetTargetState(irq) == 0;
}
/**
*
* @brief Disable and set all interrupt lines to target Non-Secure state.
*
* The function is used to set all HW NVIC interrupt lines to target the
* Non-Secure state. The function shall only be called fron Secure state.
*
* Notes:
* - All NVIC interrupts are disabled before being routed to Non-Secure.
* - Bits corresponding to un-implemented interrupts are RES0, so writes
* will be ignored.
*
*/
void irq_target_state_set_all_non_secure(void)
{
	/* sizeof yields size_t; use an unsigned index so the loop
	 * condition avoids a signed/unsigned comparison.
	 */
	unsigned int i;

	/* Disable (Clear) all NVIC interrupt lines. */
	for (i = 0; i < sizeof(NVIC->ICER) / sizeof(NVIC->ICER[0]); i++) {
		NVIC->ICER[i] = 0xFFFFFFFF;
	}
	/* Make sure the disables have taken effect before re-routing. */
	barrier_dsync_fence_full();
	barrier_isync_fence_full();
	/* Set all NVIC interrupt lines to target Non-Secure */
	for (i = 0; i < sizeof(NVIC->ITNS) / sizeof(NVIC->ITNS[0]); i++) {
		NVIC->ITNS[i] = 0xFFFFFFFF;
	}
}
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
#ifdef CONFIG_DYNAMIC_INTERRUPTS
#ifdef CONFIG_GEN_ISR_TABLES
/* Install @a routine/@a parameter for @a irq at runtime and apply the
 * requested priority/flags.  Returns the IRQ line number.
 */
int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
			     void (*routine)(const void *parameter),
			     const void *parameter, uint32_t flags)
{
	z_isr_install(irq, routine, parameter);
	z_arm_irq_priority_set(irq, priority, flags);
	return irq;
}
#endif /* CONFIG_GEN_ISR_TABLES */
#ifdef CONFIG_DYNAMIC_DIRECT_INTERRUPTS
/* Look up and invoke the SW ISR table entry for the currently active
 * exception.  IPSR holds the exception number; subtracting 16 converts
 * it to an external IRQ index (exception numbers 0-15 are the Cortex-M
 * system exceptions).
 */
static inline void z_arm_irq_dynamic_direct_isr_dispatch(void)
{
	uint32_t irq = __get_IPSR() - 16;
	/* Bounds check guards against system exceptions and IRQs outside
	 * the generated table.
	 */
	if (irq < IRQ_TABLE_SIZE) {
		struct _isr_table_entry *isr_entry = &_sw_isr_table[irq];
		isr_entry->isr(isr_entry->arg);
	}
}
/* Direct-ISR trampoline that dispatches the dynamic handler and then
 * requests a reschedule (return value 1).
 */
ISR_DIRECT_DECLARE(z_arm_irq_direct_dynamic_dispatch_reschedule)
{
	z_arm_irq_dynamic_direct_isr_dispatch();
	return 1;
}
/* Direct-ISR trampoline that dispatches the dynamic handler without
 * requesting a reschedule (return value 0).
 */
ISR_DIRECT_DECLARE(z_arm_irq_direct_dynamic_dispatch_no_reschedule)
{
	z_arm_irq_dynamic_direct_isr_dispatch();
	return 0;
}
#endif /* CONFIG_DYNAMIC_DIRECT_INTERRUPTS */
#endif /* CONFIG_DYNAMIC_INTERRUPTS */
``` | /content/code_sandbox/arch/arm/core/cortex_m/irq_manage.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,945 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <cortex_m/cmse.h>
/* Return the MPU region number covering @a addr, queried via the TT
 * instruction for the current security state and privilege level, or
 * -EINVAL when no valid region covers the address.
 */
int arm_cmse_mpu_region_get(uint32_t addr)
{
	cmse_address_info_t info = cmse_TT((void *)addr);

	if (!info.flags.mpu_region_valid) {
		return -EINVAL;
	}

	return info.flags.mpu_region;
}
/* Common helper: report whether @a addr is readable (@a rw == 0) or
 * read/write-able (@a rw != 0).  cmse_TTT() evaluates the address as
 * if unprivileged; cmse_TT() uses the current privilege level.
 */
static int arm_cmse_addr_read_write_ok(uint32_t addr, int force_npriv, int rw)
{
	cmse_address_info_t info = force_npriv ? cmse_TTT((void *)addr)
					       : cmse_TT((void *)addr);

	if (rw) {
		return info.flags.readwrite_ok;
	}

	return info.flags.read_ok;
}
/* Non-zero if @a addr is readable (optionally from unprivileged mode). */
int arm_cmse_addr_read_ok(uint32_t addr, int force_npriv)
{
	return arm_cmse_addr_read_write_ok(addr, force_npriv, 0);
}
/* Non-zero if @a addr is read/write-able (optionally from unprivileged mode). */
int arm_cmse_addr_readwrite_ok(uint32_t addr, int force_npriv)
{
	return arm_cmse_addr_read_write_ok(addr, force_npriv, 1);
}
/* Common helper: report whether the whole range [addr, addr + size) is
 * readable (@a rw == 0) or read/write-able (@a rw != 0), optionally
 * evaluated as unprivileged.
 */
static int arm_cmse_addr_range_read_write_ok(uint32_t addr, uint32_t size,
					     int force_npriv, int rw)
{
	int flags = rw ? CMSE_MPU_READWRITE : CMSE_MPU_READ;

	if (force_npriv != 0) {
		flags |= CMSE_MPU_UNPRIV;
	}

	/* cmse_check_address_range() returns the start address when the
	 * entire range satisfies the requested access, NULL otherwise.
	 */
	return cmse_check_address_range((void *)addr, size, flags) != NULL;
}
/* Non-zero if the whole range [addr, addr + size) is readable. */
int arm_cmse_addr_range_read_ok(uint32_t addr, uint32_t size, int force_npriv)
{
	return arm_cmse_addr_range_read_write_ok(addr, size, force_npriv, 0);
}
/* Non-zero if the whole range [addr, addr + size) is read/write-able. */
int arm_cmse_addr_range_readwrite_ok(uint32_t addr, uint32_t size, int force_npriv)
{
	return arm_cmse_addr_range_read_write_ok(addr, size, force_npriv, 1);
}
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
/* Return the Non-Secure MPU region covering @a addr (queried via the
 * TTA instruction, Secure firmware only), or -EINVAL if none is valid.
 */
int arm_cmse_mpu_nonsecure_region_get(uint32_t addr)
{
	cmse_address_info_t addr_info = cmse_TTA((void *)addr);
	if (addr_info.flags.mpu_region_valid) {
		return addr_info.flags.mpu_region;
	}
	return -EINVAL;
}
/* Return the SAU region covering @a addr, or -EINVAL if none is valid. */
int arm_cmse_sau_region_get(uint32_t addr)
{
	cmse_address_info_t addr_info = cmse_TT((void *)addr);
	if (addr_info.flags.sau_region_valid) {
		return addr_info.flags.sau_region;
	}
	return -EINVAL;
}
/* Return the IDAU region covering @a addr, or -EINVAL if none is valid. */
int arm_cmse_idau_region_get(uint32_t addr)
{
	cmse_address_info_t addr_info = cmse_TT((void *)addr);
	if (addr_info.flags.idau_region_valid) {
		return addr_info.flags.idau_region;
	}
	return -EINVAL;
}
/* Non-zero if @a addr is attributed Secure by the security attribution
 * hardware, as reported by the TT instruction.
 */
int arm_cmse_addr_is_secure(uint32_t addr)
{
	cmse_address_info_t addr_info = cmse_TT((void *)addr);
	return addr_info.flags.secure;
}
/* Common helper: report whether @a addr is Non-Secure readable
 * (@a rw == 0) or read/write-able (@a rw != 0).  cmse_TTA() queries the
 * Non-Secure view of the address; cmse_TTAT() additionally evaluates it
 * as unprivileged.
 */
static int arm_cmse_addr_nonsecure_read_write_ok(uint32_t addr,
						 int force_npriv, int rw)
{
	cmse_address_info_t info = force_npriv ? cmse_TTAT((void *)addr)
					       : cmse_TTA((void *)addr);

	if (rw) {
		return info.flags.nonsecure_readwrite_ok;
	}

	return info.flags.nonsecure_read_ok;
}
/* Non-zero if @a addr is Non-Secure readable. */
int arm_cmse_addr_nonsecure_read_ok(uint32_t addr, int force_npriv)
{
	return arm_cmse_addr_nonsecure_read_write_ok(addr, force_npriv, 0);
}
/* Non-zero if @a addr is Non-Secure read/write-able. */
int arm_cmse_addr_nonsecure_readwrite_ok(uint32_t addr, int force_npriv)
{
	return arm_cmse_addr_nonsecure_read_write_ok(addr, force_npriv, 1);
}
/* Common helper: report whether the whole range [addr, addr + size) is
 * Non-Secure readable (@a rw == 0) or read/write-able (@a rw != 0),
 * optionally evaluated as unprivileged.
 */
static int arm_cmse_addr_range_nonsecure_read_write_ok(uint32_t addr, uint32_t size,
						       int force_npriv, int rw)
{
	int flags = CMSE_NONSECURE | (rw ? CMSE_MPU_READWRITE : CMSE_MPU_READ);

	if (force_npriv != 0) {
		flags |= CMSE_MPU_UNPRIV;
	}

	/* Non-NULL return means the entire range passed the check. */
	return cmse_check_address_range((void *)addr, size, flags) != NULL;
}
/* Non-zero if the whole range [addr, addr + size) is Non-Secure readable. */
int arm_cmse_addr_range_nonsecure_read_ok(uint32_t addr, uint32_t size,
					  int force_npriv)
{
	return arm_cmse_addr_range_nonsecure_read_write_ok(addr, size,
							   force_npriv, 0);
}
/* Non-zero if the whole range [addr, addr + size) is Non-Secure
 * read/write-able.
 */
int arm_cmse_addr_range_nonsecure_readwrite_ok(uint32_t addr, uint32_t size,
					       int force_npriv)
{
	return arm_cmse_addr_range_nonsecure_read_write_ok(addr, size,
							   force_npriv, 1);
}
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
``` | /content/code_sandbox/arch/arm/core/cortex_m/cmse/arm_core_cmse.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,095 |
```linker script
/*
*
*/
/* nRF-specific defines. */
#if defined(CONFIG_CPU_HAS_NRF_IDAU) && CONFIG_ARM_NSC_REGION_BASE_ADDRESS == 0
/* This SOC needs the NSC region to be at the end of an SPU region. */
#define __NSC_ALIGN (ALIGN(CONFIG_NRF_SPU_FLASH_REGION_SIZE) \
- MAX(32, (1 << LOG2CEIL(__sg_size))))
#define NSC_ALIGN \
. = (__NSC_ALIGN + ((ABSOLUTE(.) > __NSC_ALIGN) \
? CONFIG_NRF_SPU_FLASH_REGION_SIZE : 0))
#define NSC_ALIGN_END . = ALIGN(CONFIG_NRF_SPU_FLASH_REGION_SIZE)
#endif /* CONFIG_CPU_HAS_NRF_IDAU && CONFIG_ARM_NSC_REGION_BASE_ADDRESS == 0 */
#ifndef NSC_ALIGN
#if CONFIG_ARM_NSC_REGION_BASE_ADDRESS != 0
#define NSC_ALIGN . = ABSOLUTE(CONFIG_ARM_NSC_REGION_BASE_ADDRESS)
#else
/* The ARM SAU requires regions to be 32-byte-aligned. */
#define NSC_ALIGN . = ALIGN(32)
#endif /* CONFIG_ARM_NSC_REGION_BASE_ADDRESS */
#endif /* !NSC_ALIGN */
#ifndef NSC_ALIGN_END
#define NSC_ALIGN_END . = ALIGN(32)
#endif
SECTION_PROLOGUE(.gnu.sgstubs,,)
{
NSC_ALIGN;
__sg_start = .;
/* No input section necessary, since the Secure Entry Veneers are
automatically placed after the .gnu.sgstubs output section. */
} GROUP_LINK_IN(ROMABLE_REGION)
__sg_end = .;
__sg_size = __sg_end - __sg_start;
NSC_ALIGN_END;
__nsc_size = . - __sg_start;
/* nRF-specific ASSERT. */
#ifdef CONFIG_CPU_HAS_NRF_IDAU
#define NRF_SG_START (__sg_start % CONFIG_NRF_SPU_FLASH_REGION_SIZE)
#define NRF_SG_SIZE (CONFIG_NRF_SPU_FLASH_REGION_SIZE - NRF_SG_START)
ASSERT((__sg_size == 0)
|| (((1 << LOG2CEIL(NRF_SG_SIZE)) == NRF_SG_SIZE) /* Pow of 2 */
&& (NRF_SG_SIZE >= 32)
&& (NRF_SG_SIZE <= 4096)),
"The Non-Secure Callable region size must be a power of 2 \
between 32 and 4096 bytes.")
#endif
``` | /content/code_sandbox/arch/arm/core/cortex_m/tz/secure_entry_functions.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 514 |
```unknown
# ARM TrustZone-M core configuration options
config ARM_TRUSTZONE_M
bool "ARM TrustZone-M support"
depends on CPU_HAS_TEE
depends on ARMV8_M_SE
help
Platform has support for ARM TrustZone-M.
if ARM_TRUSTZONE_M
menu "ARM TrustZone-M Options"
depends on ARM_SECURE_FIRMWARE || ARM_NONSECURE_FIRMWARE
comment "Secure firmware"
depends on ARM_SECURE_FIRMWARE
comment "Non-secure firmware"
depends on !ARM_SECURE_FIRMWARE
config ARM_SECURE_BUSFAULT_HARDFAULT_NMI
bool "BusFault, HardFault, and NMI target Secure state"
depends on ARM_SECURE_FIRMWARE
help
Force NMI, HardFault, and BusFault (in Mainline ARMv8-M)
exceptions as Secure exceptions.
config ARM_FIRMWARE_HAS_SECURE_ENTRY_FUNCS
bool "Secure Firmware has Secure Entry functions"
depends on ARM_SECURE_FIRMWARE
help
Option indicates that ARM Secure Firmware contains
Secure Entry functions that may be called from
Non-Secure state. Secure Entry functions must be
located in Non-Secure Callable memory regions.
config ARM_NSC_REGION_BASE_ADDRESS
hex "ARM Non-Secure Callable Region base address"
depends on ARM_FIRMWARE_HAS_SECURE_ENTRY_FUNCS
default 0
help
Start address of Non-Secure Callable section.
Notes:
- The default value (i.e. when the user does not configure
the option explicitly) instructs the linker script to
place the Non-Secure Callable section, automatically,
inside the .text area.
- Certain requirements/restrictions may apply regarding
the size and the alignment of the starting address for
a Non-Secure Callable section, depending on the available
security attribution unit (SAU or IDAU) for a given SOC.
config ARM_FIRMWARE_USES_SECURE_ENTRY_FUNCS
bool "Non-Secure Firmware uses Secure Entry functions"
depends on ARM_NONSECURE_FIRMWARE
help
Option indicates that ARM Non-Secure Firmware uses Secure
Entry functions provided by the Secure Firmware. The Secure
Firmware must be configured to provide these functions.
config ARM_ENTRY_VENEERS_LIB_NAME
string "Entry Veneers symbol file"
depends on ARM_FIRMWARE_HAS_SECURE_ENTRY_FUNCS \
|| ARM_FIRMWARE_USES_SECURE_ENTRY_FUNCS
default "libentryveneers.a"
help
Library file to find the symbol table for the entry veneers.
The library will typically come from building the Secure
Firmware that contains secure entry functions, and allows
the Non-Secure Firmware to call into the Secure Firmware.
endmenu
endif # ARM_TRUSTZONE_M
``` | /content/code_sandbox/arch/arm/core/cortex_m/tz/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 573 |
```objective-c
/*
* ARMv7 MMU support
*
* Private data declarations
*
*/
#ifndef ZEPHYR_ARCH_AARCH32_ARM_MMU_PRIV_H_
#define ZEPHYR_ARCH_AARCH32_ARM_MMU_PRIV_H_
/*
* Comp.:
* ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition
* ARM document ID DDI0406C Rev. d, March 2018
* L1 / L2 page table entry formats and entry type IDs:
* Chapter B3.5.1, fig. B3-4 and B3-5, p. B3-1323 f.
*/
#define ARM_MMU_PT_L1_NUM_ENTRIES 4096
#define ARM_MMU_PT_L2_NUM_ENTRIES 256
#define ARM_MMU_PTE_L1_INDEX_PA_SHIFT 20
#define ARM_MMU_PTE_L1_INDEX_MASK 0xFFF
#define ARM_MMU_PTE_L2_INDEX_PA_SHIFT 12
#define ARM_MMU_PTE_L2_INDEX_MASK 0xFF
#define ARM_MMU_PT_L2_ADDR_SHIFT 10
#define ARM_MMU_PT_L2_ADDR_MASK 0x3FFFFF
#define ARM_MMU_PTE_L2_SMALL_PAGE_ADDR_SHIFT 12
#define ARM_MMU_PTE_L2_SMALL_PAGE_ADDR_MASK 0xFFFFF
#define ARM_MMU_ADDR_BELOW_PAGE_GRAN_MASK 0xFFF
#define ARM_MMU_PTE_ID_INVALID 0x0
#define ARM_MMU_PTE_ID_L2_PT 0x1
#define ARM_MMU_PTE_ID_SECTION 0x2
#define ARM_MMU_PTE_ID_LARGE_PAGE 0x1
#define ARM_MMU_PTE_ID_SMALL_PAGE 0x2
#define ARM_MMU_PERMS_AP2_DISABLE_WR 0x2
#define ARM_MMU_PERMS_AP1_ENABLE_PL0 0x1
#define ARM_MMU_TEX2_CACHEABLE_MEMORY 0x4
#define ARM_MMU_TEX_CACHE_ATTRS_WB_WA 0x1
#define ARM_MMU_TEX_CACHE_ATTRS_WT_nWA 0x2
#define ARM_MMU_TEX_CACHE_ATTRS_WB_nWA 0x3
#define ARM_MMU_C_CACHE_ATTRS_WB_WA 0
#define ARM_MMU_B_CACHE_ATTRS_WB_WA 1
#define ARM_MMU_C_CACHE_ATTRS_WT_nWA 1
#define ARM_MMU_B_CACHE_ATTRS_WT_nWA 0
#define ARM_MMU_C_CACHE_ATTRS_WB_nWA 1
#define ARM_MMU_B_CACHE_ATTRS_WB_nWA 1
/*
* The following defines might vary if support for CPUs without
* the multiprocessor extensions was to be implemented:
*/
#define ARM_MMU_TTBR_IRGN0_BIT_MP_EXT_ONLY BIT(6)
#define ARM_MMU_TTBR_NOS_BIT BIT(5)
#define ARM_MMU_TTBR_RGN_OUTER_NON_CACHEABLE 0x0
#define ARM_MMU_TTBR_RGN_OUTER_WB_WA_CACHEABLE 0x1
#define ARM_MMU_TTBR_RGN_OUTER_WT_CACHEABLE 0x2
#define ARM_MMU_TTBR_RGN_OUTER_WB_nWA_CACHEABLE 0x3
#define ARM_MMU_TTBR_RGN_SHIFT 3
#define ARM_MMU_TTBR_SHAREABLE_BIT BIT(1)
#define ARM_MMU_TTBR_IRGN1_BIT_MP_EXT_ONLY BIT(0)
#define ARM_MMU_TTBR_CACHEABLE_BIT_NON_MP_ONLY BIT(0)
/* <-- end MP-/non-MP-specific */
#define ARM_MMU_DOMAIN_OS 0
#define ARM_MMU_DOMAIN_DEVICE 1
#define ARM_MMU_DACR_ALL_DOMAINS_CLIENT 0x55555555
#define ARM_MMU_SCTLR_AFE_BIT BIT(29)
#define ARM_MMU_SCTLR_TEX_REMAP_ENABLE_BIT BIT(28)
#define ARM_MMU_SCTLR_HA_BIT BIT(17)
#define ARM_MMU_SCTLR_ICACHE_ENABLE_BIT BIT(12)
#define ARM_MMU_SCTLR_DCACHE_ENABLE_BIT BIT(2)
#define ARM_MMU_SCTLR_CHK_ALIGN_ENABLE_BIT BIT(1)
#define ARM_MMU_SCTLR_MMU_ENABLE_BIT BIT(0)
/* Index of the L2 page table @a pt within the global l2_page_tables
 * array.  The macro argument and the full expansion are parenthesized,
 * and the stray trailing semicolon is removed, so the macro can be used
 * safely inside larger expressions.
 */
#define ARM_MMU_L2_PT_INDEX(pt) (((uint32_t)(pt) - (uint32_t)l2_page_tables) / \
				 sizeof(struct arm_mmu_l2_page_table))
/* First-level (L1) descriptor.  Each entry either maps a 1 MB section,
 * references an L2 page table, or is invalid/undefined; the raw 32-bit
 * word is also accessible.  Bit layout follows the ARMv7-A short-
 * descriptor format referenced in the file header comment.
 */
union arm_mmu_l1_page_table_entry {
	struct {
		uint32_t id : 2; /* [00] */
		uint32_t bufferable : 1;
		uint32_t cacheable : 1;
		uint32_t exec_never : 1;
		uint32_t domain : 4;
		uint32_t impl_def : 1;
		uint32_t acc_perms10 : 2;
		uint32_t tex : 3;
		uint32_t acc_perms2 : 1;
		uint32_t shared : 1;
		uint32_t not_global : 1;
		uint32_t zero : 1;
		uint32_t non_sec : 1;
		uint32_t base_address : 12; /* [31] */
	} l1_section_1m;
	struct {
		uint32_t id : 2; /* [00] */
		uint32_t zero0 : 1; /* PXN if avail. */
		uint32_t non_sec : 1;
		uint32_t zero1 : 1;
		uint32_t domain : 4;
		uint32_t impl_def : 1;
		uint32_t l2_page_table_address : 22; /* [31] */
	} l2_page_table_ref;
	struct {
		uint32_t id : 2; /* [00] */
		uint32_t reserved : 30; /* [31] */
	} undefined;
	uint32_t word;
};
struct arm_mmu_l1_page_table {
union arm_mmu_l1_page_table_entry entries[ARM_MMU_PT_L1_NUM_ENTRIES];
};
union arm_mmu_l2_page_table_entry {
struct {
uint32_t id : 2; /* [00] */
uint32_t bufferable : 1;
uint32_t cacheable : 1;
uint32_t acc_perms10 : 2;
uint32_t tex : 3;
uint32_t acc_perms2 : 1;
uint32_t shared : 1;
uint32_t not_global : 1;
uint32_t pa_base : 20; /* [31] */
} l2_page_4k;
struct {
uint32_t id : 2; /* [00] */
uint32_t bufferable : 1;
uint32_t cacheable : 1;
uint32_t acc_perms10 : 2;
uint32_t zero : 3;
uint32_t acc_perms2 : 1;
uint32_t shared : 1;
uint32_t not_global : 1;
uint32_t tex : 3;
uint32_t exec_never : 1;
uint32_t pa_base : 16; /* [31] */
} l2_page_64k;
struct {
uint32_t id : 2; /* [00] */
uint32_t reserved : 30; /* [31] */
} undefined;
uint32_t word;
};
struct arm_mmu_l2_page_table {
union arm_mmu_l2_page_table_entry entries[ARM_MMU_PT_L2_NUM_ENTRIES];
};
/*
 * Data structure for L2 table usage tracking, contains a
 * L1 index reference if the respective L2 table is in use.
 */
struct arm_mmu_l2_page_table_status {
	uint32_t l1_index : 12; /* L1 entry this L2 table is linked from */
	uint32_t entries : 9; /* usage count -- presumably entries in use; verify in mmu.c */
	uint32_t reserved : 11;
};
/*
 * Data structure used to describe memory areas defined by the
 * current Zephyr image, for which an identity mapping (pa = va)
 * will be set up. Those memory areas are processed during the
 * MMU initialization.
 */
struct arm_mmu_flat_range {
	const char *name; /* human-readable range name, for diagnostics */
	uint32_t start; /* range start address */
	uint32_t end; /* range end address */
	uint32_t attrs; /* memory attribute/permission flags for the mapping */
};
/*
 * Data structure containing the memory attributes and permissions
 * data derived from a memory region's attr flags word in the format
 * required for setting up the corresponding PTEs.
 */
struct arm_mmu_perms_attrs {
	uint32_t acc_perms : 2; /* AP access-permission bits */
	uint32_t bufferable : 1;
	uint32_t cacheable : 1;
	uint32_t not_global : 1;
	uint32_t non_sec : 1;
	uint32_t shared : 1;
	uint32_t tex : 3; /* TEX[2:0] memory-type bits */
	uint32_t exec_never : 1;
	uint32_t id_mask : 2; /* mask applied to the descriptor ID bits */
	uint32_t domain : 4;
	uint32_t reserved : 15;
};
#endif /* ZEPHYR_ARCH_AARCH32_ARM_MMU_PRIV_H_ */
``` | /content/code_sandbox/arch/arm/core/mmu/arm_mmu_priv.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,046 |
```unknown
#
# ARMv7 Memory Management Unit (MMU) configuration options
#
#
if CPU_HAS_MMU

# Main switch for the ARMv7-A short-descriptor MMU driver; enabled by
# default on Cortex-A cores.
config ARM_AARCH32_MMU
	bool "ARMv7 Cortex-A MMU Support"
	default y if CPU_AARCH32_CORTEX_A
	select MMU
	select SRAM_REGION_PERMISSIONS
	select THREAD_STACK_INFO
	select ARCH_HAS_EXECUTABLE_PAGE_BIT
	help
	  The current CPU has an ARMv7 Memory Management Unit.

# Sizes the statically allocated pool of second-level translation tables.
config ARM_MMU_NUM_L2_TABLES
	depends on ARM_AARCH32_MMU
	int "Number of L2 translation tables available to the MMU"
	default 64
	help
	  Number of level 2 translation tables. Each level 2 table
	  covers 1 MB of address space.

# Promptless (no user-visible string), so it cannot be changed from
# menuconfig: the 4 kB granularity is fixed by the hardware page size.
config ARM_MMU_REGION_MIN_ALIGN_AND_SIZE
	int
	default 4096
	help
	  Minimum size (and alignment) of an ARM MMU page.
	  This value should not be modified.

endif # CPU_HAS_MMU
``` | /content/code_sandbox/arch/arm/core/mmu/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 206 |
```c
/*
*
*/
/**
* @file
* @brief Cortex-A/R AArch32 L1-cache maintenance operations.
*
* This module implement the cache API for Cortex-A/R AArch32 cores using CMSIS.
* Only L1-cache maintenance operations is supported.
*/
#include <zephyr/kernel.h>
#include <zephyr/cache.h>
#include <cmsis_core.h>
#include <zephyr/sys/barrier.h>
/* Cache Type Register */
#define CTR_DMINLINE_SHIFT 16
#define CTR_DMINLINE_MASK BIT_MASK(4)
#ifdef CONFIG_DCACHE
static size_t dcache_line_size;
/**
 * @brief Get the smallest D-cache line size.
 *
 * Get the smallest D-cache line size of all the data and unified caches
 * that the processor controls, as reported by the Cache Type Register
 * (CTR). The result is computed once and cached in dcache_line_size.
 *
 * @return smallest D-cache line size, in bytes
 */
size_t arch_dcache_line_size_get(void)
{
	uint32_t val;
	uint32_t dminline;

	if (!dcache_line_size) {
		val = read_sysreg(ctr);
		dminline = (val >> CTR_DMINLINE_SHIFT) & CTR_DMINLINE_MASK;
		/*
		 * CTR.DminLine is the log2 of the number of 32-bit WORDS in
		 * the smallest cache line, so the size in bytes is
		 * 4 * 2^DminLine. The previous "2 << dminline" reported half
		 * the true line size, which made the range routines treat
		 * mid-line addresses as line-aligned (so invd_range could
		 * discard dirty data it should have cleaned) and doubled
		 * the number of maintenance operations.
		 */
		dcache_line_size = 4 << dminline;
	}

	return dcache_line_size;
}
/**
 * @brief Enable the d-cache.
 *
 * Invalidates the whole d-cache first so no stale lines become visible,
 * then sets the C (data/unified cache enable) bit in SCTLR.
 */
void arch_dcache_enable(void)
{
	uint32_t val;

	/* Discard stale contents before the cache is turned on */
	arch_dcache_invd_all();

	val = __get_SCTLR();
	val |= SCTLR_C_Msk;
	/* Ensure the invalidation has completed before enabling */
	barrier_dsync_fence_full();
	__set_SCTLR(val);
	barrier_isync_fence_full();
}

/**
 * @brief Disable the d-cache.
 *
 * Clears the C bit in SCTLR, then cleans and invalidates the whole
 * d-cache so dirty lines are written back to memory and not lost.
 */
void arch_dcache_disable(void)
{
	uint32_t val;

	val = __get_SCTLR();
	val &= ~SCTLR_C_Msk;
	barrier_dsync_fence_full();
	__set_SCTLR(val);
	barrier_isync_fence_full();
	/* Write back dirty lines accumulated while the cache was on */
	arch_dcache_flush_and_invd_all();
}
/** @brief Clean (write back) the entire L1 d-cache. @return always 0 */
int arch_dcache_flush_all(void)
{
	L1C_CleanDCacheAll();

	return 0;
}

/** @brief Invalidate (discard) the entire L1 d-cache. @return always 0 */
int arch_dcache_invd_all(void)
{
	L1C_InvalidateDCacheAll();

	return 0;
}

/** @brief Clean and invalidate the entire L1 d-cache. @return always 0 */
int arch_dcache_flush_and_invd_all(void)
{
	L1C_CleanInvalidateDCacheAll();

	return 0;
}
/**
 * @brief Clean (write back) a range of the d-cache by address.
 *
 * The start address is rounded down to a line boundary; every line
 * overlapping [start_addr, start_addr + size) is cleaned.
 *
 * @param start_addr first address of the range
 * @param size size of the range, in bytes
 * @return always 0
 */
int arch_dcache_flush_range(void *start_addr, size_t size)
{
	const size_t line = arch_dcache_line_size_get();
	const uintptr_t limit = (uintptr_t)start_addr + size;
	uintptr_t cur = (uintptr_t)start_addr & ~(line - 1);

	for (; cur < limit; cur += line) {
		L1C_CleanDCacheMVA((void *)cur);
	}

	return 0;
}
/**
 * @brief Invalidate a range of the d-cache by address.
 *
 * Lines fully inside the range are invalidated (contents discarded).
 * Partial lines at either end are cleaned+invalidated instead, so bytes
 * outside the requested range are written back rather than destroyed.
 *
 * @param start_addr first address of the range
 * @param size size of the range, in bytes
 * @return always 0
 */
int arch_dcache_invd_range(void *start_addr, size_t size)
{
	size_t line_size;
	uintptr_t addr = (uintptr_t)start_addr;
	uintptr_t end_addr = addr + size;

	line_size = arch_dcache_line_size_get();

	/*
	 * Clean and invalidate the partial cache lines at both ends of the
	 * given range to prevent data corruption
	 */
	if (end_addr & (line_size - 1)) {
		/* Trailing partial line: clean+invalidate, shrink the range */
		end_addr &= ~(line_size - 1);
		L1C_CleanInvalidateDCacheMVA((void *)end_addr);
	}

	if (addr & (line_size - 1)) {
		addr &= ~(line_size - 1);
		if (addr == end_addr) {
			/* Start and end fall in the same (already handled) line */
			goto done;
		}
		/* Leading partial line: clean+invalidate, advance past it */
		L1C_CleanInvalidateDCacheMVA((void *)addr);
		addr += line_size;
	}

	/* Align address to line size */
	addr &= ~(line_size - 1);
	while (addr < end_addr) {
		L1C_InvalidateDCacheMVA((void *)addr);
		addr += line_size;
	}

done:
	return 0;
}
/**
 * @brief Clean and invalidate a range of the d-cache by address.
 *
 * The start address is rounded down to a line boundary; every line
 * overlapping [start_addr, start_addr + size) is cleaned and then
 * invalidated.
 *
 * @param start_addr first address of the range
 * @param size size of the range, in bytes
 * @return always 0
 */
int arch_dcache_flush_and_invd_range(void *start_addr, size_t size)
{
	const size_t line = arch_dcache_line_size_get();
	const uintptr_t limit = (uintptr_t)start_addr + size;
	uintptr_t cur = (uintptr_t)start_addr & ~(line - 1);

	for (; cur < limit; cur += line) {
		L1C_CleanInvalidateDCacheMVA((void *)cur);
	}

	return 0;
}
#endif
#ifdef CONFIG_ICACHE
/** @brief Enable the L1 i-cache: invalidate it, then set SCTLR.I. */
void arch_icache_enable(void)
{
	arch_icache_invd_all();
	__set_SCTLR(__get_SCTLR() | SCTLR_I_Msk);
	barrier_isync_fence_full();
}

/** @brief Disable the L1 i-cache by clearing SCTLR.I. */
void arch_icache_disable(void)
{
	__set_SCTLR(__get_SCTLR() & ~SCTLR_I_Msk);
	barrier_isync_fence_full();
}

/*
 * The i-cache never holds dirty data, so "flush" (clean) operations are
 * meaningless here and report -ENOTSUP. Only whole-cache invalidation is
 * implemented; ranged operations are likewise unsupported.
 */
int arch_icache_flush_all(void)
{
	return -ENOTSUP;
}

/** @brief Invalidate the entire L1 i-cache. @return always 0 */
int arch_icache_invd_all(void)
{
	L1C_InvalidateICacheAll();

	return 0;
}

int arch_icache_flush_and_invd_all(void)
{
	return -ENOTSUP;
}

int arch_icache_flush_range(void *start_addr, size_t size)
{
	return -ENOTSUP;
}

int arch_icache_invd_range(void *start_addr, size_t size)
{
	return -ENOTSUP;
}

int arch_icache_flush_and_invd_range(void *start_addr, size_t size)
{
	return -ENOTSUP;
}
#endif
``` | /content/code_sandbox/arch/arm/core/cortex_a_r/cache.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,137 |
```unknown
/*
*/
/**
* @file
* @brief Thread context switching for ARM Cortex-A and Cortex-R (AArch32)
*
* This module implements the routines necessary for thread context switching
* on ARM Cortex-A and Cortex-R CPUs.
*/
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/cpu.h>
#include <offsets_short.h>
#include <zephyr/kernel.h>
#include "macro_priv.inc"
_ASM_FILE_PROLOGUE
GTEXT(z_arm_svc)
GTEXT(z_arm_context_switch)
GTEXT(z_do_kernel_oops)
GTEXT(z_arm_do_syscall)
/*
 * Routine to handle context switches
 *
 * This function is directly called either by _isr_wrapper() in case of
 * preemption, or arch_switch() in case of cooperative switching.
 *
 * void z_arm_context_switch(struct k_thread *new, struct k_thread *old);
 *
 * On entry: r0 = incoming ("new") thread, r1 = outgoing ("old") thread.
 */
SECTION_FUNC(TEXT, z_arm_context_switch)
	/* Save the outgoing thread's callee-saved registers (r4-r11),
	 * stack pointer and link register into its k_thread structure.
	 */
	ldr r2, =_thread_offset_to_callee_saved
	add r2, r1, r2
	stm r2, {r4-r11, sp, lr}

	/* save current thread's exception depth */
	get_cpu r2
	ldrb r3, [r2, #_cpu_offset_to_exc_depth]
	strb r3, [r1, #_thread_offset_to_exception_depth]

	/* retrieve next thread's exception depth */
	ldrb r3, [r0, #_thread_offset_to_exception_depth]
	strb r3, [r2, #_cpu_offset_to_exc_depth]

	/* save old thread into switch handle which is required by
	 * z_sched_switch_spin().
	 *
	 * Note that this step must be done after all relevant state is
	 * saved; the dsb makes the preceding stores visible before the
	 * switch handle is published.
	 */
	dsb
	str r1, [r1, #___thread_t_switch_handle_OFFSET]

#if defined(CONFIG_THREAD_LOCAL_STORAGE)
	/* Grab the TLS pointer */
	ldr r3, [r0, #_thread_offset_to_tls]

	/* Store TLS pointer in the "Process ID" register (TPIDRURW,
	 * CP15 c13/c0/2). This register is used as a base pointer to all
	 * thread variables with offsets added by toolchain.
	 */
	mcr 15, 0, r3, c13, c0, 2
#endif

	/* Restore the incoming thread's callee-saved context */
	ldr r2, =_thread_offset_to_callee_saved
	add r2, r0, r2
	ldm r2, {r4-r11, sp, lr}

#if defined (CONFIG_ARM_MPU)
	/* Re-program dynamic memory map */
	push {r0, lr}
	bl z_arm_configure_dynamic_mpu_regions
	pop {r0, lr}
#endif

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	push {lr}
	bl z_thread_mark_switched_in
	pop {lr}
#endif

	/* Resume the incoming thread through its restored lr */
	bx lr
/**
 *
 * @brief Service call handler
 *
 * The service call (svc) is used in the following occasions:
 * - Cooperative context switching
 * - IRQ offloading
 * - Kernel run-time exceptions
 *
 */
SECTION_FUNC(TEXT, z_arm_svc)
	z_arm_cortex_ar_enter_exc

	/* Get SVC number */
	cps #MODE_SVC
	mrs r0, spsr
	/* SPSR bit 5 (T) set means the caller was executing in Thumb state */
	tst r0, #0x20
	/* ARM state: immediate is in the low 24 bits of the SVC opcode */
	ldreq r1, [lr, #-4]
	biceq r1, #0xff000000
	beq demux
	/* Thumb state: immediate is in the low 8 bits of the SVC opcode */
	ldr r1, [lr, #-2]
	and r1, #0xff

	/*
	 * grab service call number:
	 * TODO 0: context switch
	 * 1: irq_offload (if configured)
	 * 2: kernel panic or oops (software generated fatal exception)
	 * TODO 3: system calls for memory protection
	 */
demux:
	cps #MODE_SYS

	cmp r1, #_SVC_CALL_RUNTIME_EXCEPT
	beq _oops

#ifdef CONFIG_IRQ_OFFLOAD
	cmp r1, #_SVC_CALL_IRQ_OFFLOAD
	beq offload
	b inv
offload:
	/* Bump the per-CPU interrupt nesting counter */
	get_cpu r2
	ldr r3, [r2, #___cpu_t_nested_OFFSET]
	add r3, r3, #1
	str r3, [r2, #___cpu_t_nested_OFFSET]

	/* If not nested: switch to IRQ stack and save current sp on it. */
	cmp r3, #1
	bhi 1f
	mov r0, sp
	cps #MODE_IRQ
	push {r0}

1:
	blx z_irq_do_offload
	b z_arm_cortex_ar_irq_done
#endif
	b inv

_oops:
	/*
	 * Pass the exception frame to z_do_kernel_oops. r0 contains the
	 * exception reason.
	 */
	mov r0, sp
	bl z_do_kernel_oops
	/* falls through to inv: a returning oops is treated as fatal */

inv:
	mov r0, #0 /* K_ERR_CPU_EXCEPTION */
	mov r1, sp
	bl z_arm_fatal_error

	/* Return here only in case of recoverable error */
	b z_arm_cortex_ar_exit_exc
``` | /content/code_sandbox/arch/arm/core/cortex_a_r/switch.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,095 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <cmsis_core.h>
/**
 * @brief Disable ECC/parity checking on the tightly coupled memories.
 *
 * On ARMv7-R parts, clears the ATCM/B0TCM/B1TCM parity-and-ECC enable
 * bits in the Auxiliary Control Register (ACTLR); a no-op elsewhere.
 */
void z_arm_tcm_disable_ecc(void)
{
#if defined(CONFIG_ARMV7_R)
	uint32_t clear_mask = ACTLR_ATCMPCEN_Msk | ACTLR_B0TCMPCEN_Msk |
			      ACTLR_B1TCMPCEN_Msk;

	__set_ACTLR(__get_ACTLR() & ~clear_mask);
#endif
}
``` | /content/code_sandbox/arch/arm/core/cortex_a_r/tcm.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 97 |
```c
/*
*
*/
/**
* @file
* @brief ARM Cortex-A and Cortex-R interrupt initialization
*/
#include <zephyr/arch/cpu.h>
#include <zephyr/drivers/interrupt_controller/gic.h>
/**
 *
 * @brief Initialize interrupts
 *
 * NOTE(review): nothing is done here for the standard GIC -- presumably
 * it is initialized by its own device driver; confirm. Only a
 * SoC-specific controller hook is invoked when configured.
 */
void z_arm_interrupt_init(void)
{
	/*
	 * Initialise interrupt controller.
	 */
#ifdef CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER
	/* Invoke SoC-specific interrupt controller initialisation */
	z_soc_irq_init();
#endif
}
``` | /content/code_sandbox/arch/arm/core/cortex_a_r/irq_init.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 98 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <zephyr/arch/common/exc_handle.h>
#include <zephyr/logging/log.h>
#if defined(CONFIG_GDBSTUB)
#include <zephyr/arch/arm/gdbstub.h>
#include <zephyr/debug/gdbstub.h>
#endif
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
#define FAULT_DUMP_VERBOSE (CONFIG_FAULT_DUMP == 2)
#if FAULT_DUMP_VERBOSE
/* Translate a DBGDSCR method-of-entry (MOE) code to a readable string */
static const char *get_dbgdscr_moe_string(uint32_t moe)
{
	const char *desc = "Unknown";

	if (moe == DBGDSCR_MOE_HALT_REQUEST) {
		desc = "Halt Request";
	} else if (moe == DBGDSCR_MOE_BREAKPOINT) {
		desc = "Breakpoint";
	} else if (moe == DBGDSCR_MOE_ASYNC_WATCHPOINT) {
		desc = "Asynchronous Watchpoint";
	} else if (moe == DBGDSCR_MOE_BKPT_INSTRUCTION) {
		desc = "BKPT Instruction";
	} else if (moe == DBGDSCR_MOE_EXT_DEBUG_REQUEST) {
		desc = "External Debug Request";
	} else if (moe == DBGDSCR_MOE_VECTOR_CATCH) {
		desc = "Vector Catch";
	} else if (moe == DBGDSCR_MOE_OS_UNLOCK_CATCH) {
		desc = "OS Unlock Catch";
	} else if (moe == DBGDSCR_MOE_SYNC_WATCHPOINT) {
		desc = "Synchronous Watchpoint";
	}

	return desc;
}
/* Log why debug state was entered, per the DBGDSCR MOE field */
static void dump_debug_event(void)
{
	uint32_t entry_mode =
		(__get_DBGDSCR() & DBGDSCR_MOE_Msk) >> DBGDSCR_MOE_Pos;

	LOG_ERR("Debug Event (%s)", get_dbgdscr_moe_string(entry_mode));
}
/*
 * Log details of a prefetch/data abort and translate the architectural
 * fault status into a Zephyr K_ERR_* reason code.
 *
 * @param status decoded FS field from DFSR/IFSR
 * @param addr   faulting address from DFAR/IFAR (synchronous faults only)
 * @return K_ERR_* reason code for this fault
 */
static uint32_t dump_fault(uint32_t status, uint32_t addr)
{
	uint32_t reason = K_ERR_CPU_EXCEPTION;
	/*
	 * Dump fault status and, if applicable, status-specific information.
	 * Note that the fault address is only displayed for the synchronous
	 * faults because it is unpredictable for asynchronous faults.
	 */
	switch (status) {
	case FSR_FS_ALIGNMENT_FAULT:
		reason = K_ERR_ARM_ALIGNMENT_FAULT;
		LOG_ERR("Alignment Fault @ 0x%08x", addr);
		break;
	case FSR_FS_PERMISSION_FAULT:
		reason = K_ERR_ARM_PERMISSION_FAULT;
		LOG_ERR("Permission Fault @ 0x%08x", addr);
		break;
	case FSR_FS_SYNC_EXTERNAL_ABORT:
		reason = K_ERR_ARM_SYNC_EXTERNAL_ABORT;
		LOG_ERR("Synchronous External Abort @ 0x%08x", addr);
		break;
	case FSR_FS_ASYNC_EXTERNAL_ABORT:
		reason = K_ERR_ARM_ASYNC_EXTERNAL_ABORT;
		LOG_ERR("Asynchronous External Abort");
		break;
	case FSR_FS_SYNC_PARITY_ERROR:
		reason = K_ERR_ARM_SYNC_PARITY_ERROR;
		LOG_ERR("Synchronous Parity/ECC Error @ 0x%08x", addr);
		break;
	case FSR_FS_ASYNC_PARITY_ERROR:
		reason = K_ERR_ARM_ASYNC_PARITY_ERROR;
		LOG_ERR("Asynchronous Parity/ECC Error");
		break;
	case FSR_FS_DEBUG_EVENT:
		reason = K_ERR_ARM_DEBUG_EVENT;
		dump_debug_event();
		break;
	/* Some FS encodings differ between ARMv8-R and earlier cores */
#if defined(CONFIG_AARCH32_ARMV8_R)
	case FSR_FS_TRANSLATION_FAULT:
		reason = K_ERR_ARM_TRANSLATION_FAULT;
		LOG_ERR("Translation Fault @ 0x%08x", addr);
		break;
	case FSR_FS_UNSUPPORTED_EXCLUSIVE_ACCESS_FAULT:
		reason = K_ERR_ARM_UNSUPPORTED_EXCLUSIVE_ACCESS_FAULT;
		LOG_ERR("Unsupported Exclusive Access Fault @ 0x%08x", addr);
		break;
#else
	case FSR_FS_BACKGROUND_FAULT:
		reason = K_ERR_ARM_BACKGROUND_FAULT;
		LOG_ERR("Background Fault @ 0x%08x", addr);
		break;
#endif
	default:
		/* Unrecognized status: keep the generic CPU-exception reason */
		LOG_ERR("Unknown (%u)", status);
	}

	return reason;
}
#endif
#if defined(CONFIG_FPU_SHARING)
/*
 * Spill the caller-saved VFP registers into an FPU exception frame.
 * Stores s0-s15 and, when the VFP has the full 32 double registers
 * (CONFIG_VFP_FEATURE_REGS_S64_D32), d16-d31 as well. Filling in the
 * frame's FPEXC/FPSCR words is the caller's responsibility.
 */
static ALWAYS_INLINE void z_arm_fpu_caller_save(struct __fpu_sf *fpu)
{
	__asm__ volatile (
		"vstmia %0, {s0-s15};\n"
		: : "r" (&fpu->s[0])
		: "memory"
		);
#if CONFIG_VFP_FEATURE_REGS_S64_D32
	__asm__ volatile (
		"vstmia %0, {d16-d31};\n\t"
		:
		: "r" (&fpu->d[0])
		: "memory"
		);
#endif
}
/**
 * @brief FPU undefined instruction fault handler
 *
 * Implements lazy FPU enabling: an undefined-instruction exception taken
 * while the FPU is off is assumed to be a VFP instruction, so the FPU is
 * enabled and the instruction retried; any live FP context belonging to
 * an interrupted frame is spilled first.
 *
 * @return Returns true if the FPU is already enabled
 * implying a true undefined instruction
 * Returns false if the FPU was disabled
 */
bool z_arm_fault_undef_instruction_fp(void)
{
	/*
	 * Assume this is a floating point instruction that faulted because
	 * the FP unit was disabled. Enable the FP unit and try again. If
	 * the FP was already enabled then this was an actual undefined
	 * instruction.
	 */
	if (__get_FPEXC() & FPEXC_EN)
		return true;

	__set_FPEXC(FPEXC_EN);

	if (_current_cpu->nested > 1) {
		/*
		 * If the nested count is greater than 1, the undefined
		 * instruction exception came from an irq/svc context. (The
		 * irq/svc handler would have the nested count at 1 and then
		 * the undef exception would increment it to 2).
		 */
		struct __fpu_sf *spill_esf =
			(struct __fpu_sf *)_current_cpu->fp_ctx;

		/* No spill frame registered: nothing to preserve */
		if (spill_esf == NULL)
			return false;

		_current_cpu->fp_ctx = NULL;

		/*
		 * If the nested count is 2 and the current thread has used the
		 * VFP (whether or not it was actually using the VFP before the
		 * current exception) OR if the nested count is greater than 2
		 * and the VFP was enabled on the irq/svc entrance for the
		 * saved exception stack frame, then save the floating point
		 * context because it is about to be overwritten.
		 */
		if (((_current_cpu->nested == 2)
		     && (_current->base.user_options & K_FP_REGS))
		    || ((_current_cpu->nested > 2)
			&& (spill_esf->undefined & FPEXC_EN))) {
			/*
			 * Spill VFP registers to specified exception stack
			 * frame
			 */
			spill_esf->undefined |= FPEXC_EN;
			spill_esf->fpscr = __get_FPSCR();
			z_arm_fpu_caller_save(spill_esf);
		}
	} else {
		/*
		 * If the nested count is one, a thread was the faulting
		 * context. Just flag that this thread uses the VFP. This
		 * means that a thread that uses the VFP does not have to,
		 * but should, set K_FP_REGS on thread creation.
		 */
		_current->base.user_options |= K_FP_REGS;
	}

	return false;
}
#endif
/**
 * @brief Undefined instruction fault handler
 *
 * Saves FP state (if FPU sharing is enabled), then either hands control
 * to the GDB stub or logs the fault and escalates to the kernel
 * fatal-error handler.
 *
 * @return Returns true if the fault is fatal
 *
 * Note: with CONFIG_GDBSTUB the function returns false immediately
 * after the stub entry, so the fatal path below is never reached.
 */
bool z_arm_fault_undef_instruction(struct arch_esf *esf)
{
#if defined(CONFIG_FPU_SHARING)
	/*
	 * This is a true undefined instruction and we will be crashing
	 * so save away the VFP registers.
	 */
	esf->fpu.undefined = __get_FPEXC();
	esf->fpu.fpscr = __get_FPSCR();
	z_arm_fpu_caller_save(&esf->fpu);
#endif

#if defined(CONFIG_GDBSTUB)
	z_gdb_entry(esf, GDB_EXCEPTION_INVALID_INSTRUCTION);
	/* Might not be fatal if GDB stub placed it in the code. */
	return false;
#endif

	/* Print fault information */
	LOG_ERR("***** UNDEFINED INSTRUCTION ABORT *****");

	uint32_t reason = IS_ENABLED(CONFIG_SIMPLIFIED_EXCEPTION_CODES) ?
			  K_ERR_CPU_EXCEPTION :
			  K_ERR_ARM_UNDEFINED_INSTRUCTION;

	/* Invoke kernel fatal exception handler */
	z_arm_fatal_error(reason, esf);

	/* All undefined instructions are treated as fatal for now */
	return true;
}
/**
 * @brief Prefetch abort fault handler
 *
 * Decodes IFSR/IFAR, optionally routes the event to the GDB stub, then
 * logs the fault and escalates to the kernel fatal-error handler.
 *
 * @return Returns true if the fault is fatal
 */
bool z_arm_fault_prefetch(struct arch_esf *esf)
{
	uint32_t reason = K_ERR_CPU_EXCEPTION;

	/* Read and parse Instruction Fault Status Register (IFSR) */
	uint32_t ifsr = __get_IFSR();
#if defined(CONFIG_AARCH32_ARMV8_R)
	uint32_t fs = ifsr & IFSR_STATUS_Msk;
#else
	/* Pre-ARMv8-R: fault status is split into FS[4] and FS[3:0] */
	uint32_t fs = ((ifsr & IFSR_FS1_Msk) >> 6) | (ifsr & IFSR_FS0_Msk);
#endif

	/* Read Instruction Fault Address Register (IFAR) */
	uint32_t ifar = __get_IFAR();

#if defined(CONFIG_GDBSTUB)
	/* The BKPT instruction could have caused a software breakpoint */
	if (fs == IFSR_DEBUG_EVENT) {
		/* Debug event, call the gdbstub handler */
		z_gdb_entry(esf, GDB_EXCEPTION_BREAKPOINT);
	} else {
		/* Fatal */
		z_gdb_entry(esf, GDB_EXCEPTION_MEMORY_FAULT);
	}
	/* With the GDB stub active, the fault is never escalated here */
	return false;
#endif

	/* Print fault information*/
	LOG_ERR("***** PREFETCH ABORT *****");
	if (FAULT_DUMP_VERBOSE) {
		reason = dump_fault(fs, ifar);
	}

	/* Simplify exception codes if requested */
	if (IS_ENABLED(CONFIG_SIMPLIFIED_EXCEPTION_CODES) && (reason >= K_ERR_ARCH_START)) {
		reason = K_ERR_CPU_EXCEPTION;
	}

	/* Invoke kernel fatal exception handler */
	z_arm_fatal_error(reason, esf);

	/* All prefetch aborts are treated as fatal for now */
	return true;
}
#ifdef CONFIG_USERSPACE
/* Fixup table: code windows that may legitimately fault while probing
 * user memory (currently only z_arm_user_string_nlen).
 */
Z_EXC_DECLARE(z_arm_user_string_nlen);

static const struct z_exc_handle exceptions[] = {
	Z_EXC_HANDLE(z_arm_user_string_nlen)
};
/* Perform an assessment whether an MPU fault shall be treated as
 * recoverable: if the faulting PC lies inside one of the registered
 * exception windows, redirect it to the window's fixup handler.
 *
 * @return true if error is recoverable, otherwise return false.
 */
static bool memory_fault_recoverable(struct arch_esf *esf)
{
	for (size_t idx = 0U; idx < ARRAY_SIZE(exceptions); idx++) {
		/* Mask out the Thumb instruction-mode bit */
		uint32_t range_start = (uint32_t)exceptions[idx].start & ~0x1U;
		uint32_t range_end = (uint32_t)exceptions[idx].end & ~0x1U;

		if ((esf->basic.pc >= range_start) &&
		    (esf->basic.pc < range_end)) {
			esf->basic.pc = (uint32_t)(exceptions[idx].fixup);
			return true;
		}
	}

	return false;
}
#endif
/**
 * @brief Data abort fault handler
 *
 * Decodes DFSR/DFAR, gives the userspace fixup table a chance to recover
 * the fault, then logs it and escalates to the kernel fatal-error
 * handler.
 *
 * @return Returns true if the fault is fatal
 */
bool z_arm_fault_data(struct arch_esf *esf)
{
	uint32_t reason = K_ERR_CPU_EXCEPTION;

	/* Read and parse Data Fault Status Register (DFSR) */
	uint32_t dfsr = __get_DFSR();
#if defined(CONFIG_AARCH32_ARMV8_R)
	uint32_t fs = dfsr & DFSR_STATUS_Msk;
#else
	/* Pre-ARMv8-R: fault status is split into FS[4] and FS[3:0] */
	uint32_t fs = ((dfsr & DFSR_FS1_Msk) >> 6) | (dfsr & DFSR_FS0_Msk);
#endif

	/* Read Data Fault Address Register (DFAR) */
	uint32_t dfar = __get_DFAR();

#if defined(CONFIG_GDBSTUB)
	z_gdb_entry(esf, GDB_EXCEPTION_MEMORY_FAULT);
	/* return false - non-fatal error */
	return false;
#endif

#if defined(CONFIG_USERSPACE)
	/* A translation/background or permission fault inside a registered
	 * user-access window is recoverable via its fixup handler.
	 */
	if ((fs == COND_CODE_1(CONFIG_AARCH32_ARMV8_R,
			       (FSR_FS_TRANSLATION_FAULT),
			       (FSR_FS_BACKGROUND_FAULT)))
	    || (fs == FSR_FS_PERMISSION_FAULT)) {
		if (memory_fault_recoverable(esf)) {
			return false;
		}
	}
#endif

	/* Print fault information*/
	LOG_ERR("***** DATA ABORT *****");
	if (FAULT_DUMP_VERBOSE) {
		reason = dump_fault(fs, dfar);
	}

	/* Simplify exception codes if requested */
	if (IS_ENABLED(CONFIG_SIMPLIFIED_EXCEPTION_CODES) && (reason >= K_ERR_ARCH_START)) {
		reason = K_ERR_CPU_EXCEPTION;
	}

	/* Invoke kernel fatal exception handler */
	z_arm_fatal_error(reason, esf);

	/* All data aborts are treated as fatal for now */
	return true;
}
/**
 * @brief Initialisation of fault handling
 *
 * Intentionally empty: the Cortex-A/R fault handlers currently need no
 * runtime setup. The hook is presumably kept for parity with other ARM
 * cores -- confirm before removing.
 */
void z_arm_fault_init(void)
{
	/* Nothing to do for now */
}
``` | /content/code_sandbox/arch/arm/core/cortex_a_r/fault.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,762 |
```objective-c
/*
*/
/**
* @file
* @brief Definitions for boot code
*/
#ifndef _BOOT_H_
#define _BOOT_H_

#ifndef _ASMLANGUAGE
/* Exception vector table and boot entry point, defined outside this header */
extern void *_vector_table[];
extern void __start(void);
#endif /* _ASMLANGUAGE */

/* Offsets into the boot_params structure, for use from assembly.
 * Keep these in sync with the structure's C definition.
 */
#define BOOT_PARAM_MPID_OFFSET 0
#define BOOT_PARAM_IRQ_SP_OFFSET 4
#define BOOT_PARAM_FIQ_SP_OFFSET 8
#define BOOT_PARAM_ABT_SP_OFFSET 12
#define BOOT_PARAM_UDF_SP_OFFSET 16
#define BOOT_PARAM_SVC_SP_OFFSET 20
#define BOOT_PARAM_SYS_SP_OFFSET 24
#define BOOT_PARAM_VOTING_OFFSET 28

#endif /* _BOOT_H_ */
``` | /content/code_sandbox/arch/arm/core/cortex_a_r/boot.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 146 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.