text stringlengths 9 39.2M | dir stringlengths 25 226 | lang stringclasses 163 values | created_date timestamp[s] | updated_date timestamp[s] | repo_name stringclasses 751 values | repo_full_name stringclasses 752 values | star int64 1.01k 183k | len_tokens int64 1 18.5M |
|---|---|---|---|---|---|---|---|---|
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_SPARC_ARCH_INLINES_H
#define ZEPHYR_INCLUDE_ARCH_SPARC_ARCH_INLINES_H
#include <zephyr/kernel_structs.h>
static ALWAYS_INLINE unsigned int arch_num_cpus(void)
{
	/*
	 * SPARC has no runtime CPU discovery hook in this port: the CPU
	 * count is always the compile-time configured maximum.
	 */
	const unsigned int ncpus = CONFIG_MP_MAX_NUM_CPUS;

	return ncpus;
}
#endif /* ZEPHYR_INCLUDE_ARCH_SPARC_ARCH_INLINES_H */
``` | /content/code_sandbox/include/zephyr/arch/sparc/arch_inlines.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 83 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_SPARC_SPARC_H_
#define ZEPHYR_INCLUDE_ARCH_SPARC_SPARC_H_
/*
* @file
* @brief Definitions for the SPARC V8 architecture.
*/
/* Processor State Register (field layout per the SPARC V8 manual) */
#define PSR_VER_BIT 24
#define PSR_PIL_BIT 8
#define PSR_VER (0xf << PSR_VER_BIT)   /* implementation version field */
#define PSR_EF (1 << 12)               /* enable floating-point unit */
#define PSR_S (1 << 7)                 /* supervisor mode */
#define PSR_PS (1 << 6)                /* supervisor state at time of most recent trap */
#define PSR_ET (1 << 5)                /* enable traps */
#define PSR_PIL (0xf << PSR_PIL_BIT)   /* processor interrupt level (0..15) */
#define PSR_CWP 0x1f                   /* current register window pointer */
/* Trap Base Register */
#define TBR_TT_BIT 4
#define TBR_TBA 0xfffff000             /* trap base address (table must be 4KB aligned) */
#define TBR_TT 0x00000ff0              /* trap type written by hardware on a trap */
/* Trap types in TBR.TT */
#define TT_RESET 0x00
#define TT_WINDOW_OVERFLOW 0x05
#define TT_WINDOW_UNDERFLOW 0x06
#define TT_DATA_ACCESS_EXCEPTION 0x09
#endif /* ZEPHYR_INCLUDE_ARCH_SPARC_SPARC_H_ */
``` | /content/code_sandbox/include/zephyr/arch/sparc/sparc.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 274 |
```objective-c
/*
*
*/
/**
* @file
* @brief SPARC specific kernel interface header
* This header contains the SPARC specific kernel interface. It is
* included by the generic kernel interface header (arch/cpu.h)
*/
#ifndef ZEPHYR_INCLUDE_ARCH_SPARC_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_SPARC_ARCH_H_
#include <zephyr/arch/sparc/exception.h>
#include <zephyr/arch/sparc/thread.h>
#include <zephyr/arch/sparc/sparc.h>
#include <zephyr/arch/common/sys_bitops.h>
#include <zephyr/arch/common/sys_io.h>
#include <zephyr/arch/common/ffs.h>
#include <zephyr/irq.h>
#include <zephyr/sw_isr_table.h>
#include <soc.h>
#include <zephyr/devicetree.h>
/* stacks, for SPARC architecture stack shall be 8byte-aligned */
#define ARCH_STACK_PTR_ALIGN 8
/*
* Software trap numbers.
* Assembly usage: "ta SPARC_SW_TRAP_<TYPE>"
*/
#define SPARC_SW_TRAP_FLUSH_WINDOWS 0x03
#define SPARC_SW_TRAP_SET_PIL 0x09
#define SPARC_SW_TRAP_EXCEPT 0x0F
#ifndef _ASMLANGUAGE
#include <zephyr/sys/util.h>
#ifdef __cplusplus
extern "C" {
#endif
#define STACK_ROUND_UP(x) ROUND_UP(x, ARCH_STACK_PTR_ALIGN)
/*
* SOC specific function to translate from processor interrupt request level
* (1..15) to logical interrupt source number. For example by probing the
* interrupt controller.
*/
int z_sparc_int_get_source(int irl);
void z_irq_spurious(const void *unused);
/*
 * Register an ISR in the software ISR table. Interrupt routing on this
 * port is static: priority_p and flags_p are accepted for API
 * compatibility but are not used.
 */
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
{ \
Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
}
/*
 * Set the processor interrupt level (PIL) via the SPARC_SW_TRAP_SET_PIL
 * software trap and return the previous level.
 *
 * The new PIL is passed in %o0 and the trap handler leaves the old PIL
 * in the same register: the in and out operands are both pinned to %o0.
 */
static ALWAYS_INLINE unsigned int z_sparc_set_pil_inline(unsigned int newpil)
{
/* Pin the argument/return value to %o0, as the trap ABI requires. */
register uint32_t oldpil __asm__ ("o0") = newpil;
__asm__ volatile (
/* "ta" = trap always; the following nop fills the delay slot. */
"ta %1\nnop\n" :
"=r" (oldpil) :
"i" (SPARC_SW_TRAP_SET_PIL), "r" (oldpil) :
"memory"
);
return oldpil;
}
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
	/*
	 * Mask all maskable interrupts by raising the processor interrupt
	 * level to its maximum; the previous level is the lock key.
	 */
	const unsigned int max_pil = 15;

	return z_sparc_set_pil_inline(max_pil);
}
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
	/* Restore the interrupt level saved by arch_irq_lock(). */
	(void)z_sparc_set_pil_inline(key);
}
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
	/* A key of 0 means PIL was 0, i.e. interrupts were enabled. */
	return (key == 0U);
}
/* Execute a single no-operation instruction. */
static ALWAYS_INLINE void arch_nop(void)
{
__asm__ volatile ("nop");
}
extern uint32_t sys_clock_cycle_get_32(void);
static inline uint32_t arch_k_cycle_get_32(void)
{
	/* The cycle counter is owned by the platform timer driver. */
	uint32_t cycles = sys_clock_cycle_get_32();

	return cycles;
}
extern uint64_t sys_clock_cycle_get_64(void);
static inline uint64_t arch_k_cycle_get_64(void)
{
	/* 64-bit variant of the timer driver's cycle counter. */
	uint64_t cycles = sys_clock_cycle_get_64();

	return cycles;
}
/*
 * Raise a fatal software exception. The reason code is handed to the
 * SPARC_SW_TRAP_EXCEPT trap handler in %g1. Never returns.
 */
#define ARCH_EXCEPT(reason_p) \
do { \
register uint32_t _g1 __asm__("g1") = reason_p; \
\
__asm__ volatile ( \
"ta %[vector]\n\t" \
: \
: [vector] "i" (SPARC_SW_TRAP_EXCEPT), "r" (_g1) \
: "memory" \
); \
CODE_UNREACHABLE; \
} while (false)
#ifdef __cplusplus
}
#endif
#endif /*_ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_SPARC_ARCH_H_ */
``` | /content/code_sandbox/include/zephyr/arch/sparc/arch.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 833 |
```objective-c
/*
*
*/
/**
* @file
* @brief Per-arch thread definition
*
* This file contains definitions for
*
* struct _thread_arch
* struct _callee_saved
*
* necessary to instantiate instances of struct k_thread.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_SPARC_THREAD_H_
#define ZEPHYR_INCLUDE_ARCH_SPARC_THREAD_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
/*
* The following structure defines the list of registers that need to be
* saved/restored when a cooperative context switch occurs.
*/
/*
 * NOTE(review): the context-switch code appears to rely on these exact
 * field offsets (see the double-word alignment note below) — do not
 * reorder or resize fields without checking the swap assembly.
 */
struct _callee_saved {
/* y register used by mul/div */
uint32_t y;
/* processor status register */
uint32_t psr;
/*
 * local registers
 *
 * Using uint64_t l0_and_l1 will put everything in this structure on a
 * double word boundary which allows us to use double word loads and
 * stores safely in the context switch.
 */
uint64_t l0_and_l1;
uint32_t l2;
uint32_t l3;
uint32_t l4;
uint32_t l5;
uint32_t l6;
uint32_t l7;
/* input registers */
uint32_t i0;
uint32_t i1;
uint32_t i2;
uint32_t i3;
uint32_t i4;
uint32_t i5;
uint32_t i6; /* frame pointer */
uint32_t i7;
/* output registers */
uint32_t o6; /* stack pointer */
uint32_t o7;
};
typedef struct _callee_saved _callee_saved_t;
/* No per-thread architecture-specific state is needed on this port. */
struct _thread_arch {
/* empty */
};
typedef struct _thread_arch _thread_arch_t;
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_SPARC_THREAD_H_ */
``` | /content/code_sandbox/include/zephyr/arch/sparc/thread.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 384 |
```objective-c
/*
*/
/**
* @file
* @brief Xtensa public exception handling
*
* Xtensa-specific kernel exception handling interface. Included by
* arch/xtensa/arch.h.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_EXCEPTION_H_
#define ZEPHYR_INCLUDE_ARCH_XTENSA_EXCEPTION_H_
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _ASMLANGUAGE
/* Xtensa uses a variable length stack frame depending on how many
* register windows are in use. This isn't a struct type, it just
* matches the register/stack-unit width.
*/
struct arch_esf {
int dummy; /* placeholder only: the real frame is variable-length (see above) */
};
#endif
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_EXCEPTION_H_ */
``` | /content/code_sandbox/include/zephyr/arch/xtensa/exception.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 160 |
```linker script
/*
*
*/
/**
* @file
* @brief Linker command/script file for SPARC
*/
#include <soc.h>
#include <zephyr/linker/sections.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/linker/linker-tool.h>
ENTRY(CONFIG_KERNEL_ENTRY)
SECTIONS
{
#include <zephyr/linker/rel-sections.ld>
#ifdef CONFIG_LLEXT
#include <zephyr/linker/llext-sections.ld>
#endif
/* --- ROM image: code first, then read-only data --- */
__rom_region_start = .;
SECTION_PROLOGUE(_TEXT_SECTION_NAME,,)
{
/* Trap table alignment required by SPARC V8 specification. */
. = ALIGN(0x1000);
__text_region_start = .;
*(.text.traptable)
*(.text)
*(.text.*)
*(.stub)
*(.gnu.linkonce.t.*)
} GROUP_LINK_IN(REGION_TEXT)
__text_region_end = .;
__rodata_region_start = .;
#include <zephyr/linker/common-rom.ld>
/* Located in generated directory. This file is populated by calling
* zephyr_linker_sources(ROM_SECTIONS ...). Useful for grouping iterable RO structs.
*/
#include <snippets-rom-sections.ld>
#include <zephyr/linker/thread-local-storage.ld>
SECTION_PROLOGUE(_RODATA_SECTION_NAME,,)
{
. = ALIGN(8);
*(.rodata)
*(.rodata.*)
*(.gnu.linkonce.r.*)
*(.rodata1)
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-rodata.ld>
} GROUP_LINK_IN(REGION_RODATA)
#include <zephyr/linker/cplusplus-rom.ld>
__rodata_region_end = .;
__rodata_region_size = __rodata_region_end - __rodata_region_start;
__rom_region_end = .;
/* --- Initialized data: loaded from ROM, lives in RAM --- */
__data_region_load_start = .;
SECTION_PROLOGUE(.plt,,)
{
*(.plt)
}
SECTION_PROLOGUE(.iplt,,)
{
*(.iplt)
}
SECTION_DATA_PROLOGUE(_DATA_SECTION_NAME,,)
{
. = ALIGN(8);
_image_ram_start = .;
__data_region_start = .;
__data_start = .;
*(.data)
*(.data.*)
*(.gnu.linkonce.d.*)
SORT(CONSTRUCTORS)
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-rwdata.ld>
__data_end = .;
} GROUP_DATA_LINK_IN(REGION_DATA_VMA, REGION_DATA_LMA)
__data_size = __data_end - __data_start;
__data_load_start = LOADADDR(_DATA_SECTION_NAME);
#include <zephyr/linker/common-ram.ld>
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-ram-sections.ld>
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-data-sections.ld>
__data_region_end = .;
/* --- Zero-initialized and uninitialized RAM --- */
SECTION_DATA_PROLOGUE(_BSS_SECTION_NAME,(NOLOAD),)
{
/*
* For performance, BSS section is assumed to be 4 byte aligned and
* a multiple of 4 bytes
*/
. = ALIGN(4);
__bss_start = .;
*(.dynbss)
*(.bss)
*(.bss.*)
*(.gnu.linkonce.b.*)
COMMON_SYMBOLS
/*
* As memory is cleared in words only, it is simpler to ensure the BSS
* section ends on a 4 byte boundary. This wastes a maximum of 3 bytes.
*/
__bss_end = ALIGN(4);
} GROUP_LINK_IN(REGION_BSS)
SECTION_PROLOGUE(_NOINIT_SECTION_NAME,(NOLOAD),)
{
/*
* This section is used for non-initialized objects that
* will not be cleared during the boot process.
*/
*(.noinit)
*(.noinit.*)
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-noinit.ld>
} GROUP_LINK_IN(REGION_BSS)
#include <zephyr/linker/cplusplus-ram.ld>
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-sections.ld>
#include <zephyr/linker/ram-end.ld>
GROUP_END(RAMABLE_REGION)
#include <zephyr/linker/debug-sections.ld>
/* Drop metadata that has no place in the final image. */
/DISCARD/ : { *(.note.GNU-stack) }
/DISCARD/ : { *(.gnu_debuglink) }
/DISCARD/ : { *(.gnu.lto_*) }
SECTION_PROLOGUE(.gnu.attributes, 0,)
{
KEEP(*(.gnu.attributes))
}
}
``` | /content/code_sandbox/include/zephyr/arch/sparc/linker.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,151 |
```objective-c
/*
*
*/
#include <stdint.h>
#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_XTENSA_MMU_H
#define ZEPHYR_INCLUDE_ARCH_XTENSA_XTENSA_MMU_H
/**
* @defgroup xtensa_mmu_apis Xtensa Memory Management Unit (MMU) APIs
* @ingroup xtensa_apis
* @{
*/
/**
* @name Memory region permission and caching mode.
* @{
*/
/** Memory region is executable. */
#define XTENSA_MMU_PERM_X BIT(0)
/** Memory region is writable. */
#define XTENSA_MMU_PERM_W BIT(1)
/** Memory region is both executable and writable */
#define XTENSA_MMU_PERM_WX (XTENSA_MMU_PERM_W | XTENSA_MMU_PERM_X)
/** Memory region has write-back cache. */
#define XTENSA_MMU_CACHED_WB BIT(2)
/** Memory region has write-through cache. */
#define XTENSA_MMU_CACHED_WT BIT(3)
/**
* @}
*/
/**
* @name Memory domain and partitions
* @{
*/
typedef uint32_t k_mem_partition_attr_t;
#define K_MEM_PARTITION_IS_EXECUTABLE(attr) (((attr) & XTENSA_MMU_PERM_X) != 0)
#define K_MEM_PARTITION_IS_WRITABLE(attr) (((attr) & XTENSA_MMU_PERM_W) != 0)
#define K_MEM_PARTITION_IS_USER(attr) (((attr) & XTENSA_MMU_MAP_USER) != 0)
/* Read-Write access permission attributes */
#define K_MEM_PARTITION_P_RW_U_RW \
((k_mem_partition_attr_t) {XTENSA_MMU_PERM_W | XTENSA_MMU_MAP_USER})
#define K_MEM_PARTITION_P_RW_U_NA \
((k_mem_partition_attr_t) {0})
#define K_MEM_PARTITION_P_RO_U_RO \
((k_mem_partition_attr_t) {XTENSA_MMU_MAP_USER})
#define K_MEM_PARTITION_P_RO_U_NA \
((k_mem_partition_attr_t) {0})
#define K_MEM_PARTITION_P_NA_U_NA \
((k_mem_partition_attr_t) {0})
/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RX_U_RX \
((k_mem_partition_attr_t) {XTENSA_MMU_PERM_X})
/**
* @}
*/
/**
* @brief Software only bit to indicate a memory region can be accessed by user thread(s).
*
* This BIT tells the mapping code which ring PTE entries to use.
*/
#define XTENSA_MMU_MAP_USER BIT(4)
/**
* @brief Software only bit to indicate a memory region is shared by all threads.
*
* This BIT tells the mapping code whether the memory region should
* be shared between all threads. That is not used in the HW, it is
* just for the implementation.
*
* The PTE mapping this memory will use an ASID that is set in the
* ring 4 spot in RASID.
*/
#define XTENSA_MMU_MAP_SHARED BIT(30)
/**
 * Struct used to map a memory region.
 */
struct xtensa_mmu_range {
/** Name of the memory region. */
const char *name;
/** Start address of the memory region. */
const uint32_t start;
/** End address of the memory region (NOTE(review): presumably exclusive — confirm against the mapping code). */
const uint32_t end;
/** Attributes for the memory region (XTENSA_MMU_PERM_* / XTENSA_MMU_CACHED_* / XTENSA_MMU_MAP_* bits). */
const uint32_t attrs;
};
/**
* @brief Additional memory regions required by SoC.
*
* These memory regions will be setup by MMU initialization code at boot.
*/
extern const struct xtensa_mmu_range xtensa_soc_mmu_ranges[];
/** Number of SoC additional memory regions. */
extern int xtensa_soc_mmu_ranges_num;
/**
* @brief Initialize hardware MMU.
*
* This initializes the MMU hardware and setup the memory regions at boot.
*/
void xtensa_mmu_init(void);
/**
* @brief Re-initialize hardware MMU.
*
* This configures the MMU hardware when the cpu lost context and has
* re-started.
*
* It assumes that the page table is already created and accessible in memory.
*/
void xtensa_mmu_reinit(void);
/**
* @brief Tell other processors to flush TLBs.
*
* This sends IPI to other processors to telling them to
* invalidate cache to page tables and flush TLBs. This is
* needed when one processor is updating page tables that
* may affect threads running on other processors.
*
* @note This needs to be implemented in the SoC layer.
*/
void xtensa_mmu_tlb_ipi(void);
/**
* @brief Invalidate cache to page tables and flush TLBs.
*
* This invalidates cache to all page tables and flush TLBs
* as they may have been modified by other processors.
*/
void xtensa_mmu_tlb_shootdown(void);
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_XTENSA_MMU_H */
``` | /content/code_sandbox/include/zephyr/arch/xtensa/xtensa_mmu.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,054 |
```objective-c
/*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_XTENSA_IRQ_H_
#define ZEPHYR_INCLUDE_ARCH_XTENSA_XTENSA_IRQ_H_
#include <stdint.h>
#include <zephyr/toolchain.h>
#include <xtensa/config/core-isa.h>
#define CONFIG_GEN_IRQ_START_VECTOR 0
/**
* @cond INTERNAL_HIDDEN
*/
/*
* Call this function to enable the specified interrupts.
*
* mask - Bit mask of interrupts to be enabled.
*/
static inline void z_xt_ints_on(unsigned int mask)
{
int val;
/* Read-modify-write of the INTENABLE special register. */
__asm__ volatile("rsr.intenable %0" : "=r"(val));
val |= mask;
/* rsync makes the register write take effect before later instructions. */
__asm__ volatile("wsr.intenable %0; rsync" : : "r"(val));
}
/*
* Call this function to disable the specified interrupts.
*
* mask - Bit mask of interrupts to be disabled.
*/
static inline void z_xt_ints_off(unsigned int mask)
{
int val;
/* Read-modify-write of the INTENABLE special register. */
__asm__ volatile("rsr.intenable %0" : "=r"(val));
val &= ~mask;
/* rsync makes the register write take effect before later instructions. */
__asm__ volatile("wsr.intenable %0; rsync" : : "r"(val));
}
/*
* Call this function to set the specified (s/w) interrupt.
*/
static inline void z_xt_set_intset(unsigned int arg)
{
#if XCHAL_HAVE_INTERRUPTS
/* Write INTSET to latch the given software interrupt(s). */
__asm__ volatile("wsr.intset %0; rsync" : : "r"(arg));
#else
/* Core configured without interrupts: nothing to set. */
ARG_UNUSED(arg);
#endif
}
/**
* INTERNAL_HIDDEN @endcond
*/
#ifdef CONFIG_MULTI_LEVEL_INTERRUPTS
/* for _soc_irq_*() */
#include <soc.h>
#ifdef CONFIG_2ND_LEVEL_INTERRUPTS
#ifdef CONFIG_3RD_LEVEL_INTERRUPTS
#define CONFIG_NUM_IRQS (XCHAL_NUM_INTERRUPTS +\
(CONFIG_NUM_2ND_LEVEL_AGGREGATORS +\
CONFIG_NUM_3RD_LEVEL_AGGREGATORS) *\
CONFIG_MAX_IRQ_PER_AGGREGATOR)
#else
#define CONFIG_NUM_IRQS (XCHAL_NUM_INTERRUPTS +\
CONFIG_NUM_2ND_LEVEL_AGGREGATORS *\
CONFIG_MAX_IRQ_PER_AGGREGATOR)
#endif /* CONFIG_3RD_LEVEL_INTERRUPTS */
#else
#define CONFIG_NUM_IRQS XCHAL_NUM_INTERRUPTS
#endif /* CONFIG_2ND_LEVEL_INTERRUPTS */
void z_soc_irq_init(void);
void z_soc_irq_enable(unsigned int irq);
void z_soc_irq_disable(unsigned int irq);
int z_soc_irq_is_enabled(unsigned int irq);
#define arch_irq_enable(irq) z_soc_irq_enable(irq)
#define arch_irq_disable(irq) z_soc_irq_disable(irq)
#define arch_irq_is_enabled(irq) z_soc_irq_is_enabled(irq)
#ifdef CONFIG_DYNAMIC_INTERRUPTS
extern int z_soc_irq_connect_dynamic(unsigned int irq, unsigned int priority,
void (*routine)(const void *parameter),
const void *parameter, uint32_t flags);
#endif
#else
#define CONFIG_NUM_IRQS XCHAL_NUM_INTERRUPTS
#define arch_irq_enable(irq) xtensa_irq_enable(irq)
#define arch_irq_disable(irq) xtensa_irq_disable(irq)
#define arch_irq_is_enabled(irq) xtensa_irq_is_enabled(irq)
#endif
/**
 * @brief Enable interrupt on Xtensa core.
 *
 * @param irq Interrupt to be enabled (0..31).
 */
static ALWAYS_INLINE void xtensa_irq_enable(uint32_t irq)
{
	/*
	 * Shift an unsigned constant: "1 << 31" on a signed int is
	 * undefined behavior, and interrupt numbers can reach 31.
	 */
	z_xt_ints_on(1U << irq);
}
/**
 * @brief Disable interrupt on Xtensa core.
 *
 * @param irq Interrupt to be disabled (0..31).
 */
static ALWAYS_INLINE void xtensa_irq_disable(uint32_t irq)
{
	/*
	 * Shift an unsigned constant: "1 << 31" on a signed int is
	 * undefined behavior, and interrupt numbers can reach 31.
	 */
	z_xt_ints_off(1U << irq);
}
/** Implementation of @ref arch_irq_lock. */
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
unsigned int key;
/* rsil: raise the interrupt level to EXCM_LEVEL, returning the old PS. */
__asm__ volatile("rsil %0, %1"
: "=r"(key) : "i"(XCHAL_EXCM_LEVEL) : "memory");
return key;
}
/** Implementation of @ref arch_irq_unlock. */
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
/* Restore the PS value saved by arch_irq_lock(); rsync makes the
 * new interrupt level take effect before later instructions.
 */
__asm__ volatile("wsr.ps %0; rsync"
:: "r"(key) : "memory");
}
/** Implementation of @ref arch_irq_unlocked. */
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
	/* Interrupts are unlocked iff the saved PS.INTLEVEL field is 0. */
	unsigned int intlevel = key & 0xf;

	return intlevel == 0U;
}
/**
* @brief Query if an interrupt is enabled on Xtensa core.
*
* @param irq Interrupt to be queried.
*
* @return True if interrupt is enabled, false otherwise.
*/
int xtensa_irq_is_enabled(unsigned int irq);
#include <zephyr/irq.h>
#endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_XTENSA_IRQ_H_ */
``` | /content/code_sandbox/include/zephyr/arch/xtensa/irq.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,017 |
```objective-c
/*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_CACHE_H_
#define ZEPHYR_INCLUDE_ARCH_XTENSA_CACHE_H_
#include <xtensa/config/core-isa.h>
#include <zephyr/toolchain.h>
#include <zephyr/sys/util.h>
#include <zephyr/debug/sparse.h>
#include <xtensa/hal.h>
#ifdef __cplusplus
extern "C" {
#endif
#define Z_DCACHE_MAX (XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS)
#if XCHAL_DCACHE_SIZE
BUILD_ASSERT(Z_IS_POW2(XCHAL_DCACHE_LINESIZE));
BUILD_ASSERT(Z_IS_POW2(Z_DCACHE_MAX));
#endif
#if defined(CONFIG_DCACHE) || defined(__DOXYGEN__)
/** Implementation of @ref arch_dcache_flush_range. */
static ALWAYS_INLINE int arch_dcache_flush_range(void *addr, size_t bytes)
{
#if XCHAL_DCACHE_SIZE
size_t step = XCHAL_DCACHE_LINESIZE;
size_t first = ROUND_DOWN(addr, step);
size_t last = ROUND_UP(((long)addr) + bytes, step);
size_t line;
/* The "bytes &&" guard makes a zero-length request a no-op. */
for (line = first; bytes && line < last; line += step) {
/* dhwb: data-cache hit write-back (no invalidate). */
__asm__ volatile("dhwb %0, 0" :: "r"(line));
}
#endif
return 0;
}
/** Implementation of @ref arch_dcache_flush_and_invd_range. */
static ALWAYS_INLINE int arch_dcache_flush_and_invd_range(void *addr, size_t bytes)
{
#if XCHAL_DCACHE_SIZE
size_t step = XCHAL_DCACHE_LINESIZE;
size_t first = ROUND_DOWN(addr, step);
size_t last = ROUND_UP(((long)addr) + bytes, step);
size_t line;
for (line = first; bytes && line < last; line += step) {
/* dhwbi: data-cache hit write-back and invalidate. */
__asm__ volatile("dhwbi %0, 0" :: "r"(line));
}
#endif
return 0;
}
/** Implementation of @ref arch_dcache_invd_range. */
static ALWAYS_INLINE int arch_dcache_invd_range(void *addr, size_t bytes)
{
#if XCHAL_DCACHE_SIZE
size_t step = XCHAL_DCACHE_LINESIZE;
size_t first = ROUND_DOWN(addr, step);
size_t last = ROUND_UP(((long)addr) + bytes, step);
size_t line;
for (line = first; bytes && line < last; line += step) {
/* dhi: data-cache hit invalidate (discards dirty data). */
__asm__ volatile("dhi %0, 0" :: "r"(line));
}
#endif
return 0;
}
/** Implementation of @ref arch_dcache_invd_all. */
static ALWAYS_INLINE int arch_dcache_invd_all(void)
{
#if XCHAL_DCACHE_SIZE
size_t step = XCHAL_DCACHE_LINESIZE;
size_t line;
/* Walk every index in the cache; dii invalidates by index. */
for (line = 0; line < XCHAL_DCACHE_SIZE; line += step) {
__asm__ volatile("dii %0, 0" :: "r"(line));
}
#endif
return 0;
}
/** Implementation of @ref arch_dcache_flush_all. */
static ALWAYS_INLINE int arch_dcache_flush_all(void)
{
#if XCHAL_DCACHE_SIZE
size_t step = XCHAL_DCACHE_LINESIZE;
size_t line;
/* diwb: write back by index, leaving the line valid. */
for (line = 0; line < XCHAL_DCACHE_SIZE; line += step) {
__asm__ volatile("diwb %0, 0" :: "r"(line));
}
#endif
return 0;
}
/** Implementation of @ref arch_dcache_flush_and_invd_all. */
static ALWAYS_INLINE int arch_dcache_flush_and_invd_all(void)
{
#if XCHAL_DCACHE_SIZE
size_t step = XCHAL_DCACHE_LINESIZE;
size_t line;
/* diwbi: write back and invalidate by index. */
for (line = 0; line < XCHAL_DCACHE_SIZE; line += step) {
__asm__ volatile("diwbi %0, 0" :: "r"(line));
}
#endif
return 0;
}
/** Implementation of @ref arch_dcache_enable. */
static ALWAYS_INLINE void arch_dcache_enable(void)
{
/* nothing - the data cache cannot be switched on/off at runtime here */
}
/** Implementation of @ref arch_dcache_disable. */
static ALWAYS_INLINE void arch_dcache_disable(void)
{
/* nothing - the data cache cannot be switched on/off at runtime here */
}
#endif /* CONFIG_DCACHE */
#if defined(CONFIG_ICACHE) || defined(__DOXYGEN__)
/** Implementation of @ref arch_icache_line_size_get. */
static ALWAYS_INLINE size_t arch_icache_line_size_get(void)
{
/* Not supported on this port. */
return -ENOTSUP;
}
/** Implementation of @ref arch_icache_flush_all. */
static ALWAYS_INLINE int arch_icache_flush_all(void)
{
/* The instruction cache is never dirty: flushing is not supported. */
return -ENOTSUP;
}
/** Implementation of @ref arch_icache_invd_all. */
static ALWAYS_INLINE int arch_icache_invd_all(void)
{
#if XCHAL_ICACHE_SIZE
xthal_icache_all_invalidate();
#endif
return 0;
}
/** Implementation of @ref arch_icache_flush_and_invd_all. */
static ALWAYS_INLINE int arch_icache_flush_and_invd_all(void)
{
return -ENOTSUP;
}
/** Implementation of @ref arch_icache_flush_range. */
static ALWAYS_INLINE int arch_icache_flush_range(void *addr, size_t size)
{
return -ENOTSUP;
}
/** Implementation of @ref arch_icache_invd_range. */
static ALWAYS_INLINE int arch_icache_invd_range(void *addr, size_t size)
{
#if XCHAL_ICACHE_SIZE
xthal_icache_region_invalidate(addr, size);
#endif
return 0;
}
/** Implementation of @ref arch_icache_flush_and_invd_range. */
static ALWAYS_INLINE int arch_icache_flush_and_invd_range(void *addr, size_t size)
{
return -ENOTSUP;
}
/** Implementation of @ref arch_icache_enable. */
static ALWAYS_INLINE void arch_icache_enable(void)
{
/* nothing - the instruction cache cannot be switched on/off here */
}
/** Implementation of @ref arch_icache_disable. */
static ALWAYS_INLINE void arch_icache_disable(void)
{
/* nothing - the instruction cache cannot be switched on/off here */
}
#endif /* CONFIG_ICACHE */
#if defined(CONFIG_CACHE_DOUBLEMAP)
/**
* @brief Test if a pointer is in cached region.
*
* Some hardware may map the same physical memory twice
* so that it can be seen in both (incoherent) cached mappings
* and a coherent "shared" area. This tests if a particular
* pointer is within the cached, coherent area.
*
* @param ptr Pointer
*
* @retval True if pointer is in cached region.
* @retval False if pointer is not in cached region.
*/
static inline bool arch_cache_is_ptr_cached(void *ptr)
{
	/* Address bits [31:29] select which 512MB region a pointer is in. */
	return (((size_t)ptr) >> 29) == CONFIG_XTENSA_CACHED_REGION;
}
/**
* @brief Test if a pointer is in un-cached region.
*
* Some hardware may map the same physical memory twice
* so that it can be seen in both (incoherent) cached mappings
* and a coherent "shared" area. This tests if a particular
* pointer is within the un-cached, incoherent area.
*
* @param ptr Pointer
*
* @retval True if pointer is not in cached region.
* @retval False if pointer is in cached region.
*/
static inline bool arch_cache_is_ptr_uncached(void *ptr)
{
	/* Address bits [31:29] select which 512MB region a pointer is in. */
	return (((size_t)ptr) >> 29) == CONFIG_XTENSA_UNCACHED_REGION;
}
/*
 * Translate @p addr from 512MB region @p rfrom into region @p rto by
 * rewriting address bits [31:29]; a single bit operation suffices when
 * the two region numbers differ in exactly one bit.
 */
static ALWAYS_INLINE uint32_t z_xtrpoflip(uint32_t addr, uint32_t rto, uint32_t rfrom)
{
/* The math here is all compile-time: when the two regions
 * differ by a power of two, we can convert between them by
 * setting or clearing just one bit.  Otherwise it needs two
 * operations.
 */
uint32_t rxor = (rto ^ rfrom) << 29;
rto <<= 29;
if (Z_IS_POW2(rxor)) {
if ((rxor & rto) == 0) {
return addr & ~rxor;
} else {
return addr | rxor;
}
} else {
/* General case: clear the region bits, then set the target region. */
return (addr & ~(7U << 29)) | rto;
}
}
/**
* @brief Return cached pointer to a RAM address
*
* The Xtensa coherence architecture maps addressable RAM twice, in
* two different 512MB regions whose L1 cache settings can be
* controlled independently. So for any given pointer, it is possible
* to convert it to and from a cached version.
*
* This function takes a pointer to any addressable object (either in
* cacheable memory or not) and returns a pointer that can be used to
* refer to the same memory through the L1 data cache. Data read
* through the resulting pointer will reflect locally cached values on
* the current CPU if they exist, and writes will go first into the
* cache and be written back later.
*
* @see arch_uncached_ptr()
*
* @param ptr A pointer to a valid C object
* @return A pointer to the same object via the L1 dcache
*/
static inline void __sparse_cache *arch_cache_cached_ptr_get(void *ptr)
{
/* Rewrite address bits [31:29] from the uncached to the cached region;
 * the __sparse_cache address-space cast is for static analysis only.
 */
return (__sparse_force void __sparse_cache *)z_xtrpoflip((uint32_t) ptr,
CONFIG_XTENSA_CACHED_REGION,
CONFIG_XTENSA_UNCACHED_REGION);
}
/**
* @brief Return uncached pointer to a RAM address
*
* The Xtensa coherence architecture maps addressable RAM twice, in
* two different 512MB regions whose L1 cache settings can be
* controlled independently. So for any given pointer, it is possible
* to convert it to and from a cached version.
*
* This function takes a pointer to any addressable object (either in
* cacheable memory or not) and returns a pointer that can be used to
* refer to the same memory while bypassing the L1 data cache. Data
* in the L1 cache will not be inspected nor modified by the access.
*
* @see arch_cached_ptr()
*
* @param ptr A pointer to a valid C object
* @return A pointer to the same object bypassing the L1 dcache
*/
static inline void *arch_cache_uncached_ptr_get(void __sparse_cache *ptr)
{
/* Rewrite address bits [31:29] from the cached to the uncached region. */
return (void *)z_xtrpoflip((__sparse_force uint32_t)ptr,
CONFIG_XTENSA_UNCACHED_REGION,
CONFIG_XTENSA_CACHED_REGION);
}
#else
/* Without CONFIG_CACHE_DOUBLEMAP there is no second mapping: no pointer
 * is "cached" or "uncached", and conversion is the identity.
 */
static inline bool arch_cache_is_ptr_cached(void *ptr)
{
ARG_UNUSED(ptr);
return false;
}
static inline bool arch_cache_is_ptr_uncached(void *ptr)
{
ARG_UNUSED(ptr);
return false;
}
static inline void *arch_cache_cached_ptr_get(void *ptr)
{
return ptr;
}
static inline void *arch_cache_uncached_ptr_get(void *ptr)
{
return ptr;
}
#endif
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_CACHE_H_ */
``` | /content/code_sandbox/include/zephyr/arch/xtensa/cache.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,353 |
```objective-c
/*
*
*/
#include <inttypes.h>
#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_GDBSTUB_SYS_H_
#define ZEPHYR_INCLUDE_ARCH_XTENSA_GDBSTUB_SYS_H_
#ifdef CONFIG_GDBSTUB
#define XTREG_GRP_MASK 0x0F00
#define XTREG_GRP_GENERAL 0x0000
#define XTREG_GRP_ADDR 0x0100
#define XTREG_GRP_SPECIAL 0x0200
#define XTREG_GRP_USER 0x0300
/**
* @brief Register description for GDB stub.
*
* Values are based on gdb/gdb/xtensa-config.c in the Xtensa overlay,
* where registers are defined using XTREG() macro:
* XTREG(index,ofs,bsz,sz,al,tnum,flg,cp,ty,gr,name,fet,sto,mas,ct,x,y)
*
* Translation:
* idx : index
* regno : tnum
* 0x00xx : General Registers (A0 - A15, PC)
* 0x01xx : Address Registers (AR0 - AR31/AR63)
* 0x02xx : Special Registers (access via RSR/WSR)
* 0x03xx : User Registers (access via RUR/WUR)
* byte_size : sz
* gpkt_offset : ofs
*/
struct xtensa_register {
/** Register value (cached copy used by the stub). */
uint32_t val;
/** GDB register index (for p/P packets) */
uint8_t idx;
/** Size of register */
uint8_t byte_size;
/** Xtensa register number (high byte encodes the group, see XTREG_GRP_*) */
uint16_t regno;
/**
 * Offset of this register in GDB G-packet.
 * -1 if register is not in G-packet.
 */
int16_t gpkt_offset;
/**
 * Offset of saved register in stack frame.
 * 0 if not saved in stack frame.
 */
int8_t stack_offset;
/** Sequence number */
uint8_t seqno;
/**
 * Set to 1 if register should not be written
 * to during debugging.
 */
uint8_t is_read_only:1;
};
/* Due to Xtensa SoCs being highly configurable,
* the register files between SoCs are not identical.
*
* This means generic registers can, sometimes, have
* different offsets from start of register files
* needed to communicate with GDB.
*
* Therefore, it is better to defer to the SoC layer
* for proper support for GDB.
*/
#include <gdbstub/soc.h>
/**
* @brief Architecture specific GDB context.
*/
struct gdb_ctx {
/** Exception reason */
unsigned int exception;
/** Register descriptions (array of num_regs entries, supplied by the SoC layer) */
struct xtensa_register *regs;
/** Number of registers */
uint8_t num_regs;
/** Sequence number */
uint8_t seqno;
/** Index in register descriptions of A0 register */
uint8_t a0_idx;
/** Index in register descriptions of AR0 register */
uint8_t ar_idx;
/** Index in register descriptions of WINDOWBASE register */
uint8_t wb_idx;
};
/**
* Test if the register is a logical address register (A0 - A15).
*
* @retval true if register is A0 - A15
* @retval false if register is not A0 - A15
*/
static inline bool gdb_xtensa_is_logical_addr_reg(struct xtensa_register *reg)
{
	/* A0-A15 are the general-group registers numbered 0-15. */
	return reg->regno < 16;
}
/**
* Test if the register is a address register (AR0 - AR31/AR63).
*
* @retval true if register is AR0 - AR31/AR63
* @retval false if not
*/
static inline bool gdb_xtensa_is_address_reg(struct xtensa_register *reg)
{
	/* AR0..AR31/AR63 carry the XTREG_GRP_ADDR group code in regno. */
	return (reg->regno & XTREG_GRP_MASK) == XTREG_GRP_ADDR;
}
/**
* Test if the register is a special register that needs to be
* accessed via RSR/WSR.
*
* @retval true if special register
* @retval false if not
*/
static inline bool gdb_xtensa_is_special_reg(struct xtensa_register *reg)
{
	/* Special registers (RSR/WSR access) carry the XTREG_GRP_SPECIAL code. */
	return (reg->regno & XTREG_GRP_MASK) == XTREG_GRP_SPECIAL;
}
/**
* Test if the register is a user register that needs to be
* accessed via RUR/WUR.
*
* @retval true if user register
* @retval false if not
*/
static inline bool gdb_xtensa_is_user_reg(struct xtensa_register *reg)
{
	/* User registers (RUR/WUR access) carry the XTREG_GRP_USER code. */
	return (reg->regno & XTREG_GRP_MASK) == XTREG_GRP_USER;
}
#endif /* CONFIG_GDBSTUB */
#endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_GDBSTUB_SYS_H_ */
``` | /content/code_sandbox/include/zephyr/arch/xtensa/gdbstub.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,085 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_ARCH_INLINES_H_
#define ZEPHYR_INCLUDE_ARCH_XTENSA_ARCH_INLINES_H_
#ifndef _ASMLANGUAGE
#include <zephyr/kernel_structs.h>
#include <zephyr/zsr.h>
/**
 * @brief Read a special register.
 *
 * @param sr Name of special register.
 *
 * @note @a sr must be a string literal: it is pasted into the "rsr."
 * mnemonic at compile time.
 *
 * @return Value of special register.
 */
#define XTENSA_RSR(sr) \
({uint32_t v; \
__asm__ volatile ("rsr." sr " %0" : "=a"(v)); \
v; })
/**
 * @brief Write to a special register.
 *
 * @param sr Name of special register (string literal, pasted into the mnemonic).
 * @param v Value to be written to special register.
 */
#define XTENSA_WSR(sr, v) \
do { \
__asm__ volatile ("wsr." sr " %0" : : "r"(v)); \
} while (false)
/**
 * @brief Read a user register.
 *
 * @param ur Name of user register (string literal, pasted into the mnemonic).
 *
 * @return Value of user register.
 */
#define XTENSA_RUR(ur) \
({uint32_t v; \
__asm__ volatile ("rur." ur " %0" : "=a"(v)); \
v; })
/**
 * @brief Write to a user register.
 *
 * @param ur Name of user register (string literal, pasted into the mnemonic).
 * @param v Value to be written to user register.
 */
#define XTENSA_WUR(ur, v) \
do { \
__asm__ volatile ("wur." ur " %0" : : "r"(v)); \
} while (false)
/** Implementation of @ref arch_curr_cpu. */
static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
{
_cpu_t *cpu;
/* The per-CPU struct pointer is stashed in the ZSR_CPU special register. */
cpu = (_cpu_t *)XTENSA_RSR(ZSR_CPU_STR);
return cpu;
}
/** Implementation of @ref arch_proc_id. */
static ALWAYS_INLINE uint32_t arch_proc_id(void)
{
uint32_t prid;
/* PRID is the hardware processor-ID special register. */
__asm__ volatile("rsr %0, PRID" : "=r"(prid));
return prid;
}
#ifdef CONFIG_SOC_HAS_RUNTIME_NUM_CPUS
/* Some SoCs only discover the CPU count at runtime; SoC code sets this. */
extern unsigned int soc_num_cpus;
#endif
/** Implementation of @ref arch_num_cpus. */
static ALWAYS_INLINE unsigned int arch_num_cpus(void)
{
#ifdef CONFIG_SOC_HAS_RUNTIME_NUM_CPUS
	return soc_num_cpus;
#else
	/* Otherwise the count is fixed at build time. */
	return CONFIG_MP_MAX_NUM_CPUS;
#endif
}
#endif /* !_ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_ARCH_INLINES_H_ */
``` | /content/code_sandbox/include/zephyr/arch/xtensa/arch_inlines.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 552 |
```c
/*
*/
/**
* @file
* @brief Xtensa specific kernel interface header
* This header contains the Xtensa specific kernel interface. It is included
* by the generic kernel interface header (include/zephyr/arch/cpu.h)
*/
#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_XTENSA_ARCH_H_
#include <zephyr/irq.h>
#include <zephyr/devicetree.h>
#if !defined(_ASMLANGUAGE) && !defined(__ASSEMBLER__)
#include <zephyr/types.h>
#include <zephyr/toolchain.h>
#include <zephyr/arch/common/sys_bitops.h>
#include <zephyr/arch/common/sys_io.h>
#include <zephyr/arch/common/ffs.h>
#include <zephyr/sw_isr_table.h>
#include <zephyr/arch/xtensa/syscall.h>
#include <zephyr/arch/xtensa/thread.h>
#include <zephyr/arch/xtensa/irq.h>
#include <xtensa/config/core.h>
#include <zephyr/arch/common/addr_types.h>
#include <zephyr/arch/xtensa/gdbstub.h>
#include <zephyr/debug/sparse.h>
#include <zephyr/arch/xtensa/thread_stack.h>
#include <zephyr/sys/slist.h>
#include <zephyr/drivers/timer/system_timer.h>
#ifdef CONFIG_XTENSA_MMU
#include <zephyr/arch/xtensa/xtensa_mmu.h>
#endif
#ifdef CONFIG_XTENSA_MPU
#include <zephyr/arch/xtensa/mpu.h>
#endif
/**
* @defgroup xtensa_apis Xtensa APIs
* @ingroup arch-interface
* @{
* @}
*
* @defgroup xtensa_internal_apis Xtensa Internal APIs
* @ingroup xtensa_apis
* @{
* @}
*/
#include <zephyr/arch/xtensa/exception.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Architecture-specific per-memory-domain state. */
struct arch_mem_domain {
#ifdef CONFIG_XTENSA_MMU
	/* Page tables for this domain (page-aligned). */
	uint32_t *ptables __aligned(CONFIG_MMU_PAGE_SIZE);
	/* Address space ID associated with this domain. */
	uint8_t asid;
	/* NOTE(review): presumably tracks whether ptables were modified
	 * since last applied — confirm against the MMU code.
	 */
	bool dirty;
#endif
#ifdef CONFIG_XTENSA_MPU
	/* Foreground MPU map for this domain. */
	struct xtensa_mpu_map mpu_map;
#endif
	/* Node for linking this domain into a kernel-maintained list. */
	sys_snode_t node;
};
/**
* @brief Generate hardware exception.
*
* This generates hardware exception which is used by ARCH_EXCEPT().
*
* @param reason_p Reason for exception.
*/
void xtensa_arch_except(int reason_p);
/**
* @brief Generate kernel oops.
*
* This generates kernel oops which is used by arch_syscall_oops().
*
* @param reason_p Reason for exception.
* @param ssf Stack pointer.
*/
void xtensa_arch_kernel_oops(int reason_p, void *ssf);
#ifdef CONFIG_USERSPACE
#define ARCH_EXCEPT(reason_p) do { \
if (k_is_user_context()) { \
arch_syscall_invoke1(reason_p, \
K_SYSCALL_XTENSA_USER_FAULT); \
} else { \
xtensa_arch_except(reason_p); \
} \
CODE_UNREACHABLE; \
} while (false)
#else
#define ARCH_EXCEPT(reason_p) do { \
xtensa_arch_except(reason_p); \
CODE_UNREACHABLE; \
} while (false)
#endif
__syscall void xtensa_user_fault(unsigned int reason);
#include <zephyr/syscalls/arch.h>
/* internal routine documented in C file, needed by IRQ_CONNECT() macro */
void z_irq_priority_set(uint32_t irq, uint32_t prio, uint32_t flags);
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
{ \
Z_ISR_DECLARE(irq_p, flags_p, isr_p, isr_param_p); \
}
/** Implementation of @ref arch_k_cycle_get_32. */
static inline uint32_t arch_k_cycle_get_32(void)
{
	/* Thin wrapper over the system timer driver. */
	return sys_clock_cycle_get_32();
}
/** Implementation of @ref arch_k_cycle_get_64. */
static inline uint64_t arch_k_cycle_get_64(void)
{
	/* Thin wrapper over the system timer driver. */
	return sys_clock_cycle_get_64();
}
/** Implementation of @ref arch_nop. */
static ALWAYS_INLINE void arch_nop(void)
{
	/* Single architectural no-op instruction. */
	__asm__ volatile("nop");
}
/**
 * @brief Lock VECBASE if supported by hardware.
 *
 * The bit 0 of VECBASE acts as a lock bit on hardware supporting
 * this feature. When this bit is set, VECBASE cannot be changed
 * until it is cleared by hardware reset. When the hardware does not
 * support this bit, it is hardwired to 0.
 */
static ALWAYS_INLINE void xtensa_vecbase_lock(void)
{
	int vecbase;
	/* Read-modify-write VECBASE with bit 0 (lock) set; rsync makes
	 * the special-register update take effect before continuing.
	 */
	__asm__ volatile("rsr.vecbase %0" : "=r" (vecbase));
	__asm__ volatile("wsr.vecbase %0; rsync" : : "r" (vecbase | 1));
}
#if defined(CONFIG_XTENSA_RPO_CACHE) || defined(__DOXYGEN__)
#if defined(CONFIG_ARCH_HAS_COHERENCE) || defined(__DOXYGEN__)
/** Implementation of @ref arch_mem_coherent.
 *
 * A pointer is coherent (safe for cross-CPU sharing) when it falls in
 * the uncached RPO region. Regions are 512MB each, so the region index
 * lives in the top three address bits.
 */
static inline bool arch_mem_coherent(void *ptr)
{
	/* uintptr_t (not size_t) is the correct type for carrying
	 * a pointer value through integer arithmetic.
	 */
	uintptr_t addr = (uintptr_t) ptr;

	return (addr >> 29) == CONFIG_XTENSA_UNCACHED_REGION;
}
#endif
/* Utility to generate an unrolled and optimal[1] code sequence to set
* the RPO TLB registers (contra the HAL cacheattr macros, which
* generate larger code and can't be called from C), based on the
* KERNEL_COHERENCE configuration in use. Selects RPO attribute "2"
* for regions (including MMIO registers in region zero) which want to
* bypass L1, "4" for the cached region which wants writeback, and
* "15" (invalid) elsewhere.
*
* Note that on cores that have the "translation" option set, we need
* to put an identity mapping in the high bits. Also per spec
* changing the current code region (by definition cached) requires
* that WITLB be followed by an ISYNC and that both instructions live
* in the same cache line (two 3-byte instructions fit in an 8-byte
* aligned region, so that's guaranteed not to cross a cache line
* boundary).
*
* [1] With the sole exception of gcc's infuriating insistence on
* emitting a precomputed literal for addr + addrincr instead of
* computing it with a single ADD instruction from values it already
* has in registers. Explicitly assigning the variables to registers
* via an attribute works, but then emits needless MOV instructions
* instead. I tell myself it's just 32 bytes of .text, but... Sigh.
*/
#define _REGION_ATTR(r) \
((r) == 0 ? 2 : \
((r) == CONFIG_XTENSA_CACHED_REGION ? 4 : \
((r) == CONFIG_XTENSA_UNCACHED_REGION ? 2 : 15)))
#define _SET_ONE_TLB(region) do { \
uint32_t attr = _REGION_ATTR(region); \
if (XCHAL_HAVE_XLT_CACHEATTR) { \
attr |= addr; /* RPO with translation */ \
} \
if (region != CONFIG_XTENSA_CACHED_REGION) { \
__asm__ volatile("wdtlb %0, %1; witlb %0, %1" \
:: "r"(attr), "r"(addr)); \
} else { \
__asm__ volatile("wdtlb %0, %1" \
:: "r"(attr), "r"(addr)); \
__asm__ volatile("j 1f; .align 8; 1:"); \
__asm__ volatile("witlb %0, %1; isync" \
:: "r"(attr), "r"(addr)); \
} \
addr += addrincr; \
} while (0)
/**
* @brief Setup RPO TLB registers.
*/
#define ARCH_XTENSA_SET_RPO_TLB() \
do { \
register uint32_t addr = 0, addrincr = 0x20000000; \
FOR_EACH(_SET_ONE_TLB, (;), 0, 1, 2, 3, 4, 5, 6, 7); \
} while (0)
#endif /* CONFIG_XTENSA_RPO_CACHE */
#if defined(CONFIG_XTENSA_MMU) || defined(__DOXYGEN__)
/**
* @brief Perform additional steps after MMU initialization.
*
* This performs additional steps related to memory management
 * after the main MMU initialization code. This needs to be defined
 * in the SoC layer. The default is to do nothing.
*
* @param is_core0 True if this is called while executing on
* CPU core #0.
*/
void arch_xtensa_mmu_post_init(bool is_core0);
#endif
#ifdef __cplusplus
}
#endif
#endif /* !defined(_ASMLANGUAGE) && !defined(__ASSEMBLER__) */
#endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_ARCH_H_ */
``` | /content/code_sandbox/include/zephyr/arch/xtensa/arch.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,003 |
```c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_THREAD_H_
#define ZEPHYR_INCLUDE_ARCH_XTENSA_THREAD_H_
#include <stdint.h>
#ifndef _ASMLANGUAGE
#ifdef CONFIG_XTENSA_MPU
#include <zephyr/arch/xtensa/mpu.h>
#endif
/* Xtensa doesn't use these structs, but Zephyr core requires they be
 * defined so they can be included in struct _thread_base. Dummy
 * field exists for sizeof compatibility with C++.
 */
struct _callee_saved {
	char dummy;
};
/* Alias used by the generic kernel headers. */
typedef struct _callee_saved _callee_saved_t;
/* Per-thread architecture-specific data, embedded in struct k_thread. */
struct _thread_arch {
	/* NOTE(review): name suggests the CPU this thread last ran on —
	 * confirm against the context switch code.
	 */
	uint32_t last_cpu;
#ifdef CONFIG_USERSPACE
#ifdef CONFIG_XTENSA_MMU
	/* Page tables used while this thread runs — presumably those of
	 * its memory domain; verify against the MMU code.
	 */
	uint32_t *ptables;
#endif
#ifdef CONFIG_XTENSA_MPU
	/* Pointer to the memory domain's MPU map. */
	struct xtensa_mpu_map *mpu_map;
#endif
	/* Initial privilege mode stack pointer when doing a system call.
	 * Unset for supervisor threads.
	 */
	uint8_t *psp;
#endif
};
typedef struct _thread_arch _thread_arch_t;
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_THREAD_H_ */
``` | /content/code_sandbox/include/zephyr/arch/xtensa/thread.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 267 |
```c
/*
*
*/
/**
* @file
* @brief Xtensa specific syscall header
*
* This header contains the Xtensa specific syscall interface. It is
* included by the syscall interface architecture-abstraction header
* (include/arch/syscall.h)
*/
#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_SYSCALL_H_
#define ZEPHYR_INCLUDE_ARCH_XTENSA_SYSCALL_H_
#ifdef CONFIG_USERSPACE
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <stdbool.h>
#include <zephyr/linker/sections.h>
#include <zephyr/sys/util_macro.h>
#include <xtensa/config/core-isa.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
uintptr_t xtensa_syscall_helper(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3, uintptr_t arg4,
uintptr_t arg5, uintptr_t arg6,
uintptr_t call_id);
#define SYSINL ALWAYS_INLINE
#else
#define SYSINL inline
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
/**
* We are following Linux Xtensa syscall ABI:
*
* syscall number arg1, arg2, arg3, arg4, arg5, arg6
* -------------- ----------------------------------
* a2 a6, a3, a4, a5, a8, a9
*
**/
/* Six-argument syscall. Per the ABI table above, a2 carries the call ID
 * on entry and the return value on exit.
 */
static SYSINL uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t arg6,
					     uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return xtensa_syscall_helper(arg1, arg2, arg3, arg4, arg5, arg6, call_id);
#else
	/* Pin each argument to its ABI-mandated register. */
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;
	register uintptr_t a4 __asm__("%a4") = arg3;
	register uintptr_t a5 __asm__("%a5") = arg4;
	register uintptr_t a8 __asm__("%a8") = arg5;
	register uintptr_t a9 __asm__("%a9") = arg6;
	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3), "r" (a4),
			   "r" (a5), "r" (a8), "r" (a9)
			 : "memory");
	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}
/* Five-argument syscall; the helper's unused argument slot is passed as 0. */
static SYSINL uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return xtensa_syscall_helper(arg1, arg2, arg3, arg4, arg5, 0, call_id);
#else
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;
	register uintptr_t a4 __asm__("%a4") = arg3;
	register uintptr_t a5 __asm__("%a5") = arg4;
	register uintptr_t a8 __asm__("%a8") = arg5;
	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3), "r" (a4),
			   "r" (a5), "r" (a8)
			 : "memory");
	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}
/* Four-argument syscall; unused helper slots are passed as 0. */
static SYSINL uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return xtensa_syscall_helper(arg1, arg2, arg3, arg4, 0, 0, call_id);
#else
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;
	register uintptr_t a4 __asm__("%a4") = arg3;
	register uintptr_t a5 __asm__("%a5") = arg4;
	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3), "r" (a4),
			   "r" (a5)
			 : "memory");
	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}
/* Three-argument syscall; unused helper slots are passed as 0. */
static SYSINL uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return xtensa_syscall_helper(arg1, arg2, arg3, 0, 0, 0, call_id);
#else
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;
	register uintptr_t a4 __asm__("%a4") = arg3;
	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3), "r" (a4)
			 : "memory");
	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}
/* Two-argument syscall; unused helper slots are passed as 0. */
static SYSINL uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return xtensa_syscall_helper(arg1, arg2, 0, 0, 0, 0, call_id);
#else
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;
	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3)
			 : "memory");
	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}
/* One-argument syscall; unused helper slots are passed as 0. */
static SYSINL uintptr_t arch_syscall_invoke1(uintptr_t arg1,
					     uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return xtensa_syscall_helper(arg1, 0, 0, 0, 0, 0, call_id);
#else
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6)
			 : "memory");
	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}
/* Zero-argument syscall; only the call ID is passed (in a2). */
static SYSINL uintptr_t arch_syscall_invoke0(uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return xtensa_syscall_helper(0, 0, 0, 0, 0, 0, call_id);
#else
	register uintptr_t a2 __asm__("%a2") = call_id;
	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2)
			 : "memory");
	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}
/*
 * There is no easy (or generic) way to figure out if a thread is running
 * in un-privileged mode. Reading the current ring (PS.CRING) is a privileged
 * instruction and thread local storage is not available in xcc.
 */
static inline bool arch_is_user_context(void)
{
#if XCHAL_HAVE_THREADPTR
	uint32_t thread;
	/* THREADPTR is a user register readable from any privilege level. */
	__asm__ volatile(
		"rur.THREADPTR %0\n\t"
		: "=a" (thread)
	);
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	extern __thread uint32_t is_user_mode;
	/* With TLS, THREADPTR holds the TLS base; a zero base means no
	 * TLS area is set up yet, so this cannot be a user thread.
	 */
	if (!thread) {
		return false;
	}
	return is_user_mode != 0;
#else
	/* Without TLS, a non-zero THREADPTR marks a user context. */
	return !!thread;
#endif
#else /* XCHAL_HAVE_THREADPTR */
	/* No THREADPTR on this core; fall back to an out-of-line helper. */
	extern bool xtensa_is_user_context(void);
	return xtensa_is_user_context();
#endif /* XCHAL_HAVE_THREADPTR */
}
#undef SYSINL
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* CONFIG_USERSPACE */
#endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_SYSCALL_H_ */
``` | /content/code_sandbox/include/zephyr/arch/xtensa/syscall.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,985 |
```c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_THREAD_STACK_H_
#define ZEPHYR_INCLUDE_ARCH_XTENSA_THREAD_STACK_H_
#include <xtensa/config/core-isa.h>
#include <zephyr/toolchain.h>
#include <zephyr/sys/util.h>
#ifdef CONFIG_KERNEL_COHERENCE
#define ARCH_STACK_PTR_ALIGN XCHAL_DCACHE_LINESIZE
#else
#define ARCH_STACK_PTR_ALIGN 16
#endif
#ifdef CONFIG_USERSPACE
#ifdef CONFIG_XTENSA_MMU
#define XTENSA_STACK_BASE_ALIGN CONFIG_MMU_PAGE_SIZE
#define XTENSA_STACK_SIZE_ALIGN CONFIG_MMU_PAGE_SIZE
#endif
#ifdef CONFIG_XTENSA_MPU
#define XTENSA_STACK_BASE_ALIGN XCHAL_MPU_ALIGN
#define XTENSA_STACK_SIZE_ALIGN XCHAL_MPU_ALIGN
#endif
#else
#define XTENSA_STACK_BASE_ALIGN ARCH_STACK_PTR_ALIGN
#define XTENSA_STACK_SIZE_ALIGN ARCH_STACK_PTR_ALIGN
#endif
/*
*
* High memory addresses
*
* +-------------------+ <- thread.stack_info.start + thread.stack_info.size
* | TLS |
* +-------------------+ <- initial sp (computable with thread.stack_info.delta)
* | |
* | Thread stack |
* | |
* +-------------------+ <- thread.stack_info.start
* | Privileged stack | } CONFIG_MMU_PAGE_SIZE
* +-------------------+ <- thread.stack_obj
*
* Low Memory addresses
*/
#ifndef _ASMLANGUAGE
/* Thread stack header, placed below the thread stack proper (see the
 * memory layout diagram above).
 */
struct xtensa_thread_stack_header {
#if defined(CONFIG_XTENSA_MMU) || defined(CONFIG_XTENSA_MPU)
	/* Stack used while running in privileged mode on behalf of
	 * a user thread (system calls).
	 */
	char privilege_stack[CONFIG_PRIVILEGED_STACK_SIZE];
#endif /* CONFIG_XTENSA_MMU || CONFIG_XTENSA_MPU */
} __packed __aligned(XTENSA_STACK_BASE_ALIGN);
#if defined(CONFIG_XTENSA_MMU) || defined(CONFIG_XTENSA_MPU)
#define ARCH_THREAD_STACK_RESERVED \
	sizeof(struct xtensa_thread_stack_header)
#endif /* CONFIG_XTENSA_MMU || CONFIG_XTENSA_MPU */
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) XTENSA_STACK_BASE_ALIGN
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
ROUND_UP((size), XTENSA_STACK_SIZE_ALIGN)
/* kernel stack */
#define ARCH_KERNEL_STACK_RESERVED 0
#define ARCH_KERNEL_STACK_OBJ_ALIGN ARCH_STACK_PTR_ALIGN
#endif /* _ASMLANGUAGE */
#endif
``` | /content/code_sandbox/include/zephyr/arch/xtensa/thread_stack.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 517 |
```c
/*
*/
#ifndef ZEPHYR_INCLUDE_ATOMIC_XTENSA_H_
#define ZEPHYR_INCLUDE_ATOMIC_XTENSA_H_
/* Included from <zephyr/sys/atomic.h> */
/* Recent GCC versions actually do have working atomics support on
* Xtensa (and so should work with CONFIG_ATOMIC_OPERATIONS_BUILTIN),
* but existing versions of Xtensa's XCC do not. So we define an
* inline implementation here that is more or less identical
*/
/** Implementation of @ref atomic_get. */
static ALWAYS_INLINE atomic_val_t atomic_get(const atomic_t *target)
{
	atomic_val_t ret;
	/* Actual Xtensa hardware seems to have only in-order
	 * pipelines, but the architecture does define a barrier load,
	 * so use it. There is a matching s32ri instruction, but
	 * nothing in the Zephyr API requires a barrier store (all the
	 * atomic write ops have exchange semantics).
	 */
	__asm__ volatile("l32ai %0, %1, 0"
			 : "=r"(ret) : "r"(target) : "memory");
	return ret;
}
/**
* @brief Xtensa specific atomic compare-and-set (CAS).
*
* @param addr Address of atomic variable.
* @param oldval Original value to compare against.
* @param newval New value to store.
*
* This utilizes SCOMPARE1 register and s32c1i instruction to
* perform compare-and-set atomic operation. This will
* unconditionally read from the atomic variable at @p addr
* before the comparison. This value is returned from
* the function.
*
* @return The value at the memory location before CAS.
*
* @see atomic_cas.
*/
static ALWAYS_INLINE
atomic_val_t xtensa_cas(atomic_t *addr, atomic_val_t oldval,
			atomic_val_t newval)
{
	/* SCOMPARE1 holds the compare value; s32c1i stores newval only
	 * when *addr == SCOMPARE1, and in all cases loads the previous
	 * *addr into the register that held newval.
	 */
	__asm__ volatile("wsr %1, SCOMPARE1; s32c1i %0, %2, 0"
			 : "+r"(newval), "+r"(oldval) : "r"(addr) : "memory");
	return newval; /* got swapped with the old memory by s32c1i */
}
/** Implementation of @ref atomic_cas. */
static ALWAYS_INLINE
bool atomic_cas(atomic_t *target, atomic_val_t oldval, atomic_val_t newval)
{
	/* The CAS succeeded exactly when the value read back from
	 * memory matches the expected old value.
	 */
	atomic_val_t prev = xtensa_cas(target, oldval, newval);

	return prev == oldval;
}
/** Implementation of @ref atomic_ptr_cas. */
static ALWAYS_INLINE
bool atomic_ptr_cas(atomic_ptr_t *target, void *oldval, void *newval)
{
	/* Pointers are the same width as atomic_val_t here, so reuse
	 * the integer CAS primitive.
	 */
	atomic_val_t prev = xtensa_cas((atomic_t *)target,
				       (atomic_val_t)oldval,
				       (atomic_val_t)newval);

	return prev == (atomic_val_t)oldval;
}
/* Generates an atomic exchange sequence that swaps the value at
 * address "target", whose old value is read to be "cur", with the
 * specified expression. Evaluates to the old value which was
 * atomically replaced.
 *
 * The loop retries until xtensa_cas() reports that no other writer
 * modified *target between the plain read of "cur" and the store.
 */
#define Z__GEN_ATOMXCHG(expr) ({ \
	atomic_val_t res, cur; \
	do { \
		cur = *target; \
		res = xtensa_cas(target, cur, (expr)); \
	} while (res != cur); \
	res; })
/* All read-modify-write operations below are built on the CAS retry
 * loop in Z__GEN_ATOMXCHG() and evaluate to the value the target held
 * before the operation.
 */
/** Implementation of @ref atomic_set. */
static ALWAYS_INLINE
atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
{
	return Z__GEN_ATOMXCHG(value);
}
/** Implementation of @ref atomic_add. */
static ALWAYS_INLINE
atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
{
	return Z__GEN_ATOMXCHG(cur + value);
}
/** Implementation of @ref atomic_sub. */
static ALWAYS_INLINE
atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
{
	return Z__GEN_ATOMXCHG(cur - value);
}
/** Implementation of @ref atomic_inc. */
static ALWAYS_INLINE
atomic_val_t atomic_inc(atomic_t *target)
{
	return Z__GEN_ATOMXCHG(cur + 1);
}
/** Implementation of @ref atomic_dec. */
static ALWAYS_INLINE
atomic_val_t atomic_dec(atomic_t *target)
{
	return Z__GEN_ATOMXCHG(cur - 1);
}
/** Implementation of @ref atomic_or. */
static ALWAYS_INLINE atomic_val_t atomic_or(atomic_t *target,
					    atomic_val_t value)
{
	return Z__GEN_ATOMXCHG(cur | value);
}
/** Implementation of @ref atomic_xor. */
static ALWAYS_INLINE atomic_val_t atomic_xor(atomic_t *target,
					     atomic_val_t value)
{
	return Z__GEN_ATOMXCHG(cur ^ value);
}
/** Implementation of @ref atomic_and. */
static ALWAYS_INLINE atomic_val_t atomic_and(atomic_t *target,
					     atomic_val_t value)
{
	return Z__GEN_ATOMXCHG(cur & value);
}
/** Implementation of @ref atomic_nand. */
static ALWAYS_INLINE atomic_val_t atomic_nand(atomic_t *target,
					      atomic_val_t value)
{
	return Z__GEN_ATOMXCHG(~(cur & value));
}
/** Implementation of @ref atomic_ptr_get. */
static ALWAYS_INLINE void *atomic_ptr_get(const atomic_ptr_t *target)
{
	/* Pointer variants delegate to the integer primitives. */
	return (void *) atomic_get((atomic_t *)target);
}
/** Implementation of @ref atomic_ptr_set. */
static ALWAYS_INLINE void *atomic_ptr_set(atomic_ptr_t *target, void *value)
{
	return (void *) atomic_set((atomic_t *) target, (atomic_val_t) value);
}
/** Implementation of @ref atomic_clear. */
static ALWAYS_INLINE atomic_val_t atomic_clear(atomic_t *target)
{
	return atomic_set(target, 0);
}
/** Implementation of @ref atomic_ptr_clear. */
static ALWAYS_INLINE void *atomic_ptr_clear(atomic_ptr_t *target)
{
	return (void *) atomic_set((atomic_t *) target, 0);
}
#endif /* ZEPHYR_INCLUDE_ATOMIC_XTENSA_H_ */
``` | /content/code_sandbox/include/zephyr/arch/xtensa/atomic_xtensa.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,280 |
```c
/*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_HYPERCALL_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_HYPERCALL_H_
/* Xen hypercall entry stubs, defined in hypercall.S by HYPERCALL(hypercall).
 * Each "op" selects a sub-operation; values come from the Xen public
 * interface headers.
 */
int HYPERVISOR_console_io(int op, int cnt, char *str);
int HYPERVISOR_sched_op(int op, void *param);
int HYPERVISOR_event_channel_op(int op, void *param);
int HYPERVISOR_hvm_op(int op, void *param);
int HYPERVISOR_memory_op(int op, void *param);
int HYPERVISOR_grant_table_op(int op, void *uop, unsigned int count);
#ifdef CONFIG_XEN_DOM0
/* Domain control hypercall; only available when running as Dom0. */
int HYPERVISOR_domctl(void *param);
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_HYPERCALL_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/hypercall.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 181 |
```c
/*
*
*/
#include <stdint.h>
#include <zephyr/toolchain.h>
#include <zephyr/sys/util_macro.h>
#include <xtensa/config/core-isa.h>
#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_XTENSA_MPU_H
#define ZEPHYR_INCLUDE_ARCH_XTENSA_XTENSA_MPU_H
/**
* @defgroup xtensa_mpu_apis Xtensa Memory Protection Unit (MPU) APIs
* @ingroup xtensa_apis
* @{
*/
/** Number of available entries in the MPU table. */
#define XTENSA_MPU_NUM_ENTRIES XCHAL_MPU_ENTRIES
/**
* @name MPU memory region access rights.
*
* @note These are NOT bit masks, and must be used as whole value.
*
* @{
*/
/** Kernel and user modes no access. */
#define XTENSA_MPU_ACCESS_P_NA_U_NA (0)
/** Kernel mode execution only. */
#define XTENSA_MPU_ACCESS_P_X_U_NA (2)
/** User mode execution only. */
#define XTENSA_MPU_ACCESS_P_NA_U_X (3)
/** Kernel mode read only. */
#define XTENSA_MPU_ACCESS_P_RO_U_NA (4)
/** Kernel mode read and execution. */
#define XTENSA_MPU_ACCESS_P_RX_U_NA (5)
/** Kernel mode read and write. */
#define XTENSA_MPU_ACCESS_P_RW_U_NA (6)
/** Kernel mode read, write and execution. */
#define XTENSA_MPU_ACCESS_P_RWX_U_NA (7)
/** Kernel and user modes write only. */
#define XTENSA_MPU_ACCESS_P_WO_U_WO (8)
/** Kernel mode read, write. User mode read, write and execution. */
#define XTENSA_MPU_ACCESS_P_RW_U_RWX (9)
/** Kernel mode read and write. User mode read only. */
#define XTENSA_MPU_ACCESS_P_RW_U_RO (10)
/** Kernel mode read, write and execution. User mode read and execution. */
#define XTENSA_MPU_ACCESS_P_RWX_U_RX (11)
/** Kernel and user modes read only. */
#define XTENSA_MPU_ACCESS_P_RO_U_RO (12)
/** Kernel and user modes read and execution. */
#define XTENSA_MPU_ACCESS_P_RX_U_RX (13)
/** Kernel and user modes read and write. */
#define XTENSA_MPU_ACCESS_P_RW_U_RW (14)
/** Kernel and user modes read, write and execution. */
#define XTENSA_MPU_ACCESS_P_RWX_U_RWX (15)
/**
* @}
*/
/**
* @brief Foreground MPU Entry.
*
* This holds the as, at register values for one MPU entry which can be
* used directly by WPTLB.
*/
struct xtensa_mpu_entry {
/**
* Content of as register for WPTLB.
*
* This contains the start address, the enable bit, and the lock bit.
*/
union {
/** Raw value. */
uint32_t raw;
/** Individual parts. */
struct {
/**
* Enable bit for this entry.
*
* Modifying this will also modify the corresponding bit of
* the MPUENB register.
*/
uint32_t enable:1;
/**
* Lock bit for this entry.
*
* Usable only if MPULOCKABLE parameter is enabled in
* processor configuration.
*
* Once set:
* - This cannot be cleared until reset.
* - This entry can no longer be modified.
* - The start address of the next entry also
* cannot be modified.
*/
uint32_t lock:1;
/** Must be zero. */
uint32_t mbz:3;
/**
* Start address of this MPU entry.
*
* Effective bits in this portion are affected by the minimum
* segment size of each MPU entry, ranging from 32 bytes to 4GB.
*/
uint32_t start_addr:27;
} p;
} as;
/**
* Content of at register for WPTLB.
*
* This contains the memory type, access rights, and the segment number.
*/
union {
/** Raw value. */
uint32_t raw;
/** Individual parts. */
struct {
/** The segment number of this MPU entry. */
uint32_t segment:5;
/** Must be zero (part 1). */
uint32_t mbz1:3;
/**
* Access rights associated with this MPU entry.
*
* This dictates the access right from the start address of
* this entry, to the start address of next entry.
*
* Refer to XTENSA_MPU_ACCESS_* macros for available rights.
*/
uint32_t access_rights:4;
/**
* Memory type associated with this MPU entry.
*
* This dictates the memory type from the start address of
* this entry, to the start address of next entry.
*
* This affects how the hardware treats the memory, for example,
* cacheable vs non-cacheable, shareable vs non-shareable.
* Refer to the Xtensa Instruction Set Architecture (ISA) manual
* for general description, and the processor manual for processor
* specific information.
*/
uint32_t memory_type:9;
/** Must be zero (part 2). */
uint32_t mbz2:11;
} p;
} at;
};
/**
* @brief Struct to hold foreground MPU map and its entries.
*/
struct xtensa_mpu_map {
/**
* Array of MPU entries.
*/
struct xtensa_mpu_entry entries[XTENSA_MPU_NUM_ENTRIES];
};
/**
* @name Memory domain and partitions
* @{
*/
typedef uint32_t k_mem_partition_attr_t;
/* True when either privileged or user mode has execute rights. */
static inline bool xtensa_mem_partition_is_executable(k_mem_partition_attr_t access_rights)
{
	switch (access_rights) {
	case XTENSA_MPU_ACCESS_P_X_U_NA:
	case XTENSA_MPU_ACCESS_P_NA_U_X:
	case XTENSA_MPU_ACCESS_P_RX_U_NA:
	case XTENSA_MPU_ACCESS_P_RWX_U_NA:
	case XTENSA_MPU_ACCESS_P_RW_U_RWX:
	case XTENSA_MPU_ACCESS_P_RWX_U_RX:
	case XTENSA_MPU_ACCESS_P_RX_U_RX:
	case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
		return true;
	default:
		return false;
	}
}
/* True when either privileged or user mode has write rights. */
static inline bool xtensa_mem_partition_is_writable(k_mem_partition_attr_t access_rights)
{
	switch (access_rights) {
	case XTENSA_MPU_ACCESS_P_RW_U_NA:
	case XTENSA_MPU_ACCESS_P_RWX_U_NA:
	case XTENSA_MPU_ACCESS_P_WO_U_WO:
	case XTENSA_MPU_ACCESS_P_RW_U_RWX:
	case XTENSA_MPU_ACCESS_P_RW_U_RO:
	case XTENSA_MPU_ACCESS_P_RWX_U_RX:
	case XTENSA_MPU_ACCESS_P_RW_U_RW:
	case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
		return true;
	default:
		return false;
	}
}
#define K_MEM_PARTITION_IS_EXECUTABLE(access_rights) \
(xtensa_mem_partition_is_executable(access_rights))
#define K_MEM_PARTITION_IS_WRITABLE(access_rights) \
(xtensa_mem_partition_is_writable(access_rights))
/* Read-Write access permission attributes */
#define K_MEM_PARTITION_P_RW_U_RW \
((k_mem_partition_attr_t) {XTENSA_MPU_ACCESS_P_RW_U_RW})
#define K_MEM_PARTITION_P_RW_U_NA \
((k_mem_partition_attr_t) {XTENSA_MPU_ACCESS_P_RW_U_NA})
#define K_MEM_PARTITION_P_RO_U_RO \
((k_mem_partition_attr_t) {XTENSA_MPU_ACCESS_P_RO_U_RO})
#define K_MEM_PARTITION_P_RO_U_NA \
((k_mem_partition_attr_t) {XTENSA_MPU_ACCESS_P_RO_U_NA})
#define K_MEM_PARTITION_P_NA_U_NA \
((k_mem_partition_attr_t) {XTENSA_MPU_ACCESS_P_NA_U_NA})
/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RX_U_RX \
((k_mem_partition_attr_t) {XTENSA_MPU_ACCESS_P_RX_U_RX})
/**
* @}
*/
/**
 * Struct to describe a memory region [start, end).
 */
struct xtensa_mpu_range {
	/** Start address (inclusive) of the memory region. */
	const uintptr_t start;
	/**
	 * End address (exclusive) of the memory region.
	 *
	 * Use 0xFFFFFFFF for the end of memory.
	 */
	const uintptr_t end;
	/** Access rights for the memory region (one of XTENSA_MPU_ACCESS_*). */
	const uint8_t access_rights:4;
	/**
	 * Memory type for the region.
	 *
	 * Refer to the Xtensa Instruction Set Architecture (ISA) manual
	 * for general description, and the processor manual for processor
	 * specific information.
	 */
	const uint16_t memory_type:9;
} __packed;
/**
* @brief Additional memory regions required by SoC.
*
* These memory regions will be setup by MPU initialization code at boot.
*
* Must be defined in the SoC layer.
*/
extern const struct xtensa_mpu_range xtensa_soc_mpu_ranges[];
/**
* @brief Number of SoC additional memory regions.
*
* Must be defined in the SoC layer.
*/
extern const int xtensa_soc_mpu_ranges_num;
/**
* @brief Initialize hardware MPU.
*
* This initializes the MPU hardware and setup the memory regions at boot.
*/
void xtensa_mpu_init(void);
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_XTENSA_MPU_H */
``` | /content/code_sandbox/include/zephyr/arch/xtensa/mpu.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,162 |
```c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_ARM_MEM_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_ARM_MEM_H_
/*
* Define ARM specific memory flags used by k_mem_map_phys_bare()
* followed public definitions in include/kernel/mm.h.
*/
/* For ARM64, K_MEM_CACHE_NONE is nGnRnE. */
#define K_MEM_ARM_DEVICE_nGnRnE K_MEM_CACHE_NONE
/** ARM64 Specific flags: device memory with nGnRE */
#define K_MEM_ARM_DEVICE_nGnRE 3
/** ARM64 Specific flags: device memory with GRE */
#define K_MEM_ARM_DEVICE_GRE 4
/** ARM64 Specific flags: normal memory with Non-cacheable */
#define K_MEM_ARM_NORMAL_NC 5
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_ARM_MEM_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/arm_mem.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 174 |
```c
/*
*
*/
/**
* @file
* @brief Cortex-A public exception handling
*
* ARM-specific kernel exception handling interface. Included by arm64/arch.h.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_EXCEPTION_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_EXCEPTION_H_
/* for assembler, only works with constants */
#ifdef _ASMLANGUAGE
#else
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Exception stack frame: the general-purpose registers x0-x18 and the
 * link register (lr/x30), followed by the saved exception state.
 */
struct arch_esf {
	uint64_t x0;
	uint64_t x1;
	uint64_t x2;
	uint64_t x3;
	uint64_t x4;
	uint64_t x5;
	uint64_t x6;
	uint64_t x7;
	uint64_t x8;
	uint64_t x9;
	uint64_t x10;
	uint64_t x11;
	uint64_t x12;
	uint64_t x13;
	uint64_t x14;
	uint64_t x15;
	uint64_t x16;
	uint64_t x17;
	uint64_t x18;
	uint64_t lr;
	/* Saved program status register (SPSR_ELx) at exception entry. */
	uint64_t spsr;
	/* Exception link register (ELR_ELx): address to return to. */
	uint64_t elr;
#ifdef CONFIG_FRAME_POINTER
	uint64_t fp;
#endif
#ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK
	/* Stack pointer saved when using the safe exception stack. */
	uint64_t sp;
#endif
} __aligned(16); /* AArch64 requires 16-byte stack alignment */
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_EXCEPTION_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/exception.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 292 |
```c
/*
* The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_ARM_MMU_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_ARM_MMU_H_
#ifndef _ASMLANGUAGE
#include <stdint.h>
#include <stdlib.h>
#endif
/* Following Memory types supported through MAIR encodings can be passed
* by user through "attrs"(attributes) field of specified memory region.
* As MAIR supports such 8 encodings, we will reserve attrs[2:0];
* so that we can provide encodings upto 7 if needed in future.
*/
#define MT_TYPE_MASK 0x7U
#define MT_TYPE(attr) (attr & MT_TYPE_MASK)
#define MT_DEVICE_nGnRnE 0U
#define MT_DEVICE_nGnRE 1U
#define MT_DEVICE_GRE 2U
#define MT_NORMAL_NC 3U
#define MT_NORMAL 4U
#define MT_NORMAL_WT 5U
#define MEMORY_ATTRIBUTES ((0x00 << (MT_DEVICE_nGnRnE * 8)) | \
(0x04 << (MT_DEVICE_nGnRE * 8)) | \
(0x0c << (MT_DEVICE_GRE * 8)) | \
(0x44 << (MT_NORMAL_NC * 8)) | \
(0xffUL << (MT_NORMAL * 8)) | \
(0xbbUL << (MT_NORMAL_WT * 8)))
/* More flags from user's perspective are supported using remaining bits
* of "attrs" field, i.e. attrs[31:3], underlying code will take care
* of setting PTE fields correctly.
*
* current usage of attrs[31:3] is:
* attrs[3] : Access Permissions
* attrs[4] : Memory access from secure/ns state
* attrs[5] : Execute Permissions privileged mode (PXN)
* attrs[6] : Execute Permissions unprivileged mode (UXN)
* attrs[7] : Mirror RO/RW permissions to EL0
* attrs[8] : Overwrite existing mapping if any
* attrs[9] : non-Global mapping (nG)
*
*/
#define MT_PERM_SHIFT 3U
#define MT_SEC_SHIFT 4U
#define MT_P_EXECUTE_SHIFT 5U
#define MT_U_EXECUTE_SHIFT 6U
#define MT_RW_AP_SHIFT 7U
#define MT_NO_OVERWRITE_SHIFT 8U
#define MT_NON_GLOBAL_SHIFT 9U
#define MT_RO (0U << MT_PERM_SHIFT)
#define MT_RW (1U << MT_PERM_SHIFT)
#define MT_RW_AP_ELx (1U << MT_RW_AP_SHIFT)
#define MT_RW_AP_EL_HIGHER (0U << MT_RW_AP_SHIFT)
#define MT_SECURE (0U << MT_SEC_SHIFT)
#define MT_NS (1U << MT_SEC_SHIFT)
#define MT_P_EXECUTE (0U << MT_P_EXECUTE_SHIFT)
#define MT_P_EXECUTE_NEVER (1U << MT_P_EXECUTE_SHIFT)
#define MT_U_EXECUTE (0U << MT_U_EXECUTE_SHIFT)
#define MT_U_EXECUTE_NEVER (1U << MT_U_EXECUTE_SHIFT)
#define MT_NO_OVERWRITE (1U << MT_NO_OVERWRITE_SHIFT)
#define MT_G (0U << MT_NON_GLOBAL_SHIFT)
#define MT_NG (1U << MT_NON_GLOBAL_SHIFT)
#define MT_P_RW_U_RW (MT_RW | MT_RW_AP_ELx | MT_P_EXECUTE_NEVER | MT_U_EXECUTE_NEVER)
#define MT_P_RW_U_NA (MT_RW | MT_RW_AP_EL_HIGHER | MT_P_EXECUTE_NEVER | MT_U_EXECUTE_NEVER)
#define MT_P_RO_U_RO (MT_RO | MT_RW_AP_ELx | MT_P_EXECUTE_NEVER | MT_U_EXECUTE_NEVER)
#define MT_P_RO_U_NA (MT_RO | MT_RW_AP_EL_HIGHER | MT_P_EXECUTE_NEVER | MT_U_EXECUTE_NEVER)
#define MT_P_RO_U_RX (MT_RO | MT_RW_AP_ELx | MT_P_EXECUTE_NEVER | MT_U_EXECUTE)
#define MT_P_RX_U_RX (MT_RO | MT_RW_AP_ELx | MT_P_EXECUTE | MT_U_EXECUTE)
#define MT_P_RX_U_NA (MT_RO | MT_RW_AP_EL_HIGHER | MT_P_EXECUTE | MT_U_EXECUTE_NEVER)
#ifdef CONFIG_ARMV8_A_NS
#define MT_DEFAULT_SECURE_STATE MT_NS
#else
#define MT_DEFAULT_SECURE_STATE MT_SECURE
#endif
#ifndef _ASMLANGUAGE
/* Region definition data structure */
struct arm_mmu_region {
/* Region Base Physical Address */
uintptr_t base_pa;
/* Region Base Virtual Address */
uintptr_t base_va;
/* Region size */
size_t size;
/* Region Name */
const char *name;
/* Region Attributes */
uint32_t attrs;
};
/* MMU configuration data structure */
struct arm_mmu_config {
/* Number of regions */
unsigned int num_regions;
/* Regions */
const struct arm_mmu_region *mmu_regions;
};
/* Translation-table state for one address space (see struct arch_mem_domain) */
struct arm_mmu_ptables {
	/* Top-level (base) translation table */
	uint64_t *base_xlat_table;
	/* Value to program into the ttbr0 register for these tables */
	uint64_t ttbr0;
};
/* Convenience macros to represent the ARMv8-A-specific
* configuration for memory access permission and
* cache-ability attribution.
*/
#define MMU_REGION_ENTRY(_name, _base_pa, _base_va, _size, _attrs) \
{\
.name = _name, \
.base_pa = _base_pa, \
.base_va = _base_va, \
.size = _size, \
.attrs = _attrs, \
}
#define MMU_REGION_FLAT_ENTRY(name, adr, sz, attrs) \
MMU_REGION_ENTRY(name, adr, adr, sz, attrs)
/*
* @brief Auto generate mmu region entry for node_id
*
* Example usage:
*
* @code{.c}
* DT_FOREACH_STATUS_OKAY_VARGS(nxp_imx_gpio,
* MMU_REGION_DT_FLAT_ENTRY,
* (MT_DEVICE_nGnRnE | MT_P_RW_U_NA | MT_NS))
* @endcode
*
* @note Since devicetree_generated.h does not include
* node_id##_P_reg_FOREACH_PROP_ELEM* definitions,
* we can't automate dts node with multiple reg
* entries.
*/
#define MMU_REGION_DT_FLAT_ENTRY(node_id, attrs) \
MMU_REGION_FLAT_ENTRY(DT_NODE_FULL_NAME(node_id), \
DT_REG_ADDR(node_id), \
DT_REG_SIZE(node_id), \
attrs),
/*
* @brief Auto generate mmu region entry for status = "okay"
* nodes compatible to a driver
*
* Example usage:
*
* @code{.c}
* MMU_REGION_DT_COMPAT_FOREACH_FLAT_ENTRY(nxp_imx_gpio,
* (MT_DEVICE_nGnRnE | MT_P_RW_U_NA | MT_NS))
* @endcode
*
* @note This is a wrapper of @ref MMU_REGION_DT_FLAT_ENTRY
*/
#define MMU_REGION_DT_COMPAT_FOREACH_FLAT_ENTRY(compat, attr) \
DT_FOREACH_STATUS_OKAY_VARGS(compat, \
MMU_REGION_DT_FLAT_ENTRY, attr)
/* Kernel macros for memory attribution
* (access permissions and cache-ability).
*
* The macros are to be stored in k_mem_partition_attr_t
* objects. The format of a k_mem_partition_attr_t object
* is an uint32_t composed by permission and attribute flags
* located in include/arch/arm64/arm_mmu.h
*/
/* Read-Write access permission attributes */
#define K_MEM_PARTITION_P_RW_U_RW ((k_mem_partition_attr_t) \
{MT_P_RW_U_RW})
#define K_MEM_PARTITION_P_RW_U_NA ((k_mem_partition_attr_t) \
{MT_P_RW_U_NA})
#define K_MEM_PARTITION_P_RO_U_RO ((k_mem_partition_attr_t) \
{MT_P_RO_U_RO})
#define K_MEM_PARTITION_P_RO_U_NA ((k_mem_partition_attr_t) \
{MT_P_RO_U_NA})
/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RX_U_RX ((k_mem_partition_attr_t) \
{MT_P_RX_U_RX})
/* Typedef for the k_mem_partition attribute */
typedef struct { uint32_t attrs; } k_mem_partition_attr_t;
/* Reference to the MMU configuration.
*
* This struct is defined and populated for each SoC (in the SoC definition),
* and holds the build-time configuration information for the fixed MMU
* regions enabled during kernel initialization.
*/
extern const struct arm_mmu_config mmu_config;
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_ARM_MMU_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/arm_mmu.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,901 |
```c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_CACHE_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_CACHE_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/barrier.h>
#include <zephyr/arch/cpu.h>
#include <errno.h>
#ifdef __cplusplus
extern "C" {
#endif
#define K_CACHE_WB BIT(0)
#define K_CACHE_INVD BIT(1)
#define K_CACHE_WB_INVD (K_CACHE_WB | K_CACHE_INVD)
#if defined(CONFIG_DCACHE)
#define CTR_EL0_DMINLINE_SHIFT 16
#define CTR_EL0_DMINLINE_MASK BIT_MASK(4)
#define CTR_EL0_CWG_SHIFT 24
#define CTR_EL0_CWG_MASK BIT_MASK(4)
/* clidr_el1 */
#define CLIDR_EL1_LOC_SHIFT 24
#define CLIDR_EL1_LOC_MASK BIT_MASK(3)
#define CLIDR_EL1_CTYPE_SHIFT(level) ((level) * 3)
#define CLIDR_EL1_CTYPE_MASK BIT_MASK(3)
/* ccsidr_el1 */
#define CCSIDR_EL1_LN_SZ_SHIFT 0
#define CCSIDR_EL1_LN_SZ_MASK BIT_MASK(3)
#define CCSIDR_EL1_WAYS_SHIFT 3
#define CCSIDR_EL1_WAYS_MASK BIT_MASK(10)
#define CCSIDR_EL1_SETS_SHIFT 13
#define CCSIDR_EL1_SETS_MASK BIT_MASK(15)
#define dc_ops(op, val) \
({ \
__asm__ volatile ("dc " op ", %0" :: "r" (val) : "memory"); \
})
/* Cached result of the CTR_EL0 query; computed once on first use. */
static size_t dcache_line_size;

/*
 * Return the (smallest) data cache line size in bytes.
 *
 * CTR_EL0.DminLine encodes log2 of the smallest data cache line in
 * 4-byte words, hence the "4 <<". The value is cached in a local
 * static so the system register is read only once.
 */
static ALWAYS_INLINE size_t arch_dcache_line_size_get(void)
{
	uint32_t dminline;
	uint64_t ctr;

	if (dcache_line_size == 0) {
		ctr = read_sysreg(CTR_EL0);
		dminline = (ctr >> CTR_EL0_DMINLINE_SHIFT) &
			   CTR_EL0_DMINLINE_MASK;
		dcache_line_size = 4U << dminline;
	}

	return dcache_line_size;
}
/*
* operation for data cache by virtual address to PoC
* ops: K_CACHE_INVD: invalidate
* K_CACHE_WB: clean
* K_CACHE_WB_INVD: clean and invalidate
*/
/*
 * Perform a data cache maintenance operation over a virtual address
 * range, to the Point of Coherency.
 *
 * @param addr start of the range (need not be line aligned)
 * @param size length of the range in bytes
 * @param op   K_CACHE_INVD (invalidate), K_CACHE_WB (clean) or
 *             K_CACHE_WB_INVD (clean and invalidate)
 *
 * @return 0 on success, -ENOTSUP for an unknown @a op
 */
static ALWAYS_INLINE int arm64_dcache_range(void *addr, size_t size, int op)
{
	size_t line_size;
	uintptr_t start_addr = (uintptr_t)addr;
	uintptr_t end_addr = start_addr + size;

	if (op != K_CACHE_INVD && op != K_CACHE_WB && op != K_CACHE_WB_INVD) {
		return -ENOTSUP;
	}

	line_size = arch_dcache_line_size_get();

	/*
	 * For the data cache invalidate operation, clean and invalidate
	 * the partial cache lines at both ends of the given range to
	 * prevent data corruption.
	 *
	 * For example (assume cache line size is 64 bytes):
	 * There are 2 consecutive 32-byte buffers, which can be cached in
	 * one line like below.
	 *              +------------------+------------------+
	 *  Cache line: | buffer 0 (dirty) | buffer 1         |
	 *              +------------------+------------------+
	 * For the start address not aligned case, when invalidate the
	 * buffer 1, the full cache line will be invalidated, if the buffer
	 * 0 is dirty, its data will be lost.
	 * The same logic applies to the not aligned end address.
	 */
	if (op == K_CACHE_INVD) {
		/* Unaligned tail: clean+invalidate the partial last line */
		if (end_addr & (line_size - 1)) {
			end_addr &= ~(line_size - 1);
			dc_ops("civac", end_addr);
		}
		/* Unaligned head: clean+invalidate the partial first line */
		if (start_addr & (line_size - 1)) {
			start_addr &= ~(line_size - 1);
			/* Head and tail fell in the same line: done above */
			if (start_addr == end_addr) {
				goto done;
			}
			dc_ops("civac", start_addr);
			start_addr += line_size;
		}
	}

	/* Align address to line size */
	start_addr &= ~(line_size - 1);

	while (start_addr < end_addr) {
		if (op == K_CACHE_INVD) {
			dc_ops("ivac", start_addr);
		} else if (op == K_CACHE_WB) {
			dc_ops("cvac", start_addr);
		} else if (op == K_CACHE_WB_INVD) {
			dc_ops("civac", start_addr);
		}
		start_addr += line_size;
	}

done:
	/* Ensure completion of the maintenance ops before returning */
	barrier_dsync_fence_full();

	return 0;
}
/* Whole-cache flush is not implemented; use the ranged operations. */
static ALWAYS_INLINE int arch_dcache_flush_all(void)
{
	return -ENOTSUP;
}

/* Whole-cache invalidate is not implemented; use the ranged operations. */
static ALWAYS_INLINE int arch_dcache_invd_all(void)
{
	return -ENOTSUP;
}

/* Whole-cache flush+invalidate is not implemented; use the ranged operations. */
static ALWAYS_INLINE int arch_dcache_flush_and_invd_all(void)
{
	return -ENOTSUP;
}

/* Clean (write back) a data cache range to the Point of Coherency. */
static ALWAYS_INLINE int arch_dcache_flush_range(void *addr, size_t size)
{
	return arm64_dcache_range(addr, size, K_CACHE_WB);
}

/* Invalidate a data cache range (partial edge lines are cleaned first). */
static ALWAYS_INLINE int arch_dcache_invd_range(void *addr, size_t size)
{
	return arm64_dcache_range(addr, size, K_CACHE_INVD);
}

/* Clean and invalidate a data cache range. */
static ALWAYS_INLINE int arch_dcache_flush_and_invd_range(void *addr, size_t size)
{
	return arm64_dcache_range(addr, size, K_CACHE_WB_INVD);
}

/* Data cache enable/disable is handled outside these hooks; intentional no-op. */
static ALWAYS_INLINE void arch_dcache_enable(void)
{
	/* nothing */
}

static ALWAYS_INLINE void arch_dcache_disable(void)
{
	/* nothing */
}
#endif /* CONFIG_DCACHE */
#if defined(CONFIG_ICACHE)
/* Instruction cache maintenance is not implemented on this architecture
 * port: every hook below reports -ENOTSUP (or is a no-op for
 * enable/disable).
 */
static ALWAYS_INLINE size_t arch_icache_line_size_get(void)
{
	return -ENOTSUP;
}

static ALWAYS_INLINE int arch_icache_flush_all(void)
{
	return -ENOTSUP;
}

static ALWAYS_INLINE int arch_icache_invd_all(void)
{
	return -ENOTSUP;
}

static ALWAYS_INLINE int arch_icache_flush_and_invd_all(void)
{
	return -ENOTSUP;
}

static ALWAYS_INLINE int arch_icache_flush_range(void *addr, size_t size)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(size);
	return -ENOTSUP;
}

static ALWAYS_INLINE int arch_icache_invd_range(void *addr, size_t size)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(size);
	return -ENOTSUP;
}

static ALWAYS_INLINE int arch_icache_flush_and_invd_range(void *addr, size_t size)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(size);
	return -ENOTSUP;
}

/* I-cache enable/disable is handled outside these hooks; intentional no-op. */
static ALWAYS_INLINE void arch_icache_enable(void)
{
	/* nothing */
}

static ALWAYS_INLINE void arch_icache_disable(void)
{
	/* nothing */
}
#endif /* CONFIG_ICACHE */
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_CACHE_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/cache.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,528 |
```c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_CPU_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_CPU_H_
#include <zephyr/sys/util_macro.h>
#include <stdbool.h>
#define DAIFSET_FIQ_BIT BIT(0)
#define DAIFSET_IRQ_BIT BIT(1)
#define DAIFSET_ABT_BIT BIT(2)
#define DAIFSET_DBG_BIT BIT(3)
#define DAIFCLR_FIQ_BIT BIT(0)
#define DAIFCLR_IRQ_BIT BIT(1)
#define DAIFCLR_ABT_BIT BIT(2)
#define DAIFCLR_DBG_BIT BIT(3)
#define DAIF_FIQ_BIT BIT(6)
#define DAIF_IRQ_BIT BIT(7)
#define DAIF_ABT_BIT BIT(8)
#define DAIF_DBG_BIT BIT(9)
#define SPSR_DAIF_SHIFT (6)
#define SPSR_DAIF_MASK (0xf << SPSR_DAIF_SHIFT)
#define SPSR_MODE_EL0T (0x0)
#define SPSR_MODE_EL1T (0x4)
#define SPSR_MODE_EL1H (0x5)
#define SPSR_MODE_EL2T (0x8)
#define SPSR_MODE_EL2H (0x9)
#define SPSR_MODE_MASK (0xf)
#define SCTLR_EL3_RES1 (BIT(29) | BIT(28) | BIT(23) | \
BIT(22) | BIT(18) | BIT(16) | \
BIT(11) | BIT(5) | BIT(4))
#define SCTLR_EL2_RES1 (BIT(29) | BIT(28) | BIT(23) | \
BIT(22) | BIT(18) | BIT(16) | \
BIT(11) | BIT(5) | BIT(4))
#define SCTLR_EL1_RES1 (BIT(29) | BIT(28) | BIT(23) | \
BIT(22) | BIT(20) | BIT(11))
#define SCTLR_M_BIT BIT(0)
#define SCTLR_A_BIT BIT(1)
#define SCTLR_C_BIT BIT(2)
#define SCTLR_SA_BIT BIT(3)
#define SCTLR_I_BIT BIT(12)
#define SCTLR_BR_BIT BIT(17)
#define CPACR_EL1_FPEN_NOTRAP (0x3 << 20)
#define SCR_NS_BIT BIT(0)
#define SCR_IRQ_BIT BIT(1)
#define SCR_FIQ_BIT BIT(2)
#define SCR_EA_BIT BIT(3)
#define SCR_SMD_BIT BIT(7)
#define SCR_HCE_BIT BIT(8)
#define SCR_RW_BIT BIT(10)
#define SCR_ST_BIT BIT(11)
#define SCR_EEL2_BIT BIT(18)
#define SCR_RES1 (BIT(4) | BIT(5))
/* MPIDR */
#define MPIDR_AFFLVL_MASK (0xffULL)
#define MPIDR_AFF0_SHIFT (0)
#define MPIDR_AFF1_SHIFT (8)
#define MPIDR_AFF2_SHIFT (16)
#define MPIDR_AFF3_SHIFT (32)
#define MPIDR_AFF_MASK (GENMASK(23, 0) | GENMASK(39, 32))
#define MPIDR_AFFLVL(mpidr, aff_level) \
(((mpidr) >> MPIDR_AFF##aff_level##_SHIFT) & MPIDR_AFFLVL_MASK)
#define GET_MPIDR() read_sysreg(mpidr_el1)
#define MPIDR_TO_CORE(mpidr) (mpidr & MPIDR_AFF_MASK)
#define MODE_EL_SHIFT (0x2)
#define MODE_EL_MASK (0x3)
#define MODE_EL3 (0x3)
#define MODE_EL2 (0x2)
#define MODE_EL1 (0x1)
#define MODE_EL0 (0x0)
#define GET_EL(_mode) (((_mode) >> MODE_EL_SHIFT) & MODE_EL_MASK)
#define ESR_EC_SHIFT (26)
#define ESR_EC_MASK BIT_MASK(6)
#define ESR_ISS_SHIFT (0)
#define ESR_ISS_MASK BIT_MASK(25)
#define ESR_IL_SHIFT (25)
#define ESR_IL_MASK BIT_MASK(1)
#define GET_ESR_EC(esr) (((esr) >> ESR_EC_SHIFT) & ESR_EC_MASK)
#define GET_ESR_IL(esr) (((esr) >> ESR_IL_SHIFT) & ESR_IL_MASK)
#define GET_ESR_ISS(esr) (((esr) >> ESR_ISS_SHIFT) & ESR_ISS_MASK)
#define CNTV_CTL_ENABLE_BIT BIT(0)
#define CNTV_CTL_IMASK_BIT BIT(1)
#define ID_AA64PFR0_EL0_SHIFT (0)
#define ID_AA64PFR0_EL1_SHIFT (4)
#define ID_AA64PFR0_EL2_SHIFT (8)
#define ID_AA64PFR0_EL3_SHIFT (12)
#define ID_AA64PFR0_ELX_MASK (0xf)
#define ID_AA64PFR0_SEL2_SHIFT (36)
#define ID_AA64PFR0_SEL2_MASK (0xf)
/*
* TODO: ACTLR is of class implementation defined. All core implementations
* in armv8a have the same implementation so far w.r.t few controls.
* When there will be differences we have to create core specific headers.
*/
#define ACTLR_EL3_CPUACTLR_BIT BIT(0)
#define ACTLR_EL3_CPUECTLR_BIT BIT(1)
#define ACTLR_EL3_L2CTLR_BIT BIT(4)
#define ACTLR_EL3_L2ECTLR_BIT BIT(5)
#define ACTLR_EL3_L2ACTLR_BIT BIT(6)
#define CPTR_EZ_BIT BIT(8)
#define CPTR_TFP_BIT BIT(10)
#define CPTR_TTA_BIT BIT(20)
#define CPTR_TCPAC_BIT BIT(31)
#define CPTR_EL2_RES1 BIT(13) | BIT(12) | BIT(9) | (0xff)
#define HCR_FMO_BIT BIT(3)
#define HCR_IMO_BIT BIT(4)
#define HCR_AMO_BIT BIT(5)
#define HCR_TGE_BIT BIT(27)
#define HCR_RW_BIT BIT(31)
/* System register interface to GICv3 */
#define ICC_IGRPEN1_EL1 S3_0_C12_C12_7
#define ICC_SGI1R S3_0_C12_C11_5
#define ICC_SRE_EL1 S3_0_C12_C12_5
#define ICC_SRE_EL2 S3_4_C12_C9_5
#define ICC_SRE_EL3 S3_6_C12_C12_5
#define ICC_CTLR_EL1 S3_0_C12_C12_4
#define ICC_CTLR_EL3 S3_6_C12_C12_4
#define ICC_PMR_EL1 S3_0_C4_C6_0
#define ICC_RPR_EL1 S3_0_C12_C11_3
#define ICC_IGRPEN1_EL3 S3_6_C12_C12_7
#define ICC_IGRPEN0_EL1 S3_0_C12_C12_6
#define ICC_HPPIR0_EL1 S3_0_C12_C8_2
#define ICC_HPPIR1_EL1 S3_0_C12_C12_2
#define ICC_IAR0_EL1 S3_0_C12_C8_0
#define ICC_IAR1_EL1 S3_0_C12_C12_0
#define ICC_EOIR0_EL1 S3_0_C12_C8_1
#define ICC_EOIR1_EL1 S3_0_C12_C12_1
#define ICC_SGI0R_EL1 S3_0_C12_C11_7
/* register constants */
#define ICC_SRE_ELx_SRE_BIT BIT(0)
#define ICC_SRE_ELx_DFB_BIT BIT(1)
#define ICC_SRE_ELx_DIB_BIT BIT(2)
#define ICC_SRE_EL3_EN_BIT BIT(3)
/* ICC SGI macros */
#define SGIR_TGT_MASK (0xffff)
#define SGIR_AFF1_SHIFT (16)
#define SGIR_AFF2_SHIFT (32)
#define SGIR_AFF3_SHIFT (48)
#define SGIR_AFF_MASK (0xff)
#define SGIR_INTID_SHIFT (24)
#define SGIR_INTID_MASK (0xf)
#define SGIR_IRM_SHIFT (40)
#define SGIR_IRM_MASK (0x1)
#define SGIR_IRM_TO_AFF (0)
#define GICV3_SGIR_VALUE(_aff3, _aff2, _aff1, _intid, _irm, _tgt) \
((((uint64_t) (_aff3) & SGIR_AFF_MASK) << SGIR_AFF3_SHIFT) | \
(((uint64_t) (_irm) & SGIR_IRM_MASK) << SGIR_IRM_SHIFT) | \
(((uint64_t) (_aff2) & SGIR_AFF_MASK) << SGIR_AFF2_SHIFT) | \
(((_intid) & SGIR_INTID_MASK) << SGIR_INTID_SHIFT) | \
(((_aff1) & SGIR_AFF_MASK) << SGIR_AFF1_SHIFT) | \
((_tgt) & SGIR_TGT_MASK))
/* Implementation defined register definitions */
#if defined(CONFIG_CPU_CORTEX_A72)
#define CORTEX_A72_L2CTLR_EL1 S3_1_C11_C0_2
#define CORTEX_A72_L2CTLR_DATA_RAM_LATENCY_SHIFT (0)
#define CORTEX_A72_L2CTLR_DATA_RAM_SETUP_SHIFT (5)
#define CORTEX_A72_L2CTLR_TAG_RAM_LATENCY_SHIFT (6)
#define CORTEX_A72_L2CTLR_TAG_RAM_SETUP_SHIFT (9)
#define CORTEX_A72_L2_DATA_RAM_LATENCY_3_CYCLES (2)
#define CORTEX_A72_L2_DATA_RAM_LATENCY_MASK (0x7)
#define CORTEX_A72_L2_DATA_RAM_SETUP_1_CYCLE (1)
#define CORTEX_A72_L2_TAG_RAM_LATENCY_2_CYCLES (1)
#define CORTEX_A72_L2_TAG_RAM_LATENCY_3_CYCLES (2)
#define CORTEX_A72_L2_TAG_RAM_LATENCY_MASK (0x7)
#define CORTEX_A72_L2_TAG_RAM_SETUP_1_CYCLE (1)
#define CORTEX_A72_L2ACTLR_EL1 S3_1_C15_C0_0
#define CORTEX_A72_L2ACTLR_DISABLE_ACE_SH_OR_CHI_BIT BIT(6)
#endif /* CONFIG_CPU_CORTEX_A72 */
#define L1_CACHE_SHIFT (6)
#define L1_CACHE_BYTES BIT(L1_CACHE_SHIFT)
#define ARM64_CPU_INIT_SIZE L1_CACHE_BYTES
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_CPU_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/cpu.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,467 |
```asm
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_MACRO_INC_
#define ZEPHYR_INCLUDE_ARCH_ARM64_MACRO_INC_
#ifdef _ASMLANGUAGE
/*
 * Branch to \el3_label, \el2_label or \el1_label according to the
 * current exception level, read from CurrentEL into \xreg (which is
 * clobbered). CurrentEL values compared: 0xc = EL3, 0x8 = EL2,
 * 0x4 = EL1; falls through when none matches.
 */
.macro switch_el, xreg, el3_label, el2_label, el1_label
	mrs	\xreg, CurrentEL
	cmp	\xreg, 0xc
	beq	\el3_label
	cmp	\xreg, 0x8
	beq	\el2_label
	cmp	\xreg, 0x4
	beq	\el1_label
.endm
/*
* macro to support mov of immediate constant to 64 bit register
* It will generate instruction sequence of 'mov'/ 'movz' and one
* to three 'movk' depending on the immediate value.
*/
/*
 * Load the build-time constant \imm into \xreg using the shortest
 * mov/movz + movk sequence that the value's magnitude allows (see the
 * header comment above the macro).
 */
.macro mov_imm, xreg, imm
	.if ((\imm) == 0)
	/* Zero: a single mov suffices */
	mov	\xreg, \imm
	.else
	.if (((\imm) >> 31) == 0 || ((\imm) >> 31) == 0x1ffffffff)
	/* Short form: seed bits [31:16], movk fills [15:0] below */
	movz	\xreg, (\imm >> 16) & 0xffff, lsl 16
	.else
	.if (((\imm) >> 47) == 0 || ((\imm) >> 47) == 0x1ffff)
	/* Medium form: seed bits [47:32], movk fills the rest below */
	movz	\xreg, (\imm >> 32) & 0xffff, lsl 32
	.else
	/* Full form: seed bits [63:48], then fill downwards */
	movz	\xreg, (\imm >> 48) & 0xffff, lsl 48
	movk	\xreg, (\imm >> 32) & 0xffff, lsl 32
	.endif
	movk	\xreg, (\imm >> 16) & 0xffff, lsl 16
	.endif
	movk	\xreg, (\imm) & 0xffff, lsl 0
	.endif
.endm
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_MACRO_INC_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/macro.inc | sourcepawn | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 450 |
```c
/*
*
*/
/**
* @file
* @brief Cortex-A public interrupt handling
*
* ARM64-specific kernel interrupt handling interface.
* Included by arm64/arch.h.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_IRQ_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_IRQ_H_
#include <zephyr/irq.h>
#include <zephyr/sw_isr_table.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef _ASMLANGUAGE
GTEXT(arch_irq_enable)
GTEXT(arch_irq_disable)
GTEXT(arch_irq_is_enabled)
#if defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
GTEXT(z_soc_irq_get_active)
GTEXT(z_soc_irq_eoi)
#endif /* CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */
#else
#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
extern void arch_irq_enable(unsigned int irq);
extern void arch_irq_disable(unsigned int irq);
extern int arch_irq_is_enabled(unsigned int irq);
/* internal routine documented in C file, needed by IRQ_CONNECT() macro */
extern void z_arm64_irq_priority_set(unsigned int irq, unsigned int prio,
uint32_t flags);
#else
/*
* When a custom interrupt controller is specified, map the architecture
* interrupt control functions to the SoC layer interrupt control functions.
*/
void z_soc_irq_init(void);
void z_soc_irq_enable(unsigned int irq);
void z_soc_irq_disable(unsigned int irq);
int z_soc_irq_is_enabled(unsigned int irq);
void z_soc_irq_priority_set(
unsigned int irq, unsigned int prio, unsigned int flags);
unsigned int z_soc_irq_get_active(void);
void z_soc_irq_eoi(unsigned int irq);
#define arch_irq_enable(irq) z_soc_irq_enable(irq)
#define arch_irq_disable(irq) z_soc_irq_disable(irq)
#define arch_irq_is_enabled(irq) z_soc_irq_is_enabled(irq)
#define z_arm64_irq_priority_set(irq, prio, flags) \
z_soc_irq_priority_set(irq, prio, flags)
#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */
extern void z_arm64_interrupt_init(void);
/* All arguments must be computable by the compiler at build time.
*
* Z_ISR_DECLARE will populate the .intList section with the interrupt's
* parameters, which will then be used by gen_irq_tables.py to create
* the vector table and the software ISR table. This is all done at
* build-time.
*
* We additionally set the priority in the interrupt controller at
* runtime.
*/
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
{ \
Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
z_arm64_irq_priority_set(irq_p, priority_p, flags_p); \
}
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_IRQ_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/irq.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 599 |
```c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARM64_STRUCTS_H_
#define ZEPHYR_INCLUDE_ARM64_STRUCTS_H_
/* Per CPU architecture specifics */
struct _cpu_arch {
#ifdef CONFIG_FPU_SHARING
	/* Current owner of this CPU's FPU register context, if any */
	atomic_ptr_val_t fpu_owner;
#endif
#ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK
	/* Safe exception stack for this CPU (stack-protection support) */
	uint64_t safe_exception_stack;
	/* Limit of the currently active stack — TODO confirm semantics */
	uint64_t current_stack_limit;
	/* Saved the corrupted stack pointer when stack overflow, else 0 */
	uint64_t corrupted_sp;
#endif
};
#endif /* ZEPHYR_INCLUDE_ARM64_STRUCTS_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/structs.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 116 |
```c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_TIMER_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_TIMER_H_
#ifndef _ASMLANGUAGE
#include <limits.h>
#include <zephyr/drivers/timer/arm_arch_timer.h>
#include <zephyr/types.h>
#include <limits.h>
#ifdef __cplusplus
extern "C" {
#endif
#define ARM_ARCH_TIMER_IRQ ARM_TIMER_VIRTUAL_IRQ
#define ARM_ARCH_TIMER_PRIO ARM_TIMER_VIRTUAL_PRIO
#define ARM_ARCH_TIMER_FLAGS ARM_TIMER_VIRTUAL_FLAGS
/*
 * One-time init hook for the ARM architected timer.
 *
 * When the timer frequency is discovered at runtime, read CNTFRQ_EL0
 * and publish it as the system's hw-cycles-per-second; otherwise this
 * is a no-op.
 */
static ALWAYS_INLINE void arm_arch_timer_init(void)
{
#ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
	extern int z_clock_hw_cycles_per_sec;
	uint64_t cntfrq_el0 = read_cntfrq_el0();

	/* The destination is an 'int': reject frequencies that overflow it */
	__ASSERT(cntfrq_el0 < INT_MAX, "cntfrq_el0 cannot fit in system 'int'");
	z_clock_hw_cycles_per_sec = (int) cntfrq_el0;
#endif
}
/* Program the virtual timer compare value (cntv_cval_el0). */
static ALWAYS_INLINE void arm_arch_timer_set_compare(uint64_t val)
{
	write_cntv_cval_el0(val);
}
/*
 * Enable or disable the virtual timer by setting or clearing the
 * ENABLE bit of cntv_ctl_el0.
 */
static ALWAYS_INLINE void arm_arch_timer_enable(unsigned char enable)
{
	uint64_t ctl = read_cntv_ctl_el0();

	if (enable) {
		ctl |= CNTV_CTL_ENABLE_BIT;
	} else {
		ctl &= ~CNTV_CTL_ENABLE_BIT;
	}

	write_cntv_ctl_el0(ctl);
}
/*
 * Mask or unmask the virtual timer interrupt by setting or clearing
 * the IMASK bit of cntv_ctl_el0.
 */
static ALWAYS_INLINE void arm_arch_timer_set_irq_mask(bool mask)
{
	uint64_t ctl = read_cntv_ctl_el0();

	if (mask) {
		ctl |= CNTV_CTL_IMASK_BIT;
	} else {
		ctl &= ~CNTV_CTL_IMASK_BIT;
	}

	write_cntv_ctl_el0(ctl);
}
/* Read the current virtual counter value (cntvct_el0). */
static ALWAYS_INLINE uint64_t arm_arch_timer_count(void)
{
	return read_cntvct_el0();
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_TIMER_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/timer.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 434 |
```c
/*
*
*/
/**
* @file
* @brief tpidrro_el0 bits allocation
*
* Among other things, the tpidrro_el0 holds the address for the current
* CPU's struct _cpu instance. But such a pointer is at least 8-bytes
* aligned, and the address space is 48 bits max. That leaves plenty of
* free bits for other purposes.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_TPIDRRO_EL0_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_TPIDRRO_EL0_H_
#define TPIDRROEL0_IN_EL0 0x0000000000000001
#define TPIDRROEL0_CURR_CPU 0x0000fffffffffff8
#define TPIDRROEL0_EXC_DEPTH 0xff00000000000000
#define TPIDRROEL0_EXC_UNIT 0x0100000000000000
#define TPIDRROEL0_EXC_SHIFT 56
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_TPIDRRO_EL0_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/tpidrro_el0.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 231 |
```c
/*
*
*/
/**
* @file
* @brief ARM64 specific kernel interface header
*
* This header contains the ARM64 specific kernel interface. It is
* included by the kernel interface architecture-abstraction header
* (include/arm64/cpu.h)
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_ARCH_H_
/* Add include for DTS generated information */
#include <zephyr/devicetree.h>
#include <zephyr/arch/arm64/thread.h>
#include <zephyr/arch/arm64/exception.h>
#include <zephyr/arch/arm64/irq.h>
#include <zephyr/arch/arm64/misc.h>
#include <zephyr/arch/arm64/asm_inline.h>
#include <zephyr/arch/arm64/cpu.h>
#include <zephyr/arch/arm64/macro.inc>
#include <zephyr/arch/arm64/sys_io.h>
#include <zephyr/arch/arm64/timer.h>
#include <zephyr/arch/arm64/error.h>
#include <zephyr/arch/arm64/mm.h>
#include <zephyr/arch/arm64/thread_stack.h>
#include <zephyr/arch/common/addr_types.h>
#include <zephyr/arch/common/sys_bitops.h>
#include <zephyr/arch/common/ffs.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _ASMLANGUAGE
#include <zephyr/sys/slist.h>
/* Architecture-specific state of a memory domain */
struct arch_mem_domain {
#ifdef CONFIG_ARM_MMU
	/* Translation tables backing this domain */
	struct arm_mmu_ptables ptables;
#endif
	/* List node linking this domain with its siblings */
	sys_snode_t node;
};
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_ARCH_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/arch.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 359 |
```c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_ARCH_INLINES_H
#define ZEPHYR_INCLUDE_ARCH_ARM64_ARCH_INLINES_H
#ifndef _ASMLANGUAGE
#include <zephyr/kernel_structs.h>
#include <zephyr/arch/arm64/lib_helpers.h>
#include <zephyr/arch/arm64/tpidrro_el0.h>
#include <zephyr/sys/__assert.h>
/* Note: keep in sync with `get_cpu` in arch/arm64/core/macro_priv.inc */
/*
 * Return the struct _cpu instance of the executing CPU. The pointer is
 * stashed in tpidrro_el0 alongside flag bits; TPIDRROEL0_CURR_CPU
 * masks those off (see tpidrro_el0.h for the bit allocation).
 */
static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
{
	return (_cpu_t *)(read_tpidrro_el0() & TPIDRROEL0_CURR_CPU);
}
static ALWAYS_INLINE int arch_exception_depth(void)
{
return (read_tpidrro_el0() & TPIDRROEL0_EXC_DEPTH) / TPIDRROEL0_EXC_UNIT;
}
/*
 * Return an id for the executing processor, derived from mpidr_el1.
 * Only the low 32 bits are kept; the assert guards against an MPIDR
 * value that would not fit.
 */
static ALWAYS_INLINE uint32_t arch_proc_id(void)
{
	uint64_t cpu_mpid = read_mpidr_el1();

	__ASSERT(cpu_mpid == (uint32_t)cpu_mpid, "mpid extends past 32 bits");

	return (uint32_t)cpu_mpid;
}
/* The CPU count is fixed at build time on this architecture. */
static ALWAYS_INLINE unsigned int arch_num_cpus(void)
{
	return CONFIG_MP_MAX_NUM_CPUS;
}
#endif /* !_ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_ARCH_INLINES_H */
``` | /content/code_sandbox/include/zephyr/arch/arm64/arch_inlines.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 296 |
```c
/*
*
*/
/* Either public functions or macros or invoked by public functions */
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_ASM_INLINE_GCC_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_ASM_INLINE_GCC_H_
/*
* The file must not be included directly
* Include arch/cpu.h instead
*/
#ifndef _ASMLANGUAGE
#include <zephyr/arch/arm64/lib_helpers.h>
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Mask IRQs on the current CPU.
 *
 * @return lock key (the previous DAIF value) for arch_irq_unlock()
 */
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
	unsigned int key;

	/*
	 * Return the whole DAIF register as key but use DAIFSET to disable
	 * IRQs.
	 */
	key = read_daif();
	disable_irq();

	return key;
}
/*
 * Restore the interrupt state captured by arch_irq_lock(): writing the
 * saved DAIF value back restores all four mask bits at once.
 */
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
	write_daif(key);
}
/*
 * Report whether a lock key represents an unlocked (IRQs enabled)
 * state. Only the DAIF I (IRQ mask) bit is examined.
 */
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
	return !(key & DAIF_IRQ_BIT);
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_ASM_INLINE_GCC_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/asm_inline_gcc.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 251 |
```c
/*
*
*/
/**
* @file
* @brief Per-arch thread definition
*
* This file contains definitions for
*
* struct _thread_arch
* struct _callee_saved
*
* necessary to instantiate instances of struct k_thread.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_THREAD_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_THREAD_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <zephyr/arch/arm64/mm.h>
/*
 * Callee-saved register context: x19-x29 (preserved across calls per
 * the AArch64 procedure call standard), the stack pointers, and the
 * link register. Part of struct k_thread (see file header).
 */
struct _callee_saved {
	uint64_t x19;
	uint64_t x20;
	uint64_t x21;
	uint64_t x22;
	uint64_t x23;
	uint64_t x24;
	uint64_t x25;
	uint64_t x26;
	uint64_t x27;
	uint64_t x28;
	uint64_t x29;	/* frame pointer */
	uint64_t sp_el0;	/* EL0 stack pointer */
	uint64_t sp_elx;	/* ELx stack pointer */
	uint64_t lr;	/* x30: link register */
};

typedef struct _callee_saved _callee_saved_t;
/*
 * AArch64 SIMD/FP register context: the 32 128-bit Q registers plus
 * the floating-point status (fpsr) and control (fpcr) registers.
 */
struct z_arm64_fp_context {
	__int128 q0, q1, q2, q3, q4, q5, q6, q7;
	__int128 q8, q9, q10, q11, q12, q13, q14, q15;
	__int128 q16, q17, q18, q19, q20, q21, q22, q23;
	__int128 q24, q25, q26, q27, q28, q29, q30, q31;
	uint32_t fpsr, fpcr;
};
/* Architecture-specific portion of struct k_thread */
struct _thread_arch {
#if defined(CONFIG_USERSPACE) || defined(CONFIG_ARM64_STACK_PROTECTION)
#if defined(CONFIG_ARM_MMU)
	/* Per-thread translation tables */
	struct arm_mmu_ptables *ptables;
#endif
#if defined(CONFIG_ARM_MPU)
	/* Per-thread dynamic MPU regions */
	struct dynamic_region_info regions[ARM64_MPU_MAX_DYNAMIC_REGIONS];
	/* Number of entries of 'regions' currently in use */
	uint8_t region_num;
	/* NOTE(review): presumably a flag/counter for in-progress MPU
	 * reprogramming — confirm against the MPU code
	 */
	atomic_t flushing;
#endif
#endif
#ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK
	/* Stack limit for this thread (see _cpu_arch.current_stack_limit) */
	uint64_t stack_limit;
#endif
#ifdef CONFIG_FPU_SHARING
	/* Saved SIMD/FP register state (FPU sharing) */
	struct z_arm64_fp_context saved_fp_context;
#endif
	/* Exception nesting depth (see arch_exception_depth()) */
	uint8_t exception_depth;
};

typedef struct _thread_arch _thread_arch_t;
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_THREAD_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/thread.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 490 |
```c
/*
*
*/
/**
* @file
* @brief Cortex-A public kernel miscellaneous
*
* ARM64-specific kernel miscellaneous interface. Included by
* arm64/arch.h.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_MISC_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_MISC_H_
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _ASMLANGUAGE
/* Implemented by the system clock code elsewhere */
extern uint32_t sys_clock_cycle_get_32(void);

/* Return the 32-bit system clock cycle counter. */
static inline uint32_t arch_k_cycle_get_32(void)
{
	return sys_clock_cycle_get_32();
}

/* Implemented by the system clock code elsewhere */
extern uint64_t sys_clock_cycle_get_64(void);

/* Return the 64-bit system clock cycle counter. */
static inline uint64_t arch_k_cycle_get_64(void)
{
	return sys_clock_cycle_get_64();
}

/* Execute a single architectural no-op instruction. */
static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile("nop");
}
#endif
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_MISC_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/misc.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 190 |
```c
/*
*
*/
/**
* @file
* @brief ARM64 specific syscall header
*
* This header contains the ARM64 specific syscall interface. It is
* included by the syscall interface architecture-abstraction header
* (include/arch64/syscall.h)
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_SYSCALL_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_SYSCALL_H_
#define _SVC_CALL_IRQ_OFFLOAD 1
#define _SVC_CALL_RUNTIME_EXCEPT 2
#define _SVC_CALL_SYSTEM_CALL 3
#ifdef CONFIG_USERSPACE
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <stdbool.h>
#include <zephyr/arch/arm64/lib_helpers.h>
#include <zephyr/arch/arm64/tpidrro_el0.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Syscall invocation macros. arm-specific machine constraints used to ensure
* args land in the proper registers.
*/
/*
 * Invoke a system call with six arguments.
 *
 * The register-pinned asm variables force the arguments into x0-x5 and
 * the call ID into x8; "svc" with immediate _SVC_CALL_SYSTEM_CALL traps
 * into the kernel, and the result comes back in x0 (aliased to ret).
 */
static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t arg6,
					     uintptr_t call_id)
{
	register uint64_t ret __asm__("x0") = arg1;
	register uint64_t r1 __asm__("x1") = arg2;
	register uint64_t r2 __asm__("x2") = arg3;
	register uint64_t r3 __asm__("x3") = arg4;
	register uint64_t r4 __asm__("x4") = arg5;
	register uint64_t r5 __asm__("x5") = arg6;
	register uint64_t r8 __asm__("x8") = call_id;

	__asm__ volatile("svc %[svid]\n"
			 : "=r"(ret)
			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
			   "r" (ret), "r" (r1), "r" (r2), "r" (r3),
			   "r" (r4), "r" (r5), "r" (r8)
			 : "memory");

	return ret;
}
/* Invoke a system call with five arguments (args in x0-x4, call ID in x8,
 * result returned in x0).
 */
static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5,
					     uintptr_t call_id)
{
	register uint64_t ret __asm__("x0") = arg1;
	register uint64_t r1 __asm__("x1") = arg2;
	register uint64_t r2 __asm__("x2") = arg3;
	register uint64_t r3 __asm__("x3") = arg4;
	register uint64_t r4 __asm__("x4") = arg5;
	register uint64_t r8 __asm__("x8") = call_id;

	__asm__ volatile("svc %[svid]\n"
			 : "=r"(ret)
			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
			   "r" (ret), "r" (r1), "r" (r2), "r" (r3),
			   "r" (r4), "r" (r8)
			 : "memory");

	return ret;
}
/* Invoke a system call with four arguments (args in x0-x3, call ID in x8,
 * result returned in x0).
 */
static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t call_id)
{
	register uint64_t ret __asm__("x0") = arg1;
	register uint64_t r1 __asm__("x1") = arg2;
	register uint64_t r2 __asm__("x2") = arg3;
	register uint64_t r3 __asm__("x3") = arg4;
	register uint64_t r8 __asm__("x8") = call_id;

	__asm__ volatile("svc %[svid]\n"
			 : "=r"(ret)
			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
			   "r" (ret), "r" (r1), "r" (r2), "r" (r3),
			   "r" (r8)
			 : "memory");

	return ret;
}
/* Invoke a system call with three arguments (args in x0-x2, call ID in x8,
 * result returned in x0).
 */
static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3,
					     uintptr_t call_id)
{
	register uint64_t ret __asm__("x0") = arg1;
	register uint64_t r1 __asm__("x1") = arg2;
	register uint64_t r2 __asm__("x2") = arg3;
	register uint64_t r8 __asm__("x8") = call_id;

	__asm__ volatile("svc %[svid]\n"
			 : "=r"(ret)
			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
			   "r" (ret), "r" (r1), "r" (r2), "r" (r8)
			 : "memory");

	return ret;
}
/* Invoke a system call with two arguments (args in x0-x1, call ID in x8,
 * result returned in x0).
 */
static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t call_id)
{
	register uint64_t ret __asm__("x0") = arg1;
	register uint64_t r1 __asm__("x1") = arg2;
	register uint64_t r8 __asm__("x8") = call_id;

	__asm__ volatile("svc %[svid]\n"
			 : "=r"(ret)
			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
			   "r" (ret), "r" (r1), "r" (r8)
			 : "memory");

	return ret;
}
/* Invoke a system call with one argument (arg in x0, call ID in x8,
 * result returned in x0).
 */
static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1,
					     uintptr_t call_id)
{
	register uint64_t ret __asm__("x0") = arg1;
	register uint64_t r8 __asm__("x8") = call_id;

	__asm__ volatile("svc %[svid]\n"
			 : "=r"(ret)
			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
			   "r" (ret), "r" (r8)
			 : "memory");

	return ret;
}
/* Invoke a system call with no arguments (call ID in x8, result in x0).
 *
 * NOTE(review): ret (x0) is listed as an input constraint while left
 * uninitialized; presumably this only ties x0 to the asm and the kernel
 * overwrites it with the return value -- confirm against the other
 * invoke variants before changing.
 */
static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
{
	register uint64_t ret __asm__("x0");
	register uint64_t r8 __asm__("x8") = call_id;

	__asm__ volatile("svc %[svid]\n"
			 : "=r"(ret)
			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
			   "r" (ret), "r" (r8)
			 : "memory");

	return ret;
}
/* True when running in user context: tests the TPIDRROEL0_IN_EL0 flag
 * held in the read-only thread-pointer register TPIDRRO_EL0.
 */
static inline bool arch_is_user_context(void)
{
	return (read_tpidrro_el0() & TPIDRROEL0_IN_EL0) != 0;
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* CONFIG_USERSPACE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_SYSCALL_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/syscall.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,477 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_MM_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_MM_H_
#if defined(CONFIG_ARM_MMU)
#include <zephyr/arch/arm64/arm_mmu.h>
/*
* When mmu enabled, some section addresses need to be aligned with
* page size which is CONFIG_MMU_PAGE_SIZE
*/
#define MEM_DOMAIN_ALIGN_AND_SIZE CONFIG_MMU_PAGE_SIZE
#elif defined(CONFIG_ARM_MPU)
#include <zephyr/arch/arm64/cortex_r/arm_mpu.h>
/*
* When mpu enabled, some section addresses need to be aligned with
* mpu region min align size which is
* CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE
*/
#define MEM_DOMAIN_ALIGN_AND_SIZE CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE
#endif
#ifndef _ASMLANGUAGE
struct k_thread;
void z_arm64_thread_mem_domains_init(struct k_thread *thread);
void z_arm64_swap_mem_domains(struct k_thread *thread);
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_MM_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/mm.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 225 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_ASM_INLINE_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_ASM_INLINE_H_
/*
* The file must not be included directly
* Include kernel.h instead
*/
#if defined(__GNUC__)
#include <zephyr/arch/arm64/asm_inline_gcc.h>
#else
#include <arch/arm/asm_inline_other.h>
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_ASM_INLINE_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/asm_inline.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 96 |
```objective-c
/**
*
*/
#ifndef ZEPHYR_INCLUDE_BARRIER_ARM64_H_
#define ZEPHYR_INCLUDE_BARRIER_ARM64_H_
#ifndef ZEPHYR_INCLUDE_SYS_BARRIER_H_
#error Please include <zephyr/sys/barrier.h>
#endif
#include <zephyr/toolchain.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Full-system data memory barrier ("dmb sy"), plus a compiler barrier. */
static ALWAYS_INLINE void z_barrier_dmem_fence_full(void)
{
	__asm__ volatile ("dmb sy" ::: "memory");
}
/* Full-system data synchronization barrier ("dsb sy"), plus a compiler
 * barrier.
 */
static ALWAYS_INLINE void z_barrier_dsync_fence_full(void)
{
	__asm__ volatile ("dsb sy" ::: "memory");
}
/* Instruction synchronization barrier ("isb"), plus a compiler barrier. */
static ALWAYS_INLINE void z_barrier_isync_fence_full(void)
{
	__asm__ volatile ("isb" ::: "memory");
}
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_BARRIER_ARM64_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/barrier.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 177 |
```objective-c
/*
*
*/
/* ARM64 memory-mapped register I/O accessors, implemented with explicit
 * inline assembly (see the note below about hypervisor MMIO emulation).
 */
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_SYS_IO_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_SYS_IO_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <zephyr/sys/sys_io.h>
#include <zephyr/sys/barrier.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Memory mapped registers I/O functions */
/*
* We need to use explicit assembler instruction there, because with classic
* "volatile pointer" approach compiler might generate instruction with
* immediate value like
*
* str w4, [x1], #4
*
* Such instructions produce invalid syndrome in HSR register, so hypervisor
* can't emulate MMIO when it traps memory access.
*/
/* 8-bit MMIO read: explicit "ldrb" keeps the access hypervisor-emulatable
 * (see note above); the dmb afterwards orders it against later accesses.
 */
static ALWAYS_INLINE uint8_t sys_read8(mem_addr_t addr)
{
	uint8_t val;

	__asm__ volatile("ldrb %w0, [%1]" : "=r" (val) : "r" (addr));

	barrier_dmem_fence_full();
	return val;
}
/* 8-bit MMIO write: the dmb first orders prior accesses before the store;
 * explicit "strb" keeps the access hypervisor-emulatable (see note above).
 */
static ALWAYS_INLINE void sys_write8(uint8_t data, mem_addr_t addr)
{
	barrier_dmem_fence_full();

	__asm__ volatile("strb %w0, [%1]" : : "r" (data), "r" (addr));
}
/* 16-bit MMIO read ("ldrh"); see sys_read8 for the asm/barrier rationale. */
static ALWAYS_INLINE uint16_t sys_read16(mem_addr_t addr)
{
	uint16_t val;

	__asm__ volatile("ldrh %w0, [%1]" : "=r" (val) : "r" (addr));

	barrier_dmem_fence_full();
	return val;
}
/* 16-bit MMIO write ("strh"); see sys_write8 for the asm/barrier rationale. */
static ALWAYS_INLINE void sys_write16(uint16_t data, mem_addr_t addr)
{
	barrier_dmem_fence_full();

	__asm__ volatile("strh %w0, [%1]" : : "r" (data), "r" (addr));
}
/* 32-bit MMIO read ("ldr", W register); see sys_read8 for the rationale. */
static ALWAYS_INLINE uint32_t sys_read32(mem_addr_t addr)
{
	uint32_t val;

	__asm__ volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr));

	barrier_dmem_fence_full();
	return val;
}
/* 32-bit MMIO write ("str", W register); see sys_write8 for the rationale. */
static ALWAYS_INLINE void sys_write32(uint32_t data, mem_addr_t addr)
{
	barrier_dmem_fence_full();

	__asm__ volatile("str %w0, [%1]" : : "r" (data), "r" (addr));
}
/* 64-bit MMIO read ("ldr", X register); see sys_read8 for the rationale. */
static ALWAYS_INLINE uint64_t sys_read64(mem_addr_t addr)
{
	uint64_t val;

	__asm__ volatile("ldr %x0, [%1]" : "=r" (val) : "r" (addr));

	barrier_dmem_fence_full();
	return val;
}
/* 64-bit MMIO write ("str", X register); see sys_write8 for the rationale. */
static ALWAYS_INLINE void sys_write64(uint64_t data, mem_addr_t addr)
{
	barrier_dmem_fence_full();

	__asm__ volatile("str %x0, [%1]" : : "r" (data), "r" (addr));
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_SYS_IO_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/sys_io.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 668 |
```objective-c
/*
*
*/
/**
* @file
* @brief ARM AArch64 public error handling
*
* ARM AArch64-specific kernel error handling interface. Included by arch.h.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_ERROR_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_ERROR_H_
#include <zephyr/arch/arm64/syscall.h>
#include <zephyr/arch/arm64/exception.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Raise a fatal runtime exception from C code: the reason code is pinned
 * to x8 and an SVC with immediate _SVC_CALL_RUNTIME_EXCEPT traps into the
 * kernel. Wrapped in do/while(false) so it behaves as a single statement.
 */
#define ARCH_EXCEPT(reason_p) \
do { \
	register uint64_t x8 __asm__("x8") = reason_p; \
	\
	__asm__ volatile("svc %[id]\n" \
			 : \
			 : [id] "i" (_SVC_CALL_RUNTIME_EXCEPT), \
			   "r" (x8) \
			 : "memory"); \
} while (false)
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_ERROR_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/error.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 224 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_THREAD_STACK_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_THREAD_STACK_H_
#include <zephyr/arch/arm64/mm.h>
#define ARCH_STACK_PTR_ALIGN 16
#if defined(CONFIG_USERSPACE) || defined(CONFIG_ARM64_STACK_PROTECTION)
#define Z_ARM64_STACK_BASE_ALIGN MEM_DOMAIN_ALIGN_AND_SIZE
#define Z_ARM64_STACK_SIZE_ALIGN MEM_DOMAIN_ALIGN_AND_SIZE
#else
#define Z_ARM64_STACK_BASE_ALIGN ARCH_STACK_PTR_ALIGN
#define Z_ARM64_STACK_SIZE_ALIGN ARCH_STACK_PTR_ALIGN
#endif
#if defined(CONFIG_ARM64_STACK_PROTECTION)
#define Z_ARM64_STACK_GUARD_SIZE MEM_DOMAIN_ALIGN_AND_SIZE
#define Z_ARM64_K_STACK_BASE_ALIGN MEM_DOMAIN_ALIGN_AND_SIZE
#else
#define Z_ARM64_STACK_GUARD_SIZE 0
#define Z_ARM64_K_STACK_BASE_ALIGN ARCH_STACK_PTR_ALIGN
#endif
/*
* [ see also comments in arch/arm64/core/thread.c ]
*
* High memory addresses
*
* +-------------------+ <- thread.stack_info.start + thread.stack_info.size
* | TLS |
* +-------------------+ <- initial sp (computable with thread.stack_info.delta)
* | |
* | Used stack |
* | |
* +...................+ <- thread's current stack pointer
* | |
* | Unused stack |
* | |
* +-------------------+ <- thread.stack_info.start
* | Privileged stack | } K_(THREAD|KERNEL)_STACK_RESERVED
* +-------------------+ <- thread stack limit (update on every context switch)
* | Stack guard | } Z_ARM64_STACK_GUARD_SIZE (protected by MMU/MPU)
* +-------------------+ <- thread.stack_obj
*
* Low Memory addresses
*/
/* thread stack */
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) Z_ARM64_STACK_BASE_ALIGN
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
ROUND_UP((size), Z_ARM64_STACK_SIZE_ALIGN)
/* Space reserved below stack_info.start: the privileged stack plus the
 * MPU/MMU stack guard (see the layout diagram above). Parenthesized so
 * the sum expands safely inside larger arithmetic expressions.
 */
#define ARCH_THREAD_STACK_RESERVED (CONFIG_PRIVILEGED_STACK_SIZE + \
				    Z_ARM64_STACK_GUARD_SIZE)
/* kernel stack */
#define ARCH_KERNEL_STACK_RESERVED Z_ARM64_STACK_GUARD_SIZE
#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_ARM64_K_STACK_BASE_ALIGN
#ifndef _ASMLANGUAGE
/* Reserved area at the base of each thread stack object holding the
 * per-thread privileged stack (see the layout diagram above).
 */
struct z_arm64_thread_stack_header {
	char privilege_stack[CONFIG_PRIVILEGED_STACK_SIZE];
} __packed __aligned(Z_ARM64_STACK_BASE_ALIGN);
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_THREAD_STACK_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/thread_stack.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 544 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_SMCCC_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_SMCCC_H_
/*
* Result from SMC/HVC call
* @a0-a7 result values from registers 0 to 7
*/
/* Register values x0-x7 as returned by arm_smccc_smc()/arm_smccc_hvc(). */
struct arm_smccc_res {
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
};

typedef struct arm_smccc_res arm_smccc_res_t;
/* Conduit instruction used for SMCCC calls: none available, SMC, or HVC. */
enum arm_smccc_conduit {
	SMCCC_CONDUIT_NONE,
	SMCCC_CONDUIT_SMC,
	SMCCC_CONDUIT_HVC,
};
/*
* @brief Make HVC calls
*
* @param a0 function identifier
 * @param a1-a7 parameter registers
* @param res results
*/
void arm_smccc_hvc(unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3,
unsigned long a4, unsigned long a5,
unsigned long a6, unsigned long a7,
struct arm_smccc_res *res);
/*
* @brief Make SMC calls
*
* @param a0 function identifier
 * @param a1-a7 parameter registers
* @param res results
*/
void arm_smccc_smc(unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3,
unsigned long a4, unsigned long a5,
unsigned long a6, unsigned long a7,
struct arm_smccc_res *res);
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_SMCCC_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/arm-smccc.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 351 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_LIB_HELPERS_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_LIB_HELPERS_H_
#ifndef _ASMLANGUAGE
#include <zephyr/arch/arm64/cpu.h>
#include <stdint.h>
/* All the macros need a memory clobber */
#define read_sysreg(reg) \
({ \
uint64_t reg_val; \
__asm__ volatile ("mrs %0, " STRINGIFY(reg) \
: "=r" (reg_val) :: "memory"); \
reg_val; \
})
#define write_sysreg(val, reg) \
({ \
__asm__ volatile ("msr " STRINGIFY(reg) ", %0" \
:: "r" (val) : "memory"); \
})
#define zero_sysreg(reg) \
({ \
__asm__ volatile ("msr " STRINGIFY(reg) ", xzr" \
::: "memory"); \
})
#define MAKE_REG_HELPER(reg) \
static ALWAYS_INLINE uint64_t read_##reg(void) \
{ \
return read_sysreg(reg); \
} \
static ALWAYS_INLINE void write_##reg(uint64_t val) \
{ \
write_sysreg(val, reg); \
} \
static ALWAYS_INLINE void zero_##reg(void) \
{ \
zero_sysreg(reg); \
}
#define MAKE_REG_HELPER_EL123(reg) \
MAKE_REG_HELPER(reg##_el1) \
MAKE_REG_HELPER(reg##_el2) \
MAKE_REG_HELPER(reg##_el3)
MAKE_REG_HELPER(ccsidr_el1);
MAKE_REG_HELPER(clidr_el1);
MAKE_REG_HELPER(cntfrq_el0);
MAKE_REG_HELPER(cnthctl_el2);
MAKE_REG_HELPER(cnthp_ctl_el2);
MAKE_REG_HELPER(cnthps_ctl_el2);
MAKE_REG_HELPER(cntv_ctl_el0)
MAKE_REG_HELPER(cntv_cval_el0)
MAKE_REG_HELPER(cntvct_el0);
MAKE_REG_HELPER(cntvoff_el2);
MAKE_REG_HELPER(currentel);
MAKE_REG_HELPER(csselr_el1);
MAKE_REG_HELPER(daif)
MAKE_REG_HELPER(hcr_el2);
MAKE_REG_HELPER(id_aa64pfr0_el1);
MAKE_REG_HELPER(id_aa64mmfr0_el1);
MAKE_REG_HELPER(mpidr_el1);
MAKE_REG_HELPER(par_el1);
#if !defined(CONFIG_ARMV8_R)
MAKE_REG_HELPER(scr_el3);
#endif /* CONFIG_ARMV8_R */
MAKE_REG_HELPER(tpidrro_el0);
MAKE_REG_HELPER(vmpidr_el2);
MAKE_REG_HELPER(sp_el0);
MAKE_REG_HELPER_EL123(actlr)
MAKE_REG_HELPER_EL123(cpacr)
MAKE_REG_HELPER_EL123(cptr)
MAKE_REG_HELPER_EL123(elr)
MAKE_REG_HELPER_EL123(esr)
MAKE_REG_HELPER_EL123(far)
MAKE_REG_HELPER_EL123(mair)
MAKE_REG_HELPER_EL123(sctlr)
MAKE_REG_HELPER_EL123(spsr)
MAKE_REG_HELPER_EL123(tcr)
MAKE_REG_HELPER_EL123(ttbr0)
MAKE_REG_HELPER_EL123(vbar)
#if defined(CONFIG_ARM_MPU)
/* Armv8-R aarch64 mpu registers */
#define mpuir_el1 S3_0_c0_c0_4
#define prselr_el1 S3_0_c6_c2_1
#define prbar_el1 S3_0_c6_c8_0
#define prlar_el1 S3_0_c6_c8_1
MAKE_REG_HELPER(mpuir_el1);
MAKE_REG_HELPER(prselr_el1);
MAKE_REG_HELPER(prbar_el1);
MAKE_REG_HELPER(prlar_el1);
#endif
/* Unmask debug exceptions: clear the D bit in DAIF via "msr DAIFClr". */
static ALWAYS_INLINE void enable_debug_exceptions(void)
{
	__asm__ volatile ("msr DAIFClr, %0"
			  :: "i" (DAIFCLR_DBG_BIT) : "memory");
}
/* Mask debug exceptions: set the D bit in DAIF via "msr DAIFSet". */
static ALWAYS_INLINE void disable_debug_exceptions(void)
{
	__asm__ volatile ("msr DAIFSet, %0"
			  :: "i" (DAIFSET_DBG_BIT) : "memory");
}
/* Unmask SError (asynchronous abort) exceptions: clear the A bit in DAIF. */
static ALWAYS_INLINE void enable_serror_exceptions(void)
{
	__asm__ volatile ("msr DAIFClr, %0"
			  :: "i" (DAIFCLR_ABT_BIT) : "memory");
}
/* Mask SError (asynchronous abort) exceptions: set the A bit in DAIF. */
static ALWAYS_INLINE void disable_serror_exceptions(void)
{
	__asm__ volatile ("msr DAIFSet, %0"
			  :: "i" (DAIFSET_ABT_BIT) : "memory");
}
/* Unmask IRQs: clear the I bit in DAIF. */
static ALWAYS_INLINE void enable_irq(void)
{
	__asm__ volatile ("msr DAIFClr, %0"
			  :: "i" (DAIFCLR_IRQ_BIT) : "memory");
}
/* Mask IRQs: set the I bit in DAIF. */
static ALWAYS_INLINE void disable_irq(void)
{
	__asm__ volatile ("msr DAIFSet, %0"
			  :: "i" (DAIFSET_IRQ_BIT) : "memory");
}
/* Unmask FIQs: clear the F bit in DAIF. */
static ALWAYS_INLINE void enable_fiq(void)
{
	__asm__ volatile ("msr DAIFClr, %0"
			  :: "i" (DAIFCLR_FIQ_BIT) : "memory");
}
/* Mask FIQs: set the F bit in DAIF. */
static ALWAYS_INLINE void disable_fiq(void)
{
	__asm__ volatile ("msr DAIFSet, %0"
			  :: "i" (DAIFSET_FIQ_BIT) : "memory");
}
#define sev() __asm__ volatile("sev" : : : "memory")
#define wfe() __asm__ volatile("wfe" : : : "memory")
#define wfi() __asm__ volatile("wfi" : : : "memory")
/*
 * Check whether exception level @el (0..3) is implemented on this CPU,
 * based on the per-EL presence fields in ID_AA64PFR0_EL1.
 */
static inline bool is_el_implemented(unsigned int el)
{
	if (el > 3) {
		return false;
	}

	unsigned int field_shift = ID_AA64PFR0_EL1_SHIFT * el;
	uint64_t el_field = (read_id_aa64pfr0_el1() >> field_shift) &
			    ID_AA64PFR0_ELX_MASK;

	return el_field != 0U;
}
/*
 * Check whether the CPU is currently running at the highest exception
 * level it implements.
 *
 * ID_AA64PFR0_EL1[15:0] holds the EL0..EL3 presence fields (4 bits
 * each), so the index of the top set bit divided by 4 is the highest
 * implemented EL.
 *
 * NOTE(review): __builtin_clz() is undefined for 0; this relies on at
 * least one EL field being nonzero -- confirm EL0 is always reported.
 */
static inline bool is_el_highest_implemented(void)
{
	uint32_t el_highest;
	uint32_t curr_el;

	el_highest = read_id_aa64pfr0_el1() & 0xFFFF;
	el_highest = (31U - __builtin_clz(el_highest)) / 4;

	curr_el = GET_EL(read_currentel());

	/* Equivalent to the original "if (curr_el < el_highest) return
	 * false; return true;" but braced/simplified per coding style.
	 */
	return curr_el >= el_highest;
}
/*
 * Check whether Secure EL2 is supported (ID_AA64PFR0_EL1.SEL2 field
 * nonzero).
 */
static inline bool is_el2_sec_supported(void)
{
	uint64_t sel2_field = read_id_aa64pfr0_el1() >> ID_AA64PFR0_SEL2_SHIFT;

	return (sel2_field & ID_AA64PFR0_SEL2_MASK) != 0U;
}
/*
 * Report the security state the image is built for. SCR_EL3.NS cannot
 * be read below EL3, so this is a build-time answer: secure unless
 * CONFIG_ARMV8_A_NS selects the non-secure world.
 */
static inline bool is_in_secure_state(void)
{
	/* We cannot read SCR_EL3 from EL2 or EL1 */
	return !IS_ENABLED(CONFIG_ARMV8_A_NS);
}
#endif /* !_ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_LIB_HELPERS_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/lib_helpers.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,477 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_CORTEX_R_MPU_ARM_MPU_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_CORTEX_R_MPU_ARM_MPU_H_
/*
* Convenience macros to represent the ARMv8-R64-specific configuration
* for memory access permission and cache-ability attribution.
*/
/* MPU MPUIR Register Definitions */
#define MPU_IR_REGION_Msk (0xFFU)
/* MPU RBAR Register attribute msk Definitions */
#define MPU_RBAR_BASE_Pos 6U
#define MPU_RBAR_BASE_Msk (0x3FFFFFFFFFFFFFFUL << MPU_RBAR_BASE_Pos)
#define MPU_RBAR_SH_Pos 4U
#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos)
#define MPU_RBAR_AP_Pos 2U
#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos)
/* RBAR_EL1 XN */
#define MPU_RBAR_XN_Pos 1U
#define MPU_RBAR_XN_Msk (0x1UL << MPU_RBAR_XN_Pos)
/* MPU PLBAR_ELx Register Definitions */
#define MPU_RLAR_LIMIT_Pos 6U
#define MPU_RLAR_LIMIT_Msk (0x3FFFFFFFFFFFFFFUL << MPU_RLAR_LIMIT_Pos)
#define MPU_RLAR_AttrIndx_Pos 1U
#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos)
#define MPU_RLAR_EN_Msk (0x1UL)
/* PRBAR_ELx: Attribute flag for not-allowing execution (eXecute Never) */
#define NOT_EXEC MPU_RBAR_XN_Msk /* PRBAR_EL1 */
/* PRBAR_ELx: Attribute flag for access permissions */
/* Privileged Read Write, Unprivileged No Access */
#define P_RW_U_NA 0x0U
#define P_RW_U_NA_Msk ((P_RW_U_NA << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk)
/* Privileged Read Write, Unprivileged Read Write */
#define P_RW_U_RW 0x1U
#define P_RW_U_RW_Msk ((P_RW_U_RW << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk)
/* Privileged Read Only, Unprivileged No Access */
#define P_RO_U_NA 0x2U
#define P_RO_U_NA_Msk ((P_RO_U_NA << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk)
/* Privileged Read Only, Unprivileged Read Only */
#define P_RO_U_RO 0x3U
#define P_RO_U_RO_Msk ((P_RO_U_RO << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk)
/* PRBAR_ELx: Attribute flags for share-ability */
#define NON_SHAREABLE 0x0U
#define NON_SHAREABLE_Msk \
((NON_SHAREABLE << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk)
#define OUTER_SHAREABLE 0x2U
#define OUTER_SHAREABLE_Msk \
((OUTER_SHAREABLE << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk)
#define INNER_SHAREABLE 0x3U
#define INNER_SHAREABLE_Msk \
((INNER_SHAREABLE << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk)
/* MPIR_ELx Attribute flags for cache-ability */
/* Memory Attributes for Device Memory
* 1.Gathering (G/nG)
* Determines whether multiple accesses can be merged into a single
* bus transaction.
* nG: Number/size of accesses on the bus = number/size of accesses
* in code.
*
* 2.Reordering (R/nR)
* Determines whether accesses to the same device can be reordered.
* nR: Accesses to the same IMPLEMENTATION DEFINED block size will
* appear on the bus in program order.
*
* 3 Early Write Acknowledgment (E/nE)
* Indicates to the memory system whether a buffer can send
* acknowledgements.
* nE: The response should come from the end slave, not buffering in
* the interconnect.
*/
#define DEVICE_nGnRnE 0x0U
#define DEVICE_nGnRE 0x4U
#define DEVICE_nGRE 0x8U
#define DEVICE_GRE 0xCU
/* Read/Write Allocation Configurations for Cacheable Memory */
#define R_NON_W_NON 0x0U /* Do not allocate Read/Write */
#define R_NON_W_ALLOC 0x1U /* Do not allocate Read, Allocate Write */
#define R_ALLOC_W_NON 0x2U /* Allocate Read, Do not allocate Write */
#define R_ALLOC_W_ALLOC 0x3U /* Allocate Read/Write */
/* Memory Attributes for Normal Memory */
#define NORMAL_O_WT_NT 0x80U /* Normal, Outer Write-through non-transient */
#define NORMAL_O_WB_NT 0xC0U /* Normal, Outer Write-back non-transient */
#define NORMAL_O_NON_C 0x40U /* Normal, Outer Non-Cacheable */
#define NORMAL_I_WT_NT 0x08U /* Normal, Inner Write-through non-transient */
#define NORMAL_I_WB_NT 0x0CU /* Normal, Inner Write-back non-transient */
#define NORMAL_I_NON_C 0x04U /* Normal, Inner Non-Cacheable */
/* Global MAIR configurations */
#define MPU_MAIR_INDEX_DEVICE 0U
#define MPU_MAIR_ATTR_DEVICE (DEVICE_nGnRnE)
#define MPU_MAIR_INDEX_FLASH 1U
#define MPU_MAIR_ATTR_FLASH \
((NORMAL_O_WT_NT | (R_ALLOC_W_NON << 4)) | \
(NORMAL_I_WT_NT | R_ALLOC_W_NON))
#define MPU_MAIR_INDEX_SRAM 2U
#define MPU_MAIR_ATTR_SRAM \
((NORMAL_O_WB_NT | (R_ALLOC_W_ALLOC << 4)) | \
(NORMAL_I_WB_NT | R_ALLOC_W_ALLOC))
#define MPU_MAIR_INDEX_SRAM_NOCACHE 3U
#define MPU_MAIR_ATTR_SRAM_NOCACHE \
((NORMAL_O_NON_C | (R_NON_W_NON << 4)) | \
(NORMAL_I_NON_C | R_NON_W_NON))
#define MPU_MAIR_ATTRS \
((MPU_MAIR_ATTR_DEVICE << (MPU_MAIR_INDEX_DEVICE * 8)) | \
(MPU_MAIR_ATTR_FLASH << (MPU_MAIR_INDEX_FLASH * 8)) | \
(MPU_MAIR_ATTR_SRAM << (MPU_MAIR_INDEX_SRAM * 8)) | \
(MPU_MAIR_ATTR_SRAM_NOCACHE << (MPU_MAIR_INDEX_SRAM_NOCACHE * 8)))
/* Some helper defines for common regions.
*
* Note that the ARMv8-R MPU architecture requires that the
* enabled MPU regions are non-overlapping. Therefore, it is
* recommended to use these helper defines only for configuring
* fixed MPU regions at build-time.
*/
#define REGION_IO_ATTR \
{ \
/* AP, XN, SH */ \
.rbar = NOT_EXEC | P_RW_U_NA_Msk | NON_SHAREABLE_Msk, \
/* Cache-ability */ \
.mair_idx = MPU_MAIR_INDEX_DEVICE, \
}
#define REGION_RAM_ATTR \
{ \
/* AP, XN, SH */ \
.rbar = NOT_EXEC | P_RW_U_NA_Msk | OUTER_SHAREABLE_Msk, \
/* Cache-ability */ \
.mair_idx = MPU_MAIR_INDEX_SRAM, \
}
#define REGION_RAM_NOCACHE_ATTR \
{ \
/* AP, XN, SH */ \
.rbar = NOT_EXEC | P_RW_U_NA_Msk | NON_SHAREABLE_Msk, \
/* Cache-ability */ \
.mair_idx = MPU_MAIR_INDEX_SRAM_NOCACHE, \
}
#define REGION_RAM_TEXT_ATTR \
{ \
/* AP, XN, SH */ \
.rbar = P_RO_U_RO_Msk | INNER_SHAREABLE_Msk, \
/* Cache-ability */ \
.mair_idx = MPU_MAIR_INDEX_SRAM, \
}
#define REGION_RAM_RO_ATTR \
{ \
/* AP, XN, SH */ \
.rbar = NOT_EXEC | P_RO_U_RO_Msk | INNER_SHAREABLE_Msk, \
/* Cache-ability */ \
.mair_idx = MPU_MAIR_INDEX_SRAM, \
}
#if defined(CONFIG_MPU_ALLOW_FLASH_WRITE)
/* Note that the access permissions allow for un-privileged writes
*/
#define REGION_FLASH_ATTR \
{ \
.rbar = P_RW_U_RW_Msk | NON_SHAREABLE_Msk, /* AP, XN, SH */ \
/* Cache-ability */ \
.mair_idx = MPU_MAIR_INDEX_FLASH, \
}
#else /* CONFIG_MPU_ALLOW_FLASH_WRITE */
#define REGION_FLASH_ATTR \
{ \
.rbar = P_RO_U_RO_Msk | NON_SHAREABLE_Msk, /* AP, XN, SH */ \
/* Cache-ability */ \
.mair_idx = MPU_MAIR_INDEX_FLASH, \
}
#endif /* CONFIG_MPU_ALLOW_FLASH_WRITE */
#ifndef _ASMLANGUAGE
/* Per-region attribute bits, split between PRBAR and PRLAR. */
struct arm_mpu_region_attr {
	/* Attributes belonging to PRBAR (XN, AP and SH fields, bits [5:0]) */
	uint8_t rbar : 6;
	/* MAIR index for attribute indirection (PRLAR.AttrIndx) */
	uint8_t mair_idx : 3;
};
/* Region definition data structure */
/* Region definition data structure: one build-time MPU region. */
struct arm_mpu_region {
	/* Region Base Address */
	uint64_t base;
	/* Region limit Address */
	uint64_t limit;
	/* Region Name (for diagnostics) */
	const char *name;
	/* Region Attributes (PRBAR/PRLAR bits, see above) */
	struct arm_mpu_region_attr attr;
};
/* MPU configuration data structure */
/* MPU configuration data structure: the per-SoC table of fixed regions
 * (see the mpu_config declaration below).
 */
struct arm_mpu_config {
	/* Number of regions */
	uint32_t num_regions;
	/* Regions */
	const struct arm_mpu_region *mpu_regions;
};
#define MPU_REGION_ENTRY(_name, _base, _limit, _attr) \
{ \
.name = _name, \
.base = _base, \
.limit = _limit, \
.attr = _attr, \
}
#define K_MEM_PARTITION_P_RW_U_RW ((k_mem_partition_attr_t) \
{(P_RW_U_RW_Msk), MPU_MAIR_INDEX_SRAM})
#define K_MEM_PARTITION_P_RW_U_NA ((k_mem_partition_attr_t) \
{(P_RW_U_NA_Msk), MPU_MAIR_INDEX_SRAM})
#define K_MEM_PARTITION_P_RO_U_RO ((k_mem_partition_attr_t) \
{(P_RO_U_RO_Msk), MPU_MAIR_INDEX_SRAM})
#define K_MEM_PARTITION_P_RO_U_NA ((k_mem_partition_attr_t) \
{(P_RO_U_NA_Msk), MPU_MAIR_INDEX_SRAM})
typedef struct arm_mpu_region_attr k_mem_partition_attr_t;
/* Reference to the MPU configuration.
*
* This struct is defined and populated for each SoC (in the SoC definition),
* and holds the build-time configuration information for the fixed MPU
* regions enabled during kernel initialization. Dynamic MPU regions (e.g.
* for Thread Stack, Stack Guards, etc.) are programmed during runtime, thus,
* not kept here.
*/
extern const struct arm_mpu_config mpu_config;
/* Pairing of an MPU region slot index with the configuration to program
 * into it.
 */
struct dynamic_region_info {
	int index;
	struct arm_mpu_region region_conf;
};
/* Upper bound on runtime-programmed MPU regions: one for the data
 * section, CONFIG_MAX_DOMAIN_PARTITIONS + 2 for memory domains, plus
 * two each when stack protection and userspace are enabled.
 *
 * Parenthesized so the sum expands safely inside larger expressions.
 */
#define ARM64_MPU_MAX_DYNAMIC_REGIONS \
	(1 + /* data section */ \
	 (CONFIG_MAX_DOMAIN_PARTITIONS + 2) + \
	 (IS_ENABLED(CONFIG_ARM64_STACK_PROTECTION) ? 2 : 0) + \
	 (IS_ENABLED(CONFIG_USERSPACE) ? 2 : 0))
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_CORTEX_R_MPU_ARM_MPU_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm64/cortex_r/arm_mpu.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,591 |
```objective-c
/*
*
*/
/**
* @file
* @brief RISCV public exception handling
*
* RISCV-specific kernel exception handling interface.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_EXCEPTION_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_EXCEPTION_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <zephyr/toolchain.h>
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
#include <soc_context.h>
#endif
#ifdef CONFIG_RISCV_SOC_HAS_ISR_STACKING
#include <soc_isr_stacking.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
/*
* The name of the structure which contains soc-specific state, if
* any, as well as the soc_esf_t typedef below, are part of the RISC-V
* arch API.
*
* The contents of the struct are provided by a SOC-specific
* definition in soc_context.h.
*/
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
struct soc_esf {
SOC_ESF_MEMBERS;
};
#endif
#if defined(CONFIG_RISCV_SOC_HAS_ISR_STACKING)
SOC_ISR_STACKING_ESF_DECLARE;
#else
/* Exception stack frame: caller-saved register state (plus mepc/mstatus)
 * saved on trap entry. NOTE(review): the field order appears to mirror
 * the assembly stacking code -- keep ordering and offsets stable.
 */
struct arch_esf {
	unsigned long ra; /* return address */

	unsigned long t0; /* Caller-saved temporary register */
	unsigned long t1; /* Caller-saved temporary register */
	unsigned long t2; /* Caller-saved temporary register */
#if !defined(CONFIG_RISCV_ISA_RV32E)
	unsigned long t3; /* Caller-saved temporary register */
	unsigned long t4; /* Caller-saved temporary register */
	unsigned long t5; /* Caller-saved temporary register */
	unsigned long t6; /* Caller-saved temporary register */
#endif /* !CONFIG_RISCV_ISA_RV32E */

	unsigned long a0; /* function argument/return value */
	unsigned long a1; /* function argument */
	unsigned long a2; /* function argument */
	unsigned long a3; /* function argument */
	unsigned long a4; /* function argument */
	unsigned long a5; /* function argument */
#if !defined(CONFIG_RISCV_ISA_RV32E)
	unsigned long a6; /* function argument */
	unsigned long a7; /* function argument */
#endif /* !CONFIG_RISCV_ISA_RV32E */

	unsigned long mepc; /* machine exception program counter */
	unsigned long mstatus; /* machine status register */

	unsigned long s0; /* callee-saved s0 */

#ifdef CONFIG_USERSPACE
	unsigned long sp; /* preserved (user or kernel) stack pointer */
#endif

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
	struct soc_esf soc_context;
#endif
} __aligned(16);
#endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
typedef struct soc_esf soc_esf_t;
#endif
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_RISCV_EXCEPTION_H_ */
``` | /content/code_sandbox/include/zephyr/arch/riscv/exception.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 646 |
```linker script
/*
*
*/
/**
* @file
* @brief Linker command/script file
*
* Linker script for the Cortex-A platforms.
*/
#include <zephyr/linker/sections.h>
#include <zephyr/devicetree.h>
#include <zephyr/linker/devicetree_regions.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/linker/linker-tool.h>
/* physical address of RAM */
#ifdef CONFIG_XIP
#define ROMABLE_REGION FLASH
#else
#define ROMABLE_REGION RAM
#endif
#define RAMABLE_REGION RAM
#if !defined(CONFIG_XIP) && (CONFIG_FLASH_SIZE == 0)
#define ROM_ADDR RAM_ADDR
#else
#define ROM_ADDR (CONFIG_FLASH_BASE_ADDRESS + CONFIG_FLASH_LOAD_OFFSET)
#endif
#if defined(CONFIG_ROM_END_OFFSET)
#define ROM_END_OFFSET CONFIG_ROM_END_OFFSET
#else
#define ROM_END_OFFSET 0
#endif
#if CONFIG_FLASH_LOAD_SIZE > 0
#define ROM_SIZE (CONFIG_FLASH_LOAD_SIZE - ROM_END_OFFSET)
#else
#define ROM_SIZE (CONFIG_FLASH_SIZE * 1K - CONFIG_FLASH_LOAD_OFFSET - ROM_END_OFFSET)
#endif
#define RAM_SIZE (CONFIG_SRAM_SIZE * 1K)
#define RAM_ADDR CONFIG_SRAM_BASE_ADDRESS
#if defined(CONFIG_ARM_MMU)
_region_min_align = CONFIG_MMU_PAGE_SIZE;
#elif defined(CONFIG_ARM_MPU)
_region_min_align = CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE;
#define BSS_ALIGN ALIGN(_region_min_align)
#else
/* If building without MMU support, use default 8-byte alignment. */
_region_min_align = 8;
#endif
#ifndef BSS_ALIGN
#define BSS_ALIGN
#endif
#define MMU_ALIGN . = ALIGN(_region_min_align)
MEMORY
{
FLASH (rx) : ORIGIN = ROM_ADDR, LENGTH = ROM_SIZE
RAM (wx) : ORIGIN = RAM_ADDR, LENGTH = RAM_SIZE
LINKER_DT_REGIONS()
/* Used by and documented in include/linker/intlist.ld */
IDT_LIST (wx) : ORIGIN = 0xFFFF8000, LENGTH = 32K
}
ENTRY(CONFIG_KERNEL_ENTRY)
/*
 * Output section layout for the AArch64 Zephyr image:
 * ROM region (text, rodata) first, then the RAM region (data, bss),
 * followed by debug/metadata sections and a terminating .last_section.
 */
SECTIONS
{
#include <zephyr/linker/rel-sections.ld>
#ifdef CONFIG_LLEXT
#include <zephyr/linker/llext-sections.ld>
#endif
/*
* .plt and .iplt are here according to 'arm-zephyr-elf-ld --verbose',
* before text section.
*/
/DISCARD/ :
{
*(.plt)
}
/DISCARD/ :
{
*(.iplt)
}
/* ---- ROM (ROMABLE_REGION): code and read-only data ---- */
GROUP_START(ROMABLE_REGION)
__rom_region_start = ROM_ADDR;
SECTION_PROLOGUE(_TEXT_SECTION_NAME,,)
{
__text_region_start = .;
#ifndef CONFIG_XIP
z_mapped_start = .;
#endif
#ifdef CONFIG_AARCH64_IMAGE_HEADER
KEEP(*(.image_header))
KEEP(*(".image_header.*"))
#endif
/* Exception vectors must be KEEP'd: nothing references them directly. */
_vector_start = .;
KEEP(*(.exc_vector_table))
KEEP(*(".exc_vector_table.*"))
#if LINKER_ZEPHYR_FINAL && defined(CONFIG_ISR_TABLES_LOCAL_DECLARATION)
INCLUDE isr_tables_vt.ld
#else
KEEP(*(.vectors))
#endif
_vector_end = .;
*(.text)
*(".text.*")
*(.gnu.linkonce.t.*)
#include <zephyr/linker/kobject-text.ld>
MMU_ALIGN;
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
__text_region_end = .;
__text_region_size = __text_region_end - __text_region_start;
#if defined (CONFIG_CPP)
SECTION_PROLOGUE(.ARM.extab,,)
{
/*
* .ARM.extab section containing exception unwinding information.
*/
*(.ARM.extab* .gnu.linkonce.armextab.*)
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#endif
SECTION_PROLOGUE(.ARM.exidx,,)
{
/*
* This section, related to stack and exception unwinding, is placed
* explicitly to prevent it from being shared between multiple regions.
* It must be defined for gcc to support 64-bit math and avoid
* section overlap.
*/
__exidx_start = .;
#if defined (__GCC_LINKER_CMD__)
*(.ARM.exidx* gnu.linkonce.armexidx.*)
#endif
__exidx_end = .;
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
__rodata_region_start = .;
#include <zephyr/linker/common-rom.ld>
/* Located in generated directory. This file is populated by calling
* zephyr_linker_sources(ROM_SECTIONS ...). Useful for grouping iterable RO structs.
*/
#include <snippets-rom-sections.ld>
#include <zephyr/linker/thread-local-storage.ld>
SECTION_PROLOGUE(_RODATA_SECTION_NAME,,)
{
*(.rodata)
*(".rodata.*")
*(.gnu.linkonce.r.*)
/*
* The following is a workaround to allow compiling with GCC 12 and
* above, which may emit "GOT indirections" for the weak symbol
* references (see the GitHub issue zephyrproject-rtos/sdk-ng#547).
*/
*(.got)
*(.got.plt)
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-rodata.ld>
#include <zephyr/linker/kobject-rom.ld>
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#include <zephyr/linker/cplusplus-rom.ld>
MMU_ALIGN;
__rodata_region_end = .;
__rodata_region_size = __rodata_region_end - __rodata_region_start;
__rom_region_end = .;
/*
* These are here according to 'arm-zephyr-elf-ld --verbose',
* before data section.
*/
/DISCARD/ :
{
*(.igot.plt)
*(.igot)
}
GROUP_END(ROMABLE_REGION)
/* ---- RAM (RAMABLE_REGION): writable data, bss, noinit ---- */
GROUP_START(RAMABLE_REGION)
. = RAM_ADDR;
/* Align the start of image RAM with the
* minimum granularity required by MMU.
*/
. = ALIGN(_region_min_align);
_image_ram_start = .;
#ifdef CONFIG_XIP
z_mapped_start = .;
#endif
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-ram-sections.ld>
#if defined(CONFIG_USERSPACE)
#define APP_SHARED_ALIGN . = ALIGN(_region_min_align);
#define SMEM_PARTITION_ALIGN(size) MMU_ALIGN
#if defined(CONFIG_ARM_MPU)
/*
* When _app_smem region is empty, alignment is also needed. If there
* is no alignment, the _app_smem_start used by arm mpu can be lower
* than __rodata_region_end, and this two regions can overlap.
* The Armv8-R aarch64 MPU does not allow overlapped regions.
*/
#define EMPTY_APP_SHARED_ALIGN APP_SHARED_ALIGN
#endif
#include <app_smem.ld>
_app_smem_size = _app_smem_end - _app_smem_start;
_app_smem_rom_start = LOADADDR(_APP_SMEM_SECTION_NAME);
#endif /* CONFIG_USERSPACE */
SECTION_DATA_PROLOGUE(_BSS_SECTION_NAME,(NOLOAD), BSS_ALIGN)
{
. = ALIGN(8);
__bss_start = .;
__kernel_ram_start = .;
*(.bss)
*(".bss.*")
*(COMMON)
*(".kernel_bss.*")
__bss_end = ALIGN(8);
} GROUP_NOLOAD_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
#include <zephyr/linker/common-noinit.ld>
SECTION_DATA_PROLOGUE(_DATA_SECTION_NAME,,)
{
__data_region_start = .;
__data_start = .;
*(.data)
*(".data.*")
*(".kernel.*")
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-rwdata.ld>
__data_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
__data_size = __data_end - __data_start;
__data_load_start = LOADADDR(_DATA_SECTION_NAME);
__data_region_load_start = LOADADDR(_DATA_SECTION_NAME);
#include <zephyr/linker/common-ram.ld>
#include <zephyr/linker/kobject-data.ld>
#include <zephyr/linker/cplusplus-ram.ld>
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-data-sections.ld>
__data_region_end = .;
/* Define linker symbols */
__kernel_ram_end = RAM_ADDR + RAM_SIZE;
__kernel_ram_size = __kernel_ram_end - __kernel_ram_start;
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-sections.ld>
#define LAST_RAM_ALIGN MMU_ALIGN;
#include <zephyr/linker/ram-end.ld>
GROUP_END(RAMABLE_REGION)
#include <zephyr/linker/debug-sections.ld>
SECTION_PROLOGUE(.ARM.attributes, 0,)
{
KEEP(*(.ARM.attributes))
KEEP(*(.gnu.attributes))
}
/DISCARD/ : { *(.note.GNU-stack) }
/* Output section descriptions are needed for these sections to suppress
* warnings when "--orphan-handling=warn" is set for lld.
*/
#if defined(CONFIG_LLVM_USE_LLD)
SECTION_PROLOGUE(.symtab, 0,) { *(.symtab) }
SECTION_PROLOGUE(.strtab, 0,) { *(.strtab) }
SECTION_PROLOGUE(.shstrtab, 0,) { *(.shstrtab) }
#endif
/* Sections generated from 'zephyr,memory-region' nodes */
LINKER_DT_SECTIONS()
/* Must be last in romable region */
SECTION_PROLOGUE(.last_section,,)
{
#ifdef CONFIG_LINKER_LAST_SECTION_ID
/* Fill last section with a word to ensure location counter and actual rom
* region data usage match. */
LONG(CONFIG_LINKER_LAST_SECTION_ID_PATTERN)
#endif
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
/* To provide the image size as a const expression,
* calculate this value here. */
_flash_used = LOADADDR(.last_section) + SIZEOF(.last_section) - __rom_region_start;
}
``` | /content/code_sandbox/include/zephyr/arch/arm64/scripts/linker.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,356 |
```objective-c
/*
*
*/
/**
* @file
* @brief RISC-V public interrupt handling
*
* RISC-V-specific kernel interrupt handling interface.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_IRQ_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_IRQ_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <zephyr/sys/util_macro.h>
#ifndef _ASMLANGUAGE
#include <zephyr/irq.h>
#include <zephyr/sw_isr_table.h>
#include <stdbool.h>
#endif /* !_ASMLANGUAGE */
/* Exceptions 0-15 (MCAUSE interrupt=0) */
/* Environment Call from U-mode */
#define RISCV_EXC_ECALLU 8
/** Environment Call from M-mode */
#define RISCV_EXC_ECALLM 11
/* IRQs 0-15 (MCAUSE interrupt=1) */
/** Machine Software Interrupt */
#define RISCV_IRQ_MSOFT 3
/** Machine External Interrupt */
#define RISCV_IRQ_MEXT 11
#ifdef CONFIG_64BIT
#define RISCV_MCAUSE_IRQ_POS 63U
#define RISCV_MCAUSE_IRQ_BIT BIT64(RISCV_MCAUSE_IRQ_POS)
#else
#define RISCV_MCAUSE_IRQ_POS 31U
#define RISCV_MCAUSE_IRQ_BIT BIT(RISCV_MCAUSE_IRQ_POS)
#endif
#ifndef _ASMLANGUAGE
extern void arch_irq_enable(unsigned int irq);
extern void arch_irq_disable(unsigned int irq);
extern int arch_irq_is_enabled(unsigned int irq);
#if defined(CONFIG_RISCV_HAS_PLIC) || defined(CONFIG_RISCV_HAS_CLIC)
extern void z_riscv_irq_priority_set(unsigned int irq,
unsigned int prio,
uint32_t flags);
#else
#define z_riscv_irq_priority_set(i, p, f) /* Nothing */
#endif /* CONFIG_RISCV_HAS_PLIC || CONFIG_RISCV_HAS_CLIC */
/*
 * Register a regular ISR at build time: place the handler and its argument
 * into the SW ISR table (offset by the SoC's reserved-IRQ count) and program
 * its priority when an interrupt controller (PLIC/CLIC) is present.
 */
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
{ \
Z_ISR_DECLARE(irq_p + CONFIG_RISCV_RESERVED_IRQ_ISR_TABLES_OFFSET, \
0, isr_p, isr_param_p); \
z_riscv_irq_priority_set(irq_p, priority_p, flags_p); \
}
/*
 * Register a "direct" ISR (no argument, minimal kernel wrapping); the entry
 * is flagged ISR_FLAG_DIRECT so the table generator treats it specially.
 */
#define ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
{ \
Z_ISR_DECLARE_DIRECT(irq_p + CONFIG_RISCV_RESERVED_IRQ_ISR_TABLES_OFFSET, \
ISR_FLAG_DIRECT, isr_p); \
z_riscv_irq_priority_set(irq_p, priority_p, flags_p); \
}
#define ARCH_ISR_DIRECT_HEADER() arch_isr_direct_header()
#define ARCH_ISR_DIRECT_FOOTER(swap) arch_isr_direct_footer(swap)
#ifdef CONFIG_TRACING_ISR
extern void sys_trace_isr_enter(void);
extern void sys_trace_isr_exit(void);
#endif
/*
 * Prologue executed at entry of a direct ISR: emits the ISR-enter trace hook
 * (when tracing is enabled) and bumps the per-CPU nesting counter.
 */
static inline void arch_isr_direct_header(void)
{
#ifdef CONFIG_TRACING_ISR
sys_trace_isr_enter();
#endif
/* We need to increment this so that arch_is_in_isr() keeps working */
++(arch_curr_cpu()->nested);
}
extern void __soc_handle_irq(unsigned long mcause);
/*
 * Epilogue executed at exit of a direct ISR: re-reads mcause, lets the SoC
 * layer acknowledge/clear the pending IRQ, drops the nesting counter and
 * emits the ISR-exit trace hook.
 *
 * @param swap Ignored (rescheduling from direct ISRs is not supported yet;
 *             see the TODO below ARCH_ISR_DIRECT_DECLARE).
 */
static inline void arch_isr_direct_footer(int swap)
{
ARG_UNUSED(swap);
unsigned long mcause;
/* Get the IRQ number */
__asm__ volatile("csrr %0, mcause" : "=r" (mcause));
/* Strip the interrupt bit / custom fields, keeping only the cause code. */
mcause &= CONFIG_RISCV_MCAUSE_EXCEPTION_MASK;
/* Clear the pending IRQ */
__soc_handle_irq(mcause);
/* We are not in the ISR anymore */
--(arch_curr_cpu()->nested);
#ifdef CONFIG_TRACING_ISR
sys_trace_isr_exit();
#endif
}
/*
* TODO: Add support for rescheduling
*/
#define ARCH_ISR_DIRECT_DECLARE(name) \
static inline int name##_body(void); \
__attribute__ ((interrupt)) void name(void) \
{ \
ISR_DIRECT_HEADER(); \
name##_body(); \
ISR_DIRECT_FOOTER(0); \
} \
static inline int name##_body(void)
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_RISCV_IRQ_H_ */
``` | /content/code_sandbox/include/zephyr/arch/riscv/irq.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 885 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_RISCV_STRUCTS_H_
#define ZEPHYR_INCLUDE_RISCV_STRUCTS_H_
/* Per CPU architecture specifics */
/* Per CPU architecture specifics.
 * NOTE: fields are accessed from assembly via generated offsets; do not
 * reorder without updating the corresponding offset definitions.
 */
struct _cpu_arch {
#ifdef CONFIG_USERSPACE
/* Kernel stack pointer used while servicing a trap from U-mode. */
unsigned long user_exc_sp;
/* Scratch slots for the user-mode exception entry code. */
unsigned long user_exc_tmp0;
unsigned long user_exc_tmp1;
#endif
#if defined(CONFIG_SMP) || (CONFIG_MP_MAX_NUM_CPUS > 1)
/* Hardware hart ID backing this logical CPU. */
unsigned long hartid;
/* True once the hart has completed bring-up. */
bool online;
#endif
#ifdef CONFIG_FPU_SHARING
/* Thread currently owning the FPU registers (lazy FPU context switch). */
atomic_ptr_val_t fpu_owner;
uint32_t fpu_state;
#endif
};
#endif /* ZEPHYR_INCLUDE_RISCV_STRUCTS_H_ */
``` | /content/code_sandbox/include/zephyr/arch/riscv/structs.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 136 |
```objective-c
/*
*
*/
#ifndef CSR_H_
#define CSR_H_
#define MSTATUS_UIE 0x00000001
#define MSTATUS_SIE 0x00000002
#define MSTATUS_HIE 0x00000004
#define MSTATUS_MIE 0x00000008
#define MSTATUS_UPIE 0x00000010
#define MSTATUS_SPIE 0x00000020
#define MSTATUS_HPIE 0x00000040
#define MSTATUS_MPIE 0x00000080
#define MSTATUS_SPP 0x00000100
#define MSTATUS_HPP 0x00000600
#define MSTATUS_MPP 0x00001800
#define MSTATUS_FS 0x00006000
#define MSTATUS_XS 0x00018000
#define MSTATUS_MPRV 0x00020000
#define MSTATUS_SUM 0x00040000
#define MSTATUS_MXR 0x00080000
#define MSTATUS_TVM 0x00100000
#define MSTATUS_TW 0x00200000
#define MSTATUS_TSR 0x00400000
#define MSTATUS32_SD 0x80000000
#define MSTATUS_UXL 0x0000000300000000
#define MSTATUS_SXL 0x0000000C00000000
#define MSTATUS64_SD 0x8000000000000000
#define SSTATUS_UIE 0x00000001
#define SSTATUS_SIE 0x00000002
#define SSTATUS_UPIE 0x00000010
#define SSTATUS_SPIE 0x00000020
#define SSTATUS_SPP 0x00000100
#define SSTATUS_FS 0x00006000
#define SSTATUS_XS 0x00018000
#define SSTATUS_SUM 0x00040000
#define SSTATUS_MXR 0x00080000
#define SSTATUS32_SD 0x80000000
#define SSTATUS_UXL 0x0000000300000000
#define SSTATUS64_SD 0x8000000000000000
#define DCSR_XDEBUGVER (3U<<30)
#define DCSR_NDRESET (1<<29)
#define DCSR_FULLRESET (1<<28)
#define DCSR_EBREAKM (1<<15)
#define DCSR_EBREAKH (1<<14)
#define DCSR_EBREAKS (1<<13)
#define DCSR_EBREAKU (1<<12)
#define DCSR_STOPCYCLE (1<<10)
#define DCSR_STOPTIME (1<<9)
#define DCSR_CAUSE (7<<6)
#define DCSR_DEBUGINT (1<<5)
#define DCSR_HALT (1<<3)
#define DCSR_STEP (1<<2)
#define DCSR_PRV (3<<0)
#define DCSR_CAUSE_NONE 0
#define DCSR_CAUSE_SWBP 1
#define DCSR_CAUSE_HWBP 2
#define DCSR_CAUSE_DEBUGINT 3
#define DCSR_CAUSE_STEP 4
#define DCSR_CAUSE_HALT 5
#define MCONTROL_TYPE(xlen) (0xfULL<<((xlen)-4))
#define MCONTROL_DMODE(xlen) (1ULL<<((xlen)-5))
#define MCONTROL_MASKMAX(xlen) (0x3fULL<<((xlen)-11))
#define MCONTROL_SELECT (1<<19)
#define MCONTROL_TIMING (1<<18)
#define MCONTROL_ACTION (0x3f<<12)
#define MCONTROL_CHAIN (1<<11)
#define MCONTROL_MATCH (0xf<<7)
#define MCONTROL_M (1<<6)
#define MCONTROL_H (1<<5)
#define MCONTROL_S (1<<4)
#define MCONTROL_U (1<<3)
#define MCONTROL_EXECUTE (1<<2)
#define MCONTROL_STORE (1<<1)
#define MCONTROL_LOAD (1<<0)
#define MCONTROL_TYPE_NONE 0
#define MCONTROL_TYPE_MATCH 2
#define MCONTROL_ACTION_DEBUG_EXCEPTION 0
#define MCONTROL_ACTION_DEBUG_MODE 1
#define MCONTROL_ACTION_TRACE_START 2
#define MCONTROL_ACTION_TRACE_STOP 3
#define MCONTROL_ACTION_TRACE_EMIT 4
#define MCONTROL_MATCH_EQUAL 0
#define MCONTROL_MATCH_NAPOT 1
#define MCONTROL_MATCH_GE 2
#define MCONTROL_MATCH_LT 3
#define MCONTROL_MATCH_MASK_LOW 4
#define MCONTROL_MATCH_MASK_HIGH 5
#define MIP_SSIP (1 << IRQ_S_SOFT)
#define MIP_HSIP (1 << IRQ_H_SOFT)
#define MIP_MSIP (1 << IRQ_M_SOFT)
#define MIP_STIP (1 << IRQ_S_TIMER)
#define MIP_HTIP (1 << IRQ_H_TIMER)
#define MIP_MTIP (1 << IRQ_M_TIMER)
#define MIP_SEIP (1 << IRQ_S_EXT)
#define MIP_HEIP (1 << IRQ_H_EXT)
#define MIP_MEIP (1 << IRQ_M_EXT)
#define SIP_SSIP MIP_SSIP
#define SIP_STIP MIP_STIP
#define PRV_U 0
#define PRV_S 1
#define PRV_H 2
#define PRV_M 3
#define SATP32_MODE 0x80000000
#define SATP32_ASID 0x7FC00000
#define SATP32_PPN 0x003FFFFF
#define SATP64_MODE 0xF000000000000000
#define SATP64_ASID 0x0FFFF00000000000
#define SATP64_PPN 0x00000FFFFFFFFFFF
#define SATP_MODE_OFF 0
#define SATP_MODE_SV32 1
#define SATP_MODE_SV39 8
#define SATP_MODE_SV48 9
#define SATP_MODE_SV57 10
#define SATP_MODE_SV64 11
#define PMP_R 0x01
#define PMP_W 0x02
#define PMP_X 0x04
#define PMP_A 0x18
#define PMP_L 0x80
#define PMP_SHIFT 2
#define PMP_TOR 0x08
#define PMP_NA4 0x10
#define PMP_NAPOT 0x18
#define IRQ_S_SOFT 1
#define IRQ_H_SOFT 2
#define IRQ_M_SOFT 3
#define IRQ_S_TIMER 5
#define IRQ_H_TIMER 6
#define IRQ_M_TIMER 7
#define IRQ_S_EXT 9
#define IRQ_H_EXT 10
#define IRQ_M_EXT 11
#define IRQ_COP 12
#define IRQ_HOST 13
#define DEFAULT_RSTVEC 0x00001000
#define CLINT_BASE 0x02000000
#define CLINT_SIZE 0x000c0000
#define EXT_IO_BASE 0x40000000
#define DRAM_BASE 0x80000000
/* page table entry (PTE) fields */
#define PTE_V 0x001 /* Valid */
#define PTE_R 0x002 /* Read */
#define PTE_W 0x004 /* Write */
#define PTE_X 0x008 /* Execute */
#define PTE_U 0x010 /* User */
#define PTE_G 0x020 /* Global */
#define PTE_A 0x040 /* Accessed */
#define PTE_D 0x080 /* Dirty */
#define PTE_SOFT 0x300 /* Reserved for Software */
#define PTE_PPN_SHIFT 10
#define PTE_TABLE(PTE) (((PTE) & (PTE_V | PTE_R | PTE_W | PTE_X)) == PTE_V)
/**
 * @brief Insert @p fieldval into the bit field selected by mask @p which.
 *
 * ((which) & ~((which)-1)) isolates the lowest set bit of the mask, so the
 * multiplication shifts @p fieldval into position without an explicit shift
 * amount. All other bits of @p val are preserved.
 *
 * Note: the macro must NOT end with a line continuation; a stray trailing
 * backslash after the closing parenthesis splices the next preprocessor
 * directive into this macro's replacement list.
 */
#define INSERT_FIELD(val, which, fieldval) \
	(((val) & ~(which)) | ((fieldval) * ((which) & ~((which)-1))))
/* Read CSR @p csr and return its value. */
#define csr_read(csr) \
({ \
register unsigned long __rv; \
__asm__ volatile ("csrr %0, " STRINGIFY(csr) \
: "=r" (__rv)); \
__rv; \
})
/* Write @p val to CSR @p csr. */
#define csr_write(csr, val) \
({ \
unsigned long __wv = (unsigned long)(val); \
__asm__ volatile ("csrw " STRINGIFY(csr) ", %0" \
: : "rK" (__wv) \
: "memory"); \
})
/* Atomically set the bits of @p val in CSR @p csr; returns the old value. */
#define csr_read_set(csr, val) \
({ \
unsigned long __rsv = (unsigned long)(val); \
__asm__ volatile ("csrrs %0, " STRINGIFY(csr) ", %1" \
: "=r" (__rsv) : "rK" (__rsv) \
: "memory"); \
__rsv; \
})
/* Atomically set the bits of @p val in CSR @p csr (old value discarded). */
#define csr_set(csr, val) \
({ \
unsigned long __sv = (unsigned long)(val); \
__asm__ volatile ("csrs " STRINGIFY(csr) ", %0" \
: : "rK" (__sv) \
: "memory"); \
})
/* Atomically clear the bits of @p val in CSR @p csr; returns the old value. */
#define csr_read_clear(csr, val) \
({ \
unsigned long __rcv = (unsigned long)(val); \
__asm__ volatile ("csrrc %0, " STRINGIFY(csr) ", %1" \
: "=r" (__rcv) : "rK" (__rcv) \
: "memory"); \
__rcv; \
})
/* Atomically clear the bits of @p val in CSR @p csr (old value discarded). */
#define csr_clear(csr, val) \
({ \
unsigned long __cv = (unsigned long)(val); \
__asm__ volatile ("csrc " STRINGIFY(csr) ", %0" \
: : "rK" (__cv) \
: "memory"); \
})
#endif /* CSR_H_ */
``` | /content/code_sandbox/include/zephyr/arch/riscv/csr.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,204 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_INLINES_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_INLINES_H_
#ifndef _ASMLANGUAGE
#include <zephyr/kernel_structs.h>
#include "csr.h"
/* Return the hardware thread (hart) ID of the executing CPU, i.e. the
 * mhartid CSR masked by CONFIG_RISCV_HART_MASK.
 */
static ALWAYS_INLINE uint32_t arch_proc_id(void)
{
return csr_read(mhartid) & ((uintptr_t)CONFIG_RISCV_HART_MASK);
}
/* Return the per-CPU kernel structure for the executing CPU.
 * With SMP or userspace the pointer is stashed in the mscratch CSR at
 * bring-up; otherwise there is only one CPU and cpus[0] is used directly.
 */
static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
{
#if defined(CONFIG_SMP) || defined(CONFIG_USERSPACE)
return (_cpu_t *)csr_read(mscratch);
#else
return &_kernel.cpus[0];
#endif
}
/* Number of CPUs the kernel was configured for (compile-time constant). */
static ALWAYS_INLINE unsigned int arch_num_cpus(void)
{
return CONFIG_MP_MAX_NUM_CPUS;
}
#endif /* !_ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_INLINES_H_ */
``` | /content/code_sandbox/include/zephyr/arch/riscv/arch_inlines.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 183 |
```objective-c
/*
* Contributors: 2018 Antmicro <www.antmicro.com>
*
*/
/**
* @file
* @brief RISCV specific kernel interface header
* This header contains the RISCV specific kernel interface. It is
* included by the generic kernel interface header (arch/cpu.h)
*/
#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_
#include <zephyr/arch/riscv/thread.h>
#include <zephyr/arch/riscv/exception.h>
#include <zephyr/arch/riscv/irq.h>
#include <zephyr/arch/riscv/sys_io.h>
#include <zephyr/arch/common/sys_bitops.h>
#include <zephyr/arch/common/ffs.h>
#if defined(CONFIG_USERSPACE)
#include <zephyr/arch/riscv/syscall.h>
#endif /* CONFIG_USERSPACE */
#include <zephyr/irq.h>
#include <zephyr/sw_isr_table.h>
#include <zephyr/devicetree.h>
#include <zephyr/arch/riscv/csr.h>
#include <zephyr/arch/riscv/exception.h>
#ifdef CONFIG_RISCV_ISA_RV32E
/* Stack alignment for RV32E is 4 bytes */
#define ARCH_STACK_PTR_ALIGN 4
#else
/* stacks, for RISCV architecture stack should be 16byte-aligned */
#define ARCH_STACK_PTR_ALIGN 16
#endif
#define Z_RISCV_STACK_PMP_ALIGN \
MAX(CONFIG_PMP_GRANULARITY, ARCH_STACK_PTR_ALIGN)
#ifdef CONFIG_PMP_STACK_GUARD
/*
* The StackGuard is an area at the bottom of the kernel-mode stack made to
* fault when accessed. It is _not_ faulting when in exception mode as we rely
* on that area to save the exception stack frame and to process said fault.
* Therefore the guard area must be large enough to hold the esf, plus some
* configurable stack wiggle room to execute the fault handling code off of,
* as well as some guard size to cover possible sudden stack pointer
* displacement before the fault.
*/
#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
#define Z_RISCV_STACK_GUARD_SIZE \
Z_POW2_CEIL(MAX(sizeof(struct arch_esf) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
Z_RISCV_STACK_PMP_ALIGN))
#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_RISCV_STACK_GUARD_SIZE
#else
#define Z_RISCV_STACK_GUARD_SIZE \
ROUND_UP(sizeof(struct arch_esf) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_RISCV_STACK_PMP_ALIGN
#endif
/* Kernel-only stacks have the following layout if a stack guard is enabled:
*
* +------------+ <- thread.stack_obj
* | Guard | } Z_RISCV_STACK_GUARD_SIZE
* +------------+ <- thread.stack_info.start
* | Kernel |
* | stack |
* | |
* +............|
* | TLS | } thread.stack_info.delta
* +------------+ <- thread.stack_info.start + thread.stack_info.size
*/
#define ARCH_KERNEL_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE
#else /* !CONFIG_PMP_STACK_GUARD */
#define Z_RISCV_STACK_GUARD_SIZE 0
#endif
#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
/* The privilege elevation stack is located in another area of memory
* generated at build time by gen_kobject_list.py
*
* +------------+ <- thread.arch.priv_stack_start
* | Guard | } Z_RISCV_STACK_GUARD_SIZE
* +------------+
* | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
* +------------+ <- thread.arch.priv_stack_start +
* CONFIG_PRIVILEGED_STACK_SIZE +
* Z_RISCV_STACK_GUARD_SIZE
*
* The main stack will be initially (or potentially only) used by kernel
* mode so we need to make room for a possible stack guard area when enabled:
*
* +------------+ <- thread.stack_obj
* | Guard | } Z_RISCV_STACK_GUARD_SIZE
* +............| <- thread.stack_info.start
* | Thread |
* | stack |
* | |
* +............|
* | TLS | } thread.stack_info.delta
* +------------+ <- thread.stack_info.start + thread.stack_info.size
*
* When transitioning to user space, the guard area will be removed from
* the main stack. Any thread running in user mode will have full access
* to the region denoted by thread.stack_info. Make it PMP-NAPOT compatible.
*
* +------------+ <- thread.stack_obj = thread.stack_info.start
* | Thread |
* | stack |
* | |
* +............|
* | TLS | } thread.stack_info.delta
* +------------+ <- thread.stack_info.start + thread.stack_info.size
*/
#define ARCH_THREAD_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
Z_POW2_CEIL(MAX(MAX(size, CONFIG_PRIVILEGED_STACK_SIZE), \
Z_RISCV_STACK_PMP_ALIGN))
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) \
ARCH_THREAD_STACK_SIZE_ADJUST(size)
#else /* !CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
/* The stack object will contain the PMP guard, the privilege stack, and then
* the usermode stack buffer in that order:
*
* +------------+ <- thread.stack_obj
* | Guard | } Z_RISCV_STACK_GUARD_SIZE
* +------------+
* | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
* +------------+ <- thread.stack_info.start
* | Thread |
* | stack |
* | |
* +............|
* | TLS | } thread.stack_info.delta
* +------------+ <- thread.stack_info.start + thread.stack_info.size
*/
#define ARCH_THREAD_STACK_RESERVED \
ROUND_UP(Z_RISCV_STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE, \
Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
ROUND_UP(size, Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) Z_RISCV_STACK_PMP_ALIGN
#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
#ifdef CONFIG_64BIT
#define RV_REGSIZE 8
#define RV_REGSHIFT 3
#else
#define RV_REGSIZE 4
#define RV_REGSHIFT 2
#endif
/* Common mstatus bits. All supported cores today have the same
* layouts.
*/
#define MSTATUS_IEN (1UL << 3)
#define MSTATUS_MPP_M (3UL << 11)
#define MSTATUS_MPIE_EN (1UL << 7)
#define MSTATUS_FS_OFF (0UL << 13)
#define MSTATUS_FS_INIT (1UL << 13)
#define MSTATUS_FS_CLEAN (2UL << 13)
#define MSTATUS_FS_DIRTY (3UL << 13)
/* This comes from openisa_rv32m1, but doesn't seem to hurt on other
* platforms:
* - Preserve machine privileges in MPP. If you see any documentation
* telling you that MPP is read-only on this SoC, don't believe its
* lies.
* - Enable interrupts when exiting from exception into a new thread
* by setting MPIE now, so it will be copied into IE on mret.
*/
#define MSTATUS_DEF_RESTORE (MSTATUS_MPP_M | MSTATUS_MPIE_EN)
#ifndef _ASMLANGUAGE
#include <zephyr/sys/util.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef CONFIG_IRQ_VECTOR_TABLE_JUMP_BY_CODE
#define ARCH_IRQ_VECTOR_JUMP_CODE(v) "j " STRINGIFY(v)
#endif
/* Kernel macros for memory attribution
* (access permissions and cache-ability).
*
* The macros are to be stored in k_mem_partition_attr_t
* objects. The format of a k_mem_partition_attr_t object
* is an uint8_t composed by configuration register flags
* located in arch/riscv/include/core_pmp.h
*/
/* Read-Write access permission attributes */
#define K_MEM_PARTITION_P_RW_U_RW ((k_mem_partition_attr_t) \
{PMP_R | PMP_W})
#define K_MEM_PARTITION_P_RW_U_RO ((k_mem_partition_attr_t) \
{PMP_R})
#define K_MEM_PARTITION_P_RW_U_NA ((k_mem_partition_attr_t) \
{0})
#define K_MEM_PARTITION_P_RO_U_RO ((k_mem_partition_attr_t) \
{PMP_R})
#define K_MEM_PARTITION_P_RO_U_NA ((k_mem_partition_attr_t) \
{0})
#define K_MEM_PARTITION_P_NA_U_NA ((k_mem_partition_attr_t) \
{0})
/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RWX_U_RWX ((k_mem_partition_attr_t) \
{PMP_R | PMP_W | PMP_X})
#define K_MEM_PARTITION_P_RX_U_RX ((k_mem_partition_attr_t) \
{PMP_R | PMP_X})
/* Typedef for the k_mem_partition attribute */
typedef struct {
uint8_t pmp_attr;
} k_mem_partition_attr_t;
struct arch_mem_domain {
unsigned int pmp_update_nr;
};
extern void z_irq_spurious(const void *unused);
/*
* use atomic instruction csrrc to lock global irq
* csrrc: atomic read and clear bits in CSR register
*/
/* Disable interrupts globally and return the previous mstatus value as the
 * lock key. csrrc atomically clears MSTATUS.MIE while reading the old value,
 * so no interrupt can slip in between the read and the clear.
 */
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
return z_soc_irq_lock();
#else
unsigned int key;
__asm__ volatile ("csrrc %0, mstatus, %1"
: "=r" (key)
: "rK" (MSTATUS_IEN)
: "memory");
return key;
#endif
}
/*
* use atomic instruction csrs to unlock global irq
* csrs: atomic set bits in CSR register
*/
/* Restore the interrupt state captured by arch_irq_lock(). Only the MIE bit
 * of @p key is replayed (csrs sets bits), so unlocking with a key taken while
 * interrupts were already disabled is a no-op.
 */
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
z_soc_irq_unlock(key);
#else
__asm__ volatile ("csrs mstatus, %0"
:
: "r" (key & MSTATUS_IEN)
: "memory");
#endif
}
/* Return true if the lock key @p key was taken with interrupts enabled,
 * i.e. whether arch_irq_unlock(key) will actually re-enable interrupts.
 */
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
return z_soc_irq_unlocked(key);
#else
return (key & MSTATUS_IEN) != 0;
#endif
}
/* Execute a single no-op instruction (used for busy-wait calibration etc.). */
static ALWAYS_INLINE void arch_nop(void)
{
__asm__ volatile("nop");
}
extern uint32_t sys_clock_cycle_get_32(void);
/* Architecture hook for the 32-bit cycle counter: simply defers to the
 * system clock driver's counter.
 */
static inline uint32_t arch_k_cycle_get_32(void)
{
	uint32_t cycles = sys_clock_cycle_get_32();

	return cycles;
}
extern uint64_t sys_clock_cycle_get_64(void);
/* Architecture hook for the 64-bit cycle counter: simply defers to the
 * system clock driver's counter.
 */
static inline uint64_t arch_k_cycle_get_64(void)
{
	uint64_t cycles = sys_clock_cycle_get_64();

	return cycles;
}
#include <zephyr/arch/riscv/error.h>
#ifdef __cplusplus
}
#endif
#endif /*_ASMLANGUAGE */
#if defined(CONFIG_RISCV_PRIVILEGED)
#include <zephyr/arch/riscv/riscv-privileged/asm_inline.h>
#endif
#endif
``` | /content/code_sandbox/include/zephyr/arch/riscv/arch.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,392 |
```objective-c
/*
*
*/
/**
* @file
* @brief RISCV specific syscall header
*
* This header contains the RISCV specific syscall interface. It is
* included by the syscall interface architecture-abstraction header
* (include/arch/syscall.h)
*/
#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_SYSCALL_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_SYSCALL_H_
/*
* Privileged mode system calls
*/
#define RV_ECALL_RUNTIME_EXCEPT 0
#define RV_ECALL_IRQ_OFFLOAD 1
#define RV_ECALL_SCHEDULE 2
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Syscall invocation macros. riscv-specific machine constraints used to ensure
* args land in the proper registers.
*/
/* Issue a 6-argument system call: args are pinned to a0-a5 and the call ID
 * to t0 per the RISC-V syscall convention, then ecall traps to M-mode.
 * The return value comes back in a0.
 */
static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3, uintptr_t arg4,
uintptr_t arg5, uintptr_t arg6,
uintptr_t call_id)
{
register unsigned long a0 __asm__ ("a0") = arg1;
register unsigned long a1 __asm__ ("a1") = arg2;
register unsigned long a2 __asm__ ("a2") = arg3;
register unsigned long a3 __asm__ ("a3") = arg4;
register unsigned long a4 __asm__ ("a4") = arg5;
register unsigned long a5 __asm__ ("a5") = arg6;
register unsigned long t0 __asm__ ("t0") = call_id;
__asm__ volatile ("ecall"
: "+r" (a0)
: "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5),
"r" (t0)
: "memory");
return a0;
}
/* Issue a 5-argument system call (see arch_syscall_invoke6 for the ABI). */
static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3, uintptr_t arg4,
uintptr_t arg5,
uintptr_t call_id)
{
register unsigned long a0 __asm__ ("a0") = arg1;
register unsigned long a1 __asm__ ("a1") = arg2;
register unsigned long a2 __asm__ ("a2") = arg3;
register unsigned long a3 __asm__ ("a3") = arg4;
register unsigned long a4 __asm__ ("a4") = arg5;
register unsigned long t0 __asm__ ("t0") = call_id;
__asm__ volatile ("ecall"
: "+r" (a0)
: "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (t0)
: "memory");
return a0;
}
/* Issue a 4-argument system call (see arch_syscall_invoke6 for the ABI). */
static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3, uintptr_t arg4,
uintptr_t call_id)
{
register unsigned long a0 __asm__ ("a0") = arg1;
register unsigned long a1 __asm__ ("a1") = arg2;
register unsigned long a2 __asm__ ("a2") = arg3;
register unsigned long a3 __asm__ ("a3") = arg4;
register unsigned long t0 __asm__ ("t0") = call_id;
__asm__ volatile ("ecall"
: "+r" (a0)
: "r" (a1), "r" (a2), "r" (a3), "r" (t0)
: "memory");
return a0;
}
/* Issue a 3-argument system call (see arch_syscall_invoke6 for the ABI). */
static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3,
uintptr_t call_id)
{
register unsigned long a0 __asm__ ("a0") = arg1;
register unsigned long a1 __asm__ ("a1") = arg2;
register unsigned long a2 __asm__ ("a2") = arg3;
register unsigned long t0 __asm__ ("t0") = call_id;
__asm__ volatile ("ecall"
: "+r" (a0)
: "r" (a1), "r" (a2), "r" (t0)
: "memory");
return a0;
}
/* Issue a 2-argument system call (see arch_syscall_invoke6 for the ABI). */
static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
uintptr_t call_id)
{
register unsigned long a0 __asm__ ("a0") = arg1;
register unsigned long a1 __asm__ ("a1") = arg2;
register unsigned long t0 __asm__ ("t0") = call_id;
__asm__ volatile ("ecall"
: "+r" (a0)
: "r" (a1), "r" (t0)
: "memory");
return a0;
}
/* Issue a 1-argument system call (see arch_syscall_invoke6 for the ABI). */
static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1, uintptr_t call_id)
{
register unsigned long a0 __asm__ ("a0") = arg1;
register unsigned long t0 __asm__ ("t0") = call_id;
__asm__ volatile ("ecall"
: "+r" (a0)
: "r" (t0)
: "memory");
return a0;
}
/* Issue a system call with no arguments; a0 is output-only here since
 * nothing is passed in (see arch_syscall_invoke6 for the ABI).
 */
static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
{
register unsigned long a0 __asm__ ("a0");
register unsigned long t0 __asm__ ("t0") = call_id;
__asm__ volatile ("ecall"
: "=r" (a0)
: "r" (t0)
: "memory");
return a0;
}
#ifdef CONFIG_USERSPACE
register unsigned long riscv_tp_reg __asm__ ("tp");
/* Return true when the caller is executing in user (U-mode) context.
 * The answer lives in a TLS flag maintained by the context switch code, so
 * the thread pointer register must be valid before it can be consulted.
 */
static inline bool arch_is_user_context(void)
{
/* don't try accessing TLS variables if tp is not initialized */
if (riscv_tp_reg == 0) {
return false;
}
/* Defined in arch/riscv/core/thread.c */
extern __thread uint8_t is_user_mode;
return is_user_mode != 0;
}
#endif
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_RISCV_SYSCALL_H_ */
``` | /content/code_sandbox/include/zephyr/arch/riscv/syscall.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,357 |
```objective-c
/*
*
*/
/**
* @file
* @brief Per-arch thread definition
*
* This file contains definitions for
*
* struct _thread_arch
* struct _callee_saved
*
* necessary to instantiate instances of struct k_thread.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_THREAD_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_THREAD_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
/*
* The following structure defines the list of registers that need to be
* saved/restored when a context switch occurs.
*/
/* Callee-saved register set stored on context switch.
 * NOTE: assembly accesses these fields via generated offsets; do not
 * reorder them.
 */
struct _callee_saved {
unsigned long sp; /* Stack pointer, (x2 register) */
unsigned long ra; /* return address */
unsigned long s0; /* saved register/frame pointer */
unsigned long s1; /* saved register */
/* s2-s11 do not exist on RV32E (16-register base ISA) */
#if !defined(CONFIG_RISCV_ISA_RV32E)
unsigned long s2; /* saved register */
unsigned long s3; /* saved register */
unsigned long s4; /* saved register */
unsigned long s5; /* saved register */
unsigned long s6; /* saved register */
unsigned long s7; /* saved register */
unsigned long s8; /* saved register */
unsigned long s9; /* saved register */
unsigned long s10; /* saved register */
unsigned long s11; /* saved register */
#endif
};
typedef struct _callee_saved _callee_saved_t;
#if !defined(RV_FP_TYPE)
#ifdef CONFIG_CPU_HAS_FPU_DOUBLE_PRECISION
#define RV_FP_TYPE uint64_t
#else
#define RV_FP_TYPE uint32_t
#endif
#endif
/* Saved floating-point register file for FPU context switching:
 * argument (fa*), temporary (ft*) and saved (fs*) registers plus the
 * FP control/status register.
 */
struct z_riscv_fp_context {
RV_FP_TYPE fa0, fa1, fa2, fa3, fa4, fa5, fa6, fa7;
RV_FP_TYPE ft0, ft1, ft2, ft3, ft4, ft5, ft6, ft7, ft8, ft9, ft10, ft11;
RV_FP_TYPE fs0, fs1, fs2, fs3, fs4, fs5, fs6, fs7, fs8, fs9, fs10, fs11;
uint32_t fcsr; /* FP control and status register */
};
typedef struct z_riscv_fp_context z_riscv_fp_context_t;
#define PMP_M_MODE_SLOTS 8 /* 8 is plenty enough for m-mode */
/* Per-thread architecture-specific state. */
struct _thread_arch {
#ifdef CONFIG_FPU_SHARING
/* Lazily saved FP registers plus bookkeeping for the FPU-sharing logic. */
struct z_riscv_fp_context saved_fp_context;
bool fpu_recently_used;
uint8_t exception_depth;
#endif
#ifdef CONFIG_USERSPACE
/* Base of the privileged (syscall) stack for this user thread. */
unsigned long priv_stack_start;
/* Cached U-mode PMP register values, reloaded on context switch. */
unsigned long u_mode_pmpaddr_regs[CONFIG_PMP_SLOTS];
unsigned long u_mode_pmpcfg_regs[CONFIG_PMP_SLOTS / sizeof(unsigned long)];
unsigned int u_mode_pmp_domain_offset;
unsigned int u_mode_pmp_end_index;
unsigned int u_mode_pmp_update_nr;
#endif
#ifdef CONFIG_PMP_STACK_GUARD
/* Cached M-mode PMP register values implementing the stack guard. */
unsigned int m_mode_pmp_end_index;
unsigned long m_mode_pmpaddr_regs[PMP_M_MODE_SLOTS];
unsigned long m_mode_pmpcfg_regs[PMP_M_MODE_SLOTS / sizeof(unsigned long)];
#endif
};
typedef struct _thread_arch _thread_arch_t;
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_RISCV_THREAD_H_ */
``` | /content/code_sandbox/include/zephyr/arch/riscv/thread.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 694 |
```objective-c
/*
*
*/
/* Memory mapped registers I/O functions in riscv arch C code */
#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_SYS_IO_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_SYS_IO_H_
#ifndef _ASMLANGUAGE
#include <zephyr/toolchain.h>
#include <zephyr/types.h>
#include <zephyr/sys/sys_io.h>
#ifndef CONFIG_RISCV_SOC_HAS_CUSTOM_SYS_IO
#include <zephyr/arch/common/sys_io.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_SYS_IO
extern uint8_t z_soc_sys_read8(mem_addr_t addr);
extern void z_soc_sys_write8(uint8_t data, mem_addr_t addr);
extern uint16_t z_soc_sys_read16(mem_addr_t addr);
extern void z_soc_sys_write16(uint16_t data, mem_addr_t addr);
extern uint32_t z_soc_sys_read32(mem_addr_t addr);
extern void z_soc_sys_write32(uint32_t data, mem_addr_t addr);
extern uint64_t z_soc_sys_read64(mem_addr_t addr);
extern void z_soc_sys_write64(uint64_t data, mem_addr_t addr);
/* Read an 8-bit memory-mapped register through the SoC-provided hook. */
static ALWAYS_INLINE uint8_t sys_read8(mem_addr_t addr)
{
return z_soc_sys_read8(addr);
}
/**
 * @brief Write an 8-bit value to a memory-mapped register via the SoC hook.
 *
 * @param data Value to write.
 * @param addr Destination register address.
 */
static ALWAYS_INLINE void sys_write8(uint8_t data, mem_addr_t addr)
{
	/* No 'return <expr>;' here: returning a void expression from a void
	 * function violates C11 6.8.6.4 (rejected under -pedantic).
	 */
	z_soc_sys_write8(data, addr);
}
static ALWAYS_INLINE uint16_t sys_read16(mem_addr_t addr)
{
return z_soc_sys_read16(addr);
}
static ALWAYS_INLINE void sys_write16(uint16_t data, mem_addr_t addr)
{
return z_soc_sys_write16(data, addr);
}
static ALWAYS_INLINE uint32_t sys_read32(mem_addr_t addr)
{
return z_soc_sys_read32(addr);
}
static ALWAYS_INLINE void sys_write32(uint32_t data, mem_addr_t addr)
{
return z_soc_sys_write32(data, addr);
}
static ALWAYS_INLINE uint64_t sys_read64(mem_addr_t addr)
{
return z_soc_sys_read64(addr);
}
static ALWAYS_INLINE void sys_write64(uint64_t data, mem_addr_t addr)
{
return z_soc_sys_write64(data, addr);
}
#endif /* CONFIG_RISCV_SOC_HAS_CUSTOM_SYS_IO */
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_RISCV_SYS_IO_H_ */
``` | /content/code_sandbox/include/zephyr/arch/riscv/sys_io.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 504 |
```objective-c
/*
*
*/
/**
* @file
* @brief RISCV public error handling
*
* RISCV-specific kernel error handling interface. Included by riscv/arch.h.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_ERROR_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_ERROR_H_
#include <zephyr/arch/riscv/syscall.h>
#include <zephyr/arch/riscv/exception.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef CONFIG_USERSPACE
/*
 * Raise a fatal runtime exception carrying @a reason_p as the reason code.
 *
 * From user context the fault must be reported via the dedicated
 * user-fault system call; from kernel context an RV_ECALL_RUNTIME_EXCEPT
 * ecall is issued directly. Either way control never returns to the
 * caller (CODE_UNREACHABLE).
 */
#define ARCH_EXCEPT(reason_p) do { \
	if (k_is_user_context()) { \
		arch_syscall_invoke1(reason_p, \
			K_SYSCALL_USER_FAULT); \
	} else { \
		compiler_barrier(); \
		arch_syscall_invoke1(reason_p, \
			RV_ECALL_RUNTIME_EXCEPT);\
	} \
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */ \
} while (false)
#else
/* Without userspace every caller is kernel mode: trap immediately. */
#define ARCH_EXCEPT(reason_p) \
	arch_syscall_invoke1(reason_p, RV_ECALL_RUNTIME_EXCEPT)
#endif
__syscall void user_fault(unsigned int reason);
#include <zephyr/syscalls/error.h>
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_RISCV_ERROR_H_ */
``` | /content/code_sandbox/include/zephyr/arch/riscv/error.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 293 |
```objective-c
/**
*/
#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_ATOMIC_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_ATOMIC_H_
#ifdef __cplusplus
extern "C" {
#endif
/* The standard RISC-V atomic-instruction extension, "A", specifies
* the number of instructions that atomically read-modify-write memory,
* which RISC-V harts should support in order to synchronise harts
* running in the same memory space. This is the subset of RISC-V
* atomic-instructions not present in atomic_builtin.h file.
*/
#ifdef CONFIG_64BIT
/**
 * @brief Atomically swap in @a newval; return the previous value.
 *
 * RV64 AMOSWAP.D with acquire ordering (.aq).
 *
 * NOTE(review): @a target is const-qualified even though the AMO
 * instruction stores to *target (the asm only declares it as an input
 * plus a "memory" clobber) — confirm the const is intentional.
 */
static ALWAYS_INLINE atomic_val_t atomic_swap(const atomic_t *target, atomic_val_t newval)
{
	atomic_val_t ret;
	__asm__ volatile("amoswap.d.aq %0, %1, %2"
			 : "=r"(ret)
			 : "r"(newval), "A"(*target)
			 : "memory");
	return ret;
}
/**
 * @brief Atomic signed maximum; returns the previous value (AMOMAX.D.aq).
 */
static ALWAYS_INLINE atomic_val_t atomic_max(atomic_t *target, atomic_val_t value)
{
	atomic_val_t ret;
	__asm__ volatile("amomax.d.aq %0, %1, %2"
			 : "=r"(ret)
			 : "r"(value), "A"(*target)
			 : "memory");
	return ret;
}
/**
 * @brief Atomic signed minimum; returns the previous value (AMOMIN.D.aq).
 */
static ALWAYS_INLINE atomic_val_t atomic_min(atomic_t *target, atomic_val_t value)
{
	atomic_val_t ret;
	__asm__ volatile("amomin.d.aq %0, %1, %2"
			 : "=r"(ret)
			 : "r"(value), "A"(*target)
			 : "memory");
	return ret;
}
static ALWAYS_INLINE atomic_val_t atomic_maxu(unsigned long *target, unsigned long value)
{
unsigned long ret;
__asm__ volatile("amomaxu.d.aq %0, %1, %2"
: "=r"(ret)
: "r"(value), "A"(*target)
: "memory");
return ret;
}
static ALWAYS_INLINE atomic_val_t atomic_minu(unsigned long *target, unsigned long value)
{
unsigned long ret;
__asm__ volatile("amominu.d.aq %0, %1, %2"
: "=r"(ret)
: "r"(value), "A"(*target)
: "memory");
return ret;
}
#else
/**
 * @brief Atomically swap in @a newval; return the previous value.
 *
 * RV32 AMOSWAP.W with acquire ordering (.aq).
 *
 * NOTE(review): @a target is const-qualified even though the AMO
 * instruction stores to *target — confirm the const is intentional.
 */
static ALWAYS_INLINE atomic_val_t atomic_swap(const atomic_t *target, atomic_val_t newval)
{
	atomic_val_t ret;
	__asm__ volatile("amoswap.w.aq %0, %1, %2"
			 : "=r"(ret)
			 : "r"(newval), "A"(*target)
			 : "memory");
	return ret;
}
/**
 * @brief Atomic signed maximum; returns the previous value (AMOMAX.W.aq).
 */
static ALWAYS_INLINE atomic_val_t atomic_max(atomic_t *target, atomic_val_t value)
{
	atomic_val_t ret;
	__asm__ volatile("amomax.w.aq %0, %1, %2"
			 : "=r"(ret)
			 : "r"(value), "A"(*target)
			 : "memory");
	return ret;
}
/**
 * @brief Atomic signed minimum; returns the previous value (AMOMIN.W.aq).
 */
static ALWAYS_INLINE atomic_val_t atomic_min(atomic_t *target, atomic_val_t value)
{
	atomic_val_t ret;
	__asm__ volatile("amomin.w.aq %0, %1, %2"
			 : "=r"(ret)
			 : "r"(value), "A"(*target)
			 : "memory");
	return ret;
}
/**
 * @brief Atomic unsigned maximum; returns the previous value (AMOMAXU.W.aq).
 */
static ALWAYS_INLINE unsigned long atomic_maxu(unsigned long *target, unsigned long value)
{
	unsigned long ret;
	__asm__ volatile("amomaxu.w.aq %0, %1, %2"
			 : "=r"(ret)
			 : "r"(value), "A"(*target)
			 : "memory");
	return ret;
}
/**
 * @brief Atomic unsigned minimum; returns the previous value (AMOMINU.W.aq).
 */
static ALWAYS_INLINE unsigned long atomic_minu(unsigned long *target, unsigned long value)
{
	unsigned long ret;
	__asm__ volatile("amominu.w.aq %0, %1, %2"
			 : "=r"(ret)
			 : "r"(value), "A"(*target)
			 : "memory");
	return ret;
}
#endif /* CONFIG_64BIT */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_RISCV_ATOMIC_H_ */
``` | /content/code_sandbox/include/zephyr/arch/riscv/atomic.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 906 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_RISCV_PRIVILEGED_ASM_INLINE_GCC_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_RISCV_PRIVILEGED_ASM_INLINE_GCC_H_
/*
* The file must not be included directly
* Include arch/cpu.h instead
* TEMPORARY
*/
#ifndef _ASMLANGUAGE
#include <zephyr/toolchain.h>
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_RISCV_RISCV_PRIVILEGED_ASM_INLINE_GCC_H_ */
``` | /content/code_sandbox/include/zephyr/arch/riscv/riscv-privileged/asm_inline_gcc.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 117 |
```objective-c
/*
* Contributors: 2018 Antmicro <www.antmicro.com>
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_RISCV_PRIVILEGED_ASM_INLINE_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_RISCV_PRIVILEGED_ASM_INLINE_H_
/*
* The file must not be included directly
* Include arch/cpu.h instead
*/
#if defined(__GNUC__)
#include <zephyr/arch/riscv/riscv-privileged/asm_inline_gcc.h>
#else
#error "Supports only GNU C compiler"
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_RISCV_RISCV_PRIVILEGED_ASM_INLINE_H_ */
``` | /content/code_sandbox/include/zephyr/arch/riscv/riscv-privileged/asm_inline.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 137 |
```objective-c
/*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_X86_MEMMAP_H_
#define ZEPHYR_INCLUDE_ARCH_X86_MEMMAP_H_
#ifndef _ASMLANGUAGE
/*
* The "source" of the memory map refers to where we got the data to fill in
* the map. Order is important: if multiple sources are available, then the
* numerically HIGHEST source wins, a manually-provided map being the "best".
*/
enum x86_memmap_source {
X86_MEMMAP_SOURCE_DEFAULT,
X86_MEMMAP_SOURCE_MULTIBOOT_MEM,
X86_MEMMAP_SOURCE_MULTIBOOT_MMAP,
X86_MEMMAP_SOURCE_MANUAL
};
extern enum x86_memmap_source x86_memmap_source;
/*
* For simplicity, we maintain a fixed-sized array of memory regions.
*
* We don't only track available RAM -- we track unavailable regions, too:
* sometimes we'll be given a map with overlapping regions. We have to be
* pessimistic about what is considered "available RAM" and it's easier to
* keep all the regions around than it is to correct incorrect maps. It's
* also handy to have the entire map intact for diagnostic purposes.
*/
enum x86_memmap_entry_type {
/*
* the UNUSED entry must have a numerical 0 value, so
* that partially-initialized arrays behave as expected.
*/
X86_MEMMAP_ENTRY_UNUSED, /* this entry is unused/invalid */
X86_MEMMAP_ENTRY_RAM, /* available RAM */
X86_MEMMAP_ENTRY_ACPI, /* reserved for ACPI */
X86_MEMMAP_ENTRY_NVS, /* preserve during hibernation */
X86_MEMMAP_ENTRY_DEFECTIVE, /* bad memory modules */
X86_MEMMAP_ENTRY_UNKNOWN /* unknown type, do not use */
};
struct x86_memmap_entry {
uint64_t base;
uint64_t length;
enum x86_memmap_entry_type type;
};
extern struct x86_memmap_entry x86_memmap[];
/*
* We keep track of kernel memory areas (text, data, etc.) in a table for
* ease of reference. There's really no reason to export this table, or to
* label the members, except for diagnostic purposes.
*/
struct x86_memmap_exclusion {
char *name;
void *start; /* address of first byte of exclusion */
void *end; /* one byte past end of exclusion */
};
extern struct x86_memmap_exclusion x86_memmap_exclusions[];
extern int x86_nr_memmap_exclusions;
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_X86_MEMMAP_H_ */
``` | /content/code_sandbox/include/zephyr/arch/x86/memmap.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 553 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_X86_MULTIBOOT_H_
#define ZEPHYR_INCLUDE_ARCH_X86_MULTIBOOT_H_
#ifndef _ASMLANGUAGE
#include <stdint.h>
/*
* Multiboot (version 1) boot information structure.
*
* Only fields/values of interest to Zephyr are enumerated: at
* present, that means only those pertaining to the framebuffer.
*/
struct multiboot_info {
uint32_t flags;
uint32_t mem_lower;
uint32_t mem_upper;
uint32_t unused0[8];
uint32_t mmap_length;
uint32_t mmap_addr;
uint32_t unused1[9];
uint32_t fb_addr_lo;
uint32_t fb_addr_hi;
uint32_t fb_pitch;
uint32_t fb_width;
uint32_t fb_height;
uint8_t fb_bpp;
uint8_t fb_type;
uint8_t fb_color_info[6];
};
extern struct multiboot_info multiboot_info;
#ifdef CONFIG_MULTIBOOT_INFO
void z_multiboot_init(struct multiboot_info *info_pa);
#else
/* No-op stub used when CONFIG_MULTIBOOT_INFO is disabled.
 *
 * `static inline` (not plain `inline`) is required for a header-defined
 * function: in C99, an `inline` definition without `static` or `extern`
 * provides no external definition, so any call the compiler chooses not
 * to inline fails to link.
 */
static inline void z_multiboot_init(struct multiboot_info *info_pa)
{
	ARG_UNUSED(info_pa);
}
#endif /* CONFIG_MULTIBOOT_INFO */
/*
* the mmap_addr field points to a series of entries of the following form.
*/
struct multiboot_mmap {
uint32_t size;
uint64_t base;
uint64_t length;
uint32_t type;
} __packed;
#endif /* _ASMLANGUAGE */
/* Boot type value (see prep_c.c) */
#define MULTIBOOT_BOOT_TYPE 1
/*
* Possible values for multiboot_mmap.type field.
* Other values should be assumed to be unusable ranges.
*/
#define MULTIBOOT_MMAP_RAM 1 /* available RAM */
#define MULTIBOOT_MMAP_ACPI 3 /* reserved for ACPI */
#define MULTIBOOT_MMAP_NVS 4 /* ACPI non-volatile */
#define MULTIBOOT_MMAP_DEFECTIVE 5 /* defective RAM module */
/*
* Magic numbers: the kernel multiboot header (see crt0.S) begins with
* MULTIBOOT_HEADER_MAGIC to signal to the booter that it supports
* multiboot. On kernel entry, EAX is set to MULTIBOOT_EAX_MAGIC to
* signal that the boot loader is multiboot compliant.
*/
#define MULTIBOOT_HEADER_MAGIC 0x1BADB002
#define MULTIBOOT_EAX_MAGIC 0x2BADB002
/*
* Typically, we put no flags in the multiboot header, as it exists solely
* to reassure the loader that we're a valid binary. The exception to this
* is when we want the loader to configure the framebuffer for us.
*/
#define MULTIBOOT_HEADER_FLAG_MEM BIT(1) /* want mem_/mmap_* info */
#define MULTIBOOT_HEADER_FLAG_FB BIT(2) /* want fb_* info */
#ifdef CONFIG_INTEL_MULTIBOOTFB_DISPLAY
#define MULTIBOOT_HEADER_FLAGS \
(MULTIBOOT_HEADER_FLAG_FB | MULTIBOOT_HEADER_FLAG_MEM)
#else
#define MULTIBOOT_HEADER_FLAGS MULTIBOOT_HEADER_FLAG_MEM
#endif
/* The flags in the boot info structure tell us which fields are valid. */
#define MULTIBOOT_INFO_FLAGS_MEM (1 << 0) /* mem_* valid */
#define MULTIBOOT_INFO_FLAGS_MMAP (1 << 6) /* mmap_* valid */
#define MULTIBOOT_INFO_FLAGS_FB (1 << 12) /* fb_* valid */
/* The only fb_type we support is RGB. No text modes and no color palettes. */
#define MULTIBOOT_INFO_FB_TYPE_RGB 1
#endif /* ZEPHYR_INCLUDE_ARCH_X86_MULTIBOOT_H_ */
``` | /content/code_sandbox/include/zephyr/arch/x86/multiboot.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 799 |
```objective-c
/*
*
*/
/**
* @brief Encode interrupt flag for x86 architecture.
*
* @param polarity the interrupt polarity received from ACPICA lib
* @param trigger the interrupt level received from ACPICA lib
* @return return encoded interrupt flag
*/
uint32_t arch_acpi_encode_irq_flags(uint8_t polarity, uint8_t trigger);
``` | /content/code_sandbox/include/zephyr/arch/x86/x86_acpi.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 73 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_ARCH_X86_INCLUDE_EFI_H_
#define ZEPHYR_ARCH_X86_INCLUDE_EFI_H_
/* Boot type value (see prep_c.c) */
#define EFI_BOOT_TYPE 2
#ifndef _ASMLANGUAGE
struct efi_boot_arg {
void *efi_systab; /* EFI system table */
unsigned long long efi_cr3; /* EFI page table */
void *acpi_rsdp;
};
#if defined(CONFIG_X86_EFI)
/** @brief Initialize usage of EFI gathered information
*
* @param efi_arg The given pointer to EFI prepared boot argument
*/
void efi_init(struct efi_boot_arg *efi_arg);
/** @brief Get the ACPI RSDP table pointer from EFI boot argument
*
* @return A valid pointer to ACPI RSDP table or NULL otherwise.
*/
void *efi_get_acpi_rsdp(void);
#else /* CONFIG_X86_EFI */
#define efi_init(...)
#define efi_get_acpi_rsdp(...) NULL
#endif /* CONFIG_X86_EFI */
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_ARCH_X86_INCLUDE_EFI_H_ */
``` | /content/code_sandbox/include/zephyr/arch/x86/efi.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 243 |
```objective-c
/*
*
*/
#include <zephyr/arch/x86/efi.h>
#include <zephyr/arch/x86/legacy_bios.h>
#ifndef ZEPHYR_ARCH_X86_INCLUDE_X86_ACPI_H_
#define ZEPHYR_ARCH_X86_INCLUDE_X86_ACPI_H_
#if defined(CONFIG_X86_EFI)
/* Locate the ACPI RSDP table: prefer the pointer published through the
 * EFI boot arguments, falling back to a legacy BIOS area scan when EFI
 * did not provide one. Returns NULL if neither source has it.
 */
static inline void *acpi_rsdp_get(void)
{
	void *table = efi_get_acpi_rsdp();

	return (table != NULL) ? table : bios_acpi_rsdp_get();
}
#else
static inline void *acpi_rsdp_get(void)
{
return bios_acpi_rsdp_get();
}
#endif /* CONFIG_X86_EFI */
static inline uint64_t acpi_timer_get(void)
{
return z_tsc_read();
}
#endif /* ZEPHYR_ARCH_X86_INCLUDE_X86_ACPI_H_ */
``` | /content/code_sandbox/include/zephyr/arch/x86/x86_acpi_osal.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 185 |
```objective-c
/*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_X86_CPUID_H_
#define ZEPHYR_INCLUDE_ARCH_X86_CPUID_H_
#ifndef _ASMLANGUAGE
#ifdef __cplusplus
extern "C" {
#endif
#define CPUID_BASIC_INFO_1 0x01
#define CPUID_EXTENDED_FEATURES_LVL 0x07
#define CPUID_EXTENDED_TOPOLOGY_ENUMERATION 0x0B
#define CPUID_EXTENDED_TOPOLOGY_ENUMERATION_V2 0x1F
/* Bits to check in CPUID extended features */
#define CPUID_SPEC_CTRL_SSBD BIT(31)
#define CPUID_SPEC_CTRL_IBRS BIT(26)
uint32_t z_x86_cpuid_extended_features(void);
uint8_t z_x86_cpuid_get_current_physical_apic_id(void);
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_X86_CPUID_H_ */
``` | /content/code_sandbox/include/zephyr/arch/x86/cpuid.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 195 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_ARCH_X86_INCLUDE_LEGACY_BIOS_H_
#define ZEPHYR_ARCH_X86_INCLUDE_LEGACY_BIOS_H_
void *bios_acpi_rsdp_get(void);
#endif /* ZEPHYR_ARCH_X86_INCLUDE_LEGACY_BIOS_H_ */
``` | /content/code_sandbox/include/zephyr/arch/x86/legacy_bios.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 61 |
```linker script
/*
*
*/
/**
* @file
* @brief Linker command/script file
*
* Generic Linker script for the riscv platform
*/
#include <zephyr/devicetree.h>
#include <zephyr/linker/sections.h>
#include <zephyr/linker/devicetree_regions.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/linker/linker-tool.h>
#ifdef CONFIG_XIP
#define ROMABLE_REGION ROM
#else
#define ROMABLE_REGION RAM
#endif
#define RAMABLE_REGION RAM
#define _EXCEPTION_SECTION_NAME exceptions
#define _RESET_SECTION_NAME reset
#if defined(CONFIG_ROM_END_OFFSET)
#define ROM_END_OFFSET CONFIG_ROM_END_OFFSET
#else
#define ROM_END_OFFSET 0
#endif
#if defined(CONFIG_FLASH_LOAD_OFFSET)
#define FLASH_LOAD_OFFSET CONFIG_FLASH_LOAD_OFFSET
#else
#define FLASH_LOAD_OFFSET 0
#endif
#ifdef CONFIG_XIP
#if CONFIG_FLASH_LOAD_SIZE > 0
#define ROM_SIZE (CONFIG_FLASH_LOAD_SIZE - ROM_END_OFFSET)
#endif
#if DT_NODE_HAS_COMPAT_STATUS(DT_CHOSEN(zephyr_flash), soc_nv_flash, okay)
#define ROM_BASE (DT_REG_ADDR(DT_CHOSEN(zephyr_flash)) + FLASH_LOAD_OFFSET)
#ifndef ROM_SIZE
#define ROM_SIZE (DT_REG_SIZE(DT_CHOSEN(zephyr_flash)) - ROM_END_OFFSET)
#endif
#elif DT_NODE_HAS_COMPAT(DT_CHOSEN(zephyr_flash), jedec_spi_nor)
/* For jedec,spi-nor we expect the SPI controller to memory-map the flash,
 * and for that mapping to be described by the register named flash_mmap; if a register
 * with that name doesn't exist, we expect it to be the second register property of the
 * SPI controller.
 */
#define SPI_CTRL DT_PARENT(DT_CHOSEN(zephyr_flash))
#define FLASH_MMAP_NAME flash_mmap
#define ROM_BASE \
(DT_REG_ADDR_BY_NAME_OR(SPI_CTRL, FLASH_MMAP_NAME, DT_REG_ADDR_BY_IDX(SPI_CTRL, 1)) + \
FLASH_LOAD_OFFSET)
#ifndef ROM_SIZE
#define ROM_SIZE \
(DT_REG_SIZE_BY_NAME_OR(SPI_CTRL, FLASH_MMAP_NAME, DT_REG_SIZE_BY_IDX(SPI_CTRL, 1)) - \
ROM_END_OFFSET)
#endif
#else /* Use Kconfig to cover the remaining cases */
#define ROM_BASE (CONFIG_FLASH_BASE_ADDRESS + FLASH_LOAD_OFFSET)
#ifndef ROM_SIZE
#define ROM_SIZE (CONFIG_FLASH_SIZE * 1024 - FLASH_LOAD_OFFSET - ROM_END_OFFSET)
#endif
#endif /* DT_NODE_HAS_COMPAT_STATUS */
#else /* CONFIG_XIP */
#define ROM_BASE CONFIG_SRAM_BASE_ADDRESS
#define ROM_SIZE (KB(CONFIG_SRAM_SIZE) - ROM_END_OFFSET)
#endif /* CONFIG_XIP */
#define RAM_BASE CONFIG_SRAM_BASE_ADDRESS
#define RAM_SIZE KB(CONFIG_SRAM_SIZE)
#ifdef CONFIG_RISCV_PMP
#define MPU_MIN_SIZE CONFIG_PMP_GRANULARITY
#define MPU_MIN_SIZE_ALIGN . = ALIGN(MPU_MIN_SIZE);
#if defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
#define MPU_ALIGN(region_size) \
. = ALIGN(MPU_MIN_SIZE); \
. = ALIGN( 1 << LOG2CEIL(region_size))
#else
#define MPU_ALIGN(region_size) \
. = ALIGN(MPU_MIN_SIZE)
#endif
#else
#define MPU_MIN_SIZE_ALIGN
#define MPU_ALIGN(region_size) . = ALIGN(4)
#endif
#include <zephyr/linker/linker-devnull.h>
MEMORY
{
#ifdef CONFIG_XIP
ROM (rx) : ORIGIN = ROM_BASE, LENGTH = ROM_SIZE
#endif
RAM (rwx) : ORIGIN = RAM_BASE, LENGTH = RAM_SIZE
#if defined(CONFIG_LINKER_DEVNULL_MEMORY)
DEVNULL_ROM (rx) : ORIGIN = DEVNULL_ADDR, LENGTH = DEVNULL_SIZE
#endif
LINKER_DT_REGIONS()
/* Used by and documented in include/linker/intlist.ld */
IDT_LIST (wx) : ORIGIN = 0xFFFFF7FF, LENGTH = 2K
}
ENTRY(CONFIG_KERNEL_ENTRY)
SECTIONS
{
#include <zephyr/linker/rel-sections.ld>
#ifdef CONFIG_LLEXT
#include <zephyr/linker/llext-sections.ld>
#endif
/*
* The .plt and .iplt are here according to
* 'riscv32-zephyr-elf-ld --verbose', before text section.
*/
SECTION_PROLOGUE(.plt,,)
{
*(.plt)
}
SECTION_PROLOGUE(.iplt,,)
{
*(.iplt)
}
GROUP_START(ROMABLE_REGION)
__rom_region_start = ROM_BASE;
SECTION_PROLOGUE(rom_start,,)
{
. = ALIGN(16);
/* Located in generated directory. This file is populated by calling
* zephyr_linker_sources(ROM_START ...).
*/
#include <snippets-rom-start.ld>
} GROUP_LINK_IN(ROMABLE_REGION)
#ifdef CONFIG_CODE_DATA_RELOCATION
#include <linker_relocate.ld>
#endif
SECTION_PROLOGUE(_RESET_SECTION_NAME,,)
{
KEEP(*(.reset.*))
} GROUP_LINK_IN(ROMABLE_REGION)
SECTION_PROLOGUE(_EXCEPTION_SECTION_NAME,,)
{
KEEP(*(".exception.entry.*"))
*(".exception.other.*")
} GROUP_LINK_IN(ROMABLE_REGION)
SECTION_PROLOGUE(_TEXT_SECTION_NAME,,)
{
. = ALIGN(4);
KEEP(*(.openocd_debug))
KEEP(*(".openocd_debug.*"))
__text_region_start = .;
*(.text)
*(".text.*")
*(.gnu.linkonce.t.*)
#include <zephyr/linker/kobject-text.ld>
} GROUP_LINK_IN(ROMABLE_REGION)
__text_region_end = .;
__rodata_region_start = .;
#include <zephyr/linker/common-rom.ld>
/* Located in generated directory. This file is populated by calling
* zephyr_linker_sources(ROM_SECTIONS ...). Useful for grouping iterable RO structs.
*/
#include <snippets-rom-sections.ld>
#include <zephyr/linker/thread-local-storage.ld>
SECTION_PROLOGUE(_RODATA_SECTION_NAME,,)
{
. = ALIGN(4);
*(.srodata)
*(".srodata.*")
*(.rodata)
*(".rodata.*")
*(.gnu.linkonce.r.*)
*(.sdata2 .sdata2.* .gnu.linkonce.s2.*)
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-rodata.ld>
#include <zephyr/linker/kobject-rom.ld>
. = ALIGN(4);
} GROUP_LINK_IN(ROMABLE_REGION)
#include <zephyr/linker/cplusplus-rom.ld>
__rodata_region_end = .;
/* For non-XIP system, __rom_region_end symbol should be set to
* the end of common ROMABLE_REGIONs (text and rodata) instead of
* the linker script end, so it wouldn't mistakenly contain
* RAMABLE_REGION in it.
*/
#ifndef CONFIG_XIP
#ifdef CONFIG_RISCV_PMP
SECTION_PROLOGUE(rom_mpu_padding,,)
{
MPU_ALIGN(__rodata_region_end - __rom_region_start);
#ifdef CONFIG_QEMU_TARGET
/*
* QEMU doesn't vet each instruction fetch individually.
* Instead, it grabs a whole page and perform dynamic
* translation on it in a batch. It therefore validates
* PMP permissions using page-sized and -aligned chunks.
*/
. = ALIGN(0x1000);
#endif
} GROUP_LINK_IN(ROMABLE_REGION)
#endif /* CONFIG_RISCV_PMP */
__rom_region_end = .;
__rom_region_size = __rom_region_end - __rom_region_start;
#endif /* CONFIG_XIP */
GROUP_END(ROMABLE_REGION)
GROUP_START(RAMABLE_REGION)
. = RAM_BASE;
_image_ram_start = .;
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-ram-sections.ld>
#if defined(CONFIG_USERSPACE)
#define APP_SHARED_ALIGN MPU_MIN_SIZE_ALIGN
#define SMEM_PARTITION_ALIGN MPU_ALIGN
#include <app_smem.ld>
_app_smem_size = _app_smem_end - _app_smem_start;
_app_smem_rom_start = LOADADDR(_APP_SMEM_SECTION_NAME);
#endif /* CONFIG_USERSPACE */
SECTION_DATA_PROLOGUE(_BSS_SECTION_NAME,(NOLOAD),)
{
MPU_MIN_SIZE_ALIGN
/*
* For performance, BSS section is assumed to be 4 byte aligned and
* a multiple of 4 bytes
*/
. = ALIGN(4);
__bss_start = .;
__kernel_ram_start = .;
*(.sbss)
*(".sbss.*")
*(.bss)
*(".bss.*")
COMMON_SYMBOLS
#ifdef CONFIG_CODE_DATA_RELOCATION
#include <linker_sram_bss_relocate.ld>
#endif
/*
* As memory is cleared in words only, it is simpler to ensure the BSS
* section ends on a 4 byte boundary. This wastes a maximum of 3 bytes.
*/
__bss_end = ALIGN(4);
} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
#include <zephyr/linker/common-noinit.ld>
SECTION_DATA_PROLOGUE(_DATA_SECTION_NAME,,)
{
. = ALIGN(4);
/* _image_ram_start = .; */
__data_region_start = .;
__data_start = .;
*(.data)
*(".data.*")
#ifdef CONFIG_RISCV_GP
/*
* RISC-V architecture has 12-bit signed immediate offsets in the
* instructions. If we can put the most commonly accessed globals
* in a special 4K span of memory addressed by the GP register, then
* we can access those values in a single instruction, saving both
* codespace and runtime.
*
* Since these immediate offsets are signed, place gp 0x800 past the
* beginning of .sdata so that we can use both positive and negative
* offsets.
*/
. = ALIGN(8);
PROVIDE (__global_pointer$ = . + 0x800);
#endif
*(.sdata .sdata.* .gnu.linkonce.s.*)
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-rwdata.ld>
#ifdef CONFIG_CODE_DATA_RELOCATION
#include <linker_sram_data_relocate.ld>
#endif
__data_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
__data_size = __data_end - __data_start;
__data_load_start = LOADADDR(_DATA_SECTION_NAME);
__data_region_load_start = LOADADDR(_DATA_SECTION_NAME);
#include <zephyr/linker/common-ram.ld>
#include <zephyr/linker/kobject-data.ld>
#include <zephyr/linker/cplusplus-ram.ld>
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-data-sections.ld>
__data_region_end = .;
__kernel_ram_end = .;
__kernel_ram_size = __kernel_ram_end - __kernel_ram_start;
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_itcm), okay)
GROUP_START(ITCM)
SECTION_PROLOGUE(_ITCM_SECTION_NAME,,SUBALIGN(8))
{
__itcm_start = .;
*(.itcm)
*(".itcm.*")
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function. */
#include <snippets-itcm-section.ld>
__itcm_end = .;
} GROUP_LINK_IN(ITCM AT> ROMABLE_REGION)
__itcm_size = __itcm_end - __itcm_start;
__itcm_load_start = LOADADDR(_ITCM_SECTION_NAME);
GROUP_END(ITCM)
#endif
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay)
GROUP_START(DTCM)
SECTION_PROLOGUE(_DTCM_BSS_SECTION_NAME, (NOLOAD),SUBALIGN(8))
{
__dtcm_start = .;
__dtcm_bss_start = .;
*(.dtcm_bss)
*(".dtcm_bss.*")
__dtcm_bss_end = .;
} GROUP_LINK_IN(DTCM)
SECTION_PROLOGUE(_DTCM_NOINIT_SECTION_NAME, (NOLOAD),SUBALIGN(8))
{
__dtcm_noinit_start = .;
*(.dtcm_noinit)
*(".dtcm_noinit.*")
__dtcm_noinit_end = .;
} GROUP_LINK_IN(DTCM)
SECTION_PROLOGUE(_DTCM_DATA_SECTION_NAME,,SUBALIGN(8))
{
__dtcm_data_start = .;
*(.dtcm_data)
*(".dtcm_data.*")
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function. */
#include <snippets-dtcm-section.ld>
__dtcm_data_end = .;
} GROUP_LINK_IN(DTCM AT> ROMABLE_REGION)
__dtcm_end = .;
__dtcm_data_load_start = LOADADDR(_DTCM_DATA_SECTION_NAME);
GROUP_END(DTCM)
#endif
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-sections.ld>
#define LAST_RAM_ALIGN MPU_MIN_SIZE_ALIGN
#include <zephyr/linker/ram-end.ld>
GROUP_END(RAMABLE_REGION)
#include <zephyr/linker/debug-sections.ld>
/DISCARD/ : { *(.note.GNU-stack) }
SECTION_PROLOGUE(.riscv.attributes, 0,)
{
KEEP(*(.riscv.attributes))
KEEP(*(.gnu.attributes))
}
/* Output section descriptions are needed for these sections to suppress
* warnings when "--orphan-handling=warn" is set for lld.
*/
#if defined(CONFIG_LLVM_USE_LLD)
SECTION_PROLOGUE(.symtab, 0,) { *(.symtab) }
SECTION_PROLOGUE(.strtab, 0,) { *(.strtab) }
SECTION_PROLOGUE(.shstrtab, 0,) { *(.shstrtab) }
#endif
/* Sections generated from 'zephyr,memory-region' nodes */
LINKER_DT_SECTIONS()
/* Because ROMABLE_REGION != RAMABLE_REGION in an XIP system, it is valid
 * to set the __rom_region_end symbol at the end of the linker script,
 * without it mistakenly including the RAMABLE_REGION.
 */
#ifdef CONFIG_XIP
/* Must be last in romable region */
SECTION_PROLOGUE(.last_section,,)
{
#ifdef CONFIG_LINKER_LAST_SECTION_ID
/* Fill last section with a word to ensure location counter and actual rom
* region data usage match. */
LONG(CONFIG_LINKER_LAST_SECTION_ID_PATTERN)
/* __rom_region_size is used when configuring the PMP entry of the ROM region.
* Addresses (pmpaddr) in PMP registers need to be aligned to 4. Align
* __rom_region_size to 4 to meet that requirement. */
MPU_MIN_SIZE_ALIGN
#endif
} GROUP_LINK_IN(ROMABLE_REGION)
/* To provide the image size as a const expression,
* calculate this value here. */
__rom_region_end = LOADADDR(.last_section) + SIZEOF(.last_section);
__rom_region_size = __rom_region_end - __rom_region_start;
#endif
}
``` | /content/code_sandbox/include/zephyr/arch/riscv/common/linker.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,507 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_X86_ARCH_INLINES_H_
#define ZEPHYR_INCLUDE_ARCH_X86_ARCH_INLINES_H_
#ifndef _ASMLANGUAGE
#include <zephyr/arch/x86/x86_acpi.h>
#if defined(CONFIG_X86_64)
#include <zephyr/arch/x86/intel64/thread.h>
#include <zephyr/kernel_structs.h>
/* Return the _cpu structure of the CPU executing this code.
 *
 * Reads the `cpu` back-pointer member of the x86_tss64_t that the
 * current CPU's %gs segment base addresses, so each CPU resolves its
 * own structure without any table lookup.
 */
static inline struct _cpu *arch_curr_cpu(void)
{
	struct _cpu *cpu;
	__asm__ volatile("movq %%gs:(%c1), %0"
			 : "=r" (cpu)
			 : "i" (offsetof(x86_tss64_t, cpu)))
	return cpu;
}
/* Return an identifier for the executing processor. */
static ALWAYS_INLINE uint32_t arch_proc_id(void)
{
	/*
	 * Placeholder implementation to be replaced with an architecture
	 * specific call to get processor ID
	 */
	return arch_curr_cpu()->id;
}
#endif /* CONFIG_X86_64 */
/* Number of CPUs is fixed at build time by CONFIG_MP_MAX_NUM_CPUS. */
static ALWAYS_INLINE unsigned int arch_num_cpus(void)
{
	return CONFIG_MP_MAX_NUM_CPUS;
}
#endif /* !_ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_X86_ARCH_INLINES_H_ */
``` | /content/code_sandbox/include/zephyr/arch/x86/arch_inlines.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 246 |
```objective-c
/*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_X86_MSR_H_
#define ZEPHYR_INCLUDE_ARCH_X86_MSR_H_
/*
* Model specific registers (MSR). Access with z_x86_msr_read/write().
*/
#define X86_TIME_STAMP_COUNTER_MSR 0x00000010
#define X86_SPEC_CTRL_MSR 0x00000048
#define X86_SPEC_CTRL_MSR_IBRS BIT(0)
#define X86_SPEC_CTRL_MSR_SSBD BIT(2)
#define X86_APIC_BASE_MSR 0x0000001b
#define X86_APIC_BASE_MSR_X2APIC BIT(10)
#define X86_MTRR_DEF_TYPE_MSR 0x000002ff
#define X86_MTRR_DEF_TYPE_MSR_ENABLE BIT(11)
#define X86_X2APIC_BASE_MSR 0x00000800 /* .. thru 0x00000BFF */
#define X86_EFER_MSR 0xC0000080U
#define X86_EFER_MSR_SCE BIT(0)
#define X86_EFER_MSR_LME BIT(8)
#define X86_EFER_MSR_NXE BIT(11)
/* STAR 31:0 Unused in long mode
* 47:32 Kernel CS (SS = CS+8)
* 63:48 User CS (SS = CS+8)
*/
#define X86_STAR_MSR 0xC0000081U
/* Location for system call entry point */
#define X86_LSTAR_MSR 0xC0000082U
/* Low 32 bits in this MSR are the SYSCALL mask applied to EFLAGS */
#define X86_FMASK_MSR 0xC0000084U
#define X86_FS_BASE 0xC0000100U
#define X86_GS_BASE 0xC0000101U
#define X86_KERNEL_GS_BASE 0xC0000102U
#ifndef _ASMLANGUAGE
#ifdef __cplusplus
extern "C" {
#endif
/*
* z_x86_msr_write() is shared between 32- and 64-bit implementations, but
* due to ABI differences with long return values, z_x86_msr_read() is not.
*/
/* Write a 64-bit value to a model-specific register.
 *
 * WRMSR takes the MSR index in ECX and the value split across EDX:EAX
 * (high:low 32-bit halves). Works identically on 32- and 64-bit x86.
 */
static inline void z_x86_msr_write(unsigned int msr, uint64_t data)
{
	__asm__ volatile ("wrmsr"
			  :
			  : "c"(msr),
			    "a"((uint32_t)(data & 0xFFFFFFFFU)),
			    "d"((uint32_t)(data >> 32)));
}
#ifdef CONFIG_X86_64
/* Read a model-specific register (64-bit build).
 *
 * RDMSR returns the value split across EDX:EAX even in long mode; the
 * union reassembles the halves. The anonymous struct relies on x86
 * being little-endian so that `lo` overlays the low half of `value`.
 */
static inline uint64_t z_x86_msr_read(unsigned int msr)
{
	union {
		struct {
			uint32_t lo;
			uint32_t hi;
		};
		uint64_t value;
	} rv;
	__asm__ volatile ("rdmsr" : "=a" (rv.lo), "=d" (rv.hi) : "c" (msr));
	return rv.value;
}
#else
/* Read a model-specific register (32-bit build).
 *
 * The "=A" constraint binds the EDX:EAX register pair directly to the
 * 64-bit result, so no manual reassembly is needed here.
 */
static inline uint64_t z_x86_msr_read(unsigned int msr)
{
	uint64_t ret;
	__asm__ volatile("rdmsr" : "=A" (ret) : "c" (msr));
	return ret;
}
#endif
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_X86_MSR_H_ */
``` | /content/code_sandbox/include/zephyr/arch/x86/msr.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 733 |
```objective-c
/*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_X86_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_X86_ARCH_H_
#include <zephyr/devicetree.h>
/* Changing this value will require manual changes to exception and IDT setup
* in locore.S for intel64
*/
#define Z_X86_OOPS_VECTOR 32
#if !defined(_ASMLANGUAGE)
#include <zephyr/sys/sys_io.h>
#include <zephyr/types.h>
#include <stddef.h>
#include <stdbool.h>
#include <zephyr/irq.h>
#include <zephyr/arch/x86/mmustructs.h>
#include <zephyr/arch/x86/thread_stack.h>
#include <zephyr/linker/sections.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef CONFIG_PCIE_MSI
struct x86_msi_vector {
unsigned int irq;
uint8_t vector;
#ifdef CONFIG_INTEL_VTD_ICTL
bool remap;
uint8_t irte;
#endif
};
typedef struct x86_msi_vector arch_msi_vector_t;
#endif /* CONFIG_PCIE_MSI */
/* Restore the interrupt state captured by a prior arch_irq_lock().
 *
 * @a key is the saved EFLAGS value; interrupts are re-enabled (STI)
 * only if the interrupt flag (EFLAGS.IF, bit 9 == 0x200) was set when
 * the lock was taken, making nested lock/unlock pairs safe.
 */
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
	if ((key & 0x00000200U) != 0U) { /* 'IF' bit */
		__asm__ volatile ("sti" ::: "memory");
	}
}
/* Write one byte to an x86 I/O port. */
static ALWAYS_INLINE void sys_out8(uint8_t data, io_port_t port)
{
	__asm__ volatile("outb %b0, %w1" :: "a"(data), "Nd"(port));
}
/* Read one byte from an x86 I/O port. */
static ALWAYS_INLINE uint8_t sys_in8(io_port_t port)
{
	uint8_t ret;
	__asm__ volatile("inb %w1, %b0" : "=a"(ret) : "Nd"(port));
	return ret;
}
/* Write a 16-bit word to an x86 I/O port. */
static ALWAYS_INLINE void sys_out16(uint16_t data, io_port_t port)
{
	__asm__ volatile("outw %w0, %w1" :: "a"(data), "Nd"(port));
}
/* Read a 16-bit word from an x86 I/O port. */
static ALWAYS_INLINE uint16_t sys_in16(io_port_t port)
{
	uint16_t ret;
	__asm__ volatile("inw %w1, %w0" : "=a"(ret) : "Nd"(port));
	return ret;
}
/* Write a 32-bit doubleword to an x86 I/O port. */
static ALWAYS_INLINE void sys_out32(uint32_t data, io_port_t port)
{
	__asm__ volatile("outl %0, %w1" :: "a"(data), "Nd"(port));
}
/* Read a 32-bit doubleword from an x86 I/O port. */
static ALWAYS_INLINE uint32_t sys_in32(io_port_t port)
{
	uint32_t ret;
	__asm__ volatile("inl %w1, %0" : "=a"(ret) : "Nd"(port));
	return ret;
}
static ALWAYS_INLINE void sys_write8(uint8_t data, mm_reg_t addr)
{
__asm__ volatile("movb %0, %1"
:
: "q"(data), "m" (*(volatile uint8_t *)(uintptr_t) addr)
: "memory");
}
/** @brief 8-bit MMIO read via a single "movb". */
static ALWAYS_INLINE uint8_t sys_read8(mm_reg_t addr)
{
	uint8_t ret;

	__asm__ volatile("movb %1, %0"
			 : "=q"(ret)
			 : "m" (*(volatile uint8_t *)(uintptr_t) addr)
			 : "memory");
	return ret;
}
/** @brief 16-bit MMIO write via a single "movw". */
static ALWAYS_INLINE void sys_write16(uint16_t data, mm_reg_t addr)
{
	__asm__ volatile("movw %0, %1"
			 :
			 : "r"(data), "m" (*(volatile uint16_t *)(uintptr_t) addr)
			 : "memory");
}
/** @brief 16-bit MMIO read via a single "movw". */
static ALWAYS_INLINE uint16_t sys_read16(mm_reg_t addr)
{
	uint16_t ret;

	__asm__ volatile("movw %1, %0"
			 : "=r"(ret)
			 : "m" (*(volatile uint16_t *)(uintptr_t) addr)
			 : "memory");
	return ret;
}
/** @brief 32-bit MMIO write via a single "movl". */
static ALWAYS_INLINE void sys_write32(uint32_t data, mm_reg_t addr)
{
	__asm__ volatile("movl %0, %1"
			 :
			 : "r"(data), "m" (*(volatile uint32_t *)(uintptr_t) addr)
			 : "memory");
}
/** @brief 32-bit MMIO read via a single "movl". */
static ALWAYS_INLINE uint32_t sys_read32(mm_reg_t addr)
{
	uint32_t ret;

	__asm__ volatile("movl %1, %0"
			 : "=r"(ret)
			 : "m" (*(volatile uint32_t *)(uintptr_t) addr)
			 : "memory");
	return ret;
}
/** @brief Atomically-ordered set of bit @a bit at @a addr ("btsl"). */
static ALWAYS_INLINE void sys_set_bit(mem_addr_t addr, unsigned int bit)
{
	__asm__ volatile("btsl %1, %0"
			 : "+m" (*(volatile uint8_t *) (addr))
			 : "Ir" (bit)
			 : "memory");
}
/** @brief Clear bit @a bit at @a addr ("btrl").
 *
 * NOTE(review): unlike sys_set_bit() this asm has no "memory"
 * clobber; confirm whether the asymmetry is intentional.
 */
static ALWAYS_INLINE void sys_clear_bit(mem_addr_t addr, unsigned int bit)
{
	__asm__ volatile("btrl %1, %0"
			 : "+m" (*(volatile uint8_t *) (addr))
			 : "Ir" (bit));
}
/** @brief Test bit @a bit at @a addr.
 *
 * "btl" copies the bit into CF; "sbb %0, %0" then yields 0 when the
 * bit was clear and -1 (all ones) when it was set, so the return
 * value is non-zero iff the bit is set.
 */
static ALWAYS_INLINE int sys_test_bit(mem_addr_t addr, unsigned int bit)
{
	int ret;

	__asm__ volatile("btl %2, %1;"
			 "sbb %0, %0"
			 : "=r" (ret), "+m" (*(volatile uint8_t *) (addr))
			 : "Ir" (bit));
	return ret;
}
/** @brief Set bit @a bit at @a addr ("btsl"), returning its previous
 * value: 0 if it was clear, -1 (non-zero) if it was set.
 */
static ALWAYS_INLINE int sys_test_and_set_bit(mem_addr_t addr,
					      unsigned int bit)
{
	int ret;

	__asm__ volatile("btsl %2, %1;"
			 "sbb %0, %0"
			 : "=r" (ret), "+m" (*(volatile uint8_t *) (addr))
			 : "Ir" (bit));
	return ret;
}
/** @brief Clear bit @a bit at @a addr ("btrl"), returning its previous
 * value: 0 if it was clear, -1 (non-zero) if it was set.
 */
static ALWAYS_INLINE int sys_test_and_clear_bit(mem_addr_t addr,
						unsigned int bit)
{
	int ret;

	__asm__ volatile("btrl %2, %1;"
			 "sbb %0, %0"
			 : "=r" (ret), "+m" (*(volatile uint8_t *) (addr))
			 : "Ir" (bit));
	return ret;
}
#define sys_bitfield_set_bit sys_set_bit
#define sys_bitfield_clear_bit sys_clear_bit
#define sys_bitfield_test_bit sys_test_bit
#define sys_bitfield_test_and_set_bit sys_test_and_set_bit
#define sys_bitfield_test_and_clear_bit sys_test_and_clear_bit
/*
* Map of IRQ numbers to their assigned vectors. On IA32, this is generated
* at build time and defined via the linker script. On Intel64, it's an array.
*/
extern unsigned char _irq_to_interrupt_vector[CONFIG_MAX_IRQ_LINES];
#define Z_IRQ_TO_INTERRUPT_VECTOR(irq) \
((unsigned int) _irq_to_interrupt_vector[(irq)])
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#include <zephyr/drivers/interrupt_controller/sysapic.h>
#ifdef CONFIG_X86_64
#include <zephyr/arch/x86/intel64/arch.h>
#else
#include <zephyr/arch/x86/ia32/arch.h>
#endif
#include <zephyr/arch/common/ffs.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _ASMLANGUAGE
void arch_irq_enable(unsigned int irq);
void arch_irq_disable(unsigned int irq);
uint32_t sys_clock_cycle_get_32(void);
/**
 * @brief Return the current 32-bit hardware cycle count.
 *
 * Thin architecture hook that forwards to the timer driver's
 * sys_clock_cycle_get_32().
 */
__pinned_func
static inline uint32_t arch_k_cycle_get_32(void)
{
	uint32_t cycles = sys_clock_cycle_get_32();

	return cycles;
}
uint64_t sys_clock_cycle_get_64(void);
/**
 * @brief Return the current 64-bit hardware cycle count.
 *
 * Thin architecture hook that forwards to the timer driver's
 * sys_clock_cycle_get_64().
 */
__pinned_func
static inline uint64_t arch_k_cycle_get_64(void)
{
	uint64_t cycles = sys_clock_cycle_get_64();

	return cycles;
}
/**
 * @brief Test whether a saved interrupt key had interrupts enabled.
 *
 * True when the IF flag (bit 9) is set in the EFLAGS image that
 * arch_irq_lock() captured into @a key.
 */
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
	const unsigned int if_flag = key & 0x200;

	return if_flag != 0;
}
/**
* @brief read timestamp register, 32-bits only, unserialized
*/
static ALWAYS_INLINE uint32_t z_do_read_cpu_timestamp32(void)
{
	uint32_t rv;

	/* EDX receives the high half of the TSC; it is clobbered and
	 * discarded since only the low 32 bits are wanted.
	 */
	__asm__ volatile("rdtsc" : "=a" (rv) : : "%edx");
	return rv;
}
/**
* @brief read timestamp register ensuring serialization
*/
__pinned_func
static inline uint64_t z_tsc_read(void)
{
	/* The anonymous struct overlays the 64-bit value: lo first,
	 * hi second, matching rdtsc's EAX (low) / EDX (high) outputs
	 * on little-endian x86.
	 */
	union {
		struct {
			uint32_t lo;
			uint32_t hi;
		};
		uint64_t value;
	} rv;
#ifdef CONFIG_X86_64
	/*
	 * According to Intel 64 and IA-32 Architectures Software
	 * Developers Manual, volume 3, chapter 8.2.5, LFENCE provides
	 * a more efficient method of controlling memory ordering than
	 * the CPUID instruction. So use LFENCE here, as all 64-bit
	 * CPUs have LFENCE.
	 */
	__asm__ volatile ("lfence");
#else
	/* rdtsc & cpuid clobbers eax, ebx, ecx and edx registers */
	__asm__ volatile (/* serialize */
		"xorl %%eax,%%eax;"
		"cpuid"
		:
		:
		: "%eax", "%ebx", "%ecx", "%edx"
		);
#endif
#ifdef CONFIG_X86_64
	/*
	 * We cannot use "=A", since this would use %rax on x86_64 and
	 * return only the lower 32bits of the TSC
	 */
	__asm__ volatile ("rdtsc" : "=a" (rv.lo), "=d" (rv.hi));
#else
	/* "=A" means that value is in eax:edx pair. */
	__asm__ volatile ("rdtsc" : "=A" (rv.value));
#endif
	return rv.value;
}
/** @brief Execute a single "nop" instruction (architecture no-op hook). */
static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile("nop");
}
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_X86_ARCH_H_ */
``` | /content/code_sandbox/include/zephyr/arch/x86/arch.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,113 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_X86_MMU_H
#define ZEPHYR_INCLUDE_ARCH_X86_MMU_H
#include <zephyr/sys/util.h>
/*
* K_MEM_PARTITION_* defines
*
* Slated for removal when virtual memory is implemented, memory
* mapping APIs will replace memory domains.
*/
#define Z_X86_MMU_RW BIT64(1) /** Read-Write */
#define Z_X86_MMU_US BIT64(2) /** User-Supervisor */
#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
#define Z_X86_MMU_XD BIT64(63) /** Execute Disable */
#else
#define Z_X86_MMU_XD 0
#endif
/* For these we'll just use the same bits in the PTE */
#define ARCH_DATA_PAGE_DIRTY ((uintptr_t)BIT(6))
#define ARCH_DATA_PAGE_LOADED ((uintptr_t)BIT(0))
#define ARCH_DATA_PAGE_ACCESSED ((uintptr_t)BIT(5))
/* Use an PAT bit for this one since it's never set in a mapped PTE */
#define ARCH_DATA_PAGE_NOT_MAPPED ((uintptr_t)BIT(7))
/* Always true with 32-bit page tables, don't enable
* CONFIG_EXECUTE_XOR_WRITE and expect it to work for you
*/
#define K_MEM_PARTITION_IS_EXECUTABLE(attr) (((attr) & Z_X86_MMU_XD) == 0)
#define K_MEM_PARTITION_IS_WRITABLE(attr) (((attr) & Z_X86_MMU_RW) != 0)
/* memory partition arch/soc independent attribute */
#define K_MEM_PARTITION_P_RW_U_RW (Z_X86_MMU_RW | Z_X86_MMU_US | \
Z_X86_MMU_XD)
#define K_MEM_PARTITION_P_RW_U_NA (Z_X86_MMU_RW | Z_X86_MMU_XD)
#define K_MEM_PARTITION_P_RO_U_RO (Z_X86_MMU_US | Z_X86_MMU_XD)
#define K_MEM_PARTITION_P_RO_U_NA Z_X86_MMU_XD
/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RWX_U_RWX (Z_X86_MMU_RW | Z_X86_MMU_US)
#define K_MEM_PARTITION_P_RWX_U_NA Z_X86_MMU_RW
#define K_MEM_PARTITION_P_RX_U_RX Z_X86_MMU_US
#define K_MEM_PARTITION_P_RX_U_NA (0)
/* memory partition access permission mask */
#define K_MEM_PARTITION_PERM_MASK (Z_X86_MMU_RW | Z_X86_MMU_US | \
Z_X86_MMU_XD)
#ifndef _ASMLANGUAGE
#include <zephyr/sys/slist.h>
/* Page table entry data type at all levels. Defined here due to
* k_mem_partition_attr_t, eventually move to private x86_mmu.h
*/
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
typedef uint64_t pentry_t;
#else
typedef uint32_t pentry_t;
#endif
typedef pentry_t k_mem_partition_attr_t;
/* Per-memory-domain page table state.
 *
 * Note the split closing brace below: under CONFIG_X86_PAE the
 * struct embeds the 32-byte PDPT and therefore must itself be
 * 32-byte aligned, so the PAE variant closes with __aligned(32).
 */
struct arch_mem_domain {
#ifdef CONFIG_X86_PAE
	/* 4-entry, 32-byte top-level PDPT */
	pentry_t pdpt[4];
#endif
	/* Pointer to top-level structure, either a PML4, PDPT, PD */
	pentry_t *ptables;
	/* Linked list of all active memory domains */
	sys_snode_t node;
#ifdef CONFIG_X86_PAE
} __aligned(32);
#else
};
#endif /* CONFIG_X86_PAE */
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_X86_MMU_H */
``` | /content/code_sandbox/include/zephyr/arch/x86/mmustructs.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 781 |
```linker script
/*
*
*/
/**
* @file Directives for linker MEMORY regions for all x86
*
* By default, the kernel is linked at its physical address and all addresses
* are in RAM.
*
* If CONFIG_XIP is enabled, then another MEMORY region is declared for ROM,
* and this is where the Zephyr image is booted from. The linker LMAs and VMAs
* are set up, such that read/write data/bss have their VMA addresses
* in RAM and are copied from flash at boot. Text/rodata linked in-place in
* flash.
*
* If CONFIG_MMU is enabled, then the ROM region in MEMORY is used to set the
* LMA for all sections relative to physical address. The virtual address VMAs
* for all sections are relative to the base virtual address for the kernel.
* Setting LMAs here helps let QEMU or any other ELF-aware loader know where to
* physically load the image.
*/
#ifndef ARCH_X86_MEMORY_LD
#define ARCH_X86_MEMORY_LD
#include <zephyr/devicetree.h>
#include <zephyr/linker/devicetree_regions.h>
#include <zephyr/kernel/mm.h>
/* Bounds of physical RAM from DTS */
#define PHYS_RAM_ADDR DT_REG_ADDR(DT_CHOSEN(zephyr_sram))
#define PHYS_RAM_SIZE DT_REG_SIZE(DT_CHOSEN(zephyr_sram))
/* Virtual base address for the kernel; with CONFIG_MMU this is not necessarily
* the same as its physical location, although an identity mapping for RAM
* is still supported by setting CONFIG_KERNEL_VM_BASE=CONFIG_SRAM_BASE_ADDRESS.
*/
#ifdef K_MEM_IS_VM_KERNEL
#define KERNEL_BASE_ADDR (CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET)
#define KERNEL_RAM_SIZE (CONFIG_KERNEL_VM_SIZE - CONFIG_KERNEL_VM_OFFSET)
#define PHYS_RAM_AVAIL (PHYS_RAM_SIZE - CONFIG_SRAM_OFFSET)
#else
#define KERNEL_BASE_ADDR (PHYS_RAM_ADDR + CONFIG_SRAM_OFFSET)
#define KERNEL_RAM_SIZE (PHYS_RAM_SIZE - CONFIG_SRAM_OFFSET)
#endif
/* "kernel RAM" for linker VMA allocations starts at the offset */
#if defined(CONFIG_ROM_END_OFFSET)
#define ROM_END_OFFSET CONFIG_ROM_END_OFFSET
#else
#define ROM_END_OFFSET 0
#endif
#ifdef CONFIG_XIP
/* "ROM" is flash, we leave rodata and text there and just copy in data.
* Board-level DTS must specify a flash region that doesn't overlap with
* sram0, so that DT_PHYS_LOAD_ADDR is set.
*/
#define FLASH_ROM_SIZE (DT_REG_SIZE(DT_CHOSEN(zephyr_flash)) - ROM_END_OFFSET)
#define PHYS_LOAD_ADDR DT_REG_ADDR(DT_CHOSEN(zephyr_flash))
#else
/* Physical RAM location where the kernel image is loaded */
#define PHYS_LOAD_ADDR (PHYS_RAM_ADDR + CONFIG_SRAM_OFFSET)
#endif /* CONFIG_XIP */
#ifdef CONFIG_X86_64
/* Locore must be addressable by real mode and so cannot extend past 64K.
* Skip reserved stuff in the first page
*/
#define LOCORE_BASE 0x1000
#define LOCORE_SIZE (0x10000 - LOCORE_BASE)
#if PHYS_RAM_ADDR != CONFIG_KERNEL_VM_BASE
#error Virtual kernel linking is not yet implemented for 64-bit
#endif
#endif /* CONFIG_X86_64 */
/* Region definitions; see the file header comment for how ROM is used
 * as the LMA source and RAM as the VMA target under XIP/MMU.
 */
MEMORY
	{
#if defined(CONFIG_XIP)
	/* Address range where the kernel will be installed on a flash part (XIP),
	 * or copied into physical RAM by a loader (MMU)
	 */
	ROM (rx)        : ORIGIN = PHYS_LOAD_ADDR, LENGTH = FLASH_ROM_SIZE
#elif defined(K_MEM_IS_VM_KERNEL)
	ROM (rx)        : ORIGIN = PHYS_LOAD_ADDR, LENGTH = PHYS_RAM_AVAIL
#endif
	/* Linear address range to link the kernel. If non-XIP, everything is
	 * linked in this space. Otherwise, rodata and text are linked at their
	 * physical ROM locations
	 */
	RAM (wx)        : ORIGIN = KERNEL_BASE_ADDR, LENGTH = KERNEL_RAM_SIZE
	LINKER_DT_REGIONS()
#ifdef CONFIG_X86_64
	/* Special low-memory area for bootstrapping other CPUs from real mode */
	LOCORE (wx)        : ORIGIN = LOCORE_BASE, LENGTH = LOCORE_SIZE
#else
	/*
	 * On 32-bit x86, fake memory area for build-time IDT generation data.
	 * 64-bit doesn't use this, interrupts are all managed at runtime.
	 *
	 * It doesn't matter where this region goes as it is stripped from the
	 * final ELF image. The address doesn't even have to be valid on the
	 * target. However, it shouldn't overlap any other regions.
	 */
	IDT_LIST        : ORIGIN = 0xFFFF1000, LENGTH = 2K
#endif /* !CONFIG_X86_64 */
	}
#endif /* ARCH_X86_MEMORY_LD */
``` | /content/code_sandbox/include/zephyr/arch/x86/memory.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,062 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_X86_THREAD_STACK_H
#define ZEPHYR_INCLUDE_ARCH_X86_THREAD_STACK_H
#include <zephyr/arch/x86/mmustructs.h>
#ifdef CONFIG_X86_64
#define ARCH_STACK_PTR_ALIGN 16UL
#else
#define ARCH_STACK_PTR_ALIGN 4UL
#endif
#if defined(CONFIG_X86_STACK_PROTECTION) || defined(CONFIG_USERSPACE) \
|| defined(CONFIG_THREAD_STACK_MEM_MAPPED)
#define Z_X86_STACK_BASE_ALIGN CONFIG_MMU_PAGE_SIZE
#else
#define Z_X86_STACK_BASE_ALIGN ARCH_STACK_PTR_ALIGN
#endif
#if defined(CONFIG_USERSPACE) || defined(CONFIG_THREAD_STACK_MEM_MAPPED)
/* If user mode enabled, expand any stack size to fill a page since that is
* the access control granularity and we don't want other kernel data to
* unintentionally fall in the latter part of the page
*
* This is also true when memory mapped stacks are used with since
* access control applies to one page at a time.
*/
#define Z_X86_STACK_SIZE_ALIGN CONFIG_MMU_PAGE_SIZE
#else
#define Z_X86_STACK_SIZE_ALIGN ARCH_STACK_PTR_ALIGN
#endif
#ifndef _ASMLANGUAGE
/* With both hardware stack protection and userspace enabled, stacks are
* arranged as follows:
*
* --- Without stack being memory mapped:
* High memory addresses
* +-----------------------------------------+
* | Thread stack (varies) |
* +-----------------------------------------+
* | Privilege elevation stack |
* | (CONFIG_PRIVILEGED_STACK_SIZE) |
* +-----------------------------------------+
* | Guard page (4096 bytes) |
* | - 'guard_page' in struct |
* | z_x86_thread_stack_header |
* +-----------------------------------------+
* Low Memory addresses
*
* --- With stack being memory mapped:
* High memory addresses
* +-----------------------------------------+
* | Guard page (empty page) |
* +-----------------------------------------+
* | Thread stack (varies) |
* +-----------------------------------------+
* | Privilege elevation stack |
* | (CONFIG_PRIVILEGED_STACK_SIZE) |
* +-----------------------------------------+
* | Guard page (empty page) |
* +-----------------------------------------+
* Low Memory addresses
*
* Without memory mapped stacks, the guard page is actually allocated
* as part of the stack struct, which takes up physical memory during
* linking.
*
* Privilege elevation stacks are fixed-size. All the pages containing the
* thread stack are marked as user-accessible. The guard page is marked
* read-only to catch stack overflows in supervisor mode.
*
* If a thread starts in supervisor mode, the page containing the
* privilege elevation stack is also marked read-only.
*
* If a thread starts in, or drops down to user mode, the privilege stack page
* will be marked as present, supervisor-only.
*
* If KPTI is not enabled, the _main_tss.esp0 field will always be updated
* updated to point to the top of the privilege elevation stack. Otherwise
* _main_tss.esp0 always points to the trampoline stack, which handles the
* page table switch to the kernel PDPT and transplants context to the
* privileged mode stack.
*/
/* Reserved area placed at the low end of every thread stack object;
 * layout is described by the large diagram above.
 */
struct z_x86_thread_stack_header {
#if defined(CONFIG_X86_STACK_PROTECTION) && !defined(CONFIG_THREAD_STACK_MEM_MAPPED)
	/* Physically-allocated guard page, remapped read-only to trap
	 * supervisor-mode stack overflows (unused when stacks are
	 * memory-mapped, which provide empty guard pages instead).
	 */
	char guard_page[CONFIG_MMU_PAGE_SIZE];
#endif
#ifdef CONFIG_USERSPACE
	/* Fixed-size stack used while elevating privilege on syscalls */
	char privilege_stack[CONFIG_PRIVILEGED_STACK_SIZE];
#endif /* CONFIG_USERSPACE */
} __packed __aligned(Z_X86_STACK_BASE_ALIGN);
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) Z_X86_STACK_BASE_ALIGN
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
ROUND_UP((size), Z_X86_STACK_SIZE_ALIGN)
#define ARCH_THREAD_STACK_RESERVED \
sizeof(struct z_x86_thread_stack_header)
#ifdef CONFIG_X86_STACK_PROTECTION
#define ARCH_KERNEL_STACK_RESERVED CONFIG_MMU_PAGE_SIZE
#define ARCH_KERNEL_STACK_OBJ_ALIGN CONFIG_MMU_PAGE_SIZE
#else
#define ARCH_KERNEL_STACK_RESERVED 0
#define ARCH_KERNEL_STACK_OBJ_ALIGN ARCH_STACK_PTR_ALIGN
#endif
#endif /* !_ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_X86_THREAD_STACK_H */
``` | /content/code_sandbox/include/zephyr/arch/x86/thread_stack.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 886 |
```linker script
/*
*/
/* Pagetables. These are produced by arch/x86/gen-mmu.py based on
* data in zephyr_prebuilt.elf (the result of linker pass 1).
* For the pass 1 build, an equal-sized dummy area is provided as
* to not shift memory addresses that occur after this.
*/
#ifdef CONFIG_MMU
SECTION_DATA_PROLOGUE(pagetables,,)
{
	. = ALIGN(4096);
	z_x86_pagetables_start = .;
#ifdef LINKER_ZEPHYR_FINAL
	KEEP(*(pagetables)) /* gen_mmu.py */
#else
	KEEP(*(.dummy_pagetables)) /* from x86_mmu.c, just an empty array */
#endif /* LINKER_ZEPHYR_FINAL */
/* Top-level paging structure is the last thing in this section */
#if CONFIG_X86_PAE
	/* 4-entry PDPT: 4 x 8-byte entries = 32 bytes from the end */
	z_x86_kernel_ptables = . - 32;
#else
	/* Page directory or PML4: one full 4 KiB page from the end */
	z_x86_kernel_ptables = . - 4096;
#endif /* CONFIG_X86_PAE */
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#ifdef LINKER_ZEPHYR_FINAL
/DISCARD/ :
{
	/* We have the real ones in this build */
	*(.dummy_pagetables)
}
#endif /* LINKER_ZEPHYR_FINAL */
#endif /* CONFIG_MMU */
``` | /content/code_sandbox/include/zephyr/arch/x86/pagetables.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 309 |
```objective-c
/*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_X86_INTEL64_EXPCEPTION_H_
#define ZEPHYR_INCLUDE_ARCH_X86_INTEL64_EXPCEPTION_H_
#ifndef _ASMLANGUAGE
#include <zephyr/arch/x86/intel64/thread.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* the exception stack frame
*/
/* Register save area built on exception entry.
 * NOTE(review): member order must match the push sequence in the
 * assembly exception stubs -- confirm before reordering anything.
 */
struct arch_esf {
#ifdef CONFIG_EXCEPTION_DEBUG
	/* callee-saved */
	unsigned long rbx;
	unsigned long r12;
	unsigned long r13;
	unsigned long r14;
	unsigned long r15;
#endif /* CONFIG_EXCEPTION_DEBUG */
	unsigned long rbp;
	/* Caller-saved regs */
	unsigned long rax;
	unsigned long rcx;
	unsigned long rdx;
	unsigned long rsi;
	unsigned long rdi;
	unsigned long r8;
	unsigned long r9;
	unsigned long r10;
	/* Must be aligned 16 bytes from the end of this struct due to
	 * requirements of 'fxsave (%rsp)'
	 */
	char fxsave[X86_FXSAVE_SIZE];
	unsigned long r11;
	/* Pushed by CPU or assembly stub */
	unsigned long vector;
	unsigned long code;
	unsigned long rip;
	unsigned long cs;
	unsigned long rflags;
	unsigned long rsp;
	unsigned long ss;
};
/* Register save area used across system call dispatch.
 * NOTE(review): layout must stay in sync with the userspace syscall
 * entry assembly -- confirm before changing.
 */
struct x86_ssf {
	unsigned long rip;
	unsigned long rflags;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long rdx;
	unsigned long rsi;
	char fxsave[X86_FXSAVE_SIZE];
	unsigned long rdi;
	unsigned long rsp;
};
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_X86_INTEL64_EXPCEPTION_H_ */
``` | /content/code_sandbox/include/zephyr/arch/x86/intel64/exception.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 349 |
```objective-c
/*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_X86_INTEL64_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_X86_INTEL64_ARCH_H_
#include <zephyr/arch/x86/intel64/exception.h>
#include <zephyr/arch/x86/intel64/thread.h>
#include <zephyr/arch/x86/thread_stack.h>
#if defined(CONFIG_PCIE) && !defined(_ASMLANGUAGE)
#include <zephyr/sys/iterable_sections.h>
#endif
#if CONFIG_ISR_STACK_SIZE != (CONFIG_ISR_SUBSTACK_SIZE * CONFIG_ISR_DEPTH)
#error "Check ISR stack configuration (CONFIG_ISR_*)"
#endif
#if CONFIG_ISR_SUBSTACK_SIZE % ARCH_STACK_PTR_ALIGN
#error "CONFIG_ISR_SUBSTACK_SIZE must be a multiple of 16"
#endif
#ifndef _ASMLANGUAGE
/** @brief 64-bit MMIO write via a single "movq". */
static ALWAYS_INLINE void sys_write64(uint64_t data, mm_reg_t addr)
{
	__asm__ volatile("movq %0, %1"
			 :
			 : "r"(data), "m" (*(volatile uint64_t *)
					   (uintptr_t) addr)
			 : "memory");
}
/** @brief 64-bit MMIO read via a single "movq". */
static ALWAYS_INLINE uint64_t sys_read64(mm_reg_t addr)
{
	uint64_t ret;

	__asm__ volatile("movq %1, %0"
			 : "=r"(ret)
			 : "m" (*(volatile uint64_t *)(uintptr_t) addr)
			 : "memory");
	return ret;
}
/**
 * @brief Disable interrupts, returning the previous state as a key.
 *
 * Saves RFLAGS via pushfq/popq, then masks interrupts with "cli".
 * Only the low 32 bits of RFLAGS are returned; they contain the IF
 * bit that arch_irq_unlock() tests to restore the state.
 */
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
	unsigned long key;

	__asm__ volatile ("pushfq; cli; popq %0" : "=g" (key) : : "memory");
	return (unsigned int) key;
}
#define ARCH_EXCEPT(reason_p) do { \
__asm__ volatile( \
"movq %[reason], %%rax\n\t" \
"int $32\n\t" \
: \
: [reason] "i" (reason_p)); \
CODE_UNREACHABLE; /* LCOV_EXCL_LINE */ \
} while (false)
#ifdef CONFIG_PCIE
#define X86_RESERVE_IRQ(irq_p, name) \
static TYPE_SECTION_ITERABLE(uint8_t, name, irq_alloc, name) = irq_p
#else
#define X86_RESERVE_IRQ(irq_p, name)
#endif
#endif /* _ASMLANGUAGE */
/*
* All Intel64 interrupts are dynamically connected.
*/
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
X86_RESERVE_IRQ(irq_p, _CONCAT(_irq_alloc_fixed, __COUNTER__)); \
arch_irq_connect_dynamic(irq_p, priority_p, \
(void (*)(const void *))isr_p, \
isr_param_p, flags_p)
#ifdef CONFIG_PCIE
#define ARCH_PCIE_IRQ_CONNECT(bdf_p, irq_p, priority_p, \
isr_p, isr_param_p, flags_p) \
X86_RESERVE_IRQ(irq_p, _CONCAT(_irq_alloc_fixed, __COUNTER__)); \
pcie_connect_dynamic_irq(bdf_p, irq_p, priority_p, \
(void (*)(const void *))isr_p, \
isr_param_p, flags_p)
#endif /* CONFIG_PCIE */
/*
* Thread object needs to be 16-byte aligned.
*/
#define ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT 16
#endif /* ZEPHYR_INCLUDE_ARCH_X86_INTEL64_ARCH_H_ */
``` | /content/code_sandbox/include/zephyr/arch/x86/intel64/arch.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 724 |
```objective-c
/*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_X86_INTEL_VTD_H
#define ZEPHYR_INCLUDE_ARCH_X86_INTEL_VTD_H
#ifndef _ASMLANGUAGE
/*************\
* Registers *
\*************/
#define VTD_VER_REG 0x000 /* Version */
#define VTD_CAP_REG 0x008 /* Capability */
#define VTD_ECAP_REG 0x010 /* Extended Capability */
#define VTD_GCMD_REG 0x018 /* Global Command */
#define VTD_GSTS_REG 0x01C /* Global Status */
#define VTD_RTADDR_REG 0x020 /* Root Table Address */
#define VTD_CCMD_REG 0x028 /* Context Command */
#define VTD_FSTS_REG 0x034 /* Fault Status */
#define VTD_FECTL_REG 0x038 /* Fault Event Control */
#define VTD_FEDATA_REG 0x03C /* Fault Event Data */
#define VTD_FEADDR_REG 0x040 /* Fault Event Address */
#define VTD_FEUADDR_REG 0x044 /* Fault Event Upper Address */
#define VTD_AFLOG_REG 0x058 /* Advanced Fault Log */
#define VTD_PMEN_REG 0x064 /* Protected Memory Enable */
#define VTD_PLMBASE_REG 0x068 /* Protected Low Memory Base */
#define VTD_PLMLIMIT_REG 0x06C /* Protected Low Memory Limit */
#define VTD_PHMBASE_REG 0x070 /* Protected High Memory Base */
#define VTD_PHMLIMIT_REG 0x078 /* Protected High Memory Limit */
#define VTD_IQH_REG 0x080 /* Invalidation Queue Head */
#define VTD_IQT_REG 0x088 /* Invalidation Queue Tail */
#define VTD_IQA_REG 0x090 /* Invalidation Queue Address */
#define VTD_ICS_REG 0x09C /* Invalidation Completion Status */
#define VTD_IECTL_REG 0x0A0 /* Invalidation Completion Event Control */
#define VTD_IEDATA_REG 0x0A4 /* Invalidation Completion Event Data */
#define VTD_IEADDR_REG 0x0A8 /* Invalidation Completion Event Address */
#define VTD_IEUADDR_REG 0x0AC /* Invalidation Completion Event Upper Address */
#define VTD_IQERCD_REG 0x0B0 /* Invalidation Queue Error Record */
#define VTD_IRTA_REG 0x0B8 /* Interrupt Remapping Table Address */
#define VTD_PQH_REG 0x0C0 /* Page Request Queue Head */
#define VTD_PQT_REG 0x0C8 /* Page Request Queue Tail */
#define VTD_PQA_REG 0x0D0 /* Page Request Queue Address */
#define VTD_PRS_REG 0x0DC /* Page Request Status */
#define VTD_PECTL_REG 0x0E0 /* Page Request Event Control */
#define VTD_PEDATA_REG 0x0E4 /* Page Request Event Data */
#define VTD_PEADDR_REG 0x0E8 /* Page Request Event Address */
#define VTD_PEUADDR_REG 0x0EC /* Page Request Event Upper Address */
#define VTD_MTRRCAP_REG 0x100 /* MTRR Capability */
#define VTD_MTRRDEF_REG 0x108 /* MTRR Default Type */
#define VTD_MTRR_FIX64K_00000_REG 0x120 /* Fixed-range MTRR for 64K_00000 */
#define VTD_MTRR_FIX16K_80000_REG 0x128 /* Fixed-range MTRR for 16K_80000 */
#define VTD_MTRR_FIX16K_A0000_REG 0x130 /* Fixed-range MTRR for 16K_A0000 */
#define VTD_MTRR_FIX4K_C0000_REG 0x138 /* Fixed-range MTRR for 4K_C0000 */
#define VTD_MTRR_FIX4K_C8000_REG 0x140 /* Fixed-range MTRR for 4K_C8000 */
#define VTD_MTRR_FIX4K_D0000_REG 0x148 /* Fixed-range MTRR for 4K_D0000 */
#define VTD_MTRR_FIX4K_D8000_REG 0x150 /* Fixed-range MTRR for 4K_D8000 */
#define VTD_MTRR_FIX4K_E0000_REG 0x158 /* Fixed-range MTRR for 4K_E0000 */
#define VTD_MTRR_FIX4K_E8000_REG 0x160 /* Fixed-range MTRR for 4K_E8000 */
#define VTD_MTRR_FIX4K_F0000_REG 0x168 /* Fixed-range MTRR for 4K_F0000 */
#define VTD_MTRR_FIX4K_F8000_REG 0x170 /* Fixed-range MTRR for 4K_F8000 */
#define VTD_MTRR_PHYSBASE0_REG 0x180 /* Variable-range MTRR Base0 */
#define VTD_MTRR_PHYSMASK0_REG 0x188 /* Variable-range MTRR Mask0 */
#define VTD_MTRR_PHYSBASE1_REG 0x190 /* Variable-range MTRR Base1 */
#define VTD_MTRR_PHYSMASK1_REG 0x198 /* Variable-range MTRR Mask1 */
#define VTD_MTRR_PHYSBASE2_REG 0x1A0 /* Variable-range MTRR Base2 */
#define VTD_MTRR_PHYSMASK2_REG 0x1A8 /* Variable-range MTRR Mask2 */
#define VTD_MTRR_PHYSBASE3_REG 0x1B0 /* Variable-range MTRR Base3 */
#define VTD_MTRR_PHYSMASK3_REG 0x1B8 /* Variable-range MTRR Mask3 */
#define VTD_MTRR_PHYSBASE4_REG 0x1C0 /* Variable-range MTRR Base4 */
#define VTD_MTRR_PHYSMASK4_REG 0x1C8 /* Variable-range MTRR Mask4 */
#define VTD_MTRR_PHYSBASE5_REG 0x1D0 /* Variable-range MTRR Base5 */
#define VTD_MTRR_PHYSMASK5_REG 0x1D8 /* Variable-range MTRR Mask5 */
#define VTD_MTRR_PHYSBASE6_REG 0x1E0 /* Variable-range MTRR Base6 */
#define VTD_MTRR_PHYSMASK6_REG 0x1E8 /* Variable-range MTRR Mask6 */
#define VTD_MTRR_PHYSBASE7_REG 0x1F0 /* Variable-range MTRR Base7 */
#define VTD_MTRR_PHYSMASK7_REG 0x1F8 /* Variable-range MTRR Mask7 */
#define VTD_MTRR_PHYSBASE8_REG 0x200 /* Variable-range MTRR Base8 */
#define VTD_MTRR_PHYSMASK8_REG 0x208 /* Variable-range MTRR Mask8 */
#define VTD_MTRR_PHYSBASE9_REG 0x210 /* Variable-range MTRR Base9 */
#define VTD_MTRR_PHYSMASK9_REG 0x218 /* Variable-range MTRR Mask9 */
#define VTD_VCCAP_REG 0xE00 /* Virtual Command Capability */
#define VTD_VCMD 0xE10 /* Virtual Command */
#define VTD_VCRSP 0xE20 /* Virtual Command Response */
/* Capability Register details */
#define VTD_CAP_NFR_POS 40
#define VTD_CAP_NFR_MASK ((uint64_t)0xFFUL << VTD_CAP_NFR_POS)
#define VTD_CAP_NFR(cap) \
(((uint64_t)cap & VTD_CAP_NFR_MASK) >> VTD_CAP_NFR_POS)
#define VTD_CAP_FRO_POS 24
#define VTD_CAP_FRO_MASK ((uint64_t)0x3FFUL << VTD_CAP_FRO_POS)
#define VTD_CAP_FRO(cap) \
(((uint64_t)cap & VTD_CAP_FRO_MASK) >> VTD_CAP_FRO_POS)
/* Extended Capability Register details */
#define VTD_ECAP_C BIT(0)
/* Global Command Register details */
#define VTD_GCMD_CFI 23
#define VTD_GCMD_SIRTP 24
#define VTD_GCMD_IRE 25
#define VTD_GCMD_QIE 26
#define VTD_GCMD_WBF 27
#define VTD_GCMD_EAFL 28
#define VTD_GCMD_SFL 29
#define VTD_GCMD_SRTP 30
#define VTD_GCMD_TE 31
/* Global Status Register details */
#define VTD_GSTS_CFIS 23
#define VTD_GSTS_SIRTPS 24
#define VTD_GSTS_IRES 25
#define VTD_GSTS_QIES 26
#define VTD_GSTS_WBFS 27
#define VTD_GSTS_EAFLS 28
#define VTD_GSTS_SFLS 29
#define VTD_GSTS_SRTPS 30
#define VTD_GSTS_TES 31
/* Interrupt Remapping Table Address Register details */
#define VTD_IRTA_SIZE_MASK 0x000000000000000FUL
#define VTD_IRTA_EIME BIT(11)
#define VTD_IRTA_REG_GEN_CONTENT(addr, size, mode) \
((uint64_t)(addr) | (mode) | (size & VTD_IRTA_SIZE_MASK))
/* Fault event control register details */
#define VTD_FECTL_REG_IP 30
#define VTD_FECTL_REG_IM 31
/* Fault event status register details */
#define VTD_FSTS_PFO BIT(0)
#define VTD_FSTS_PPF BIT(1)
#define VTD_FSTS_AFO BIT(2)
#define VTD_FSTS_APF BIT(3)
#define VTD_FSTS_IQE BIT(4)
#define VTD_FSTS_ICE BIT(5)
#define VTD_FSTS_ITE BIT(6)
#define VTD_FSTS_FRI_POS 8
#define VTD_FSTS_FRI_MASK (0xF << VTD_FSTS_FRI_POS)
#define VTD_FSTS_FRI(status) \
((status & VTD_FSTS_FRI_MASK) >> VTD_FSTS_FRI_POS)
#define VTD_FSTS_CLEAR_STATUS \
(VTD_FSTS_PFO | VTD_FSTS_AFO | VTD_FSTS_APF | \
VTD_FSTS_IQE | VTD_FSTS_ICE | VTD_FSTS_ITE)
#define VTD_FSTS_CLEAR(status) \
(status & VTD_FSTS_CLEAR_STATUS)
/* Fault recording register(s) details
* Note: parts of the register are split into highest and lowest 64bits
* so bit positions are depending on it and are not based on 128bits reg.
*/
#define VTD_FRCD_REG_SIZE 16
/* Highest 64bits info */
#define VTD_FRCD_F BIT(63)
#define VTD_FRCD_T BIT(62)
#define VTD_FRCD_FR_POS 32
#define VTD_FRCD_FR_MASK ((uint64_t)0xFF << VTD_FRCD_FR_POS)
#define VTD_FRCD_FR(fault) \
((uint8_t)((fault & VTD_FRCD_FR_MASK) >> VTD_FRCD_FR_POS))
#define VTD_FRCD_SID_MASK 0xFFFF
#define VTD_FRCD_SID(fault) \
((uint16_t)(fault & VTD_FRCD_SID_MASK))
/* Lowest 64bits info */
#define VTD_FRCD_FI_POS 12
#define VTD_FRCD_FI_MASK ((uint64_t)0xFFFFFFFFFFFFF << VTD_FRCD_FI_POS)
#define VTD_FRCD_FI(fault) \
((fault & VTD_FRCD_FI_MASK) >> VTD_FRCD_FI_POS)
#define VTD_FRCD_FI_IR_POS 48
#define VTD_FRCD_FI_IR_MASK ((uint64_t)0xFFFF << VTD_FRCD_FI_IR_POS)
#define VTD_FRCD_FI_IR(fault) \
((fault & VTD_FRCD_FI_IR_MASK) >> VTD_FRCD_FI_IR_POS)
/* Invalidation Queue Address register details */
#define VTD_IQA_SIZE_MASK 0x7
#define VTD_IQA_WIDTH_128_BIT 0
#define VTD_IQA_WIDTH_256_BIT BIT(11)
#define VTD_IQA_REG_GEN_CONTENT(addr, width, size) \
((uint64_t)0 | (addr) | (width) | (size & VTD_IQA_SIZE_MASK))
/* Invalidation Queue Head register details */
#define VTD_IQH_QH_POS_128 4
#define VTD_IQH_QH_MASK ((uint64_t)0xEF << VTD_IQH_QH_POS_128)
/* Invalidation Queue Tail register details */
#define VTD_IQT_QT_POS_128 4
#define VTD_IQT_QT_MASK ((uint64_t)0xEF << VTD_IQT_QT_POS_128)
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_X86_INTEL_VTD_H */
``` | /content/code_sandbox/include/zephyr/arch/x86/intel_vtd.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,874 |
```objective-c
/*
*
*/
/**
* @file
* @brief x86 (INTEL64) specific syscall header
*
* This header contains the x86 specific syscall interface. It is
* included by the syscall interface architecture-abstraction header
* (include/arch/syscall.h)
*/
#ifndef ZEPHYR_INCLUDE_ARCH_X86_INTEL64_SYSCALL_H_
#define ZEPHYR_INCLUDE_ARCH_X86_INTEL64_SYSCALL_H_
#ifdef CONFIG_USERSPACE
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* x86_64 System V calling convention:
* First six arguments passed in via RDI, RSI, RDX, RCX, R8, R9
* We'll use RAX for the call_id, and the return value
*
* Arrange registers so that they are in-place as much as possible when
* doing the system call. Because RCX get overwritten by the CPU, put arg 4
* in r10 instead.
*
* SYSCALL instruction stores return address in RCX and RFLAGS in R11. RIP is
* loaded from LSTAR MSR, masks RFLAGS with the low 32 bits of EFER.SFMASK. CS
* and SS are loaded from values derived from bits 47:32 of STAR MSR (+0
* for CS, +8 for SS)
*
* SYSRET loads RIP from RCX and RFLAGS from r11. CS and SS are set with
* values derived from STAR MSR bits 63:48 (+8 for CS, +16 for SS)
*
* The kernel is in charge of not clobbering across the system call
* the remaining registers: RBX, RBP, R12-R15, SIMD/FPU, and any unused
* argument registers.
*/
/** @brief System call with six arguments.
 *
 * Args go in RDI, RSI, RDX, R10, R8, R9; the call id goes in RAX,
 * which also carries back the return value. RCX/R11 are clobbered
 * by the SYSCALL instruction itself (return RIP and RFLAGS).
 */
static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t arg6,
					     uintptr_t call_id)
{
	register uintptr_t rax __asm__("%rax") = call_id;
	register uintptr_t rdi __asm__("%rdi") = arg1;
	register uintptr_t rsi __asm__("%rsi") = arg2;
	register uintptr_t rdx __asm__("%rdx") = arg3;
	register uintptr_t r10 __asm__("%r10") = arg4; /* RCX unavailable */
	register uintptr_t r8 __asm__("%r8") = arg5;
	register uintptr_t r9 __asm__("%r9") = arg6;

	__asm__ volatile("syscall\n\t"
			 : "=r" (rax)
			 : "r" (rax), "r" (rdi), "r" (rsi), "r" (rdx),
			   "r" (r10), "r" (r8), "r" (r9)
			 : "memory", "rcx", "r11");

	return rax;
}
/** @brief System call with five arguments (RDI,RSI,RDX,R10,R8; id/result in RAX). */
static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5,
					     uintptr_t call_id)
{
	register uintptr_t rax __asm__("%rax") = call_id;
	register uintptr_t rdi __asm__("%rdi") = arg1;
	register uintptr_t rsi __asm__("%rsi") = arg2;
	register uintptr_t rdx __asm__("%rdx") = arg3;
	register uintptr_t r10 __asm__("%r10") = arg4; /* RCX unavailable */
	register uintptr_t r8 __asm__("%r8") = arg5;

	__asm__ volatile("syscall\n\t"
			 : "=r" (rax)
			 : "r" (rax), "r" (rdi), "r" (rsi), "r" (rdx),
			   "r" (r10), "r" (r8)
			 : "memory", "rcx", "r11");

	return rax;
}
/**
 * Invoke a system call with four arguments (RAX = call ID,
 * RDI/RSI/RDX/R10 = args; RCX/R11 clobbered by SYSCALL).
 */
static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t call_id)
{
	register uintptr_t rax __asm__("%rax") = call_id;
	register uintptr_t rdi __asm__("%rdi") = arg1;
	register uintptr_t rsi __asm__("%rsi") = arg2;
	register uintptr_t rdx __asm__("%rdx") = arg3;
	register uintptr_t r10 __asm__("%r10") = arg4; /* RCX unavailable */

	__asm__ volatile("syscall\n\t"
			 : "=r" (rax)
			 : "r" (rax), "r" (rdi), "r" (rsi), "r" (rdx),
			   "r" (r10)
			 : "memory", "rcx", "r11");

	return rax;
}
/**
 * Invoke a system call with three arguments (RAX = call ID,
 * RDI/RSI/RDX = args; RCX/R11 clobbered by SYSCALL).
 */
static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3,
					     uintptr_t call_id)
{
	register uintptr_t rax __asm__("%rax") = call_id;
	register uintptr_t rdi __asm__("%rdi") = arg1;
	register uintptr_t rsi __asm__("%rsi") = arg2;
	register uintptr_t rdx __asm__("%rdx") = arg3;

	__asm__ volatile("syscall\n\t"
			 : "=r" (rax)
			 : "r" (rax), "r" (rdi), "r" (rsi), "r" (rdx)
			 : "memory", "rcx", "r11");

	return rax;
}
/**
 * Invoke a system call with two arguments (RAX = call ID,
 * RDI/RSI = args; RCX/R11 clobbered by SYSCALL).
 */
static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t call_id)
{
	register uintptr_t rax __asm__("%rax") = call_id;
	register uintptr_t rdi __asm__("%rdi") = arg1;
	register uintptr_t rsi __asm__("%rsi") = arg2;

	__asm__ volatile("syscall\n\t"
			 : "=r" (rax)
			 : "r" (rax), "r" (rdi), "r" (rsi)
			 : "memory", "rcx", "r11");

	return rax;
}
/**
 * Invoke a system call with one argument (RAX = call ID, RDI = arg;
 * RCX/R11 clobbered by SYSCALL).
 */
static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1,
					     uintptr_t call_id)
{
	register uintptr_t rax __asm__("%rax") = call_id;
	register uintptr_t rdi __asm__("%rdi") = arg1;

	__asm__ volatile("syscall\n\t"
			 : "=r" (rax)
			 : "r" (rax), "r" (rdi)
			 : "memory", "rcx", "r11");

	return rax;
}
/**
 * Invoke a system call with no arguments (RAX = call ID; RCX/R11
 * clobbered by SYSCALL).
 */
static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
{
	register uintptr_t rax __asm__("%rax") = call_id;

	__asm__ volatile("syscall\n\t"
			 : "=r" (rax)
			 : "r" (rax)
			 : "memory", "rcx", "r11");

	return rax;
}
/**
 * Report whether execution is currently in user mode.
 *
 * The two low bits of the CS selector hold the current privilege
 * level; any non-zero value means we are not running in ring 0.
 */
static inline bool arch_is_user_context(void)
{
	int code_seg;

	__asm__ volatile ("mov %%cs, %[sel]" : [sel] "=r" (code_seg));

	/* CPL != 0 -> user mode */
	return (code_seg & 0x3) != 0;
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* CONFIG_USERSPACE */
#endif /* ZEPHYR_INCLUDE_ARCH_X86_INTEL64_SYSCALL_H_ */
``` | /content/code_sandbox/include/zephyr/arch/x86/intel64/syscall.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,621 |
```objective-c
/*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_X86_INTEL64_THREAD_H_
#define ZEPHYR_INCLUDE_ARCH_X86_INTEL64_THREAD_H_
#define X86_THREAD_FLAG_ALL 0x01 /* _thread_arch.flags: entire state saved */
/*
* GDT selectors - these must agree with the GDT layout in locore.S.
*/
#define X86_KERNEL_CS_32 0x08 /* 32-bit kernel code */
#define X86_KERNEL_DS_32 0x10 /* 32-bit kernel data */
#define X86_KERNEL_CS 0x18 /* 64-bit kernel code */
#define X86_KERNEL_DS 0x20 /* 64-bit kernel data */
#define X86_USER_CS_32 0x28 /* 32-bit user data (unused) */
#define X86_USER_DS 0x30 /* 64-bit user mode data */
#define X86_USER_CS 0x38 /* 64-bit user mode code */
/* Value programmed into bits 63:32 of STAR MSR with proper segment
* descriptors for implementing user mode with syscall/sysret
*/
#define X86_STAR_UPPER ((X86_USER_CS_32 << 16) | X86_KERNEL_CS)
#define X86_KERNEL_CPU0_TR 0x40 /* 64-bit task state segment */
#define X86_KERNEL_CPU1_TR 0x50 /* 64-bit task state segment */
#define X86_KERNEL_CPU2_TR 0x60 /* 64-bit task state segment */
#define X86_KERNEL_CPU3_TR 0x70 /* 64-bit task state segment */
/*
* Some SSE definitions. Ideally these will ultimately be shared with 32-bit.
*/
#define X86_FXSAVE_SIZE 512 /* size and alignment of buffer ... */
#define X86_FXSAVE_ALIGN 16 /* ... for FXSAVE/FXRSTOR ops */
/* MXCSR Control and Status Register for SIMD floating-point operations.
* Set default value 1F80H according to the Intel(R) 64 and IA-32 Manual.
* Disable denormals-are-zeros mode.
*/
#define X86_MXCSR_SANE 0x1f80
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <zephyr/arch/x86/mmustructs.h>
/*
* 64-bit Task State Segment. One defined per CPU.
*/
/*
 * 64-bit Task State Segment, one instance per CPU. The first portion
 * is the hardware-defined layout; Zephyr-specific per-CPU data is
 * appended after it. Member offsets are consumed from assembly (see
 * the comment below), so do not reorder fields.
 */
struct x86_tss64 {
	/*
	 * Architecturally-defined portion. It is somewhat tedious to
	 * enumerate each member specifically (rather than using arrays)
	 * but we need to get (some of) their offsets from assembly.
	 */
	uint8_t reserved0[4];
	uint64_t rsp0;		/* privileged stacks */
	uint64_t rsp1;
	uint64_t rsp2;
	uint8_t reserved[8];
	uint64_t ist1;		/* interrupt stacks */
	uint64_t ist2;
	uint64_t ist3;
	uint64_t ist4;
	uint64_t ist5;
	uint64_t ist6;
	uint64_t ist7;
	uint8_t reserved1[10];
	uint16_t iomapb;	/* offset to I/O base */

	/*
	 * Zephyr specific portion. Stash per-CPU data here for convenience.
	 */
	struct _cpu *cpu;

#ifdef CONFIG_USERSPACE
	/* Privilege mode stack pointer value when doing a system call */
	char *psp;

	/* Storage area for user mode stack pointer when doing a syscall */
	char *usp;
#endif /* CONFIG_USERSPACE */
} __packed __aligned(8);

typedef struct x86_tss64 x86_tss64_t;
/*
* The _callee_saved registers are unconditionally saved/restored across
* context switches; the _thread_arch registers are only preserved when
* the thread is interrupted. _arch_thread.flags tells __resume when to
* cheat and only restore the first set. For more details see locore.S.
*/
/*
 * Callee-saved register context, unconditionally preserved across a
 * cooperative context switch (see the note above and locore.S).
 */
struct _callee_saved {
	uint64_t rsp;		/* stack pointer at switch-out */
	uint64_t rbx;
	uint64_t rbp;
	uint64_t r12;
	uint64_t r13;
	uint64_t r14;
	uint64_t r15;
	uint64_t rip;		/* resume address */
	uint64_t rflags;
};

typedef struct _callee_saved _callee_saved_t;
/*
 * Per-thread architecture state. The caller-saved registers below are
 * only preserved when the thread is interrupted; 'flags'
 * (X86_THREAD_FLAG_ALL) tells the resume path whether they were saved.
 */
struct _thread_arch {
	uint8_t flags;

#ifdef CONFIG_USERSPACE
#ifndef CONFIG_X86_COMMON_PAGE_TABLE
	/* Physical address of the page tables used by this thread */
	uintptr_t ptables;
#endif /* CONFIG_X86_COMMON_PAGE_TABLE */

	/* Initial privilege mode stack pointer when doing a system call.
	 * Un-set for supervisor threads.
	 */
	char *psp;

	/* SS and CS selectors for this thread when restoring context */
	uint64_t ss;
	uint64_t cs;
#endif

	/* Caller-saved integer registers, saved only on interrupt/exception */
	uint64_t rax;
	uint64_t rcx;
	uint64_t rdx;
	uint64_t rsi;
	uint64_t rdi;
	uint64_t r8;
	uint64_t r9;
	uint64_t r10;
	uint64_t r11;
	/* FXSAVE/FXRSTOR area; needs 16-byte alignment per the ISA */
	char __aligned(X86_FXSAVE_ALIGN) sse[X86_FXSAVE_SIZE];
};

typedef struct _thread_arch _thread_arch_t;
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_X86_INTEL64_THREAD_H_ */
``` | /content/code_sandbox/include/zephyr/arch/x86/intel64/thread.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,109 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_X86_IA32_EXPCEPTION_H_
#define ZEPHYR_INCLUDE_ARCH_X86_IA32_EXPCEPTION_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Exception Stack Frame
*
* A pointer to an "exception stack frame" (ESF) is passed as an argument
* to exception handlers registered via nanoCpuExcConnect(). As the system
* always operates at ring 0, only the EIP, CS and EFLAGS registers are pushed
* onto the stack when an exception occurs.
*
* The exception stack frame includes the volatile registers (EAX, ECX, and
* EDX) as well as the 5 non-volatile registers (EDI, ESI, EBX, EBP and ESP).
* Those registers are pushed onto the stack by _ExcEnt().
*/
/*
 * Exception stack frame layout; must match the push order performed by
 * the exception entry code (see the block comment above). With
 * CONFIG_GDBSTUB the segment registers are additionally captured so
 * the debugger can report them.
 */
struct arch_esf {
#ifdef CONFIG_GDBSTUB
	unsigned int ss;
	unsigned int gs;
	unsigned int fs;
	unsigned int es;
	unsigned int ds;
#endif
	unsigned int esp;
	unsigned int ebp;
	unsigned int ebx;
	unsigned int esi;
	unsigned int edi;
	unsigned int edx;
	unsigned int eax;
	unsigned int ecx;
	unsigned int errorCode;
	/* Pushed by the CPU on exception entry */
	unsigned int eip;
	unsigned int cs;
	unsigned int eflags;
};
extern unsigned int z_x86_exception_vector;
/*
 * Stack frame produced by a software-interrupt system call entry.
 * ESP/SS are pushed by the CPU only on a privilege-level change.
 */
struct _x86_syscall_stack_frame {
	uint32_t eip;
	uint32_t cs;
	uint32_t eflags;

	/* These are only present if cs = USER_CODE_SEG */
	uint32_t esp;
	uint32_t ss;
};
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_X86_IA32_EXPCEPTION_H_ */
``` | /content/code_sandbox/include/zephyr/arch/x86/ia32/exception.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 371 |
```objective-c
/* asm.h - x86 tool dependent headers */
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_X86_IA32_ASM_H_
#define ZEPHYR_INCLUDE_ARCH_X86_IA32_ASM_H_
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#if defined(_ASMLANGUAGE)
#ifdef CONFIG_X86_KPTI
GTEXT(z_x86_trampoline_to_user)
GTEXT(z_x86_trampoline_to_kernel)
#define KPTI_IRET jmp z_x86_trampoline_to_user
#define KPTI_IRET_USER jmp z_x86_trampoline_to_user_always
#else
#define KPTI_IRET iret
#define KPTI_IRET_USER iret
#endif /* CONFIG_X86_KPTI */
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_X86_IA32_ASM_H_ */
``` | /content/code_sandbox/include/zephyr/arch/x86/ia32/asm.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 187 |
```linker script
/*
*/
#include <zephyr/linker/linker-defs.h>
#include <zephyr/linker/linker-tool.h>
#define ROMABLE_REGION RAM
#define RAMABLE_REGION RAM
#define MMU_PAGE_ALIGN . = ALIGN(CONFIG_MMU_PAGE_SIZE);
/* Used to align areas with separate memory permission characteristics
* so that the page permissions can be set in the MMU. Without this,
* the kernel is just one blob with the same RWX permissions on all RAM
*/
#ifdef CONFIG_SRAM_REGION_PERMISSIONS
#define MMU_PAGE_ALIGN_PERM MMU_PAGE_ALIGN
#else
#define MMU_PAGE_ALIGN_PERM
#endif
ENTRY(CONFIG_KERNEL_ENTRY)
SECTIONS
{
	/*
	 * The "locore" must be in the 64K of RAM, so that 16-bit code (with
	 * segment registers == 0x0000) and 32/64-bit code agree on addresses.
	 * ... there is no 16-bit code yet, but there will be when we add SMP.
	 */
	SECTION_PROLOGUE(.locore,,)
	{
	_locore_start = .;
	*(.locore)
	*(.locore.*)
	MMU_PAGE_ALIGN_PERM
	_locore_end = .;

	_lorodata_start = .;
	*(.lorodata)
	MMU_PAGE_ALIGN_PERM

	_lodata_start = .;

	*(.lodata)

#ifdef CONFIG_X86_KPTI
	/* Special page containing supervisor data that is still mapped in
	 * user mode page tables. GDT, TSSes, trampoline stack, and
	 * any LDT must go here as they always must live in a page that is
	 * marked 'present'. Still not directly user accessible, but
	 * no sensitive data should be here as Meltdown exploits may read it.
	 *
	 * On x86-64 the IDT is in rodata and doesn't need to be in the
	 * trampoline page.
	 */
	MMU_PAGE_ALIGN_PERM
	z_shared_kernel_page_start = .;
#endif /* CONFIG_X86_KPTI */

	*(.boot_arg)
	*(.tss)
	*(.gdt)

#ifdef CONFIG_X86_KPTI
	*(.trampolines)
	MMU_PAGE_ALIGN_PERM
	z_shared_kernel_page_end = .;

	ASSERT(z_shared_kernel_page_end - z_shared_kernel_page_start == 4096,
	       "shared kernel area is not one memory page");
#endif /* CONFIG_X86_KPTI */

	. = ALIGN(CONFIG_MMU_PAGE_SIZE);
	_lodata_end = .;
	} > LOCORE

	_locore_size = _lorodata_start - _locore_start;
	_lorodata_size = _lodata_start - _lorodata_start;
	_lodata_size = _lodata_end - _lodata_start;

	/*
	 * The rest of the system is loaded in "normal" memory (typically
	 * placed above 1MB to avoid the memory hole at 0x90000-0xFFFFF).
	 */
	SECTION_PROLOGUE(_TEXT_SECTION_NAME,,)
	{
	. = ALIGN(16);
	__rom_region_start = .;
	__text_region_start = .;
	z_mapped_start = .;
	*(.text)
	*(.text.*)

	#include <zephyr/linker/kobject-text.ld>

	MMU_PAGE_ALIGN_PERM
	} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)

	__text_region_end = .;
	__text_region_size = __text_region_end - __text_region_start;
	__rodata_region_start = .;

	#include <zephyr/linker/common-rom.ld>

	/* Located in generated directory. This file is populated by calling
	 * zephyr_linker_sources(ROM_SECTIONS ...). Useful for grouping iterable RO structs.
	 */
	#include <snippets-rom-sections.ld>

	#include <zephyr/linker/thread-local-storage.ld>

	SECTION_PROLOGUE(_RODATA_SECTION_NAME,,)
	{
	. = ALIGN(16);
	*(.rodata)
	*(.rodata.*)

	MMU_PAGE_ALIGN
	#include <snippets-rodata.ld>

#ifdef CONFIG_X86_MMU
	. = ALIGN(8);
	_mmu_region_list_start = .;
	KEEP(*("._mmu_region.static.*"))
	_mmu_region_list_end = .;
#endif /* CONFIG_X86_MMU */

	#include <zephyr/linker/kobject-rom.ld>
	} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)

	#include <zephyr/linker/cplusplus-rom.ld>

	MMU_PAGE_ALIGN_PERM
	__rodata_region_end = .;
	__rodata_region_size = __rodata_region_end - __rodata_region_start;
	__rom_region_end = .;

#ifdef CONFIG_USERSPACE
	/* APP SHARED MEMORY REGION */
#define SMEM_PARTITION_ALIGN(size) MMU_PAGE_ALIGN_PERM
#define APP_SHARED_ALIGN MMU_PAGE_ALIGN_PERM

	#include <app_smem.ld>

	_image_ram_start = _app_smem_start;
	_app_smem_size = _app_smem_end - _app_smem_start;
	/* Number of 32-bit words in the app shared memory area */
	_app_smem_num_words = _app_smem_size >> 2;
	_app_smem_rom_start = LOADADDR(_APP_SMEM_SECTION_NAME);
#endif /* CONFIG_USERSPACE */

	/* This should be put here before BSS section, otherwise the .bss.__gcov will
	 * be put in BSS section. That causes gcov not work properly */
	#include <snippets-ram-sections.ld>

	SECTION_PROLOGUE(_BSS_SECTION_NAME, (NOLOAD),)
	{
	. = ALIGN(16);
	MMU_PAGE_ALIGN_PERM
#ifndef CONFIG_USERSPACE
	_image_ram_start = .;
#endif
	__kernel_ram_start = .;
	__bss_start = .;
	*(.bss)
	*(.bss.*)
	*(COMMON)
	. = ALIGN(4); /* so __bss_num_dwords is exact */
	__bss_end = .;
	} GROUP_NOLOAD_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)

	__bss_num_dwords = (__bss_end - __bss_start) >> 2;

	#include <zephyr/linker/common-noinit.ld>

	#include <snippets-sections.ld>

	SECTION_PROLOGUE(_DATA_SECTION_NAME,,)
	{
	. = ALIGN(16);
	*(.data)
	*(.data.*)
	#include <snippets-rwdata.ld>
	} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)

	#include <zephyr/linker/common-ram.ld>
	#include <zephyr/linker/cplusplus-ram.ld>
	#include <zephyr/arch/x86/pagetables.ld>

	/* Located in generated directory. This file is populated by the
	 * zephyr_linker_sources() Cmake function.
	 */
	#include <snippets-data-sections.ld>

	/* Must be last in RAM */
	#include <zephyr/linker/kobject-data.ld>

#define LAST_RAM_ALIGN MMU_PAGE_ALIGN
	#include <zephyr/linker/ram-end.ld>

	GROUP_END(RAMABLE_REGION)

	/* All unused memory also owned by the kernel for heaps */
	__kernel_ram_end = KERNEL_BASE_ADDR + KERNEL_RAM_SIZE;
	__kernel_ram_size = __kernel_ram_end - __kernel_ram_start;

	z_mapped_size = z_mapped_end - z_mapped_start;

	#include <zephyr/linker/debug-sections.ld>

	/DISCARD/ :
	{
	*(.got)
	*(.got.plt)
	*(.igot)
	*(.igot.plt)
	*(.iplt)
	*(.plt)
	*(.note.GNU-stack)
	*(.rel.*)
	*(.rela.*)
	}

	/*
	 * eh_frame section won't be removed even with "--gc-sections" by LLVM lld.
	 */
#if !defined(CONFIG_CPP_EXCEPTIONS)
	/DISCARD/ : { *(.eh_frame) }
#endif

	/*
	 * The sections below are still treated as warnings
	 * with "--orphan-handling=warn" by LLVM lld.
	 */
#if !defined(CONFIG_LLVM_USE_LD)
	.symtab 0 : { *(.symtab) }
	.strtab 0 : { *(.strtab) }
	.shstrtab 0 : { *(.shstrtab) }
#endif

#ifdef CONFIG_LLEXT
#include <zephyr/linker/llext-sections.ld>
#endif
}
``` | /content/code_sandbox/include/zephyr/arch/x86/intel64/linker.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,834 |
```objective-c
/*
*
*/
/**
* @file
* @brief IA-32 specific gdbstub interface header
*/
#ifndef ZEPHYR_INCLUDE_ARCH_X86_GDBSTUB_SYS_H_
#define ZEPHYR_INCLUDE_ARCH_X86_GDBSTUB_SYS_H_
#ifndef _ASMLANGUAGE
#include <stdint.h>
#include <zephyr/toolchain.h>
/**
* @brief Number of register used by gdbstub in IA-32
*/
#define GDB_STUB_NUM_REGISTERS 16
/**
* @brief GDB interruption context
*
* The exception stack frame contents used by gdbstub. The contents
* of this struct are used to display information about the current
* cpu state.
*/
/*
 * Register snapshot captured at the debug trap; member order must match
 * the push sequence of the gdbstub exception entry code.
 */
struct gdb_interrupt_ctx {
	uint32_t ss;
	uint32_t gs;
	uint32_t fs;
	uint32_t es;
	uint32_t ds;
	uint32_t edi;
	uint32_t esi;
	uint32_t ebp;
	uint32_t esp;
	uint32_t ebx;
	uint32_t edx;
	uint32_t ecx;
	uint32_t eax;
	uint32_t vector;	/* exception vector number */
	uint32_t error_code;
	/* Pushed by the CPU on exception entry */
	uint32_t eip;
	uint32_t cs;
	uint32_t eflags;
} __packed;
/**
* @brief IA-32 register used in gdbstub
*/
/*
 * IA-32 register numbering used by gdbstub; the enumerator order
 * defines each register's index in gdb_ctx.registers.
 */
enum GDB_REGISTER {
	GDB_EAX,
	GDB_ECX,
	GDB_EDX,
	GDB_EBX,
	GDB_ESP,
	GDB_EBP,
	GDB_ESI,
	GDB_EDI,
	GDB_PC,
	GDB_EFLAGS,
	GDB_CS,
	GDB_SS,
	GDB_DS,
	GDB_ES,
	GDB_FS,
	GDB_GS,
	/* Deliberately outside the 16-entry register array */
	GDB_ORIG_EAX = 41,
};
/* Debug context: current exception number plus the 16-register file
 * indexed by enum GDB_REGISTER.
 */
struct gdb_ctx {
	unsigned int exception;
	unsigned int registers[GDB_STUB_NUM_REGISTERS];
};
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_X86_GDBSTUB_SYS_H_ */
``` | /content/code_sandbox/include/zephyr/arch/x86/ia32/gdbstub.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 397 |
```objective-c
/*
*
*/
/**
* @file
* @brief x86 (IA32) specific syscall header
*
* This header contains the x86 specific syscall interface. It is
* included by the syscall interface architecture-abstraction header
* (include/arch/syscall.h)
*/
#ifndef ZEPHYR_INCLUDE_ARCH_X86_IA32_SYSCALL_H_
#define ZEPHYR_INCLUDE_ARCH_X86_IA32_SYSCALL_H_
#define USER_CODE_SEG 0x2b /* at dpl=3 */
#define USER_DATA_SEG 0x33 /* at dpl=3 */
#ifdef CONFIG_USERSPACE
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <stdbool.h>
#include <zephyr/linker/sections.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Syscall invocation macros. x86-specific machine constraints used to ensure
* args land in the proper registers, see implementation of
* z_x86_syscall_entry_stub in userspace.S
*/
/**
 * Invoke a system call with six arguments via int $0x80.
 *
 * Call ID goes in ESI; arguments 1-5 in EAX, EDX, ECX, EBX, EDI per
 * the constraints below. The sixth argument travels in EBP, which is
 * saved and restored around the trap.
 *
 * @return value produced by the system call handler (in EAX)
 */
__pinned_func
static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t arg6,
					     uintptr_t call_id)
{
	uint32_t ret;

	__asm__ volatile("push %%ebp\n\t"
			 "mov %[arg6], %%ebp\n\t"
			 "int $0x80\n\t"
			 "pop %%ebp\n\t"
			 : "=a" (ret)
			 : "S" (call_id), "a" (arg1), "d" (arg2),
			   "c" (arg3), "b" (arg4), "D" (arg5),
			   [arg6] "m" (arg6)
			 : "memory");

	return ret;
}
/**
 * Invoke a system call with five arguments via int $0x80
 * (ESI = call ID; EAX, EDX, ECX, EBX, EDI = args).
 */
__pinned_func
static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5,
					     uintptr_t call_id)
{
	uint32_t ret;

	__asm__ volatile("int $0x80"
			 : "=a" (ret)
			 : "S" (call_id), "a" (arg1), "d" (arg2),
			   "c" (arg3), "b" (arg4), "D" (arg5)
			 : "memory");

	return ret;
}
/**
 * Invoke a system call with four arguments via int $0x80
 * (ESI = call ID; EAX, EDX, ECX, EBX = args).
 */
__pinned_func
static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t call_id)
{
	uint32_t ret;

	__asm__ volatile("int $0x80"
			 : "=a" (ret)
			 : "S" (call_id), "a" (arg1), "d" (arg2), "c" (arg3),
			   "b" (arg4)
			 : "memory");

	return ret;
}
/**
 * Invoke a system call with three arguments via int $0x80
 * (ESI = call ID; EAX, EDX, ECX = args).
 */
__pinned_func
static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3,
					     uintptr_t call_id)
{
	uint32_t ret;

	__asm__ volatile("int $0x80"
			 : "=a" (ret)
			 : "S" (call_id), "a" (arg1), "d" (arg2), "c" (arg3)
			 : "memory");

	return ret;
}
/**
 * Invoke a system call with two arguments via int $0x80
 * (ESI = call ID; EAX, EDX = args).
 */
__pinned_func
static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t call_id)
{
	uint32_t ret;

	__asm__ volatile("int $0x80"
			 : "=a" (ret)
			 : "S" (call_id), "a" (arg1), "d" (arg2)
			 : "memory"
			 );

	return ret;
}
/**
 * Invoke a system call with one argument via int $0x80
 * (ESI = call ID; EAX = arg).
 */
__pinned_func
static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1,
					     uintptr_t call_id)
{
	uint32_t ret;

	__asm__ volatile("int $0x80"
			 : "=a" (ret)
			 : "S" (call_id), "a" (arg1)
			 : "memory"
			 );

	return ret;
}
/**
 * Invoke a system call with no arguments via int $0x80
 * (ESI = call ID).
 */
__pinned_func
static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
{
	uint32_t ret;

	__asm__ volatile("int $0x80"
			 : "=a" (ret)
			 : "S" (call_id)
			 : "memory"
			 );

	return ret;
}
/**
 * Report whether execution is currently in user mode by comparing the
 * CS selector against the user-mode code segment selector.
 */
__pinned_func
static inline bool arch_is_user_context(void)
{
	int cs;

	/* On x86, read the CS register (which cannot be manually set) */
	__asm__ volatile ("mov %%cs, %[cs_val]" : [cs_val] "=r" (cs));

	return cs == USER_CODE_SEG;
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* CONFIG_USERSPACE */
#endif /* ZEPHYR_INCLUDE_ARCH_X86_IA32_SYSCALL_H_ */
``` | /content/code_sandbox/include/zephyr/arch/x86/ia32/syscall.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,068 |
```objective-c
/*
*
*/
/**
* @file
* @brief IA-32 specific kernel interface header
* This header contains the IA-32 specific kernel interface. It is included
* by the generic kernel interface header (include/arch/cpu.h)
*/
#ifndef ZEPHYR_INCLUDE_ARCH_X86_IA32_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_X86_IA32_ARCH_H_
#include "sys_io.h"
#include <stdbool.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/arch/common/ffs.h>
#include <zephyr/sys/util.h>
#include <zephyr/arch/x86/ia32/exception.h>
#include <zephyr/arch/x86/ia32/gdbstub.h>
#include <zephyr/arch/x86/ia32/thread.h>
#include <zephyr/arch/x86/ia32/syscall.h>
#ifndef _ASMLANGUAGE
#include <stddef.h> /* for size_t */
#include <zephyr/arch/common/addr_types.h>
#include <zephyr/arch/x86/ia32/segmentation.h>
#include <zephyr/pm/pm.h>
#endif /* _ASMLANGUAGE */
/* GDT layout */
#define CODE_SEG 0x08
#define DATA_SEG 0x10
#define MAIN_TSS 0x18
#define DF_TSS 0x20
/*
* Use for thread local storage.
* Match these to gen_gdt.py.
* The 0x03 is added to limit privilege.
*/
#if defined(CONFIG_USERSPACE)
#define GS_TLS_SEG (0x38 | 0x03)
#elif defined(CONFIG_X86_STACK_PROTECTION)
#define GS_TLS_SEG (0x28 | 0x03)
#else
#define GS_TLS_SEG (0x18 | 0x03)
#endif
/**
* Macro used internally by NANO_CPU_INT_REGISTER and NANO_CPU_INT_REGISTER_ASM.
* Not meant to be used explicitly by platform, driver or application code.
*/
#define MK_ISR_NAME(x) __isr__##x
#define Z_DYN_STUB_SIZE 4
#define Z_DYN_STUB_OFFSET 0
#define Z_DYN_STUB_LONG_JMP_EXTRA_SIZE 3
#define Z_DYN_STUB_PER_BLOCK 32
#ifndef _ASMLANGUAGE
#ifdef __cplusplus
extern "C" {
#endif
/* interrupt/exception/error related definitions */
/*
 * One static IDT entry descriptor; instances are placed in the
 * .intList section and consumed at build time to generate the IDT
 * (see NANO_CPU_INT_REGISTER below).
 */
typedef struct s_isrList {
	/** Address of ISR/stub */
	void *fnc;
	/** IRQ associated with the ISR/stub, or -1 if this is not
	 * associated with a real interrupt; in this case vec must
	 * not be -1
	 */
	unsigned int irq;
	/** Priority associated with the IRQ. Ignored if vec is not -1 */
	unsigned int priority;
	/** Vector number associated with ISR/stub, or -1 to assign based
	 * on priority
	 */
	unsigned int vec;
	/** Privilege level associated with ISR/stub */
	unsigned int dpl;
	/** If nonzero, specifies a TSS segment selector. Will configure
	 * a task gate instead of an interrupt gate. fnc parameter will be
	 * ignored
	 */
	unsigned int tss;
} ISR_LIST;
/**
* @brief Connect a routine to an interrupt vector
*
* This macro "connects" the specified routine, @a r, to the specified interrupt
* vector, @a v using the descriptor privilege level @a d. On the IA-32
* architecture, an interrupt vector is a value from 0 to 255. This macro
* populates the special intList section with the address of the routine, the
* vector number and the descriptor privilege level. The genIdt tool then picks
* up this information and generates an actual IDT entry with this information
* properly encoded.
*
* The @a d argument specifies the privilege level for the interrupt-gate
* descriptor; (hardware) interrupts and exceptions should specify a level of 0,
* whereas handlers for user-mode software generated interrupts should specify 3.
* @param r Routine to be connected
* @param n IRQ number
* @param p IRQ priority
* @param v Interrupt Vector
* @param d Descriptor Privilege Level
*/
#define NANO_CPU_INT_REGISTER(r, n, p, v, d) \
static ISR_LIST __attribute__((section(".intList"))) \
__attribute__((used)) MK_ISR_NAME(r) = \
{ \
.fnc = &(r), \
.irq = (n), \
.priority = (p), \
.vec = (v), \
.dpl = (d), \
.tss = 0 \
}
/**
* @brief Connect an IA hardware task to an interrupt vector
*
* This is very similar to NANO_CPU_INT_REGISTER but instead of connecting
* a handler function, the interrupt will induce an IA hardware task
* switch to another hardware task instead.
*
* @param tss_p GDT/LDT segment selector for the TSS representing the task
* @param irq_p IRQ number
* @param priority_p IRQ priority
* @param vec_p Interrupt vector
* @param dpl_p Descriptor privilege level
*/
#define _X86_IDT_TSS_REGISTER(tss_p, irq_p, priority_p, vec_p, dpl_p) \
static ISR_LIST __attribute__((section(".intList"))) \
__attribute__((used)) MK_ISR_NAME(vec_p) = \
{ \
.fnc = NULL, \
.irq = (irq_p), \
.priority = (priority_p), \
.vec = (vec_p), \
.dpl = (dpl_p), \
.tss = (tss_p) \
}
/**
* Code snippets for populating the vector ID and priority into the intList
*
* The 'magic' of static interrupts is accomplished by building up an array
* 'intList' at compile time, and the gen_idt tool uses this to create the
* actual IDT data structure.
*
* For controllers like APIC, the vectors in the IDT are not normally assigned
* at build time; instead the sentinel value -1 is saved, and gen_idt figures
* out the right vector to use based on our priority scheme. Groups of 16
* vectors starting at 32 correspond to each priority level.
*
* These macros are only intended to be used by IRQ_CONNECT() macro.
*/
#define _VECTOR_ARG(irq_p) (-1)
#ifdef CONFIG_LINKER_USE_PINNED_SECTION
#define IRQSTUBS_TEXT_SECTION ".pinned_text.irqstubs"
#else
#define IRQSTUBS_TEXT_SECTION ".text.irqstubs"
#endif
/* Internally this function does a few things:
*
* 1. There is a declaration of the interrupt parameters in the .intList
* section, used by gen_idt to create the IDT. This does the same thing
* as the NANO_CPU_INT_REGISTER() macro, but is done in assembly as we
* need to populate the .fnc member with the address of the assembly
* IRQ stub that we generate immediately afterwards.
*
* 2. The IRQ stub itself is declared. The code will go in its own named
* section .text.irqstubs section (which eventually gets linked into 'text')
* and the stub shall be named (isr_name)_irq(irq_line)_stub
*
* 3. The IRQ stub pushes the ISR routine and its argument onto the stack
* and then jumps to the common interrupt handling code in _interrupt_enter().
*
* 4. z_irq_controller_irq_config() is called at runtime to set the mapping
* between the vector and the IRQ line as well as triggering flags
*/
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
{ \
__asm__ __volatile__( \
".pushsection .intList\n\t" \
".long %c[isr]_irq%c[irq]_stub\n\t" /* ISR_LIST.fnc */ \
".long %c[irq]\n\t" /* ISR_LIST.irq */ \
".long %c[priority]\n\t" /* ISR_LIST.priority */ \
".long %c[vector]\n\t" /* ISR_LIST.vec */ \
".long 0\n\t" /* ISR_LIST.dpl */ \
".long 0\n\t" /* ISR_LIST.tss */ \
".popsection\n\t" \
".pushsection " IRQSTUBS_TEXT_SECTION "\n\t" \
".global %c[isr]_irq%c[irq]_stub\n\t" \
"%c[isr]_irq%c[irq]_stub:\n\t" \
"pushl %[isr_param]\n\t" \
"pushl %[isr]\n\t" \
"jmp _interrupt_enter\n\t" \
".popsection\n\t" \
: \
: [isr] "i" (isr_p), \
[isr_param] "i" (isr_param_p), \
[priority] "i" (priority_p), \
[vector] "i" _VECTOR_ARG(irq_p), \
[irq] "i" (irq_p)); \
z_irq_controller_irq_config(Z_IRQ_TO_INTERRUPT_VECTOR(irq_p), (irq_p), \
(flags_p)); \
}
#ifdef CONFIG_PCIE
#define ARCH_PCIE_IRQ_CONNECT(bdf_p, irq_p, priority_p, \
isr_p, isr_param_p, flags_p) \
ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p)
#endif /* CONFIG_PCIE */
/* Direct interrupts won't work as expected with KPTI turned on, because
* all non-user accessible pages in the page table are marked non-present.
* It's likely possible to add logic to ARCH_ISR_DIRECT_HEADER/FOOTER to do
* the necessary trampolining to switch page tables / stacks, but this
* probably loses all the latency benefits that direct interrupts provide
* and one might as well use a regular interrupt anyway.
*/
#ifndef CONFIG_X86_KPTI
#define ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
{ \
NANO_CPU_INT_REGISTER(isr_p, irq_p, priority_p, -1, 0); \
z_irq_controller_irq_config(Z_IRQ_TO_INTERRUPT_VECTOR(irq_p), (irq_p), \
(flags_p)); \
}
#ifdef CONFIG_PM
/* If a direct ISR fired while the kernel was idle, clear the idle
 * indication and let the PM subsystem resume.
 */
static inline void arch_irq_direct_pm(void)
{
	if (_kernel.idle) {
		_kernel.idle = 0;
		pm_system_resume();
	}
}
#define ARCH_ISR_DIRECT_PM() arch_irq_direct_pm()
#else
#define ARCH_ISR_DIRECT_PM() do { } while (false)
#endif
#define ARCH_ISR_DIRECT_HEADER() arch_isr_direct_header()
#define ARCH_ISR_DIRECT_FOOTER(swap) arch_isr_direct_footer(swap)
/* FIXME:
* tracing/tracing.h cannot be included here due to circular dependency
*/
#if defined(CONFIG_TRACING)
void sys_trace_isr_enter(void);
void sys_trace_isr_exit(void);
#endif
/* Prologue for direct ISRs: trace entry and bump the nesting counter. */
static inline void arch_isr_direct_header(void)
{
#if defined(CONFIG_TRACING)
	sys_trace_isr_enter();
#endif

	/* We're not going to unlock IRQs, but we still need to increment this
	 * so that arch_is_in_isr() works
	 */
	++_kernel.cpus[0].nested;
}
/*
* FIXME: z_swap_irqlock is an inline function declared in a private header and
* cannot be referenced from a public header, so we move it to an
* external function.
*/
void arch_isr_direct_footer_swap(unsigned int key);
/* Epilogue for direct ISRs: issue EOI, trace exit, drop the nesting
 * counter, and perform a context switch when one is both requested and
 * needed.
 */
static inline void arch_isr_direct_footer(int swap)
{
	z_irq_controller_eoi();
#if defined(CONFIG_TRACING)
	sys_trace_isr_exit();
#endif
	--_kernel.cpus[0].nested;

	/* Call swap if all the following is true:
	 *
	 * 1) swap argument was enabled to this function
	 * 2) We are not in a nested interrupt
	 * 3) Next thread to run in the ready queue is not this thread
	 */
	if (swap != 0 && _kernel.cpus[0].nested == 0 &&
	    _kernel.ready_q.cache != _current) {
		unsigned int flags;

		/* Fetch EFLAGS argument to z_swap() */
		__asm__ volatile (
			"pushfl\n\t"
			"popl %0\n\t"
			: "=g" (flags)
			:
			: "memory"
			);

		arch_isr_direct_footer_swap(flags);
	}
}
#define ARCH_ISR_DIRECT_DECLARE(name) \
static inline int name##_body(void); \
__attribute__ ((interrupt)) void name(void *stack_frame) \
{ \
ARG_UNUSED(stack_frame); \
int check_reschedule; \
ISR_DIRECT_HEADER(); \
check_reschedule = name##_body(); \
ISR_DIRECT_FOOTER(check_reschedule); \
} \
static inline int name##_body(void)
#endif /* !CONFIG_X86_KPTI */
/**
 * Disable interrupts on the current CPU.
 *
 * Saves EFLAGS (which holds the interrupt enable state) before
 * executing CLI; the returned key allows the caller to restore the
 * previous interrupt state later.
 */
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
	unsigned int key;

	__asm__ volatile ("pushfl; cli; popl %0" : "=g" (key) :: "memory");

	return key;
}
/**
* The NANO_SOFT_IRQ macro must be used as the value for the @a irq parameter
* to NANO_CPU_INT_REGISTER when connecting to an interrupt that does not
* correspond to any IRQ line (such as spurious vector or SW IRQ)
*/
#define NANO_SOFT_IRQ ((unsigned int) (-1))
#ifdef CONFIG_X86_ENABLE_TSS
extern struct task_state_segment _main_tss;
#endif
#define ARCH_EXCEPT(reason_p) do { \
__asm__ volatile( \
"push %[reason]\n\t" \
"int %[vector]\n\t" \
: \
: [vector] "i" (Z_X86_OOPS_VECTOR), \
[reason] "i" (reason_p)); \
CODE_UNREACHABLE; /* LCOV_EXCL_LINE */ \
} while (false)
/*
* Dynamic thread object memory alignment.
*
* If support for SSEx extensions is enabled a 16 byte boundary is required,
* since the 'fxsave' and 'fxrstor' instructions require this. In all other
* cases a 4 byte boundary is sufficient.
*/
#if defined(CONFIG_EAGER_FPU_SHARING) || defined(CONFIG_LAZY_FPU_SHARING)
#ifdef CONFIG_SSE
#define ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT 16
#else
#define ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT (sizeof(void *))
#endif
#else
/* No special alignment requirements, simply align on pointer size. */
#define ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT (sizeof(void *))
#endif /* CONFIG_*_FP_SHARING */
#ifdef __cplusplus
}
#endif
#endif /* !_ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_X86_IA32_ARCH_H_ */
``` | /content/code_sandbox/include/zephyr/arch/x86/ia32/arch.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,228 |
```objective-c
/*
*
*/
/* Implementation of sys_io.h's documented functions */
#ifndef ZEPHYR_INCLUDE_ARCH_X86_IA32_SYS_IO_H_
#define ZEPHYR_INCLUDE_ARCH_X86_IA32_SYS_IO_H_
#if !defined(_ASMLANGUAGE)
#include <zephyr/sys/sys_io.h>
#include <zephyr/types.h>
#include <stddef.h>
/**
 * Set bit @a bit of the value at I/O port @a port (read-modify-write).
 *
 * The inl/outl data must live in EAX ("a" constraint). The asm both
 * reads and writes @a reg, so it is declared as an in/out operand
 * ("+a"); listing it as an input only, as before, told the compiler
 * EAX was left unmodified, which is invalid for extended asm.
 */
static ALWAYS_INLINE
void sys_io_set_bit(io_port_t port, unsigned int bit)
{
	uint32_t reg = 0;

	__asm__ volatile("inl %w1, %0;\n\t"
			 "btsl %2, %0;\n\t"
			 "outl %0, %w1;\n\t"
			 : "+a" (reg)
			 : "Nd" (port), "Ir" (bit));
}
/**
 * Clear bit @a bit of the value at I/O port @a port (read-modify-write).
 *
 * As with sys_io_set_bit, the scratch register is both read and written
 * by the asm, so it must be an in/out operand ("+a") rather than an
 * input-only operand.
 */
static ALWAYS_INLINE
void sys_io_clear_bit(io_port_t port, unsigned int bit)
{
	uint32_t reg = 0;

	__asm__ volatile("inl %w1, %0;\n\t"
			 "btrl %2, %0;\n\t"
			 "outl %0, %w1;\n\t"
			 : "+a" (reg)
			 : "Nd" (port), "Ir" (bit));
}
/**
 * Test bit @a bit of the value at I/O port @a port.
 *
 * BUG FIX: `btl` deposits the tested bit into the carry flag, not into
 * the destination register, yet the old code returned `ret & 1U` --
 * i.e. bit 0 of the raw port value regardless of @a bit. Add
 * `sbb %0, %0` to materialize CF into the register (all-ones if the
 * bit was set, zero otherwise) and report its truth value.
 *
 * @return 1 if the bit is set, 0 otherwise
 */
static ALWAYS_INLINE
int sys_io_test_bit(io_port_t port, unsigned int bit)
{
	uint32_t ret;

	__asm__ volatile("inl %w1, %0\n\t"
			 "btl %2, %0\n\t"
			 "sbb %0, %0\n\t"
			 : "=a" (ret)
			 : "Nd" (port), "Ir" (bit));

	return (ret != 0);
}
/**
 * Set bit @a bit at I/O port @a port, returning its prior value.
 *
 * @return 1 if the bit was already set, 0 otherwise
 */
static ALWAYS_INLINE
int sys_io_test_and_set_bit(io_port_t port, unsigned int bit)
{
	const int old_bit = sys_io_test_bit(port, bit);

	sys_io_set_bit(port, bit);

	return old_bit;
}
/**
 * Clear bit @a bit at I/O port @a port, returning its prior value.
 *
 * @return 1 if the bit was set before clearing, 0 otherwise
 */
static ALWAYS_INLINE
int sys_io_test_and_clear_bit(io_port_t port, unsigned int bit)
{
	const int old_bit = sys_io_test_bit(port, bit);

	sys_io_clear_bit(port, bit);

	return old_bit;
}
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_X86_IA32_SYS_IO_H_ */
``` | /content/code_sandbox/include/zephyr/arch/x86/ia32/sys_io.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 474 |
```objective-c
/*
*
*/
/**
* @file
* @brief Per-arch thread definition
*
* This file contains definitions for
*
* struct _thread_arch
* struct _callee_saved
*
* necessary to instantiate instances of struct k_thread.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_X86_IA32_THREAD_H_
#define ZEPHYR_INCLUDE_ARCH_X86_IA32_THREAD_H_
/**
* Floating point register set alignment.
*
* If support for SSEx extensions is enabled a 16 byte boundary is required,
* since the 'fxsave' and 'fxrstor' instructions require this. In all other
* cases a 4 byte boundary is sufficient.
*/
#if defined(CONFIG_EAGER_FPU_SHARING) || defined(CONFIG_LAZY_FPU_SHARING)
#ifdef CONFIG_X86_SSE
#define FP_REG_SET_ALIGN 16
#else
#define FP_REG_SET_ALIGN 4
#endif
#else
/* Unused, no special alignment requirements, use default alignment for
* char buffers on this arch
*/
#define FP_REG_SET_ALIGN 1
#endif /* CONFIG_*_FP_SHARING */
/*
* Bits for _thread_arch.flags, see their use in intstub.S et al.
*/
#define X86_THREAD_FLAG_INT 0x01
#define X86_THREAD_FLAG_EXC 0x02
#define X86_THREAD_FLAG_ALL (X86_THREAD_FLAG_INT | X86_THREAD_FLAG_EXC)
#ifndef _ASMLANGUAGE
#include <stdint.h>
#include <zephyr/arch/x86/mmustructs.h>
/*
* The following structure defines the set of 'non-volatile' integer registers.
* These registers must be preserved by a called C function. These are the
* only registers that need to be saved/restored when a cooperative context
* switch occurs.
*/
/* Set of 'non-volatile' (callee-saved) integer registers preserved
 * across a cooperative context switch. Only the stack pointer is kept
 * here; the rest live on that stack (see below).
 */
struct _callee_saved {
	/* Saved stack pointer of the switched-out thread. */
	unsigned long esp;
	/*
	 * The following registers are considered non-volatile, i.e.
	 * callee-save,
	 * but their values are pushed onto the stack rather than stored in the
	 * TCS
	 * structure:
	 *
	 * unsigned long ebp;
	 * unsigned long ebx;
	 * unsigned long esi;
	 * unsigned long edi;
	 */
};

typedef struct _callee_saved _callee_saved_t;
/*
* The macros CONFIG_{LAZY|EAGER}_FPU_SHARING shall be set to indicate that the
* saving/restoring of the traditional x87 floating point (and MMX) registers
* are supported by the kernel's context swapping code. The macro
* CONFIG_X86_SSE shall _also_ be set if saving/restoring of the XMM
* registers is also supported in the kernel's context swapping code.
*/
#if defined(CONFIG_EAGER_FPU_SHARING) || defined(CONFIG_LAZY_FPU_SHARING)
/* definition of a single x87 (floating point / MMX) register */
typedef struct s_FpReg {
unsigned char reg[10]; /* 80 bits: ST[0-7] */
} tFpReg;
/*
* The following is the "normal" floating point register save area, or
* more accurately the save area required by the 'fnsave' and 'frstor'
* instructions. The structure matches the layout described in the
* "Intel(r) 64 and IA-32 Architectures Software Developer's Manual
* Volume 1: Basic Architecture": Protected Mode x87 FPU State Image in
* Memory, 32-Bit Format.
*/
typedef struct s_FpRegSet { /* # of bytes: name of register */
unsigned short fcw; /* 2 : x87 FPU control word */
unsigned short pad1; /* 2 : N/A */
unsigned short fsw; /* 2 : x87 FPU status word */
unsigned short pad2; /* 2 : N/A */
unsigned short ftw; /* 2 : x87 FPU tag word */
unsigned short pad3; /* 2 : N/A */
unsigned int fpuip; /* 4 : x87 FPU instruction pointer offset */
unsigned short cs; /* 2 : x87 FPU instruction pointer selector */
unsigned short fop : 11; /* 2 : x87 FPU opcode */
unsigned short pad4 : 5; /* : 5 bits = 00000 */
unsigned int fpudp; /* 4 : x87 FPU instr operand ptr offset */
unsigned short ds; /* 2 : x87 FPU instr operand ptr selector */
unsigned short pad5; /* 2 : N/A */
tFpReg fpReg[8]; /* 80 : ST0 -> ST7 */
} tFpRegSet __aligned(FP_REG_SET_ALIGN);
#ifdef CONFIG_X86_SSE
/* definition of a single x87 (floating point / MMX) register */
typedef struct s_FpRegEx {
unsigned char reg[10]; /* 80 bits: ST[0-7] or MM[0-7] */
unsigned char rsrvd[6]; /* 48 bits: reserved */
} tFpRegEx;
/* definition of a single XMM register */
typedef struct s_XmmReg {
unsigned char reg[16]; /* 128 bits: XMM[0-7] */
} tXmmReg;
/*
* The following is the "extended" floating point register save area, or
* more accurately the save area required by the 'fxsave' and 'fxrstor'
* instructions. The structure matches the layout described in the
* "Intel 64 and IA-32 Architectures Software Developer's Manual
* Volume 2A: Instruction Set Reference, A-M", except for the bytes from offset
* 464 to 511 since these "are available to software use. The processor does
* not write to bytes 464:511 of an FXSAVE area".
*
* This structure must be aligned on a 16 byte boundary when the instructions
* fxsave/fxrstor are used to write/read the data to/from the structure.
*/
typedef struct s_FpRegSetEx /* # of bytes: name of register */
{
unsigned short fcw; /* 2 : x87 FPU control word */
unsigned short fsw; /* 2 : x87 FPU status word */
unsigned char ftw; /* 1 : x87 FPU abridged tag word */
unsigned char rsrvd0; /* 1 : reserved */
unsigned short fop; /* 2 : x87 FPU opcode */
unsigned int fpuip; /* 4 : x87 FPU instruction pointer offset */
unsigned short cs; /* 2 : x87 FPU instruction pointer selector */
unsigned short rsrvd1; /* 2 : reserved */
unsigned int fpudp; /* 4 : x87 FPU instr operand ptr offset */
unsigned short ds; /* 2 : x87 FPU instr operand ptr selector */
unsigned short rsrvd2; /* 2 : reserved */
unsigned int mxcsr; /* 4 : MXCSR register state */
unsigned int mxcsrMask; /* 4 : MXCSR register mask */
tFpRegEx fpReg[8]; /* 128 : x87 FPU/MMX registers */
tXmmReg xmmReg[8]; /* 128 : XMM registers */
unsigned char rsrvd3[176]; /* 176 : reserved */
} tFpRegSetEx __aligned(FP_REG_SET_ALIGN);
#else /* CONFIG_X86_SSE == 0 */
typedef struct s_FpRegSetEx {
} tFpRegSetEx;
#endif /* CONFIG_X86_SSE == 0 */
#else /* !CONFIG_LAZY_FPU_SHARING && !CONFIG_EAGER_FPU_SHARING */
/* empty floating point register definition */
typedef struct s_FpRegSet {
} tFpRegSet;
typedef struct s_FpRegSetEx {
} tFpRegSetEx;
#endif /* CONFIG_LAZY_FPU_SHARING || CONFIG_EAGER_FPU_SHARING */
/*
* The following structure defines the set of 'volatile' x87 FPU/MMX/SSE
* registers. These registers need not be preserved by a called C function.
* Given that they are not preserved across function calls, they must be
* save/restored (along with s_coopFloatReg) when a preemptive context
* switch occurs.
*/
typedef struct s_preempFloatReg {
union {
/* threads with K_FP_REGS utilize this format */
tFpRegSet fpRegs;
/* threads with K_SSE_REGS utilize this format */
tFpRegSetEx fpRegsEx;
} floatRegsUnion;
} tPreempFloatReg;
/*
* The thread control structure definition. It contains the
* various fields to manage a _single_ thread. The TCS will be aligned
* to the appropriate architecture specific boundary via the
* arch_new_thread() call.
*/
/* Per-thread architecture-specific state for ia32. Field order and
 * presence are configuration-dependent; layout is consumed by assembly
 * via generated offsets, so keep it stable.
 */
struct _thread_arch {
	/* X86_THREAD_FLAG_INT / X86_THREAD_FLAG_EXC bits (see defines
	 * above and their use in intstub.S et al.)
	 */
	uint8_t flags;
#ifdef CONFIG_USERSPACE
#ifndef CONFIG_X86_COMMON_PAGE_TABLE
	/* Physical address of the page tables used by this thread */
	uintptr_t ptables;
#endif /* CONFIG_X86_COMMON_PAGE_TABLE */
	/* Initial privilege mode stack pointer when doing a system call.
	 * Un-set for supervisor threads.
	 */
	char *psp;
#endif
#if defined(CONFIG_LAZY_FPU_SHARING)
	/*
	 * Nested exception count to maintain setting of EXC_ACTIVE flag across
	 * outermost exception. EXC_ACTIVE is used by z_swap() lazy FP
	 * save/restore and by debug tools.
	 */
	unsigned excNestCount; /* nested exception count */
#endif /* CONFIG_LAZY_FPU_SHARING */
	/* FPU/MMX/SSE register save area (union of both formats). */
	tPreempFloatReg preempFloatReg; /* volatile float register storage */
};

typedef struct _thread_arch _thread_arch_t;
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_X86_IA32_THREAD_H_ */
``` | /content/code_sandbox/include/zephyr/arch/x86/ia32/thread.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,109 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_X86_IA32_SEGMENTATION_H_
#define ZEPHYR_INCLUDE_ARCH_X86_IA32_SEGMENTATION_H_
#include <zephyr/types.h>
/* Host gen_idt uses this header as well, don't depend on toolchain.h */
#ifndef __packed
#define __packed __attribute__((packed))
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* NOTE: We currently do not have definitions for 16-bit segment, currently
* assume everything we are working with is 32-bit
*/
#define SEG_TYPE_LDT 0x2
#define SEG_TYPE_TASK_GATE 0x5
#define SEG_TYPE_TSS 0x9
#define SEG_TYPE_TSS_BUSY 0xB
#define SEG_TYPE_CALL_GATE 0xC
#define SEG_TYPE_IRQ_GATE 0xE
#define SEG_TYPE_TRAP_GATE 0xF
#define DT_GRAN_BYTE 0
#define DT_GRAN_PAGE 1
#define DT_READABLE 1
#define DT_NON_READABLE 0
#define DT_WRITABLE 1
#define DT_NON_WRITABLE 0
#define DT_EXPAND_DOWN 1
#define DT_EXPAND_UP 0
#define DT_CONFORM 1
#define DT_NONCONFORM 0
#define DT_TYPE_SYSTEM 0
#define DT_TYPE_CODEDATA 1
#ifndef _ASMLANGUAGE
/* Section 7.2.1 of IA architecture SW developer manual, Vol 3. */
struct __packed task_state_segment {
uint16_t backlink;
uint16_t reserved_1;
uint32_t esp0;
uint16_t ss0;
uint16_t reserved_2;
uint32_t esp1;
uint16_t ss1;
uint16_t reserved_3;
uint32_t esp2;
uint16_t ss2;
uint16_t reserved_4;
uint32_t cr3;
uint32_t eip;
uint32_t eflags;
uint32_t eax;
uint32_t ecx;
uint32_t edx;
uint32_t ebx;
uint32_t esp;
uint32_t ebp;
uint32_t esi;
uint32_t edi;
uint16_t es;
uint16_t reserved_5;
uint16_t cs;
uint16_t reserved_6;
uint16_t ss;
uint16_t reserved_7;
uint16_t ds;
uint16_t reserved_8;
uint16_t fs;
uint16_t reserved_9;
uint16_t gs;
uint16_t reserved_10;
uint16_t ldt_ss;
uint16_t reserved_11;
uint8_t t:1; /* Trap bit */
uint16_t reserved_12:15;
uint16_t iomap;
};
#define SEG_SELECTOR(index, table, dpl) (index << 3 | table << 2 | dpl)
/* References
*
* Section 5.8.3 (Call gates)
* Section 7.2.2 (TSS Descriptor)
* Section 3.4.5 (Segment descriptors)
* Section 6.11 (IDT Descriptors)
*
* IA architecture SW developer manual, Vol 3.
*/
struct __packed segment_descriptor {
/* First DWORD: 0-15 */
union {
/* IRQ, call, trap gates */
uint16_t limit_low;
/* Task gates */
uint16_t reserved_task_gate_0;
/* Everything else */
uint16_t offset_low;
};
/* First DWORD: 16-31 */
union {
/* Call/Task/Interrupt/Trap gates */
uint16_t segment_selector;
/* TSS/LDT/Segments */
uint16_t base_low; /* Bits 0-15 */
};
/* Second DWORD: 0-7 */
union {
/* TSS/LDT/Segments */
uint8_t base_mid; /* Bits 16-23 */
/* Task gates */
uint8_t reserved_task_gate_1;
/* IRQ/Trap/Call Gates */
struct {
/* Reserved except in case of call gates */
uint8_t reserved_or_param:5;
/* Bits 5-7 0 0 0 per CPU manual */
uint8_t always_0_0:3;
};
};
/* Second DWORD: 8-15 */
union {
/* Code or data Segments */
struct {
/* Set by the processor, init to 0 */
uint8_t accessed:1;
/* executable ? readable : writable */
uint8_t rw:1;
/* executable ? conforming : direction */
uint8_t cd:1;
/* 1=code 0=data */
uint8_t executable:1;
/* Next 3 fields actually common to all */
/* 1=code or data, 0=system type */
uint8_t descriptor_type:1;
uint8_t dpl:2;
uint8_t present:1;
};
/* System types */
struct {
/* One of the SEG_TYPE_* macros above */
uint8_t type:4;
/* Alas, C doesn't let you do a union of the first
* 4 bits of a bitfield and put the rest outside of it,
* it ends up getting padded.
*/
uint8_t use_other_union:4;
};
};
/* Second DWORD: 16-31 */
union {
/* Call/IRQ/trap gates */
uint16_t offset_hi;
/* Task Gates */
uint16_t reserved_task_gate_2;
/* segment/LDT/TSS */
struct {
uint8_t limit_hi:4;
/* flags */
uint8_t avl:1; /* CPU ignores this */
/* 1=Indicates 64-bit code segment in IA-32e mode */
uint8_t flags_l:1; /* L field */
uint8_t db:1; /* D/B field 1=32-bit 0=16-bit*/
uint8_t granularity:1;
uint8_t base_hi; /* Bits 24-31 */
};
};
};
/* Address of this passed to lidt/lgdt.
* IA manual calls this a 'pseudo descriptor'.
*/
struct __packed pseudo_descriptor {
uint16_t size;
struct segment_descriptor *entries;
};
/*
* Full linear address (segment selector+offset), for far jumps/calls
*/
struct __packed far_ptr {
/** Far pointer offset, unused when invoking a task. */
void *offset;
/** Far pointer segment/gate selector. */
uint16_t sel;
};
#define DT_ZERO_ENTRY { { 0 } }
/* NOTE: the below macros only work for fixed addresses provided at build time.
* Base addresses or offsets cannot be &some_variable, as pointer values are not
* known until link time and the compiler has to split the address into various
* fields in the segment selector well before that.
*
* If you really need to put &some_variable as the base address in some
* segment descriptor, you will either need to do the assignment at runtime
* or implement some tool to populate values post-link like gen_idt does.
*/
#define _LIMIT_AND_BASE(base_p, limit_p, granularity_p) \
.base_low = (((uint32_t)base_p) & 0xFFFF), \
.base_mid = (((base_p) >> 16) & 0xFF), \
.base_hi = (((base_p) >> 24) & 0xFF), \
.limit_low = ((limit_p) & 0xFFFF), \
.limit_hi = (((limit_p) >> 16) & 0xF), \
.granularity = (granularity_p), \
.flags_l = 0, \
.db = 1, \
.avl = 0
#define _SEGMENT_AND_OFFSET(segment_p, offset_p) \
.segment_selector = (segment_p), \
.offset_low = ((offset_p) & 0xFFFF), \
.offset_hi = ((offset_p) >> 16)
#define _DESC_COMMON(dpl_p) \
.dpl = (dpl_p), \
.present = 1
#define _SYS_DESC(type_p) \
.type = type_p, \
.descriptor_type = 0
#define DT_CODE_SEG_ENTRY(base_p, limit_p, granularity_p, dpl_p, readable_p, \
conforming_p) \
{ \
_DESC_COMMON(dpl_p), \
_LIMIT_AND_BASE(base_p, limit_p, granularity_p), \
.accessed = 0, \
.rw = (readable_p), \
.cd = (conforming_p), \
.executable = 1, \
.descriptor_type = 1 \
}
#define DT_DATA_SEG_ENTRY(base_p, limit_p, granularity_p, dpl_p, writable_p, \
direction_p) \
{ \
_DESC_COMMON(dpl_p), \
_LIMIT_AND_BASE(base_p, limit_p, granularity_p), \
.accessed = 0, \
.rw = (writable_p), \
.cd = (direction_p), \
.executable = 0, \
.descriptor_type = 1 \
}
#define DT_LDT_ENTRY(base_p, limit_p, granularity_p, dpl_p) \
{ \
_DESC_COMMON(dpl_p), \
_LIMIT_AND_BASE(base_p, limit_p, granularity_p), \
_SYS_DESC(SEG_TYPE_LDT) \
}
#define DT_TSS_ENTRY(base_p, limit_p, granularity_p, dpl_p) \
{ \
_DESC_COMMON(dpl_p), \
_LIMIT_AND_BASE(base_p, limit_p, granularity_p), \
_SYS_DESC(SEG_TYPE_TSS) \
}
/* "standard" TSS segments that don't stuff extra data past the end of the
* TSS struct
*/
#define DT_TSS_STD_ENTRY(base_p, dpl_p) \
DT_TSS_ENTRY(base_p, sizeof(struct task_state_segment), DT_GRAN_BYTE, \
dpl_p)
#define DT_TASK_GATE_ENTRY(segment_p, dpl_p) \
{ \
_DESC_COMMON(dpl_p), \
_SYS_DESC(SEG_TYPE_TASK_GATE), \
.segment_selector = (segment_p) \
}
#define DT_IRQ_GATE_ENTRY(segment_p, offset_p, dpl_p) \
{ \
_DESC_COMMON(dpl_p), \
_SEGMENT_AND_OFFSET(segment_p, offset_p), \
_SYS_DESC(SEG_TYPE_IRQ_GATE), \
.always_0_0 = 0 \
}
#define DT_TRAP_GATE_ENTRY(segment_p, offset_p, dpl_p) \
{ \
_DESC_COMMON(dpl_p), \
_SEGMENT_AND_OFFSET(segment_p, offset_p), \
_SYS_DESC(SEG_TYPE_TRAP_GATE), \
.always_0_0 = 0 \
}
#define DT_CALL_GATE_ENTRY(segment_p, offset_p, dpl_p, param_count_p) \
{ \
_DESC_COMMON(dpl_p), \
_SEGMENT_AND_OFFSET(segment_p, offset_p), \
_SYS_DESC(SEG_TYPE_TRAP_GATE), \
.reserved_or_param = (param_count_p), \
.always_0_0 = 0 \
}
#define DTE_BASE(dt_entry) ((dt_entry)->base_low | \
((dt_entry)->base_mid << 16) | \
((dt_entry)->base_hi << 24))
#define DTE_LIMIT(dt_entry) ((dt_entry)->limit_low | \
((dt_entry)->limit_hi << 16))
#define DTE_OFFSET(dt_entry) ((dt_entry)->offset_low | \
((dt_entry)->offset_hi << 16))
#define DT_INIT(entries) { sizeof(entries) - 1, &entries[0] }
#ifdef CONFIG_SET_GDT
/* This is either the ROM-based GDT in crt0.S or generated by gen_gdt.py,
* depending on CONFIG_GDT_DYNAMIC
*/
extern struct pseudo_descriptor _gdt;
#endif
extern const struct pseudo_descriptor z_idt;
/**
* Properly set the segment descriptor segment and offset
*
* Used for call/interrupt/trap gates
*
* @param sd Segment descriptor
* @param offset Offset within segment
* @param segment_selector Segment selector
*/
static inline void z_sd_set_seg_offset(struct segment_descriptor *sd,
				       uint16_t segment_selector,
				       uint32_t offset)
{
	/* Record the target segment, split the 32-bit handler offset
	 * across the low/high halves, and clear the must-be-zero bits.
	 * Used for call/interrupt/trap gates.
	 */
	sd->segment_selector = segment_selector;
	sd->offset_hi = offset >> 16U;
	sd->offset_low = offset & 0xFFFFU;
	sd->always_0_0 = 0U;
}
/**
* Initialize an segment descriptor to be a 32-bit IRQ gate
*
* @param sd Segment descriptor memory
* @param seg_selector Segment selector of handler
* @param offset offset of handler
* @param dpl descriptor privilege level
*/
static inline void z_init_irq_gate(struct segment_descriptor *sd,
				   uint16_t seg_selector, uint32_t offset,
				   uint32_t dpl)
{
	/* Fill in the gate's target, then mark it a present 32-bit IRQ
	 * gate (system descriptor) at the requested privilege level.
	 */
	z_sd_set_seg_offset(sd, seg_selector, offset);
	sd->type = SEG_TYPE_IRQ_GATE;
	sd->descriptor_type = DT_TYPE_SYSTEM;
	sd->dpl = dpl;
	sd->present = 1U;
}
/**
* Set current IA task TSS
*
* @param sel Segment selector in GDT for desired TSS
*/
static inline void _set_tss(uint16_t sel)
{
	/* ltr: load the task register with the given TSS selector */
	__asm__ __volatile__ ("ltr %0" :: "r" (sel));
}
/**
* Get the TSS segment selector in the GDT for the current IA task
*
* @return Segment selector for current IA task
*/
static inline uint16_t _get_tss(void)
{
	uint16_t sel;

	/* str: store the task register (current TSS selector) */
	__asm__ __volatile__ ("str %0" : "=r" (sel));
	return sel;
}
/**
* Get the current global descriptor table
*
* @param gdt Pointer to memory to receive GDT pseudo descriptor information
*/
static inline void _get_gdt(struct pseudo_descriptor *gdt)
{
	/* sgdt: store GDTR (limit + base) into *gdt */
	__asm__ __volatile__ ("sgdt %0" : "=m" (*gdt));
}
/**
* Get the current interrupt descriptor table
*
* @param idt Pointer to memory to receive IDT pseudo descriptor information
*/
static inline void _get_idt(struct pseudo_descriptor *idt)
{
	/* sidt: store IDTR (limit + base) into *idt */
	__asm__ __volatile__ ("sidt %0" : "=m" (*idt));
}
/**
* Get the current local descriptor table (LDT)
*
* @return Segment selector in the GDT for the current LDT
*/
static inline uint16_t _get_ldt(void)
{
	uint16_t ret;

	/* sldt: store the current LDT segment selector */
	__asm__ __volatile__ ("sldt %0" : "=m" (ret));
	return ret;
}
/**
* Set the local descriptor table for the current IA Task
*
* @param ldt Segment selector in the GDT for an LDT
*/
static inline void _set_ldt(uint16_t ldt)
{
	/* lldt: load the LDT register from a GDT selector */
	__asm__ __volatile__ ("lldt %0" :: "m" (ldt));
}
/**
* Set the global descriptor table
*
* You will most likely need to update all the data segment registers
* and do a far call to the code segment.
*
* @param gdt Pointer to GDT pseudo descriptor.
*/
static inline void _set_gdt(const struct pseudo_descriptor *gdt)
{
	/* lgdt: load GDTR from the pseudo descriptor; segment registers
	 * still reference the old table until reloaded (see API comment).
	 */
	__asm__ __volatile__ ("lgdt %0" :: "m" (*gdt));
}
/**
* Set the interrupt descriptor table
*
* @param idt Pointer to IDT pseudo descriptor.
*/
static inline void z_set_idt(const struct pseudo_descriptor *idt)
{
	/* lidt: load IDTR from the pseudo descriptor */
	__asm__ __volatile__ ("lidt %0" :: "m" (*idt));
}
/**
* Get the segment selector for the current code segment
*
* @return Segment selector
*/
static inline uint16_t _get_cs(void)
{
	uint16_t cs = 0U;

	/* read the current code segment selector register */
	__asm__ __volatile__ ("mov %%cs, %0" : "=r" (cs));
	return cs;
}
/**
* Get the segment selector for the current data segment
*
* @return Segment selector
*/
static inline uint16_t _get_ds(void)
{
	uint16_t ds = 0U;

	/* read the current data segment selector register */
	__asm__ __volatile__ ("mov %%ds, %0" : "=r" (ds));
	return ds;
}
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_X86_IA32_SEGMENTATION_H_ */
``` | /content/code_sandbox/include/zephyr/arch/x86/ia32/segmentation.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,525 |
```linker script
/*
*
*/
#ifndef CONFIG_DYNAMIC_INTERRUPTS
/* Statically-built IDT: 8 bytes per vector. First-pass link reserves
 * the space; the final link keeps the generated table.
 */
. = ALIGN(8);
_idt_base_address = .;
#ifdef LINKER_ZEPHYR_FINAL
KEEP(*(staticIdt))
#else
. += CONFIG_IDT_NUM_VECTORS * 8;
#endif /* LINKER_ZEPHYR_FINAL */

/* IRQ line -> interrupt vector map: one byte per IRQ line */
. = ALIGN(4);
_irq_to_interrupt_vector = .;
#ifdef LINKER_ZEPHYR_FINAL
KEEP(*(irq_int_vector_map))
#else
. += CONFIG_MAX_IRQ_LINES;
#endif
#endif /* CONFIG_DYNAMIC_INTERRUPTS */
``` | /content/code_sandbox/include/zephyr/arch/x86/ia32/scripts/static_intr.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 118 |
```linker script
/*
*
*/
#ifdef CONFIG_DYNAMIC_INTERRUPTS
/* IRQ line -> interrupt vector map: one byte per IRQ line */
. = ALIGN(4);
_irq_to_interrupt_vector = .;
#ifdef LINKER_ZEPHYR_FINAL
KEEP(*(irq_int_vector_map))
#else
. += CONFIG_MAX_IRQ_LINES;
#endif /* LINKER_ZEPHYR_FINAL */

/* Vector allocation bitmap: one bit per IDT vector, rounded up to
 * whole bytes.
 */
z_interrupt_vectors_allocated = .;
#ifdef LINKER_ZEPHYR_FINAL
KEEP(*(irq_vectors_alloc))
#else
. += (CONFIG_IDT_NUM_VECTORS + 7) / 8;
#endif /* LINKER_ZEPHYR_FINAL */
#endif /* CONFIG_DYNAMIC_INTERRUPTS */
``` | /content/code_sandbox/include/zephyr/arch/x86/ia32/scripts/dynamic_intr.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 123 |
```linker script
/*
*
*/
/* Helper macros that gather the named input section (and its .* variants)
 * from specific object files inside the Intel HAL archive.
 */
#define AON_C_OBJECT_FILE_IN_SECT(lsect, objfile) \
	KEEP(*_intel_hal.a:objfile.c.obj(.##lsect)) \
	KEEP(*_intel_hal.a:objfile.c.obj(.##lsect##.*))

#define AON_S_OBJECT_FILE_IN_SECT(lsect, objfile) \
	KEEP(*_intel_hal.a:objfile.S.obj(.##lsect)) \
	KEEP(*_intel_hal.a:objfile.S.obj(.##lsect##.*))

/* Collect the given section from the aon_task/ish_dma C objects and the
 * ipapg assembly object.
 */
#define AON_IN_SECT(lsect) \
	AON_C_OBJECT_FILE_IN_SECT(lsect, aon_task) \
	AON_C_OBJECT_FILE_IN_SECT(lsect, ish_dma) \
	AON_S_OBJECT_FILE_IN_SECT(lsect, ipapg)

GROUP_START(AON)

/* Single 'aon' output section bracketed by aon_start/aon_end: the shared
 * data block followed by the always-on objects' data, text and bss.
 */
SECTION_PROLOGUE(aon,,)
{
	aon_start = .;
	KEEP(*(.data.aon_share))
	AON_IN_SECT(data)
	AON_IN_SECT(text)
	AON_IN_SECT(bss)
	aon_end = .;
} GROUP_LINK_IN(AON)

GROUP_END(AON)
``` | /content/code_sandbox/include/zephyr/arch/x86/ia32/scripts/ish_aon.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 261 |
```linker script
/*
*
*/
#ifdef CONFIG_X86_KPTI
MMU_PAGE_ALIGN_PERM
z_shared_kernel_page_start = .;
/* Special page containing supervisor data that is still mapped in
 * user mode page tables. IDT, GDT, TSSes, trampoline stack, and
 * any LDT must go here as they always must live in a page that is
 * marked 'present'. Still not directly user accessible, but
 * no sensitive data should be here as Meltdown exploits may read it.
 */
#endif /* CONFIG_X86_KPTI */

#ifdef CONFIG_DYNAMIC_INTERRUPTS
/* IDT for dynamic interrupts: 8 bytes per vector. First-pass link
 * reserves the space; the final link keeps the generated table.
 */
. = ALIGN(8);
_idt_base_address = .;
#ifdef LINKER_ZEPHYR_FINAL
KEEP(*(staticIdt))
#else
. += CONFIG_IDT_NUM_VECTORS * 8;
#endif /* LINKER_ZEPHYR_FINAL */
#endif /* CONFIG_DYNAMIC_INTERRUPTS */

#ifdef CONFIG_GDT_DYNAMIC
KEEP(*(.tss))
. = ALIGN(8);
_gdt = .;
#ifdef LINKER_ZEPHYR_FINAL
KEEP(*(gdt))
#else /* LINKER_ZEPHYR_FINAL */
/* First pass: reserve space for the generated GDT. Entry count depends
 * on the TLS/userspace/stack-protection options; each descriptor is
 * 8 bytes.
 */
#if defined(CONFIG_THREAD_LOCAL_STORAGE) && !defined(CONFIG_X86_64)
#define GDT_NUM_TLS_ENTRIES 1
#else
#define GDT_NUM_TLS_ENTRIES 0
#endif
#ifdef CONFIG_USERSPACE
#define GDT_NUM_ENTRIES 7
#elif defined(CONFIG_X86_STACK_PROTECTION)
#define GDT_NUM_ENTRIES 5
#else
#define GDT_NUM_ENTRIES 3
#endif /* CONFIG_X86_USERSPACE */
. += (GDT_NUM_ENTRIES + GDT_NUM_TLS_ENTRIES) * 8;
#endif /* LINKER_ZEPHYR_FINAL */
. += CONFIG_GDT_RESERVED_NUM_ENTRIES * 8;
#endif /* CONFIG_GDT_DYNAMIC */

#ifdef CONFIG_X86_KPTI
/* Trampoline stack: the rest of the shared page up to the next page
 * boundary. Must hold at least 40 bytes, and the whole shared area
 * must be exactly one 4K page (checked below).
 */
z_trampoline_stack_start = .;
MMU_PAGE_ALIGN_PERM
z_trampoline_stack_end = .;
z_shared_kernel_page_end = .;

ASSERT(z_trampoline_stack_end - z_trampoline_stack_start >= 40,
	"trampoline stack too small");
ASSERT(z_shared_kernel_page_end - z_shared_kernel_page_start == 4096,
	"shared kernel area is not one memory page");
#endif /* CONFIG_X86_KPTI */
``` | /content/code_sandbox/include/zephyr/arch/x86/ia32/scripts/shared_kernel_pages.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 484 |
```linker script
/*
*
*/
/**
* @file
* @brief Common linker sections
*
* This script defines the memory location of the various sections that make up
* a Zephyr Kernel image. This file is used by the linker.
*
* This script places the various sections of the image according to what
* features are enabled by the kernel's configuration options.
*
* For a build that does not use the execute in place (XIP) feature, the script
* generates an image suitable for loading into and executing from RAMABLE_REGION by
* placing all the sections adjacent to each other. There is also no separate
* load address for the DATA section which means it doesn't have to be copied
* into RAMABLE_REGION.
*
* For builds using XIP, there is a different load memory address (LMA) and
* virtual memory address (VMA) for the DATA section. In this case the DATA
* section is copied into RAMABLE_REGION at runtime.
*
* When building an XIP image the data section is placed into ROMABLE_REGION. In this
* case, the LMA is set to __data_rom_start so the data section is concatenated
* at the end of the RODATA section. At runtime, the DATA section is copied
* into the RAMABLE_REGION region so it can be accessed with read and write permission.
*
* Most symbols defined in the sections below are subject to be referenced in
* the Zephyr Kernel image. If a symbol is used but not defined the linker will
* emit an undefined symbol error.
*
* Please do not change the order of the section as the kernel expects this
* order when programming the MMU.
*/
#include <zephyr/linker/linker-defs.h>
#include <zephyr/offsets.h>
#include <zephyr/sys/util.h>
#include <zephyr/kernel/mm.h>
#include <zephyr/linker/linker-tool.h>
#if defined(CONFIG_XIP) || defined(K_MEM_IS_VM_KERNEL)
#define ROMABLE_REGION ROM
#define RAMABLE_REGION RAM
#else
#define ROMABLE_REGION RAM
#define RAMABLE_REGION RAM
#endif
#ifdef CONFIG_MMU
#define MMU_PAGE_ALIGN . = ALIGN(CONFIG_MMU_PAGE_SIZE);
#else
#define MMU_PAGE_ALIGN
#endif
/* Used to align areas with separate memory permission characteristics
* so that the page permissions can be set in the MMU. Without this,
* the kernel is just one blob with the same RWX permissions on all RAM
*/
#ifdef CONFIG_SRAM_REGION_PERMISSIONS
#define MMU_PAGE_ALIGN_PERM MMU_PAGE_ALIGN
#else
#define MMU_PAGE_ALIGN_PERM
#endif
epoint = K_MEM_PHYS_ADDR(CONFIG_KERNEL_ENTRY);
ENTRY(epoint)
/* SECTIONS definitions */
SECTIONS
{
#include <zephyr/linker/rel-sections.ld>
#ifdef CONFIG_LLEXT
#include <zephyr/linker/llext-sections.ld>
#endif
/DISCARD/ :
{
*(.plt)
}
/DISCARD/ :
{
*(.iplt)
}
#if defined(CONFIG_SOC_FAMILY_INTEL_ISH) && defined(CONFIG_PM)
#include <zephyr/arch/x86/ia32/scripts/ish_aon.ld>
#endif
#ifdef CONFIG_LINKER_USE_BOOT_SECTION
SECTION_PROLOGUE(boot.text,,)
{
#include <snippets-rom-start.ld>
MMU_PAGE_ALIGN
lnkr_boot_start = .;
z_mapped_start = .;
lnkr_boot_text_start = .;
KEEP(*(.boot_text.__start))
*(.boot_text)
*(.boot_text.*)
MMU_PAGE_ALIGN_PERM
lnkr_boot_text_end = .;
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
SECTION_PROLOGUE(boot.rodata,,)
{
MMU_PAGE_ALIGN_PERM
lnkr_boot_rodata_start = .;
*(.boot_rodata)
*(.boot_rodata.*)
MMU_PAGE_ALIGN_PERM
lnkr_boot_rodata_end = .;
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
SECTION_PROLOGUE(boot.data,,)
{
MMU_PAGE_ALIGN_PERM
. = ALIGN(4);
lnkr_boot_data_start = .;
*(.boot_data)
*(.boot_data.*)
lnkr_boot_data_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
SECTION_PROLOGUE(boot.bss, (NOLOAD),)
{
. = ALIGN(4);
lnkr_boot_bss_start = .;
*(.boot_bss)
*(.boot_bss.*)
lnkr_boot_bss_end = .;
} GROUP_NOLOAD_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
SECTION_PROLOGUE(boot.noinit, (NOLOAD),)
{
. = ALIGN(4);
lnkr_boot_noinit_start = .;
*(.boot_noinit)
*(.boot_noinit.*)
lnkr_boot_noinit_end = .;
MMU_PAGE_ALIGN
lnkr_boot_end = .;
} GROUP_NOLOAD_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
lnkr_boot_text_size = lnkr_boot_text_end - lnkr_boot_text_start;
lnkr_boot_rodata_size = lnkr_boot_rodata_end - lnkr_boot_rodata_start;
lnkr_boot_data_size = lnkr_boot_data_end - lnkr_boot_data_start;
lnkr_boot_bss_size = lnkr_boot_bss_end - lnkr_boot_bss_start;
lnkr_boot_noinit_size = lnkr_boot_noinit_end - lnkr_boot_noinit_start;
#endif /* CONFIG_LINKER_USE_BOOT_SECTION */
#ifdef CONFIG_LINKER_USE_PINNED_SECTION
SECTION_PROLOGUE(pinned.text,,)
{
#ifndef CONFIG_LINKER_USE_BOOT_SECTION
#include <snippets-rom-start.ld>
#endif
MMU_PAGE_ALIGN
lnkr_pinned_start = .;
#ifndef CONFIG_LINKER_USE_BOOT_SECTION
z_mapped_start = .;
#endif
lnkr_pinned_text_start = .;
*(.pinned_text)
*(.pinned_text.*)
*(.gnu.linkonce.t.exc_*)
#include <zephyr/linker/kobject-text.ld>
MMU_PAGE_ALIGN_PERM
lnkr_pinned_text_end = .;
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
MMU_PAGE_ALIGN_PERM
lnkr_pinned_rodata_start = .;
#include <zephyr/linker/common-rom.ld>
/* Located in generated directory. This file is populated by calling
* zephyr_linker_sources(ROM_SECTIONS ...). Useful for grouping iterable RO structs.
*/
#include <snippets-rom-sections.ld>
#include <zephyr/linker/thread-local-storage.ld>
SECTION_PROLOGUE(pinned.rodata,,)
{
#include <zephyr/arch/x86/ia32/scripts/static_intr.ld>
*(.pinned_rodata)
*(.pinned_rodata.*)
#include <snippets-rodata.ld>
#include <zephyr/linker/kobject-rom.ld>
MMU_PAGE_ALIGN_PERM
lnkr_pinned_rodata_end = .;
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
SECTION_PROLOGUE(pinned.data,,)
{
MMU_PAGE_ALIGN_PERM
lnkr_pinned_data_start = .;
. = ALIGN(4);
#include <zephyr/arch/x86/ia32/scripts/shared_kernel_pages.ld>
#include <zephyr/arch/x86/ia32/scripts/dynamic_intr.ld>
*(.pinned_data)
*(.pinned_data.*)
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#include <zephyr/linker/common-ram.ld>
#include <zephyr/arch/x86/pagetables.ld>
#include <zephyr/linker/kobject-data.ld>
lnkr_pinned_data_end = .;
SECTION_PROLOGUE(pinned.bss, (NOLOAD),)
{
. = ALIGN(4);
lnkr_pinned_bss_start = .;
*(.pinned_bss)
*(.pinned_bss.*)
lnkr_pinned_bss_end = .;
} GROUP_NOLOAD_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
SECTION_PROLOGUE(pinned.noinit, (NOLOAD),)
{
. = ALIGN(4);
lnkr_pinned_noinit_start = .;
*(.pinned_noinit)
*(.pinned_noinit.*)
lnkr_pinned_noinit_end = .;
MMU_PAGE_ALIGN
lnkr_pinned_end = .;
} GROUP_NOLOAD_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
lnkr_pinned_text_size = lnkr_pinned_text_end - lnkr_pinned_text_start;
lnkr_pinned_rodata_size = lnkr_pinned_rodata_end - lnkr_pinned_rodata_start;
lnkr_pinned_data_size = lnkr_pinned_data_end - lnkr_pinned_data_start;
lnkr_pinned_bss_size = lnkr_pinned_bss_end - lnkr_pinned_bss_start;
lnkr_pinned_noinit_size = lnkr_pinned_noinit_end - lnkr_pinned_noinit_start;
#endif /* CONFIG_LINKER_USE_PINNED_SECTION */
GROUP_START(ROMABLE_REGION)
. = ALIGN(8);
#ifdef CONFIG_XIP
__rom_region_start = PHYS_LOAD_ADDR;
#endif
SECTION_PROLOGUE(_TEXT_SECTION_NAME,,)
{
__text_region_start = .;
#if !defined(CONFIG_LINKER_USE_BOOT_SECTION) || \
!defined(CONFIG_LINKER_USE_PINNED_SECTION)
#ifndef CONFIG_XIP
z_mapped_start = .;
#endif
#endif
#if !defined(CONFIG_LINKER_USE_BOOT_SECTION) || \
!defined(CONFIG_LINKER_USE_PINNED_SECTION)
/* Located in generated directory. This file is populated by calling
* zephyr_linker_sources(ROM_START ...). This typically contains the vector
* table and debug information.
*/
#include <snippets-rom-start.ld>
#endif
/* Needs KEEP() as ENTRY() is given a physical address */
KEEP(*(.text.__start))
/*
* We need these sections to extract interrupt information, but they
* will be removed with "--gc-sections" by LLVM lld, so add keep
* command to save them.
*/
#ifndef CONFIG_LLVM_USE_LD
KEEP(*(.text.irqstubs))
KEEP(*(".gnu.linkonce.t.exc_*_stub"))
#endif
*(.text)
*(".text.*")
*(.gnu.linkonce.t.*)
*(.init)
*(.fini)
*(.eini)
#ifndef CONFIG_LINKER_USE_PINNED_SECTION
#include <zephyr/linker/kobject-text.ld>
#endif
MMU_PAGE_ALIGN_PERM
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
__text_region_end = .;
__text_region_size = __text_region_end - __text_region_start;
__rodata_region_start = .;
#ifndef CONFIG_LINKER_USE_PINNED_SECTION
#include <zephyr/linker/common-rom.ld>
/* Located in generated directory. This file is populated by calling
* zephyr_linker_sources(ROM_SECTIONS ...). Useful for grouping iterable RO structs.
*/
#include <snippets-rom-sections.ld>
#include <zephyr/linker/thread-local-storage.ld>
#endif
SECTION_PROLOGUE(_RODATA_SECTION_NAME,,)
{
*(.rodata)
*(".rodata.*")
*(.gnu.linkonce.r.*)
#ifndef CONFIG_DYNAMIC_INTERRUPTS
#ifndef CONFIG_LINKER_USE_PINNED_SECTION
#include <zephyr/arch/x86/ia32/scripts/static_intr.ld>
#endif /* !CONFIG_LINKER_USE_PINNED_SECTION */
#endif /* CONFIG_DYNAMIC_INTERRUPTS */
#ifndef CONFIG_LINKER_USE_PINNED_SECTION
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-rodata.ld>
#include <zephyr/linker/kobject-rom.ld>
#endif
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#include <zephyr/linker/cplusplus-rom.ld>
MMU_PAGE_ALIGN_PERM
/* ROM ends here, position counter will now be in RAM areas */
#ifdef CONFIG_XIP
__rom_region_end = .;
__rom_region_size = __rom_region_end - __rom_region_start;
#endif
__rodata_region_end = .;
__rodata_region_size = __rodata_region_end - __rodata_region_start;
GROUP_END(ROMABLE_REGION)
/*
* Needed for dynamic linking which we do not have, do discard
*/
/DISCARD/ : {
*(.got.plt)
*(.igot.plt)
*(.got)
*(.igot)
}
/* RAMABLE_REGION */
GROUP_START(RAMABLE_REGION)
#ifdef CONFIG_XIP
MMU_PAGE_ALIGN
z_mapped_start = .;
#endif
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-ram-sections.ld>
#ifdef CONFIG_USERSPACE
/* APP SHARED MEMORY REGION */
#define SMEM_PARTITION_ALIGN(size) MMU_PAGE_ALIGN_PERM
#define APP_SHARED_ALIGN MMU_PAGE_ALIGN_PERM
#include <app_smem.ld>
_image_ram_start = _app_smem_start;
_app_smem_size = _app_smem_end - _app_smem_start;
_app_smem_num_words = _app_smem_size >> 2;
_app_smem_rom_start = LOADADDR(_APP_SMEM_SECTION_NAME);
_app_smem_num_words = _app_smem_size >> 2;
#endif /* CONFIG_USERSPACE */
SECTION_DATA_PROLOGUE(_DATA_SECTION_NAME,,)
{
MMU_PAGE_ALIGN_PERM
__data_region_start = .;
__data_start = .;
*(.data)
*(".data.*")
*(".kernel.*")
#ifdef CONFIG_DYNAMIC_INTERRUPTS
#ifndef CONFIG_LINKER_USE_PINNED_SECTION
#include <zephyr/arch/x86/ia32/scripts/dynamic_intr.ld>
#endif /* !CONFIG_LINKER_USE_PINNED_SECTION */
#endif /* CONFIG_DYNAMIC_INTERRUPTS */
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-rwdata.ld>
#ifndef CONFIG_LINKER_USE_PINNED_SECTION
#include <zephyr/arch/x86/ia32/scripts/shared_kernel_pages.ld>
#endif /* !CONFIG_LINKER_USE_PINNED_SECTION */
. = ALIGN(4);
__data_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
__data_size = __data_end - __data_start;
__data_load_start = LOADADDR(_DATA_SECTION_NAME);
__data_region_load_start = LOADADDR(_DATA_SECTION_NAME);
#include <zephyr/linker/cplusplus-ram.ld>
#ifndef CONFIG_LINKER_USE_PINNED_SECTION
#include <zephyr/linker/common-ram.ld>
#include <zephyr/arch/x86/pagetables.ld>
/* Must be last in RAM */
#include <zephyr/linker/kobject-data.ld>
#endif /* !CONFIG_LINKER_USE_PINNED_SECTION */
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-data-sections.ld>
MMU_PAGE_ALIGN
__data_region_end = .;
SECTION_PROLOGUE(_BSS_SECTION_NAME, (NOLOAD),)
{
MMU_PAGE_ALIGN_PERM
#if !defined(CONFIG_USERSPACE)
_image_ram_start = .;
#endif
/*
* For performance, BSS section is forced to be both 4 byte aligned and
* a multiple of 4 bytes.
*/
. = ALIGN(4);
__kernel_ram_start = .;
__bss_start = .;
*(.bss)
*(".bss.*")
*(COMMON)
*(".kernel_bss.*")
/*
* As memory is cleared in words only, it is simpler to ensure the BSS
* section ends on a 4 byte boundary. This wastes a maximum of 3 bytes.
*/
. = ALIGN(4);
__bss_end = .;
} GROUP_NOLOAD_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
__bss_num_words = (__bss_end - __bss_start) >> 2;
#include <zephyr/linker/common-noinit.ld>
MMU_PAGE_ALIGN_PERM
/* All unused memory also owned by the kernel for heaps */
__kernel_ram_end = KERNEL_BASE_ADDR + KERNEL_RAM_SIZE;
__kernel_ram_size = __kernel_ram_end - __kernel_ram_start;
_image_ram_all = (KERNEL_BASE_ADDR + KERNEL_RAM_SIZE) - _image_ram_start;
z_mapped_end = .;
z_mapped_size = z_mapped_end - z_mapped_start;
#ifndef LINKER_ZEPHYR_FINAL
/* static interrupts */
SECTION_PROLOGUE(intList,,)
{
KEEP(*(.spurIsr))
KEEP(*(.spurNoErrIsr))
KEEP(*(.intList))
KEEP(*(.gnu.linkonce.intList.*))
} > IDT_LIST
#else
/DISCARD/ :
{
KEEP(*(.spurIsr))
KEEP(*(.spurNoErrIsr))
KEEP(*(.intList))
KEEP(*(.gnu.linkonce.intList.*))
}
#endif
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-sections.ld>
#include <zephyr/linker/ram-end.ld>
GROUP_END(RAMABLE_REGION)
#include <zephyr/linker/debug-sections.ld>
/DISCARD/ : { *(.note.GNU-stack) }
/*
* eh_frame section won't be removed even with "--gc-sections" by LLVM lld.
*/
#if !defined(CONFIG_CPP_EXCEPTIONS)
/DISCARD/ : { *(.eh_frame) }
#endif
/*
* The sections below are still treated as warnings
* with "--orphan-handling=warn" by LLVM lld.
*/
#if !defined(CONFIG_LLVM_USE_LD)
.symtab 0 : { *(.symtab) }
.strtab 0 : { *(.strtab) }
.shstrtab 0 : { *(.shstrtab) }
#endif
}
#ifdef CONFIG_XIP
/*
* Round up number of words for DATA section to ensure that XIP copies the
* entire data section. XIP copy is done in words only, so there may be up
* to 3 extra bytes copied in next section (BSS). At run time, the XIP copy
* is done first followed by clearing the BSS section.
*/
__data_size = (__data_region_end - __data_region_start);
__data_num_words = (__data_size + 3) >> 2;
#endif
``` | /content/code_sandbox/include/zephyr/arch/x86/ia32/linker.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,217 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARC_ADDR_TYPES_H_
#define ZEPHYR_INCLUDE_ARCH_ARC_ADDR_TYPES_H_
#ifndef _ASMLANGUAGE
/*
* MWDT provides paddr_t type and it conflicts with Zephyr definition:
* - Zephyr defines paddr_t as a uintptr_t
* - MWDT defines paddr_t as a unsigned long
* This causes extra warnings. However we can safely define
* paddr_t as a unsigned long for the case when MWDT toolchain is used as
* they are both unsigned, have same size and aligning.
*/
#ifdef __CCAC__
typedef unsigned long paddr_t;
typedef void *vaddr_t;
#else
#include <zephyr/arch/common/addr_types.h>
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARC_ADDR_TYPES_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arc/arc_addr_types.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 184 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARC_SYS_IO_COMMON_H_
#define ZEPHYR_INCLUDE_ARCH_ARC_SYS_IO_COMMON_H_
#ifndef _ASMLANGUAGE
#include <zephyr/toolchain.h>
#include <zephyr/sys/sys_io.h>
#include <zephyr/arch/arc/v2/aux_regs.h>
#include <zephyr/types.h>
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Read one byte from a memory-mapped I/O address.
 *
 * Compiler barriers on both sides keep the access from being
 * reordered relative to surrounding code.
 */
static ALWAYS_INLINE uint8_t sys_read8(mem_addr_t addr)
{
	volatile uint8_t *reg = (volatile uint8_t *)addr;
	uint8_t val;

	compiler_barrier();
	val = *reg;
	compiler_barrier();

	return val;
}
/* Write one byte to a memory-mapped I/O address, fenced by compiler
 * barriers so the store is not reordered by the compiler.
 */
static ALWAYS_INLINE void sys_write8(uint8_t data, mem_addr_t addr)
{
	volatile uint8_t *reg = (volatile uint8_t *)addr;

	compiler_barrier();
	*reg = data;
	compiler_barrier();
}
/* Read a 16-bit value from a memory-mapped I/O address.
 *
 * Compiler barriers on both sides keep the access from being
 * reordered relative to surrounding code.
 */
static ALWAYS_INLINE uint16_t sys_read16(mem_addr_t addr)
{
	volatile uint16_t *reg = (volatile uint16_t *)addr;
	uint16_t val;

	compiler_barrier();
	val = *reg;
	compiler_barrier();

	return val;
}
/* Write a 16-bit value to a memory-mapped I/O address, fenced by
 * compiler barriers so the store is not reordered by the compiler.
 */
static ALWAYS_INLINE void sys_write16(uint16_t data, mem_addr_t addr)
{
	volatile uint16_t *reg = (volatile uint16_t *)addr;

	compiler_barrier();
	*reg = data;
	compiler_barrier();
}
/* Read a 32-bit value from a memory-mapped I/O address.
 *
 * Compiler barriers on both sides keep the access from being
 * reordered relative to surrounding code.
 */
static ALWAYS_INLINE uint32_t sys_read32(mem_addr_t addr)
{
	volatile uint32_t *reg = (volatile uint32_t *)addr;
	uint32_t val;

	compiler_barrier();
	val = *reg;
	compiler_barrier();

	return val;
}
/* Write a 32-bit value to a memory-mapped I/O address, fenced by
 * compiler barriers so the store is not reordered by the compiler.
 */
static ALWAYS_INLINE void sys_write32(uint32_t data, mem_addr_t addr)
{
	volatile uint32_t *reg = (volatile uint32_t *)addr;

	compiler_barrier();
	*reg = data;
	compiler_barrier();
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARC_SYS_IO_COMMON_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arc/sys-io-common.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 366 |
```objective-c
/*
*
*/
/* Guard renamed: the old ZEPHYR_INCLUDE_ARCH_XTENSA_ARCH_INLINES_H_
 * name was a copy-paste artifact -- this is the ARC header (note the
 * arc/v2 include and z_arc_v2_core_id() below) and the xtensa name
 * would collide with xtensa's own arch_inlines.h guard.
 */
#ifndef ZEPHYR_INCLUDE_ARCH_ARC_ARCH_INLINES_H_
#define ZEPHYR_INCLUDE_ARCH_ARC_ARCH_INLINES_H_
#ifndef _ASMLANGUAGE
#include <zephyr/kernel_structs.h>
#include <zephyr/arch/arc/v2/aux_regs.h>
/* Return the _cpu_t of the CPU currently executing this code. */
static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
{
#ifdef CONFIG_SMP
	uint32_t core;

	/* The hardware core ID indexes the per-CPU array directly. */
	core = z_arc_v2_core_id();

	return &_kernel.cpus[core];
#else
	return &_kernel.cpus[0];
#endif /* CONFIG_SMP */
}
/* Return an integer identifying this processor. */
static ALWAYS_INLINE uint32_t arch_proc_id(void)
{
	/*
	 * Placeholder implementation to be replaced with an architecture
	 * specific call to get processor ID
	 */
	return arch_curr_cpu()->id;
}
/* Number of CPUs the kernel is configured for. */
static ALWAYS_INLINE unsigned int arch_num_cpus(void)
{
	return CONFIG_MP_MAX_NUM_CPUS;
}
#endif /* !_ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARC_ARCH_INLINES_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arc/arch_inlines.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 216 |
```objective-c
/*
*
*/
/**
* @file
* @brief ARC specific syscall header
*
* This header contains the ARC specific syscall interface. It is
* included by the syscall interface architecture-abstraction header
* (include/arch/syscall.h)
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARC_SYSCALL_H_
#define ZEPHYR_INCLUDE_ARCH_ARC_SYSCALL_H_
#define _TRAP_S_CALL_RUNTIME_EXCEPT 2
#define _TRAP_S_CALL_SYSTEM_CALL 3
#ifdef CONFIG_USERSPACE
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <stdbool.h>
#ifdef CONFIG_ISA_ARCV2
#include <zephyr/arch/arc/v2/aux_regs.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* Syscall invocation macros. arc-specific machine constraints used to ensure
* args land in the proper registers. Currently, they are all stub functions
* just for enabling CONFIG_USERSPACE on arc w/o errors.
*/
/* Invoke a system call with six arguments.
 *
 * Each argument is pinned to the register the ARC syscall convention
 * uses (r0-r5 for arguments, r6 for the call ID) via register-asm
 * locals, then trap_s enters the kernel; the result comes back in r0.
 * All pinned locals are listed as inputs so the compiler keeps them
 * live across the trap.
 */
static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t arg6,
					     uintptr_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r1 __asm__("r1") = arg2;
	register uint32_t r2 __asm__("r2") = arg3;
	register uint32_t r3 __asm__("r3") = arg4;
	register uint32_t r4 __asm__("r4") = arg5;
	register uint32_t r5 __asm__("r5") = arg6;
	register uint32_t r6 __asm__("r6") = call_id;

	compiler_barrier();

	__asm__ volatile(
		"trap_s %[trap_s_id]\n"
		: "=r"(ret)
		: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
		  "r" (ret), "r" (r1), "r" (r2), "r" (r3),
		  "r" (r4), "r" (r5), "r" (r6));

	return ret;
}
/* Invoke a system call with five arguments (r0-r4; call ID in r6). */
static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5,
					     uintptr_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r1 __asm__("r1") = arg2;
	register uint32_t r2 __asm__("r2") = arg3;
	register uint32_t r3 __asm__("r3") = arg4;
	register uint32_t r4 __asm__("r4") = arg5;
	register uint32_t r6 __asm__("r6") = call_id;

	compiler_barrier();

	__asm__ volatile(
		"trap_s %[trap_s_id]\n"
		: "=r"(ret)
		: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
		  "r" (ret), "r" (r1), "r" (r2), "r" (r3),
		  "r" (r4), "r" (r6));

	return ret;
}
/* Invoke a system call with four arguments (r0-r3; call ID in r6). */
static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r1 __asm__("r1") = arg2;
	register uint32_t r2 __asm__("r2") = arg3;
	register uint32_t r3 __asm__("r3") = arg4;
	register uint32_t r6 __asm__("r6") = call_id;

	compiler_barrier();

	__asm__ volatile(
		"trap_s %[trap_s_id]\n"
		: "=r"(ret)
		: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
		  "r" (ret), "r" (r1), "r" (r2), "r" (r3),
		  "r" (r6));

	return ret;
}
/* Invoke a system call with three arguments (r0-r2; call ID in r6). */
static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3,
					     uintptr_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r1 __asm__("r1") = arg2;
	register uint32_t r2 __asm__("r2") = arg3;
	register uint32_t r6 __asm__("r6") = call_id;

	compiler_barrier();

	__asm__ volatile(
		"trap_s %[trap_s_id]\n"
		: "=r"(ret)
		: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
		  "r" (ret), "r" (r1), "r" (r2), "r" (r6));

	return ret;
}
/* Invoke a system call with two arguments (r0-r1; call ID in r6). */
static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r1 __asm__("r1") = arg2;
	register uint32_t r6 __asm__("r6") = call_id;

	compiler_barrier();

	__asm__ volatile(
		"trap_s %[trap_s_id]\n"
		: "=r"(ret)
		: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
		  "r" (ret), "r" (r1), "r" (r6));

	return ret;
}
/* Invoke a system call with one argument (r0; call ID in r6). */
static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1, uintptr_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r6 __asm__("r6") = call_id;

	compiler_barrier();

	__asm__ volatile(
		"trap_s %[trap_s_id]\n"
		: "=r"(ret)
		: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
		  "r" (ret), "r" (r6));

	return ret;
}
/* Invoke a system call with no arguments (call ID in r6; result in r0). */
static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
{
	register uint32_t ret __asm__("r0");
	register uint32_t r6 __asm__("r6") = call_id;

	compiler_barrier();

	__asm__ volatile(
		"trap_s %[trap_s_id]\n"
		: "=r"(ret)
		: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
		  "r" (ret), "r" (r6));

	return ret;
}
/* Report whether we are executing in a user (unprivileged) context.
 *
 * Reads STATUS32 with the LR instruction and tests the US bit.
 * NOTE(review): this preserves the original polarity -- true when the
 * US bit is CLEAR -- which looks inverted for a "user sp in use" bit;
 * confirm against the ARCv2 PRM and the kernel entry/exit code.
 */
static inline bool arch_is_user_context(void)
{
	uint32_t status;

	compiler_barrier();

	__asm__ volatile("lr %0, [%[status32]]\n"
			 : "=r"(status)
			 : [status32] "i" (_ARC_V2_STATUS32));

	/* Simplified from "!(x) ? true : false" -- identical result. */
	return (status & _ARC_V2_STATUS32_US) == 0;
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* CONFIG_USERSPACE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARC_SYSCALL_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arc/syscall.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,557 |
```objective-c
/*
*
*/
/**
* @file
* @brief ARC specific kernel interface header
*
* This header contains the ARC specific kernel interface. It is
* included by the kernel interface architecture-abstraction header
 * (include/arch/cpu.h)
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARC_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_ARC_ARCH_H_
#include <zephyr/devicetree.h>
#include <zephyr/sw_isr_table.h>
#include <zephyr/arch/common/ffs.h>
#include <zephyr/arch/arc/thread.h>
#include <zephyr/arch/common/sys_bitops.h>
#include "sys-io-common.h"
#include <zephyr/arch/arc/v2/exception.h>
#include <zephyr/arch/arc/v2/irq.h>
#include <zephyr/arch/arc/v2/misc.h>
#include <zephyr/arch/arc/v2/aux_regs.h>
#include <zephyr/arch/arc/v2/arcv2_irq_unit.h>
#include <zephyr/arch/arc/v2/asm_inline.h>
#include <zephyr/arch/arc/arc_addr_types.h>
#include <zephyr/arch/arc/v2/error.h>
#ifdef CONFIG_ARC_CONNECT
#include <zephyr/arch/arc/v2/arc_connect.h>
#endif
#ifdef CONFIG_ISA_ARCV2
#include "v2/sys_io.h"
#ifdef CONFIG_ARC_HAS_SECURE
#include <zephyr/arch/arc/v2/secureshield/arc_secure.h>
#endif
#endif
#if defined(CONFIG_ARC_FIRQ) && defined(CONFIG_ISA_ARCV3)
#error "Unsupported configuration: ARC_FIRQ and ISA_ARCV3"
#endif
/*
* We don't allow the configuration with FIRQ enabled and only one interrupt priority level
* (so all interrupts are FIRQ). Such configuration isn't supported in software and it is not
* beneficial from the performance point of view.
*/
#if defined(CONFIG_ARC_FIRQ) && CONFIG_NUM_IRQ_PRIO_LEVELS < 2
#error "Unsupported configuration: ARC_FIRQ and (NUM_IRQ_PRIO_LEVELS < 2)"
#endif
#if CONFIG_RGF_NUM_BANKS > 1 && !defined(CONFIG_ARC_FIRQ)
#error "Unsupported configuration: (RGF_NUM_BANKS > 1) and !ARC_FIRQ"
#endif
/*
* It's required to have more than one interrupt priority level to use second register bank
* - otherwise all interrupts will use same register bank. Such configuration isn't supported in
* software and it is not beneficial from the performance point of view.
*/
#if CONFIG_RGF_NUM_BANKS > 1 && CONFIG_NUM_IRQ_PRIO_LEVELS < 2
#error "Unsupported configuration: (RGF_NUM_BANKS > 1) and (NUM_IRQ_PRIO_LEVELS < 2)"
#endif
#if defined(CONFIG_ARC_FIRQ_STACK) && !defined(CONFIG_ARC_FIRQ)
#error "Unsupported configuration: ARC_FIRQ_STACK and !ARC_FIRQ"
#endif
#if defined(CONFIG_ARC_FIRQ_STACK) && CONFIG_RGF_NUM_BANKS < 2
#error "Unsupported configuration: ARC_FIRQ_STACK and (RGF_NUM_BANKS < 2)"
#endif
/* In case of ARC 2+2 secure mode enabled the firq are not supported by HW */
#if defined(CONFIG_ARC_FIRQ) && defined(CONFIG_ARC_HAS_SECURE)
#error "Unsupported configuration: ARC_FIRQ and ARC_HAS_SECURE"
#endif
#if defined(CONFIG_SMP) && !defined(CONFIG_MULTITHREADING)
#error "Non-multithreading mode isn't supported on SMP targets"
#endif
#ifndef _ASMLANGUAGE
#ifdef __cplusplus
extern "C" {
#endif
#ifdef CONFIG_64BIT
#define ARCH_STACK_PTR_ALIGN 8
#else
#define ARCH_STACK_PTR_ALIGN 4
#endif /* CONFIG_64BIT */
BUILD_ASSERT(CONFIG_ISR_STACK_SIZE % ARCH_STACK_PTR_ALIGN == 0,
"CONFIG_ISR_STACK_SIZE must be a multiple of ARCH_STACK_PTR_ALIGN");
BUILD_ASSERT(CONFIG_ARC_EXCEPTION_STACK_SIZE % ARCH_STACK_PTR_ALIGN == 0,
"CONFIG_ARC_EXCEPTION_STACK_SIZE must be a multiple of ARCH_STACK_PTR_ALIGN");
/* Indicate, for a minimally sized MPU region, how large it must be and what
* its base address must be aligned to.
*
* For regions that are NOT the minimum size, this define has no semantics
* on ARC MPUv2 as its regions must be power of two size and aligned to their
* own size. On ARC MPUv4, region sizes are arbitrary and this just indicates
* the required size granularity.
*/
#ifdef CONFIG_ARC_CORE_MPU
#if CONFIG_ARC_MPU_VER == 2
#define Z_ARC_MPU_ALIGN 2048
#elif (CONFIG_ARC_MPU_VER == 3) || (CONFIG_ARC_MPU_VER == 4) || \
(CONFIG_ARC_MPU_VER == 6) || (CONFIG_ARC_MPU_VER == 8)
#define Z_ARC_MPU_ALIGN 32
#else
#error "Unsupported MPU version"
#endif
#endif
#ifdef CONFIG_MPU_STACK_GUARD
#define Z_ARC_STACK_GUARD_SIZE Z_ARC_MPU_ALIGN
#else
#define Z_ARC_STACK_GUARD_SIZE 0
#endif
/* Kernel-only stacks have the following layout if a stack guard is enabled:
*
* +------------+ <- thread.stack_obj
* | Guard | } Z_ARC_STACK_GUARD_SIZE
* +------------+ <- thread.stack_info.start
* | Kernel |
* | stack |
* | |
* +............|
* | TLS | } thread.stack_info.delta
* +------------+ <- thread.stack_info.start + thread.stack_info.size
*/
#ifdef CONFIG_MPU_STACK_GUARD
#define ARCH_KERNEL_STACK_RESERVED Z_ARC_STACK_GUARD_SIZE
#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_ARC_MPU_ALIGN
#endif
#ifdef CONFIG_USERSPACE
/* Any thread running In user mode will have full access to the region denoted
* by thread.stack_info.
*
* Thread-local storage is at the very highest memory locations of this area.
* Memory for TLS and any initial random stack pointer offset is captured
* in thread.stack_info.delta.
*/
#ifdef CONFIG_MPU_STACK_GUARD
/* MPU guards are only supported with V3 MPU and later. In this configuration
* the stack object will contain the MPU guard, the privilege stack, and then
* the stack buffer in that order:
*
* +------------+ <- thread.stack_obj
* | Guard | } Z_ARC_STACK_GUARD_SIZE
* +------------+ <- thread.arch.priv_stack_start
* | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
* +------------+ <- thread.stack_info.start
* | Thread |
* | stack |
* | |
* +............|
* | TLS | } thread.stack_info.delta
* +------------+ <- thread.stack_info.start + thread.stack_info.size
*/
#define ARCH_THREAD_STACK_RESERVED (Z_ARC_STACK_GUARD_SIZE + \
CONFIG_PRIVILEGED_STACK_SIZE)
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) Z_ARC_MPU_ALIGN
/* We need to be able to exactly cover the stack buffer with an MPU region,
* so round its size up to the required granularity of the MPU
*/
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
(ROUND_UP((size), Z_ARC_MPU_ALIGN))
BUILD_ASSERT(CONFIG_PRIVILEGED_STACK_SIZE % Z_ARC_MPU_ALIGN == 0,
"improper privilege stack size");
#else /* !CONFIG_MPU_STACK_GUARD */
/* Userspace enabled, but supervisor stack guards are not in use */
#ifdef CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT
/* Use defaults for everything. The privilege elevation stack is located
* in another area of memory generated at build time by gen_kobject_list.py
*
* +------------+ <- thread.arch.priv_stack_start
* | Priv Stack | } K_KERNEL_STACK_LEN(CONFIG_PRIVILEGED_STACK_SIZE)
* +------------+
*
* +------------+ <- thread.stack_obj = thread.stack_info.start
* | Thread |
* | stack |
* | |
* +............|
* | TLS | } thread.stack_info.delta
* +------------+ <- thread.stack_info.start + thread.stack_info.size
*/
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
Z_POW2_CEIL(ROUND_UP((size), Z_ARC_MPU_ALIGN))
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) \
ARCH_THREAD_STACK_SIZE_ADJUST(size)
#define ARCH_THREAD_STACK_RESERVED 0
#else /* !CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT */
/* Reserved area of the thread object just contains the privilege stack:
*
* +------------+ <- thread.stack_obj = thread.arch.priv_stack_start
* | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
* +------------+ <- thread.stack_info.start
* | Thread |
* | stack |
* | |
* +............|
* | TLS | } thread.stack_info.delta
* +------------+ <- thread.stack_info.start + thread.stack_info.size
*/
#define ARCH_THREAD_STACK_RESERVED CONFIG_PRIVILEGED_STACK_SIZE
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
(ROUND_UP((size), Z_ARC_MPU_ALIGN))
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) Z_ARC_MPU_ALIGN
BUILD_ASSERT(CONFIG_PRIVILEGED_STACK_SIZE % Z_ARC_MPU_ALIGN == 0,
"improper privilege stack size");
#endif /* CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT */
#endif /* CONFIG_MPU_STACK_GUARD */
#else /* !CONFIG_USERSPACE */
#ifdef CONFIG_MPU_STACK_GUARD
/* Only supported on ARC MPU V3 and higher. Reserve some memory for the stack
* guard. This is just a minimally-sized region at the beginning of the stack
* object, which is programmed to produce an exception if written to.
*
* +------------+ <- thread.stack_obj
* | Guard | } Z_ARC_STACK_GUARD_SIZE
* +------------+ <- thread.stack_info.start
* | Thread |
* | stack |
* | |
* +............|
* | TLS | } thread.stack_info.delta
* +------------+ <- thread.stack_info.start + thread.stack_info.size
*/
#define ARCH_THREAD_STACK_RESERVED Z_ARC_STACK_GUARD_SIZE
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) Z_ARC_MPU_ALIGN
/* Default for ARCH_THREAD_STACK_SIZE_ADJUST */
#else /* !CONFIG_MPU_STACK_GUARD */
/* No stack guard, no userspace, Use defaults for everything. */
#endif /* CONFIG_MPU_STACK_GUARD */
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_ARC_MPU
/* Legacy case: retain containing extern "C" with C++ */
#include <zephyr/arch/arc/v2/mpu/arc_mpu.h>
#define K_MEM_PARTITION_P_NA_U_NA AUX_MPU_ATTR_N
#define K_MEM_PARTITION_P_RW_U_RW (AUX_MPU_ATTR_UW | AUX_MPU_ATTR_UR | \
AUX_MPU_ATTR_KW | AUX_MPU_ATTR_KR)
#define K_MEM_PARTITION_P_RW_U_RO (AUX_MPU_ATTR_UR | \
AUX_MPU_ATTR_KW | AUX_MPU_ATTR_KR)
#define K_MEM_PARTITION_P_RW_U_NA (AUX_MPU_ATTR_KW | AUX_MPU_ATTR_KR)
#define K_MEM_PARTITION_P_RO_U_RO (AUX_MPU_ATTR_UR | AUX_MPU_ATTR_KR)
#define K_MEM_PARTITION_P_RO_U_NA (AUX_MPU_ATTR_KR)
/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RWX_U_RWX (AUX_MPU_ATTR_UW | AUX_MPU_ATTR_UR | \
AUX_MPU_ATTR_KW | AUX_MPU_ATTR_KR | \
AUX_MPU_ATTR_KE | AUX_MPU_ATTR_UE)
#define K_MEM_PARTITION_P_RWX_U_RX (AUX_MPU_ATTR_UR | \
AUX_MPU_ATTR_KW | AUX_MPU_ATTR_KR | \
AUX_MPU_ATTR_KE | AUX_MPU_ATTR_UE)
#define K_MEM_PARTITION_P_RX_U_RX (AUX_MPU_ATTR_UR | \
AUX_MPU_ATTR_KR | \
AUX_MPU_ATTR_KE | AUX_MPU_ATTR_UE)
#define K_MEM_PARTITION_IS_WRITABLE(attr) \
({ \
int __is_writable__; \
switch (attr & (AUX_MPU_ATTR_UW | AUX_MPU_ATTR_KW)) { \
case (AUX_MPU_ATTR_UW | AUX_MPU_ATTR_KW): \
case AUX_MPU_ATTR_UW: \
case AUX_MPU_ATTR_KW: \
__is_writable__ = 1; \
break; \
default: \
__is_writable__ = 0; \
break; \
} \
__is_writable__; \
})
#define K_MEM_PARTITION_IS_EXECUTABLE(attr) \
((attr) & (AUX_MPU_ATTR_KE | AUX_MPU_ATTR_UE))
/*
* BUILD_ASSERT in case of MWDT is a bit more picky in performing compile-time check.
* For example it can't evaluate variable address at build time like GCC toolchain can do.
* That's why we provide custom _ARCH_MEM_PARTITION_ALIGN_CHECK implementation for MWDT toolchain
* with additional check for arguments availability in compile time.
*/
#ifdef __CCAC__
#define IS_BUILTIN_MWDT(val) __builtin_constant_p((uintptr_t)(val))
#if CONFIG_ARC_MPU_VER == 2 || CONFIG_ARC_MPU_VER == 3 || CONFIG_ARC_MPU_VER == 6
#define _ARCH_MEM_PARTITION_ALIGN_CHECK(start, size) \
BUILD_ASSERT(IS_BUILTIN_MWDT(size) ? !((size) & ((size) - 1)) : 1, \
"partition size must be power of 2"); \
BUILD_ASSERT(IS_BUILTIN_MWDT(size) ? (size) >= Z_ARC_MPU_ALIGN : 1, \
"partition size must be >= mpu address alignment."); \
BUILD_ASSERT(IS_BUILTIN_MWDT(size) ? IS_BUILTIN_MWDT(start) ? \
!((uintptr_t)(start) & ((size) - 1)) : 1 : 1, \
"partition start address must align with size.")
#elif CONFIG_ARC_MPU_VER == 4 || CONFIG_ARC_MPU_VER == 8
#define _ARCH_MEM_PARTITION_ALIGN_CHECK(start, size) \
BUILD_ASSERT(IS_BUILTIN_MWDT(size) ? (size) % Z_ARC_MPU_ALIGN == 0 : 1, \
"partition size must align with " STRINGIFY(Z_ARC_MPU_ALIGN)); \
BUILD_ASSERT(IS_BUILTIN_MWDT(size) ? (size) >= Z_ARC_MPU_ALIGN : 1, \
"partition size must be >= " STRINGIFY(Z_ARC_MPU_ALIGN)); \
BUILD_ASSERT(IS_BUILTIN_MWDT(start) ? (uintptr_t)(start) % Z_ARC_MPU_ALIGN == 0 : 1, \
"partition start address must align with " STRINGIFY(Z_ARC_MPU_ALIGN))
#endif
#else /* __CCAC__ */
#if CONFIG_ARC_MPU_VER == 2 || CONFIG_ARC_MPU_VER == 3 || CONFIG_ARC_MPU_VER == 6
#define _ARCH_MEM_PARTITION_ALIGN_CHECK(start, size) \
BUILD_ASSERT(!((size) & ((size) - 1)), \
"partition size must be power of 2"); \
BUILD_ASSERT((size) >= Z_ARC_MPU_ALIGN, \
"partition size must be >= mpu address alignment."); \
BUILD_ASSERT(!((uintptr_t)(start) & ((size) - 1)), \
"partition start address must align with size.")
#elif CONFIG_ARC_MPU_VER == 4 || CONFIG_ARC_MPU_VER == 8
#define _ARCH_MEM_PARTITION_ALIGN_CHECK(start, size) \
BUILD_ASSERT((size) % Z_ARC_MPU_ALIGN == 0, \
"partition size must align with " STRINGIFY(Z_ARC_MPU_ALIGN)); \
BUILD_ASSERT((size) >= Z_ARC_MPU_ALIGN, \
"partition size must be >= " STRINGIFY(Z_ARC_MPU_ALIGN)); \
BUILD_ASSERT((uintptr_t)(start) % Z_ARC_MPU_ALIGN == 0, \
"partition start address must align with " STRINGIFY(Z_ARC_MPU_ALIGN))
#endif
#endif /* __CCAC__ */
#endif /* CONFIG_ARC_MPU*/
/* Typedef for the k_mem_partition attribute*/
typedef uint32_t k_mem_partition_attr_t;
/* Emit a single architectural no-op instruction (compiler builtin,
 * available on both GCC and MWDT for ARC).
 */
static ALWAYS_INLINE void arch_nop(void)
{
	__builtin_arc_nop();
}
#ifndef CONFIG_XIP
/* Provided by the linker script. */
extern char __arc_rw_sram_size[];
#endif /* CONFIG_XIP */
/* Close the extern "C" block inside the !_ASMLANGUAGE guard, matching
 * where it was opened above. Previously the closing brace sat outside
 * the guard -- harmless in practice (__cplusplus is never defined when
 * preprocessing assembly), but unbalanced on its face.
 */
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARC_ARCH_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arc/arch.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,638 |
```objective-c
/*
*
*/
/**
* @file
* @brief Per-arch thread definition
*
* This file contains definitions for
*
* struct _thread_arch
* struct _callee_saved
*
* necessary to instantiate instances of struct k_thread.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARC_THREAD_H_
#define ZEPHYR_INCLUDE_ARCH_ARC_THREAD_H_
/*
* Reason a thread has relinquished control.
*/
#define _CAUSE_NONE 0
#define _CAUSE_COOP 1
#define _CAUSE_RIRQ 2
#define _CAUSE_FIRQ 3
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Callee-saved context kept in the thread object across a context
 * switch. Only the stack pointer is stored here; presumably the rest
 * of the register context lives on the thread's stack -- confirm
 * against the ARC context-switch code.
 */
struct _callee_saved {
	uintptr_t sp; /* r28 */
};
typedef struct _callee_saved _callee_saved_t;
/* Architecture-specific per-thread state embedded in struct k_thread. */
struct _thread_arch {
	/* one of the _CAUSE_xxxx definitions above */
	int32_t relinquish_cause;
#ifdef CONFIG_ARC_STACK_CHECKING
	/* High address of stack region, stack grows downward from this
	 * location. Used for hardware stack checking
	 */
	uintptr_t k_stack_base;
	uintptr_t k_stack_top;
#ifdef CONFIG_USERSPACE
	/* User-mode stack bounds, likewise for hardware stack checking */
	uintptr_t u_stack_base;
	uintptr_t u_stack_top;
#endif
#endif
#ifdef CONFIG_USERSPACE
	/* Start of this thread's privilege-elevation stack */
	uintptr_t priv_stack_start;
#endif
};
typedef struct _thread_arch _thread_arch_t;
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARC_THREAD_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arc/thread.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 324 |
```objective-c
/*
* Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARC_TOOL_COMPAT_H_
#define ZEPHYR_INCLUDE_ARCH_ARC_TOOL_COMPAT_H_
#ifdef _ASMLANGUAGE
/*
* GNU toolchain and MWDT (Metware) toolchain have different style for accessing
* arguments in assembly macro. Here is the preprocessor macro to handle the
* difference.
* __CCAC__ is a pre-defined macro of metaware compiler.
*/
#if defined(__CCAC__)
#define MACRO_ARG(x) x
#else
#define MACRO_ARG(x) \x
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARC_TOOL_COMPAT_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arc/tool-compat.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 164 |
```objective-c
/*
*
*/
/**
* @file
* @brief ARCv2 public exception handling
*
* ARC-specific kernel exception handling interface. Included by arc/arch.h.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARC_V2_EXCEPTION_H_
#define ZEPHYR_INCLUDE_ARCH_ARC_V2_EXCEPTION_H_
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __cplusplus
}
#endif
/* ARCv2 Exception vector numbers */
#define ARC_EV_RESET 0x0
#define ARC_EV_MEM_ERROR 0x1
#define ARC_EV_INS_ERROR 0x2
#define ARC_EV_MACHINE_CHECK 0x3
#define ARC_EV_TLB_MISS_I 0x4
#define ARC_EV_TLB_MISS_D 0x5
#define ARC_EV_PROT_V 0x6
#define ARC_EV_PRIVILEGE_V 0x7
#define ARC_EV_SWI 0x8
#define ARC_EV_TRAP 0x9
#define ARC_EV_EXTENSION 0xA
#define ARC_EV_DIV_ZERO 0xB
#define ARC_EV_DC_ERROR 0xC
#define ARC_EV_MISALIGNED 0xD
#define ARC_EV_VEC_UNIT 0xE
#endif /* ZEPHYR_INCLUDE_ARCH_ARC_V2_EXCEPTION_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arc/v2/exception.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 272 |
```objective-c
/*
*
*/
/**
* @file
* @brief ARC Cluster registers and accessors
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARC_CLUSTER_H_
#define ZEPHYR_INCLUDE_ARCH_ARC_CLUSTER_H_
#include <zephyr/arch/arc/v2/aux_regs.h>
#include <zephyr/sys/util_macro.h>
/* Cluster AUX */
#define _ARC_REG_CLN_BCR 0xcf
#define _ARC_CLNR_ADDR 0x640 /* CLN address for CLNR_DATA */
#define _ARC_CLNR_DATA 0x641 /* CLN data indicated by CLNR_ADDR */
#define _ARC_CLNR_DATA_NXT 0x642 /* CLNR_DATA and then CLNR_ADDR++ */
#define _ARC_CLNR_BCR_0 0xF61
#define _ARC_CLNR_BCR_1 0xF62
#define _ARC_CLNR_BCR_2 0xF63
#define _ARC_CLNR_SCM_BCR_0 0xF64
#define _ARC_CLNR_SCM_BCR_1 0xF65
#define _ARC_REG_CLN_BCR_VER_MAJOR_ARCV3_MIN 32 /* Minimal version of cluster in ARCv3 */
#define _ARC_CLN_BCR_VER_MAJOR_MASK 0xff
#define _ARC_CLNR_BCR_0_HAS_SCM BIT(29)
/* Cluster registers (not in the AUX address space - indirect access via CLNR_ADDR + CLNR_DATA) */
#define ARC_CLN_MST_NOC_0_BCR 0
#define ARC_CLN_MST_NOC_1_BCR 1
#define ARC_CLN_MST_NOC_2_BCR 2
#define ARC_CLN_MST_NOC_3_BCR 3
#define ARC_CLN_MST_PER_0_BCR 16
#define ARC_CLN_MST_PER_1_BCR 17
#define ARC_CLN_PER_0_BASE 2688
#define ARC_CLN_PER_0_SIZE 2689
#define ARC_CLN_PER_1_BASE 2690
#define ARC_CLN_PER_1_SIZE 2691
#define ARC_CLN_SCM_BCR_0 100
#define ARC_CLN_SCM_BCR_1 101
#define ARC_CLN_MST_NOC_0_0_ADDR 292
#define ARC_CLN_MST_NOC_0_0_SIZE 293
#define ARC_CLN_MST_NOC_0_1_ADDR 2560
#define ARC_CLN_MST_NOC_0_1_SIZE 2561
#define ARC_CLN_MST_NOC_0_2_ADDR 2562
#define ARC_CLN_MST_NOC_0_2_SIZE 2563
#define ARC_CLN_MST_NOC_0_3_ADDR 2564
#define ARC_CLN_MST_NOC_0_3_SIZE 2565
#define ARC_CLN_MST_NOC_0_4_ADDR 2566
#define ARC_CLN_MST_NOC_0_4_SIZE 2567
#define ARC_CLN_PER0_BASE 2688
#define ARC_CLN_PER0_SIZE 2689
#define ARC_CLN_SHMEM_ADDR 200
#define ARC_CLN_SHMEM_SIZE 201
#define ARC_CLN_CACHE_ADDR_LO0 204
#define ARC_CLN_CACHE_ADDR_LO1 205
#define ARC_CLN_CACHE_ADDR_HI0 206
#define ARC_CLN_CACHE_ADDR_HI1 207
/* Was 207, which aliased ARC_CLN_CACHE_ADDR_HI1. The cache registers
 * are laid out sequentially (ADDR 204..207, STATUS at 209), leaving
 * 208 for the command register -- confirm against the cluster
 * databook before relying on cache maintenance ops.
 */
#define ARC_CLN_CACHE_CMD 208
#define ARC_CLN_CACHE_CMD_OP_NOP 0b0000
#define ARC_CLN_CACHE_CMD_OP_LOOKUP 0b0001
#define ARC_CLN_CACHE_CMD_OP_PROBE 0b0010
#define ARC_CLN_CACHE_CMD_OP_IDX_INV 0b0101
#define ARC_CLN_CACHE_CMD_OP_IDX_CLN 0b0110
#define ARC_CLN_CACHE_CMD_OP_IDX_CLN_INV 0b0111
#define ARC_CLN_CACHE_CMD_OP_REG_INV 0b1001
#define ARC_CLN_CACHE_CMD_OP_REG_CLN 0b1010
#define ARC_CLN_CACHE_CMD_OP_REG_CLN_INV 0b1011
#define ARC_CLN_CACHE_CMD_OP_ADDR_INV 0b1101
#define ARC_CLN_CACHE_CMD_OP_ADDR_CLN 0b1110
#define ARC_CLN_CACHE_CMD_OP_ADDR_CLN_INV 0b1111
#define ARC_CLN_CACHE_CMD_INCR BIT(4)
#define ARC_CLN_CACHE_STATUS 209
#define ARC_CLN_CACHE_STATUS_BUSY BIT(23)
#define ARC_CLN_CACHE_STATUS_DONE BIT(24)
#define ARC_CLN_CACHE_STATUS_MASK BIT(26)
#define ARC_CLN_CACHE_STATUS_EN BIT(27)
#define ARC_CLN_CACHE_ERR 210
#define ARC_CLN_CACHE_ERR_ADDR0 211
#define ARC_CLN_CACHE_ERR_ADDR1 212
/* Read a cluster register via the indirect CLNR_ADDR/CLNR_DATA pair:
 * first program the register index into CLNR_ADDR, then read the
 * value from CLNR_DATA. As the _nolock suffix suggests, the two aux
 * accesses are not serialized here -- the caller presumably must hold
 * a lock to keep concurrent users from clobbering CLNR_ADDR.
 */
static inline unsigned int arc_cln_read_reg_nolock(unsigned int reg)
{
	z_arc_v2_aux_reg_write(_ARC_CLNR_ADDR, reg);
	return z_arc_v2_aux_reg_read(_ARC_CLNR_DATA);
}
/* Write a cluster register via the indirect CLNR_ADDR/CLNR_DATA pair;
 * same two-step, caller-locked protocol as arc_cln_read_reg_nolock().
 */
static inline void arc_cln_write_reg_nolock(unsigned int reg, unsigned int data)
{
	z_arc_v2_aux_reg_write(_ARC_CLNR_ADDR, reg);
	z_arc_v2_aux_reg_write(_ARC_CLNR_DATA, data);
}
#endif /* ZEPHYR_INCLUDE_ARCH_ARC_CLUSTER_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arc/cluster.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,185 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.