text
stringlengths
9
39.2M
dir
stringlengths
25
226
lang
stringclasses
163 values
created_date
timestamp[s]
updated_date
timestamp[s]
repo_name
stringclasses
751 values
repo_full_name
stringclasses
752 values
star
int64
1.01k
183k
len_tokens
int64
1
18.5M
```unknown config RISCV_ISA_RV32I bool help RV32I Base Integer Instruction Set - 32bit config RISCV_ISA_RV32E bool help RV32E Base Integer Instruction Set (Embedded) - 32bit config RISCV_ISA_RV64I bool default y if 64BIT help RV64I Base Integer Instruction Set - 64bit config RISCV_ISA_RV128I bool help RV128I Base Integer Instruction Set - 128bit config RISCV_ISA_EXT_M bool help (M) - Standard Extension for Integer Multiplication and Division Standard integer multiplication and division instruction extension, which is named "M" and contains instructions that multiply or divide values held in two integer registers. config RISCV_ISA_EXT_A bool help (A) - Standard Extension for Atomic Instructions The standard atomic instruction extension is denoted by instruction subset name "A", and contains instructions that atomically read-modify-write memory to support synchronization between multiple RISC-V threads running in the same memory space. config RISCV_ISA_EXT_F bool help (F) - Standard Extension for Single-Precision Floating-Point Standard instruction-set extension for single-precision floating-point, which is named "F" and adds single-precision floating-point computational instructions compliant with the IEEE 754-2008 arithmetic standard. config RISCV_ISA_EXT_D bool depends on RISCV_ISA_EXT_F help (D) - Standard Extension for Double-Precision Floating-Point Standard double-precision floating-point instruction-set extension, which is named "D" and adds double-precision floating-point computational instructions compliant with the IEEE 754-2008 arithmetic standard. 
config RISCV_ISA_EXT_G bool select RISCV_ISA_EXT_M select RISCV_ISA_EXT_A select RISCV_ISA_EXT_F select RISCV_ISA_EXT_D select RISCV_ISA_EXT_ZICSR select RISCV_ISA_EXT_ZIFENCEI help (IMAFDZicsr_Zifencei) IMAFDZicsr_Zifencei extensions config RISCV_ISA_EXT_Q bool depends on RISCV_ISA_RV64I depends on RISCV_ISA_EXT_F depends on RISCV_ISA_EXT_D help (Q) - Standard Extension for Quad-Precision Floating-Point Standard extension for 128-bit binary floating-point instructions compliant with the IEEE 754-2008 arithmetic standard. The 128-bit or quad-precision binary floatingpoint instruction subset is named "Q". config RISCV_ISA_EXT_C bool help (C) - Standard Extension for Compressed Instructions RISC-V standard compressed instruction set extension, named "C", which reduces static and dynamic code size by adding short 16-bit instruction encodings for common operations. config RISCV_ISA_EXT_ZICSR bool help (Zicsr) - Standard Extension for Control and Status Register (CSR) Instructions The "Zicsr" extension introduces support for the full set of CSR instructions that operate on CSRs registers. config RISCV_ISA_EXT_ZIFENCEI bool help (Zifencei) - Standard Extension for Instruction-Fetch Fence The "Zifencei" extension includes the FENCE.I instruction that provides explicit synchronization between writes to instruction memory and instruction fetches on the same hart. config RISCV_ISA_EXT_ZBA bool help (Zba) - Zba BitManip Extension The Zba instructions can be used to accelerate the generation of addresses that index into arrays of basic types (halfword, word, doubleword) using both unsigned word-sized and XLEN-sized indices: a shifted index is added to a base address. config RISCV_ISA_EXT_ZBB bool help (Zbb) - Zbb BitManip Extension (Basic bit-manipulation) The Zbb instructions can be used for basic bit-manipulation (logical with negate, count leading / trailing zero bits, count population, etc...). 
config RISCV_ISA_EXT_ZBC bool help (Zbc) - Zbc BitManip Extension (Carry-less multiplication) The Zbc instructions can be used for carry-less multiplication that is the multiplication in the polynomial ring over GF(2). config RISCV_ISA_EXT_ZBS bool help (Zbs) - Zbs BitManip Extension (Single-bit instructions) The Zbs instructions can be used for single-bit instructions that provide a mechanism to set, clear, invert, or extract a single bit in a register. ```
/content/code_sandbox/arch/riscv/Kconfig.isa
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,080
```c /* * */ #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/kernel.h> #include <kernel_arch_func.h> #include <zephyr/arch/arm64/mm.h> #include <zephyr/linker/linker-defs.h> #include <zephyr/logging/log.h> #include <zephyr/sys/check.h> #include <zephyr/sys/barrier.h> #include <zephyr/cache.h> #include <kernel_internal.h> #include <zephyr/mem_mgmt/mem_attr.h> #include <zephyr/dt-bindings/memory-attr/memory-attr-arm.h> LOG_MODULE_REGISTER(mpu, CONFIG_MPU_LOG_LEVEL); #define NODE_HAS_PROP_AND_OR(node_id, prop) \ DT_NODE_HAS_PROP(node_id, prop) || BUILD_ASSERT((DT_FOREACH_STATUS_OKAY_NODE_VARGS( NODE_HAS_PROP_AND_OR, zephyr_memory_region_mpu) false) == false, "`zephyr,memory-region-mpu` was deprecated in favor of `zephyr,memory-attr`"); #define MPU_DYNAMIC_REGION_AREAS_NUM 3 #if defined(CONFIG_USERSPACE) || defined(CONFIG_ARM64_STACK_PROTECTION) static struct dynamic_region_info sys_dyn_regions[CONFIG_MP_MAX_NUM_CPUS][MPU_DYNAMIC_REGION_AREAS_NUM]; static int sys_dyn_regions_num[CONFIG_MP_MAX_NUM_CPUS]; static void dynamic_regions_init(void); static int dynamic_areas_init(uintptr_t start, size_t size); static int flush_dynamic_regions_to_mpu(struct dynamic_region_info *dyn_regions, uint8_t region_num); #if defined(CONFIG_USERSPACE) #define MPU_DYNAMIC_REGIONS_AREA_START ((uintptr_t)&_app_smem_start) #else #define MPU_DYNAMIC_REGIONS_AREA_START ((uintptr_t)&__kernel_ram_start) #endif #define MPU_DYNAMIC_REGIONS_AREA_SIZE ((size_t)((uintptr_t)&__kernel_ram_end - \ MPU_DYNAMIC_REGIONS_AREA_START)) #endif /* * AArch64 Memory Model Feature Register 0 * Provides information about the implemented memory model and memory * management support in AArch64 state. 
* See Arm Architecture Reference Manual Supplement * Armv8, for Armv8-R AArch64 architecture profile, G1.3.7 * * ID_AA64MMFR0_MSA_FRAC, bits[55:52] * ID_AA64MMFR0_MSA, bits [51:48] */ #define ID_AA64MMFR0_MSA_msk (0xFFUL << 48U) #define ID_AA64MMFR0_PMSA_EN (0x1FUL << 48U) #define ID_AA64MMFR0_PMSA_VMSA_EN (0x2FUL << 48U) /* * Global status variable holding the number of HW MPU region indices, which * have been reserved by the MPU driver to program the static (fixed) memory * regions. */ static uint8_t static_regions_num; /* Get the number of supported MPU regions. */ static ALWAYS_INLINE uint8_t get_num_regions(void) { uint64_t type; type = read_mpuir_el1(); type = type & MPU_IR_REGION_Msk; return (uint8_t)type; } /* ARM Core MPU Driver API Implementation for ARM MPU */ /** * @brief enable the MPU * * On the SMP system, The function that enables MPU can not insert stack protector * code because the canary values read by the secondary CPUs before enabling MPU * and after enabling it are not equal due to cache coherence issues. */ FUNC_NO_STACK_PROTECTOR void arm_core_mpu_enable(void) { uint64_t val; val = read_sctlr_el1(); val |= SCTLR_M_BIT; write_sctlr_el1(val); barrier_dsync_fence_full(); barrier_isync_fence_full(); } /** * @brief disable the MPU */ void arm_core_mpu_disable(void) { uint64_t val; /* Force any outstanding transfers to complete before disabling MPU */ barrier_dmem_fence_full(); val = read_sctlr_el1(); val &= ~SCTLR_M_BIT; write_sctlr_el1(val); barrier_dsync_fence_full(); barrier_isync_fence_full(); } /* ARM MPU Driver Initial Setup * * Configure the cache-ability attributes for all the * different types of memory regions. 
*/ static void mpu_init(void) { /* Device region(s): Attribute-0 * Flash region(s): Attribute-1 * SRAM region(s): Attribute-2 * SRAM no cache-able regions(s): Attribute-3 */ uint64_t mair = MPU_MAIR_ATTRS; write_mair_el1(mair); barrier_dsync_fence_full(); barrier_isync_fence_full(); } /* * Changing the MPU region may change the cache related attribute and cause * cache coherence issues, so it's necessary to avoid invoking functions in such * critical scope to avoid memory access before the MPU regions are all configured. */ static ALWAYS_INLINE void mpu_set_region(uint32_t rnr, uint64_t rbar, uint64_t rlar) { write_prselr_el1(rnr); barrier_dsync_fence_full(); write_prbar_el1(rbar); write_prlar_el1(rlar); barrier_dsync_fence_full(); barrier_isync_fence_full(); } static ALWAYS_INLINE void mpu_clr_region(uint32_t rnr) { write_prselr_el1(rnr); barrier_dsync_fence_full(); /* * Have to set limit register first as the enable/disable bit of the * region is in the limit register. */ write_prlar_el1(0); write_prbar_el1(0); barrier_dsync_fence_full(); barrier_isync_fence_full(); } /* * This internal functions performs MPU region initialization. * * Changing the MPU region may change the cache related attribute and cause * cache coherence issues, so it's necessary to avoid invoking functions in such * critical scope to avoid memory access before the MPU regions are all configured. 
*/ static ALWAYS_INLINE void region_init(const uint32_t index, const struct arm_mpu_region *region_conf) { uint64_t rbar = region_conf->base & MPU_RBAR_BASE_Msk; uint64_t rlar = (region_conf->limit - 1) & MPU_RLAR_LIMIT_Msk; rbar |= region_conf->attr.rbar & (MPU_RBAR_XN_Msk | MPU_RBAR_AP_Msk | MPU_RBAR_SH_Msk); rlar |= (region_conf->attr.mair_idx << MPU_RLAR_AttrIndx_Pos) & MPU_RLAR_AttrIndx_Msk; rlar |= MPU_RLAR_EN_Msk; mpu_set_region(index, rbar, rlar); } #define _BUILD_REGION_CONF(reg, _ATTR) \ (struct arm_mpu_region) { .name = (reg).dt_name, \ .base = (reg).dt_addr, \ .limit = (reg).dt_addr + (reg).dt_size, \ .attr = _ATTR, \ } /* This internal function programs the MPU regions defined in the DT when using * the `zephyr,memory-attr = <( DT_MEM_ARM(...) )>` property. */ static int mpu_configure_regions_from_dt(uint8_t *reg_index) { const struct mem_attr_region_t *region; size_t num_regions; num_regions = mem_attr_get_regions(&region); for (size_t idx = 0; idx < num_regions; idx++) { struct arm_mpu_region region_conf; switch (DT_MEM_ARM_GET(region[idx].dt_attr)) { case DT_MEM_ARM_MPU_RAM: region_conf = _BUILD_REGION_CONF(region[idx], REGION_RAM_ATTR); break; #ifdef REGION_RAM_NOCACHE_ATTR case DT_MEM_ARM_MPU_RAM_NOCACHE: region_conf = _BUILD_REGION_CONF(region[idx], REGION_RAM_NOCACHE_ATTR); __ASSERT(!(region[idx].dt_attr & DT_MEM_CACHEABLE), "RAM_NOCACHE with DT_MEM_CACHEABLE attribute\n"); break; #endif #ifdef REGION_FLASH_ATTR case DT_MEM_ARM_MPU_FLASH: region_conf = _BUILD_REGION_CONF(region[idx], REGION_FLASH_ATTR); break; #endif #ifdef REGION_IO_ATTR case DT_MEM_ARM_MPU_IO: region_conf = _BUILD_REGION_CONF(region[idx], REGION_IO_ATTR); break; #endif default: /* Either the specified `ATTR_MPU_*` attribute does not * exists or the `REGION_*_ATTR` macro is not defined * for that attribute. 
*/ LOG_ERR("Invalid attribute for the region\n"); return -EINVAL; } region_init((*reg_index), (const struct arm_mpu_region *) &region_conf); (*reg_index)++; } return 0; } /* * @brief MPU default configuration * * This function here provides the default configuration mechanism * for the Memory Protection Unit (MPU). * * On the SMP system, The function that enables MPU can not insert stack protector * code because the canary values read by the secondary CPUs before enabling MPU * and after enabling it are not equal due to cache coherence issues. */ FUNC_NO_STACK_PROTECTOR void z_arm64_mm_init(bool is_primary_core) { uint64_t val; uint32_t r_index; uint8_t tmp_static_num; /* Current MPU code supports only EL1 */ val = read_currentel(); __ASSERT(GET_EL(val) == MODE_EL1, "Exception level not EL1, MPU not enabled!\n"); /* Check whether the processor supports MPU */ val = read_id_aa64mmfr0_el1() & ID_AA64MMFR0_MSA_msk; if ((val != ID_AA64MMFR0_PMSA_EN) && (val != ID_AA64MMFR0_PMSA_VMSA_EN)) { __ASSERT(0, "MPU not supported!\n"); return; } if (mpu_config.num_regions > get_num_regions()) { /* Attempt to configure more MPU regions than * what is supported by hardware. As this operation * is executed during system (pre-kernel) initialization, * we want to ensure we can detect an attempt to * perform invalid configuration. */ __ASSERT(0, "Request to configure: %u regions (supported: %u)\n", mpu_config.num_regions, get_num_regions()); return; } arm_core_mpu_disable(); /* Architecture-specific configuration */ mpu_init(); /* Program fixed regions configured at SOC definition. */ for (r_index = 0U; r_index < mpu_config.num_regions; r_index++) { region_init(r_index, &mpu_config.mpu_regions[r_index]); } /* Update the number of programmed MPU regions. */ tmp_static_num = mpu_config.num_regions; /* DT-defined MPU regions. 
*/ if (mpu_configure_regions_from_dt(&tmp_static_num) == -EINVAL) { __ASSERT(0, "Failed to allocate MPU regions from DT\n"); return; } arm_core_mpu_enable(); if (!is_primary_core) { /* * primary core might reprogram the sys_regions, so secondary cores * should re-flush the sys regions */ goto out; } /* Only primary core init the static_regions_num */ static_regions_num = tmp_static_num; #if defined(CONFIG_USERSPACE) || defined(CONFIG_ARM64_STACK_PROTECTION) dynamic_regions_init(); /* Only primary core do the dynamic_areas_init. */ int rc = dynamic_areas_init(MPU_DYNAMIC_REGIONS_AREA_START, MPU_DYNAMIC_REGIONS_AREA_SIZE); if (rc < 0) { __ASSERT(0, "Dynamic areas init fail"); return; } #endif out: #if defined(CONFIG_ARM64_STACK_PROTECTION) (void)flush_dynamic_regions_to_mpu(sys_dyn_regions[arch_curr_cpu()->id], sys_dyn_regions_num[arch_curr_cpu()->id]); #endif return; } #if defined(CONFIG_USERSPACE) || defined(CONFIG_ARM64_STACK_PROTECTION) static int insert_region(struct dynamic_region_info *dyn_regions, uint8_t region_num, uintptr_t start, size_t size, struct arm_mpu_region_attr *attr); static void arm_core_mpu_background_region_enable(void) { uint64_t val; val = read_sctlr_el1(); val |= SCTLR_BR_BIT; write_sctlr_el1(val); barrier_dsync_fence_full(); barrier_isync_fence_full(); } static void arm_core_mpu_background_region_disable(void) { uint64_t val; /* Force any outstanding transfers to complete before disabling MPU */ barrier_dmem_fence_full(); val = read_sctlr_el1(); val &= ~SCTLR_BR_BIT; write_sctlr_el1(val); barrier_dsync_fence_full(); barrier_isync_fence_full(); } static void dynamic_regions_init(void) { for (int cpuid = 0; cpuid < arch_num_cpus(); cpuid++) { for (int i = 0; i < MPU_DYNAMIC_REGION_AREAS_NUM; i++) { sys_dyn_regions[cpuid][i].index = -1; } } } static int dynamic_areas_init(uintptr_t start, size_t size) { const struct arm_mpu_region *region; struct dynamic_region_info *tmp_info; int ret = -ENOENT; uint64_t base = start; uint64_t limit = base + 
size; for (int cpuid = 0; cpuid < arch_num_cpus(); cpuid++) { /* Check the following searching does not overflow the room */ if (sys_dyn_regions_num[cpuid] + 1 > MPU_DYNAMIC_REGION_AREAS_NUM) { return -ENOSPC; } ret = -ENOENT; for (int i = 0; i < mpu_config.num_regions; i++) { region = &mpu_config.mpu_regions[i]; tmp_info = &sys_dyn_regions[cpuid][sys_dyn_regions_num[cpuid]]; if (base >= region->base && limit <= region->limit) { tmp_info->index = i; tmp_info->region_conf = *region; sys_dyn_regions_num[cpuid] += 1; /* find the region, reset ret to no error */ ret = 0; break; } } #if defined(CONFIG_ARM64_STACK_PROTECTION) ret = insert_region(sys_dyn_regions[cpuid], MPU_DYNAMIC_REGION_AREAS_NUM, (uintptr_t)z_interrupt_stacks[cpuid], Z_ARM64_STACK_GUARD_SIZE, NULL /* delete this region */); if (ret < 0) { break; } /* * No need to check here if (sys_dyn_regions[cpuid] + ret) overflows, * because the insert_region has checked it. */ sys_dyn_regions_num[cpuid] += ret; #endif } return ret < 0 ? 
ret : 0; } static void set_region(struct arm_mpu_region *region, uint64_t base, uint64_t limit, struct arm_mpu_region_attr *attr) { region->base = base; region->limit = limit; if (attr != NULL) { region->attr = *attr; } else { memset(&region->attr, 0, sizeof(struct arm_mpu_region_attr)); } } static void clear_region(struct arm_mpu_region *region) { set_region(region, 0, 0, NULL); } static int dup_dynamic_regions(struct dynamic_region_info *dst, int len) { size_t i; int num = sys_dyn_regions_num[arch_curr_cpu()->id]; if (num >= len) { LOG_ERR("system dynamic region nums too large."); return -EINVAL; } for (i = 0; i < num; i++) { dst[i] = sys_dyn_regions[arch_curr_cpu()->id][i]; } for (; i < len; i++) { clear_region(&dst[i].region_conf); dst[i].index = -1; } return num; } static struct dynamic_region_info *get_underlying_region(struct dynamic_region_info *dyn_regions, uint8_t region_num, uint64_t base, uint64_t limit) { for (int idx = 0; idx < region_num; idx++) { struct arm_mpu_region *region = &(dyn_regions[idx].region_conf); if (base >= region->base && limit <= region->limit) { return &(dyn_regions[idx]); } } return NULL; } static struct dynamic_region_info *find_available_region(struct dynamic_region_info *dyn_regions, uint8_t region_num) { return get_underlying_region(dyn_regions, region_num, 0, 0); } /* * return -ENOENT if there is no more available region * do nothing if attr is NULL */ static int _insert_region(struct dynamic_region_info *dyn_regions, uint8_t region_num, uint64_t base, uint64_t limit, struct arm_mpu_region_attr *attr) { struct dynamic_region_info *tmp_region; if (attr == NULL) { return 0; } tmp_region = find_available_region(dyn_regions, region_num); if (tmp_region == NULL) { return -ENOENT; } set_region(&tmp_region->region_conf, base, limit, attr); return 0; } static int insert_region(struct dynamic_region_info *dyn_regions, uint8_t region_num, uintptr_t start, size_t size, struct arm_mpu_region_attr *attr) { int ret = 0; /* base: inclusive, 
limit: exclusive */ uint64_t base = (uint64_t)start; uint64_t limit = base + size; struct dynamic_region_info *u_region; uint64_t u_base; uint64_t u_limit; struct arm_mpu_region_attr u_attr; int count = 0; u_region = get_underlying_region(dyn_regions, region_num, base, limit); if (u_region == NULL) { return -ENOENT; } /* restore the underlying region range and attr */ u_base = u_region->region_conf.base; u_limit = u_region->region_conf.limit; u_attr = u_region->region_conf.attr; clear_region(&u_region->region_conf); count--; /* if attr is NULL, meaning we are going to delete a region */ if (base == u_base && limit == u_limit) { /* * The new region overlaps entirely with the * underlying region. Simply update the attr. */ ret += _insert_region(dyn_regions, region_num, base, limit, attr); count++; } else if (base == u_base) { ret += _insert_region(dyn_regions, region_num, limit, u_limit, &u_attr); count++; ret += _insert_region(dyn_regions, region_num, base, limit, attr); count++; } else if (limit == u_limit) { ret += _insert_region(dyn_regions, region_num, u_base, base, &u_attr); count++; ret += _insert_region(dyn_regions, region_num, base, limit, attr); count++; } else { ret += _insert_region(dyn_regions, region_num, u_base, base, &u_attr); count++; ret += _insert_region(dyn_regions, region_num, base, limit, attr); count++; ret += _insert_region(dyn_regions, region_num, limit, u_limit, &u_attr); count++; } if (ret < 0) { return -ENOENT; } if (attr == NULL) { /* meanning we removed a region, so fix the count by decreasing 1 */ count--; } return count; } static int flush_dynamic_regions_to_mpu(struct dynamic_region_info *dyn_regions, uint8_t region_num) { __ASSERT(read_daif() & DAIF_IRQ_BIT, "mpu flushing must be called with IRQs disabled"); int reg_avail_idx = static_regions_num; if (region_num >= get_num_regions()) { LOG_ERR("Out-of-bounds error for mpu regions. 
" "region num: %d, total mpu regions: %d", region_num, get_num_regions()); return -ENOENT; } arm_core_mpu_background_region_enable(); /* * Clean the dynamic regions * Before cleaning them, we need to flush dyn_regions to memory, because we need to read it * in updating mpu region. */ sys_cache_data_flush_range(dyn_regions, sizeof(struct dynamic_region_info) * region_num); for (size_t i = reg_avail_idx; i < get_num_regions(); i++) { mpu_clr_region(i); } /* * flush the dyn_regions to MPU */ for (size_t i = 0; i < region_num; i++) { int region_idx = dyn_regions[i].index; /* * dyn_regions has two types of regions: * 1) The fixed dyn background region which has a real index. * 2) The normal region whose index will accumulate from * static_regions_num. * * Region_idx < 0 means not the fixed dyn background region. * In this case, region_idx should be the reg_avail_idx which * is accumulated from static_regions_num. */ if (region_idx < 0) { region_idx = reg_avail_idx++; } region_init(region_idx, &(dyn_regions[i].region_conf)); } arm_core_mpu_background_region_disable(); return 0; } static int configure_dynamic_mpu_regions(struct k_thread *thread) { __ASSERT(read_daif() & DAIF_IRQ_BIT, "must be called with IRQs disabled"); struct dynamic_region_info *dyn_regions = thread->arch.regions; const uint8_t max_region_num = ARM64_MPU_MAX_DYNAMIC_REGIONS; int region_num; int ret = 0; /* Busy wait if it is flushing somewhere else */ while (!atomic_cas(&thread->arch.flushing, 0, 1)) { } thread->arch.region_num = 0; ret = dup_dynamic_regions(dyn_regions, max_region_num); if (ret < 0) { goto out; } region_num = ret; #if defined(CONFIG_USERSPACE) struct k_mem_domain *mem_domain = thread->mem_domain_info.mem_domain; if (mem_domain) { LOG_DBG("configure domain: %p", mem_domain); uint32_t num_parts = mem_domain->num_partitions; uint32_t max_parts = CONFIG_MAX_DOMAIN_PARTITIONS; struct k_mem_partition *partition; for (size_t i = 0; i < max_parts && num_parts > 0; i++, num_parts--) { 
partition = &mem_domain->partitions[i]; if (partition->size == 0) { continue; } LOG_DBG("set region 0x%lx 0x%lx\n", partition->start, partition->size); ret = insert_region(dyn_regions, max_region_num, partition->start, partition->size, &partition->attr); if (ret < 0) { goto out; } region_num += ret; } } LOG_DBG("configure user thread %p's context", thread); if ((thread->base.user_options & K_USER) != 0) { /* K_USER thread stack needs a region */ ret = insert_region(dyn_regions, max_region_num, thread->stack_info.start, thread->stack_info.size, &K_MEM_PARTITION_P_RW_U_RW); if (ret < 0) { goto out; } region_num += ret; } #endif #if defined(CONFIG_ARM64_STACK_PROTECTION) uintptr_t guard_start; if (thread->arch.stack_limit != 0) { guard_start = (uintptr_t)thread->arch.stack_limit - Z_ARM64_STACK_GUARD_SIZE; ret = insert_region(dyn_regions, max_region_num, guard_start, Z_ARM64_STACK_GUARD_SIZE, NULL); if (ret < 0) { goto out; } region_num += ret; } #endif /* * There is no need to check if region_num is overflow the uint8_t, * because the insert_region make sure there is enough room to store a region, * otherwise the insert_region will return a negtive error number */ thread->arch.region_num = (uint8_t)region_num; if (thread == _current) { ret = flush_dynamic_regions_to_mpu(dyn_regions, region_num); } out: atomic_clear(&thread->arch.flushing); return ret < 0 ? 
ret : 0; } #endif /* defined(CONFIG_USERSPACE) || defined(CONFIG_ARM64_STACK_PROTECTION) */ #if defined(CONFIG_USERSPACE) int arch_mem_domain_max_partitions_get(void) { int remaining_regions = get_num_regions() - static_regions_num + 1; /* * Check remianing regions, should more than ARM64_MPU_MAX_DYNAMIC_REGIONS * which equals CONFIG_MAX_DOMAIN_PARTITIONS + necessary regions (stack, guard) */ if (remaining_regions < ARM64_MPU_MAX_DYNAMIC_REGIONS) { LOG_WRN("MPU regions not enough, demand: %d, regions: %d", ARM64_MPU_MAX_DYNAMIC_REGIONS, remaining_regions); return remaining_regions; } return CONFIG_MAX_DOMAIN_PARTITIONS; } static int configure_domain_partitions(struct k_mem_domain *domain) { struct k_thread *thread; int ret; SYS_DLIST_FOR_EACH_CONTAINER(&domain->mem_domain_q, thread, mem_domain_info.mem_domain_q_node) { ret = configure_dynamic_mpu_regions(thread); if (ret != 0) { return ret; } } #ifdef CONFIG_SMP /* the thread could be running on another CPU right now */ z_arm64_mem_cfg_ipi(); #endif return 0; } int arch_mem_domain_partition_add(struct k_mem_domain *domain, uint32_t partition_id) { ARG_UNUSED(partition_id); return configure_domain_partitions(domain); } int arch_mem_domain_partition_remove(struct k_mem_domain *domain, uint32_t partition_id) { ARG_UNUSED(partition_id); return configure_domain_partitions(domain); } int arch_mem_domain_thread_add(struct k_thread *thread) { int ret = 0; ret = configure_dynamic_mpu_regions(thread); #ifdef CONFIG_SMP if (ret == 0 && thread != _current) { /* the thread could be running on another CPU right now */ z_arm64_mem_cfg_ipi(); } #endif return ret; } int arch_mem_domain_thread_remove(struct k_thread *thread) { int ret = 0; ret = configure_dynamic_mpu_regions(thread); #ifdef CONFIG_SMP if (ret == 0 && thread != _current) { /* the thread could be running on another CPU right now */ z_arm64_mem_cfg_ipi(); } #endif return ret; } #endif /* CONFIG_USERSPACE */ #if defined(CONFIG_USERSPACE) || 
defined(CONFIG_ARM64_STACK_PROTECTION) void z_arm64_thread_mem_domains_init(struct k_thread *thread) { unsigned int key = arch_irq_lock(); configure_dynamic_mpu_regions(thread); arch_irq_unlock(key); } void z_arm64_swap_mem_domains(struct k_thread *thread) { int cpuid = arch_curr_cpu()->id; /* Busy wait if it is configuring somewhere else */ while (!atomic_cas(&thread->arch.flushing, 0, 1)) { } if (thread->arch.region_num == 0) { (void)flush_dynamic_regions_to_mpu(sys_dyn_regions[cpuid], sys_dyn_regions_num[cpuid]); } else { (void)flush_dynamic_regions_to_mpu(thread->arch.regions, thread->arch.region_num); } atomic_clear(&thread->arch.flushing); } #endif ```
/content/code_sandbox/arch/arm64/core/cortex_r/arm_mpu.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,059
```unknown menu "RISCV Options" depends on RISCV config ARCH string default "riscv" config FLOAT_HARD bool "Hard-float calling convention" default y depends on FPU help This option enables the hard-float calling convention. config RISCV_GP bool "RISC-V global pointer relative addressing" default n help Use global pointer relative addressing for small globals declared anywhere in the executable. It can benefit performance and reduce the code size. Note: To support this feature, RISC-V SoC needs to initialize global pointer at program start or earlier than any instruction using GP relative addressing. config RISCV_ALWAYS_SWITCH_THROUGH_ECALL bool "Do not use mret outside a trap handler context" depends on MULTITHREADING help Use mret instruction only when in a trap handler. This is for RISC-V implementations that require every mret to be balanced with an ecall. This is not required by the RISC-V spec and most people should say n here to minimize context switching overhead. config RISCV_EXCEPTION_STACK_TRACE bool default y imply THREAD_STACK_INFO help Internal config to enable runtime stack traces on fatal exceptions. menu "RISCV Processor Options" config INCLUDE_RESET_VECTOR bool "Jumps to __initialize directly" help Select 'y' here to use the Zephyr provided default implementation that jumps to `__initialize` directly. Otherwise a SOC needs to provide its custom `__reset` routine. config RISCV_PRIVILEGED bool select ARCH_HAS_RAMFUNC_SUPPORT if XIP help Option selected by SoCs implementing the RISC-V privileged ISA. config RISCV_SOC_HAS_ISR_STACKING bool depends on !USERSPACE help Enable low-level SOC-specific hardware stacking / unstacking operations during ISR. This hidden option needs to be selected by SoC if this feature is supported. Some SOCs implement a mechanism for which, on interrupt handling, part of the context is automatically saved by the hardware on the stack according to a custom ESF format. 
The same part of the context is automatically restored by hardware on mret. Enabling this option requires that the SoC provides a soc_isr_stacking.h header which defines the following: - SOC_ISR_SW_STACKING: macro guarded by _ASMLANGUAGE called by the IRQ wrapper assembly code on ISR entry to save in the ESF the remaining part of the context not pushed already on the stack by the hardware. - SOC_ISR_SW_UNSTACKING: macro guarded by _ASMLANGUAGE called by the IRQ wrapper assembly code on ISR exit to restore the part of the context from the ESF that won't be restored by hardware on mret. - SOC_ISR_STACKING_ESF_DECLARE: structure declaration for the ESF guarded by !_ASMLANGUAGE. The ESF should be defined to account for the hardware stacked registers in the proper order as they are saved on the stack by the hardware, and the registers saved by the software macros. The structure must be called 'struct arch_esf'. config RISCV_SOC_HAS_CUSTOM_IRQ_HANDLING bool help This allows the SoC to overwrite the irq handling. If enabled, the function __soc_handle_all_irqs has to be implemented. It shall service and clear all pending interrupts. config RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS bool help Hidden option to allow SoC to overwrite arch_irq_lock(), arch_irq_unlock() and arch_irq_unlocked() functions with platform-specific versions named z_soc_irq_lock(), z_soc_irq_unlock() and z_soc_irq_unlocked(). Enable this hidden option and specialize the z_soc_* functions when the RISC-V SoC needs to do something different and more than reading and writing the mstatus register to lock and unlock the IRQs. config RISCV_SOC_HAS_CUSTOM_SYS_IO bool help Hidden option to allow SoC to overwrite sys_read*(), sys_write*() functions with platform-specific versions named z_soc_sys_read*() and z_soc_sys_write*(). Enable this hidden option and specialize the z_soc_* functions when the RISC-V SoC needs to do something different and more than reading and writing the registers. 
config RISCV_SOC_CONTEXT_SAVE bool "SOC-based context saving in IRQ handlers" select RISCV_SOC_OFFSETS help Enable low-level SOC-specific context management, for SOCs with extra state that must be saved when entering an interrupt/exception, and restored on exit. If unsure, leave this at the default value. Enabling this option requires that the SoC provide a soc_context.h header which defines the following macros: - SOC_ESF_MEMBERS: structure component declarations to allocate space for. The last such declaration should not end in a semicolon, for portability. The generic RISC-V architecture code will allocate space for these members in a "struct soc_esf" type (typedefed to soc_esf_t), which will be available if arch.h is included. - SOC_ESF_INIT: structure contents initializer for struct soc_esf state. The last initialized member should not end in a comma. The generic architecture IRQ wrapper will also call \_\_soc_save_context and \_\_soc_restore_context routines at ISR entry and exit, respectively. These should typically be implemented in assembly. If they were C functions, they would have these signatures: ``void __soc_save_context(soc_esf_t *state);`` ``void __soc_restore_context(soc_esf_t *state);`` The calls obey standard calling conventions; i.e., the state pointer address is in a0, and ra contains the return address. config RISCV_SOC_OFFSETS bool "SOC-based offsets" help Enabling this option requires that the SoC provide a soc_offsets.h header which defines the following macros: - GEN_SOC_OFFSET_SYMS(): a macro which expands to GEN_OFFSET_SYM(soc_esf_t, soc_specific_member) calls to ensure offset macros for SOC_ESF_MEMBERS are defined in offsets.h. The last one should not end in a semicolon. See gen_offset.h for more details. config RISCV_HAS_PLIC bool depends on RISCV_PRIVILEGED help Does the SOC provide support for a Platform Level Interrupt Controller (PLIC). 
config RISCV_HAS_CLIC bool depends on RISCV_PRIVILEGED help Does the SOC provide support for a Core-Local Interrupt Controller (CLIC). config RISCV_SOC_EXCEPTION_FROM_IRQ bool help Option selected by SoCs that require a custom mechanism to check if an exception is the result of an interrupt or not. If selected, __soc_is_irq() needs to be implemented by the SoC. config RISCV_SOC_INTERRUPT_INIT bool "SOC-based interrupt initialization" help Enable SOC-based interrupt initialization (call soc_interrupt_init, within _IntLibInit when enabled) config RISCV_MCAUSE_EXCEPTION_MASK hex default 0x7FFFFFFFFFFFFFFF if 64BIT default 0x7FFFFFFF help Specify the bits to use for exception code in mcause register. config RISCV_GENERIC_TOOLCHAIN bool "Compile using generic riscv32 toolchain" default y help Compile using generic riscv32 toolchain. Allow SOCs that have custom extended riscv ISA to still compile with generic riscv32 toolchain. config GEN_ISR_TABLES default y config GEN_IRQ_VECTOR_TABLE default n config RISCV_RESERVED_IRQ_ISR_TABLES_OFFSET int default 0 depends on GEN_ISR_TABLES help On some RISCV platform the first interrupt vectors are primarly intended for inter-hart interrupt signaling and so retained for that purpose and not available. When this option is set, all the IRQ vectors are shifted by this offset value when installed into the software ISR table and the IRQ vector table. CONFIG_NUM_IRQS must be properly sized to take into account this offset. This is a hidden option which needs to be set per architecture and left alone. config NUM_IRQS int config RV_BOOT_HART int "Starting HART ID" default 0 help This option sets the starting HART ID for the SMP core. For RISC-V systems such as MPFS and FU540 this would be set to 1 to skip the E51 HART 0 as it is not usable in SMP configurations. config RISCV_HART_MASK int default -1 help Configures the mask for the HART ID. For RISC-V systems with HART ID starting from non-zero value, i.e. 
	  128, 129, .. (0x80, 0x81, ..), this can be configured to
	  127 (0x7f) such that we can extract the bits that start from 0.

config RISCV_PMP
	bool "RISC-V PMP Support"
	select THREAD_STACK_INFO
	select CPU_HAS_MPU
	select ARCH_HAS_USERSPACE
	select ARCH_HAS_STACK_PROTECTION
	select MPU
	select SRAM_REGION_PERMISSIONS
	select ARCH_MEM_DOMAIN_SYNCHRONOUS_API if USERSPACE
	select ARCH_MEM_DOMAIN_DATA if USERSPACE
	select THREAD_LOCAL_STORAGE if USERSPACE
	select ARCH_MEM_DOMAIN_SUPPORTS_ISOLATED_STACKS
	select MEM_DOMAIN_ISOLATED_STACKS
	help
	  MCU implements Physical Memory Protection.

if RISCV_PMP

config PMP_SLOTS
	int "Number of PMP slots"
	default 8
	help
	  This is the number of PMP entries implemented by the hardware.
	  Typical values are 8 or 16.

config PMP_NO_TOR
	bool
	help
	  Set this if TOR (Top Of Range) mode is not supported.

config PMP_NO_NA4
	bool
	help
	  Set this if NA4 (Naturally Aligned 4-byte) mode is not supported.

config PMP_NO_NAPOT
	bool
	help
	  Set this if NAPOT (Naturally Aligned Power Of Two) is not supported.

config PMP_POWER_OF_TWO_ALIGNMENT
	bool "Enforce power-of-two alignment on PMP memory areas" if !PMP_NO_TOR
	default y if TEST_USERSPACE
	default y if (PMP_SLOTS = 8)
	default y if PMP_NO_TOR
	select MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT
	select GEN_PRIV_STACKS
	help
	  This option reduces the PMP slot usage but increases
	  memory consumption. Useful when enabling userspace mode with
	  many memory domains and/or few PMP slots available.

config PMP_GRANULARITY
	int "The granularity of PMP address matching"
	default 8 if (PMP_NO_TOR && PMP_NO_NA4)
	default 4
	help
	  The granularity must be a power of 2 greater than or equal to 4
	  (i.e. 4, 8, 16, ...), but if neither TOR mode nor NA4 mode is
	  supported, the minimum granularity is 8.
endif #RISCV_PMP config PMP_STACK_GUARD def_bool y depends on MULTITHREADING depends on HW_STACK_PROTECTION config PMP_STACK_GUARD_MIN_SIZE int "Stack Guard area size" depends on PMP_STACK_GUARD default 1024 if 64BIT default 512 help The Hardware Stack Protection implements a guard area at the bottom of the stack using the PMP to catch stack overflows by marking that guard area not accessible. This is the size of the guard area. This should be large enough to catch any sudden jump in stack pointer decrement, plus some wiggle room to accommodate the eventual overflow exception stack usage. # Implement the null pointer detection using the Physical Memory Protection # (PMP) Unit. config NULL_POINTER_EXCEPTION_DETECTION_PMP bool "Use PMP for null pointer exception detection" depends on RISCV_PMP help Null pointer dereference detection implemented using PMP functionality. if NULL_POINTER_EXCEPTION_DETECTION_PMP config NULL_POINTER_EXCEPTION_REGION_SIZE hex "Inaccessible region to implement null pointer detection" default 0x10 help Use a PMP slot to make region (starting at address 0x0) inaccessible for detecting null pointer dereferencing (raising a CPU access fault). Minimum is 4 bytes. endif # NULL_POINTER_EXCEPTION_DETECTION_PMP endmenu config MAIN_STACK_SIZE default 4096 if 64BIT default 2048 if PMP_STACK_GUARD config TEST_EXTRA_STACK_SIZE default 1536 config CMSIS_THREAD_MAX_STACK_SIZE default 1024 if 64BIT config CMSIS_V2_THREAD_MAX_STACK_SIZE default 1024 if 64BIT config ARCH_IRQ_VECTOR_TABLE_ALIGN default 256 config RISCV_TRAP_HANDLER_ALIGNMENT int "Alignment of RISC-V trap handler in bytes" default 64 if RISCV_HAS_CLIC default 4 help This value configures the alignment of RISC-V trap handling code. The requirement for a particular alignment arises from the format of MTVEC register which is RISC-V platform-specific. The minimum alignment is 4 bytes according to the Spec. 
config GEN_IRQ_VECTOR_TABLE select RISCV_VECTORED_MODE if RISCV_PRIVILEGED config ARCH_HAS_SINGLE_THREAD_SUPPORT default y if !SMP rsource "Kconfig.isa" endmenu ```
/content/code_sandbox/arch/riscv/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,994
```unknown
/*
 * RISC-V thread context switch routine.
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/util.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>

#include "asm_macros.inc"

/*
 * Convenience macros for loading/storing register states.
 *
 * "op" is either the sr (store) or lr (load) pseudo-op from asm_macros.inc;
 * "reg" is the base register holding the struct k_thread pointer. RV_E()
 * lines apply to both RV32E and full register sets, RV_I() lines only when
 * the s2..s11 registers exist (non-RV32E).
 */
#define DO_CALLEE_SAVED(op, reg) \
	RV_E(	op ra,  _thread_offset_to_ra(reg)	);\
	RV_E(	op s0,  _thread_offset_to_s0(reg)	);\
	RV_E(	op s1,  _thread_offset_to_s1(reg)	);\
	RV_I(	op s2,  _thread_offset_to_s2(reg)	);\
	RV_I(	op s3,  _thread_offset_to_s3(reg)	);\
	RV_I(	op s4,  _thread_offset_to_s4(reg)	);\
	RV_I(	op s5,  _thread_offset_to_s5(reg)	);\
	RV_I(	op s6,  _thread_offset_to_s6(reg)	);\
	RV_I(	op s7,  _thread_offset_to_s7(reg)	);\
	RV_I(	op s8,  _thread_offset_to_s8(reg)	);\
	RV_I(	op s9,  _thread_offset_to_s9(reg)	);\
	RV_I(	op s10, _thread_offset_to_s10(reg)	);\
	RV_I(	op s11, _thread_offset_to_s11(reg)	)

GTEXT(z_riscv_switch)
GTEXT(z_thread_mark_switched_in)
GTEXT(z_riscv_configure_stack_guard)
GTEXT(z_riscv_fpu_thread_context_switch)

/* void z_riscv_switch(k_thread_t *switch_to, k_thread_t *switch_from) */
SECTION_FUNC(TEXT, z_riscv_switch)

	/* Save the old thread's callee-saved registers */
	DO_CALLEE_SAVED(sr, a1)

	/* Save the old thread's stack pointer */
	sr sp, _thread_offset_to_sp(a1)

	/* Set thread->switch_handle = thread to mark completion */
	sr a1, ___thread_t_switch_handle_OFFSET(a1)

	/* Get the new thread's stack pointer */
	lr sp, _thread_offset_to_sp(a0)

#if defined(CONFIG_THREAD_LOCAL_STORAGE)
	/* Get the new thread's tls pointer */
	lr tp, _thread_offset_to_tls(a0)
#endif

#if defined(CONFIG_FPU_SHARING)
	/* Preserve a0 across following call. s0 is not yet restored. */
	mv s0, a0
	call z_riscv_fpu_thread_context_switch
	mv a0, s0
#endif

#if defined(CONFIG_PMP_STACK_GUARD)
	/* Stack guard has priority over user space for PMP usage. */
	mv s0, a0
	call z_riscv_pmp_stackguard_enable
	mv a0, s0
#elif defined(CONFIG_USERSPACE)
	/*
	 * When stackguard is not enabled, we need to configure the PMP only
	 * at context switch time as the PMP is not in effect while in m-mode.
	 * (it is done on every exception return otherwise).
	 */
	lb t0, _thread_offset_to_user_options(a0)
	andi t0, t0, K_USER
	beqz t0, not_user_task
	mv s0, a0
	call z_riscv_pmp_usermode_enable
	mv a0, s0
not_user_task:
#endif

#if CONFIG_INSTRUMENT_THREAD_SWITCHING
	mv s0, a0
	call z_thread_mark_switched_in
	mv a0, s0
#endif

	/* Restore the new thread's callee-saved registers */
	DO_CALLEE_SAVED(lr, a0)

	/* Return to arch_switch() or _irq_wrapper() */
	ret
```
/content/code_sandbox/arch/riscv/core/switch.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
810
```c
/*
 * Lazy FPU context switching for RISC-V (CONFIG_FPU_SHARING).
 *
 * Written by: Nicolas Pitre
 */

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <kernel_arch_interface.h>
#include <zephyr/sys/atomic.h>

/* to be found in fpu.S */
extern void z_riscv_fpu_save(struct z_riscv_fp_context *saved_fp_context);
extern void z_riscv_fpu_restore(struct z_riscv_fp_context *saved_fp_context);

#define FPU_DEBUG 0

#if FPU_DEBUG

/*
 * Debug traces have to be produced without printk() or any other functions
 * using a va_list as va_start() may copy the FPU registers that could be
 * used to pass float arguments, and that would trigger an FPU access trap.
 * Note: Apparently gcc doesn't use float regs with variadic functions on
 * RISC-V even if -mabi is used with f or d so this precaution might be
 * unnecessary. But better be safe than sorry especially for debugging code.
 */

#include <string.h>

static void DBG(char *msg, struct k_thread *th)
{
	char buf[80], *p;
	unsigned int v;

	strcpy(buf, "CPU# exc# ");
	buf[3] = '0' + _current_cpu->id;
	buf[8] = '0' + _current->arch.exception_depth;
	strcat(buf, _current->name);
	strcat(buf, ": ");
	strcat(buf, msg);
	strcat(buf, " ");
	strcat(buf, th->name);
	/* append the first byte of the FP save area as two hex digits */
	v = *(unsigned char *)&th->arch.saved_fp_context;
	p = buf + strlen(buf);
	*p++ = ' ';
	*p++ = ((v >> 4) < 10) ? ((v >> 4) + '0') : ((v >> 4) - 10 + 'a');
	*p++ = ((v & 15) < 10) ? ((v & 15) + '0') : ((v & 15) - 10 + 'a');
	*p++ = '\n';
	*p = 0;
	k_str_out(buf, p - buf);
}

#else

static inline void DBG(char *msg, struct k_thread *t) { }

#endif /* FPU_DEBUG */

/*
 * Turn off FPU access in mstatus, remembering the previous clean/dirty
 * state in _current_cpu->arch.fpu_state for a possible later flush.
 * Must be called with IRQs disabled.
 */
static void z_riscv_fpu_disable(void)
{
	unsigned long status = csr_read(mstatus);

	__ASSERT((status & MSTATUS_IEN) == 0, "must be called with IRQs disabled");

	if ((status & MSTATUS_FS) != 0) {
		csr_clear(mstatus, MSTATUS_FS);

		/* remember its clean/dirty state */
		_current_cpu->arch.fpu_state = (status & MSTATUS_FS);
	}
}

/*
 * Claim FPU ownership for _current and reload its saved FP register
 * content. Must be called with IRQs disabled and FPU access disabled
 * (i.e. after any previous owner has been flushed).
 */
static void z_riscv_fpu_load(void)
{
	__ASSERT((csr_read(mstatus) & MSTATUS_IEN) == 0,
		 "must be called with IRQs disabled");
	__ASSERT((csr_read(mstatus) & MSTATUS_FS) == 0,
		 "must be called with FPU access disabled");

	/* become new owner */
	atomic_ptr_set(&_current_cpu->arch.fpu_owner, _current);

	/* restore our content */
	csr_set(mstatus, MSTATUS_FS_INIT);
	z_riscv_fpu_restore(&_current->arch.saved_fp_context);
	DBG("restore", _current);
}

/*
 * Flush FPU content and clear ownership. If the saved FPU state is "clean"
 * then we know the in-memory copy is up to date and skip the FPU content
 * transfer. The saved FPU state is updated upon disabling FPU access so
 * we require that this be called only when the FPU is disabled.
 *
 * This is called locally and also from flush_fpu_ipi_handler().
 */
void arch_flush_local_fpu(void)
{
	__ASSERT((csr_read(mstatus) & MSTATUS_IEN) == 0,
		 "must be called with IRQs disabled");
	__ASSERT((csr_read(mstatus) & MSTATUS_FS) == 0,
		 "must be called with FPU access disabled");

	struct k_thread *owner = atomic_ptr_get(&_current_cpu->arch.fpu_owner);

	if (owner != NULL) {
		bool dirty = (_current_cpu->arch.fpu_state == MSTATUS_FS_DIRTY);

		if (dirty) {
			/* turn on FPU access */
			csr_set(mstatus, MSTATUS_FS_CLEAN);

			/* save current owner's content */
			z_riscv_fpu_save(&owner->arch.saved_fp_context);
		}

		/* dirty means active use */
		owner->arch.fpu_recently_used = dirty;

		/* disable FPU access */
		csr_clear(mstatus, MSTATUS_FS);

		/* release ownership */
		atomic_ptr_clear(&_current_cpu->arch.fpu_owner);
		DBG("disable", owner);
	}
}

#ifdef CONFIG_SMP
/*
 * Evict "thread"'s live FPU context from whichever CPU currently owns
 * it, so that its in-memory copy becomes authoritative. May spin waiting
 * for a remote CPU when flushing _current's own context (see below).
 * Must be called with IRQs disabled.
 */
static void flush_owned_fpu(struct k_thread *thread)
{
	__ASSERT((csr_read(mstatus) & MSTATUS_IEN) == 0,
		 "must be called with IRQs disabled");

	int i;
	atomic_ptr_val_t owner;

	/* search all CPUs for the owner we want */
	unsigned int num_cpus = arch_num_cpus();

	for (i = 0; i < num_cpus; i++) {
		owner = atomic_ptr_get(&_kernel.cpus[i].arch.fpu_owner);
		if (owner != thread) {
			continue;
		}

		/* we found it live on CPU i */
		if (i == _current_cpu->id) {
			z_riscv_fpu_disable();
			arch_flush_local_fpu();
			break;
		}

		/* the FPU context is live on another CPU */
		arch_flush_fpu_ipi(i);

		/*
		 * Wait for it only if this is about the thread
		 * currently running on this CPU. Otherwise the
		 * other CPU running some other thread could regain
		 * ownership the moment it is removed from it and
		 * we would be stuck here.
		 *
		 * Also, if this is for the thread running on this
		 * CPU, then we preemptively flush any live context
		 * on this CPU as well since we're likely to
		 * replace it, and this avoids a deadlock where
		 * two CPUs want to pull each other's FPU context.
		 */
		if (thread == _current) {
			z_riscv_fpu_disable();
			arch_flush_local_fpu();
			do {
				arch_nop();
				owner = atomic_ptr_get(&_kernel.cpus[i].arch.fpu_owner);
			} while (owner == thread);
		}
		break;
	}
}
#endif

void z_riscv_fpu_enter_exc(void)
{
	/* always deny FPU access whenever an exception is entered */
	z_riscv_fpu_disable();
}

/*
 * Process the FPU trap.
 *
 * This usually means that FP regs belong to another thread. Save them
 * to that thread's save area and restore the current thread's content.
 *
 * We also get here when FP regs are used while in exception as FP access
 * is always disabled by default in that case. If so we save the FPU content
 * to the owning thread and simply enable FPU access. Exceptions should be
 * short and don't have persistent register contexts when they're done so
 * there is nothing to save/restore for that context... as long as we
 * don't get interrupted that is. To ensure that we mask interrupts to
 * the triggering exception context.
 *
 * Note that the exception depth count was not incremented before this call
 * as no further exceptions are expected before returning to normal mode.
 */
void z_riscv_fpu_trap(struct arch_esf *esf)
{
	__ASSERT((esf->mstatus & MSTATUS_FS) == 0 &&
		 (csr_read(mstatus) & MSTATUS_FS) == 0,
		 "called despite FPU being accessible");

	/* save current owner's content  if any */
	arch_flush_local_fpu();

	if (_current->arch.exception_depth > 0) {
		/*
		 * We were already in exception when the FPU access trapped.
		 * We give it access and prevent any further IRQ recursion
		 * by disabling IRQs as we wouldn't be able to preserve the
		 * interrupted exception's FPU context.
		 */
		esf->mstatus &= ~MSTATUS_MPIE_EN;

		/* make it accessible to the returning context */
		esf->mstatus |= MSTATUS_FS_INIT;

		return;
	}

#ifdef CONFIG_SMP
	/*
	 * Make sure the FPU context we need isn't live on another CPU.
	 * The current CPU's FPU context is NULL at this point.
	 */
	flush_owned_fpu(_current);
#endif

	/* make it accessible and clean to the returning context */
	esf->mstatus |= MSTATUS_FS_CLEAN;

	/* and load it with corresponding content */
	z_riscv_fpu_load();
}

/*
 * Perform lazy FPU context switching by simply granting or denying
 * access to FP regs based on FPU ownership before leaving the last
 * exception level in case of exceptions, or during a thread context
 * switch with the exception level of the new thread being 0.
 * If current thread doesn't own the FP regs then it will trap on its
 * first access and then the actual FPU context switching will occur.
 */
static bool fpu_access_allowed(unsigned int exc_update_level)
{
	__ASSERT((csr_read(mstatus) & MSTATUS_IEN) == 0,
		 "must be called with IRQs disabled");

	if (_current->arch.exception_depth == exc_update_level) {
		/* We're about to execute non-exception code */
		if (_current_cpu->arch.fpu_owner == _current) {
			/* everything is already in place */
			return true;
		}
		if (_current->arch.fpu_recently_used) {
			/*
			 * Before this thread was context-switched out,
			 * it made active use of the FPU, but someone else
			 * took it away in the mean time. Let's preemptively
			 * claim it back to avoid the likely exception trap
			 * to come otherwise.
			 */
			z_riscv_fpu_disable();
			arch_flush_local_fpu();
#ifdef CONFIG_SMP
			flush_owned_fpu(_current);
#endif
			z_riscv_fpu_load();
			_current_cpu->arch.fpu_state = MSTATUS_FS_CLEAN;
			return true;
		}
		return false;
	}
	/*
	 * Any new exception level should always trap on FPU
	 * access as we want to make sure IRQs are disabled before
	 * granting it access (see z_riscv_fpu_trap() documentation).
	 */
	return false;
}

/*
 * This is called on every exception exit except for z_riscv_fpu_trap().
 * In that case the exception level of interest is 1 (soon to be 0).
 */
void z_riscv_fpu_exit_exc(struct arch_esf *esf)
{
	if (fpu_access_allowed(1)) {
		esf->mstatus &= ~MSTATUS_FS;
		esf->mstatus |= _current_cpu->arch.fpu_state;
	} else {
		esf->mstatus &= ~MSTATUS_FS;
	}
}

/*
 * This is called from z_riscv_context_switch(). FPU access may be granted
 * only if exception level is 0. If we switch to a thread that is still in
 * some exception context then FPU access would be re-evaluated at exception
 * exit time via z_riscv_fpu_exit_exc().
 */
void z_riscv_fpu_thread_context_switch(void)
{
	if (fpu_access_allowed(0)) {
		csr_clear(mstatus, MSTATUS_FS);
		csr_set(mstatus, _current_cpu->arch.fpu_state);
	} else {
		z_riscv_fpu_disable();
	}
}

/* Force "thread"'s FPU context to memory and revoke its FPU ownership. */
int arch_float_disable(struct k_thread *thread)
{
	if (thread != NULL) {
		unsigned int key = arch_irq_lock();

#ifdef CONFIG_SMP
		flush_owned_fpu(thread);
#else
		if (thread == _current_cpu->arch.fpu_owner) {
			z_riscv_fpu_disable();
			arch_flush_local_fpu();
		}
#endif

		arch_irq_unlock(key);
	}

	return 0;
}

int arch_float_enable(struct k_thread *thread, unsigned int options)
{
	/* floats are always enabled automatically at the moment */
	return 0;
}
```
/content/code_sandbox/arch/riscv/core/fpu.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,665
```c
/*
 * RISC-V thread creation and user-mode transition support.
 */

#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/arch/riscv/csr.h>
#include <stdio.h>
#include <pmp.h>

#ifdef CONFIG_USERSPACE

/*
 * Per-thread (TLS) variable indicating whether execution is in user mode.
 */
__thread uint8_t is_user_mode;
#endif

/*
 * Build the initial stack frame (an exception stack frame) for a new
 * thread so that it starts execution through the exception-return path
 * via z_riscv_thread_start().
 */
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3)
{
	extern void z_riscv_thread_start(void);
	struct arch_esf *stack_init;

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
	const struct soc_esf soc_esf_init = {SOC_ESF_INIT};
#endif

	/* Initial stack frame for thread */
	stack_init = (struct arch_esf *)Z_STACK_PTR_ALIGN(
		Z_STACK_PTR_TO_FRAME(struct arch_esf, stack_ptr)
		);

	/* Setup the initial stack frame */
	stack_init->a0 = (unsigned long)entry;
	stack_init->a1 = (unsigned long)p1;
	stack_init->a2 = (unsigned long)p2;
	stack_init->a3 = (unsigned long)p3;

	/*
	 * Following the RISC-V architecture,
	 * the MSTATUS register (used to globally enable/disable interrupt),
	 * as well as the MEPC register (used to by the core to save the
	 * value of the program counter at which an interrupt/exception occurs)
	 * need to be saved on the stack, upon an interrupt/exception
	 * and restored prior to returning from the interrupt/exception.
	 * This shall allow to handle nested interrupts.
	 *
	 * Given that thread startup happens through the exception exit
	 * path, initially set:
	 * 1) MSTATUS to MSTATUS_DEF_RESTORE in the thread stack to enable
	 *    interrupts when the newly created thread will be scheduled;
	 * 2) MEPC to the address of the z_thread_entry in the thread
	 *    stack.
	 * Hence, when going out of an interrupt/exception/context-switch,
	 * after scheduling the newly created thread:
	 * 1) interrupts will be enabled, as the MSTATUS register will be
	 *    restored following the MSTATUS value set within the thread stack;
	 * 2) the core will jump to z_thread_entry, as the program
	 *    counter will be restored following the MEPC value set within the
	 *    thread stack.
	 */
	stack_init->mstatus = MSTATUS_DEF_RESTORE;

#if defined(CONFIG_FPU_SHARING)
	/* thread birth happens through the exception return path */
	thread->arch.exception_depth = 1;
#elif defined(CONFIG_FPU)
	/* Unshared FP mode: enable FPU of each thread. */
	stack_init->mstatus |= MSTATUS_FS_INIT;
#endif

#if defined(CONFIG_USERSPACE)
	/* Clear user thread context */
	z_riscv_pmp_usermode_init(thread);
	thread->arch.priv_stack_start = 0;
#endif /* CONFIG_USERSPACE */

	/* Assign thread entry point and mstatus.MPRV mode. */
	if (IS_ENABLED(CONFIG_USERSPACE)
	    && (thread->base.user_options & K_USER)) {
		/* User thread */
		stack_init->mepc = (unsigned long)k_thread_user_mode_enter;
	} else {
		/* Supervisor thread */
		stack_init->mepc = (unsigned long)z_thread_entry;

#if defined(CONFIG_PMP_STACK_GUARD)
		/* Enable PMP in mstatus.MPRV mode for RISC-V machine mode
		 * if thread is supervisor thread.
		 */
		stack_init->mstatus |= MSTATUS_MPRV;
#endif /* CONFIG_PMP_STACK_GUARD */
	}

#if defined(CONFIG_PMP_STACK_GUARD)
	/* Setup PMP regions of PMP stack guard of thread. */
	z_riscv_pmp_stackguard_prepare(thread);
#endif /* CONFIG_PMP_STACK_GUARD */

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
	stack_init->soc_context = soc_esf_init;
#endif

	thread->callee_saved.sp = (unsigned long)stack_init;

	/* where to go when returning from z_riscv_switch() */
	thread->callee_saved.ra = (unsigned long)z_riscv_thread_start;

	/* our switch handle is the thread pointer itself */
	thread->switch_handle = thread;
}

#ifdef CONFIG_USERSPACE

/*
 * User space entry function
 *
 * This function is the entry point to user mode from privileged execution.
 * The conversion is one way, and threads which transition to user mode do
 * not transition back later, unless they are doing system calls.
 */
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
					void *p1, void *p2, void *p3)
{
	unsigned long top_of_user_stack, top_of_priv_stack;
	unsigned long status;

	/* Set up privileged stack */
#ifdef CONFIG_GEN_PRIV_STACKS
	_current->arch.priv_stack_start =
		(unsigned long)z_priv_stack_find(_current->stack_obj);
	/* remove the stack guard from the main stack */
	_current->stack_info.start -= K_THREAD_STACK_RESERVED;
	_current->stack_info.size += K_THREAD_STACK_RESERVED;
#else
	_current->arch.priv_stack_start = (unsigned long)_current->stack_obj;
#endif /* CONFIG_GEN_PRIV_STACKS */
	top_of_priv_stack = Z_STACK_PTR_ALIGN(_current->arch.priv_stack_start +
					      K_KERNEL_STACK_RESERVED +
					      CONFIG_PRIVILEGED_STACK_SIZE);

	top_of_user_stack = Z_STACK_PTR_ALIGN(
				_current->stack_info.start +
				_current->stack_info.size -
				_current->stack_info.delta);

	status = csr_read(mstatus);

	/* Set next CPU status to user mode */
	status = INSERT_FIELD(status, MSTATUS_MPP, PRV_U);
	/* Enable IRQs for user mode */
	status = INSERT_FIELD(status, MSTATUS_MPIE, 1);
	/* Disable IRQs for m-mode until the mode switch */
	status = INSERT_FIELD(status, MSTATUS_MIE, 0);

	csr_write(mstatus, status);
	csr_write(mepc, z_thread_entry);

#ifdef CONFIG_PMP_STACK_GUARD
	/* reconfigure as the kernel mode stack will be different */
	z_riscv_pmp_stackguard_prepare(_current);
#endif

	/* Set up Physical Memory Protection */
	z_riscv_pmp_usermode_prepare(_current);
	z_riscv_pmp_usermode_enable(_current);

	/* preserve stack pointer for next exception entry */
	arch_curr_cpu()->arch.user_exc_sp = top_of_priv_stack;

	is_user_mode = true;

	/* hand off entry point and args in a0-a3 per the calling convention */
	register void *a0 __asm__("a0") = user_entry;
	register void *a1 __asm__("a1") = p1;
	register void *a2 __asm__("a2") = p2;
	register void *a3 __asm__("a3") = p3;

	/* switch to the user stack and drop to u-mode via mret */
	__asm__ volatile (
	"mv sp, %4; mret"
	:
	: "r" (a0), "r" (a1), "r" (a2), "r" (a3), "r" (top_of_user_stack)
	: "memory");

	CODE_UNREACHABLE;
}

#endif /* CONFIG_USERSPACE */

#ifndef CONFIG_MULTITHREADING

K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS,
			     CONFIG_ISR_STACK_SIZE);
K_THREAD_STACK_DECLARE(z_main_stack, CONFIG_MAIN_STACK_SIZE);

/*
 * Single-threaded startup: point CPU 0 at its IRQ stack, switch to the
 * main stack, enable IRQs and jump directly to the main entry point.
 */
FUNC_NORETURN void z_riscv_switch_to_main_no_multithreading(k_thread_entry_t main_entry,
							    void *p1, void *p2, void *p3)
{
	void *main_stack;

	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	_kernel.cpus[0].id = 0;
	_kernel.cpus[0].irq_stack = (K_KERNEL_STACK_BUFFER(z_interrupt_stacks[0]) +
				     K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[0]));

	main_stack = (K_THREAD_STACK_BUFFER(z_main_stack) +
		      K_THREAD_STACK_SIZEOF(z_main_stack));

	irq_unlock(MSTATUS_IEN);

	__asm__ volatile (
	"mv sp, %0; jalr ra, %1, 0"
	:
	: "r" (main_stack), "r" (main_entry)
	: "memory");

	/* infinite loop */
	irq_lock();
	while (true) {
	}

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
#endif /* !CONFIG_MULTITHREADING */
```
/content/code_sandbox/arch/riscv/core/thread.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,805
```c
/*
 * RISC-V IRQ offload: run a routine in interrupt context.
 */

#include <zephyr/irq_offload.h>
#include <zephyr/arch/riscv/syscall.h>

/*
 * Execute "routine(parameter)" in interrupt context by issuing the
 * RV_ECALL_IRQ_OFFLOAD ecall; the routine pointer and its argument are
 * passed as the two syscall arguments.
 */
void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
{
	arch_syscall_invoke2((uintptr_t)routine, (uintptr_t)parameter, RV_ECALL_IRQ_OFFLOAD);
}
```
/content/code_sandbox/arch/riscv/core/irq_offload.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
74
```unknown
/*
 * PMP CSR write helper for RISC-V.
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/cpu.h>
#include "asm_macros.inc"

/* Base CSR numbers of the pmpcfg and pmpaddr register files */
#define CSR_PMPCFG_BASE		0x3a0
#define CSR_PMPADDR_BASE	0x3b0

/*
 * Prototype:
 *
 * void z_riscv_write_pmp_entries(unsigned int start,          // a0
 *				  unsigned int end,            // a1
 *				  bool clear_trailing_entries, // a2
 *				  unsigned long *pmp_addr,     // a3
 *				  unsigned long *pmp_cfg)      // a4
 *
 * Called from pmp.c to write a range of PMP entries.
 *
 * PMP registers are accessed with the csr instruction which only takes an
 * immediate value as the actual register. In order to avoid traversing
 * the whole register list, we use the start index to jump directly to the
 * location corresponding to the start of the wanted range. For this to work
 * we disallow compressed instructions so the update block sizes are easily
 * known (luckily they're all power-of-2's simplifying the code further).
 *
 * start < end && end <= CONFIG_PMP_SLOTS must be true.
 */

GTEXT(z_riscv_write_pmp_entries)
SECTION_FUNC(TEXT, z_riscv_write_pmp_entries)

	/* jump into the unrolled pmpaddr write sequence at entry "start" */
	la t0, pmpaddr_store
	slli t1, a0, 4		/* 16-byte instruction blocks */
	add t0, t0, t1
	jr t0

pmpaddr_store:

	.option push
	.option norvc		/* keep each block exactly 16 bytes */
	.set _index, 0
	.rept CONFIG_PMP_SLOTS
	lr t0, (RV_REGSIZE * _index)(a3)
	li t1, _index + 1
	csrw (CSR_PMPADDR_BASE + _index), t0
	beq t1, a1, pmpaddr_done
	.set _index, _index + 1
	.endr
	.option pop

pmpaddr_done:

	/*
	 * Move to the pmpcfg space:
	 * a0 = a0 / RV_REGSIZE
	 * a1 = (a1 + RV_REGSIZE - 1) / RV_REGSIZE
	 */
	la t0, pmpcfg_store
	srli a0, a0, RV_REGSHIFT
	slli t1, a0, 4		/* 16-byte instruction blocks */
	add t0, t0, t1
	addi a1, a1, RV_REGSIZE - 1
	srli a1, a1, RV_REGSHIFT
	jr t0

pmpcfg_store:

	.option push
	.option norvc		/* keep each block exactly 16 bytes */
	.set _index, 0
	.rept (CONFIG_PMP_SLOTS / RV_REGSIZE)
	lr t0, (RV_REGSIZE * _index)(a4)
	addi a0, a0, 1
	csrw (CSR_PMPCFG_BASE + RV_REGSIZE/4 * _index), t0
	beq a0, a1, pmpcfg_done
	.set _index, _index + 1
	.endr
	.option pop

pmpcfg_done:

	/* optionally zero the pmpcfg registers past the written range */
	beqz a2, done
	la t0, pmpcfg_zerotail
	slli a0, a0, 2		/* 4-byte instruction blocks */
	add t0, t0, a0
	jr t0

pmpcfg_zerotail:

	.option push
	.option norvc		/* keep each block exactly 4 bytes */
	.set _index, 0
	.rept (CONFIG_PMP_SLOTS / RV_REGSIZE)
	csrw (CSR_PMPCFG_BASE + RV_REGSIZE/4 * _index), zero
	.set _index, _index + 1
	.endr
	.option pop

done:
	ret
```
/content/code_sandbox/arch/riscv/core/pmp.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
843
```c
/*
 * Weak default reboot stub for RISC-V.
 */

/**
 * @file
 * @brief RISC-V reboot interface
 */

#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sys/util.h>

/**
 * @brief Reset the system
 *
 * This is stub function to avoid build error with CONFIG_REBOOT=y
 * RISC-V specification does not have a common interface for system reset.
 * Each RISC-V SoC that has reset feature should implement own reset function.
 *
 * @param type reboot type (ignored by this no-op stub)
 */
void __weak sys_arch_reboot(int type)
{
	ARG_UNUSED(type);
}
```
/content/code_sandbox/arch/riscv/core/reboot.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
119
```c
/*
 * RISC-V architecture-specific coredump register block output.
 */

#include <string.h>
#include <zephyr/debug/coredump.h>

/* Header version selects the 32-bit vs 64-bit register block layout. */
#ifndef CONFIG_64BIT
#define	ARCH_HDR_VER	1
#else
#define	ARCH_HDR_VER	2
#endif

/*
 * Register snapshot written to the coredump backend; layout must stay in
 * sync with what the coredump/GDB-stub tooling expects for this header
 * version, hence __packed.
 */
struct riscv_arch_block {
#ifdef CONFIG_64BIT
	struct {
		uint64_t ra;
		uint64_t tp;
		uint64_t t0;
		uint64_t t1;
		uint64_t t2;
		uint64_t a0;
		uint64_t a1;
		uint64_t a2;
		uint64_t a3;
		uint64_t a4;
		uint64_t a5;
		uint64_t a6;
		uint64_t a7;
		uint64_t t3;
		uint64_t t4;
		uint64_t t5;
		uint64_t t6;
		uint64_t pc;
	} r;
#else /* !CONFIG_64BIT */
	struct {
		uint32_t ra;
		uint32_t tp;
		uint32_t t0;
		uint32_t t1;
		uint32_t t2;
		uint32_t a0;
		uint32_t a1;
		uint32_t a2;
		uint32_t a3;
		uint32_t a4;
		uint32_t a5;
#if !defined(CONFIG_RISCV_ISA_RV32E)
		uint32_t a6;
		uint32_t a7;
		uint32_t t3;
		uint32_t t4;
		uint32_t t5;
		uint32_t t6;
#endif /* !CONFIG_RISCV_ISA_RV32E */
		uint32_t pc;
	} r;
#endif /* CONFIG_64BIT */
} __packed;

/*
 * This might be too large for stack space if defined
 * inside function. So do it here.
 */
static struct riscv_arch_block arch_blk;

/*
 * Emit the architecture header followed by the register block built from
 * the exception stack frame. Does nothing when esf is NULL.
 */
void arch_coredump_info_dump(const struct arch_esf *esf)
{
	struct coredump_arch_hdr_t hdr = {
		.id = COREDUMP_ARCH_HDR_ID,
		.hdr_version = ARCH_HDR_VER,
		.num_bytes = sizeof(arch_blk),
	};

	/* Nothing to process */
	if (esf == NULL) {
		return;
	}

	(void)memset(&arch_blk, 0, sizeof(arch_blk));

	/*
	 * 33 registers expected by GDB. Not all are in ESF but the GDB stub will need
	 * to send all 33 as one packet. The stub will need to send undefined for
	 * registers not presented in coredump.
	 *
	 * NOTE(review): tp is declared in the block but never filled from the
	 * ESF here, so it is always sent as zero — confirm whether the ESF
	 * carries tp and whether it should be copied.
	 */
	arch_blk.r.ra = esf->ra;
	arch_blk.r.t0 = esf->t0;
	arch_blk.r.t1 = esf->t1;
	arch_blk.r.t2 = esf->t2;
	arch_blk.r.a0 = esf->a0;
	arch_blk.r.a1 = esf->a1;
	arch_blk.r.a2 = esf->a2;
	arch_blk.r.a3 = esf->a3;
	arch_blk.r.a4 = esf->a4;
	arch_blk.r.a5 = esf->a5;
#if !defined(CONFIG_RISCV_ISA_RV32E)
	arch_blk.r.t3 = esf->t3;
	arch_blk.r.t4 = esf->t4;
	arch_blk.r.t5 = esf->t5;
	arch_blk.r.t6 = esf->t6;
	arch_blk.r.a6 = esf->a6;
	arch_blk.r.a7 = esf->a7;
#endif /* !CONFIG_RISCV_ISA_RV32E */
	/* the faulting instruction address doubles as the captured pc */
	arch_blk.r.pc = esf->mepc;

	/* Send for output */
	coredump_buffer_output((uint8_t *)&hdr, sizeof(hdr));
	coredump_buffer_output((uint8_t *)&arch_blk, sizeof(arch_blk));
}

/* Identify this coredump as targeting RISC-V to the parsing tools. */
uint16_t arch_coredump_tgt_code_get(void)
{
	return COREDUMP_TGT_RISC_V;
}
```
/content/code_sandbox/arch/riscv/core/coredump.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
858
```linker script
/*
 * Force the linker to retain all .vectors.* input sections: KEEP()
 * prevents them from being discarded by --gc-sections.
 */
KEEP(*(.vectors.*))
```
/content/code_sandbox/arch/riscv/core/vector_table.ld
linker script
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
11
```unknown
/*
 * FP register file save/restore for RISC-V.
 *
 * Written by: Nicolas Pitre
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/offsets.h>

/* Pick double- or single-precision FP load/store instructions. */
#ifdef CONFIG_CPU_HAS_FPU_DOUBLE_PRECISION
#define LOAD	fld
#define STORE	fsd
#else
#define LOAD	flw
#define STORE	fsw
#endif

/*
 * Apply "op" (LOAD or STORE) to every FP register against the
 * z_riscv_fp_context structure pointed at by "ptr".
 */
#define DO_FP_REGS(op, ptr) \
	op fa0,  __z_riscv_fp_context_t_fa0_OFFSET (ptr); \
	op fa1,  __z_riscv_fp_context_t_fa1_OFFSET (ptr); \
	op fa2,  __z_riscv_fp_context_t_fa2_OFFSET (ptr); \
	op fa3,  __z_riscv_fp_context_t_fa3_OFFSET (ptr); \
	op fa4,  __z_riscv_fp_context_t_fa4_OFFSET (ptr); \
	op fa5,  __z_riscv_fp_context_t_fa5_OFFSET (ptr); \
	op fa6,  __z_riscv_fp_context_t_fa6_OFFSET (ptr); \
	op fa7,  __z_riscv_fp_context_t_fa7_OFFSET (ptr); \
	op fs0,  __z_riscv_fp_context_t_fs0_OFFSET (ptr); \
	op fs1,  __z_riscv_fp_context_t_fs1_OFFSET (ptr); \
	op fs2,  __z_riscv_fp_context_t_fs2_OFFSET (ptr); \
	op fs3,  __z_riscv_fp_context_t_fs3_OFFSET (ptr); \
	op fs4,  __z_riscv_fp_context_t_fs4_OFFSET (ptr); \
	op fs5,  __z_riscv_fp_context_t_fs5_OFFSET (ptr); \
	op fs6,  __z_riscv_fp_context_t_fs6_OFFSET (ptr); \
	op fs7,  __z_riscv_fp_context_t_fs7_OFFSET (ptr); \
	op fs8,  __z_riscv_fp_context_t_fs8_OFFSET (ptr); \
	op fs9,  __z_riscv_fp_context_t_fs9_OFFSET (ptr); \
	op fs10, __z_riscv_fp_context_t_fs10_OFFSET(ptr); \
	op fs11, __z_riscv_fp_context_t_fs11_OFFSET(ptr); \
	op ft0,  __z_riscv_fp_context_t_ft0_OFFSET (ptr); \
	op ft1,  __z_riscv_fp_context_t_ft1_OFFSET (ptr); \
	op ft2,  __z_riscv_fp_context_t_ft2_OFFSET (ptr); \
	op ft3,  __z_riscv_fp_context_t_ft3_OFFSET (ptr); \
	op ft4,  __z_riscv_fp_context_t_ft4_OFFSET (ptr); \
	op ft5,  __z_riscv_fp_context_t_ft5_OFFSET (ptr); \
	op ft6,  __z_riscv_fp_context_t_ft6_OFFSET (ptr); \
	op ft7,  __z_riscv_fp_context_t_ft7_OFFSET (ptr); \
	op ft8,  __z_riscv_fp_context_t_ft8_OFFSET (ptr); \
	op ft9,  __z_riscv_fp_context_t_ft9_OFFSET (ptr); \
	op ft10, __z_riscv_fp_context_t_ft10_OFFSET(ptr); \
	op ft11, __z_riscv_fp_context_t_ft11_OFFSET(ptr)

/* void z_riscv_fpu_save(struct z_riscv_fp_context *ctx) — a0 = ctx */
GTEXT(z_riscv_fpu_save)
SECTION_FUNC(TEXT, z_riscv_fpu_save)

	/* read fcsr first: the FP register stores below don't alter it */
	frcsr t0
	DO_FP_REGS(STORE, a0)
	sw t0, __z_riscv_fp_context_t_fcsr_OFFSET(a0)
	ret

/* void z_riscv_fpu_restore(struct z_riscv_fp_context *ctx) — a0 = ctx */
GTEXT(z_riscv_fpu_restore)
SECTION_FUNC(TEXT, z_riscv_fpu_restore)

	DO_FP_REGS(LOAD, a0)
	lw t0, __z_riscv_fp_context_t_fcsr_OFFSET(a0)
	fscsr t0
	ret
```
/content/code_sandbox/arch/riscv/core/fpu.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
833
```unknown /* * */ #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #include <offsets_short.h> #include <zephyr/arch/cpu.h> #include <zephyr/sys/util.h> #include <zephyr/kernel.h> #include <zephyr/syscall.h> #include <zephyr/arch/riscv/csr.h> #include <zephyr/arch/riscv/irq.h> #include <zephyr/arch/riscv/syscall.h> #include "asm_macros.inc" #ifdef CONFIG_RISCV_SOC_HAS_ISR_STACKING #include <soc_isr_stacking.h> #endif /* Convenience macro for loading/storing register states. */ #define DO_CALLER_SAVED(op) \ RV_E( op t0, __struct_arch_esf_t0_OFFSET(sp) );\ RV_E( op t1, __struct_arch_esf_t1_OFFSET(sp) );\ RV_E( op t2, __struct_arch_esf_t2_OFFSET(sp) );\ RV_I( op t3, __struct_arch_esf_t3_OFFSET(sp) );\ RV_I( op t4, __struct_arch_esf_t4_OFFSET(sp) );\ RV_I( op t5, __struct_arch_esf_t5_OFFSET(sp) );\ RV_I( op t6, __struct_arch_esf_t6_OFFSET(sp) );\ RV_E( op a0, __struct_arch_esf_a0_OFFSET(sp) );\ RV_E( op a1, __struct_arch_esf_a1_OFFSET(sp) );\ RV_E( op a2, __struct_arch_esf_a2_OFFSET(sp) );\ RV_E( op a3, __struct_arch_esf_a3_OFFSET(sp) );\ RV_E( op a4, __struct_arch_esf_a4_OFFSET(sp) );\ RV_E( op a5, __struct_arch_esf_a5_OFFSET(sp) );\ RV_I( op a6, __struct_arch_esf_a6_OFFSET(sp) );\ RV_I( op a7, __struct_arch_esf_a7_OFFSET(sp) );\ RV_E( op ra, __struct_arch_esf_ra_OFFSET(sp) ) #ifdef CONFIG_EXCEPTION_DEBUG /* Convenience macro for storing callee saved register [s0 - s11] states. 
*/ #define STORE_CALLEE_SAVED() \ RV_E( sr s0, ___callee_saved_t_s0_OFFSET(sp) );\ RV_E( sr s1, ___callee_saved_t_s1_OFFSET(sp) );\ RV_I( sr s2, ___callee_saved_t_s2_OFFSET(sp) );\ RV_I( sr s3, ___callee_saved_t_s3_OFFSET(sp) );\ RV_I( sr s4, ___callee_saved_t_s4_OFFSET(sp) );\ RV_I( sr s5, ___callee_saved_t_s5_OFFSET(sp) );\ RV_I( sr s6, ___callee_saved_t_s6_OFFSET(sp) );\ RV_I( sr s7, ___callee_saved_t_s7_OFFSET(sp) );\ RV_I( sr s8, ___callee_saved_t_s8_OFFSET(sp) );\ RV_I( sr s9, ___callee_saved_t_s9_OFFSET(sp) );\ RV_I( sr s10, ___callee_saved_t_s10_OFFSET(sp) );\ RV_I( sr s11, ___callee_saved_t_s11_OFFSET(sp) ) #endif /* CONFIG_EXCEPTION_DEBUG */ .macro get_current_cpu dst #if defined(CONFIG_SMP) || defined(CONFIG_USERSPACE) csrr \dst, mscratch #else la \dst, _kernel + ___kernel_t_cpus_OFFSET #endif .endm /* imports */ GDATA(_sw_isr_table) #ifdef CONFIG_RISCV_SOC_EXCEPTION_FROM_IRQ GTEXT(__soc_is_irq) #endif GTEXT(__soc_handle_irq) GTEXT(_Fault) #ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE GTEXT(__soc_save_context) GTEXT(__soc_restore_context) #endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */ #ifdef CONFIG_EXCEPTION_DEBUG GTEXT(z_riscv_fatal_error_csf) #else GTEXT(z_riscv_fatal_error) #endif /* CONFIG_EXCEPTION_DEBUG */ GTEXT(z_get_next_switch_handle) GTEXT(z_riscv_switch) GTEXT(z_riscv_thread_start) #ifdef CONFIG_TRACING GTEXT(sys_trace_isr_enter) GTEXT(sys_trace_isr_exit) #endif #ifdef CONFIG_USERSPACE GDATA(_k_syscall_table) #endif #ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_HANDLING GTEXT(__soc_handle_all_irqs) #endif /* exports */ GTEXT(_isr_wrapper) /* use ABI name of registers for the sake of simplicity */ /* * Generic architecture-level IRQ handling, along with callouts to * SoC-specific routines. * * Architecture level IRQ handling includes basic context save/restore * of standard registers and calling ISRs registered at Zephyr's driver * level. 
* * Since RISC-V does not completely prescribe IRQ handling behavior, * implementations vary (some implementations also deviate from * what standard behavior is defined). Hence, the arch level code expects * the following functions to be provided at the SOC level: * * - __soc_is_irq (optional): decide if we're handling an interrupt or an exception * - __soc_handle_irq: handle SoC-specific details for a pending IRQ * (e.g. clear a pending bit in a SoC-specific register) * * If CONFIG_RISCV_SOC_CONTEXT_SAVE=y, calls to SoC-level context save/restore * routines are also made here. For details, see the Kconfig help text. */ /* * Handler called upon each exception/interrupt/fault */ SECTION_FUNC(exception.entry, _isr_wrapper) /* Provide requested alignment, which depends e.g. on MTVEC format */ .balign CONFIG_RISCV_TRAP_HANDLER_ALIGNMENT #ifdef CONFIG_USERSPACE /* retrieve address of _current_cpu preserving s0 */ csrrw s0, mscratch, s0 /* preserve t0 and t1 temporarily */ sr t0, _curr_cpu_arch_user_exc_tmp0(s0) sr t1, _curr_cpu_arch_user_exc_tmp1(s0) /* determine if we come from user space */ csrr t0, mstatus li t1, MSTATUS_MPP and t0, t0, t1 bnez t0, 1f /* in user space we were: switch to our privileged stack */ mv t0, sp lr sp, _curr_cpu_arch_user_exc_sp(s0) /* Save user stack value. Coming from user space, we know this * can't overflow the privileged stack. The esf will be allocated * later but it is safe to store our saved user sp here. 
*/ sr t0, (-__struct_arch_esf_SIZEOF + __struct_arch_esf_sp_OFFSET)(sp) /* Make sure tls pointer is sane */ lr t0, ___cpu_t_current_OFFSET(s0) lr tp, _thread_offset_to_tls(t0) /* Clear our per-thread usermode flag */ lui t0, %tprel_hi(is_user_mode) add t0, t0, tp, %tprel_add(is_user_mode) sb zero, %tprel_lo(is_user_mode)(t0) 1: /* retrieve original t0/t1 values */ lr t0, _curr_cpu_arch_user_exc_tmp0(s0) lr t1, _curr_cpu_arch_user_exc_tmp1(s0) /* retrieve original s0 and restore _current_cpu in mscratch */ csrrw s0, mscratch, s0 #endif #ifdef CONFIG_RISCV_SOC_HAS_ISR_STACKING SOC_ISR_SW_STACKING #else /* Save caller-saved registers on current thread stack. */ addi sp, sp, -__struct_arch_esf_SIZEOF DO_CALLER_SAVED(sr) ; #endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */ /* Save s0 in the esf and load it with &_current_cpu. */ sr s0, __struct_arch_esf_s0_OFFSET(sp) get_current_cpu s0 /* Save MEPC register */ csrr t0, mepc sr t0, __struct_arch_esf_mepc_OFFSET(sp) /* Save MSTATUS register */ csrr t2, mstatus sr t2, __struct_arch_esf_mstatus_OFFSET(sp) #if defined(CONFIG_FPU_SHARING) /* determine if FPU access was disabled */ li t1, MSTATUS_FS and t1, t1, t2 bnez t1, no_fp /* determine if this is an Illegal Instruction exception */ csrr t2, mcause li t1, CONFIG_RISCV_MCAUSE_EXCEPTION_MASK and t2, t2, t1 li t1, 2 /* 2 = illegal instruction */ bne t1, t2, no_fp /* determine if we trapped on an FP instruction. */ csrr t2, mtval /* get faulting instruction */ #ifdef CONFIG_QEMU_TARGET /* * Some implementations may not support MTVAL in this capacity. * Notably QEMU when a CSR instruction is involved. 
*/ bnez t2, 1f lw t2, 0(t0) /* t0 = mepc */ 1: #endif andi t0, t2, 0x7f /* keep only the opcode bits */ /* * Major FP opcodes: * 0000111 = LOAD-FP * 0100111 = STORE-FP * 1000011 = MADD * 1000111 = MSUB * 1001011 = NMSUB * 1001111 = NMADD * 1010011 = OP-FP */ xori t1, t0, 0b1010011 /* OP-FP */ beqz t1, is_fp ori t1, t0, 0b0100000 xori t1, t1, 0b0100111 /* LOAD-FP / STORE-FP */ beqz t1, is_fp ori t1, t0, 0b0001100 xori t1, t1, 0b1001111 /* MADD / MSUB / NMSUB / NMADD */ beqz t1, is_fp /* * The FRCSR, FSCSR, FRRM, FSRM, FSRMI, FRFLAGS, FSFLAGS and FSFLAGSI * are in fact CSR instructions targeting the fcsr, frm and fflags * registers. They should be caught as FPU instructions as well. * * CSR format: csr#[31-20] src[19-15] op[14-12] dst[11-7] SYSTEM[6-0] * SYSTEM = 0b1110011, op = 0b.xx where xx is never 0 * The csr# of interest are: 1=fflags, 2=frm, 3=fcsr */ xori t1, t0, 0b1110011 /* SYSTEM opcode */ bnez t1, 2f /* not a CSR insn */ srli t0, t2, 12 andi t0, t0, 0x3 beqz t0, 2f /* not a CSR insn */ srli t0, t2, 20 /* isolate the csr register number */ beqz t0, 2f /* 0=ustatus */ andi t0, t0, ~0x3 /* 1=fflags, 2=frm, 3=fcsr */ #if !defined(CONFIG_RISCV_ISA_EXT_C) bnez t0, no_fp #else beqz t0, is_fp 2: /* remaining non RVC (0b11) and RVC with 0b01 are not FP instructions */ andi t1, t2, 1 bnez t1, no_fp /* * 001...........00 = C.FLD RV32/64 (RV128 = C.LQ) * 001...........10 = C.FLDSP RV32/64 (RV128 = C.LQSP) * 011...........00 = C.FLW RV32 (RV64/128 = C.LD) * 011...........10 = C.FLWSPP RV32 (RV64/128 = C.LDSP) * 101...........00 = C.FSD RV32/64 (RV128 = C.SQ) * 101...........10 = C.FSDSP RV32/64 (RV128 = C.SQSP) * 111...........00 = C.FSW RV32 (RV64/128 = C.SD) * 111...........10 = C.FSWSP RV32 (RV64/128 = C.SDSP) * * so must be .01............. on RV64 and ..1............. on RV32. 
*/ srli t0, t2, 8 #if defined(CONFIG_64BIT) andi t1, t0, 0b01100000 xori t1, t1, 0b00100000 bnez t1, no_fp #else andi t1, t0, 0b00100000 beqz t1, no_fp #endif #endif /* CONFIG_RISCV_ISA_EXT_C */ is_fp: /* Process the FP trap and quickly return from exception */ la ra, fp_trap_exit mv a0, sp tail z_riscv_fpu_trap 2: no_fp: /* increment _current->arch.exception_depth */ lr t0, ___cpu_t_current_OFFSET(s0) lb t1, _thread_offset_to_exception_depth(t0) add t1, t1, 1 sb t1, _thread_offset_to_exception_depth(t0) /* configure the FPU for exception mode */ call z_riscv_fpu_enter_exc #endif /* CONFIG_FPU_SHARING */ #ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE /* Handle context saving at SOC level. */ addi a0, sp, __struct_arch_esf_soc_context_OFFSET jal ra, __soc_save_context #endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */ /* * Check if exception is the result of an interrupt or not. * (SOC dependent). Following the RISC-V architecture spec, the MSB * of the mcause register is used to indicate whether an exception * is the result of an interrupt or an exception/fault. But for some * SOCs (like pulpino or riscv-qemu), the MSB is never set to indicate * interrupt. Hence, check for interrupt/exception via the __soc_is_irq * function (that needs to be implemented by each SOC). The result is * returned via register a0 (1: interrupt, 0 exception) */ #ifdef CONFIG_RISCV_SOC_EXCEPTION_FROM_IRQ jal ra, __soc_is_irq bnez a0, is_interrupt #else csrr t0, mcause srli t0, t0, RISCV_MCAUSE_IRQ_POS bnez t0, is_interrupt #endif /* * If the exception is the result of an ECALL, check whether to * perform a context-switch or an IRQ offload. Otherwise call _Fault * to report the exception. */ csrr t0, mcause li t2, CONFIG_RISCV_MCAUSE_EXCEPTION_MASK and t0, t0, t2 /* * If mcause == RISCV_EXC_ECALLM, handle system call from * kernel thread. */ li t1, RISCV_EXC_ECALLM beq t0, t1, is_kernel_syscall #ifdef CONFIG_USERSPACE /* * If mcause == RISCV_EXC_ECALLU, handle system call * for user mode thread. 
*/ li t1, RISCV_EXC_ECALLU beq t0, t1, is_user_syscall #ifdef CONFIG_PMP_STACK_GUARD /* * Determine if we come from user space. If so, reconfigure the PMP for * kernel mode stack guard. */ csrr t0, mstatus li t1, MSTATUS_MPP and t0, t0, t1 bnez t0, 1f lr a0, ___cpu_t_current_OFFSET(s0) call z_riscv_pmp_stackguard_enable 1: #endif /* CONFIG_PMP_STACK_GUARD */ #endif /* CONFIG_USERSPACE */ /* * Call _Fault to handle exception. * Stack pointer is pointing to a struct_arch_esf structure, pass it * to _Fault (via register a0). * If _Fault shall return, set return address to * no_reschedule to restore stack. */ mv a0, sp la ra, no_reschedule tail _Fault is_kernel_syscall: /* * A syscall is the result of an ecall instruction, in which case the * MEPC will contain the address of the ecall instruction. * Increment saved MEPC by 4 to prevent triggering the same ecall * again upon exiting the ISR. * * It's safe to always increment by 4, even with compressed * instructions, because the ecall instruction is always 4 bytes. */ lr t0, __struct_arch_esf_mepc_OFFSET(sp) addi t0, t0, 4 sr t0, __struct_arch_esf_mepc_OFFSET(sp) #ifdef CONFIG_PMP_STACK_GUARD /* Re-activate PMP for m-mode */ li t1, MSTATUS_MPP csrc mstatus, t1 li t1, MSTATUS_MPRV csrs mstatus, t1 #endif /* Determine what to do. Operation code is in t0. */ lr t0, __struct_arch_esf_t0_OFFSET(sp) .if RV_ECALL_RUNTIME_EXCEPT != 0; .err; .endif beqz t0, do_fault #if defined(CONFIG_IRQ_OFFLOAD) li t1, RV_ECALL_IRQ_OFFLOAD beq t0, t1, do_irq_offload #endif #ifdef CONFIG_RISCV_ALWAYS_SWITCH_THROUGH_ECALL li t1, RV_ECALL_SCHEDULE bne t0, t1, skip_schedule lr a0, __struct_arch_esf_a0_OFFSET(sp) lr a1, __struct_arch_esf_a1_OFFSET(sp) #ifdef CONFIG_FPU_SHARING /* * When an ECALL is used for a context-switch, the current thread has * been updated to the next thread. * Add the exception_depth back to the previous thread. 
*/ lb t1, _thread_offset_to_exception_depth(a0) add t1, t1, -1 sb t1, _thread_offset_to_exception_depth(a0) lb t1, _thread_offset_to_exception_depth(a1) add t1, t1, 1 sb t1, _thread_offset_to_exception_depth(a1) #endif j reschedule skip_schedule: #endif /* default fault code is K_ERR_KERNEL_OOPS */ li a0, 3 j 1f do_fault: /* Handle RV_ECALL_RUNTIME_EXCEPT. Retrieve reason in a0, esf in A1. */ lr a0, __struct_arch_esf_a0_OFFSET(sp) 1: mv a1, sp #ifdef CONFIG_EXCEPTION_DEBUG /* Allocate space for caller-saved registers on current thread stack */ addi sp, sp, -__callee_saved_t_SIZEOF /* Save callee-saved registers to be passed as 3rd arg */ STORE_CALLEE_SAVED() ; mv a2, sp tail z_riscv_fatal_error_csf #else tail z_riscv_fatal_error #endif #if defined(CONFIG_IRQ_OFFLOAD) do_irq_offload: /* * Retrieve provided routine and argument from the stack. * Routine pointer is in saved a0, argument in saved a1 * so we load them with a1/a0 (reversed). */ lr a1, __struct_arch_esf_a0_OFFSET(sp) lr a0, __struct_arch_esf_a1_OFFSET(sp) /* Increment _current_cpu->nested */ lw t1, ___cpu_t_nested_OFFSET(s0) addi t2, t1, 1 sw t2, ___cpu_t_nested_OFFSET(s0) bnez t1, 1f /* Switch to interrupt stack */ mv t0, sp lr sp, ___cpu_t_irq_stack_OFFSET(s0) /* Save thread stack pointer on interrupt stack */ addi sp, sp, -16 sr t0, 0(sp) 1: /* Execute provided routine (argument is in a0 already). */ jalr ra, a1, 0 /* Leave through the regular IRQ exit path */ j irq_done #endif /* CONFIG_IRQ_OFFLOAD */ #ifdef CONFIG_USERSPACE is_user_syscall: #ifdef CONFIG_PMP_STACK_GUARD /* * We came from userspace and need to reconfigure the * PMP for kernel mode stack guard. */ lr a0, ___cpu_t_current_OFFSET(s0) call z_riscv_pmp_stackguard_enable #endif /* It is safe to re-enable IRQs now */ csrs mstatus, MSTATUS_IEN /* * Same as for is_kernel_syscall: increment saved MEPC by 4 to * prevent triggering the same ecall again upon exiting the ISR. 
*/ lr t1, __struct_arch_esf_mepc_OFFSET(sp) addi t1, t1, 4 sr t1, __struct_arch_esf_mepc_OFFSET(sp) /* Restore argument registers from user stack */ lr a0, __struct_arch_esf_a0_OFFSET(sp) lr a1, __struct_arch_esf_a1_OFFSET(sp) lr a2, __struct_arch_esf_a2_OFFSET(sp) lr a3, __struct_arch_esf_a3_OFFSET(sp) lr a4, __struct_arch_esf_a4_OFFSET(sp) lr a5, __struct_arch_esf_a5_OFFSET(sp) lr t0, __struct_arch_esf_t0_OFFSET(sp) #if defined(CONFIG_RISCV_ISA_RV32E) /* Stack alignment for RV32E is 4 bytes */ addi sp, sp, -4 mv t1, sp sw t1, 0(sp) #else mv a6, sp #endif /* CONFIG_RISCV_ISA_RV32E */ /* validate syscall limit */ li t1, K_SYSCALL_LIMIT bltu t0, t1, valid_syscall_id /* bad syscall id. Set arg1 to bad id and set call_id to SYSCALL_BAD */ mv a0, t0 li t0, K_SYSCALL_BAD valid_syscall_id: la t2, _k_syscall_table slli t1, t0, RV_REGSHIFT # Determine offset from indice value add t2, t2, t1 # Table addr + offset = function addr lr t2, 0(t2) # Load function address /* Execute syscall function */ jalr ra, t2, 0 #if defined(CONFIG_RISCV_ISA_RV32E) addi sp, sp, 4 #endif /* CONFIG_RISCV_ISA_RV32E */ /* Update a0 (return value) on the stack */ sr a0, __struct_arch_esf_a0_OFFSET(sp) /* Disable IRQs again before leaving */ csrc mstatus, MSTATUS_IEN j might_have_rescheduled #endif /* CONFIG_USERSPACE */ is_interrupt: #ifdef CONFIG_PMP_STACK_GUARD #ifdef CONFIG_USERSPACE /* * If we came from userspace then we need to reconfigure the * PMP for kernel mode stack guard. 
*/ lr t0, __struct_arch_esf_mstatus_OFFSET(sp) li t1, MSTATUS_MPP and t0, t0, t1 bnez t0, 1f lr a0, ___cpu_t_current_OFFSET(s0) call z_riscv_pmp_stackguard_enable j 2f #endif /* CONFIG_USERSPACE */ 1: /* Re-activate PMP for m-mode */ li t1, MSTATUS_MPP csrc mstatus, t1 li t1, MSTATUS_MPRV csrs mstatus, t1 2: #endif /* Increment _current_cpu->nested */ lw t1, ___cpu_t_nested_OFFSET(s0) addi t2, t1, 1 sw t2, ___cpu_t_nested_OFFSET(s0) bnez t1, on_irq_stack /* Switch to interrupt stack */ mv t0, sp lr sp, ___cpu_t_irq_stack_OFFSET(s0) /* * Save thread stack pointer on interrupt stack * In RISC-V, stack pointer needs to be 16-byte aligned */ addi sp, sp, -16 sr t0, 0(sp) on_irq_stack: #ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_HANDLING call __soc_handle_all_irqs #else #ifdef CONFIG_TRACING_ISR call sys_trace_isr_enter #endif /* Get IRQ causing interrupt */ csrr a0, mcause li t0, CONFIG_RISCV_MCAUSE_EXCEPTION_MASK and a0, a0, t0 /* * Clear pending IRQ generating the interrupt at SOC level * Pass IRQ number to __soc_handle_irq via register a0 */ jal ra, __soc_handle_irq /* * Call corresponding registered function in _sw_isr_table. * (table is 2-word wide, we should shift index accordingly) */ la t0, _sw_isr_table slli a0, a0, (RV_REGSHIFT + 1) add t0, t0, a0 /* Load argument in a0 register */ lr a0, 0(t0) /* Load ISR function address in register t1 */ lr t1, RV_REGSIZE(t0) /* Call ISR function */ jalr ra, t1, 0 #ifdef CONFIG_TRACING_ISR call sys_trace_isr_exit #endif #endif irq_done: /* Decrement _current_cpu->nested */ lw t2, ___cpu_t_nested_OFFSET(s0) addi t2, t2, -1 sw t2, ___cpu_t_nested_OFFSET(s0) bnez t2, no_reschedule /* nested count is back to 0: Return to thread stack */ lr sp, 0(sp) #ifdef CONFIG_STACK_SENTINEL call z_check_stack_sentinel #endif check_reschedule: #ifdef CONFIG_MULTITHREADING /* Get pointer to current thread on this CPU */ lr a1, ___cpu_t_current_OFFSET(s0) /* * Get next thread to schedule with z_get_next_switch_handle(). 
* We pass it a NULL as we didn't save the whole thread context yet. * If no scheduling is necessary then NULL will be returned. */ addi sp, sp, -16 sr a1, 0(sp) mv a0, zero call z_get_next_switch_handle lr a1, 0(sp) addi sp, sp, 16 beqz a0, no_reschedule reschedule: /* * Perform context switch: * a0 = new thread * a1 = old thread */ call z_riscv_switch z_riscv_thread_start: might_have_rescheduled: /* reload s0 with &_current_cpu as it might have changed or be unset */ get_current_cpu s0 #endif /* CONFIG_MULTITHREADING */ no_reschedule: #ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE /* Restore context at SOC level */ addi a0, sp, __struct_arch_esf_soc_context_OFFSET jal ra, __soc_restore_context #endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */ #if defined(CONFIG_FPU_SHARING) /* FPU handling upon exception mode exit */ mv a0, sp call z_riscv_fpu_exit_exc /* decrement _current->arch.exception_depth */ lr t0, ___cpu_t_current_OFFSET(s0) lb t1, _thread_offset_to_exception_depth(t0) add t1, t1, -1 sb t1, _thread_offset_to_exception_depth(t0) fp_trap_exit: #endif /* Restore MEPC and MSTATUS registers */ lr t0, __struct_arch_esf_mepc_OFFSET(sp) lr t2, __struct_arch_esf_mstatus_OFFSET(sp) csrw mepc, t0 csrw mstatus, t2 #ifdef CONFIG_USERSPACE /* * Check if we are returning to user mode. If so then we must * set is_user_mode to true and preserve our kernel mode stack for * the next exception to come. */ li t1, MSTATUS_MPP and t0, t2, t1 bnez t0, 1f #ifdef CONFIG_PMP_STACK_GUARD /* Remove kernel stack guard and Reconfigure PMP for user mode */ lr a0, ___cpu_t_current_OFFSET(s0) call z_riscv_pmp_usermode_enable #endif /* Set our per-thread usermode flag */ li t1, 1 lui t0, %tprel_hi(is_user_mode) add t0, t0, tp, %tprel_add(is_user_mode) sb t1, %tprel_lo(is_user_mode)(t0) /* preserve stack pointer for next exception entry */ add t0, sp, __struct_arch_esf_SIZEOF sr t0, _curr_cpu_arch_user_exc_sp(s0) j 2f 1: /* * We are returning to kernel mode. 
Store the stack pointer to * be re-loaded further down. */ addi t0, sp, __struct_arch_esf_SIZEOF sr t0, __struct_arch_esf_sp_OFFSET(sp) 2: #endif /* Restore s0 (it is no longer ours) */ lr s0, __struct_arch_esf_s0_OFFSET(sp) #ifdef CONFIG_RISCV_SOC_HAS_ISR_STACKING SOC_ISR_SW_UNSTACKING #else /* Restore caller-saved registers from thread stack */ DO_CALLER_SAVED(lr) #ifdef CONFIG_USERSPACE /* retrieve saved stack pointer */ lr sp, __struct_arch_esf_sp_OFFSET(sp) #else /* remove esf from the stack */ addi sp, sp, __struct_arch_esf_SIZEOF #endif #endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */ mret ```
/content/code_sandbox/arch/riscv/core/isr.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,815
```unknown /* * Userspace and service handler hooks * * */ #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #include <offsets_short.h> #include <zephyr/arch/cpu.h> #include <zephyr/syscall.h> #include <zephyr/kernel_structs.h> #include <zephyr/arch/riscv/csr.h> #include "asm_macros.inc" /* exports */ GTEXT(arch_user_string_nlen) GTEXT(z_riscv_user_string_nlen_fault_start) GTEXT(z_riscv_user_string_nlen_fault_end) GTEXT(z_riscv_user_string_nlen_fixup) /* * size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg) */ SECTION_FUNC(TEXT, arch_user_string_nlen) li a5, 0 # Counter sw a5, 0(a2) # Init error value to 0 loop: add a4, a0, a5 # Determine character address z_riscv_user_string_nlen_fault_start: lbu a4, 0(a4) # Load string's character z_riscv_user_string_nlen_fault_end: beqz a4, exit # Test string's end of line bne a5, a1, continue # Check if max length is reached exit: mv a0, a5 # Return counter value (length) ret continue: addi a5, a5, 1 # Increment counter j loop z_riscv_user_string_nlen_fixup: li a4, -1 # Put error to -1 sw a4, 0(a2) j exit ```
/content/code_sandbox/arch/riscv/core/userspace.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
371
```c /* * */ /** * @file * @brief Full C support initialization * * * Initialization of full C support: zero the .bss and call z_cstart(). * * Stack is available in this module, but not the global data/bss until their * initialization is performed. */ #include <stddef.h> #include <zephyr/toolchain.h> #include <zephyr/kernel_structs.h> #include <kernel_internal.h> #if defined(CONFIG_RISCV_SOC_INTERRUPT_INIT) void soc_interrupt_init(void); #endif /** * * @brief Prepare to and run C code * * This routine prepares for the execution of and runs C code. */ void z_prep_c(void) { z_bss_zero(); z_data_copy(); #if defined(CONFIG_RISCV_SOC_INTERRUPT_INIT) soc_interrupt_init(); #endif z_cstart(); CODE_UNREACHABLE; } ```
/content/code_sandbox/arch/riscv/core/prep_c.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
182
```c /* * */ #include <zephyr/irq.h> #include <zephyr/tracing/tracing.h> #ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE void arch_cpu_idle(void) { sys_trace_idle(); __asm__ volatile("wfi"); irq_unlock(MSTATUS_IEN); } #endif #ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE void arch_cpu_atomic_idle(unsigned int key) { sys_trace_idle(); __asm__ volatile("wfi"); irq_unlock(key); } #endif ```
/content/code_sandbox/arch/riscv/core/cpu_idle.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
103
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/kernel_structs.h> #include <kernel_internal.h> #include <kernel_tls.h> #include <zephyr/app_memory/app_memdomain.h> #include <zephyr/sys/util.h> size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr) { /* * TLS area for RISC-V is simple without any extra * data. */ /* * Since we are populating things backwards, * setup the TLS data/bss area first. */ stack_ptr -= z_tls_data_size(); z_tls_copy(stack_ptr); /* * Set thread TLS pointer which is used in * context switch to point to TLS area. */ new_thread->tls = POINTER_TO_UINT(stack_ptr); return z_tls_data_size(); } ```
/content/code_sandbox/arch/riscv/core/tls.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
178
```c /* * */ #include <zephyr/init.h> #include <zephyr/kernel.h> #include <ksched.h> #include <ipi.h> #include <zephyr/irq.h> #include <zephyr/sys/atomic.h> #include <zephyr/arch/riscv/irq.h> #include <zephyr/drivers/pm_cpu_ops.h> volatile struct { arch_cpustart_t fn; void *arg; } riscv_cpu_init[CONFIG_MP_MAX_NUM_CPUS]; volatile uintptr_t __noinit riscv_cpu_wake_flag; volatile uintptr_t riscv_cpu_boot_flag; volatile void *riscv_cpu_sp; extern void __start(void); #if defined(CONFIG_RISCV_SOC_INTERRUPT_INIT) void soc_interrupt_init(void); #endif void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz, arch_cpustart_t fn, void *arg) { riscv_cpu_init[cpu_num].fn = fn; riscv_cpu_init[cpu_num].arg = arg; riscv_cpu_sp = K_KERNEL_STACK_BUFFER(stack) + sz; riscv_cpu_boot_flag = 0U; #ifdef CONFIG_PM_CPU_OPS if (pm_cpu_on(cpu_num, (uintptr_t)&__start)) { printk("Failed to boot secondary CPU %d\n", cpu_num); return; } #endif while (riscv_cpu_boot_flag == 0U) { riscv_cpu_wake_flag = _kernel.cpus[cpu_num].arch.hartid; } } void arch_secondary_cpu_init(int hartid) { unsigned int i; unsigned int cpu_num = 0; for (i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) { if (_kernel.cpus[i].arch.hartid == hartid) { cpu_num = i; } } csr_write(mscratch, &_kernel.cpus[cpu_num]); #ifdef CONFIG_SMP _kernel.cpus[cpu_num].arch.online = true; #endif #if defined(CONFIG_MULTITHREADING) && defined(CONFIG_THREAD_LOCAL_STORAGE) __asm__("mv tp, %0" : : "r" (z_idle_threads[cpu_num].tls)); #endif #if defined(CONFIG_RISCV_SOC_INTERRUPT_INIT) soc_interrupt_init(); #endif #ifdef CONFIG_RISCV_PMP z_riscv_pmp_init(); #endif #ifdef CONFIG_SMP irq_enable(RISCV_IRQ_MSOFT); #endif riscv_cpu_init[cpu_num].fn(riscv_cpu_init[cpu_num].arg); } #ifdef CONFIG_SMP #define MSIP_BASE 0x2000000UL #define MSIP(hartid) ((volatile uint32_t *)MSIP_BASE)[hartid] static atomic_val_t cpu_pending_ipi[CONFIG_MP_MAX_NUM_CPUS]; #define IPI_SCHED 0 #define IPI_FPU_FLUSH 1 void arch_sched_directed_ipi(uint32_t cpu_bitmap) { unsigned int 
key = arch_irq_lock(); unsigned int id = _current_cpu->id; unsigned int num_cpus = arch_num_cpus(); for (unsigned int i = 0; i < num_cpus; i++) { if ((i != id) && _kernel.cpus[i].arch.online && ((cpu_bitmap & BIT(i)) != 0)) { atomic_set_bit(&cpu_pending_ipi[i], IPI_SCHED); MSIP(_kernel.cpus[i].arch.hartid) = 1; } } arch_irq_unlock(key); } void arch_sched_broadcast_ipi(void) { arch_sched_directed_ipi(IPI_ALL_CPUS_MASK); } #ifdef CONFIG_FPU_SHARING void arch_flush_fpu_ipi(unsigned int cpu) { atomic_set_bit(&cpu_pending_ipi[cpu], IPI_FPU_FLUSH); MSIP(_kernel.cpus[cpu].arch.hartid) = 1; } #endif static void sched_ipi_handler(const void *unused) { ARG_UNUSED(unused); MSIP(csr_read(mhartid)) = 0; atomic_val_t pending_ipi = atomic_clear(&cpu_pending_ipi[_current_cpu->id]); if (pending_ipi & ATOMIC_MASK(IPI_SCHED)) { z_sched_ipi(); } #ifdef CONFIG_FPU_SHARING if (pending_ipi & ATOMIC_MASK(IPI_FPU_FLUSH)) { /* disable IRQs */ csr_clear(mstatus, MSTATUS_IEN); /* perform the flush */ arch_flush_local_fpu(); /* * No need to re-enable IRQs here as long as * this remains the last case. */ } #endif } #ifdef CONFIG_FPU_SHARING /* * Make sure there is no pending FPU flush request for this CPU while * waiting for a contended spinlock to become available. This prevents * a deadlock when the lock we need is already taken by another CPU * that also wants its FPU content to be reinstated while such content * is still live in this CPU's FPU. */ void arch_spin_relax(void) { atomic_val_t *pending_ipi = &cpu_pending_ipi[_current_cpu->id]; if (atomic_test_and_clear_bit(pending_ipi, IPI_FPU_FLUSH)) { /* * We may not be in IRQ context here hence cannot use * arch_flush_local_fpu() directly. */ arch_float_disable(_current_cpu->arch.fpu_owner); } } #endif int arch_smp_init(void) { IRQ_CONNECT(RISCV_IRQ_MSOFT, 0, sched_ipi_handler, NULL, 0); irq_enable(RISCV_IRQ_MSOFT); return 0; } #endif /* CONFIG_SMP */ ```
/content/code_sandbox/arch/riscv/core/smp.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,225
```c /* * */ #include <zephyr/debug/symtab.h> #include <zephyr/kernel.h> #include <zephyr/kernel_structs.h> #include <kernel_internal.h> #include <zephyr/logging/log.h> LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); uintptr_t z_riscv_get_sp_before_exc(const struct arch_esf *esf); #define MAX_STACK_FRAMES \ MAX(CONFIG_EXCEPTION_STACK_TRACE_MAX_FRAMES, CONFIG_ARCH_STACKWALK_MAX_FRAMES) struct stackframe { uintptr_t fp; uintptr_t ra; }; typedef bool (*stack_verify_fn)(uintptr_t, const struct k_thread *const, const struct arch_esf *); static inline bool in_irq_stack_bound(uintptr_t addr, uint8_t cpu_id) { uintptr_t start, end; start = (uintptr_t)K_KERNEL_STACK_BUFFER(z_interrupt_stacks[cpu_id]); end = start + CONFIG_ISR_STACK_SIZE; return (addr >= start) && (addr < end); } static inline bool in_kernel_thread_stack_bound(uintptr_t addr, const struct k_thread *const thread) { #ifdef CONFIG_THREAD_STACK_INFO uintptr_t start, end; start = thread->stack_info.start; end = Z_STACK_PTR_ALIGN(thread->stack_info.start + thread->stack_info.size); return (addr >= start) && (addr < end); #else ARG_UNUSED(addr); ARG_UNUSED(thread); /* Return false as we can't check if the addr is in the thread stack without stack info */ return false; #endif } #ifdef CONFIG_USERSPACE static inline bool in_user_thread_stack_bound(uintptr_t addr, const struct k_thread *const thread) { uintptr_t start, end; /* See: zephyr/include/zephyr/arch/riscv/arch.h */ if (IS_ENABLED(CONFIG_PMP_POWER_OF_TWO_ALIGNMENT)) { start = thread->arch.priv_stack_start + Z_RISCV_STACK_GUARD_SIZE; } else { start = thread->stack_info.start - CONFIG_PRIVILEGED_STACK_SIZE; } end = Z_STACK_PTR_ALIGN(thread->arch.priv_stack_start + K_KERNEL_STACK_RESERVED + CONFIG_PRIVILEGED_STACK_SIZE); return (addr >= start) && (addr < end); } #endif /* CONFIG_USERSPACE */ static bool in_stack_bound(uintptr_t addr, const struct k_thread *const thread, const struct arch_esf *esf) { ARG_UNUSED(esf); if (!IS_ALIGNED(addr, sizeof(uintptr_t))) { 
return false; } #ifdef CONFIG_USERSPACE if ((thread->base.user_options & K_USER) != 0) { return in_user_thread_stack_bound(addr, thread); } #endif /* CONFIG_USERSPACE */ return in_kernel_thread_stack_bound(addr, thread); } static bool in_fatal_stack_bound(uintptr_t addr, const struct k_thread *const thread, const struct arch_esf *esf) { const uintptr_t align = COND_CODE_1(CONFIG_FRAME_POINTER, (ARCH_STACK_PTR_ALIGN), (sizeof(uintptr_t))); if (!IS_ALIGNED(addr, align)) { return false; } if ((thread == NULL) || arch_is_in_isr()) { /* We were servicing an interrupt */ uint8_t cpu_id = IS_ENABLED(CONFIG_SMP) ? arch_curr_cpu()->id : 0U; return in_irq_stack_bound(addr, cpu_id); } return in_stack_bound(addr, thread, esf); } static inline bool in_text_region(uintptr_t addr) { extern uintptr_t __text_region_start, __text_region_end; return (addr >= (uintptr_t)&__text_region_start) && (addr < (uintptr_t)&__text_region_end); } #ifdef CONFIG_FRAME_POINTER static void walk_stackframe(stack_trace_callback_fn cb, void *cookie, const struct k_thread *thread, const struct arch_esf *esf, stack_verify_fn vrfy, const _callee_saved_t *csf) { uintptr_t fp, last_fp = 0; uintptr_t ra; struct stackframe *frame; if (esf != NULL) { /* Unwind the provided exception stack frame */ fp = esf->s0; ra = esf->mepc; } else if ((csf == NULL) || (csf == &_current->callee_saved)) { /* Unwind current thread (default case when nothing is provided ) */ fp = (uintptr_t)__builtin_frame_address(0); ra = (uintptr_t)walk_stackframe; } else { /* Unwind the provided thread */ fp = csf->s0; ra = csf->ra; } for (int i = 0; (i < MAX_STACK_FRAMES) && vrfy(fp, thread, esf) && (fp > last_fp); i++) { if (in_text_region(ra) && !cb(cookie, ra)) { break; } last_fp = fp; /* Unwind to the previous frame */ frame = (struct stackframe *)fp - 1; if ((i == 0) && (esf != NULL)) { /* Print `esf->ra` if we are at the top of the stack */ if (in_text_region(esf->ra) && !cb(cookie, esf->ra)) { break; } /** * For the first stack 
frame, the `ra` is not stored in the frame if the
 * preempted function doesn't call any other function, we can observe:
 *
 *                        .-------------.
 *    frame[0]->fp  --->  | frame[0] fp |
 *                        :-------------:
 *    frame[0]->ra  --->  | frame[1] fp |
 *                        | frame[1] ra |
 *                        :~~~~~~~~~~~~~:
 *                        | frame[N] fp |
 *
 * Instead of:
 *
 *                        .-------------.
 *    frame[0]->fp  --->  | frame[0] fp |
 *    frame[0]->ra  --->  | frame[1] ra |
 *                        :-------------:
 *                        | frame[1] fp |
 *                        | frame[1] ra |
 *                        :~~~~~~~~~~~~~:
 *                        | frame[N] fp |
 *
 * Check if `frame->ra` actually points to a `fp`, and adjust accordingly
 */
			if (vrfy(frame->ra, thread, esf)) {
				fp = frame->ra;
				frame = (struct stackframe *)fp;
			}
		}

		fp = frame->fp;
		ra = frame->ra;
	}
}

#else /* !CONFIG_FRAME_POINTER */

/* Global register alias: reads the live stack pointer (`sp`) directly */
register uintptr_t current_stack_pointer __asm__("sp");

/*
 * Frame-pointer-less unwinder: scan the stack word by word, reporting every
 * value that falls inside the text region as a candidate return address.
 * The scan stops at MAX_STACK_FRAMES reported frames, when `vrfy` rejects
 * the current stack address, or when the walked address stops increasing
 * (guards against loops on a corrupted stack).
 *
 * cb/cookie: per-frame callback and its context; cb returning false aborts.
 * thread/esf: unwind context, forwarded to `vrfy`.
 * vrfy:      predicate validating each candidate stack address.
 * csf:       callee-saved registers of the thread to unwind, or NULL.
 */
static void walk_stackframe(stack_trace_callback_fn cb, void *cookie, const struct k_thread *thread,
			    const struct arch_esf *esf, stack_verify_fn vrfy,
			    const _callee_saved_t *csf)
{
	uintptr_t sp;
	uintptr_t ra;
	uintptr_t *ksp, last_ksp = 0;

	if (esf != NULL) {
		/* Unwind the provided exception stack frame */
		sp = z_riscv_get_sp_before_exc(esf);
		ra = esf->mepc;
	} else if ((csf == NULL) || (csf == &_current->callee_saved)) {
		/* Unwind current thread (default case when nothing is provided ) */
		sp = current_stack_pointer;
		ra = (uintptr_t)walk_stackframe;
	} else {
		/* Unwind the provided thread */
		sp = csf->sp;
		ra = csf->ra;
	}

	ksp = (uintptr_t *)sp;
	for (int i = 0; (i < MAX_STACK_FRAMES) && vrfy((uintptr_t)ksp, thread, esf) &&
			((uintptr_t)ksp > last_ksp);) {
		if (in_text_region(ra)) {
			if (!cb(cookie, ra)) {
				break;
			}
			/*
			 * Increment the iterator only if `ra` is within the text region to get the
			 * most out of it
			 */
			i++;
		}
		last_ksp = (uintptr_t)ksp;
		/* Unwind to the previous frame */
		ra = ((struct arch_esf *)ksp++)->ra;
	}
}
#endif /* CONFIG_FRAME_POINTER */

/*
 * Public stack-walk entry point: unwind `thread` (defaulting to `_current`
 * when NULL) with the regular stack-bound verifier.
 */
void arch_stack_walk(stack_trace_callback_fn callback_fn, void *cookie,
		     const struct k_thread *thread, const struct arch_esf *esf)
{
	if (thread == NULL) {
		/* In case `thread` is NULL, default that to `_current` and try to unwind */
		thread = _current;
	}

	walk_stackframe(callback_fn, cookie, thread, esf, in_stack_bound, &thread->callee_saved);
}

/* Pointer-sized hex format for trace output; width depends on XLEN */
#if __riscv_xlen == 32
#define PR_REG "%08" PRIxPTR
#elif __riscv_xlen == 64
#define PR_REG "%016" PRIxPTR
#endif

/*
 * With symtab support, each trace line carries the nearest symbol name and
 * offset; without it, the macro simply ignores the `name`/`offset` arguments.
 */
#ifdef CONFIG_EXCEPTION_STACK_TRACE_SYMTAB
#define LOG_STACK_TRACE(idx, ra, name, offset) \
	LOG_ERR(" %2d: ra: " PR_REG " [%s+0x%x]", idx, ra, name, offset)
#else
#define LOG_STACK_TRACE(idx, ra, name, offset) LOG_ERR(" %2d: ra: " PR_REG, idx, ra)
#endif /* CONFIG_EXCEPTION_STACK_TRACE_SYMTAB */

/* Per-frame callback for z_riscv_unwind_stack(): log one numbered trace entry */
static bool print_trace_address(void *arg, unsigned long ra)
{
	int *i = arg;
#ifdef CONFIG_EXCEPTION_STACK_TRACE_SYMTAB
	uint32_t offset = 0;
	const char *name = symtab_find_symbol_name(ra, &offset);
#endif

	/* `name`/`offset` only exist under SYMTAB; the other macro variant drops them */
	LOG_STACK_TRACE((*i)++, ra, name, offset);

	return true;
}

/* Emit a call trace on the fatal-error path, using the fatal stack verifier */
void z_riscv_unwind_stack(const struct arch_esf *esf, const _callee_saved_t *csf)
{
	int i = 0;

	LOG_ERR("call trace:");
	walk_stackframe(print_trace_address, &i, _current, esf, in_fatal_stack_bound, csf);
	LOG_ERR("");
}
```
/content/code_sandbox/arch/riscv/core/stacktrace.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,183
```c
/*
 * Organisation (CSIRO) ABN 41 687 119 230.
 *
 */

#include <zephyr/toolchain.h>
#include <zephyr/arch/common/semihost.h>

/*
 * QEMU requires that the semihosting trap instruction sequence, consisting of
 * three uncompressed instructions, lie in the same page, and refuses to
 * interpret the trap sequence if these instructions are placed across two
 * different pages.
 *
 * The `semihost_exec` function, which occupies 12 bytes, is aligned at a
 * 16-byte boundary to ensure that the three trap sequence instructions are
 * never placed across two different pages.
 */
long __aligned(16) semihost_exec(enum semihost_instr instr, void *args)
{
	/* Semihosting call convention: operation number in a0, argument block in a1 */
	register unsigned long a0 __asm__ ("a0") = instr;
	register void *a1 __asm__ ("a1") = args;
	register long ret __asm__ ("a0");

	__asm__ volatile (
		/* `norvc` forces all three trap-sequence instructions to stay uncompressed */
		".option push\n\t"
		".option norvc\n\t"
		"slli zero, zero, 0x1f\n\t"
		"ebreak\n\t"
		"srai zero, zero, 0x7\n\t"
		".option pop"
		/* Result comes back in a0; "memory" because the host may read/write *args */
		: "=r" (ret) : "r" (a0), "r" (a1) : "memory");
	return ret;
}
```
/content/code_sandbox/arch/riscv/core/semihost.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
296
```sourcepawn
/*
 * Assembly macros and helpers
 *
 * GNU assembler (.macro) helpers shared by the RISC-V arch assembly files.
 */

#ifdef CONFIG_64BIT

/* register-wide load/store based on ld/sd (XLEN = 64) */

.macro lr, rd, mem
	ld \rd, \mem
.endm

.macro sr, rs, mem
	sd \rs, \mem
.endm

#else

/* register-wide load/store based on lw/sw (XLEN = 32) */

.macro lr, rd, mem
	lw \rd, \mem
.endm

.macro sr, rs, mem
	sw \rs, \mem
.endm

#endif

/* FP-register-wide load/store: fld/fsd with double-precision FPU, flw/fsw otherwise */
#ifdef CONFIG_CPU_HAS_FPU_DOUBLE_PRECISION

.macro flr, rd, mem
	fld \rd, \mem
.endm

.macro fsr, rs, mem
	fsd \rs, \mem
.endm

#else

.macro flr, rd, mem
	flw \rd, \mem
.endm

.macro fsr, rs, mem
	fsw \rs, \mem
.endm

#endif

/*
 * Perform rd += rs * mult using only shifts and adds.
 * Useful when the mul instruction isn't available.
 * mult must be a constant. rs will be clobbered.
 */
.macro shiftmul_add rd, rs, mult
	beqz \rs, 999f
	.set _bitpos, 0
	.set _lastbitpos, 0
	/* For each set bit of mult: shift rs up to that bit position, accumulate */
	.rept 32
	.if ((\mult) & (1 << _bitpos))
	.if (_bitpos - _lastbitpos) != 0
	slli \rs, \rs, (_bitpos - _lastbitpos)
	.set _lastbitpos, _bitpos
	.endif
	add \rd, \rd, \rs
	.endif
	.set _bitpos, _bitpos + 1
	.endr
999:
.endm

/*
 * lowest common denominator for register availability:
 * RV_E() expands on all targets, RV_I() only when the full
 * register file (non-RV32E) is available.
 */
#if defined(CONFIG_RISCV_ISA_RV32E)
#define RV_E(op...) op
#define RV_I(op...) /* unavailable */
#else
#define RV_E(op...) op
#define RV_I(op...) op
#endif
```
/content/code_sandbox/arch/riscv/core/asm_macros.inc
sourcepawn
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
485
```unknown
/*
 * Contributors: 2018 Antmicro <www.antmicro.com>
 *
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/offsets.h>
#include "asm_macros.inc"

/* exports */
GTEXT(__initialize)
GTEXT(__reset)

/* imports */
GTEXT(z_prep_c)
GTEXT(riscv_cpu_wake_flag)
GTEXT(riscv_cpu_sp)
GTEXT(arch_secondary_cpu_init)

#if CONFIG_INCLUDE_RESET_VECTOR
SECTION_FUNC(reset, __reset)
	/*
	 * jump to __initialize
	 * use call opcode in case __initialize is far away.
	 * This will be dependent on linker.ld configuration.
	 */
	call __initialize
#endif /* CONFIG_INCLUDE_RESET_VECTOR */

/* use ABI name of registers for the sake of simplicity */

/*
 * Remainder of asm-land initialization code before we can jump into
 * the C domain
 */
SECTION_FUNC(TEXT, __initialize)
	/* Route the boot hart to first-core init, all others to secondary path */
	csrr a0, mhartid
	li t0, CONFIG_RV_BOOT_HART
	beq a0, t0, boot_first_core
	j boot_secondary_core

boot_first_core:

#ifdef CONFIG_FPU
	/*
	 * Enable floating-point.
	 */
	li t0, MSTATUS_FS_INIT
	csrs mstatus, t0

	/*
	 * Floating-point rounding mode set to IEEE-754 default, and clear
	 * all exception flags.
	 */
	fscsr zero
#endif

#ifdef CONFIG_INIT_STACKS
	/* Pre-populate all bytes in z_interrupt_stacks with 0xAA */
	la t0, z_interrupt_stacks
	li t1, __z_interrupt_stack_SIZEOF
	add t1, t1, t0
	/* Populate z_interrupt_stacks with 0xaaaaaaaa */
	li t2, 0xaaaaaaaa
aa_loop:
	sw t2, 0x00(t0)
	addi t0, t0, 4
	blt t0, t1, aa_loop
#endif

	/*
	 * Initially, setup stack pointer to
	 * z_interrupt_stacks + __z_interrupt_stack_SIZEOF
	 */
	la sp, z_interrupt_stacks
	li t0, __z_interrupt_stack_SIZEOF
	add sp, sp, t0

#ifdef CONFIG_WDOG_INIT
	call _WdogInit
#endif

	/*
	 * Jump into C domain. z_prep_c zeroes BSS, copies rw data into RAM,
	 * and then enters kernel z_cstart
	 */
	call z_prep_c

boot_secondary_core:
#if CONFIG_MP_MAX_NUM_CPUS > 1
	/* Signal "this hart is parked": wake flag = -1, boot flag = 0 */
	la t0, riscv_cpu_wake_flag
	li t1, -1
	sr t1, 0(t0)

	la t0, riscv_cpu_boot_flag
	sr zero, 0(t0)

	/* Spin until the primary core writes this hart's id into the wake flag */
wait_secondary_wake_flag:
	la t0, riscv_cpu_wake_flag
	lr t0, 0(t0)
	bne a0, t0, wait_secondary_wake_flag

	/* Set up stack */
	la t0, riscv_cpu_sp
	lr sp, 0(t0)

	/* Acknowledge boot (boot flag = 1) and enter the kernel's secondary init */
	la t0, riscv_cpu_boot_flag
	li t1, 1
	sr t1, 0(t0)

	j arch_secondary_cpu_init
#else
	j loop_unconfigured_cores
#endif

	/* Harts beyond the configured CPU count idle here forever */
loop_unconfigured_cores:
	wfi
	j loop_unconfigured_cores
```
/content/code_sandbox/arch/riscv/core/reset.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
741
```c
/*
 *
 */

#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <zephyr/logging/log.h>
#include <zephyr/arch/riscv/csr.h>
#include <zephyr/irq_multilevel.h>
#include <zephyr/sw_isr_table.h>

#ifdef CONFIG_RISCV_HAS_PLIC
#include <zephyr/drivers/interrupt_controller/riscv_plic.h>
#endif

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

/*
 * Default handler for interrupt lines with no registered ISR: log the
 * offending mcause (and the PLIC line when available), then die fatally.
 */
FUNC_NORETURN void z_irq_spurious(const void *unused)
{
	unsigned long mcause;

	ARG_UNUSED(unused);

	mcause = csr_read(mcause);

	/* Strip interrupt/mode bits, keep only the exception code */
	mcause &= CONFIG_RISCV_MCAUSE_EXCEPTION_MASK;

	LOG_ERR("Spurious interrupt detected! IRQ: %ld", mcause);
#if defined(CONFIG_RISCV_HAS_PLIC)
	if (mcause == RISCV_IRQ_MEXT) {
		unsigned int save_irq = riscv_plic_get_irq();
		const struct device *save_dev = riscv_plic_get_dev();

		LOG_ERR("PLIC interrupt line causing the IRQ: %d (%p)", save_irq, save_dev);
	}
#endif
	z_riscv_fatal_error(K_ERR_SPURIOUS_IRQ, NULL);
}

#ifdef CONFIG_DYNAMIC_INTERRUPTS
/*
 * Install `routine` for `irq` at run time. Priority/flags are forwarded to
 * the interrupt controller only when a PLIC/CLIC is present. Returns `irq`.
 */
int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
			     void (*routine)(const void *parameter),
			     const void *parameter, uint32_t flags)
{
	z_isr_install(irq + CONFIG_RISCV_RESERVED_IRQ_ISR_TABLES_OFFSET, routine, parameter);
#if defined(CONFIG_RISCV_HAS_PLIC) || defined(CONFIG_RISCV_HAS_CLIC)
	z_riscv_irq_priority_set(irq, priority, flags);
#else
	ARG_UNUSED(flags);
	ARG_UNUSED(priority);
#endif
	return irq;
}

#ifdef CONFIG_SHARED_INTERRUPTS
/* Remove a dynamically-installed ISR from a (possibly shared) interrupt line */
int arch_irq_disconnect_dynamic(unsigned int irq, unsigned int priority,
				void (*routine)(const void *parameter),
				const void *parameter, uint32_t flags)
{
	ARG_UNUSED(priority);
	ARG_UNUSED(flags);

	return z_isr_uninstall(irq + CONFIG_RISCV_RESERVED_IRQ_ISR_TABLES_OFFSET, routine,
			       parameter);
}
#endif /* CONFIG_SHARED_INTERRUPTS */
#endif /* CONFIG_DYNAMIC_INTERRUPTS */
```
/content/code_sandbox/arch/riscv/core/irq_manage.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
461
```c
/*
 *
 */

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <inttypes.h>
#include <zephyr/arch/common/exc_handle.h>
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#ifdef CONFIG_USERSPACE
/* Fixup table: code ranges that are allowed to fault and be redirected (see _Fault) */
Z_EXC_DECLARE(z_riscv_user_string_nlen);

static const struct z_exc_handle exceptions[] = {
	Z_EXC_HANDLE(z_riscv_user_string_nlen),
};
#endif /* CONFIG_USERSPACE */

/* Pointer-sized hex format (and blank filler) for the register dump */
#if __riscv_xlen == 32
#define PR_REG "%08" PRIxPTR
#define NO_REG " "
#elif __riscv_xlen == 64
#define PR_REG "%016" PRIxPTR
#define NO_REG " "
#endif

/* Stack trace function */
void z_riscv_unwind_stack(const struct arch_esf *esf, const _callee_saved_t *csf);

/*
 * Return the stack pointer as it was just before the exception frame was
 * pushed; for faults taken from user mode, return the saved user sp instead.
 */
uintptr_t z_riscv_get_sp_before_exc(const struct arch_esf *esf)
{
	/*
	 * Kernel stack pointer prior this exception i.e. before
	 * storing the exception stack frame.
	 */
	uintptr_t sp = (uintptr_t)esf + sizeof(struct arch_esf);

#ifdef CONFIG_USERSPACE
	if ((esf->mstatus & MSTATUS_MPP) == PRV_U) {
		/*
		 * Exception happened in user space:
		 * consider the saved user stack instead.
		 */
		sp = esf->sp;
	}
#endif

	return sp;
}

/* Convenience wrapper: fatal error with no callee-saved register set */
FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason, const struct arch_esf *esf)
{
	z_riscv_fatal_error_csf(reason, esf, NULL);
}

/*
 * Full fatal-error handler: dump the exception frame and callee-saved
 * registers when available, emit a stack trace, then hand off to the
 * kernel's z_fatal_error(). Never returns.
 */
FUNC_NORETURN void z_riscv_fatal_error_csf(unsigned int reason, const struct arch_esf *esf,
					   const _callee_saved_t *csf)
{
#ifdef CONFIG_EXCEPTION_DEBUG
	if (esf != NULL) {
		LOG_ERR(" a0: " PR_REG " t0: " PR_REG, esf->a0, esf->t0);
		LOG_ERR(" a1: " PR_REG " t1: " PR_REG, esf->a1, esf->t1);
		LOG_ERR(" a2: " PR_REG " t2: " PR_REG, esf->a2, esf->t2);
#if defined(CONFIG_RISCV_ISA_RV32E)
		/* RV32E has only x0-x15: no t3-t6 / a6 / a7 to dump */
		LOG_ERR(" a3: " PR_REG, esf->a3);
		LOG_ERR(" a4: " PR_REG, esf->a4);
		LOG_ERR(" a5: " PR_REG, esf->a5);
#else
		LOG_ERR(" a3: " PR_REG " t3: " PR_REG, esf->a3, esf->t3);
		LOG_ERR(" a4: " PR_REG " t4: " PR_REG, esf->a4, esf->t4);
		LOG_ERR(" a5: " PR_REG " t5: " PR_REG, esf->a5, esf->t5);
		LOG_ERR(" a6: " PR_REG " t6: " PR_REG, esf->a6, esf->t6);
		LOG_ERR(" a7: " PR_REG, esf->a7);
#endif /* CONFIG_RISCV_ISA_RV32E */
		LOG_ERR(" sp: " PR_REG, z_riscv_get_sp_before_exc(esf));
		LOG_ERR(" ra: " PR_REG, esf->ra);
		LOG_ERR(" mepc: " PR_REG, esf->mepc);
		LOG_ERR("mstatus: " PR_REG, esf->mstatus);
		LOG_ERR("");
	}

	if (csf != NULL) {
#if defined(CONFIG_RISCV_ISA_RV32E)
		LOG_ERR(" s0: " PR_REG, csf->s0);
		LOG_ERR(" s1: " PR_REG, csf->s1);
#else
		LOG_ERR(" s0: " PR_REG " s6: " PR_REG, csf->s0, csf->s6);
		LOG_ERR(" s1: " PR_REG " s7: " PR_REG, csf->s1, csf->s7);
		LOG_ERR(" s2: " PR_REG " s8: " PR_REG, csf->s2, csf->s8);
		LOG_ERR(" s3: " PR_REG " s9: " PR_REG, csf->s3, csf->s9);
		LOG_ERR(" s4: " PR_REG " s10: " PR_REG, csf->s4, csf->s10);
		LOG_ERR(" s5: " PR_REG " s11: " PR_REG, csf->s5, csf->s11);
#endif /* CONFIG_RISCV_ISA_RV32E */
		LOG_ERR("");
	}

	if (IS_ENABLED(CONFIG_EXCEPTION_STACK_TRACE)) {
		z_riscv_unwind_stack(esf, csf);
	}
#endif /* CONFIG_EXCEPTION_DEBUG */

	z_fatal_error(reason, esf);
	CODE_UNREACHABLE;
}

/* Map an mcause exception code to a human-readable description */
static char *cause_str(unsigned long cause)
{
	switch (cause) {
	case 0:
		return "Instruction address misaligned";
	case 1:
		return "Instruction Access fault";
	case 2:
		return "Illegal instruction";
	case 3:
		return "Breakpoint";
	case 4:
		return "Load address misaligned";
	case 5:
		return "Load access fault";
	case 6:
		return "Store/AMO address misaligned";
	case 7:
		return "Store/AMO access fault";
	case 8:
		return "Environment call from U-mode";
	case 9:
		return "Environment call from S-mode";
	case 11:
		return "Environment call from M-mode";
	case 12:
		return "Instruction page fault";
	case 13:
		return "Load page fault";
	case 15:
		return "Store/AMO page fault";
	default:
		return "unknown";
	}
}

/*
 * Decide whether the fault was caused by a stack pointer outside its allowed
 * range: a kernel sp landing in a PMP stack-guard area, or a user-mode sp
 * outside the thread's user stack.
 */
static bool bad_stack_pointer(struct arch_esf *esf)
{
#ifdef CONFIG_PMP_STACK_GUARD
	/*
	 * Check if the kernel stack pointer prior this exception (before
	 * storing the exception stack frame) was in the stack guard area.
	 */
	uintptr_t sp = (uintptr_t)esf + sizeof(struct arch_esf);

#ifdef CONFIG_USERSPACE
	if (_current->arch.priv_stack_start != 0 &&
	    sp >= _current->arch.priv_stack_start &&
	    sp < _current->arch.priv_stack_start + Z_RISCV_STACK_GUARD_SIZE) {
		return true;
	}

	if (z_stack_is_user_capable(_current->stack_obj) &&
	    sp >= _current->stack_info.start - K_THREAD_STACK_RESERVED &&
	    sp < _current->stack_info.start - K_THREAD_STACK_RESERVED +
		 Z_RISCV_STACK_GUARD_SIZE) {
		return true;
	}
#endif /* CONFIG_USERSPACE */

	if (sp >= _current->stack_info.start - K_KERNEL_STACK_RESERVED &&
	    sp < _current->stack_info.start - K_KERNEL_STACK_RESERVED +
	SiZ_RISCV_STACK_GUARD_SIZE) {
		return true;
	}
#endif /* CONFIG_PMP_STACK_GUARD */

#ifdef CONFIG_USERSPACE
	if ((esf->mstatus & MSTATUS_MPP) == 0 &&
	    (esf->sp < _current->stack_info.start ||
	     esf->sp > _current->stack_info.start +
		       _current->stack_info.size -
		       _current->stack_info.delta)) {
		/* user stack pointer moved outside of its allowed stack */
		return true;
	}
#endif

	return false;
}

/*
 * C-level machine exception handler. First give registered fixup ranges a
 * chance to recover; otherwise log mcause/mtval and escalate to a fatal
 * error (stack-check failure when the sp looks out of bounds).
 */
void _Fault(struct arch_esf *esf)
{
#ifdef CONFIG_USERSPACE
	/*
	 * Perform an assessment whether an PMP fault shall be
	 * treated as recoverable.
	 */
	for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
		unsigned long start = (unsigned long)exceptions[i].start;
		unsigned long end = (unsigned long)exceptions[i].end;

		if (esf->mepc >= start && esf->mepc < end) {
			/* Fault inside a fixup-capable range: redirect mepc and resume */
			esf->mepc = (unsigned long)exceptions[i].fixup;
			return;
		}
	}
#endif /* CONFIG_USERSPACE */

	unsigned long mcause;

	__asm__ volatile("csrr %0, mcause" : "=r" (mcause));

#ifndef CONFIG_SOC_OPENISA_RV32M1
	unsigned long mtval;

	__asm__ volatile("csrr %0, mtval" : "=r" (mtval));
#endif

	mcause &= CONFIG_RISCV_MCAUSE_EXCEPTION_MASK;
	LOG_ERR("");
	LOG_ERR(" mcause: %ld, %s", mcause, cause_str(mcause));
#ifndef CONFIG_SOC_OPENISA_RV32M1
	LOG_ERR(" mtval: %lx", mtval);
#endif

	unsigned int reason = K_ERR_CPU_EXCEPTION;

	if (bad_stack_pointer(esf)) {
#ifdef CONFIG_PMP_STACK_GUARD
		/*
		 * Remove the thread's PMP setting to prevent triggering a stack
		 * overflow error again due to the previous configuration.
		 */
		z_riscv_pmp_stackguard_disable();
#endif /* CONFIG_PMP_STACK_GUARD */
		reason = K_ERR_STACK_CHK_FAIL;
	}

	z_riscv_fatal_error(reason, esf);
}

#ifdef CONFIG_USERSPACE
/* Syscall-frame oops entry: report a kernel oops from a bad syscall */
FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr)
{
	user_fault(K_ERR_KERNEL_OOPS);
	CODE_UNREACHABLE;
}

/*
 * user_fault syscall implementation: user threads may only raise
 * K_ERR_STACK_CHK_FAIL; anything else is downgraded to a kernel oops.
 */
void z_impl_user_fault(unsigned int reason)
{
	struct arch_esf *oops_esf = _current->syscall_frame;

	if (((_current->base.user_options & K_USER) != 0) &&
	    reason != K_ERR_STACK_CHK_FAIL) {
		reason = K_ERR_KERNEL_OOPS;
	}
	z_riscv_fatal_error(reason, oops_esf);
}

static void z_vrfy_user_fault(unsigned int reason)
{
	z_impl_user_fault(reason);
}

#include <zephyr/syscalls/user_fault_mrsh.c>

#endif /* CONFIG_USERSPACE */
```
/content/code_sandbox/arch/riscv/core/fatal.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,214
```c
/*
 *
 */

/**
 * @file
 * @brief RISCV32 kernel structure member offset definition file
 *
 * This module is responsible for the generation of the absolute symbols whose
 * value represents the member offsets for various RISCV32 kernel
 * structures.
 */

#include <zephyr/arch/exception.h>
#include <zephyr/kernel.h>
#include <kernel_arch_data.h>
#include <gen_offset.h>

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
#include <soc_context.h>
#endif
#ifdef CONFIG_RISCV_SOC_OFFSETS
#include <soc_offsets.h>
#endif

#include <kernel_offsets.h>

/* struct _callee_saved member offsets */
GEN_OFFSET_SYM(_callee_saved_t, sp);
GEN_OFFSET_SYM(_callee_saved_t, ra);
GEN_OFFSET_SYM(_callee_saved_t, s0);
GEN_OFFSET_SYM(_callee_saved_t, s1);
#if !defined(CONFIG_RISCV_ISA_RV32E)
/* s2-s11 exist only outside RV32E (x16+ registers) */
GEN_OFFSET_SYM(_callee_saved_t, s2);
GEN_OFFSET_SYM(_callee_saved_t, s3);
GEN_OFFSET_SYM(_callee_saved_t, s4);
GEN_OFFSET_SYM(_callee_saved_t, s5);
GEN_OFFSET_SYM(_callee_saved_t, s6);
GEN_OFFSET_SYM(_callee_saved_t, s7);
GEN_OFFSET_SYM(_callee_saved_t, s8);
GEN_OFFSET_SYM(_callee_saved_t, s9);
GEN_OFFSET_SYM(_callee_saved_t, s10);
GEN_OFFSET_SYM(_callee_saved_t, s11);
#endif /* !CONFIG_RISCV_ISA_RV32E */

#if defined(CONFIG_FPU_SHARING)
/* FP context offsets for lazy FPU save/restore */
GEN_OFFSET_SYM(z_riscv_fp_context_t, fa0);
GEN_OFFSET_SYM(z_riscv_fp_context_t, fa1);
GEN_OFFSET_SYM(z_riscv_fp_context_t, fa2);
GEN_OFFSET_SYM(z_riscv_fp_context_t, fa3);
GEN_OFFSET_SYM(z_riscv_fp_context_t, fa4);
GEN_OFFSET_SYM(z_riscv_fp_context_t, fa5);
GEN_OFFSET_SYM(z_riscv_fp_context_t, fa6);
GEN_OFFSET_SYM(z_riscv_fp_context_t, fa7);
GEN_OFFSET_SYM(z_riscv_fp_context_t, ft0);
GEN_OFFSET_SYM(z_riscv_fp_context_t, ft1);
GEN_OFFSET_SYM(z_riscv_fp_context_t, ft2);
GEN_OFFSET_SYM(z_riscv_fp_context_t, ft3);
GEN_OFFSET_SYM(z_riscv_fp_context_t, ft4);
GEN_OFFSET_SYM(z_riscv_fp_context_t, ft5);
GEN_OFFSET_SYM(z_riscv_fp_context_t, ft6);
GEN_OFFSET_SYM(z_riscv_fp_context_t, ft7);
GEN_OFFSET_SYM(z_riscv_fp_context_t, ft8);
GEN_OFFSET_SYM(z_riscv_fp_context_t, ft9);
GEN_OFFSET_SYM(z_riscv_fp_context_t, ft10);
GEN_OFFSET_SYM(z_riscv_fp_context_t, ft11);
GEN_OFFSET_SYM(z_riscv_fp_context_t, fs0);
GEN_OFFSET_SYM(z_riscv_fp_context_t, fs1);
GEN_OFFSET_SYM(z_riscv_fp_context_t, fs2);
GEN_OFFSET_SYM(z_riscv_fp_context_t, fs3);
GEN_OFFSET_SYM(z_riscv_fp_context_t, fs4);
GEN_OFFSET_SYM(z_riscv_fp_context_t, fs5);
GEN_OFFSET_SYM(z_riscv_fp_context_t, fs6);
GEN_OFFSET_SYM(z_riscv_fp_context_t, fs7);
GEN_OFFSET_SYM(z_riscv_fp_context_t, fs8);
GEN_OFFSET_SYM(z_riscv_fp_context_t, fs9);
GEN_OFFSET_SYM(z_riscv_fp_context_t, fs10);
GEN_OFFSET_SYM(z_riscv_fp_context_t, fs11);
GEN_OFFSET_SYM(z_riscv_fp_context_t, fcsr);

GEN_OFFSET_SYM(_thread_arch_t, exception_depth);
#endif /* CONFIG_FPU_SHARING */

/* esf member offsets */
GEN_OFFSET_STRUCT(arch_esf, ra);
GEN_OFFSET_STRUCT(arch_esf, t0);
GEN_OFFSET_STRUCT(arch_esf, t1);
GEN_OFFSET_STRUCT(arch_esf, t2);
GEN_OFFSET_STRUCT(arch_esf, a0);
GEN_OFFSET_STRUCT(arch_esf, a1);
GEN_OFFSET_STRUCT(arch_esf, a2);
GEN_OFFSET_STRUCT(arch_esf, a3);
GEN_OFFSET_STRUCT(arch_esf, a4);
GEN_OFFSET_STRUCT(arch_esf, a5);
#if !defined(CONFIG_RISCV_ISA_RV32E)
GEN_OFFSET_STRUCT(arch_esf, t3);
GEN_OFFSET_STRUCT(arch_esf, t4);
GEN_OFFSET_STRUCT(arch_esf, t5);
GEN_OFFSET_STRUCT(arch_esf, t6);
GEN_OFFSET_STRUCT(arch_esf, a6);
GEN_OFFSET_STRUCT(arch_esf, a7);
#endif /* !CONFIG_RISCV_ISA_RV32E */
GEN_OFFSET_STRUCT(arch_esf, mepc);
GEN_OFFSET_STRUCT(arch_esf, mstatus);
GEN_OFFSET_STRUCT(arch_esf, s0);

#ifdef CONFIG_USERSPACE
GEN_OFFSET_STRUCT(arch_esf, sp);
#endif

#if defined(CONFIG_RISCV_SOC_CONTEXT_SAVE)
GEN_OFFSET_STRUCT(arch_esf, soc_context);
#endif
#if defined(CONFIG_RISCV_SOC_OFFSETS)
GEN_SOC_OFFSET_SYMS();
#endif

GEN_ABSOLUTE_SYM(__struct_arch_esf_SIZEOF, sizeof(struct arch_esf));

#ifdef CONFIG_EXCEPTION_DEBUG
/* Callee-saved area size, rounded up for stack alignment */
GEN_ABSOLUTE_SYM(__callee_saved_t_SIZEOF, ROUND_UP(sizeof(_callee_saved_t), ARCH_STACK_PTR_ALIGN));
#endif /* CONFIG_EXCEPTION_DEBUG */

#ifdef CONFIG_USERSPACE
GEN_OFFSET_SYM(_cpu_arch_t, user_exc_sp);
GEN_OFFSET_SYM(_cpu_arch_t, user_exc_tmp0);
GEN_OFFSET_SYM(_cpu_arch_t, user_exc_tmp1);
#endif

GEN_ABS_SYM_END
```
/content/code_sandbox/arch/riscv/core/offsets/offsets.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,208
```objective-c
/*
 *
 */

/**
 * @file
 * @brief Private kernel definitions
 *
 * This file contains private kernel function/macro definitions and various
 * other definitions for the RISCV processor architecture.
 */

#ifndef ZEPHYR_ARCH_RISCV_INCLUDE_KERNEL_ARCH_FUNC_H_
#define ZEPHYR_ARCH_RISCV_INCLUDE_KERNEL_ARCH_FUNC_H_

#include <kernel_arch_data.h>
#include <pmp.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef _ASMLANGUAGE

/* Early arch init: TLS/tp reset, per-CPU bookkeeping, hartid table, PMP setup */
static ALWAYS_INLINE void arch_kernel_init(void)
{
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	__asm__ volatile ("li tp, 0");
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_USERSPACE)
	csr_write(mscratch, &_kernel.cpus[0]);
#endif
#ifdef CONFIG_SMP
	_kernel.cpus[0].arch.hartid = csr_read(mhartid);
	_kernel.cpus[0].arch.online = true;
#endif
#if ((CONFIG_MP_MAX_NUM_CPUS) > 1)
	/* Hart ids of all cpu nodes from devicetree */
	unsigned int cpu_node_list[] = {
		DT_FOREACH_CHILD_STATUS_OKAY_SEP(DT_PATH(cpus), DT_REG_ADDR, (,))
	};
	unsigned int cpu_num, hart_x;

	/* Assign hartids to secondary CPUs, skipping over the boot hart's entry */
	for (cpu_num = 1, hart_x = 0; cpu_num < arch_num_cpus(); cpu_num++) {
		if (cpu_node_list[hart_x] == _kernel.cpus[0].arch.hartid) {
			hart_x++;
		}
		_kernel.cpus[cpu_num].arch.hartid = cpu_node_list[hart_x];
		hart_x++;
	}
#endif
#ifdef CONFIG_RISCV_PMP
	z_riscv_pmp_init();
#endif
}

/* Context switch: direct call, or via ecall when configured to always trap */
static ALWAYS_INLINE void
arch_switch(void *switch_to, void **switched_from)
{
	extern void z_riscv_switch(struct k_thread *new, struct k_thread *old);
	struct k_thread *new = switch_to;
	struct k_thread *old = CONTAINER_OF(switched_from, struct k_thread,
					    switch_handle);
#ifdef CONFIG_RISCV_ALWAYS_SWITCH_THROUGH_ECALL
	arch_syscall_invoke2((uintptr_t)new, (uintptr_t)old, RV_ECALL_SCHEDULE);
#else
	z_riscv_switch(new, old);
#endif
}

/* Thin wrapper around z_riscv_fatal_error_csf */
FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason,
				       const struct arch_esf *esf);

FUNC_NORETURN void z_riscv_fatal_error_csf(unsigned int reason, const struct arch_esf *esf,
					   const _callee_saved_t *csf);

/* True when executing in interrupt context (nested count non-zero) */
static inline bool arch_is_in_isr(void)
{
#ifdef CONFIG_SMP
	/* Lock IRQs so the current CPU cannot change under us while reading */
	unsigned int key = arch_irq_lock();
	bool ret = arch_curr_cpu()->nested != 0U;

	arch_irq_unlock(key);
	return ret;
#else
	return _kernel.cpus[0].nested != 0U;
#endif
}

extern FUNC_NORETURN void z_riscv_userspace_enter(k_thread_entry_t user_entry,
						  void *p1, void *p2, void *p3,
						  uint32_t stack_end,
						  uint32_t stack_start);

#ifdef CONFIG_IRQ_OFFLOAD
int z_irq_do_offload(void);
#endif

#ifdef CONFIG_FPU_SHARING
void arch_flush_local_fpu(void);
void arch_flush_fpu_ipi(unsigned int cpu);
#endif

#ifndef CONFIG_MULTITHREADING
extern FUNC_NORETURN void z_riscv_switch_to_main_no_multithreading(
	k_thread_entry_t main_func, void *p1, void *p2, void *p3);

#define ARCH_SWITCH_TO_MAIN_NO_MULTITHREADING \
	z_riscv_switch_to_main_no_multithreading
#endif /* !CONFIG_MULTITHREADING */

#endif /* _ASMLANGUAGE */

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_ARCH_RISCV_INCLUDE_KERNEL_ARCH_FUNC_H_ */
```
/content/code_sandbox/arch/riscv/include/kernel_arch_func.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
810
```objective-c
/*
 *
 */

/**
 * @file
 * @brief Private kernel definitions
 *
 * This file contains private kernel structures definitions and various
 * other definitions for the RISCV processor architecture.
 *
 * Currently holds no declarations of its own; it exists as the arch-private
 * include point aggregating the headers below.
 */

#ifndef ZEPHYR_ARCH_RISCV_INCLUDE_KERNEL_ARCH_DATA_H_
#define ZEPHYR_ARCH_RISCV_INCLUDE_KERNEL_ARCH_DATA_H_

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/cpu.h>

#ifndef _ASMLANGUAGE
#include <zephyr/kernel.h>
#include <zephyr/types.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/dlist.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_ARCH_RISCV_INCLUDE_KERNEL_ARCH_DATA_H_ */
```
/content/code_sandbox/arch/riscv/include/kernel_arch_data.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
182
```objective-c
/*
 *
 */

/*
 * Shorthand byte offsets (thread struct base -> member) used by the arch
 * assembly code; built from the generated ___*_OFFSET absolute symbols.
 */

#ifndef ZEPHYR_ARCH_RISCV_INCLUDE_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_RISCV_INCLUDE_OFFSETS_SHORT_ARCH_H_

#include <zephyr/offsets.h>

/* Offsets of the callee-saved registers within struct k_thread */
#define _thread_offset_to_sp \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_sp_OFFSET)
#define _thread_offset_to_ra \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_ra_OFFSET)
#define _thread_offset_to_s0 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s0_OFFSET)
#define _thread_offset_to_s1 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s1_OFFSET)
#define _thread_offset_to_s2 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s2_OFFSET)
#define _thread_offset_to_s3 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s3_OFFSET)
#define _thread_offset_to_s4 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s4_OFFSET)
#define _thread_offset_to_s5 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s5_OFFSET)
#define _thread_offset_to_s6 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s6_OFFSET)
#define _thread_offset_to_s7 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s7_OFFSET)
#define _thread_offset_to_s8 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s8_OFFSET)
#define _thread_offset_to_s9 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s9_OFFSET)
#define _thread_offset_to_s10 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s10_OFFSET)
#define _thread_offset_to_s11 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s11_OFFSET)

#define _thread_offset_to_swap_return_value \
	(___thread_t_arch_OFFSET + ___thread_arch_t_swap_return_value_OFFSET)

#if defined(CONFIG_FPU_SHARING)
#define _thread_offset_to_exception_depth \
	(___thread_t_arch_OFFSET + ___thread_arch_t_exception_depth_OFFSET)
#endif

#ifdef CONFIG_USERSPACE
/* Per-CPU scratch slots used on user <-> kernel exception entry */
#define _curr_cpu_arch_user_exc_sp \
	(___cpu_t_arch_OFFSET + ___cpu_arch_t_user_exc_sp_OFFSET)
#define _curr_cpu_arch_user_exc_tmp0 \
	(___cpu_t_arch_OFFSET + ___cpu_arch_t_user_exc_tmp0_OFFSET)
#define _curr_cpu_arch_user_exc_tmp1 \
	(___cpu_t_arch_OFFSET + ___cpu_arch_t_user_exc_tmp1_OFFSET)
#endif

#endif /* ZEPHYR_ARCH_RISCV_INCLUDE_OFFSETS_SHORT_ARCH_H_ */
```
/content/code_sandbox/arch/riscv/include/offsets_short_arch.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
554
```objective-c
/*
 *
 */

/* Internal API of the RISC-V PMP (Physical Memory Protection) driver */

#ifndef PMP_H_
#define PMP_H_

/* One-time per-CPU PMP setup: program the global (locked) entries */
void z_riscv_pmp_init(void);

/* Precompute the m-mode stack-guard PMP entries for `thread` */
void z_riscv_pmp_stackguard_prepare(struct k_thread *thread);

/* Load `thread`'s stack-guard PMP entries into the PMP registers */
void z_riscv_pmp_stackguard_enable(struct k_thread *thread);

/* Turn off the current stack-guard PMP protection */
void z_riscv_pmp_stackguard_disable(void);

/* Initialize the u-mode PMP state of a new user-capable thread */
void z_riscv_pmp_usermode_init(struct k_thread *thread);

/* Precompute the u-mode PMP entries for `thread` */
void z_riscv_pmp_usermode_prepare(struct k_thread *thread);

/* Load `thread`'s u-mode PMP entries into the PMP registers */
void z_riscv_pmp_usermode_enable(struct k_thread *thread);

#endif /* PMP_H_ */
```
/content/code_sandbox/arch/riscv/include/pmp.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
123
```c
/*
 *
 * Physical Memory Protection (PMP) is RISC-V parlance for an MPU.
 *
 * The PMP is comprised of a number of entries or slots. This number depends
 * on the hardware design. For each slot there is an address register and
 * a configuration register. While each address register is matched to an
 * actual CSR register, configuration registers are small and therefore
 * several of them are bundled in a few additional CSR registers.
 *
 * PMP slot configurations are updated in memory to avoid read-modify-write
 * cycles on corresponding CSR registers. Relevant CSR registers are always
 * written in batch from their shadow copy in RAM for better efficiency.
 *
 * In the stackguard case we keep an m-mode copy for each thread. Each user
 * mode threads also has a u-mode copy. This makes faster context switching
 * as precomputed content just have to be written to actual registers with
 * no additional processing.
 *
 * Thread-specific m-mode and u-mode PMP entries start from the PMP slot
 * indicated by global_pmp_end_index. Lower slots are used by global entries
 * which are never modified.
 */

#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <zephyr/linker/linker-defs.h>
#include <pmp.h>
#include <zephyr/arch/arch_interface.h>
#include <zephyr/arch/riscv/csr.h>

#define LOG_LEVEL CONFIG_MPU_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(mpu);

#define PMP_DEBUG_DUMP 0

#ifdef CONFIG_64BIT
# define PR_ADDR "0x%016lx"
#else
# define PR_ADDR "0x%08lx"
#endif

/* Address-matching modes disabled by Kconfig are compiled out */
#define PMP_TOR_SUPPORTED	!IS_ENABLED(CONFIG_PMP_NO_TOR)
#define PMP_NA4_SUPPORTED	!IS_ENABLED(CONFIG_PMP_NO_NA4)
#define PMP_NAPOT_SUPPORTED	!IS_ENABLED(CONFIG_PMP_NO_NAPOT)

/* Number of 8-bit pmpNcfg fields packed into one pmpcfg CSR */
#define PMPCFG_STRIDE sizeof(unsigned long)

/* pmpaddr registers hold addr >> 2; NAPOT encodes the range in the low bits */
#define PMP_ADDR(addr)			((addr) >> 2)
#define NAPOT_RANGE(size)		(((size) - 1) >> 1)
#define PMP_ADDR_NAPOT(addr, size)	PMP_ADDR(addr | NAPOT_RANGE(size))

#define PMP_NONE 0

/* Decode and log PMP entries [pmp_start, pmp_end) from the shadow arrays */
static void print_pmp_entries(unsigned int pmp_start, unsigned int pmp_end,
			      unsigned long *pmp_addr, unsigned long *pmp_cfg,
			      const char *banner)
{
	uint8_t *pmp_n_cfg = (uint8_t *)pmp_cfg;
	unsigned int index;

	LOG_DBG("PMP %s:", banner);
	for (index = pmp_start; index < pmp_end; index++) {
		unsigned long start, end, tmp;

		/* Reconstruct the [start, end] byte range from the matching mode */
		switch (pmp_n_cfg[index] & PMP_A) {
		case PMP_TOR:
			start = (index == 0) ? 0 : (pmp_addr[index - 1] << 2);
			end = (pmp_addr[index] << 2) - 1;
			break;
		case PMP_NA4:
			start = pmp_addr[index] << 2;
			end = start + 3;
			break;
		case PMP_NAPOT:
			tmp = (pmp_addr[index] << 2) | 0x3;
			start = tmp & (tmp + 1);
			end = tmp | (tmp + 1);
			break;
		default:
			start = 0;
			end = 0;
			break;
		}

		if (end == 0) {
			LOG_DBG("%3d: "PR_ADDR" 0x%02x", index,
				pmp_addr[index],
				pmp_n_cfg[index]);
		} else {
			LOG_DBG("%3d: "PR_ADDR" 0x%02x --> "
				PR_ADDR"-"PR_ADDR" %c%c%c%s",
				index, pmp_addr[index], pmp_n_cfg[index],
				start, end,
				(pmp_n_cfg[index] & PMP_R) ? 'R' : '-',
				(pmp_n_cfg[index] & PMP_W) ? 'W' : '-',
				(pmp_n_cfg[index] & PMP_X) ? 'X' : '-',
				(pmp_n_cfg[index] & PMP_L) ? " LOCKED" : "");
		}
	}
}

/* Read the live PMP CSRs into local arrays and log them (debug aid) */
static void dump_pmp_regs(const char *banner)
{
	unsigned long pmp_addr[CONFIG_PMP_SLOTS];
	unsigned long pmp_cfg[CONFIG_PMP_SLOTS / PMPCFG_STRIDE];

#define PMPADDR_READ(x) pmp_addr[x] = csr_read(pmpaddr##x)

	FOR_EACH(PMPADDR_READ, (;), 0, 1, 2, 3, 4, 5, 6, 7);
#if CONFIG_PMP_SLOTS > 8
	FOR_EACH(PMPADDR_READ, (;), 8, 9, 10, 11, 12, 13, 14, 15);
#endif

#undef PMPADDR_READ

	/* On RV64 only the even-numbered pmpcfg CSRs exist */
#ifdef CONFIG_64BIT
	pmp_cfg[0] = csr_read(pmpcfg0);
#if CONFIG_PMP_SLOTS > 8
	pmp_cfg[1] = csr_read(pmpcfg2);
#endif
#else
	pmp_cfg[0] = csr_read(pmpcfg0);
	pmp_cfg[1] = csr_read(pmpcfg1);
#if CONFIG_PMP_SLOTS > 8
	pmp_cfg[2] = csr_read(pmpcfg2);
	pmp_cfg[3] = csr_read(pmpcfg3);
#endif
#endif
	print_pmp_entries(0, CONFIG_PMP_SLOTS, pmp_addr, pmp_cfg, banner);
}

/**
 * @brief Set PMP shadow register values in memory
 *
 * Register content is built using this function which selects the most
 * appropriate address matching mode automatically. Note that the special
 * case start=0 size=0 is valid and means the whole address range.
 *
 * @param index_p Location of the current PMP slot index to use. This index
 *                will be updated according to the number of slots used.
 * @param perm PMP permission flags
 * @param start Start address of the memory area to cover
 * @param size Size of the memory area to cover
 * @param pmp_addr Array of pmpaddr values (starting at entry 0).
 * @param pmp_cfg Array of pmpcfg values (starting at entry 0).
 * @param index_limit Index value representing the size of the provided arrays.
 * @return true on success, false when out of free PMP slots.
 */
static bool set_pmp_entry(unsigned int *index_p, uint8_t perm,
			  uintptr_t start, size_t size,
			  unsigned long *pmp_addr, unsigned long *pmp_cfg,
			  unsigned int index_limit)
{
	uint8_t *pmp_n_cfg = (uint8_t *)pmp_cfg;
	unsigned int index = *index_p;
	bool ok = true;

	__ASSERT((start & (CONFIG_PMP_GRANULARITY - 1)) == 0, "misaligned start address");
	__ASSERT((size & (CONFIG_PMP_GRANULARITY - 1)) == 0, "misaligned size");

	if (index >= index_limit) {
		LOG_ERR("out of PMP slots");
		ok = false;
	} else if (PMP_TOR_SUPPORTED &&
		   ((index == 0 && start == 0) ||
		    (index != 0 && pmp_addr[index - 1] == PMP_ADDR(start)))) {
		/* We can use TOR using only one additional slot */
		pmp_addr[index] = PMP_ADDR(start + size);
		pmp_n_cfg[index] = perm | PMP_TOR;
		index += 1;
	} else if (PMP_NA4_SUPPORTED && size == 4) {
		pmp_addr[index] = PMP_ADDR(start);
		pmp_n_cfg[index] = perm | PMP_NA4;
		index += 1;
	} else if (PMP_NAPOT_SUPPORTED &&
		   ((size  & (size - 1)) == 0) /* power of 2 */ &&
		   ((start & (size - 1)) == 0) /* naturally aligned */ &&
		   (PMP_NA4_SUPPORTED || (size != 4))) {
		pmp_addr[index] = PMP_ADDR_NAPOT(start, size);
		pmp_n_cfg[index] = perm | PMP_NAPOT;
		index += 1;
	} else if (PMP_TOR_SUPPORTED && index + 1 >= index_limit) {
		LOG_ERR("out of PMP slots");
		ok = false;
	} else if (PMP_TOR_SUPPORTED) {
		/* General case: TOR pair (bottom marker + top entry), two slots */
		pmp_addr[index] = PMP_ADDR(start);
		pmp_n_cfg[index] = 0;
		index += 1;
		pmp_addr[index] = PMP_ADDR(start + size);
		pmp_n_cfg[index] = perm | PMP_TOR;
		index += 1;
	} else {
		LOG_ERR("inappropriate PMP range (start=%#lx size=%#zx)", start, size);
		ok = false;
	}

	*index_p = index;
	return ok;
}

static inline bool set_pmp_mprv_catchall(unsigned int *index_p,
					 unsigned long *pmp_addr, unsigned long *pmp_cfg,
					 unsigned int index_limit)
{
	/*
	 * We'll be using MPRV. Make a fallback entry with everything
	 * accessible as if no PMP entries were matched which is otherwise
	 * the default behavior for m-mode without MPRV.
	 */
	bool ok = set_pmp_entry(index_p, PMP_R | PMP_W | PMP_X,
				0, 0, pmp_addr, pmp_cfg, index_limit);

#ifdef CONFIG_QEMU_TARGET
	if (ok) {
		/*
		 * Workaround: The above produced 0x1fffffff which is correct.
		 * But there is a QEMU bug that prevents it from interpreting
		 * this value correctly. Hardcode the special case used by
		 * QEMU to bypass this bug for now. The QEMU fix is here:
		 * path_to_url
		 */
		pmp_addr[*index_p - 1] = -1L;
	}
#endif

	return ok;
}

/**
 * @brief Write a range of PMP entries to corresponding PMP registers
 *
 * PMP registers are accessed with the csr instruction which only takes an
 * immediate value as the actual register. This is performed more efficiently
 * in assembly code (pmp.S) than what is possible with C code.
 *
 * Requirement: start < end && end <= CONFIG_PMP_SLOTS
 *
 * @param start Start of the PMP range to be written
 * @param end End (exclusive) of the PMP range to be written
 * @param clear_trailing_entries True if trailing entries must be turned off
 * @param pmp_addr Array of pmpaddr values (starting at entry 0).
 * @param pmp_cfg Array of pmpcfg values (starting at entry 0).
 */
extern void z_riscv_write_pmp_entries(unsigned int start, unsigned int end,
				      bool clear_trailing_entries,
				      const unsigned long *pmp_addr,
				      const unsigned long *pmp_cfg);

/**
 * @brief Write a range of PMP entries to corresponding PMP registers
 *
 * This performs some sanity checks before calling z_riscv_write_pmp_entries().
 *
 * @param start Start of the PMP range to be written
 * @param end End (exclusive) of the PMP range to be written
 * @param clear_trailing_entries True if trailing entries must be turned off
 * @param pmp_addr Array of pmpaddr values (starting at entry 0).
 * @param pmp_cfg Array of pmpcfg values (starting at entry 0).
 * @param index_limit Index value representing the size of the provided arrays.
 */
static void write_pmp_entries(unsigned int start, unsigned int end,
			      bool clear_trailing_entries,
			      unsigned long *pmp_addr, unsigned long *pmp_cfg,
			      unsigned int index_limit)
{
	__ASSERT(start < end && end <= index_limit &&
		 index_limit <= CONFIG_PMP_SLOTS,
		 "bad PMP range (start=%u end=%u)", start, end);

	/* Be extra paranoid in case assertions are disabled */
	if (start >= end || end > index_limit) {
		k_panic();
	}

	if (clear_trailing_entries) {
		/*
		 * There are many config entries per pmpcfg register.
		 * Make sure to clear trailing garbage in the last
		 * register to be written if any. Remaining registers
		 * will be cleared in z_riscv_write_pmp_entries().
		 */
		uint8_t *pmp_n_cfg = (uint8_t *)pmp_cfg;
		unsigned int index;

		for (index = end; index % PMPCFG_STRIDE != 0; index++) {
			pmp_n_cfg[index] = 0;
		}
	}

	print_pmp_entries(start, end, pmp_addr, pmp_cfg, "register write");

#ifdef CONFIG_QEMU_TARGET
	/*
	 * A QEMU bug may create bad transient PMP representations causing
	 * false access faults to be reported. Work around it by setting
	 * pmp registers to zero from the update start point to the end
	 * before updating them with new values.
	 * The QEMU fix is here with more details about this bug:
	 * path_to_url
	 */
	static const unsigned long pmp_zero[CONFIG_PMP_SLOTS] = { 0, };

	z_riscv_write_pmp_entries(start, CONFIG_PMP_SLOTS, false, pmp_zero, pmp_zero);
#endif

	z_riscv_write_pmp_entries(start, end, clear_trailing_entries,
				  pmp_addr, pmp_cfg);
}

/**
 * @brief Abstract the last 3 arguments to set_pmp_entry() and
 *        write_pmp_entries( for m-mode.
 */
#define PMP_M_MODE(thread) \
	thread->arch.m_mode_pmpaddr_regs, \
	thread->arch.m_mode_pmpcfg_regs, \
	ARRAY_SIZE(thread->arch.m_mode_pmpaddr_regs)

/**
 * @brief Abstract the last 3 arguments to set_pmp_entry() and
 *        write_pmp_entries( for u-mode.
 */
#define PMP_U_MODE(thread) \
	thread->arch.u_mode_pmpaddr_regs, \
	thread->arch.u_mode_pmpcfg_regs, \
	ARRAY_SIZE(thread->arch.u_mode_pmpaddr_regs)

/*
 * This is used to seed thread PMP copies with global m-mode cfg entries
 * sharing the same cfg register. Locked entries aren't modifiable but
 * we could have non-locked entries here too.
 */
static unsigned long global_pmp_cfg[1];
static unsigned long global_pmp_last_addr;

/* End of global PMP entry range */
static unsigned int global_pmp_end_index;

/**
 * @brief Initialize the PMP with global entries on each CPU
 */
void z_riscv_pmp_init(void)
{
	unsigned long pmp_addr[5];
	unsigned long pmp_cfg[2];
	unsigned int index = 0;

	/* The read-only area is always there for every mode */
	set_pmp_entry(&index, PMP_R | PMP_X | PMP_L,
		      (uintptr_t)__rom_region_start,
		      (size_t)__rom_region_size,
		      pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));

#ifdef CONFIG_NULL_POINTER_EXCEPTION_DETECTION_PMP
	/*
	 * Use a PMP slot to make region (starting at address 0x0) inaccessible
	 * for detecting null pointer dereferencing.
	 */
	set_pmp_entry(&index, PMP_NONE | PMP_L,
		      0,
		      CONFIG_NULL_POINTER_EXCEPTION_REGION_SIZE,
		      pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
#endif

#ifdef CONFIG_PMP_STACK_GUARD
	/*
	 * Set the stack guard for this CPU's IRQ stack by making the bottom
	 * addresses inaccessible. This will never change so we do it here
	 * and lock it too.
	 */
	set_pmp_entry(&index, PMP_NONE | PMP_L,
		      (uintptr_t)z_interrupt_stacks[_current_cpu->id],
		      Z_RISCV_STACK_GUARD_SIZE,
		      pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));

	/*
	 * This early, the kernel init code uses the IRQ stack and we want to
	 * safeguard it as soon as possible. But we need a temporary default
	 * "catch all" PMP entry for MPRV to work. Later on, this entry will
	 * be set for each thread by z_riscv_pmp_stackguard_prepare().
	 */
	set_pmp_mprv_catchall(&index, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));

	/* Write those entries to PMP regs.
*/ write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr)); /* Activate our non-locked PMP entries for m-mode */ csr_set(mstatus, MSTATUS_MPRV); /* And forget about that last entry as we won't need it later */ index--; #else /* Write those entries to PMP regs. */ write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr)); #endif #ifdef CONFIG_SMP #ifdef CONFIG_PMP_STACK_GUARD /* * The IRQ stack guard area is different for each CPU. * Make sure TOR entry sharing won't be attempted with it by * remembering a bogus address for those entries. */ pmp_addr[index - 1] = -1L; #endif /* Make sure secondary CPUs produced the same values */ if (global_pmp_end_index != 0) { __ASSERT(global_pmp_end_index == index, ""); __ASSERT(global_pmp_cfg[0] == pmp_cfg[0], ""); __ASSERT(global_pmp_last_addr == pmp_addr[index - 1], ""); } #endif __ASSERT(index <= PMPCFG_STRIDE, "provision for one global word only"); global_pmp_cfg[0] = pmp_cfg[0]; global_pmp_last_addr = pmp_addr[index - 1]; global_pmp_end_index = index; if (PMP_DEBUG_DUMP) { dump_pmp_regs("initial register dump"); } } /** * @Brief Initialize the per-thread PMP register copy with global values. */ static inline unsigned int z_riscv_pmp_thread_init(unsigned long *pmp_addr, unsigned long *pmp_cfg, unsigned int index_limit) { ARG_UNUSED(index_limit); /* * Retrieve pmpcfg0 partial content from global entries. */ pmp_cfg[0] = global_pmp_cfg[0]; /* * Retrieve the pmpaddr value matching the last global PMP slot. * This is so that set_pmp_entry() can safely attempt TOR with it. */ pmp_addr[global_pmp_end_index - 1] = global_pmp_last_addr; return global_pmp_end_index; } #ifdef CONFIG_PMP_STACK_GUARD /** * @brief Prepare the PMP stackguard content for given thread. * * This is called once during new thread creation. 
*/ void z_riscv_pmp_stackguard_prepare(struct k_thread *thread) { unsigned int index = z_riscv_pmp_thread_init(PMP_M_MODE(thread)); uintptr_t stack_bottom; /* make the bottom addresses of our stack inaccessible */ stack_bottom = thread->stack_info.start - K_KERNEL_STACK_RESERVED; #ifdef CONFIG_USERSPACE if (thread->arch.priv_stack_start != 0) { stack_bottom = thread->arch.priv_stack_start; } else if (z_stack_is_user_capable(thread->stack_obj)) { stack_bottom = thread->stack_info.start - K_THREAD_STACK_RESERVED; } #endif set_pmp_entry(&index, PMP_NONE, stack_bottom, Z_RISCV_STACK_GUARD_SIZE, PMP_M_MODE(thread)); set_pmp_mprv_catchall(&index, PMP_M_MODE(thread)); /* remember how many entries we use */ thread->arch.m_mode_pmp_end_index = index; } /** * @brief Write PMP stackguard content to actual PMP registers * * This is called on every context switch. */ void z_riscv_pmp_stackguard_enable(struct k_thread *thread) { LOG_DBG("pmp_stackguard_enable for thread %p", thread); /* * Disable (non-locked) PMP entries for m-mode while we update them. * While at it, also clear MSTATUS_MPP as it must be cleared for * MSTATUS_MPRV to be effective later. */ csr_clear(mstatus, MSTATUS_MPRV | MSTATUS_MPP); /* Write our m-mode MPP entries */ write_pmp_entries(global_pmp_end_index, thread->arch.m_mode_pmp_end_index, false /* no need to clear to the end */, PMP_M_MODE(thread)); if (PMP_DEBUG_DUMP) { dump_pmp_regs("m-mode register dump"); } /* Activate our non-locked PMP entries in m-mode */ csr_set(mstatus, MSTATUS_MPRV); } /** * @brief Remove PMP stackguard content to actual PMP registers */ void z_riscv_pmp_stackguard_disable(void) { unsigned long pmp_addr[PMP_M_MODE_SLOTS]; unsigned long pmp_cfg[PMP_M_MODE_SLOTS / sizeof(unsigned long)]; unsigned int index = global_pmp_end_index; /* Retrieve the pmpaddr value matching the last global PMP slot. */ pmp_addr[global_pmp_end_index - 1] = global_pmp_last_addr; /* Disable (non-locked) PMP entries for m-mode while we update them. 
*/ csr_clear(mstatus, MSTATUS_MPRV); /* * Set a temporary default "catch all" PMP entry for MPRV to work, * except for the global locked entries. */ set_pmp_mprv_catchall(&index, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr)); /* Write "catch all" entry and clear unlocked entries to PMP regs. */ write_pmp_entries(global_pmp_end_index, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr)); if (PMP_DEBUG_DUMP) { dump_pmp_regs("catch all register dump"); } } #endif /* CONFIG_PMP_STACK_GUARD */ #ifdef CONFIG_USERSPACE /** * @brief Initialize the usermode portion of the PMP configuration. * * This is called once during new thread creation. */ void z_riscv_pmp_usermode_init(struct k_thread *thread) { /* Only indicate that the u-mode PMP is not prepared yet */ thread->arch.u_mode_pmp_end_index = 0; } /** * @brief Prepare the u-mode PMP content for given thread. * * This is called once before making the transition to usermode. */ void z_riscv_pmp_usermode_prepare(struct k_thread *thread) { unsigned int index = z_riscv_pmp_thread_init(PMP_U_MODE(thread)); LOG_DBG("pmp_usermode_prepare for thread %p", thread); /* Map the usermode stack */ set_pmp_entry(&index, PMP_R | PMP_W, thread->stack_info.start, thread->stack_info.size, PMP_U_MODE(thread)); thread->arch.u_mode_pmp_domain_offset = index; thread->arch.u_mode_pmp_end_index = index; thread->arch.u_mode_pmp_update_nr = 0; } /** * @brief Convert partition information into PMP entries */ static void resync_pmp_domain(struct k_thread *thread, struct k_mem_domain *domain) { unsigned int index = thread->arch.u_mode_pmp_domain_offset; int p_idx, remaining_partitions; bool ok; k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock); remaining_partitions = domain->num_partitions; for (p_idx = 0; remaining_partitions > 0; p_idx++) { struct k_mem_partition *part = &domain->partitions[p_idx]; if (part->size == 0) { /* skip empty partition */ continue; } remaining_partitions--; if (part->size < 4) { /* * 4 bytes is the minimum we can map */ 
LOG_ERR("non-empty partition too small"); __ASSERT(false, ""); continue; } ok = set_pmp_entry(&index, part->attr.pmp_attr, part->start, part->size, PMP_U_MODE(thread)); __ASSERT(ok, "no PMP slot left for %d remaining partitions in domain %p", remaining_partitions + 1, domain); } thread->arch.u_mode_pmp_end_index = index; thread->arch.u_mode_pmp_update_nr = domain->arch.pmp_update_nr; k_spin_unlock(&z_mem_domain_lock, key); } /** * @brief Write PMP usermode content to actual PMP registers * * This is called on every context switch. */ void z_riscv_pmp_usermode_enable(struct k_thread *thread) { struct k_mem_domain *domain = thread->mem_domain_info.mem_domain; LOG_DBG("pmp_usermode_enable for thread %p with domain %p", thread, domain); if (thread->arch.u_mode_pmp_end_index == 0) { /* z_riscv_pmp_usermode_prepare() has not been called yet */ return; } if (thread->arch.u_mode_pmp_update_nr != domain->arch.pmp_update_nr) { /* * Resynchronize our PMP entries with * the latest domain partition information. */ resync_pmp_domain(thread, domain); } #ifdef CONFIG_PMP_STACK_GUARD /* Make sure m-mode PMP usage is disabled before we reprogram it */ csr_clear(mstatus, MSTATUS_MPRV); #endif /* Write our u-mode MPP entries */ write_pmp_entries(global_pmp_end_index, thread->arch.u_mode_pmp_end_index, true /* must clear to the end */, PMP_U_MODE(thread)); if (PMP_DEBUG_DUMP) { dump_pmp_regs("u-mode register dump"); } } int arch_mem_domain_max_partitions_get(void) { int available_pmp_slots = CONFIG_PMP_SLOTS; /* remove those slots dedicated to global entries */ available_pmp_slots -= global_pmp_end_index; /* * User thread stack mapping: * 1 slot if CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT=y, * most likely 2 slots otherwise. */ available_pmp_slots -= IS_ENABLED(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT) ? 1 : 2; /* * Each partition may require either 1 or 2 PMP slots depending * on a couple factors that are not known in advance. 
Even when * arch_mem_domain_partition_add() is called, we can't tell if a * given partition will fit in the remaining PMP slots of an * affected thread if it hasn't executed in usermode yet. * * Give the most optimistic answer here (which should be pretty * accurate if CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT=y) and be * prepared to deny availability in resync_pmp_domain() if this * estimate was too high. */ return available_pmp_slots; } int arch_mem_domain_init(struct k_mem_domain *domain) { domain->arch.pmp_update_nr = 0; return 0; } int arch_mem_domain_partition_add(struct k_mem_domain *domain, uint32_t partition_id) { /* Force resynchronization for every thread using this domain */ domain->arch.pmp_update_nr += 1; return 0; } int arch_mem_domain_partition_remove(struct k_mem_domain *domain, uint32_t partition_id) { /* Force resynchronization for every thread using this domain */ domain->arch.pmp_update_nr += 1; return 0; } int arch_mem_domain_thread_add(struct k_thread *thread) { /* Force resynchronization for this thread */ thread->arch.u_mode_pmp_update_nr = 0; return 0; } int arch_mem_domain_thread_remove(struct k_thread *thread) { return 0; } #define IS_WITHIN(inner_start, inner_size, outer_start, outer_size) \ ((inner_start) >= (outer_start) && (inner_size) <= (outer_size) && \ ((inner_start) - (outer_start)) <= ((outer_size) - (inner_size))) int arch_buffer_validate(const void *addr, size_t size, int write) { uintptr_t start = (uintptr_t)addr; int ret = -1; /* Check if this is on the stack */ if (IS_WITHIN(start, size, _current->stack_info.start, _current->stack_info.size)) { return 0; } /* Check if this is within the global read-only area */ if (!write) { uintptr_t ro_start = (uintptr_t)__rom_region_start; size_t ro_size = (size_t)__rom_region_size; if (IS_WITHIN(start, size, ro_start, ro_size)) { return 0; } } /* Look for a matching partition in our memory domain */ struct k_mem_domain *domain = _current->mem_domain_info.mem_domain; int p_idx, 
remaining_partitions; k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock); remaining_partitions = domain->num_partitions; for (p_idx = 0; remaining_partitions > 0; p_idx++) { struct k_mem_partition *part = &domain->partitions[p_idx]; if (part->size == 0) { /* unused partition */ continue; } remaining_partitions--; if (!IS_WITHIN(start, size, part->start, part->size)) { /* unmatched partition */ continue; } /* partition matched: determine access result */ if ((part->attr.pmp_attr & (write ? PMP_W : PMP_R)) != 0) { ret = 0; } break; } k_spin_unlock(&z_mem_domain_lock, key); return ret; } #endif /* CONFIG_USERSPACE */ ```
/content/code_sandbox/arch/riscv/core/pmp.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,590
```python
#!/usr/bin/env python3
#
#
"""Generate Interrupt Descriptor Table for x86 CPUs.

This script generates the interrupt descriptor table (IDT) for x86.
Please consult the IA Architecture SW Developer Manual, volume 3,
for more details on this data structure.

This script accepts as input the zephyr_prebuilt.elf binary,
which is a link of the Zephyr kernel without various build-time
generated data structures (such as the IDT) inserted into it.
This kernel image has been properly padded such that inserting
these data structures will not disturb the memory addresses of
other symbols.

From the kernel binary we read a special section "intList" which
contains the desired interrupt routing configuration for the kernel,
populated by instances of the IRQ_CONNECT() macro.

This script outputs three binary tables:

1. The interrupt descriptor table itself.
2. A bitfield indicating which vectors in the IDT are free for
   installation of dynamic interrupts at runtime.
3. An array which maps configured IRQ lines to their associated
   vector entries in the IDT, used to program the APIC at runtime.
"""

import argparse
import sys
import struct
import os
import elftools
from packaging import version
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection

if version.parse(elftools.__version__) < version.parse('0.24'):
    sys.exit("pyelftools is out of date, need version 0.24 or later")

# This will never change, first selector in the GDT after the null selector
KERNEL_CODE_SEG = 0x08

# These exception vectors push an error code onto the stack.
ERR_CODE_VECTORS = [8, 10, 11, 12, 13, 14, 17]


def debug(text):
    """Print a diagnostic message when --verbose is in effect."""
    if not args.verbose:
        return
    sys.stdout.write(os.path.basename(sys.argv[0]) + ": " + text + "\n")


def error(text):
    """Print an error message and terminate with a non-zero exit code."""
    sys.exit(os.path.basename(sys.argv[0]) + ": " + text)


# See Section 6.11 of the Intel Architecture Software Developer's Manual
gate_desc_format = "<HHBBH"


def create_irq_gate(handler, dpl):
    """Pack an 8-byte 32-bit interrupt-gate descriptor for `handler`."""
    present = 1
    gate_type = 0xE  # 32-bit interrupt gate
    type_attr = gate_type | (dpl << 5) | (present << 7)

    offset_hi = handler >> 16
    offset_lo = handler & 0xFFFF

    data = struct.pack(gate_desc_format, offset_lo, KERNEL_CODE_SEG, 0,
                       type_attr, offset_hi)
    return data


def create_task_gate(tss, dpl):
    """Pack an 8-byte 32-bit task-gate descriptor referencing TSS selector `tss`."""
    present = 1
    gate_type = 0x5  # 32-bit task gate
    type_attr = gate_type | (dpl << 5) | (present << 7)

    data = struct.pack(gate_desc_format, 0, tss, 0, type_attr, 0)
    return data


def create_idt_binary(idt_config, filename):
    """Write the IDT binary: one gate descriptor per (handler, tss, dpl) tuple.

    Each entry must specify exactly one of `handler` (interrupt gate) or
    `tss` (task gate).
    """
    with open(filename, "wb") as fp:
        for handler, tss, dpl in idt_config:
            if handler and tss:
                error("entry specifies both handler function and tss")

            if not handler and not tss:
                error("entry does not specify either handler or tss")

            if handler:
                data = create_irq_gate(handler, dpl)
            else:
                data = create_task_gate(tss, dpl)

            fp.write(data)


map_fmt = "<B"


def create_irq_vec_map_binary(irq_vec_map, filename):
    """Write the IRQ line -> IDT vector map as an array of bytes."""
    with open(filename, "wb") as fp:
        for i in irq_vec_map:
            fp.write(struct.pack(map_fmt, i))


def priority_range(prio):
    """Return the range of IDT vectors belonging to priority level `prio`."""
    # Priority levels are represented as groups of 16 vectors within the IDT
    base = 32 + (prio * 16)
    return range(base, base + 16)


def update_irq_vec_map(irq_vec_map, irq, vector, max_irq):
    """Record `irq` -> `vector` in the map, rejecting duplicates/overflow."""
    # No IRQ associated; exception or software interrupt
    if irq == -1:
        return

    if irq >= max_irq:
        error("irq %d specified, but CONFIG_MAX_IRQ_LINES is %d" %
              (irq, max_irq))

    # This table will never have values less than 32 since those are
    # for exceptions; 0 means unconfigured
    if irq_vec_map[irq] != 0:
        error("multiple vector assignments for interrupt line %d" % irq)

    debug("assign IRQ %d to vector %d" % (irq, vector))
    irq_vec_map[irq] = vector


def setup_idt(spur_code, spur_nocode, intlist, max_vec, max_irq):
    """Build the vector table and IRQ map from the intList configuration.

    Returns (vectors, irq_vec_map) where vectors[i] is a
    (handler, tss, dpl) tuple for every IDT slot.
    """
    irq_vec_map = [0 for i in range(max_irq)]
    vectors = [None for i in range(max_vec)]

    # Pass 1: sanity check and set up hard-coded interrupt vectors
    for handler, irq, prio, vec, dpl, tss in intlist:
        if vec == -1:
            if prio == -1:
                error("entry does not specify vector or priority level")
            continue

        if vec >= max_vec:
            error("Vector %d specified, but size of IDT is only %d vectors" %
                  (vec, max_vec))

        if vectors[vec] is not None:
            error("Multiple assignments for vector %d" % vec)

        vectors[vec] = (handler, tss, dpl)
        update_irq_vec_map(irq_vec_map, irq, vec, max_irq)

    # Pass 2: set up priority-based interrupt vectors
    for handler, irq, prio, vec, dpl, tss in intlist:
        if vec != -1:
            continue

        # Find the first unassigned vector in this priority's group
        for vi in priority_range(prio):
            if vi >= max_vec:
                break
            if vectors[vi] is None:
                vec = vi
                break

        if vec == -1:
            error("can't find a free vector in priority level %d" % prio)

        vectors[vec] = (handler, tss, dpl)
        update_irq_vec_map(irq_vec_map, irq, vec, max_irq)

    # Pass 3: fill in unused vectors with spurious handler at dpl=0
    for i in range(max_vec):
        if vectors[i] is not None:
            continue

        if i in ERR_CODE_VECTORS:
            handler = spur_code
        else:
            handler = spur_nocode

        vectors[i] = (handler, 0, 0)

    return vectors, irq_vec_map


def get_symbols(obj):
    """Return a {name: address} dict from the ELF file's symbol table."""
    for section in obj.iter_sections():
        if isinstance(section, SymbolTableSection):
            return {sym.name: sym.entry.st_value
                    for sym in section.iter_symbols()}

    raise LookupError("Could not find symbol table")


# struct genidt_header_s {
#     uint32_t spurious_addr;
#     uint32_t spurious_no_error_addr;
#     int32_t num_entries;
# };
# NOTE(review): only the two address words are unpacked below ("<II");
# presumably num_entries is not present/consumed here — confirm against
# the C side that emits the intList section.
intlist_header_fmt = "<II"

# struct genidt_entry_s {
#     uint32_t isr;
#     int32_t irq;
#     int32_t priority;
#     int32_t vector_id;
#     int32_t dpl;
#     int32_t tss;
# };
intlist_entry_fmt = "<Iiiiii"


def get_intlist(elf):
    """Extract (spurious_code, spurious_nocode, intlist) from the intList section."""
    intdata = elf.get_section_by_name("intList").data()

    header_sz = struct.calcsize(intlist_header_fmt)
    header = struct.unpack_from(intlist_header_fmt, intdata, 0)
    intdata = intdata[header_sz:]

    spurious_code = header[0]
    spurious_nocode = header[1]

    debug("spurious handler (code) : %s" % hex(header[0]))
    debug("spurious handler (no code) : %s" % hex(header[1]))

    intlist = [i for i in struct.iter_unpack(intlist_entry_fmt, intdata)]

    debug("Configured interrupt routing")
    debug("handler irq pri vec dpl")
    debug("--------------------------")

    for irq in intlist:
        debug("{0:<10} {1:<3} {2:<3} {3:<3} {4:<2}".format(
            hex(irq[0]),
            "-" if irq[1] == -1 else irq[1],
            "-" if irq[2] == -1 else irq[2],
            "-" if irq[3] == -1 else irq[3],
            irq[4]))

    return (spurious_code, spurious_nocode, intlist)


def parse_args():
    """Parse command-line arguments into the module-level `args`."""
    global args

    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        allow_abbrev=False)

    parser.add_argument("-m", "--vector-map", required=True,
                        help="Output file mapping IRQ lines to IDT vectors")
    parser.add_argument("-o", "--output-idt", required=True,
                        help="Output file containing IDT binary")
    parser.add_argument("-a", "--output-vectors-alloc", required=False,
                        help="Output file indicating allocated vectors")
    parser.add_argument("-k", "--kernel", required=True,
                        help="Zephyr kernel image")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Print extra debugging information")
    args = parser.parse_args()
    if "VERBOSE" in os.environ:
        args.verbose = 1


def create_irq_vectors_allocated(vectors, spur_code, spur_nocode, filename):
    """Write a bitfield marking IDT vectors available for dynamic IRQs."""
    # Construct a bitfield over all the IDT vectors, where if bit n is 1,
    # that vector is free. If those vectors have either of the two spurious
    # interrupt handlers installed, they are free for runtime installation
    # of interrupts
    num_chars = (len(vectors) + 7) // 8
    vbits = num_chars * [0]
    for i, (handler, _, _) in enumerate(vectors):
        if handler not in (spur_code, spur_nocode):
            continue

        vbit_index = i // 8
        vbit_val = 1 << (i % 8)
        vbits[vbit_index] = vbits[vbit_index] | vbit_val

    with open(filename, "wb") as fp:
        for char in vbits:
            fp.write(struct.pack("<B", char))


def main():
    """Read the prebuilt kernel and emit the IDT, IRQ map, and free-vector bitfield."""
    parse_args()

    with open(args.kernel, "rb") as fp:
        kernel = ELFFile(fp)
        syms = get_symbols(kernel)
        spur_code, spur_nocode, intlist = get_intlist(kernel)

        max_irq = syms["CONFIG_MAX_IRQ_LINES"]
        max_vec = syms["CONFIG_IDT_NUM_VECTORS"]

        vectors, irq_vec_map = setup_idt(spur_code, spur_nocode, intlist,
                                         max_vec, max_irq)

        create_idt_binary(vectors, args.output_idt)
        create_irq_vec_map_binary(irq_vec_map, args.vector_map)
        if args.output_vectors_alloc:
            create_irq_vectors_allocated(vectors, spur_code, spur_nocode,
                                         args.output_vectors_alloc)


if __name__ == "__main__":
    main()
```
/content/code_sandbox/arch/x86/gen_idt.py
python
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,404
```python
#!/usr/bin/env python3
#
#
"""Generate a Global Descriptor Table (GDT) for x86 CPUs.

For additional detail on GDT and x86 memory management, please
consult the IA Architecture SW Developer Manual, vol. 3.

This script accepts as input the zephyr_prebuilt.elf binary,
which is a link of the Zephyr kernel without various build-time
generated data structures (such as the GDT) inserted into it.
This kernel image has been properly padded such that inserting
these data structures will not disturb the memory addresses of
other symbols. The input kernel ELF binary is used to obtain the
following information:

- Memory addresses of the Main and Double Fault TSS structures
  so GDT descriptors can be created for them
- Memory addresses of where the GDT lives in memory, so that this
  address can be populated in the GDT pseudo descriptor
- whether userspace or HW stack protection are enabled in Kconfig

The output is a GDT whose contents depend on the kernel
configuration. With no memory protection features enabled,
we generate flat 32-bit code and data segments. If hardware-
based stack overflow protection or userspace is enabled, we
additionally create descriptors for the main and double-
fault IA tasks, needed for userspace privilege elevation and
double-fault handling. If userspace is enabled, we also create
flat code/data segments for ring 3 execution.
"""

import argparse
import sys
import struct
import os

from packaging import version

import elftools
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection

if version.parse(elftools.__version__) < version.parse('0.24'):
    sys.exit("pyelftools is out of date, need version 0.24 or later")


def debug(text):
    """Display debug message if --verbose"""
    if args.verbose:
        sys.stdout.write(os.path.basename(sys.argv[0]) + ": " + text + "\n")


def error(text):
    """Exit program with an error message"""
    sys.exit(os.path.basename(sys.argv[0]) + ": " + text)


GDT_PD_FMT = "<HIH"

FLAGS_GRAN = 1 << 7  # page granularity
ACCESS_EX = 1 << 3   # executable
ACCESS_DC = 1 << 2   # direction/conforming
ACCESS_RW = 1 << 1   # read or write permission

# 6 byte pseudo descriptor, but we're going to actually use this as the
# zero descriptor and return 8 bytes


def create_gdt_pseudo_desc(addr, size):
    """Create pseudo GDT descriptor (limit, base, padding to 8 bytes)"""
    debug("create pseudo descriptor: %x %x" % (addr, size))
    # ...and take back one byte for the Intel god whose Ark this is...
    # (the GDTR limit field is the table size minus one)
    size = size - 1
    return struct.pack(GDT_PD_FMT, size, addr, 0)


def chop_base_limit(base, limit):
    """Split base/limit into the scattered GDT descriptor fields.

    Limit argument always in bytes.
    """
    base_lo = base & 0xFFFF
    base_mid = (base >> 16) & 0xFF
    base_hi = (base >> 24) & 0xFF

    limit_lo = limit & 0xFFFF
    limit_hi = (limit >> 16) & 0xF

    return (base_lo, base_mid, base_hi, limit_lo, limit_hi)


GDT_ENT_FMT = "<HHBBBB"


def create_code_data_entry(base, limit, dpl, flags, access):
    """Create GDT entry for code or data"""
    debug("create code or data entry: %x %x %x %x %x" %
          (base, limit, dpl, flags, access))

    base_lo, base_mid, base_hi, limit_lo, limit_hi = chop_base_limit(base,
                                                                     limit)

    # This is a valid descriptor
    present = 1

    # 32-bit protected mode
    size = 1

    # 1 = code or data, 0 = system type
    desc_type = 1

    # Just set accessed to 1 already so the CPU doesn't need to update it,
    # prevents freakouts if the GDT is in ROM, we don't care about this
    # bit in the OS
    accessed = 1

    access = access | (present << 7) | (dpl << 5) | (desc_type << 4) | accessed
    flags = flags | (size << 6) | limit_hi

    return struct.pack(GDT_ENT_FMT, limit_lo, base_lo, base_mid, access,
                       flags, base_hi)


def create_tss_entry(base, limit, dpl):
    """Create GDT TSS entry"""
    debug("create TSS entry: %x %x %x" % (base, limit, dpl))
    present = 1

    base_lo, base_mid, base_hi, limit_lo, limit_hi, = chop_base_limit(base,
                                                                      limit)

    type_code = 0x9  # non-busy 32-bit TSS descriptor
    gran = 0

    flags = (gran << 7) | limit_hi
    type_byte = (present << 7) | (dpl << 5) | type_code

    return struct.pack(GDT_ENT_FMT, limit_lo, base_lo, base_mid, type_byte,
                       flags, base_hi)


def get_symbols(obj):
    """Extract all symbols from ELF file object"""
    for section in obj.iter_sections():
        if isinstance(section, SymbolTableSection):
            return {sym.name: sym.entry.st_value
                    for sym in section.iter_symbols()}

    raise LookupError("Could not find symbol table")


def parse_args():
    """Parse command line arguments"""
    global args
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        allow_abbrev=False)

    parser.add_argument("-k", "--kernel", required=True,
                        help="Zephyr kernel image")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Print extra debugging information")
    parser.add_argument("-o", "--output-gdt", required=True,
                        help="output GDT binary")
    args = parser.parse_args()
    if "VERBOSE" in os.environ:
        args.verbose = 1


def main():
    """Main Program"""
    parse_args()

    with open(args.kernel, "rb") as elf_fp:
        kernel = ELFFile(elf_fp)
        syms = get_symbols(kernel)

    # NOTE: use-cases are extremely limited; we always have a basic flat
    # code/data segments. If we are doing stack protection, we are going to
    # have two TSS to manage the main task and the special task for double
    # fault exception handling
    if "CONFIG_USERSPACE" in syms:
        num_entries = 7
    elif "CONFIG_X86_STACK_PROTECTION" in syms:
        num_entries = 5
    else:
        num_entries = 3

    use_tls = False
    if ("CONFIG_THREAD_LOCAL_STORAGE" in syms) and ("CONFIG_X86_64" not in syms):
        use_tls = True

        # x86_64 does not use descriptor for thread local storage
        num_entries += 1

    gdt_base = syms["_gdt"]

    with open(args.output_gdt, "wb") as output_fp:
        # The pseudo descriptor is stuffed into the NULL descriptor
        # since the CPU never looks at it
        output_fp.write(create_gdt_pseudo_desc(gdt_base, num_entries * 8))

        # Selector 0x08: code descriptor
        output_fp.write(create_code_data_entry(0, 0xFFFFF, 0,
                                               FLAGS_GRAN,
                                               ACCESS_EX | ACCESS_RW))

        # Selector 0x10: data descriptor
        output_fp.write(create_code_data_entry(0, 0xFFFFF, 0,
                                               FLAGS_GRAN,
                                               ACCESS_RW))

        if num_entries >= 5:
            main_tss = syms["_main_tss"]
            df_tss = syms["_df_tss"]

            # Selector 0x18: main TSS
            output_fp.write(create_tss_entry(main_tss, 0x67, 0))

            # Selector 0x20: double-fault TSS
            output_fp.write(create_tss_entry(df_tss, 0x67, 0))

        if num_entries >= 7:
            # Selector 0x28: code descriptor, dpl = 3
            output_fp.write(create_code_data_entry(0, 0xFFFFF, 3,
                                                   FLAGS_GRAN,
                                                   ACCESS_EX | ACCESS_RW))

            # Selector 0x30: data descriptor, dpl = 3
            output_fp.write(create_code_data_entry(0, 0xFFFFF, 3,
                                                   FLAGS_GRAN,
                                                   ACCESS_RW))

        if use_tls:
            # Selector 0x18, 0x28 or 0x38 (depending on entries above):
            # data descriptor, dpl = 3
            #
            # for use with thread local storage while this will be
            # modified at runtime.
            output_fp.write(create_code_data_entry(0, 0xFFFFF, 3,
                                                   FLAGS_GRAN,
                                                   ACCESS_RW))


if __name__ == "__main__":
    main()
```
python
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,994
```cmake zephyr_cc_option(-m64) set_property(GLOBAL PROPERTY PROPERTY_OUTPUT_ARCH "i386:x86-64") set_property(GLOBAL PROPERTY PROPERTY_OUTPUT_FORMAT "elf64-x86-64") get_property(OUTPUT_ARCH GLOBAL PROPERTY PROPERTY_OUTPUT_ARCH) get_property(OUTPUT_FORMAT GLOBAL PROPERTY PROPERTY_OUTPUT_FORMAT) if(CONFIG_X86_SSE) # x86-64 by default has SSE and SSE2 # so no need to add compiler flags for them. if(CONFIG_X86_SSE3) zephyr_cc_option(-msse3) else() zephyr_cc_option(-mno-sse3) endif() if(CONFIG_X86_SSSE3) zephyr_cc_option(-mssse3) else() zephyr_cc_option(-mno-ssse3) endif() if(CONFIG_X86_SSE41) zephyr_cc_option(-msse4.1) else() zephyr_cc_option(-mno-sse4.1) endif() if(CONFIG_X86_SSE42) zephyr_cc_option(-msse4.2) else() zephyr_cc_option(-mno-sse4.2) endif() if(CONFIG_X86_SSE4A) zephyr_cc_option(-msse4a) else() zephyr_cc_option(-mno-sse4a) endif() endif() add_subdirectory(core) ```
/content/code_sandbox/arch/x86/intel64.cmake
cmake
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
314
```c /* * */ #include <zephyr/arch/x86/arch.h> #include <zephyr/kernel.h> #include <zephyr/sys_clock.h> #include <zephyr/timing/timing.h> #include <zephyr/app_memory/app_memdomain.h> K_APP_BMEM(z_libc_partition) static uint64_t tsc_freq; void arch_timing_x86_init(void) { uint32_t cyc_start, cyc_end; uint64_t tsc_start, tsc_end; uint64_t cyc_freq = sys_clock_hw_cycles_per_sec(); uint64_t dcyc, dtsc; do { cyc_start = k_cycle_get_32(); tsc_start = z_tsc_read(); k_busy_wait(10 * USEC_PER_MSEC); cyc_end = k_cycle_get_32(); tsc_end = z_tsc_read(); /* * cycles are in 32-bit, and delta must be * calculated in 32-bit precision. Or it would be * wrapping around in 64-bit. */ dcyc = (uint32_t)cyc_end - (uint32_t)cyc_start; dtsc = tsc_end - tsc_start; } while ((dcyc == 0) || (dtsc == 0)); tsc_freq = (cyc_freq * dtsc) / dcyc; } uint64_t arch_timing_x86_freq_get(void) { return tsc_freq; } void arch_timing_init(void) { arch_timing_x86_init(); } void arch_timing_start(void) { } void arch_timing_stop(void) { } timing_t arch_timing_counter_get(void) { return z_tsc_read(); } uint64_t arch_timing_cycles_get(volatile timing_t *const start, volatile timing_t *const end) { return (*end - *start); } uint64_t arch_timing_freq_get(void) { return arch_timing_x86_freq_get(); } uint64_t arch_timing_cycles_to_ns(uint64_t cycles) { return ((cycles) * NSEC_PER_SEC / tsc_freq); } uint64_t arch_timing_cycles_to_ns_avg(uint64_t cycles, uint32_t count) { return arch_timing_cycles_to_ns(cycles) / count; } uint32_t arch_timing_freq_get_mhz(void) { return (uint32_t)(arch_timing_freq_get() / 1000000U); } ```
/content/code_sandbox/arch/x86/timing.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
498
```cmake
# Build rules for the IA32 (32-bit x86) sub-architecture.

# Find out if we are optimizing for size
get_target_property(zephyr_COMPILE_OPTIONS zephyr_interface INTERFACE_COMPILE_OPTIONS)
if ("-Os" IN_LIST zephyr_COMPILE_OPTIONS)
  # Size-optimized build: allow a 4-byte stack alignment to save space.
  zephyr_cc_option(-mpreferred-stack-boundary=2)
else()
  zephyr_compile_definitions(PERF_OPT)
endif()

# binutils architecture / object-format names for post-processing.
set_property(GLOBAL PROPERTY PROPERTY_OUTPUT_ARCH "i386")
set_property(GLOBAL PROPERTY PROPERTY_OUTPUT_FORMAT "elf32-i386")

if(CMAKE_C_COMPILER_ID STREQUAL "Clang"
   OR CMAKE_C_COMPILER_ID STREQUAL "IntelLLVM")
  zephyr_compile_options(-Qunused-arguments)

  zephyr_cc_option(
    -m32
    -gdwarf-2
  )
endif()

# SIMD instruction-set selection: pass the enabling -m<ext> flag when the
# matching Kconfig option is set, and the -mno-<ext> form otherwise so
# the compiler never emits those instructions implicitly.
if(CONFIG_X86_MMX)
  zephyr_cc_option(-mmmx)
else()
  zephyr_cc_option(-mno-mmx)
endif()

if(CONFIG_X86_SSE)
  zephyr_cc_option(-msse)

  # Choose whether the compiler may use SSE (vs. x87) for FP math.
  if(CONFIG_X86_SSE_FP_MATH)
    zephyr_cc_option(-mfpmath=sse)
  else()
    zephyr_cc_option(-mfpmath=387)
  endif()

  if(CONFIG_X86_SSE2)
    zephyr_cc_option(-msse2)
  else()
    zephyr_cc_option(-mno-sse2)
  endif()

  if(CONFIG_X86_SSE3)
    zephyr_cc_option(-msse3)
  else()
    zephyr_cc_option(-mno-sse3)
  endif()

  if(CONFIG_X86_SSSE3)
    zephyr_cc_option(-mssse3)
  else()
    zephyr_cc_option(-mno-ssse3)
  endif()

  if(CONFIG_X86_SSE41)
    zephyr_cc_option(-msse4.1)
  else()
    zephyr_cc_option(-mno-sse4.1)
  endif()

  if(CONFIG_X86_SSE42)
    zephyr_cc_option(-msse4.2)
  else()
    zephyr_cc_option(-mno-sse4.2)
  endif()

  if(CONFIG_X86_SSE4A)
    zephyr_cc_option(-msse4a)
  else()
    zephyr_cc_option(-mno-sse4a)
  endif()
else()
  zephyr_cc_option(-mno-sse)
endif()

# Propagate verbosity to the IDT generator.
if(CMAKE_VERBOSE_MAKEFILE)
  set(GENIDT_EXTRA_ARGS --verbose)
else()
  set(GENIDT_EXTRA_ARGS "")
endif()

set(GENIDT ${ZEPHYR_BASE}/arch/x86/gen_idt.py)

define_property(GLOBAL PROPERTY PROPERTY_OUTPUT_ARCH BRIEF_DOCS " " FULL_DOCS " ")

# Use gen_idt.py and objcopy to generate irq_int_vector_map.o,
# irq_vectors_alloc.o, and staticIdt.o from the elf file
# ${ZEPHYR_PREBUILT_EXECUTABLE}
set(gen_idt_output_files
  ${CMAKE_CURRENT_BINARY_DIR}/irq_int_vector_map.bin
  ${CMAKE_CURRENT_BINARY_DIR}/staticIdt.bin
  ${CMAKE_CURRENT_BINARY_DIR}/irq_vectors_alloc.bin
)

add_custom_target(
  gen_idt_output
  DEPENDS
  ${gen_idt_output_files}
)

# The IDT and vector maps are extracted from the prebuilt kernel image,
# then linked back into the final image as binary blobs.
add_custom_command(
  OUTPUT irq_int_vector_map.bin staticIdt.bin irq_vectors_alloc.bin
  COMMAND
  ${PYTHON_EXECUTABLE}
  ${GENIDT}
  --kernel $<TARGET_FILE:${ZEPHYR_PREBUILT_EXECUTABLE}>
  --output-idt staticIdt.bin
  --vector-map irq_int_vector_map.bin
  --output-vectors-alloc irq_vectors_alloc.bin
  ${GENIDT_EXTRA_ARGS}
  DEPENDS ${ZEPHYR_PREBUILT_EXECUTABLE}
  WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
)

# Must be last so that soc/ can override default exception handlers
add_subdirectory(core)

get_property(OUTPUT_ARCH GLOBAL PROPERTY PROPERTY_OUTPUT_ARCH)
get_property(OUTPUT_FORMAT GLOBAL PROPERTY PROPERTY_OUTPUT_FORMAT)

add_bin_file_to_the_next_link(gen_idt_output staticIdt)
add_bin_file_to_the_next_link(gen_idt_output irq_int_vector_map)
add_bin_file_to_the_next_link(gen_idt_output irq_vectors_alloc)

if(CONFIG_GDT_DYNAMIC)
  # Use gen_gdt.py and objcopy to generate gdt.o from the elf
  # file ${ZEPHYR_PREBUILT_EXECUTABLE}, creating the temp file gdt.bin along the
  # way.
  #
  # ${ZEPHYR_PREBUILT_EXECUTABLE}.elf -> gdt.bin -> gdt.o
  add_custom_target(
    gdt_bin_target
    DEPENDS
    gdt.bin
  )

  add_custom_command(
    OUTPUT gdt.bin
    COMMAND
    ${PYTHON_EXECUTABLE}
    ${ZEPHYR_BASE}/arch/x86/gen_gdt.py
    --kernel $<TARGET_FILE:${ZEPHYR_PREBUILT_EXECUTABLE}>
    --output-gdt gdt.bin
    $<$<BOOL:${CMAKE_VERBOSE_MAKEFILE}>:--verbose>
    DEPENDS ${ZEPHYR_PREBUILT_EXECUTABLE}
    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
  )

  add_bin_file_to_the_next_link(gdt_bin_target gdt)
endif()
```
/content/code_sandbox/arch/x86/ia32.cmake
cmake
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,146
```unknown # x86 general configuration options menu "X86 Architecture Options" depends on X86 config ARCH default "x86" # # CPU Families - the SoC configuration should select the right one. # config CPU_ATOM bool select CPU_HAS_FPU select ARCH_HAS_STACK_PROTECTION if X86_MMU select ARCH_HAS_USERSPACE if X86_MMU select X86_CPU_HAS_MMX select X86_CPU_HAS_SSE select X86_CPU_HAS_SSE2 select X86_CPU_HAS_SSE3 select CPU_HAS_DCACHE help This option signifies the use of a CPU from the Atom family. config CPU_APOLLO_LAKE bool select CPU_HAS_FPU select ARCH_HAS_STACK_PROTECTION if X86_MMU select ARCH_HAS_USERSPACE if X86_MMU select X86_MMU select X86_CPU_HAS_MMX select X86_CPU_HAS_SSE select X86_CPU_HAS_SSE2 select X86_CPU_HAS_SSE3 select X86_CPU_HAS_SSSE3 select X86_CPU_HAS_SSE41 select X86_CPU_HAS_SSE42 select CPU_HAS_DCACHE help This option signifies the use of a CPU from the Apollo Lake family. config CPU_LAKEMONT bool select CPU_HAS_FPU select ARCH_HAS_STACK_PROTECTION if X86_MMU select ARCH_HAS_USERSPACE if X86_MMU select X86_CPU_HAS_MMX select X86_CPU_HAS_SSE select X86_CPU_HAS_SSE2 select X86_CPU_HAS_SSE3 select X86_CPU_HAS_SSSE3 select CPU_HAS_DCACHE help This option signifies the use of a CPU from the Lakemont family. # # Configuration common to both IA32 and Intel64 sub-architectures. # config X86_64 bool "Run in 64-bit mode" select 64BIT select USE_SWITCH select USE_SWITCH_SUPPORTED select SCHED_IPI_SUPPORTED select X86_MMU select X86_CPU_HAS_MMX select X86_CPU_HAS_SSE select X86_CPU_HAS_SSE2 select X86_MMX select X86_SSE select X86_SSE2 menu "x86 Features" config X86_CPU_HAS_MMX bool config X86_CPU_HAS_SSE bool config X86_CPU_HAS_SSE2 bool config X86_CPU_HAS_SSE3 bool config X86_CPU_HAS_SSSE3 bool config X86_CPU_HAS_SSE41 bool config X86_CPU_HAS_SSE42 bool config X86_CPU_HAS_SSE4A bool if FPU || X86_64 config X86_MMX bool "MMX Support" depends on X86_CPU_HAS_MMX help This option enables MMX support, and the use of MMX registers by threads. 
config X86_SSE bool "SSE Support" depends on X86_CPU_HAS_SSE help This option enables SSE support, and the use of SSE registers by threads. config X86_SSE2 bool "SSE2 Support" depends on X86_CPU_HAS_SSE2 select X86_SSE help This option enables SSE2 support. config X86_SSE3 bool "SSE3 Support" depends on X86_CPU_HAS_SSE3 select X86_SSE help This option enables SSE3 support. config X86_SSSE3 bool "SSSE3 (Supplemental SSE3) Support" depends on X86_CPU_HAS_SSSE3 select X86_SSE help This option enables Supplemental SSE3 support. config X86_SSE41 bool "SSE4.1 Support" depends on X86_CPU_HAS_SSE41 select X86_SSE help This option enables SSE4.1 support. config X86_SSE42 bool "SSE4.2 Support" depends on X86_CPU_HAS_SSE42 select X86_SSE help This option enables SSE4.2 support. config X86_SSE4A bool "SSE4A Support" depends on X86_CPU_HAS_SSE4A select X86_SSE help This option enables SSE4A support. config X86_SSE_FP_MATH bool "Compiler-generated SSEx instructions for floating point math" depends on X86_SSE help This option allows the compiler to generate SSEx instructions for performing floating point math. This can greatly improve performance when exactly the same operations are to be performed on multiple data objects; however, it can also significantly reduce performance when preemptive task switches occur because of the larger register set that must be saved and restored. Disabling this option means that the compiler utilizes only the x87 instruction set for floating point operations. endif # FPU || X86_64 endmenu config SRAM_OFFSET default 0x100000 if X86_PC_COMPATIBLE help A lot of x86 that resemble PCs have many reserved physical memory regions within the first megabyte. Specify an offset from the beginning of RAM to load the kernel in physical memory, avoiding these regions. Note that this does not include the "locore" which contains real mode bootstrap code within the first 64K of physical memory. This value normally need to be page-aligned. 
config KERNEL_VM_OFFSET default 0x100000 if MMU config MAX_IRQ_LINES int "Number of IRQ lines" default 128 range 0 224 help This option specifies the number of IRQ lines in the system. It determines the size of the _irq_to_interrupt_vector_table, which is used to track the association between vectors and IRQ numbers. config IRQ_OFFLOAD_VECTOR int "IDT vector to use for IRQ offload" default 33 range 32 $(UINT8_MAX) depends on IRQ_OFFLOAD config PIC_DISABLE bool "Disable PIC" help This option disables all interrupts on the legacy i8259 PICs at boot. choice prompt "Reboot implementation" depends on REBOOT default REBOOT_RST_CNT config REBOOT_RST_CNT bool "Reboot via RST_CNT register" help Reboot via the RST_CNT register, going back to BIOS. endchoice config PCIE_MMIO_CFG bool "Use MMIO PCI configuration space access" select ACPI help Selects the use of the memory-mapped PCI Express Extended Configuration Space instead of the traditional 0xCF8/0xCFC IO Port registers. config KERNEL_VM_SIZE default 0x40000000 if ACPI config X86_PC_COMPATIBLE bool default y select ARCH_HAS_RESERVED_PAGE_FRAMES select HAS_SRAM_OFFSET help Hidden option to signal building for PC-compatible platforms with BIOS, ACPI, etc. config X86_MEMMAP bool "Use memory map" select ARCH_HAS_RESERVED_PAGE_FRAMES help Enable the use of memory map to identify regions of memory. The memory map can be populated via Multiboot (CONFIG_MULTIBOOT=y and CONFIG_MULTIBOOT_MEMMAP=y) or can be manually defined via x86_memmap[]. config X86_MEMMAP_ENTRIES int "Number of memory map entries" depends on X86_MEMMAP range 1 256 default 1 if !MULTIBOOT_MEMMAP default 64 if MULTIBOOT_MEMMAP help Maximum number of memory regions to hold in the memory map. config MULTIBOOT bool "Generate multiboot header" depends on X86_PC_COMPATIBLE default y help Embed a multiboot header in the output executable. This is used by some boot loaders (e.g., GRUB) when loading Zephyr. It is safe to leave this option on if you're not sure. 
It only expands the text segment by 12-16 bytes and is typically ignored if not needed.

if MULTIBOOT

config MULTIBOOT_INFO
	bool "Preserve multiboot information structure"
	help
	  Multiboot passes a pointer to an information structure to the
	  kernel entry point. Some drivers (e.g., the multiboot framebuffer
	  display driver) need to refer to information in this structure, and
	  so set this option to preserve the data in a permanent location.

config MULTIBOOT_MEMMAP
	bool "Use multiboot memory map if provided"
	select MULTIBOOT_INFO
	select X86_MEMMAP
	help
	  Use the multiboot memory map if the loader provides one.

endif # MULTIBOOT

config X86_VERY_EARLY_CONSOLE
	bool "Support very early boot printk"
	depends on PRINTK
	help
	  Non-emulated X86 devices often require special hardware to attach
	  a debugger, which may not be easily available. This option adds
	  a very minimal serial driver which gets initialized at the very
	  beginning of z_cstart(), via arch_kernel_init(). This driver enables
	  printk to emit messages to the 16550 UART port 0 instance in device
	  tree. This mini-driver assumes I/O to the UART is done via ports.

config X86_MMU
	bool "Memory Management Unit"
	select MMU
	help
	  This option enables the memory management unit present in x86
	  and creates a set of page tables at boot time that is
	  runtime-mutable.

config X86_COMMON_PAGE_TABLE
	bool "Use a single page table for all threads"
	default n
	depends on USERSPACE
	depends on !SMP
	depends on !X86_KPTI
	help
	  If this option is enabled, userspace memory domains will not have
	  their own page tables. Instead, context switching operations will
	  modify page tables in place. This is much slower, but uses much
	  less RAM for page tables.

config X86_MAX_ADDITIONAL_MEM_DOMAINS
	int "Maximum number of memory domains"
	default 3
	depends on X86_MMU && USERSPACE && !X86_COMMON_PAGE_TABLE
	help
	  The initial page tables at boot are pre-allocated, and used for the
	  default memory domain.
Instantiation of additional memory domains if common page tables are in use requires a pool of free pinned memory pages for constructing page tables. Zephyr test cases assume 3 additional domains can be instantiated. config X86_EXTRA_PAGE_TABLE_PAGES int "Reserve extra pages in page table" default 1 if X86_PAE && (KERNEL_VM_BASE != SRAM_BASE_ADDRESS) default 0 depends on X86_MMU help The whole page table is pre-allocated at build time and is dependent on the range of address space. This allows reserving extra pages (of size CONFIG_MMU_PAGE_SIZE) to the page table so that gen_mmu.py can make use of these extra pages. Says 0 unless absolutely sure that this is necessary. config X86_NO_MELTDOWN bool help This hidden option should be set on a per-SOC basis to indicate that a particular SOC is not vulnerable to the Meltdown CPU vulnerability, as described in CVE-2017-5754. config X86_NO_SPECTRE_V1 bool help This hidden option should be set on a per-SOC basis to indicate that a particular SOC is not vulnerable to the Spectre V1, V1.1, V1.2, and swapgs CPU vulnerabilities as described in CVE-2017-5753, CVE-2018-3693, and CVE-2019-1125. config X86_NO_SPECTRE_V2 bool help This hidden option should be set on a per-SOC basis to indicate that a particular SOC is not vulnerable to the Spectre V2 CPU vulnerability, as described in CVE-2017-5715. config X86_NO_SPECTRE_V4 bool help This hidden option should be set on a per-SOC basis to indicate that a particular SOC is not vulnerable to the Spectre V4 CPU vulnerability, as described in CVE-2018-3639. config X86_NO_LAZY_FP bool help This hidden option should be set on a per-SOC basis to indicate that a particular SOC is not vulnerable to the Lazy FP CPU vulnerability, as described in CVE-2018-3665. 
config X86_NO_SPECULATIVE_VULNERABILITIES
	bool
	select X86_NO_MELTDOWN
	select X86_NO_SPECTRE_V1
	select X86_NO_SPECTRE_V2
	select X86_NO_SPECTRE_V4
	select X86_NO_LAZY_FP
	help
	  This hidden option should be set on a per-SOC basis to indicate
	  that a particular SOC does not perform any kind of speculative
	  execution, or is a newer chip which is immune to the class of
	  vulnerabilities which exploit speculative execution side channel
	  attacks.

config X86_DISABLE_SSBD
	bool "Disable Speculative Store Bypass"
	depends on USERSPACE
	default y if !X86_NO_SPECTRE_V4
	help
	  This option will disable Speculative Store Bypass in order to
	  mitigate against certain kinds of side channel attacks. Quoting
	  the "Speculative Execution Side Channels" document, version 2.0:

	    When SSBD is set, loads will not execute speculatively
	    until the addresses of all older stores are known. This
	    ensures that a load does not speculatively consume stale
	    data values due to bypassing an older store on the same
	    logical processor.

	  If enabled, this applies to all threads in the system. Even if
	  enabled, this will have no effect on CPUs that do not require
	  this feature.

config DISABLE_SSBD
	bool "Disable Speculative Store Bypass [DEPRECATED]"
	depends on USERSPACE
	default y if !X86_NO_SPECTRE_V4
	select X86_DISABLE_SSBD
	select DEPRECATED
	help
	  Deprecated. Use CONFIG_X86_DISABLE_SSBD instead.

config X86_ENABLE_EXTENDED_IBRS
	bool "Extended IBRS"
	depends on USERSPACE
	default y if !X86_NO_SPECTRE_V2
	help
	  This option will enable the Extended Indirect Branch Restricted
	  Speculation 'always on' feature. This mitigates Indirect Branch
	  Control vulnerabilities (aka Spectre V2).

config ENABLE_EXTENDED_IBRS
	bool "Extended IBRS [DEPRECATED]"
	depends on USERSPACE
	default y if !X86_NO_SPECTRE_V2
	select X86_ENABLE_EXTENDED_IBRS
	select DEPRECATED
	help
	  Deprecated. Use CONFIG_X86_ENABLE_EXTENDED_IBRS instead.
config X86_BOUNDS_CHECK_BYPASS_MITIGATION bool depends on USERSPACE default y if !X86_NO_SPECTRE_V1 select BOUNDS_CHECK_BYPASS_MITIGATION help Hidden config to select arch-independent option to enable Spectre V1 mitigations by default if the CPU is not known to be immune to it. config X86_KPTI bool "Kernel page table isolation" default y depends on USERSPACE depends on !X86_NO_MELTDOWN help Implements kernel page table isolation to mitigate Meltdown exploits to read Kernel RAM. Incurs a significant performance cost for user thread interrupts and system calls, and significant footprint increase for additional page tables and trampoline stacks. config X86_EFI bool "EFI" default y depends on BUILD_OUTPUT_EFI help Enable EFI support. This means you build your image with zefi support. See arch/x86/zefi/README.txt for more information. config X86_EFI_CONSOLE bool depends on X86_EFI && X86_64 && !X86_VERY_EARLY_CONSOLE select EFI_CONSOLE default y if !UART_CONSOLE help This enables the use of the UEFI console device as the Zephyr printk handler. It requires that no interferences with hardware used by the firmware console (e.g. a UART or framebuffer) happens from Zephyr code, and that all memory used by the firmware environment and its page tables be separate and preserved. In general this is safe to assume, but no automatic checking exists at runtime to verify. Likewise be sure to disable any other console/printk drivers! config PRIVILEGED_STACK_SIZE # Must be multiple of CONFIG_MMU_PAGE_SIZE default 4096 if X86_MMU source "arch/x86/core/Kconfig.ia32" source "arch/x86/core/Kconfig.intel64" endmenu ```
/content/code_sandbox/arch/x86/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,620
```python #!/usr/bin/env python3 # # """Create the kernel's page tables for x86 CPUs. For additional detail on paging and x86 memory management, please consult the IA Architecture SW Developer Manual, volume 3a, chapter 4. This script produces the initial page tables installed into the CPU at early boot. These pages will have an identity mapping of the kernel image. The script takes the 'zephyr_prebuilt.elf' as input to obtain region sizes, certain memory addresses, and configuration values. If CONFIG_SRAM_REGION_PERMISSIONS is not enabled, the kernel image will be mapped with the Present and Write bits set. The linker scripts shouldn't add page alignment padding between sections. If CONFIG_SRAM_REGION_PERMISSIONS is enabled, the access permissions vary: - By default, the Present, Write, and Execute Disable bits are set. - The __text_region region will have Present and User bits set - The __rodata_region region will have Present, User, and Execute Disable bits set - On x86_64, the _locore region will have Present set and the _lorodata region will have Present and Execute Disable set. This script will establish a dual mapping at the address defined by CONFIG_KERNEL_VM_BASE if it is not the same as CONFIG_SRAM_BASE_ADDRESS. - The double-mapping is used to transition the instruction pointer from a physical address at early boot to the virtual address where the kernel is actually linked. - The mapping is always double-mapped at the top-level paging structure and the physical/virtual base addresses must have the same alignment with respect to the scope of top-level paging structure entries. This allows the same second-level paging structure(s) to be used for both memory bases. - The double-mapping is needed so that we can still fetch instructions from identity-mapped physical addresses after we program this table into the MMU, then jump to the equivalent virtual address. 
The kernel then unlinks the identity mapping before continuing, the address space is purely virtual after that. Because the set of page tables are linked together by physical address, we must know a priori the physical address of each table. The linker script must define a z_x86_pagetables_start symbol where the page tables will be placed, and this memory address must not shift between prebuilt and final ELF builds. This script will not work on systems where the physical load address of the kernel is unknown at build time. 64-bit systems will always build IA-32e page tables. 32-bit systems build PAE page tables if CONFIG_X86_PAE is set, otherwise standard 32-bit page tables are built. The kernel will expect to find the top-level structure of the produced page tables at the physical address corresponding to the symbol z_x86_kernel_ptables. The linker script will need to set that symbol to the end of the binary produced by this script, minus the size of the top-level paging structure as it is written out last. """ import sys import array import argparse import ctypes import os import struct import re import textwrap from packaging import version import elftools from elftools.elf.elffile import ELFFile from elftools.elf.sections import SymbolTableSection if version.parse(elftools.__version__) < version.parse('0.24'): sys.exit("pyelftools is out of date, need version 0.24 or later") def bit(pos): """Get value by shifting 1 by pos""" return 1 << pos # Page table entry flags FLAG_P = bit(0) FLAG_RW = bit(1) FLAG_US = bit(2) FLAG_CD = bit(4) FLAG_SZ = bit(7) FLAG_G = bit(8) FLAG_XD = bit(63) FLAG_IGNORED0 = bit(9) FLAG_IGNORED1 = bit(10) FLAG_IGNORED2 = bit(11) ENTRY_RW = FLAG_RW | FLAG_IGNORED0 ENTRY_US = FLAG_US | FLAG_IGNORED1 ENTRY_XD = FLAG_XD | FLAG_IGNORED2 # PD_LEVEL and PT_LEVEL are used as list index to PtableSet.levels[] # to get table from back of list. 
PD_LEVEL = -2 PT_LEVEL = -1 def debug(text): """Display verbose debug message""" if not args.verbose: return sys.stdout.write(os.path.basename(sys.argv[0]) + ": " + text + "\n") def verbose(text): """Display --verbose --verbose message""" if args.verbose and args.verbose > 1: sys.stdout.write(os.path.basename(sys.argv[0]) + ": " + text + "\n") def error(text): """Display error message and exit program""" sys.exit(os.path.basename(sys.argv[0]) + ": " + text) def align_check(base, size, scope=4096): """Make sure base and size are page-aligned""" if (base % scope) != 0: error("unaligned base address %x" % base) if (size % scope) != 0: error("Unaligned region size 0x%x for base %x" % (size, base)) def dump_flags(flags): """Translate page table flags into string""" ret = "" if flags & FLAG_P: ret += "P " if flags & FLAG_RW: ret += "RW " if flags & FLAG_US: ret += "US " if flags & FLAG_G: ret += "G " if flags & FLAG_XD: ret += "XD " if flags & FLAG_SZ: ret += "SZ " if flags & FLAG_CD: ret += "CD " return ret.strip() def round_up(val, align): """Round up val to the next multiple of align""" return (val + (align - 1)) & (~(align - 1)) def round_down(val, align): """Round down val to the previous multiple of align""" return val & (~(align - 1)) # Hard-coded flags for intermediate paging levels. Permissive, we only control # access or set caching properties at leaf levels. 
INT_FLAGS = FLAG_P | FLAG_RW | FLAG_US class MMUTable(): """Represents a particular table in a set of page tables, at any level""" def __init__(self): self.entries = array.array(self.type_code, [0 for i in range(self.num_entries)]) def get_binary(self): """Return a bytearray representation of this table""" # Always little-endian ctype = "<" + self.type_code entry_size = struct.calcsize(ctype) ret = bytearray(entry_size * self.num_entries) for i in range(self.num_entries): struct.pack_into(ctype, ret, entry_size * i, self.entries[i]) return ret @property def supported_flags(self): """Class property indicating what flag bits are supported""" raise NotImplementedError() @property def addr_shift(self): """Class property for how much to shift virtual addresses to obtain the appropriate index in the table for it""" raise NotImplementedError() @property def addr_mask(self): """Mask to apply to an individual entry to get the physical address mapping""" raise NotImplementedError() @property def type_code(self): """Struct packing letter code for table entries. Either I for 32-bit entries, or Q for PAE/IA-32e""" raise NotImplementedError() @property def num_entries(self): """Number of entries in the table. Varies by table type and paging mode""" raise NotImplementedError() def entry_index(self, virt_addr): """Get the index of the entry in this table that corresponds to the provided virtual address""" return (virt_addr >> self.addr_shift) & (self.num_entries - 1) def has_entry(self, virt_addr): """Indicate whether an entry is present in this table for the provided virtual address""" index = self.entry_index(virt_addr) return (self.entries[index] & FLAG_P) != 0 def lookup(self, virt_addr): """Look up the physical mapping for a virtual address. If this is a leaf table, this is the physical address mapping. 
If not, this is the physical address of the next level table""" index = self.entry_index(virt_addr) return self.entries[index] & self.addr_mask def map(self, virt_addr, phys_addr, entry_flags): """For the table entry corresponding to the provided virtual address, set the corresponding physical entry in the table. Unsupported flags will be filtered out. If this is a leaf table, this is the physical address mapping. If not, this is the physical address of the next level table""" index = self.entry_index(virt_addr) verbose("%s: mapping 0x%x to 0x%x : %s" % (self.__class__.__name__, phys_addr, virt_addr, dump_flags(entry_flags))) self.entries[index] = ((phys_addr & self.addr_mask) | (entry_flags & self.supported_flags)) def set_perms(self, virt_addr, entry_flags): """"For the table entry corresponding to the provided virtual address, update just the flags, leaving the physical mapping alone. Unsupported flags will be filtered out.""" index = self.entry_index(virt_addr) verbose("%s: changing perm at 0x%x : %s" % (self.__class__.__name__, virt_addr, dump_flags(entry_flags))) self.entries[index] = ((self.entries[index] & self.addr_mask) | (entry_flags & self.supported_flags)) # Specific supported table types class Pml4(MMUTable): """Page mapping level 4 for IA-32e""" addr_shift = 39 addr_mask = 0x7FFFFFFFFFFFF000 type_code = 'Q' num_entries = 512 supported_flags = INT_FLAGS class Pdpt(MMUTable): """Page directory pointer table for IA-32e""" addr_shift = 30 addr_mask = 0x7FFFFFFFFFFFF000 type_code = 'Q' num_entries = 512 supported_flags = INT_FLAGS | FLAG_SZ | FLAG_CD class PdptPAE(Pdpt): """Page directory pointer table for PAE""" num_entries = 4 class Pd(MMUTable): """Page directory for 32-bit""" addr_shift = 22 addr_mask = 0xFFFFF000 type_code = 'I' num_entries = 1024 supported_flags = INT_FLAGS | FLAG_SZ | FLAG_CD class PdXd(Pd): """Page directory for either PAE or IA-32e""" addr_shift = 21 addr_mask = 0x7FFFFFFFFFFFF000 num_entries = 512 type_code = 'Q' class 
Pt(MMUTable): """Page table for 32-bit""" addr_shift = 12 addr_mask = 0xFFFFF000 type_code = 'I' num_entries = 1024 supported_flags = (FLAG_P | FLAG_RW | FLAG_US | FLAG_G | FLAG_CD | FLAG_IGNORED0 | FLAG_IGNORED1) class PtXd(Pt): """Page table for either PAE or IA-32e""" addr_mask = 0x07FFFFFFFFFFF000 type_code = 'Q' num_entries = 512 supported_flags = (FLAG_P | FLAG_RW | FLAG_US | FLAG_G | FLAG_XD | FLAG_CD | FLAG_IGNORED0 | FLAG_IGNORED1 | FLAG_IGNORED2) class PtableSet(): """Represents a complete set of page tables for any paging mode""" def __init__(self, pages_start): """Instantiate a set of page tables which will be located in the image starting at the provided physical memory location""" self.toplevel = self.levels[0]() self.page_pos = pages_start debug("%s starting at physical address 0x%x" % (self.__class__.__name__, self.page_pos)) # Database of page table pages. Maps physical memory address to # MMUTable objects, excluding the top-level table which is tracked # separately. Starts out empty as we haven't mapped anything and # the top-level table is tracked separately. self.tables = {} def get_new_mmutable_addr(self): """If we need to instantiate a new MMUTable, return a physical address location for it""" ret = self.page_pos self.page_pos += 4096 return ret @property def levels(self): """Class hierarchy of paging levels, with the first entry being the toplevel table class, and the last entry always being some kind of leaf page table class (Pt or PtXd)""" raise NotImplementedError() def is_mapped(self, virt_addr, level): """ Return True if virt_addr has already been mapped. level_from_last == 0 only searches leaf level page tables. level_from_last == 1 searches both page directories and page tables. 
""" table = self.toplevel num_levels = len(self.levels) + level + 1 has_mapping = False # Create and link up intermediate tables if necessary for depth in range(0, num_levels): # Create child table if needed if table.has_entry(virt_addr): if depth == num_levels: has_mapping = True else: table = self.tables[table.lookup(virt_addr)] if has_mapping: # pylint doesn't like break in the above if-block break return has_mapping def is_region_mapped(self, virt_base, size, level=PT_LEVEL): """Find out if a region has been mapped""" align_check(virt_base, size) for vaddr in range(virt_base, virt_base + size, 4096): if self.is_mapped(vaddr, level): return True return False def new_child_table(self, table, virt_addr, depth): """Create a new child table""" new_table_addr = self.get_new_mmutable_addr() new_table = self.levels[depth]() debug("new %s at physical addr 0x%x" % (self.levels[depth].__name__, new_table_addr)) self.tables[new_table_addr] = new_table table.map(virt_addr, new_table_addr, INT_FLAGS) return new_table def map_page(self, virt_addr, phys_addr, flags, reserve, level=PT_LEVEL): """Map a virtual address to a physical address in the page tables, with provided access flags""" table = self.toplevel num_levels = len(self.levels) + level + 1 # Create and link up intermediate tables if necessary for depth in range(1, num_levels): # Create child table if needed if not table.has_entry(virt_addr): table = self.new_child_table(table, virt_addr, depth) else: table = self.tables[table.lookup(virt_addr)] # Set up entry in leaf page table if not reserve: table.map(virt_addr, phys_addr, flags) def reserve(self, virt_base, size, to_level=PT_LEVEL): """Reserve page table space with already aligned virt_base and size""" debug("Reserving paging structures for 0x%x (0x%x)" % (virt_base, size)) align_check(virt_base, size) # How much memory is covered by leaf page table scope = 1 << self.levels[PD_LEVEL].addr_shift if virt_base % scope != 0: error("misaligned virtual address space, 
0x%x not a multiple of 0x%x" % (virt_base, scope)) for addr in range(virt_base, virt_base + size, scope): self.map_page(addr, 0, 0, True, to_level) def reserve_unaligned(self, virt_base, size, to_level=PT_LEVEL): """Reserve page table space with virt_base and size alignment""" # How much memory is covered by leaf page table scope = 1 << self.levels[PD_LEVEL].addr_shift mem_start = round_down(virt_base, scope) mem_end = round_up(virt_base + size, scope) mem_size = mem_end - mem_start self.reserve(mem_start, mem_size, to_level) def map(self, phys_base, virt_base, size, flags, level=PT_LEVEL): """Map an address range in the page tables provided access flags. If virt_base is None, identity mapping using phys_base is done. """ is_identity_map = virt_base is None or virt_base == phys_base if virt_base is None: virt_base = phys_base scope = 1 << self.levels[level].addr_shift debug("Mapping 0x%x (0x%x) to 0x%x: %s" % (phys_base, size, virt_base, dump_flags(flags))) align_check(phys_base, size, scope) align_check(virt_base, size, scope) for paddr in range(phys_base, phys_base + size, scope): if is_identity_map and paddr == 0 and level == PT_LEVEL: # Never map the NULL page at page table level. 
continue vaddr = virt_base + (paddr - phys_base) self.map_page(vaddr, paddr, flags, False, level) def identity_map_unaligned(self, phys_base, size, flags, level=PT_LEVEL): """Identity map a region of memory""" scope = 1 << self.levels[level].addr_shift phys_aligned_base = round_down(phys_base, scope) phys_aligned_end = round_up(phys_base + size, scope) phys_aligned_size = phys_aligned_end - phys_aligned_base self.map(phys_aligned_base, None, phys_aligned_size, flags, level) def map_region(self, name, flags, virt_to_phys_offset, level=PT_LEVEL): """Map a named region""" if not isdef(name + "_start"): # Region may not exists return region_start = syms[name + "_start"] region_end = syms[name + "_end"] region_size = region_end - region_start region_start_phys = region_start if virt_to_phys_offset is not None: region_start_phys += virt_to_phys_offset self.map(region_start_phys, region_start, region_size, flags, level) def set_region_perms(self, name, flags, level=PT_LEVEL): """Set access permissions for a named region that is already mapped The bounds of the region will be looked up in the symbol table with _start and _size suffixes. 
The physical address mapping is unchanged and this will not disturb any double-mapping.""" if not isdef(name + "_start"): # Region may not exists return # Doesn't matter if this is a virtual address, we have a # either dual mapping or it's the same as physical base = syms[name + "_start"] if isdef(name + "_size"): size = syms[name + "_size"] else: region_end = syms[name + "_end"] size = region_end - base if size == 0: return debug("change flags for %s at 0x%x (0x%x): %s" % (name, base, size, dump_flags(flags))) num_levels = len(self.levels) + level + 1 scope = 1 << self.levels[level].addr_shift align_check(base, size, scope) try: for addr in range(base, base + size, scope): # Never map the NULL page if addr == 0: continue table = self.toplevel for _ in range(1, num_levels): table = self.tables[table.lookup(addr)] table.set_perms(addr, flags) except KeyError: error("no mapping for %s region 0x%x (size 0x%x)" % (name, base, size)) def write_output(self, filename): """Write the page tables to the output file in binary format""" written_size = 0 with open(filename, "wb") as output_fp: for addr in sorted(self.tables): mmu_table = self.tables[addr] mmu_table_bin = mmu_table.get_binary() output_fp.write(mmu_table_bin) written_size += len(mmu_table_bin) # We always have the top-level table be last. This is because # in PAE, the top-level PDPT has only 4 entries and is not a # full page in size. We do not put it in the tables dictionary # and treat it as a special case. 
debug("top-level %s at physical addr 0x%x" % (self.toplevel.__class__.__name__, self.get_new_mmutable_addr())) top_level_bin = self.toplevel.get_binary() output_fp.write(top_level_bin) written_size += len(top_level_bin) return written_size # Paging mode classes, we'll use one depending on configuration class Ptables32bit(PtableSet): """32-bit Page Tables""" levels = [Pd, Pt] class PtablesPAE(PtableSet): """PAE Page Tables""" levels = [PdptPAE, PdXd, PtXd] class PtablesIA32e(PtableSet): """Page Tables under IA32e mode""" levels = [Pml4, Pdpt, PdXd, PtXd] def parse_args(): """Parse command line arguments""" global args parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, allow_abbrev=False) parser.add_argument("-k", "--kernel", required=True, help="path to prebuilt kernel ELF binary") parser.add_argument("-o", "--output", required=True, help="output file") parser.add_argument("--map", action='append', help=textwrap.dedent('''\ Map extra memory: <physical address>,<size>[,<flags:LUWXD>[,<virtual address>]] where flags can be empty or combination of: L - Large page (2MB or 4MB), U - Userspace accessible, W - Writable, X - Executable, D - Cache disabled. Default is small (4KB) page, supervisor only, read only, and execution disabled. 
''')) parser.add_argument("-v", "--verbose", action="count", help="Print extra debugging information") args = parser.parse_args() if "VERBOSE" in os.environ: args.verbose = 1 def get_symbols(elf_obj): """Get all symbols from the ELF file""" for section in elf_obj.iter_sections(): if isinstance(section, SymbolTableSection): return {sym.name: sym.entry.st_value for sym in section.iter_symbols()} raise LookupError("Could not find symbol table") def isdef(sym_name): """True if symbol is defined in ELF file""" return sym_name in syms def find_symbol(obj, name): """Find symbol object from ELF file""" for section in obj.iter_sections(): if isinstance(section, SymbolTableSection): for sym in section.iter_symbols(): if sym.name == name: return sym return None def map_extra_regions(pt): """Map extra regions specified in command line""" # Extract command line arguments mappings = [] for entry in args.map: elements = entry.split(',') if len(elements) < 2: error("Not enough arguments for --map %s" % entry) one_map = {} one_map['cmdline'] = entry one_map['phys'] = int(elements[0], 0) one_map['size']= int(elements[1], 0) one_map['large_page'] = False flags = FLAG_P | ENTRY_XD if len(elements) > 2: map_flags = elements[2] # Check for allowed flags if not bool(re.match('^[LUWXD]*$', map_flags)): error("Unrecognized flags: %s" % map_flags) flags = FLAG_P | ENTRY_XD if 'W' in map_flags: flags |= ENTRY_RW if 'X' in map_flags: flags &= ~ENTRY_XD if 'U' in map_flags: flags |= ENTRY_US if 'L' in map_flags: flags |= FLAG_SZ one_map['large_page'] = True if 'D' in map_flags: flags |= FLAG_CD one_map['flags'] = flags if len(elements) > 3: one_map['virt'] = int(elements[3], 16) else: one_map['virt'] = one_map['phys'] mappings.append(one_map) # Map the regions for one_map in mappings: phys = one_map['phys'] size = one_map['size'] flags = one_map['flags'] virt = one_map['virt'] level = PD_LEVEL if one_map['large_page'] else PT_LEVEL # Check if addresses have already been mapped. 
# Error out if so as they could override kernel mappings. if pt.is_region_mapped(virt, size, level): error(("Region 0x%x (%d) already been mapped " "for --map %s" % (virt, size, one_map['cmdline']))) # Reserve space in page table, and map the region pt.reserve_unaligned(virt, size, level) pt.map(phys, virt, size, flags, level) def main(): """Main program""" global syms parse_args() with open(args.kernel, "rb") as elf_fp: kernel = ELFFile(elf_fp) syms = get_symbols(kernel) sym_dummy_pagetables = find_symbol(kernel, "dummy_pagetables") if sym_dummy_pagetables: reserved_pt_size = sym_dummy_pagetables['st_size'] else: reserved_pt_size = None if isdef("CONFIG_X86_64"): pclass = PtablesIA32e elif isdef("CONFIG_X86_PAE"): pclass = PtablesPAE else: pclass = Ptables32bit debug("building %s" % pclass.__name__) vm_base = syms["CONFIG_KERNEL_VM_BASE"] vm_size = syms["CONFIG_KERNEL_VM_SIZE"] vm_offset = syms["CONFIG_KERNEL_VM_OFFSET"] sram_base = syms["CONFIG_SRAM_BASE_ADDRESS"] sram_size = syms["CONFIG_SRAM_SIZE"] * 1024 mapped_kernel_base = syms["z_mapped_start"] mapped_kernel_size = syms["z_mapped_size"] if isdef("CONFIG_SRAM_OFFSET"): sram_offset = syms["CONFIG_SRAM_OFFSET"] else: sram_offset = 0 # Figure out if there is any need to do virtual-to-physical # address translation virt_to_phys_offset = (sram_base + sram_offset) - (vm_base + vm_offset) if isdef("CONFIG_ARCH_MAPS_ALL_RAM"): image_base = sram_base image_size = sram_size else: image_base = mapped_kernel_base image_size = mapped_kernel_size image_base_phys = image_base + virt_to_phys_offset ptables_phys = syms["z_x86_pagetables_start"] + virt_to_phys_offset debug("Address space: 0x%x - 0x%x size 0x%x" % (vm_base, vm_base + vm_size - 1, vm_size)) debug("Zephyr image: 0x%x - 0x%x size 0x%x" % (image_base, image_base + image_size - 1, image_size)) if virt_to_phys_offset != 0: debug("Physical address space: 0x%x - 0x%x size 0x%x" % (sram_base, sram_base + sram_size - 1, sram_size)) is_perm_regions = 
isdef("CONFIG_SRAM_REGION_PERMISSIONS") # Are pages in non-boot, non-pinned sections present at boot. is_generic_section_present = isdef("CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT") if image_size >= vm_size: error("VM size is too small (have 0x%x need more than 0x%x)" % (vm_size, image_size)) map_flags = 0 if is_perm_regions: # Don't allow execution by default for any pages. We'll adjust this # in later calls to pt.set_region_perms() map_flags = ENTRY_XD pt = pclass(ptables_phys) # Instantiate all the paging structures for the address space pt.reserve(vm_base, vm_size) # Map the zephyr image if is_generic_section_present: map_flags = map_flags | FLAG_P pt.map(image_base_phys, image_base, image_size, map_flags | ENTRY_RW) else: # When generic linker sections are not present in physical memory, # the corresponding virtual pages should not be mapped to non-existent # physical pages. So simply identity map them to create the page table # entries but without the present bit set. # Boot and pinned sections (if configured) will be mapped to # physical memory below. pt.map(image_base, image_base, image_size, map_flags | ENTRY_RW) if virt_to_phys_offset != 0: # Need to identity map the physical address space # as it is needed during early boot process. # This will be unmapped once z_x86_mmu_init() # is called. # Note that this only does the identity mapping # at the page directory level to minimize wasted space. 
pt.reserve_unaligned(image_base_phys, image_size, to_level=PD_LEVEL) pt.identity_map_unaligned(image_base_phys, image_size, FLAG_P | FLAG_RW | FLAG_SZ, level=PD_LEVEL) if isdef("CONFIG_X86_64"): # 64-bit has a special region in the first 64K to bootstrap other CPUs # from real mode locore_base = syms["_locore_start"] locore_size = syms["_lodata_end"] - locore_base debug("Base addresses: physical 0x%x size 0x%x" % (locore_base, locore_size)) pt.map(locore_base, None, locore_size, map_flags | FLAG_P | ENTRY_RW) if isdef("CONFIG_XIP"): # Additionally identity-map all ROM as read-only pt.map(syms["CONFIG_FLASH_BASE_ADDRESS"], None, syms["CONFIG_FLASH_SIZE"] * 1024, map_flags | FLAG_P) if isdef("CONFIG_LINKER_USE_BOOT_SECTION"): pt.map_region("lnkr_boot", map_flags | FLAG_P | ENTRY_RW, virt_to_phys_offset) if isdef("CONFIG_LINKER_USE_PINNED_SECTION"): pt.map_region("lnkr_pinned", map_flags | FLAG_P | ENTRY_RW, virt_to_phys_offset) # Process extra mapping requests if args.map: map_extra_regions(pt) # Adjust mapped region permissions if configured if is_perm_regions: # Need to accomplish the following things: # - Text regions need the XD flag cleared and RW flag removed # if not built with gdbstub support # - Rodata regions need the RW flag cleared # - User mode needs access as we currently do not separate application # text/rodata from kernel text/rodata if isdef("CONFIG_GDBSTUB"): flags = ENTRY_US | ENTRY_RW else: flags = ENTRY_US if is_generic_section_present: flags = flags | FLAG_P pt.set_region_perms("__text_region", flags) if isdef("CONFIG_LINKER_USE_BOOT_SECTION"): pt.set_region_perms("lnkr_boot_text", flags | FLAG_P) if isdef("CONFIG_LINKER_USE_PINNED_SECTION"): pt.set_region_perms("lnkr_pinned_text", flags | FLAG_P) flags = ENTRY_US | ENTRY_XD if is_generic_section_present: flags = flags | FLAG_P pt.set_region_perms("__rodata_region", flags) if isdef("CONFIG_LINKER_USE_BOOT_SECTION"): pt.set_region_perms("lnkr_boot_rodata", flags | FLAG_P) if 
isdef("CONFIG_LINKER_USE_PINNED_SECTION"): pt.set_region_perms("lnkr_pinned_rodata", flags | FLAG_P) if isdef("CONFIG_COVERAGE_GCOV") and isdef("CONFIG_USERSPACE"): # If GCOV is enabled, user mode must be able to write to its # common data area pt.set_region_perms("__gcov_bss", FLAG_P | ENTRY_RW | ENTRY_US | ENTRY_XD) if isdef("CONFIG_X86_64"): # Set appropriate permissions for locore areas much like we did # with the main text/rodata regions if isdef("CONFIG_X86_KPTI"): # Set the User bit for the read-only locore/lorodata areas. # This ensures they get mapped into the User page tables if # KPTI is turned on. There is no sensitive data in them, and # they contain text/data needed to take an exception or # interrupt. flag_user = ENTRY_US else: flag_user = 0 pt.set_region_perms("_locore", FLAG_P | flag_user) pt.set_region_perms("_lorodata", FLAG_P | ENTRY_XD | flag_user) written_size = pt.write_output(args.output) debug("Written %d bytes to %s" % (written_size, args.output)) # Warn if reserved page table is not of correct size if reserved_pt_size and written_size != reserved_pt_size: # Figure out how many extra pages needed size_diff = written_size - reserved_pt_size page_size = syms["CONFIG_MMU_PAGE_SIZE"] extra_pages_needed = int(round_up(size_diff, page_size) / page_size) if isdef("CONFIG_X86_EXTRA_PAGE_TABLE_PAGES"): extra_pages_kconfig = syms["CONFIG_X86_EXTRA_PAGE_TABLE_PAGES"] if isdef("CONFIG_X86_64"): extra_pages_needed += ctypes.c_int64(extra_pages_kconfig).value else: extra_pages_needed += ctypes.c_int32(extra_pages_kconfig).value reason = "big" if reserved_pt_size > written_size else "small" error(("Reserved space for page table is too %s." " Set CONFIG_X86_EXTRA_PAGE_TABLE_PAGES=%d") % (reason, extra_pages_needed)) if __name__ == "__main__": main() ```
/content/code_sandbox/arch/x86/gen_mmu.py
python
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
7,648
```python #!/usr/bin/env python3 import os.path import subprocess import elftools.elf.elffile import argparse ENTRY_SYM = "__start64" def verbose(msg): if args.verbose: print(msg) def build_elf(elf_file, include_dirs): base_dir = os.path.dirname(os.path.abspath(__file__)) cfile = os.path.join(base_dir, "zefi.c") ldscript = os.path.join(base_dir, "efi.ld") assert os.path.isfile(cfile) assert os.path.isfile(ldscript) # # Open the ELF file up and find our entry point # fp = open(elf_file, "rb") ef = elftools.elf.elffile.ELFFile(fp) symtab = ef.get_section_by_name(".symtab") entry_addr = symtab.get_symbol_by_name(ENTRY_SYM)[0].entry.st_value verbose("Entry point address (symbol: %s) 0x%x" % (ENTRY_SYM, entry_addr)) # # Parse the ELF file and extract segment data # data_blob = b'' data_segs = [] zero_segs = [] for seg in ef.iter_segments(): h = seg.header if h.p_type != "PT_LOAD": continue assert h.p_memsz >= h.p_filesz assert len(seg.data()) == h.p_filesz if h.p_filesz > 0: sd = seg.data() verbose("%d bytes of data at 0x%x, data offset %d" % (len(sd), h.p_vaddr, len(data_blob))) data_segs.append((h.p_vaddr, len(sd), len(data_blob))) data_blob = data_blob + sd if h.p_memsz > h.p_filesz: bytesz = h.p_memsz - h.p_filesz addr = h.p_vaddr + h.p_filesz verbose("%d bytes of zero-fill at 0x%x" % (bytesz, addr)) zero_segs.append((addr, bytesz)) verbose(f"{len(data_blob)} bytes of data to include in image") # # Emit a C header containing the metadata # cf = open("zefi-segments.h", "w") cf.write("/* GENERATED CODE. DO NOT EDIT. 
*/\n\n") cf.write("/* Sizes and offsets specified in 4-byte units.\n") cf.write(" * All addresses 4-byte aligned.\n") cf.write(" */\n") cf.write("struct data_seg { uint64_t addr; uint32_t sz; uint32_t off; };\n\n") cf.write("static struct data_seg zefi_dsegs[] = {\n") for s in data_segs: cf.write(" { 0x%x, %d, %d },\n" % (s[0], s[1], s[2])) cf.write("};\n\n") cf.write("struct zero_seg { uint64_t addr; uint32_t sz; };\n\n") cf.write("static struct zero_seg zefi_zsegs[] = {\n") for s in zero_segs: cf.write(" { 0x%x, %d },\n" % (s[0], s[1])) cf.write("};\n\n") cf.write("static uintptr_t zefi_entry = 0x%xUL;\n" % (entry_addr)) cf.close() verbose("Metadata header generated.") # # Build # # First stage ELF binary. Flag notes: # + Stack protector is default on some distros and needs library support # + We need pic to enforce that the linker adds no relocations # + UEFI can take interrupts on our stack, so no red zone # + UEFI API assumes 16-bit wchar_t includes = [] for include_dir in include_dirs: includes.extend(["-I", include_dir]) cmd = ([args.compiler, "-shared", "-Wall", "-Werror", "-I."] + includes + ["-fno-stack-protector", "-fpic", "-mno-red-zone", "-fshort-wchar", "-Wl,-nostdlib", "-T", ldscript, "-o", "zefi.elf", cfile]) verbose(" ".join(cmd)) subprocess.run(cmd, check = True) # Extract the .data segment and append our extra blob cmd = [args.objcopy, "-O", "binary", "-j", ".data", "zefi.elf", "data.dat"] verbose(" ".join(cmd)) subprocess.run(cmd, check = True) assert (os.stat("data.dat").st_size % 8) == 0 df = open("data.dat", "ab") df.write(data_blob) df.close() # FIXME: this generates warnings about our unused trash section having to be moved to make room. Set its address far away... subprocess.run([args.objcopy, "--update-section", ".data=data.dat", "zefi.elf"], check = True) # Convert it to a PE-COFF DLL. 
cmd = [args.objcopy, "--target=efi-app-x86_64", "-j", ".text", "-j", ".reloc", "-j", ".data", "zefi.elf", "zephyr.efi"] verbose(" ".join(cmd)) subprocess.run(cmd, check = True) verbose("Build complete; zephyr.efi wrapper binary is ready") def parse_args(): parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, allow_abbrev=False) parser.add_argument("-c", "--compiler", required=True, help="Compiler to be used") parser.add_argument("-o", "--objcopy", required=True, help="objcopy to be used") parser.add_argument("-f", "--elf-file", required=True, help="Input file") parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output") parser.add_argument("-i", "--includes", required=True, nargs="+", help="Zephyr base include directories") return parser.parse_args() if __name__ == "__main__": args = parse_args() verbose(f"Working on {args.elf_file} with {args.includes}...") build_elf(args.elf_file, args.includes) ```
/content/code_sandbox/arch/x86/zefi/zefi.py
python
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,421
```c
/*
 * EFI stub: copies the appended Zephyr image segments to their
 * link-time addresses, prepares the EFI boot argument, and jumps
 * to the kernel's 64-bit entry point with interrupts disabled.
 */

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include "efi.h"
#include "printf.h"
#include <zefi-segments.h>
#include <zephyr/arch/x86/efi.h>

/* Characters buffered before flushing to the EFI console (see efi_putchar) */
#define PUTCHAR_BUFSZ 128

/* EFI GUID for RSDP
 * See "Finding the RSDP on UEFI Enabled Systems" in ACPI specs.
 */
#define ACPI_1_0_RSDP_EFI_GUID \
	{ \
		.Data1 = 0xeb9d2d30, \
		.Data2 = 0x2d88, \
		.Data3 = 0x11d3, \
		.Data4 = { 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d }, \
	}

#define ACPI_2_0_RSDP_EFI_GUID \
	{ \
		.Data1 = 0x8868e871, \
		.Data2 = 0xe4f1, \
		.Data3 = 0x11d3, \
		.Data4 = { 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81 }, \
	}

/* The linker places this dummy last in the data memory. We can't use
 * traditional linker address symbols because we're relocatable; the
 * linker doesn't know what the runtime address will be. The compiler
 * has to emit code to find this thing's address at runtime via an
 * offset from RIP. It's a qword so we can guarantee alignment of the
 * stuff after.
 */
static __attribute__((section(".runtime_data_end")))
uint64_t runtime_data_end[1] = { 0x1111aa8888aa1111L };

/* First byte past .data: this is where the appended segment blob lives */
#define EXT_DATA_START ((void *) &runtime_data_end[1])

/* EFI system table handed to us by the firmware at entry */
static struct efi_system_table *efi;

/* Boot argument block passed to Zephyr in RBX (see efi_entry) */
static struct efi_boot_arg efi_arg;

/* Console output sink for printf.h's z_putchar hook.  Buffers up to
 * PUTCHAR_BUFSZ UTF-16 code units and flushes on '\n' or when full;
 * also translates '\n' to "\r\n" for the EFI text console.
 */
static void efi_putchar(int c)
{
	static uint16_t efibuf[PUTCHAR_BUFSZ + 1];
	static int n;

	if (c == '\n') {
		efi_putchar('\r');
	}

	efibuf[n] = c;
	++n;

	if (c == '\n' || n == PUTCHAR_BUFSZ) {
		efibuf[n] = 0U;
		efi->ConOut->OutputString(efi->ConOut, efibuf);
		n = 0;
	}
}

/* Compare two EFI GUIDs via their 64-bit overlay fields (see efi_guid_t) */
static inline bool efi_guid_compare(efi_guid_t *s1, efi_guid_t *s2)
{
	return ((s1->Part1 == s2->Part1) && (s1->Part2 == s2->Part2));
}

/* Linear-scan the EFI configuration table for a vendor table whose
 * GUID matches; returns the VendorTable pointer or NULL if absent.
 */
static void *efi_config_get_vendor_table_by_guid(efi_guid_t *guid)
{
	struct efi_configuration_table *ect_tmp;
	int n_ct;

	if (efi == NULL) {
		return NULL;
	}

	ect_tmp = efi->ConfigurationTable;

	for (n_ct = 0; n_ct < efi->NumberOfTableEntries; n_ct++) {
		if (efi_guid_compare(&ect_tmp->VendorGuid, guid)) {
			return ect_tmp->VendorTable;
		}

		ect_tmp++;
	}

	return NULL;
}

/* Locate the ACPI RSDP (preferring the ACPI 2.0 GUID over 1.0) and
 * record it in efi_arg for the kernel.  Leaves acpi_rsdp NULL if the
 * firmware publishes neither table.
 */
static void efi_prepare_boot_arg(void)
{
	efi_guid_t rsdp_guid_1 = ACPI_1_0_RSDP_EFI_GUID;
	efi_guid_t rsdp_guid_2 = ACPI_2_0_RSDP_EFI_GUID;

	/* Let's lookup for most recent ACPI table first */
	efi_arg.acpi_rsdp = efi_config_get_vendor_table_by_guid(&rsdp_guid_2);
	if (efi_arg.acpi_rsdp == NULL) {
		efi_arg.acpi_rsdp = efi_config_get_vendor_table_by_guid(&rsdp_guid_1);
	}

	if (efi_arg.acpi_rsdp != NULL) {
		printf("RSDP found at %p\n", efi_arg.acpi_rsdp);
	}
}

/* Existing x86_64 EFI environments have a bad habit of leaving the
 * HPET timer running. This then fires later on, once the OS has
 * started. If the timing isn't right, it can happen before the OS
 * HPET driver gets a chance to disable it. And because we do the
 * handoff (necessarily) with interrupts disabled, it's not actually
 * possible for the OS to reliably disable it in time anyway.
 *
 * Basically: it's our job as the bootloader to ensure that no
 * interrupt sources are live before entering the OS. Clear the
 * interrupt enable bit of HPET timer zero.
 */
static void disable_hpet(void)
{
	/* NOTE(review): 0xfed00000 is the conventional HPET MMIO base;
	 * hpet[32] is byte offset 0x100 = timer 0 config register, and
	 * bit 2 is its interrupt-enable bit — assumes the firmware left
	 * the HPET at its default address; confirm for exotic platforms.
	 */
	uint64_t *hpet = (uint64_t *)0xfed00000L;

	hpet[32] &= ~4;
}

/* FIXME: if you check the generated code, "ms_abi" calls like this
 * have to SPILL HALF OF THE SSE REGISTER SET TO THE STACK on entry
 * because of the way the conventions collide. Is there a way to
 * prevent/suppress that?
 */
/* EFI application entry point (ms_abi).  Zeroes BSS-like segments,
 * copies data segments from the appended blob to their runtime
 * addresses, silences the HPET, and jumps to zefi_entry with the
 * boot-arg pointer in RBX.  Never returns on success.
 */
uintptr_t __abi efi_entry(void *img_handle, struct efi_system_table *sys_tab)
{
	(void)img_handle;

	efi = sys_tab;
	z_putchar = efi_putchar;
	printf("*** Zephyr EFI Loader ***\n");

	efi_prepare_boot_arg();

	/* Zero-fill segments (p_memsz > p_filesz regions of the ELF) */
	for (int i = 0; i < sizeof(zefi_zsegs)/sizeof(zefi_zsegs[0]); i++) {
		int bytes = zefi_zsegs[i].sz;
		uint8_t *dst = (uint8_t *)zefi_zsegs[i].addr;

		printf("Zeroing %d bytes of memory at %p\n", bytes, dst);
		for (int j = 0; j < bytes; j++) {
			dst[j] = 0U;
		}
	}

	/* Copy each data segment from the blob appended after .data */
	for (int i = 0; i < sizeof(zefi_dsegs)/sizeof(zefi_dsegs[0]); i++) {
		int bytes = zefi_dsegs[i].sz;
		int off = zefi_dsegs[i].off;
		uint8_t *dst = (uint8_t *)zefi_dsegs[i].addr;
		uint8_t *src = &((uint8_t *)EXT_DATA_START)[off];

		printf("Copying %d data bytes to %p from image offset %d\n",
		       bytes, dst, zefi_dsegs[i].off);
		for (int j = 0; j < bytes; j++) {
			dst[j] = src[j];
		}

		/* Page-aligned blocks below 1M are the .locore
		 * section, which has a jump in its first bytes for
		 * the benefit of 32 bit entry. Those have to be
		 * written over with NOP instructions. (See comment
		 * about OUTRAGEOUS HACK in locore.S) before Zephyr
		 * starts, because the very first thing it does is
		 * install its own page table that disallows writes.
		 */
		if (((long)dst & 0xfff) == 0 && dst < (uint8_t *)0x100000L) {
			for (int i = 0; i < 8; i++) {
				dst[i] = 0x90; /* 0x90 == 1-byte NOP */
			}
		}
	}

	unsigned char *code = (void *)zefi_entry;

	/* Hand the kernel our system table pointer and current CR3 */
	efi_arg.efi_systab = efi;
	__asm__ volatile("movq %%cr3, %0" : "=r"(efi_arg.efi_cr3));

	printf("Jumping to Entry Point: %p (%x %x %x %x %x %x %x)\n",
	       code, code[0], code[1], code[2], code[3],
	       code[4], code[5], code[6]);

	disable_hpet();

	/* The EFI console seems to be buffered, give it a little time
	 * to drain before we start banging on the same UART from the
	 * OS.
	 */
	for (volatile int i = 0; i < 50000000; i++) {
	}

	/* Interrupts off, boot-arg pointer in RBX, jump to the kernel */
	__asm__ volatile("cli; movq %0, %%rbx; jmp *%1"
			 :: "r"(&efi_arg), "r"(code) : "rbx");

	return 0;
}

/* Trick cribbed shamelessly from gnu-efi. We need to emit a ".reloc"
 * section into the image with a single dummy entry for the EFI loader
 * to think we're a valid PE file, gcc won't because it thinks we're
 * ELF.
 */
uint32_t relocation_dummy;
__asm__(".section .reloc\n"
	"base_relocation_block:\n"
	".long relocation_dummy - base_relocation_block\n"
	".long 0x0a\n"
	".word 0\n");
```
/content/code_sandbox/arch/x86/zefi/zefi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,026
```objective-c /* * */ #include <stdarg.h> #include <stdbool.h> #include <stddef.h> /* Tiny, but not-as-primitive-as-it-looks implementation of something * like s/n/printf(). Handles %d, %x, %p, %c and %s only, allows a * "l" qualifier on %d and %x (and silently ignores one %s/%c/%p). * Accepts, but ignores, field width and precision values that match: * the regex: [0-9]*\.?[0-9]* */ struct _pfr { char *buf; int len; int idx; }; /* Set this function pointer to something that generates output */ static void (*z_putchar)(int c); static void pc(struct _pfr *r, int c) { if (r->buf != NULL) { if (r->idx <= r->len) { r->buf[r->idx] = c; } } else { z_putchar(c); } r->idx++; } static void prdec(struct _pfr *r, long v) { if (v < 0) { pc(r, '-'); v = -v; } char digs[11 * sizeof(long)/4]; int i = sizeof(digs) - 1; digs[i] = 0; --i; while (v || i == 9) { digs[i] = '0' + (v % 10); --i; v /= 10; } ++i; while (digs[i] != '\0') { pc(r, digs[i]); ++i; } } static void endrec(struct _pfr *r) { if (r->buf && r->idx < r->len) { r->buf[r->idx] = 0; } } static int vpf(struct _pfr *r, const char *f, va_list ap) { for (/**/; *f != '\0'; f++) { bool islong = false; if (*f != '%') { pc(r, *f); continue; } if (f[1] == 'l') { islong = sizeof(long) > 4; f++; } /* Ignore (but accept) field width and precision values */ while (f[1] >= '0' && f[1] <= '9') { f++; } if (f[1] == '.') { f++; } while (f[1] >= '0' && f[1] <= '9') { f++; } switch (*(++f)) { case 0: return r->idx; case '%': pc(r, '%'); break; case 'c': pc(r, va_arg(ap, int)); break; case 's': { char *s = va_arg(ap, char *); while (*s != '\0') { pc(r, *s); ++s; } break; } case 'p': pc(r, '0'); pc(r, 'x'); /* fall through... */ islong = sizeof(long) > 4; case 'x': { int sig = 0; unsigned long v = islong ? 
va_arg(ap, unsigned long) : va_arg(ap, unsigned int); for (int i = 2*sizeof(long) - 1; i >= 0; i--) { int d = (v >> (i*4)) & 0xf; sig += !!d; if (sig || i == 0) { pc(r, "0123456789abcdef"[d]); } } break; } case 'd': prdec(r, va_arg(ap, int)); break; default: pc(r, '%'); pc(r, *f); } } endrec(r); return r->idx; } #define CALL_VPF(rec) \ va_list ap; \ va_start(ap, f); \ ret = vpf(&r, f, ap); \ va_end(ap); static inline int snprintf(char *buf, unsigned long len, const char *f, ...) { int ret; struct _pfr r = { .buf = buf, .len = len }; CALL_VPF(&r); return ret; } static inline int sprintf(char *buf, const char *f, ...) { int ret; struct _pfr r = { .buf = buf, .len = 0x7fffffff }; CALL_VPF(&r); return ret; } static inline int printf(const char *f, ...) { int ret; struct _pfr r = {0}; CALL_VPF(&r); return ret; } ```
/content/code_sandbox/arch/x86/zefi/printf.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,054
```linker script
/*
 * Linker script for the zefi EFI stub: a small position-independent
 * image whose sections (.text/.reloc/.data) are later objcopy'd into
 * a PE-COFF EFI application.
 */

ENTRY(efi_entry)

SECTIONS {
	/* Pick a reasonable base address, EFI won't load us there anyway */
	. = 0x4000000;

	.text : {
		*(.text)
		*(.rodata)
	}

	/* Must be a separately visible section to the EFI loader, doesn't
	 * need to be page-aligned and can be immediately after text/rodata
	 */
	.reloc : {
		*(.reloc)
	}

	/* Must be page-aligned or EFI balks */
	. = ALIGN(4096);

	.data : {
		*(.data)
		*(COMMON)
		*(.bss)
		*(.runtime_data_end) /* Must be last. Obviously. */
	}

	/* Because binutils ld really likes to "helpfully" ignore the section
	 * directives and drop junk in weird places.
	 */
	.trash_bin : {
		*(.comment)
		*(.data.rel*)
		*(.dynamic)
		*(.dynbss)
		*(.dynstr)
		*(.dynsym)
		*(.eh_frame)
		*(.gnu.hash)
		*(.gnu.version*)
		*(.got)
		*(.got.plt)
		*(.hash)
		*(.note.*)
		*(.plt)
		*(.plt.*)
		*(.rela.*)
		*(.rel.local)
	}
} /* SECTIONS */
```
/content/code_sandbox/arch/x86/zefi/efi.ld
linker script
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
302
```c
/*
 * x86 data cache enable/disable and flush primitives.
 */

/**
 * @file
 * @brief Cache manipulation
 *
 * This module contains functions for manipulation caches.
 */

#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sys/util.h>
#include <zephyr/toolchain.h>
#include <zephyr/cache.h>
#include <stdbool.h>

/* Not Write-through bit */
#define X86_REG_CR0_NW BIT(29)

/* Cache Disable bit */
#define X86_REG_CR0_CD BIT(30)

/* Write back and invalidate all caches (WBINVD instruction) */
static inline void z_x86_wbinvd(void)
{
	__asm__ volatile("wbinvd;\n\t"
			 : : : "memory");
}

/* Enable the data cache by clearing CR0.NW and CR0.CD */
void arch_dcache_enable(void)
{
	unsigned long cr0 = 0;

	/* Enable write-back caching by clearing the NW and CD bits */
	__asm__ volatile("mov %%cr0, %0;\n\t"
			 "and %1, %0;\n\t"
			 "mov %0, %%cr0;\n\t"
			 : "=r" (cr0)
			 : "i" (~(X86_REG_CR0_NW | X86_REG_CR0_CD)));
}

/* Disable the data cache: set CR0.CD (with NW clear), then flush */
void arch_dcache_disable(void)
{
	unsigned long cr0 = 0;

	/* Enter the no-fill mode by setting NW=0 and CD=1 */
	__asm__ volatile("mov %%cr0, %0;\n\t"
			 "and %1, %0;\n\t"
			 "or %2, %0;\n\t"
			 "mov %0, %%cr0;\n\t"
			 : "=r" (cr0)
			 : "i" (~(X86_REG_CR0_NW)),
			   "i" (X86_REG_CR0_CD));

	/* Flush all caches */
	z_x86_wbinvd();
}

/* Write back the entire data cache; always succeeds (returns 0) */
int arch_dcache_flush_all(void)
{
	z_x86_wbinvd();

	return 0;
}

/* WBINVD both writes back and invalidates, so it serves all three ops */
int arch_dcache_invd_all(void)
{
	z_x86_wbinvd();

	return 0;
}

int arch_dcache_flush_and_invd_all(void)
{
	z_x86_wbinvd();

	return 0;
}

/**
 * No alignment is required for either <virt> or <size>, but since
 * sys_cache_flush() iterates on the cache lines, a cache line alignment for
 * both is optimal.
 *
 * The cache line size is specified via the d-cache-line-size DTS property.
 */
int arch_dcache_flush_range(void *start_addr, size_t size)
{
	size_t line_size = sys_cache_data_line_size_get();
	uintptr_t start = (uintptr_t)start_addr;
	uintptr_t end = start + size;

	/* No known line size means we cannot iterate line by line */
	if (line_size == 0U) {
		return -ENOTSUP;
	}

	/* Rounding end up (and stepping by line_size from an unaligned
	 * start) still touches every line in [start, start+size), since
	 * CLFLUSH flushes the whole line containing its operand.
	 */
	end = ROUND_UP(end, line_size);

	for (; start < end; start += line_size) {
		__asm__ volatile("clflush %0;\n\t"
				 : "+m"(*(volatile char *)start));
	}

	/* Order the CLFLUSHes against later accesses */
#if defined(CONFIG_X86_MFENCE_INSTRUCTION_SUPPORTED)
	__asm__ volatile("mfence;\n\t":::"memory", "cc");
#else
	/* Locked RMW as a serializing fallback.  NOTE(review): uses
	 * %esp, so this branch presumably only builds for 32-bit
	 * targets without MFENCE — confirm against Kconfig.
	 */
	__asm__ volatile("lock; addl $0,-4(%%esp);\n\t":::"memory", "cc");
#endif
	return 0;
}

/* CLFLUSH both writes back and invalidates, so reuse the flush loop */
int arch_dcache_invd_range(void *start_addr, size_t size)
{
	return arch_dcache_flush_range(start_addr, size);
}

int arch_dcache_flush_and_invd_range(void *start_addr, size_t size)
{
	return arch_dcache_flush_range(start_addr, size);
}
```
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
749
```c
/*
 * Default x86 CPU idle implementations: halt the processor until the
 * next interrupt, with interrupts enabled atomically by STI;HLT.
 */

#include <zephyr/kernel.h>
#include <zephyr/tracing/tracing.h>
#include <zephyr/arch/cpu.h>

#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
/* Enable interrupts and halt until the next interrupt arrives.
 * STI's one-instruction interrupt shadow guarantees no interrupt can
 * slip in between the STI and the HLT.
 */
__pinned_func
void arch_cpu_idle(void)
{
	sys_trace_idle();

	__asm__ volatile (
	    "sti\n\t"
	    "hlt\n\t");
}
#endif

#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
/* Atomically re-enable interrupts and halt, then restore the caller's
 * interrupt lockout state.  @param key is the saved EFLAGS from the
 * matching irq_lock(); bit 0x200 is the IF (interrupt enable) flag.
 */
__pinned_func
void arch_cpu_atomic_idle(unsigned int key)
{
	sys_trace_idle();

	__asm__ volatile (
	    "sti\n\t"
	    /*
	     * The following statement appears in "Intel 64 and IA-32
	     * Architectures Software Developer's Manual", regarding the 'sti'
	     * instruction:
	     *
	     * "After the IF flag is set, the processor begins responding to
	     *    external, maskable interrupts after the next instruction is
	     *    executed."
	     *
	     * Thus the IA-32 implementation of arch_cpu_atomic_idle() will
	     * atomically re-enable interrupts and enter a low-power mode.
	     */
	    "hlt\n\t");

	/* restore interrupt lockout state before returning to caller */
	if ((key & 0x200U) == 0U) {
		__asm__ volatile("cli");
	}
}
#endif
```
/content/code_sandbox/arch/x86/core/cpuhalt.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
273
```objective-c /* * */ #ifndef ZEPHYR_ARCH_X86_EFI_H_ #define ZEPHYR_ARCH_X86_EFI_H_ #ifndef _ASMLANGUAGE #include <stdbool.h> #define __abi __attribute__((ms_abi)) /* * This is a quick installment of EFI structures and functions. * Only a very minimal subset will be used and documented, thus the * lack of documentation at the moment. * See the UEFI 2.8b specifications for more information * at path_to_url */ /* Note: all specified attributes/parameters of type char16_t have been * translated to uint16_t as, for now, we don't have char16_t and we don't * care being pedantic, plus we do not use it yet. * This will need to be changed if required. */ typedef uintptr_t efi_status_t; #define EFI_STATUS(_status) (_status | BIT((BITS_PER_LONG-1))) #define EFI_SUCCESS 0 #define EFI_LOAD_ERROR EFI_STATUS(1) #define EFI_INVALID_PARAMETER EFI_STATUS(2) #define EFI_UNSUPPORTED EFI_STATUS(3) #define EFI_BAD_BUFFER_SIZE EFI_STATUS(4) #define EFI_BUFFER_TOO_SMALL EFI_STATUS(5) #define EFI_NOT_READY EFI_STATUS(6) #define EFI_DEVICE_ERROR EFI_STATUS(7) #define EFI_WRITE_PROTECTED EFI_STATUS(8) #define EFI_OUT_OF_RESOURCES EFI_STATUS(9) #define EFI_VOLUME_CORRUPTED EFI_STATUS(10) #define EFI_VOLUME_FULL EFI_STATUS(11) #define EFI_NO_MEDIA EFI_STATUS(12) #define EFI_MEDIA_CHANGED EFI_STATUS(13) #define EFI_NOT_FOUND EFI_STATUS(14) #define EFI_ACCESS_DENIED EFI_STATUS(15) #define EFI_NO_RESPONSE EFI_STATUS(16) #define EFI_NO_MAPPING EFI_STATUS(17) #define EFI_TIMEOUT EFI_STATUS(18) #define EFI_NOT_STARTED EFI_STATUS(19) #define EFI_ALREADY_STARTED EFI_STATUS(20) #define EFI_ABORTED EFI_STATUS(21) #define EFI_ICMP_ERROR EFI_STATUS(22) #define EFI_TFTP_ERROR EFI_STATUS(23) #define EFI_PROTOCOL_ERROR EFI_STATUS(24) #define EFI_INCOMPATIBLE_VERSION EFI_STATUS(25) #define EFI_SECURITY_VIOLATION EFI_STATUS(26) #define EFI_CRC_ERROR EFI_STATUS(27) #define EFI_END_OF_MEDIA EFI_STATUS(28) #define EFI_END_OF_FILE EFI_STATUS(31) #define EFI_INVALID_LANGUAGE EFI_STATUS(32) #define 
EFI_COMPROMISED_DATA EFI_STATUS(33) #define EFI_IP_ADDRESS_CONFLICT EFI_STATUS(34) #define EFI_HTTP_ERROR EFI_STATUS(35) typedef struct { union { struct { uint32_t Data1; uint16_t Data2; uint16_t Data3; uint8_t Data4[8]; }; /* Easier for comparison */ struct { uint64_t Part1; uint64_t Part2; }; }; } efi_guid_t; struct efi_input_key { uint16_t ScanCode; uint16_t UnicodeChar; }; struct efi_table_header { uint64_t Signature; uint32_t Revision; uint32_t HeaderSize; uint32_t CRC32; uint32_t Reserved; }; struct efi_simple_text_input; typedef efi_status_t __abi (*efi_input_reset_t)( struct efi_simple_text_input *This, bool ExtendedVerification); typedef efi_status_t __abi (*efi_input_read_key_t)( struct efi_simple_text_input *This, struct efi_input_key *Key); struct efi_simple_text_input { efi_input_reset_t Reset; efi_input_read_key_t ReadKeyStroke; void *WaitForKey; }; struct efi_simple_text_output_mode { int32_t MaxMode; int32_t Mode; int32_t Attribute; int32_t CursorColumn; int32_t CursorRow; bool CursorVisible; }; struct efi_simple_text_output; typedef efi_status_t __abi (*efi_text_reset_t)( struct efi_simple_text_output *This, bool ExtendedVerification); typedef efi_status_t __abi (*efi_text_string_t)( struct efi_simple_text_output *This, uint16_t *String); typedef efi_status_t __abi (*efi_text_test_string_t)( struct efi_simple_text_output *This, uint16_t *String); typedef efi_status_t __abi (*efi_text_query_mode_t)( struct efi_simple_text_output *This, uintptr_t ModeNumber, uintptr_t *Columns, uintptr_t *Rows); typedef efi_status_t __abi (*efi_text_set_mode_t)( struct efi_simple_text_output *This, uintptr_t ModeNumber); typedef efi_status_t __abi (*efi_text_set_attribute_t)( struct efi_simple_text_output *This, uintptr_t Attribute); typedef efi_status_t __abi (*efi_text_clear_screen_t)( struct efi_simple_text_output *This); typedef efi_status_t __abi (*efi_text_cursor_position_t)( struct efi_simple_text_output *This, uintptr_t Column, uintptr_t Row); typedef 
efi_status_t __abi (*efi_text_enable_cursor_t)( struct efi_simple_text_output *This, bool Visible); struct efi_simple_text_output { efi_text_reset_t Reset; efi_text_string_t OutputString; efi_text_test_string_t TestString; efi_text_query_mode_t QueryMode; efi_text_set_mode_t SetMode; efi_text_set_attribute_t SetAttribute; efi_text_clear_screen_t ClearScreen; efi_text_cursor_position_t SetCursorPosition; efi_text_enable_cursor_t EnableCursor; struct efi_simple_text_output_mode *Mode; }; struct efi_time { uint16_t Year; uint8_t Month; uint8_t Day; uint8_t Hour; uint8_t Minute; uint8_t Second; uint8_t Pad1; uint32_t NanoSecond; int16_t TimeZone; uint8_t DayLight; uint8_t Pad2; }; struct efi_time_capabilities { uint32_t Resolution; uint32_t Accuracy; bool SetsToZero; }; struct efi_memory_descriptor { uint32_t Type; uint64_t PhysicalStart; uint64_t VirtualStart; uint64_t NumberOfPages; uint64_t Attribute; }; typedef efi_status_t __abi (*efi_get_time_t)( struct efi_time *Time, struct efi_time_capabilities *Capabilities); typedef efi_status_t __abi (*efi_set_time_t)(struct efi_time *Time); typedef efi_status_t __abi (*efi_get_wakeup_time_t)(bool *Enabled, bool *Pending, struct efi_time *Time); typedef efi_status_t __abi (*efi_set_wakeup_time_t)(bool Enabled, struct efi_time *Time); typedef efi_status_t __abi (*efi_set_virtual_address_map_t)( uintptr_t MemoryMapSize, uintptr_t DescriptorSize, uint32_t DescriptorVersion, struct efi_memory_descriptor *VirtualMap); typedef efi_status_t __abi (*efi_convert_pointer_t)(uintptr_t DebugDisposition, void **Address); typedef efi_status_t __abi (*efi_get_variable_t)(uint16_t *VariableName, efi_guid_t *VendorGuid, uint32_t *Attributes, uintptr_t *DataSize, void *Data); typedef efi_status_t __abi (*efi_get_next_variable_name_t)( uintptr_t *VariableNameSize, uint16_t *VariableName, efi_guid_t *VendorGuid); typedef efi_status_t __abi (*efi_set_variable_t)(uint16_t *VariableName, efi_guid_t *VendorGuid, uint32_t *Attributes, uintptr_t 
*DataSize, void *Data); typedef efi_status_t __abi (*efi_get_next_high_monotonic_count_t)( uint32_t *HighCount); enum efi_reset_type { EfiResetCold, EfiResetWarm, EfiResetShutdown, EfiResetPlatformSpecific }; typedef efi_status_t __abi (*efi_reset_system_t)( enum efi_reset_type ResetType, uintptr_t ResetStatus, uintptr_t DataSize, void *ResetData); struct efi_capsule_header { efi_guid_t CapsuleGuid; uint32_t HeaderSize; uint32_t Flags; uint32_t CapsuleImageSize; }; typedef efi_status_t __abi (*efi_update_capsule_t)( struct efi_capsule_header **CapsuleHeaderArray, uintptr_t CapsuleCount, uint64_t ScatterGatherList); typedef efi_status_t __abi (*efi_query_capsule_capabilities_t)( struct efi_capsule_header **CapsuleHeaderArray, uintptr_t CapsuleCount, uint64_t *MaximumCapsuleSize, enum efi_reset_type ResetType); typedef efi_status_t __abi (*efi_query_variable_info_t)( uint32_t Attributes, uint64_t *MaximumVariableStorageSize, uint64_t *RemainingVariableStorageSize, uint64_t *MaximumVariableSize); struct efi_runtime_services { struct efi_table_header Hdr; efi_get_time_t GetTime; efi_set_time_t SetTime; efi_get_wakeup_time_t GetWakeupTime; efi_set_wakeup_time_t SetWakeupTime; efi_set_virtual_address_map_t SetVirtualAddressMap; efi_convert_pointer_t ConvertPointer; efi_get_variable_t GetVariable; efi_get_next_variable_name_t GetNextVariableName; efi_set_variable_t SetVariable; efi_get_next_high_monotonic_count_t GetNextHighMonotonicCount; efi_reset_system_t ResetSystem; efi_update_capsule_t UpdateCapsule; efi_query_capsule_capabilities_t QueryCapsuleCapabilities; efi_query_variable_info_t QueryVariableInfo; }; typedef uintptr_t __abi (*efi_raise_tpl_t)(uintptr_t NewTpl); typedef void __abi (*efi_restore_tpl_t)(uintptr_t OldTpl); enum efi_allocate_type { AllocateAnyPages, AllocateMaxAddress, AllocateAddress, MaxAllocateType }; enum efi_memory_type { EfiReservedMemoryType, EfiLoaderCode, EfiLoaderData, EfiBootServicesCode, EfiBootServicesData, EfiRuntimeServicesCode, 
EfiRuntimeServicesData, EfiConventionalMemory, EfiUnusableMemory, EfiACPIReclaimMemory, EfiACPIMemoryNVS, EfiMemoryMappedIO, EfiMemoryMappedIOPortSpace, EfiPalCode, EfiPersistentMemory, EfiMaxMemoryType }; typedef efi_status_t __abi (*efi_allocate_pages_t)( enum efi_allocate_type Type, enum efi_memory_type MemoryType, uintptr_t Pages, uint64_t *Memory); typedef efi_status_t __abi (*efi_free_pages_t)(uint64_t Memory, uintptr_t Pages); typedef efi_status_t __abi (*efi_get_memory_map_t)( uintptr_t *MemoryMapSize, struct efi_memory_descriptor *MemoryMap, uintptr_t *MapKey, uintptr_t *DescriptorSize, uint32_t *DescriptorVersion); typedef efi_status_t __abi (*efi_allocate_pool_t)( enum efi_memory_type PoolType, uintptr_t Size, void **Buffer); typedef efi_status_t __abi (*efi_free_pool_t)(void *Buffer); typedef void __abi (*efi_notify_function_t)(void *Event, void *context); typedef efi_status_t __abi (*efi_create_event_t)( uint32_t Type, uintptr_t NotifyTpl, efi_notify_function_t NotifyFunction, void *NotifyContext, void **Event); enum efi_timer_delay { TimerCancel, TimerPeriodic, TimerRelative }; typedef efi_status_t __abi (*efi_set_timer_t)(void *Event, enum efi_timer_delay Type, uint64_t TriggerTime); typedef efi_status_t __abi (*efi_wait_for_event_t)(uintptr_t NumberOfEvents, void **Event, uintptr_t *Index); typedef efi_status_t __abi (*efi_signal_event_t)(void *Event); typedef efi_status_t __abi (*efi_close_event_t)(void *Event); typedef efi_status_t __abi (*efi_check_event_t)(void *Event); enum efi_interface_type { EFI_NATIVE_INTERFACE }; typedef efi_status_t __abi (*efi_install_protocol_interface_t)( void **Handle, efi_guid_t *Protocol, enum efi_interface_type InterfaceType, void *Interface); typedef efi_status_t __abi (*efi_reinstall_protocol_interface_t)( void **Handle, efi_guid_t *Protocol, void *OldInterface, void *NewInterface); typedef efi_status_t __abi (*efi_uninstall_protocol_interface_t)( void **Handle, efi_guid_t *Protocol, void *Interface); typedef 
efi_status_t __abi (*efi_handle_protocol_t)( void **Handle, efi_guid_t *Protocol, void **Interface); typedef efi_status_t __abi (*efi_register_protocol_notify_t)( efi_guid_t *Protocol, void *Event, void **Registration); enum efi_locate_search_type { AllHandles, ByRegisterNotify, ByProtocol }; typedef efi_status_t __abi (*efi_locate_handle_t)( enum efi_locate_search_type SearchType, efi_guid_t *Protocol, void *SearchKey, uintptr_t *BufferSize, void **Buffer); struct efi_device_path_protocol { uint8_t Type; uint8_t SubType; uint8_t Length[2]; }; typedef efi_status_t __abi (*efi_locate_device_path_t)( efi_guid_t *Protocol, struct efi_device_path_protocol **DevicePath, void **Handle); typedef efi_status_t __abi (*efi_install_configuration_table_t)( efi_guid_t *Guid, void *Table); typedef efi_status_t __abi (*efi_load_image_t)( bool BootPolicy, void *ParentImageHandle, struct efi_device_path_protocol *DevicePath, void *SourceBuffer, uintptr_t SourceSize, void **ImageHandle); typedef efi_status_t __abi (*efi_start_image_t)(void *ImageHandle, uintptr_t *ExitDataSize, uint16_t **ExitData); typedef efi_status_t __abi (*efi_exit_t)(void *ImageHandle, uintptr_t ExitStatus, uintptr_t ExitDataSize, uint16_t *ExitData); typedef efi_status_t __abi (*efi_unload_image_t)(void *ImageHandle); typedef efi_status_t __abi (*efi_exit_boot_services_t)(void *ImageHandle, uintptr_t MapKey); typedef efi_status_t __abi (*efi_get_next_monotonic_count_t)(uint64_t *Count); typedef efi_status_t __abi (*efi_stall_t)(uintptr_t Microseconds); typedef efi_status_t __abi (*efi_set_watchdog_timer_t)(uintptr_t Timeout, uint64_t WatchdogCode, uintptr_t DataSize, uint16_t *WatchdogData); typedef efi_status_t __abi (*efi_connect_controller_t)( void *ControllerHandle, void **DriverImageHandle, struct efi_device_path_protocol *RemainingDevicePath, bool Recursive); typedef efi_status_t __abi (*efi_disconnect_controller_t)( void *ControllerHandle, void *DriverImageHandle, void *ChildHandle); typedef 
efi_status_t __abi (*efi_open_protocol_t)(void *Handle, efi_guid_t *Protocol, void **Interface, void *AgentHandle, void *ControllerHandle, uint32_t Attributes); typedef efi_status_t __abi (*efi_close_protocol_t)(void *Handle, efi_guid_t *Protocol, void *AgentHandle, void *ControllerHandle); struct efi_open_protocol_information_entry { void *AgentHandle; void *ControllerHandle; uint32_t Attributes; uint32_t OpenCount; }; typedef efi_status_t __abi (*efi_open_protocol_information_t)( void *Handle, efi_guid_t *Protocol, struct efi_open_protocol_information_entry **EntryBuffer, uintptr_t *EntryCount); typedef efi_status_t __abi (*efi_protocols_per_handle_t)( void *Handle, efi_guid_t ***ProtocolBuffer, uintptr_t *ProtocolBufferCount); typedef efi_status_t __abi (*efi_locate_handle_buffer_t)( enum efi_locate_search_type SearchType, efi_guid_t *Protocol, void *SearchKey, uintptr_t *NoHandles, void ***Buffer); typedef efi_status_t __abi (*efi_locate_protocol_t)(efi_guid_t *Protocol, void *Registration, void **Interface); typedef efi_status_t __abi (*efi_multiple_protocol_interface_t)( void *Handle, ...); typedef efi_status_t __abi (*efi_calculate_crc32_t)(void *Data, uintptr_t DataSize, uint32_t CRC32); typedef efi_status_t __abi (*efi_copy_mem_t)(void *Destination, void *Source, uintptr_t Size); typedef efi_status_t __abi (*efi_set_mem_t)(void *Buffer, uintptr_t Size, uint8_t Value); typedef efi_status_t __abi (*efi_create_event_ex_t)( uint32_t Type, uintptr_t NotifyTpl, efi_notify_function_t NotifyFunction, const void *NotifyContext, const efi_guid_t *EventGroup, void **Event); struct efi_boot_services { struct efi_table_header Hdr; efi_raise_tpl_t RaiseTPL; efi_restore_tpl_t RestoreTPL; efi_allocate_pages_t AllocatePages; efi_free_pages_t FreePages; efi_get_memory_map_t GetMemoryMap; efi_allocate_pool_t AllocatePool; efi_free_pool_t FreePool; efi_create_event_t CreateEvent; efi_set_timer_t SetTimer; efi_wait_for_event_t WaitForEvent; efi_signal_event_t SignalEvent; 
efi_close_event_t CloseEvent; efi_check_event_t CheckEvent; efi_install_protocol_interface_t InstallProtocolInterface; efi_reinstall_protocol_interface_t ReinstallProtocolInterface; efi_uninstall_protocol_interface_t UninstallProtocolInterface; efi_handle_protocol_t HandleProtocol; efi_register_protocol_notify_t RegisterProtocolNotify; efi_locate_handle_t LocateHandle; efi_locate_device_path_t LocateDevicePath; efi_install_configuration_table_t InstallConfigurationTable; efi_load_image_t LoadImage; efi_start_image_t StartImage; efi_exit_t Exit; efi_unload_image_t UnloadImage; efi_exit_boot_services_t ExitBootServices; efi_get_next_monotonic_count_t GetNextMonotonicCount; efi_stall_t Stall; efi_set_watchdog_timer_t SetWatchdogTimer; efi_connect_controller_t ConnectController; efi_disconnect_controller_t DisconnectController; efi_open_protocol_t OpenProtocol; efi_close_protocol_t CloseProtocol; efi_open_protocol_information_t OpenProtocolInformation; efi_protocols_per_handle_t ProtocolsPerHandle; efi_locate_handle_buffer_t LocateHandleBuffer; efi_locate_protocol_t LocateProtocol; efi_multiple_protocol_interface_t InstallMultipleProtocolInterfaces; efi_multiple_protocol_interface_t UninstallMultipleProtocolInterfaces; efi_calculate_crc32_t CalculateCrc32; efi_copy_mem_t CopyMem; efi_set_mem_t SetMem; efi_create_event_ex_t CreateEventEx; }; struct efi_configuration_table { /** Vendor EFI GUID Identifier */ efi_guid_t VendorGuid; /** Vendor table pointer */ void *VendorTable; }; struct efi_system_table { struct efi_table_header Hdr; uint16_t *FirmwareVendor; uint32_t FirmwareRevision; void *ConsoleInHandle; struct efi_simple_text_input *ConIn; void *ConsoleOutHandle; struct efi_simple_text_output *ConOut; void *StandardErrorHandle; struct efi_simple_text_output *StdErr; struct efi_runtime_services *RuntimeServices; struct efi_boot_services *BootServices; /** The amount of entries to expect in the next attribute */ uint64_t NumberOfTableEntries; /** A pointer to the 
configuration table(s) */ struct efi_configuration_table *ConfigurationTable; }; #endif /* _ASMLANGUAGE */ #endif /* ZEPHYR_INCLUDE_ARCH_X86_EFI_H_ */ ```
/content/code_sandbox/arch/x86/zefi/efi.h
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,421
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/sys/device_mmio.h> #include <zephyr/sys/util.h> #include <zephyr/drivers/pcie/pcie.h> #include <soc.h> #if DT_PROP_OR(DT_CHOSEN(zephyr_console), io_mapped, 0) != 0 #define UART_IS_IOPORT_ACCESS 1 #endif #if defined(UART_IS_IOPORT_ACCESS) /* Legacy I/O Port Access to a NS16550 UART */ #define IN(reg) sys_in8(reg + DT_REG_ADDR(DT_CHOSEN(zephyr_console))) #define OUT(reg, val) sys_out8(val, reg + DT_REG_ADDR(DT_CHOSEN(zephyr_console))) #elif defined(X86_SOC_EARLY_SERIAL_PCIDEV) /* "Modern" mapping of a UART into a PCI MMIO device. The registers * are still bytes, but spaced at a 32 bit stride instead of packed * together. */ static mm_reg_t mmio; #define IN(reg) (sys_read32(mmio + (reg) * 4) & 0xff) #define OUT(reg, val) sys_write32((val) & 0xff, mmio + (reg) * 4) #elif defined(X86_SOC_EARLY_SERIAL_MMIO8_ADDR) /* Still other devices use a MMIO region containing packed byte * registers */ #ifdef DEVICE_MMIO_IS_IN_RAM static mm_reg_t mmio; #define BASE mmio #else #define BASE X86_SOC_EARLY_SERIAL_MMIO8_ADDR #endif /* DEVICE_MMIO_IS_IN_RAM */ #define IN(reg) sys_read8(BASE + reg) #define OUT(reg, val) sys_write8(val, BASE + reg) #else #error "Unsupported configuration" #endif #define REG_THR 0x00 /* Transmitter holding reg. */ #define REG_IER 0x01 /* Interrupt enable reg. */ #define REG_FCR 0x02 /* FIFO control reg. */ #define REG_LCR 0x03 /* Line control reg. */ #define REG_MCR 0x04 /* Modem control reg. */ #define REG_LSR 0x05 /* Line status reg. 
*/ #define REG_BRDL 0x00 /* Baud rate divisor (LSB) */ #define REG_BRDH 0x01 /* Baud rate divisor (MSB) */ #define IER_DISABLE 0x00 #define LCR_8N1 (BIT(0) | BIT(1)) #define LCR_DLAB_SELECT BIT(7) #define MCR_DTR BIT(0) #define MCR_RTS BIT(1) #define LSR_THRE BIT(5) #define FCR_FIFO BIT(0) /* enable XMIT and RCVR FIFO */ #define FCR_RCVRCLR BIT(1) /* clear RCVR FIFO */ #define FCR_XMITCLR BIT(2) /* clear XMIT FIFO */ #define FCR_FIFO_1 0 /* 1 byte in RCVR FIFO */ static bool early_serial_init_done; static uint32_t suppressed_chars; static void serout(int c) { while ((IN(REG_LSR) & LSR_THRE) == 0) { } OUT(REG_THR, c); } int arch_printk_char_out(int c) { if (!early_serial_init_done) { suppressed_chars++; return c; } if (c == '\n') { serout('\r'); } serout(c); return c; } void z_x86_early_serial_init(void) { #if defined(DEVICE_MMIO_IS_IN_RAM) && !defined(UART_IS_IOPORT_ACCESS) #ifdef X86_SOC_EARLY_SERIAL_PCIDEV struct pcie_bar mbar; pcie_get_mbar(X86_SOC_EARLY_SERIAL_PCIDEV, 0, &mbar); pcie_set_cmd(X86_SOC_EARLY_SERIAL_PCIDEV, PCIE_CONF_CMDSTAT_MEM, true); device_map(&mmio, mbar.phys_addr, mbar.size, K_MEM_CACHE_NONE); #else device_map(&mmio, X86_SOC_EARLY_SERIAL_MMIO8_ADDR, 0x1000, K_MEM_CACHE_NONE); #endif #endif /* DEVICE_MMIO_IS_IN_RAM */ OUT(REG_IER, IER_DISABLE); /* Disable interrupts */ OUT(REG_LCR, LCR_DLAB_SELECT); /* DLAB select */ OUT(REG_BRDL, 1); /* Baud divisor = 1 */ OUT(REG_BRDH, 0); OUT(REG_LCR, LCR_8N1); /* LCR = 8n1 + DLAB off */ OUT(REG_MCR, MCR_DTR | MCR_RTS); /* Turn on FIFO. Some hardware needs this before transmitting */ OUT(REG_FCR, FCR_FIFO | FCR_FIFO_1 | FCR_RCVRCLR | FCR_XMITCLR); early_serial_init_done = true; if (suppressed_chars != 0U) { printk("WARNING: %u chars lost before early serial init\n", suppressed_chars); } } ```
/content/code_sandbox/arch/x86/core/early_serial.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,117
```c /* * */ #include <cpuid.h> /* Header provided by the toolchain. */ #include <zephyr/kernel_structs.h> #include <zephyr/arch/x86/cpuid.h> #include <zephyr/kernel.h> uint32_t z_x86_cpuid_extended_features(void) { uint32_t eax, ebx, ecx = 0U, edx; if (__get_cpuid(CPUID_EXTENDED_FEATURES_LVL, &eax, &ebx, &ecx, &edx) == 0) { return 0; } return edx; } #define INITIAL_APIC_ID_SHIFT (24) #define INITIAL_APIC_ID_MASK (0xFF) uint8_t z_x86_cpuid_get_current_physical_apic_id(void) { uint32_t eax, ebx, ecx, edx; if (IS_ENABLED(CONFIG_X2APIC)) { /* leaf 0x1F should be used first prior to using 0x0B */ if (__get_cpuid(CPUID_EXTENDED_TOPOLOGY_ENUMERATION_V2, &eax, &ebx, &ecx, &edx) == 0) { if (__get_cpuid(CPUID_EXTENDED_TOPOLOGY_ENUMERATION, &eax, &ebx, &ecx, &edx) == 0) { return 0; } } } else { if (__get_cpuid(CPUID_BASIC_INFO_1, &eax, &ebx, &ecx, &edx) == 0) { return 0; } edx = (ebx >> INITIAL_APIC_ID_SHIFT); } return (uint8_t)(edx & INITIAL_APIC_ID_MASK); } ```
/content/code_sandbox/arch/x86/core/cpuid.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
361
```c /* * */ #include <zephyr/init.h> #include <zephyr/kernel.h> #include <kernel_arch_data.h> #include <kernel_arch_func.h> #include <zephyr/arch/x86/msr.h> #include <zephyr/arch/x86/cpuid.h> /* * See: * path_to_url */ #if defined(CONFIG_X86_DISABLE_SSBD) || defined(CONFIG_X86_ENABLE_EXTENDED_IBRS) static int spec_ctrl_init(void) { uint32_t enable_bits = 0U; uint32_t cpuid7 = z_x86_cpuid_extended_features(); #ifdef CONFIG_X86_DISABLE_SSBD if ((cpuid7 & CPUID_SPEC_CTRL_SSBD) != 0U) { enable_bits |= X86_SPEC_CTRL_MSR_SSBD; } #endif #ifdef CONFIG_X86_ENABLE_EXTENDED_IBRS if ((cpuid7 & CPUID_SPEC_CTRL_IBRS) != 0U) { enable_bits |= X86_SPEC_CTRL_MSR_IBRS; } #endif if (enable_bits != 0U) { uint64_t cur = z_x86_msr_read(X86_SPEC_CTRL_MSR); z_x86_msr_write(X86_SPEC_CTRL_MSR, cur | enable_bits); } return 0; } SYS_INIT(spec_ctrl_init, PRE_KERNEL_1, 0); #endif /* CONFIG_X86_DISABLE_SSBD || CONFIG_X86_ENABLE_EXTENDED_IBRS */ ```
/content/code_sandbox/arch/x86/core/spec_ctrl.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
299
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/sys/speculation.h> #include <zephyr/internal/syscall_handler.h> #include <kernel_arch_func.h> #include <ksched.h> #include <x86_mmu.h> BUILD_ASSERT((CONFIG_PRIVILEGED_STACK_SIZE > 0) && (CONFIG_PRIVILEGED_STACK_SIZE % CONFIG_MMU_PAGE_SIZE) == 0); #ifdef CONFIG_DEMAND_PAGING #include <zephyr/kernel/mm/demand_paging.h> #endif #ifndef CONFIG_X86_KPTI /* Update the to the incoming thread's page table, and update the location of * the privilege elevation stack. * * May be called ONLY during context switch. Hot code path! * * Nothing to do here if KPTI is enabled. We are in supervisor mode, so the * active page tables are the kernel's page tables. If the incoming thread is * in user mode we are going to switch CR3 to the domain-specific tables when * we go through z_x86_trampoline_to_user. * * We don't need to update the privilege mode initial stack pointer either, * privilege elevation always lands on the trampoline stack and the irq/syscall * code has to manually transition off of it to the appropriate stack after * switching page tables. */ __pinned_func void z_x86_swap_update_page_tables(struct k_thread *incoming) { #ifndef CONFIG_X86_64 /* Set initial stack pointer when elevating privileges from Ring 3 * to Ring 0. */ _main_tss.esp0 = (uintptr_t)incoming->arch.psp; #endif #ifdef CONFIG_X86_COMMON_PAGE_TABLE z_x86_swap_update_common_page_table(incoming); #else /* Check first that we actually need to do this, since setting * CR3 involves an expensive full TLB flush. */ uintptr_t ptables_phys = incoming->arch.ptables; __ASSERT(ptables_phys != 0, "NULL page tables for thread %p\n", incoming); if (ptables_phys != z_x86_cr3_get()) { z_x86_cr3_set(ptables_phys); } #endif /* CONFIG_X86_COMMON_PAGE_TABLE */ } #endif /* CONFIG_X86_KPTI */ /* Preparation steps needed for all threads if user mode is turned on. * * Returns the initial entry point to swap into. 
*/ void *z_x86_userspace_prepare_thread(struct k_thread *thread) { void *initial_entry; struct z_x86_thread_stack_header *header = #ifdef CONFIG_THREAD_STACK_MEM_MAPPED (struct z_x86_thread_stack_header *)thread->stack_info.mapped.addr; #else (struct z_x86_thread_stack_header *)thread->stack_obj; #endif /* CONFIG_THREAD_STACK_MEM_MAPPED */ thread->arch.psp = header->privilege_stack + sizeof(header->privilege_stack); #ifndef CONFIG_X86_COMMON_PAGE_TABLE /* Important this gets cleared, so that arch_mem_domain_* APIs * can distinguish between new threads, and threads migrating * between domains */ thread->arch.ptables = (uintptr_t)NULL; #endif /* CONFIG_X86_COMMON_PAGE_TABLE */ if ((thread->base.user_options & K_USER) != 0U) { initial_entry = arch_user_mode_enter; } else { initial_entry = z_thread_entry; } return initial_entry; } FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, void *p1, void *p2, void *p3) { size_t stack_end; /* Transition will reset stack pointer to initial, discarding * any old context since this is a one-way operation */ stack_end = Z_STACK_PTR_ALIGN(_current->stack_info.start + _current->stack_info.size - _current->stack_info.delta); #ifdef CONFIG_X86_64 /* x86_64 SysV ABI requires 16 byte stack alignment, which * means that on entry to a C function (which follows a CALL * that pushes 8 bytes) the stack must be MISALIGNED by * exactly 8 bytes. */ stack_end -= 8; #endif #if defined(CONFIG_DEMAND_PAGING) && \ !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT) /* If generic section is not present at boot, * the thread stack may not be in physical memory. * Unconditionally page in the stack instead of * relying on page fault to speed up a little bit * on starting the thread. * * Note that this also needs to page in the reserved * portion of the stack (which is usually the page just * before the beginning of stack in * _current->stack_info.start. 
*/ uintptr_t stack_start; size_t stack_size; uintptr_t stack_aligned_start; size_t stack_aligned_size; stack_start = POINTER_TO_UINT(_current->stack_obj); stack_size = K_THREAD_STACK_LEN(_current->stack_info.size); #if defined(CONFIG_X86_STACK_PROTECTION) /* With hardware stack protection, the first page of stack * is a guard page. So need to skip it. */ stack_start += CONFIG_MMU_PAGE_SIZE; stack_size -= CONFIG_MMU_PAGE_SIZE; #endif (void)k_mem_region_align(&stack_aligned_start, &stack_aligned_size, stack_start, stack_size, CONFIG_MMU_PAGE_SIZE); k_mem_page_in(UINT_TO_POINTER(stack_aligned_start), stack_aligned_size); #endif z_x86_userspace_enter(user_entry, p1, p2, p3, stack_end, _current->stack_info.start); CODE_UNREACHABLE; } ```
/content/code_sandbox/arch/x86/core/userspace.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,203
```c /* * */ /** * @file x86-specific reboot functionalities * * @details Implements the required 'arch' sub-APIs. */ #include <zephyr/kernel.h> #include <zephyr/sys/reboot.h> /* reboot through Reset Control Register (I/O port 0xcf9) */ #define X86_RST_CNT_REG 0xcf9 #define X86_RST_CNT_SYS_RST 0x02 #define X86_RST_CNT_CPU_RST 0x4 #define X86_RST_CNT_FULL_RST 0x08 static inline void cold_reboot(void) { uint8_t reset_value = X86_RST_CNT_CPU_RST | X86_RST_CNT_SYS_RST | X86_RST_CNT_FULL_RST; sys_out8(reset_value, X86_RST_CNT_REG); } void __weak sys_arch_reboot(int type) { switch (type) { case SYS_REBOOT_COLD: cold_reboot(); break; default: /* do nothing */ break; } } ```
/content/code_sandbox/arch/x86/core/reboot_rst_cnt.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
207
```c /* * */ #include <zephyr/acpi/acpi.h> #include <zephyr/dt-bindings/interrupt-controller/intel-ioapic.h> uint32_t arch_acpi_encode_irq_flags(uint8_t polarity, uint8_t trigger) { uint32_t irq_flag = IRQ_DELIVERY_LOWEST; if (trigger == ACPI_LEVEL_SENSITIVE) { irq_flag |= IRQ_TYPE_LEVEL; } else { irq_flag |= IRQ_TYPE_EDGE; } if (polarity == ACPI_ACTIVE_HIGH) { irq_flag |= IRQ_TYPE_HIGH; } else if (polarity == ACPI_ACTIVE_LOW) { irq_flag |= IRQ_TYPE_LOW; } return irq_flag; } ```
/content/code_sandbox/arch/x86/core/x86_acpi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
148
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/sys/device_mmio.h> #include <zephyr/drivers/pcie/pcie.h> #ifdef CONFIG_ACPI #include <zephyr/acpi/acpi.h> #endif #ifdef CONFIG_PCIE_MSI #include <kernel_arch_func.h> #include <zephyr/device.h> #include <zephyr/drivers/pcie/msi.h> #include <zephyr/drivers/interrupt_controller/sysapic.h> #include <zephyr/arch/x86/cpuid.h> #endif /* PCI Express Extended Configuration Mechanism (MMIO) */ #ifdef CONFIG_PCIE_MMIO_CFG #define MAX_PCI_BUS_SEGMENTS 4 static struct { uint32_t start_bus; uint32_t n_buses; uint8_t *mmio; } bus_segs[MAX_PCI_BUS_SEGMENTS]; static bool do_pcie_mmio_cfg; static void pcie_mm_init(void) { #ifdef CONFIG_ACPI struct acpi_mcfg *m = acpi_table_get("MCFG", 0); if (m != NULL) { int n = (m->header.Length - sizeof(*m)) / sizeof(m->pci_segs[0]); for (int i = 0; i < n && i < MAX_PCI_BUS_SEGMENTS; i++) { size_t size; uintptr_t phys_addr; bus_segs[i].start_bus = m->pci_segs[i].StartBusNumber; bus_segs[i].n_buses = 1 + m->pci_segs[i].EndBusNumber - m->pci_segs[i].StartBusNumber; phys_addr = m->pci_segs[i].Address; /* 32 devices & 8 functions per bus, 4k per device */ size = bus_segs[i].n_buses * (32 * 8 * 4096); device_map((mm_reg_t *)&bus_segs[i].mmio, phys_addr, size, K_MEM_CACHE_NONE); } do_pcie_mmio_cfg = true; } #endif } static inline void pcie_mm_conf(pcie_bdf_t bdf, unsigned int reg, bool write, uint32_t *data) { for (int i = 0; i < ARRAY_SIZE(bus_segs); i++) { int off = PCIE_BDF_TO_BUS(bdf) - bus_segs[i].start_bus; if (off >= 0 && off < bus_segs[i].n_buses) { bdf = PCIE_BDF(off, PCIE_BDF_TO_DEV(bdf), PCIE_BDF_TO_FUNC(bdf)); volatile uint32_t *regs = (void *)&bus_segs[i].mmio[bdf << 4]; if (write) { regs[reg] = *data; } else { *data = regs[reg]; } } } } #endif /* CONFIG_PCIE_MMIO_CFG */ /* Traditional Configuration Mechanism */ #define PCIE_X86_CAP 0xCF8U /* Configuration Address Port */ #define PCIE_X86_CAP_BDF_MASK 0x00FFFF00U /* b/d/f bits */ #define PCIE_X86_CAP_EN 0x80000000U /* enable bit */ 
#define PCIE_X86_CAP_WORD_MASK 0x3FU /* 6-bit word index .. */ #define PCIE_X86_CAP_WORD_SHIFT 2U /* .. is in CAP[7:2] */ #define PCIE_X86_CDP 0xCFCU /* Configuration Data Port */ /* * Helper function for exported configuration functions. Configuration access * is not atomic, so spinlock to keep drivers from clobbering each other. */ static inline void pcie_io_conf(pcie_bdf_t bdf, unsigned int reg, bool write, uint32_t *data) { static struct k_spinlock lock; k_spinlock_key_t k; bdf &= PCIE_X86_CAP_BDF_MASK; bdf |= PCIE_X86_CAP_EN; bdf |= (reg & PCIE_X86_CAP_WORD_MASK) << PCIE_X86_CAP_WORD_SHIFT; k = k_spin_lock(&lock); sys_out32(bdf, PCIE_X86_CAP); if (write) { sys_out32(*data, PCIE_X86_CDP); } else { *data = sys_in32(PCIE_X86_CDP); } sys_out32(0U, PCIE_X86_CAP); k_spin_unlock(&lock, k); } static inline void pcie_conf(pcie_bdf_t bdf, unsigned int reg, bool write, uint32_t *data) { #ifdef CONFIG_PCIE_MMIO_CFG if (bus_segs[0].mmio == NULL) { pcie_mm_init(); } if (do_pcie_mmio_cfg) { pcie_mm_conf(bdf, reg, write, data); } else #endif { pcie_io_conf(bdf, reg, write, data); } } /* these functions are explained in include/drivers/pcie/pcie.h */ uint32_t pcie_conf_read(pcie_bdf_t bdf, unsigned int reg) { uint32_t data = 0U; pcie_conf(bdf, reg, false, &data); return data; } void pcie_conf_write(pcie_bdf_t bdf, unsigned int reg, uint32_t data) { pcie_conf(bdf, reg, true, &data); } #ifdef CONFIG_PCIE_MSI #ifdef CONFIG_INTEL_VTD_ICTL #include <zephyr/drivers/interrupt_controller/intel_vtd.h> static const struct device *const vtd = DEVICE_DT_GET_ONE(intel_vt_d); #endif /* CONFIG_INTEL_VTD_ICTL */ /* these functions are explained in include/drivers/pcie/msi.h */ #define MSI_MAP_DESTINATION_ID_SHIFT 12 #define MSI_RH BIT(3) uint32_t pcie_msi_map(unsigned int irq, msi_vector_t *vector, uint8_t n_vector) { uint32_t dest_id; ARG_UNUSED(irq); #if defined(CONFIG_INTEL_VTD_ICTL) if (vector != NULL && n_vector > 0) { return vtd_remap_msi(vtd, vector, n_vector); } #endif dest_id = 
z_x86_cpuid_get_current_physical_apic_id() << MSI_MAP_DESTINATION_ID_SHIFT; /* Directing to current physical CPU (may not be BSP) * Destination ID - RH 1 - DM 0 */ return 0xFEE00000U | dest_id | MSI_RH; } uint16_t pcie_msi_mdr(unsigned int irq, msi_vector_t *vector) { if (vector != NULL) { if (IS_ENABLED(CONFIG_INTEL_VTD_ICTL)) { return 0; } #if defined(CONFIG_PCIE_MSI_X) if (vector->msix) { return 0x4000U | vector->arch.vector; } #endif } return 0x4000U | Z_IRQ_TO_INTERRUPT_VECTOR(irq); } #if defined(CONFIG_INTEL_VTD_ICTL) || defined(CONFIG_PCIE_MSI_X) uint8_t arch_pcie_msi_vectors_allocate(unsigned int priority, msi_vector_t *vectors, uint8_t n_vector) { int prev_vector = -1; int i, irq, vector; if (vectors == NULL || n_vector == 0) { return 0; } #ifdef CONFIG_INTEL_VTD_ICTL { int irte; if (!device_is_ready(vtd)) { return 0; } irte = vtd_allocate_entries(vtd, n_vector); if (irte < 0) { return 0; } for (i = 0; i < n_vector; i++, irte++) { vectors[i].arch.irte = irte; vectors[i].arch.remap = true; } } #endif /* CONFIG_INTEL_VTD_ICTL */ for (i = 0; i < n_vector; i++) { if (n_vector == 1) { /* This path is taken by PCIE device with fixed * or single MSI: IRQ has been already allocated * and/or set on the PCIe bus. Thus we only require * to get it. 
*/ irq = pcie_get_irq(vectors->bdf); } else { irq = arch_irq_allocate(); } if ((irq == PCIE_CONF_INTR_IRQ_NONE) || (irq == -1)) { return -1; } vector = z_x86_allocate_vector(priority, prev_vector); if (vector < 0) { return 0; } vectors[i].arch.irq = irq; vectors[i].arch.vector = vector; #ifdef CONFIG_INTEL_VTD_ICTL vtd_set_irte_vector(vtd, vectors[i].arch.irte, vectors[i].arch.vector); vtd_set_irte_irq(vtd, vectors[i].arch.irte, vectors[i].arch.irq); vtd_set_irte_msi(vtd, vectors[i].arch.irte, true); #endif prev_vector = vectors[i].arch.vector; } return n_vector; } bool arch_pcie_msi_vector_connect(msi_vector_t *vector, void (*routine)(const void *parameter), const void *parameter, uint32_t flags) { #ifdef CONFIG_INTEL_VTD_ICTL if (vector->arch.remap) { union acpi_dmar_id id; if (!device_is_ready(vtd)) { return false; } id.bits.bus = PCIE_BDF_TO_BUS(vector->bdf); id.bits.device = PCIE_BDF_TO_DEV(vector->bdf); id.bits.function = PCIE_BDF_TO_FUNC(vector->bdf); vtd_remap(vtd, vector->arch.irte, vector->arch.vector, flags, id.raw); } #endif /* CONFIG_INTEL_VTD_ICTL */ z_x86_irq_connect_on_vector(vector->arch.irq, vector->arch.vector, routine, parameter); return true; } #endif /* CONFIG_INTEL_VTD_ICTL || CONFIG_PCIE_MSI_X */ #endif /* CONFIG_PCIE_MSI */ ```
/content/code_sandbox/arch/x86/core/pcie.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,235
```cmake
# XXX: When using the Intel toolchain, cmake doesn't recognize .S files
# automatically, and I can't be bothered to figure out how to fix this.
set_property(SOURCE intel64/locore.S PROPERTY LANGUAGE ASM)

# Core intel64 sources, always built.
zephyr_library_sources(
  intel64/locore.S
  intel64/cpu.c
  intel64/irq.c
  intel64/thread.c
  intel64/fatal.c
)

# Feature-conditional sources.
zephyr_library_sources_ifdef(CONFIG_SMP                  intel64/smp.c)
zephyr_library_sources_ifdef(CONFIG_IRQ_OFFLOAD          intel64/irq_offload.c)
zephyr_library_sources_ifdef(CONFIG_USERSPACE            intel64/userspace.S)
zephyr_library_sources_ifdef(CONFIG_THREAD_LOCAL_STORAGE intel64/tls.c)
zephyr_library_sources_ifdef(CONFIG_DEBUG_COREDUMP       intel64/coredump.c)
```
/content/code_sandbox/arch/x86/core/intel64.cmake
cmake
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
187
```c /* */ #include <zephyr/kernel.h> #include <string.h> #include <zephyr/arch/x86/memmap.h> #include <zephyr/linker/linker-defs.h> #include <kernel_arch_data.h> struct x86_memmap_exclusion x86_memmap_exclusions[] = { #ifdef CONFIG_X86_64 { "locore", _locore_start, _locore_end }, #endif #ifdef CONFIG_XIP { "rom", __rom_region_start, __rom_region_end }, #endif { "ram", _image_ram_start, _image_ram_end }, #ifdef CONFIG_USERSPACE { "app_smem", _app_smem_start, _app_smem_end }, #endif #ifdef CONFIG_COVERAGE_GCOV { "gcov", __gcov_bss_start, __gcov_bss_end }, #endif }; int x86_nr_memmap_exclusions = sizeof(x86_memmap_exclusions) / sizeof(struct x86_memmap_exclusion); /* * The default map symbols are weak so that an application * can override with a hardcoded manual map if desired. */ __weak enum x86_memmap_source x86_memmap_source = X86_MEMMAP_SOURCE_DEFAULT; __weak struct x86_memmap_entry x86_memmap[CONFIG_X86_MEMMAP_ENTRIES] = { { DT_REG_ADDR(DT_CHOSEN(zephyr_sram)), DT_REG_SIZE(DT_CHOSEN(zephyr_sram)), X86_MEMMAP_ENTRY_RAM } }; ```
/content/code_sandbox/arch/x86/core/memmap.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
323
```c
/* Multiboot (v1) boot information handling. */

#include <zephyr/kernel.h>
#include <string.h>
#include <zephyr/arch/x86/multiboot.h>
#include <zephyr/arch/x86/memmap.h>

/* Local copy of the loader-provided info struct; see z_multiboot_init(). */
struct multiboot_info multiboot_info;

/*
 * called very early in the boot process to fetch data out of the multiboot
 * info struct. we need to grab the relevant data before any dynamic memory
 * allocation takes place, lest the struct itself or any data it points to
 * be overwritten before we read it.
 */

/* Mark all x86_memmap[] entries from 'index' onward as unused. */
static inline void clear_memmap(int index)
{
	while (index < CONFIG_X86_MEMMAP_ENTRIES) {
		x86_memmap[index].type = X86_MEMMAP_ENTRY_UNUSED;
		++index;
	}
}

/* Copy the multiboot info struct (given by physical address) into
 * multiboot_info, and, if CONFIG_MULTIBOOT_MEMMAP, translate the
 * loader's memory map into x86_memmap[].
 */
void z_multiboot_init(struct multiboot_info *info_pa)
{
	struct multiboot_info *info;

#if defined(CONFIG_ARCH_MAPS_ALL_RAM) || !defined(CONFIG_X86_MMU)
	/*
	 * Since the struct from bootloader resides in memory
	 * and all memory is mapped, there is no need to
	 * manually map it before accessing.
	 *
	 * Without MMU, all memory are identity-mapped already
	 * so there is no need to map them again.
	 */
	info = info_pa;
#else
	k_mem_map_phys_bare((uint8_t **)&info, POINTER_TO_UINT(info_pa),
			    sizeof(*info_pa), K_MEM_CACHE_NONE);
#endif /* CONFIG_ARCH_MAPS_ALL_RAM */

	if (info == NULL) {
		return;
	}

	memcpy(&multiboot_info, info, sizeof(*info));

#ifdef CONFIG_MULTIBOOT_MEMMAP
	/*
	 * If the extended map (basically, the equivalent of
	 * the BIOS E820 map) is available, then use that.
	 */
	if ((info->flags & MULTIBOOT_INFO_FLAGS_MMAP) &&
	    (x86_memmap_source < X86_MEMMAP_SOURCE_MULTIBOOT_MMAP)) {
		uintptr_t address;
		uintptr_t address_end;
		struct multiboot_mmap *mmap;
		int index = 0;
		uint32_t type;

#if defined(CONFIG_ARCH_MAPS_ALL_RAM) || !defined(CONFIG_X86_MMU)
		address = info->mmap_addr;
#else
		uint8_t *address_va;

		k_mem_map_phys_bare(&address_va, info->mmap_addr,
				    info->mmap_length, K_MEM_CACHE_NONE);

		address = POINTER_TO_UINT(address_va);
#endif /* CONFIG_ARCH_MAPS_ALL_RAM */

		address_end = address + info->mmap_length;

		/* Walk the variable-length mmap entries, converting each
		 * Multiboot type to the corresponding x86_memmap type.
		 */
		while ((address < address_end) &&
		       (index < CONFIG_X86_MEMMAP_ENTRIES)) {
			mmap = UINT_TO_POINTER(address);

			x86_memmap[index].base = mmap->base;
			x86_memmap[index].length = mmap->length;

			switch (mmap->type) {
			case MULTIBOOT_MMAP_RAM:
				type = X86_MEMMAP_ENTRY_RAM;
				break;
			case MULTIBOOT_MMAP_ACPI:
				type = X86_MEMMAP_ENTRY_ACPI;
				break;
			case MULTIBOOT_MMAP_NVS:
				type = X86_MEMMAP_ENTRY_NVS;
				break;
			case MULTIBOOT_MMAP_DEFECTIVE:
				type = X86_MEMMAP_ENTRY_DEFECTIVE;
				break;
			default:
				type = X86_MEMMAP_ENTRY_UNKNOWN;
			}

			x86_memmap[index].type = type;
			++index;
			/* Per the Multiboot spec, an entry's 'size' field
			 * does not count the size field itself, hence the
			 * extra sizeof() when advancing.
			 */
			address += mmap->size + sizeof(mmap->size);
		}

		x86_memmap_source = X86_MEMMAP_SOURCE_MULTIBOOT_MMAP;
		clear_memmap(index);
	}

	/* If no extended map is available, fall back to the basic map. */

	if ((info->flags & MULTIBOOT_INFO_FLAGS_MEM) &&
	    (x86_memmap_source < X86_MEMMAP_SOURCE_MULTIBOOT_MEM)) {
		/* mem_lower/mem_upper are in KB: conventional memory from 0,
		 * extended memory from 1MB.
		 */
		x86_memmap[0].base = 0;
		x86_memmap[0].length = info->mem_lower * 1024ULL;
		x86_memmap[0].type = X86_MEMMAP_ENTRY_RAM;

		if (CONFIG_X86_MEMMAP_ENTRIES > 1) {
			x86_memmap[1].base = 1048576U; /* 1MB */
			x86_memmap[1].length = info->mem_upper * 1024ULL;
			x86_memmap[1].type = X86_MEMMAP_ENTRY_RAM;
			clear_memmap(2);
		}

		x86_memmap_source = X86_MEMMAP_SOURCE_MULTIBOOT_MEM;
	}
#endif /* CONFIG_MULTIBOOT_MEMMAP */
}
```
/content/code_sandbox/arch/x86/core/multiboot.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
993
```c
/* Early C-domain boot initialization for x86. */

#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <zephyr/arch/x86/multiboot.h>
#include <zephyr/arch/x86/efi.h>
#include <x86_mmu.h>

extern FUNC_NORETURN void z_cstart(void);
extern void x86_64_irq_init(void);

#if !defined(CONFIG_X86_64)
/* Boot argument handed over by the assembly entry code. */
__pinned_data x86_boot_arg_t x86_cpu_boot_arg;
#endif

/* Early global initialization functions, C domain. This runs only on the first
 * CPU for SMP systems.
 */
__boot_func
FUNC_NORETURN void z_prep_c(void *arg)
{
	x86_boot_arg_t *cpu_arg = arg;

	_kernel.cpus[0].nested = 0;

#ifdef CONFIG_MMU
	z_x86_mmu_init();
#endif

#if defined(CONFIG_LOAPIC)
	z_loapic_enable(0);
#endif

#ifdef CONFIG_X86_64
	x86_64_irq_init();
#endif

	/* Dispatch on how we were booted: Multiboot loader vs. EFI stub.
	 * Either path records boot data before it can be overwritten.
	 */
	if (IS_ENABLED(CONFIG_MULTIBOOT_INFO) &&
		cpu_arg->boot_type == MULTIBOOT_BOOT_TYPE) {
		z_multiboot_init((struct multiboot_info *)cpu_arg->arg);
	} else if (IS_ENABLED(CONFIG_X86_EFI) &&
		cpu_arg->boot_type == EFI_BOOT_TYPE) {
		efi_init((struct efi_boot_arg *)cpu_arg->arg);
	} else {
		ARG_UNUSED(cpu_arg);
	}

#ifdef CONFIG_X86_VERY_EARLY_CONSOLE
	z_x86_early_serial_init();

#if defined(CONFIG_BOARD_QEMU_X86) || defined(CONFIG_BOARD_QEMU_X86_64)
	/*
	 * Under QEMU and SeaBIOS, everything gets to be printed
	 * immediately after "Booting from ROM.." as there is no newline.
	 * This prevents parsing QEMU console output for the very first
	 * line where it needs to match from the beginning of the line.
	 * So add a dummy newline here so the next output is at
	 * the beginning of a line.
	 */
	arch_printk_char_out('\n');
#endif
#endif

#ifdef CONFIG_X86_STACK_PROTECTION
	unsigned int num_cpus = arch_num_cpus();

	/* Install a guard region under each CPU's interrupt stack. */
	for (int i = 0; i < num_cpus; i++) {
		z_x86_set_stack_guard(z_interrupt_stacks[i]);
	}
#endif
	z_cstart();
	CODE_UNREACHABLE;
}
```
/content/code_sandbox/arch/x86/core/prep_c.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
511
```unknown
# Intel64-specific X86 subarchitecture options

if X86_64

config MAIN_STACK_SIZE
	default 8192

config IDLE_STACK_SIZE
	default 4096

config ISR_STACK_SIZE
	default 16384

config TEST_EXTRA_STACK_SIZE
	default 4096

config SYSTEM_WORKQUEUE_STACK_SIZE
	default 8192

config X86_EXCEPTION_STACK_SIZE
	int "Size of the exception stack(s)"
	default 4096
	help
	  The exception stack(s) (one per CPU) are used both for exception
	  processing and early kernel/CPU initialization. They need only
	  support limited call-tree depth and must fit into the low core,
	  so they are typically smaller than the ISR stacks.

config X86_EXCEPTION_STACK_TRACE
	bool
	default y
	select DEBUG_INFO
	select THREAD_STACK_INFO
	depends on !OMIT_FRAME_POINTER
	depends on NO_OPTIMIZATIONS
	help
	  Internal config to enable runtime stack traces on fatal exceptions.

config SCHED_IPI_VECTOR
	int "IDT vector to use for scheduler IPI"
	default 34
	range 33 $(UINT8_MAX)
	depends on SMP

config TLB_IPI_VECTOR
	int "IDT vector to use for TLB shootdown IPI"
	default 35
	range 33 $(UINT8_MAX)
	depends on SMP

# We should really only have to provide one of the following two values,
# but a bug in the Zephyr SDK for x86 precludes the use of division in
# the assembler. For now, we require that these values be specified manually,
# and we check to be sure they're a valid combination in arch.h. yes, ugh.

config ISR_DEPTH
	int "Maximum IRQ nesting depth"
	default 4
	help
	  The more nesting allowed, the more room is required for IRQ stacks.

config ISR_SUBSTACK_SIZE
	int "Size of ISR substacks"
	default 4096
	help
	  Number of bytes from the ISR stack to reserve for each nested
	  IRQ level. Must be a multiple of 16 to maintain stack alignment.
	  Note that CONFIG_ISR_SUBSTACK_SIZE * CONFIG_ISR_DEPTH must be
	  equal to CONFIG_ISR_STACK_SIZE.
config X86_STACK_PROTECTION bool default y if HW_STACK_PROTECTION select THREAD_STACK_INFO imply THREAD_STACK_MEM_MAPPED help This option leverages the MMU to cause a system fatal error if the bounds of the current process stack are overflowed. This is done by preceding all stack areas with a 4K guard page. config X86_USERSPACE bool default y if USERSPACE select THREAD_STACK_INFO help This option enables APIs to drop a thread's privileges down to ring 3, supporting user-level threads that are protected from each other and from crashing the kernel. endif # X86_64 ```
/content/code_sandbox/arch/x86/core/Kconfig.intel64
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
600
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/kernel_structs.h> #include <kernel_internal.h> #include <kernel_tls.h> #include <zephyr/sys/util.h> size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr) { /* * TLS area for x86 and x86_64 has the data/bss first, * then a pointer pointing to itself. The address to * this pointer needs to be stored in register GS (x86) * or FS (x86_64). GCC generates code which reads this * pointer and offsets from this pointer are used to * access data. */ uintptr_t *self_ptr; /* * Since we are populating things backwards, store * the pointer to the TLS area at top of stack. */ stack_ptr -= sizeof(uintptr_t); self_ptr = (void *)stack_ptr; *self_ptr = POINTER_TO_UINT(stack_ptr); /* * Set thread TLS pointer as this is used to populate * FS/GS at context switch. */ new_thread->tls = POINTER_TO_UINT(self_ptr); /* Setup the TLS data */ stack_ptr -= z_tls_data_size(); z_tls_copy(stack_ptr); return (z_tls_data_size() + sizeof(uintptr_t)); } ```
/content/code_sandbox/arch/x86/core/tls.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
282
```unknown
# IA32-specific X86 subarchitecture options

if !X86_64

config NESTED_INTERRUPTS
	bool "Nested interrupts"
	default y
	help
	  This option enables support for nested interrupts.

menu "Memory Layout Options"

config IDT_NUM_VECTORS
	int "Number of IDT vectors"
	default 256
	range 32 256
	help
	  This option specifies the number of interrupt vector entries in the
	  Interrupt Descriptor Table (IDT). By default all 256 vectors are
	  supported in an IDT requiring 2048 bytes of memory.

config SET_GDT
	bool "Setup GDT as part of boot process"
	default y
	help
	  This option sets up the GDT as part of the boot process. However,
	  this may conflict with some security scenarios where the GDT is
	  already appropriately set by an earlier bootloader stage, in which
	  case this should be disabled. If disabled, the global _gdt pointer
	  will not be available.

config GDT_DYNAMIC
	bool "Store GDT in RAM so that it can be modified"
	depends on SET_GDT
	help
	  This option stores the GDT in RAM instead of ROM, so that it may
	  be modified at runtime at the expense of some memory.

config GDT_RESERVED_NUM_ENTRIES
	int "Number of reserved GDT entry place holders"
	depends on GDT_DYNAMIC
	default 0
	help
	  This option defines the number of GDT entry place holders reserved
	  that can be filled at runtime.

endmenu

menu "Processor Capabilities"

config X86_ENABLE_TSS
	bool
	help
	  This hidden option enables defining a Task State Segment (TSS) for
	  kernel execution. This is needed to handle double-faults or do
	  privilege elevation. It also defines a special TSS and handler for
	  correctly handling double-fault exceptions, instead of just letting
	  the system triple-fault and reset.

config X86_STACK_PROTECTION
	bool
	default y if HW_STACK_PROTECTION
	select THREAD_STACK_INFO
	select SET_GDT
	select GDT_DYNAMIC
	select X86_ENABLE_TSS
	imply THREAD_STACK_MEM_MAPPED if !DEMAND_PAGING
	help
	  This option leverages the MMU to cause a system fatal error if the
	  bounds of the current process stack are overflowed.
This is done by preceding all stack areas with a 4K guard page. config X86_USERSPACE bool default y if USERSPACE select THREAD_STACK_INFO select SET_GDT select GDT_DYNAMIC select X86_ENABLE_TSS help This option enables APIs to drop a thread's privileges down to ring 3, supporting user-level threads that are protected from each other and from crashing the kernel. config X86_PAE bool "Use PAE page tables" default y depends on X86_MMU help If enabled, use PAE-style page tables instead of 32-bit page tables. The advantage is support for the Execute Disable bit, at a cost of more memory for paging structures. menu "Architecture Floating Point Options" if CPU_HAS_FPU config SSE bool "SSE registers" depends on FPU select X86_SSE help This option is deprecated. Please use CONFIG_X86_SSE instead. config SSE_FP_MATH bool "Compiler-generated SSEx instructions" depends on X86_SSE select X86_SSE_FP_MATH help This option is deprecated. Please use CONFIG_X86_SSE_FP_MATH instead. config EAGER_FPU_SHARING bool depends on FPU depends on USERSPACE default y if !X86_NO_LAZY_FP help This hidden option unconditionally saves/restores the FPU/SIMD register state on every context switch. Mitigates CVE-2018-3665, but incurs a performance hit. For vulnerable systems that process sensitive information in the FPU register set, should be used any time CONFIG_FPU is enabled, regardless if the FPU is used by one thread or multiple. config LAZY_FPU_SHARING bool depends on FPU depends on !EAGER_FPU_SHARING depends on FPU_SHARING default y if X86_NO_LAZY_FP || !USERSPACE help This hidden option allows multiple threads to use the floating point registers, using logic to lazily save/restore the floating point register state on context switch. On Intel Core processors, may be vulnerable to exploits which allows malware to read the contents of all floating point registers, see CVE-2018-3665. 
endif # CPU_HAS_FPU config X86_FP_USE_SOFT_FLOAT bool default y if !FPU help Enable using software floating point operations. endmenu config X86_DYNAMIC_IRQ_STUBS int "Number of dynamic interrupt stubs" depends on DYNAMIC_INTERRUPTS default 4 help Installing interrupt handlers with irq_connect_dynamic() requires some stub code to be generated at build time, one stub per dynamic interrupt. endmenu config X86_EXCEPTION_STACK_TRACE bool default y select DEBUG_INFO select THREAD_STACK_INFO depends on !OMIT_FRAME_POINTER help Internal config to enable runtime stack traces on fatal exceptions. config X86_USE_THREAD_LOCAL_STORAGE bool default y if THREAD_LOCAL_STORAGE select SET_GDT select GDT_DYNAMIC help Internal config to enable thread local storage. config X86_MFENCE_INSTRUCTION_SUPPORTED bool "X86 MFENCE instruction supported" default y depends on CACHE_MANAGEMENT help Set n to disable the use of MFENCE instruction in arch_dcache_flush() for X86 CPUs have CLFLUSH instruction but no MFENCE config X86_RUNTIME_IRQ_STATS bool help Add irq runtime statistics to allow runtime profiling irq performance data with Host tools, enable this and implement platform dependent API runtime_irq_stats(). endif # !X86_64 ```
/content/code_sandbox/arch/x86/core/Kconfig.ia32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,296
```unknown
/* Shared 32-bit entry code for the x86 boot path. */

#include <zephyr/arch/x86/multiboot.h>
#include <zephyr/devicetree.h>

/*
 * This is included by ia32/crt0.S and intel64/locore.S
 * at their 32-bit entry points to cover common ground.
 */

#ifdef CONFIG_MULTIBOOT_INFO
	/*
	 * If we were loaded by a multiboot-compliant loader, then EAX
	 * contains MULTIBOOT_EAX_MAGIC and EBX points to a valid 'struct
	 * multiboot_info'; otherwise EBX is just junk. Check EAX early
	 * before it's clobbered and leave a sentinel (0) in EBX if invalid.
	 * The value in EBX will be the argument to z_prep_c(), so the
	 * subsequent code must, of course, be sure to preserve it meanwhile.
	 */

	cmpl $MULTIBOOT_EAX_MAGIC, %eax
	je 1f
	xorl %ebx, %ebx
1:
#endif

#ifdef CONFIG_PIC_DISABLE
	/*
	 * "Disable" legacy i8259 interrupt controllers. Note that we
	 * can't actually disable them, but we mask all their interrupt
	 * sources which is effectively the same thing (almost).
	 */
	movb $0xff, %al
	outb %al, $0x21		/* master PIC data port: mask all IRQs */
	outb %al, $0xA1		/* slave PIC data port: mask all IRQs */
#endif

#ifdef CONFIG_MULTIBOOT
	/* Embedded Multiboot header; jump around it at runtime. */
	jmp 1f

	.align 4
	.long MULTIBOOT_HEADER_MAGIC
	.long MULTIBOOT_HEADER_FLAGS
	.long -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
#if DT_HAS_COMPAT_STATUS_OKAY(intel_multiboot_framebuffer)
	.fill 5,4,0	/* (unused exec layout) */
	.long 0		/* linear graphics mode */
	.long DT_PROP(DT_INST(0, intel_multiboot_framebuffer), width) /* width */
	.long DT_PROP(DT_INST(0, intel_multiboot_framebuffer), height) /* height */
	.long 32	/* depth */
#endif
1:
#endif
```
/content/code_sandbox/arch/x86/core/common.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
454
```c
/* Legacy BIOS scan for the ACPI Root System Description Pointer (RSDP). */

#include <zephyr/kernel.h>

#define DATA_SIZE_K(n) (n * 1024u)

/* "RSD PTR " read as a little-endian 64-bit value. */
#define RSDP_SIGNATURE ((uint64_t)0x2052545020445352)
/* BIOS Data Area word holding the EBDA segment. */
#define EBDA_ADD (0x040e)
/* BIOS read-only area conventionally searched for the RSDP. */
#define BIOS_RODATA_ADD (0xe0000)
/* Valid physical range for the Extended BIOS Data Area. */
#define BIOS_EXT_DATA_LOW (0x80000UL)
#define BIOS_EXT_DATA_HIGH (0x100000UL)

/* Map 'search_length' bytes at physical 'search_phy_add' and scan for the
 * RSDP signature on 8-byte steps. Returns the signature's physical address,
 * or 0 if not found (or the mapping failed).
 */
static uintptr_t bios_search_rsdp_buff(uintptr_t search_phy_add, uint32_t search_length)
{
	uint64_t *search_buff;

	k_mem_map_phys_bare((uint8_t **)&search_buff, search_phy_add, search_length, 0);
	if (!search_buff) {
		return 0;
	}

	for (int i = 0; i < search_length / 8u; i++) {
		if (search_buff[i] == RSDP_SIGNATURE) {
			/* Unmap before returning the physical address. */
			k_mem_unmap_phys_bare((uint8_t *)search_buff, search_length);
			return (search_phy_add + (i * 8u));
		}
	}

	k_mem_unmap_phys_bare((uint8_t *)search_buff, search_length);

	return 0;
}

/* Locate the ACPI RSDP the way a legacy BIOS publishes it: first the
 * first KB of the EBDA (segment pointer read from the BDA at 0x40e),
 * then the 0xE0000-0xFFFFF BIOS read-only area. Returns the RSDP's
 * physical address, or 0/NULL if not found.
 */
void *bios_acpi_rsdp_get(void)
{
	uint8_t *bios_ext_data, *zero_page_base;
	uintptr_t search_phy_add, rsdp_phy_add;

	/* Map the first 4K page to read the EBDA segment from the BDA. */
	k_mem_map_phys_bare(&zero_page_base, 0, DATA_SIZE_K(4u), 0);
	bios_ext_data = EBDA_ADD + zero_page_base;
	/* Segment value << 4 yields the EBDA physical address. */
	search_phy_add = (uintptr_t)((*(uint16_t *)bios_ext_data) << 4u);
	k_mem_unmap_phys_bare(zero_page_base, DATA_SIZE_K(4u));

	if ((search_phy_add >= BIOS_EXT_DATA_LOW) &&
	    (search_phy_add < BIOS_EXT_DATA_HIGH)) {
		rsdp_phy_add = bios_search_rsdp_buff(search_phy_add, DATA_SIZE_K(1u));
		if (rsdp_phy_add) {
			return (void *)rsdp_phy_add;
		}
	}

	/* Fall back to scanning the BIOS read-only memory. */
	return (void *)bios_search_rsdp_buff(BIOS_RODATA_ADD, DATA_SIZE_K(128u));
}
```
/content/code_sandbox/arch/x86/core/legacy_bios.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
462
```c
/* Minimal EFI runtime services access (console output, ACPI RSDP). */

#include <zephyr/spinlock.h>
#include <zephyr/arch/x86/efi.h>
#include <zephyr/kernel/mm.h>
#include "../zefi/efi.h" /* ZEFI not on include path */
#include <zephyr/kernel.h>
#include <kernel_arch_func.h>

#define EFI_CON_BUFSZ 128

/* Big stack for the EFI code to use */
static uint64_t __aligned(64) efi_stack[1024];

/* Mapped copy of the boot argument handed over by the ZEFI stub. */
struct efi_boot_arg *efi;

/* Return the ACPI RSDP pointer recorded by the EFI stub, or NULL if
 * EFI boot data is not available.
 */
void *efi_get_acpi_rsdp(void)
{
	if (efi == NULL) {
		return NULL;
	}

	return efi->acpi_rsdp;
}

/* Map the EFI boot argument (given by physical address) so the rest of
 * this file can reach the EFI system table and page tables.
 */
void efi_init(struct efi_boot_arg *efi_arg)
{
	if (efi_arg == NULL) {
		return;
	}

	k_mem_map_phys_bare((uint8_t **)&efi, (uintptr_t)efi_arg,
			    sizeof(struct efi_boot_arg), 0);
}

/* EFI thunk.  Not a lot of code, but lots of context:
 *
 * We need to swap in the original EFI page tables for this to work,
 * as Zephyr has only mapped memory it uses and IO it knows about.  In
 * theory we might need to restore more state too (maybe the EFI code
 * uses special segment descriptors from its own GDT, maybe it relies
 * on interrupts in its own IDT, maybe it twiddles custom MSRs or
 * plays with the IO-MMU... the possibilities are endless).  But
 * experimentally, only the memory state seems to be required on known
 * hardware.  This is safe because in the existing architecture Zephyr
 * has already initialized all its own memory and left the rest of the
 * system as-is; we already know it doesn't overlap with the EFI
 * environment (because we've always just assumed that's the case,
 * heh).
 *
 * Similarly we need to swap the stack: EFI firmware was written in an
 * environment where it would be running on multi-gigabyte systems and
 * likes to overflow the tiny stacks Zephyr code uses.  (There is also
 * the problem of the red zone -- SysV reserves 128 bytes of
 * unpreserved data "under" the stack pointer for the use of the
 * current function.  Our compiler would be free to write things there
 * that might be clobbered by the EFI call, which doesn't understand
 * that rule.  Inspection of generated code shows that we're safe, but
 * still, best to swap stacks explicitly.)
 *
 * And the calling conventions are different: the EFI function uses
 * Microsoft's ABI, not SysV.  Parameters go in RCX/RDX/R8/R9 (though
 * we only pass two here), and return value is in RAX (which we
 * multiplex as an input to hold the function pointer).  R10 and R11
 * are also caller-save.  Technically X/YMM0-5 are caller-save too,
 * but as long as this (SysV) function was called per its own ABI they
 * have already been saved by our own caller.  Also note that there is
 * a 32 byte region ABOVE the return value that must be allocated by
 * the caller as spill space for the 4 register-passed arguments (this
 * ABI is so weird...).  We also need two call-preserved scratch
 * registers (for preserving the stack pointer and page table), those
 * are R12/R13.
 *
 * Finally: note that the firmware on at least one board (an Up
 * Squared APL device) will internally ENABLE INTERRUPTS before
 * returning from its OutputString method.  This is... unfortunate, and
 * says poor things about reliability using this code as it will
 * implicitly break the spinlock we're using.  The OS will be able to
 * take an interrupt just fine, but if the resulting ISR tries to log,
 * we'll end up in EFI firmware reentrantly!  The best we can do is an
 * unconditional CLI immediately after returning.
 */
static uint64_t efi_call(void *fn, uint64_t arg1, uint64_t arg2)
{
	/* Leave the top 4 slots free, see the ABI spill-space note above. */
	void *stack_top = &efi_stack[ARRAY_SIZE(efi_stack) - 4];

	/* During the efi_call window the interrupt is enabled, that
	 * means an interrupt could happen and trigger scheduler at
	 * end of the interrupt. Try to prevent swap happening.
	 */
	k_sched_lock();

	__asm__ volatile("movq %%cr3, %%r12;" /* save zephyr page table */
			 "movq %%rsp, %%r13;" /* save stack pointer */
			 "movq %%rsi, %%rsp;" /* set stack */
			 "movq %%rdi, %%cr3;" /* set EFI page table */
			 "callq *%%rax;"
			 "cli;"
			 "movq %%r12, %%cr3;" /* reset paging */
			 "movq %%r13, %%rsp;" /* reset stack */
			 : "+a"(fn)
			 : "c"(arg1), "d"(arg2), "S"(stack_top), "D"(efi->efi_cr3)
			 : "r8", "r9", "r10", "r11", "r12", "r13");

	k_sched_unlock();

	return (uint64_t) fn;
}

/* Emit one character on the EFI console, buffering until a newline or
 * a full buffer, then flushing via the ConOut->OutputString thunk.
 * Returns 0 always.
 */
int efi_console_putchar(int c)
{
	static struct k_spinlock lock;
	static uint16_t efibuf[EFI_CON_BUFSZ + 1];
	static int n;
	static void *conout;
	static void *output_string_fn;
	struct efi_system_table *efist = efi->efi_systab;

	/* Limit the printk call in interrupt context for
	 * EFI console. This is a workaround that prevents
	 * the printk call re-entries when an interrupt
	 * happens during the EFI call window.
	 */
	if (arch_is_in_isr()) {
		return 0;
	}

	if (c == '\n') {
		efi_console_putchar('\r');
	}

	k_spinlock_key_t key = k_spin_lock(&lock);

	/* These structs live in EFI memory and aren't mapped by
	 * Zephyr. Extract the needed pointers by swapping page
	 * tables. Do it via lazy evaluation because this code is
	 * routinely needed much earlier than any feasible init hook.
	 */
	if (conout == NULL) {
		uint64_t cr3;

		__asm__ volatile("movq %%cr3, %0" : "=r"(cr3));
		__asm__ volatile("movq %0, %%cr3" :: "r"(efi->efi_cr3));
		conout = efist->ConOut;
		output_string_fn = efist->ConOut->OutputString;
		__asm__ volatile("movq %0, %%cr3" :: "r"(cr3));
	}

	/* Buffer, to reduce trips through the thunking layer.
	 * Flushes when full and at newlines.
	 */
	efibuf[n++] = c;
	if (c == '\n' || n == EFI_CON_BUFSZ) {
		efibuf[n] = 0U;
		(void)efi_call(output_string_fn, (uint64_t)conout, (uint64_t)efibuf);
		n = 0;
	}

	k_spin_unlock(&lock, key);

	return 0;
}

#ifdef CONFIG_X86_EFI_CONSOLE
int arch_printk_char_out(int c)
{
	return efi_console_putchar(c);
}
#endif
```
/content/code_sandbox/arch/x86/core/efi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,639
```cmake
# Assembler flags differ per toolchain: clang-family compilers must defer
# to GAS, while GNU as needs --divide so '/' is division, not a comment.
if (CMAKE_C_COMPILER_ID STREQUAL "Clang"
    OR CMAKE_C_COMPILER_ID STREQUAL "IntelLLVM")
  # We rely on GAS for assembling, so don't use the integrated assembler
  zephyr_compile_options($<$<COMPILE_LANGUAGE:ASM>:-no-integrated-as>)
elseif(CMAKE_C_COMPILER_ID STREQUAL "GNU")
  zephyr_compile_options($<$<COMPILE_LANGUAGE:ASM>:-Wa,--divide>)
endif()

# Core IA32 kernel sources, always built.
zephyr_library_sources(
    ia32/crt0.S
    ia32/excstub.S
    ia32/intstub.S
    ia32/irq_manage.c
    ia32/swap.S
    ia32/thread.c
    )

# Feature-conditional sources, selected by Kconfig.
zephyr_library_sources_ifdef(CONFIG_IRQ_OFFLOAD ia32/irq_offload.c)
zephyr_library_sources_ifdef(CONFIG_X86_USERSPACE ia32/userspace.S)
zephyr_library_sources_ifdef(CONFIG_LAZY_FPU_SHARING ia32/float.c)
zephyr_library_sources_ifdef(CONFIG_GDBSTUB ia32/gdbstub.c)
zephyr_library_sources_ifdef(CONFIG_DEBUG_COREDUMP ia32/coredump.c)
zephyr_library_sources_ifdef(
  CONFIG_X86_USE_THREAD_LOCAL_STORAGE
  ia32/tls.c
  )

# Last since we declare default exception handlers here
zephyr_library_sources(ia32/fatal.c)
zephyr_library_sources_ifdef(
  CONFIG_X86_FP_USE_SOFT_FLOAT
  ia32/soft_float_stubs.c
  )
```
/content/code_sandbox/arch/x86/core/ia32.cmake
cmake
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
332
```c /* */ #include <zephyr/kernel.h> #include <ksched.h> #include <zephyr/kernel_structs.h> #include <kernel_internal.h> #include <zephyr/arch/common/exc_handle.h> #include <zephyr/logging/log.h> #include <x86_mmu.h> #include <mmu.h> LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); #if defined(CONFIG_BOARD_QEMU_X86) || defined(CONFIG_BOARD_QEMU_X86_64) FUNC_NORETURN void arch_system_halt(unsigned int reason) { ARG_UNUSED(reason); /* Causes QEMU to exit. We passed the following on the command line: * -device isa-debug-exit,iobase=0xf4,iosize=0x04 * * For any value of the first argument X, the return value of the * QEMU process is (X * 2) + 1. * * It has been observed that if the emulator exits for a triple-fault * (often due to bad page tables or other CPU structures) it will * terminate with 0 error code. */ sys_out32(reason, 0xf4); CODE_UNREACHABLE; } #endif #ifdef CONFIG_THREAD_STACK_INFO static inline uintptr_t esf_get_sp(const struct arch_esf *esf) { #ifdef CONFIG_X86_64 return esf->rsp; #else return esf->esp; #endif } __pinned_func bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs) { uintptr_t start, end; if (_current == NULL || arch_is_in_isr()) { /* We were servicing an interrupt or in early boot environment * and are supposed to be on the interrupt stack */ int cpu_id; #ifdef CONFIG_SMP cpu_id = arch_curr_cpu()->id; #else cpu_id = 0; #endif start = (uintptr_t)K_KERNEL_STACK_BUFFER( z_interrupt_stacks[cpu_id]); end = start + CONFIG_ISR_STACK_SIZE; #ifdef CONFIG_USERSPACE } else if ((cs & 0x3U) == 0U && (_current->base.user_options & K_USER) != 0) { /* The low two bits of the CS register is the privilege * level. It will be 0 in supervisor mode and 3 in user mode * corresponding to ring 0 / ring 3. 
* * If we get here, we must have been doing a syscall, check * privilege elevation stack bounds */ start = _current->stack_info.start - CONFIG_PRIVILEGED_STACK_SIZE; end = _current->stack_info.start; #endif /* CONFIG_USERSPACE */ } else { /* Normal thread operation, check its stack buffer */ start = _current->stack_info.start; end = Z_STACK_PTR_ALIGN(_current->stack_info.start + _current->stack_info.size); } return (addr <= start) || (addr + size > end); } #endif #ifdef CONFIG_THREAD_STACK_MEM_MAPPED /** * Check if the fault is in the guard pages. * * @param addr Address to be tested. * * @return True Address is in guard pages, false otherwise. */ __pinned_func bool z_x86_check_guard_page(uintptr_t addr) { struct k_thread *thread = _current; uintptr_t start, end; /* Front guard size - before thread stack area */ start = (uintptr_t)thread->stack_info.mapped.addr - CONFIG_MMU_PAGE_SIZE; end = (uintptr_t)thread->stack_info.mapped.addr; if ((addr >= start) && (addr < end)) { return true; } /* Rear guard size - after thread stack area */ start = (uintptr_t)thread->stack_info.mapped.addr + thread->stack_info.mapped.sz; end = start + CONFIG_MMU_PAGE_SIZE; if ((addr >= start) && (addr < end)) { return true; } return false; } #endif /* CONFIG_THREAD_STACK_MEM_MAPPED */ #ifdef CONFIG_EXCEPTION_DEBUG static inline uintptr_t esf_get_code(const struct arch_esf *esf) { #ifdef CONFIG_X86_64 return esf->code; #else return esf->errorCode; #endif } #if defined(CONFIG_EXCEPTION_STACK_TRACE) struct stack_frame { uintptr_t next; uintptr_t ret_addr; #ifndef CONFIG_X86_64 uintptr_t args; #endif }; #define MAX_STACK_FRAMES CONFIG_EXCEPTION_STACK_TRACE_MAX_FRAMES __pinned_func static void unwind_stack(uintptr_t base_ptr, uint16_t cs) { struct stack_frame *frame; int i; if (base_ptr == 0U) { LOG_ERR("NULL base ptr"); return; } for (i = 0; i < MAX_STACK_FRAMES; i++) { if (base_ptr % sizeof(base_ptr) != 0U) { LOG_ERR("unaligned frame ptr"); return; } frame = (struct stack_frame *)base_ptr; if 
(frame == NULL) { break; } #ifdef CONFIG_THREAD_STACK_INFO /* Ensure the stack frame is within the faulting context's * stack buffer */ if (z_x86_check_stack_bounds((uintptr_t)frame, sizeof(*frame), cs)) { LOG_ERR(" corrupted? (bp=%p)", frame); break; } #endif if (frame->ret_addr == 0U) { break; } #ifdef CONFIG_X86_64 LOG_ERR(" 0x%016lx", frame->ret_addr); #else LOG_ERR(" 0x%08lx (0x%lx)", frame->ret_addr, frame->args); #endif base_ptr = frame->next; } } #endif /* CONFIG_EXCEPTION_STACK_TRACE */ static inline uintptr_t get_cr3(const struct arch_esf *esf) { #if defined(CONFIG_USERSPACE) && defined(CONFIG_X86_KPTI) /* If the interrupted thread was in user mode, we did a page table * switch when we took the exception via z_x86_trampoline_to_kernel */ if ((esf->cs & 0x3) != 0) { return _current->arch.ptables; } #else ARG_UNUSED(esf); #endif /* Return the current CR3 value, it didn't change when we took * the exception */ return z_x86_cr3_get(); } static inline pentry_t *get_ptables(const struct arch_esf *esf) { return k_mem_virt_addr(get_cr3(esf)); } #ifdef CONFIG_X86_64 __pinned_func static void dump_regs(const struct arch_esf *esf) { LOG_ERR("RAX: 0x%016lx RBX: 0x%016lx RCX: 0x%016lx RDX: 0x%016lx", esf->rax, esf->rbx, esf->rcx, esf->rdx); LOG_ERR("RSI: 0x%016lx RDI: 0x%016lx RBP: 0x%016lx RSP: 0x%016lx", esf->rsi, esf->rdi, esf->rbp, esf->rsp); LOG_ERR(" R8: 0x%016lx R9: 0x%016lx R10: 0x%016lx R11: 0x%016lx", esf->r8, esf->r9, esf->r10, esf->r11); LOG_ERR("R12: 0x%016lx R13: 0x%016lx R14: 0x%016lx R15: 0x%016lx", esf->r12, esf->r13, esf->r14, esf->r15); LOG_ERR("RSP: 0x%016lx RFLAGS: 0x%016lx CS: 0x%04lx CR3: 0x%016lx", esf->rsp, esf->rflags, esf->cs & 0xFFFFU, get_cr3(esf)); #ifdef CONFIG_EXCEPTION_STACK_TRACE LOG_ERR("call trace:"); #endif LOG_ERR("RIP: 0x%016lx", esf->rip); #ifdef CONFIG_EXCEPTION_STACK_TRACE unwind_stack(esf->rbp, esf->cs); #endif } #else /* 32-bit */ __pinned_func static void dump_regs(const struct arch_esf *esf) { LOG_ERR("EAX: 0x%08x, EBX: 
0x%08x, ECX: 0x%08x, EDX: 0x%08x", esf->eax, esf->ebx, esf->ecx, esf->edx);
	LOG_ERR("ESI: 0x%08x, EDI: 0x%08x, EBP: 0x%08x, ESP: 0x%08x",
		esf->esi, esf->edi, esf->ebp, esf->esp);
	LOG_ERR("EFLAGS: 0x%08x CS: 0x%04x CR3: 0x%08lx",
		esf->eflags, esf->cs & 0xFFFFU, get_cr3(esf));

#ifdef CONFIG_EXCEPTION_STACK_TRACE
	LOG_ERR("call trace:");
#endif
	LOG_ERR("EIP: 0x%08x", esf->eip);
#ifdef CONFIG_EXCEPTION_STACK_TRACE
	unwind_stack(esf->ebp, esf->cs);
#endif
}
#endif /* CONFIG_X86_64 */

/* Log a human-readable description for a CPU exception vector. The error
 * code argument is only included in the message for vectors whose case
 * below formats it.
 */
__pinned_func
static void log_exception(uintptr_t vector, uintptr_t code)
{
	switch (vector) {
	case IV_DIVIDE_ERROR:
		LOG_ERR("Divide by zero");
		break;
	case IV_DEBUG:
		LOG_ERR("Debug");
		break;
	case IV_NON_MASKABLE_INTERRUPT:
		LOG_ERR("Non-maskable interrupt");
		break;
	case IV_BREAKPOINT:
		LOG_ERR("Breakpoint");
		break;
	case IV_OVERFLOW:
		LOG_ERR("Overflow");
		break;
	case IV_BOUND_RANGE:
		LOG_ERR("Bound range exceeded");
		break;
	case IV_INVALID_OPCODE:
		LOG_ERR("Invalid opcode");
		break;
	case IV_DEVICE_NOT_AVAILABLE:
		LOG_ERR("Floating point unit device not available");
		break;
	case IV_DOUBLE_FAULT:
		LOG_ERR("Double fault (code 0x%lx)", code);
		break;
	case IV_COPROC_SEGMENT_OVERRUN:
		LOG_ERR("Co-processor segment overrun");
		break;
	case IV_INVALID_TSS:
		LOG_ERR("Invalid TSS (code 0x%lx)", code);
		break;
	case IV_SEGMENT_NOT_PRESENT:
		LOG_ERR("Segment not present (code 0x%lx)", code);
		break;
	case IV_STACK_FAULT:
		LOG_ERR("Stack segment fault");
		break;
	case IV_GENERAL_PROTECTION:
		LOG_ERR("General protection fault (code 0x%lx)", code);
		break;
	/* IV_PAGE_FAULT skipped, we have a dedicated handler */
	case IV_X87_FPU_FP_ERROR:
		LOG_ERR("x87 floating point exception");
		break;
	case IV_ALIGNMENT_CHECK:
		LOG_ERR("Alignment check (code 0x%lx)", code);
		break;
	case IV_MACHINE_CHECK:
		LOG_ERR("Machine check");
		break;
	case IV_SIMD_FP:
		LOG_ERR("SIMD floating point exception");
		break;
	case IV_VIRT_EXCEPTION:
		LOG_ERR("Virtualization exception");
		break;
	case IV_SECURITY_EXCEPTION:
		LOG_ERR("Security exception");
		break;
	default:
		LOG_ERR("Exception not handled (code 0x%lx)", code);
		break;
	}
}

/* Decode and log the #PF error code bits together with the faulting linear
 * address read from CR2, then dump the page table flags for that address
 * when the MMU code is built in.
 */
__pinned_func
static void dump_page_fault(struct arch_esf *esf)
{
	uintptr_t err;
	void *cr2;

	/* CR2 holds the linear address that triggered the fault */
	cr2 = z_x86_cr2_get();
	err = esf_get_code(esf);
	LOG_ERR("Page fault at address %p (error code 0x%lx)", cr2, err);

	if ((err & PF_RSVD) != 0) {
		LOG_ERR("Reserved bits set in page tables");
	} else {
		if ((err & PF_P) == 0) {
			LOG_ERR("Linear address not present in page tables");
		}
		LOG_ERR("Access violation: %s thread not allowed to %s",
			(err & PF_US) != 0U ? "user" : "supervisor",
			(err & PF_ID) != 0U ? "execute" :
				((err & PF_WR) != 0U ? "write" : "read"));
		if ((err & PF_PK) != 0) {
			LOG_ERR("Protection key disallowed");
		} else if ((err & PF_SGX) != 0) {
			LOG_ERR("SGX access control violation");
		}
	}

#ifdef CONFIG_X86_MMU
	z_x86_dump_mmu_flags(get_ptables(esf), cr2);
#endif /* CONFIG_X86_MMU */
}
#endif /* CONFIG_EXCEPTION_DEBUG */

/* Common arch-level fatal error path: dump registers when exception
 * debugging is enabled, then hand off to the kernel's z_fatal_error().
 * Never returns.
 */
__pinned_func
FUNC_NORETURN void z_x86_fatal_error(unsigned int reason,
				     const struct arch_esf *esf)
{
	if (esf != NULL) {
#ifdef CONFIG_EXCEPTION_DEBUG
		dump_regs(esf);
#endif
#if defined(CONFIG_ASSERT) && defined(CONFIG_X86_64)
		if (esf->rip == 0xb9) {
			/* See implementation of __resume in locore.S. This is
			 * never a valid RIP value. Treat this as a kernel
			 * panic.
			 */
			LOG_ERR("Attempt to resume un-suspended thread object");
			reason = K_ERR_KERNEL_PANIC;
		}
#endif
	}
	z_fatal_error(reason, esf);
	CODE_UNREACHABLE;
}

/* Entry point for CPU exceptions that have no dedicated handler: log the
 * vector (when exception debugging is on) and escalate to the fatal error
 * path. Never returns.
 */
__pinned_func
FUNC_NORETURN void z_x86_unhandled_cpu_exception(uintptr_t vector,
						 const struct arch_esf *esf)
{
#ifdef CONFIG_EXCEPTION_DEBUG
	log_exception(vector, esf_get_code(esf));
#else
	ARG_UNUSED(vector);
#endif
	z_x86_fatal_error(K_ERR_CPU_EXCEPTION, esf);
}

#ifdef CONFIG_USERSPACE
Z_EXC_DECLARE(z_x86_user_string_nlen);

/* Fixup table: instruction ranges with a recovery address, consulted by the
 * page fault handler below so routines that legitimately probe user memory
 * can resume instead of panicking.
 */
static const struct z_exc_handle exceptions[] = {
	Z_EXC_HANDLE(z_x86_user_string_nlen)
};
#endif

/* Page fault (#PF) handler. In order:
 *  - with demand paging, try to satisfy a non-present fault via
 *    k_mem_page_fault() and retry the instruction on success
 *  - with userspace, resume at a registered fixup address if the faulting
 *    EIP/RIP lies in the exceptions[] table
 *  - otherwise treat as fatal (stack overflow check first, if enabled)
 */
__pinned_func
void z_x86_page_fault_handler(struct arch_esf *esf)
{
#ifdef CONFIG_DEMAND_PAGING
	if ((esf->errorCode & PF_P) == 0) {
		/* Page was non-present at time exception happened.
		 * Get faulting virtual address from CR2 register
		 */
		void *virt = z_x86_cr2_get();
		bool was_valid_access;

#ifdef CONFIG_X86_KPTI
		/* Protection ring is lowest 2 bits in interrupted CS */
		bool was_user = ((esf->cs & 0x3) != 0U);

		/* Need to check if the interrupted context was a user thread
		 * that hit a non-present page that was flipped due to KPTI in
		 * the thread's page tables, in which case this is an access
		 * violation and we should treat this as an error.
		 *
		 * We're probably not locked, but if there is a race, we will
		 * be fine, the kernel page fault code will later detect that
		 * the page is present in the kernel's page tables and the
		 * instruction will just be re-tried, producing another fault.
		 */
		if (was_user &&
		    !z_x86_kpti_is_access_ok(virt, get_ptables(esf))) {
			was_valid_access = false;
		} else
		/* NOTE(review): with CONFIG_X86_KPTI enabled this 'else' has
		 * no statement before the #endif below -- it looks like the
		 * braced assignment was meant to sit outside the conditional.
		 * TODO confirm against upstream before touching.
		 */
#else
		{
			was_valid_access = k_mem_page_fault(virt);
		}
#endif /* CONFIG_X86_KPTI */
		if (was_valid_access) {
			/* Page fault handled, re-try */
			return;
		}
	}
#endif /* CONFIG_DEMAND_PAGING */

#if !defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_COREDUMP)
	z_x86_exception_vector = IV_PAGE_FAULT;
#endif

#ifdef CONFIG_USERSPACE
	int i;

	/* Resume at the fixup address if the faulting instruction is inside
	 * one of the registered recoverable ranges.
	 */
	for (i = 0; i < ARRAY_SIZE(exceptions); i++) {
#ifdef CONFIG_X86_64
		if ((void *)esf->rip >= exceptions[i].start &&
		    (void *)esf->rip < exceptions[i].end) {
			esf->rip = (uint64_t)(exceptions[i].fixup);
			return;
		}
#else
		if ((void *)esf->eip >= exceptions[i].start &&
		    (void *)esf->eip < exceptions[i].end) {
			esf->eip = (unsigned int)(exceptions[i].fixup);
			return;
		}
#endif /* CONFIG_X86_64 */
	}
#endif
#ifdef CONFIG_EXCEPTION_DEBUG
	dump_page_fault(esf);
#endif
#ifdef CONFIG_THREAD_STACK_INFO
	if (z_x86_check_stack_bounds(esf_get_sp(esf), 0, esf->cs)) {
		z_x86_fatal_error(K_ERR_STACK_CHK_FAIL, esf);
	}
#endif
#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
	void *fault_addr = z_x86_cr2_get();

	if (z_x86_check_guard_page((uintptr_t)fault_addr)) {
		z_x86_fatal_error(K_ERR_STACK_CHK_FAIL, esf);
	}
#endif
	z_x86_fatal_error(K_ERR_CPU_EXCEPTION, esf);
	CODE_UNREACHABLE;
}

/* Handler for the kernel oops software interrupt: fetch the oops reason
 * from the interrupted context (RAX on 64-bit, top of stack on 32-bit) and
 * escalate to the fatal error path.
 */
__pinned_func
void z_x86_do_kernel_oops(const struct arch_esf *esf)
{
	uintptr_t reason;

#ifdef CONFIG_X86_64
	reason = esf->rax;
#else
	uintptr_t *stack_ptr = (uintptr_t *)esf->esp;

	reason = *stack_ptr;
#endif

#ifdef CONFIG_USERSPACE
	/* User mode is only allowed to induce oopses and stack check
	 * failures via this software interrupt
	 */
	if ((esf->cs & 0x3) != 0 && !(reason == K_ERR_KERNEL_OOPS ||
				      reason == K_ERR_STACK_CHK_FAIL)) {
		reason = K_ERR_KERNEL_OOPS;
	}
#endif

	z_x86_fatal_error(reason, esf);
}
```
/content/code_sandbox/arch/x86/core/fatal.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,959
```c
/*
 *
 */

/**
 * @file
 * @brief Kernel structure member offset definition file
 *
 * This module is responsible for the generation of the absolute symbols whose
 * value represents the member offsets for various IA-32 structures.
 *
 * All of the absolute symbols defined by this module will be present in the
 * final kernel ELF image (due to the linker's reference to the _OffsetAbsSyms
 * symbol).
 *
 * INTERNAL
 * It is NOT necessary to define the offset for every member of a structure.
 * Typically, only those members that are accessed by assembly language routines
 * are defined; however, it doesn't hurt to define all fields for the sake of
 * completeness.
 */

/* list of headers that define whose structure offsets will be generated */

#ifndef _X86_OFFSETS_INC_
#define _X86_OFFSETS_INC_

#include <zephyr/arch/x86/mmustructs.h>

#if defined(CONFIG_LAZY_FPU_SHARING)
/* FP exception nesting counter, only present with lazy FPU sharing */
GEN_OFFSET_SYM(_thread_arch_t, excNestCount);
#endif

#ifdef CONFIG_USERSPACE
GEN_OFFSET_SYM(_thread_arch_t, psp);
#ifndef CONFIG_X86_COMMON_PAGE_TABLE
/* Per-thread page table pointer; absent when a common page table is used */
GEN_OFFSET_SYM(_thread_arch_t, ptables);
#endif
#endif

GEN_OFFSET_SYM(_thread_arch_t, preempFloatReg);

/**
 * size of the struct k_thread structure sans save area for floating
 * point regs
 */
GEN_ABSOLUTE_SYM(_K_THREAD_NO_FLOAT_SIZEOF,
		 sizeof(struct k_thread) - sizeof(tPreempFloatReg));

GEN_OFFSET_SYM(_callee_saved_t, esp);

/* struct arch_esf structure member offsets */
GEN_OFFSET_STRUCT(arch_esf, eflags);

#endif /* _X86_OFFSETS_INC_ */
```
/content/code_sandbox/arch/x86/core/offsets/ia32_offsets.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
350
```c
/*
 */

#ifndef _X86_OFFSETS_INC_
#define _X86_OFFSETS_INC_

/* _callee_saved_t member offsets */
GEN_OFFSET_SYM(_callee_saved_t, rsp);
GEN_OFFSET_SYM(_callee_saved_t, rbp);
GEN_OFFSET_SYM(_callee_saved_t, rbx);
GEN_OFFSET_SYM(_callee_saved_t, r12);
GEN_OFFSET_SYM(_callee_saved_t, r13);
GEN_OFFSET_SYM(_callee_saved_t, r14);
GEN_OFFSET_SYM(_callee_saved_t, r15);
GEN_OFFSET_SYM(_callee_saved_t, rip);
GEN_OFFSET_SYM(_callee_saved_t, rflags);

/* _thread_arch_t member offsets (remaining GP registers and SSE area) */
GEN_OFFSET_SYM(_thread_arch_t, rax);
GEN_OFFSET_SYM(_thread_arch_t, rcx);
GEN_OFFSET_SYM(_thread_arch_t, rdx);
GEN_OFFSET_SYM(_thread_arch_t, rsi);
GEN_OFFSET_SYM(_thread_arch_t, rdi);
GEN_OFFSET_SYM(_thread_arch_t, r8);
GEN_OFFSET_SYM(_thread_arch_t, r9);
GEN_OFFSET_SYM(_thread_arch_t, r10);
GEN_OFFSET_SYM(_thread_arch_t, r11);
GEN_OFFSET_SYM(_thread_arch_t, sse);

#ifdef CONFIG_USERSPACE
GEN_OFFSET_SYM(_thread_arch_t, ss);
GEN_OFFSET_SYM(_thread_arch_t, cs);
GEN_OFFSET_SYM(_thread_arch_t, psp);
#ifndef CONFIG_X86_COMMON_PAGE_TABLE
/* Per-thread page table pointer; absent when a common page table is used */
GEN_OFFSET_SYM(_thread_arch_t, ptables);
#endif
#endif /* CONFIG_USERSPACE */

/* x86_tss64_t member offsets and total size */
GEN_OFFSET_SYM(x86_tss64_t, ist1);
GEN_OFFSET_SYM(x86_tss64_t, ist2);
GEN_OFFSET_SYM(x86_tss64_t, ist6);
GEN_OFFSET_SYM(x86_tss64_t, ist7);
GEN_OFFSET_SYM(x86_tss64_t, cpu);
#ifdef CONFIG_USERSPACE
GEN_OFFSET_SYM(x86_tss64_t, psp);
GEN_OFFSET_SYM(x86_tss64_t, usp);
#endif /* CONFIG_USERSPACE */
GEN_ABSOLUTE_SYM(__X86_TSS64_SIZEOF, sizeof(x86_tss64_t));

/* x86_cpuboot_t member offsets and total size */
GEN_OFFSET_SYM(x86_cpuboot_t, tr);
GEN_OFFSET_SYM(x86_cpuboot_t, gs_base);
GEN_OFFSET_SYM(x86_cpuboot_t, sp);
GEN_OFFSET_SYM(x86_cpuboot_t, stack_size);
GEN_ABSOLUTE_SYM(__X86_CPUBOOT_SIZEOF, sizeof(x86_cpuboot_t));

#endif /* _X86_OFFSETS_INC_ */
```
/content/code_sandbox/arch/x86/core/offsets/intel64_offsets.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
492
```c
/*
 */

#include <zephyr/kernel.h>
#include <kernel_arch_data.h>
#include <gen_offset.h>
#include <kernel_offsets.h>

/* Pull in the offset definitions for the configured sub-architecture */
#ifdef CONFIG_X86_64
#include "intel64_offsets.c"
#else
#include "ia32_offsets.c"
#endif

/* x86_boot_arg_t member offsets */
GEN_OFFSET_SYM(x86_boot_arg_t, boot_type);
GEN_OFFSET_SYM(x86_boot_arg_t, arg);

GEN_OFFSET_SYM(_thread_arch_t, flags);

GEN_ABS_SYM_END
```
/content/code_sandbox/arch/x86/core/offsets/offsets.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
93
```c
/*
 *
 */

#include <zephyr/kernel.h>
#include <zephyr/arch/x86/mmustructs.h>
#include <zephyr/kernel/mm.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/check.h>
#include <zephyr/logging/log.h>
#include <errno.h>
#include <ctype.h>
#include <zephyr/spinlock.h>
#include <kernel_arch_func.h>
#include <x86_mmu.h>
#include <zephyr/init.h>
#include <kernel_internal.h>
#include <mmu.h>
#include <zephyr/drivers/interrupt_controller/loapic.h>
/* NOTE(review): duplicate include of <mmu.h> below -- harmless if guarded,
 * but worth cleaning up upstream.
 */
#include <mmu.h>
#include <zephyr/arch/x86/memmap.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

/* We will use some ignored bits in the PTE to backup permission settings
 * when the mapping was made. This is used to un-apply memory domain memory
 * partitions to page tables when the partitions are removed.
 */
#define MMU_RW_ORIG	MMU_IGNORED0
#define MMU_US_ORIG	MMU_IGNORED1
#define MMU_XD_ORIG	MMU_IGNORED2

/* Bits in the PTE that form the set of permission bits, when resetting */
#define MASK_PERM	(MMU_RW | MMU_US | MMU_XD)

/* When we want to set up a new mapping, discarding any previous state */
#define MASK_ALL	(~((pentry_t)0U))

/* Bits to set at mapping time for particular permissions. We set the actual
 * page table bit effecting the policy and also the backup bit.
 */
#define ENTRY_RW	(MMU_RW | MMU_RW_ORIG)
#define ENTRY_US	(MMU_US | MMU_US_ORIG)
#define ENTRY_XD	(MMU_XD | MMU_XD_ORIG)

/* Bit position which is always zero in a PTE. We'll use the PAT bit.
 * This helps disambiguate PTEs that do not have the Present bit set (MMU_P):
 * - If the entire entry is zero, it's an un-mapped virtual page
 * - If PTE_ZERO is set, we flipped this page due to KPTI
 * - Otherwise, this was a page-out
 */
#define PTE_ZERO	MMU_PAT

/* Protects x86_domain_list and serializes instantiation of intermediate
 * paging structures.
 */
__pinned_bss
static struct k_spinlock x86_mmu_lock;

#if defined(CONFIG_USERSPACE) && !defined(CONFIG_X86_COMMON_PAGE_TABLE)
/* List of all active and initialized memory domains. This is used to make
 * sure all memory mappings are the same across all page tables when invoking
 * range_map()
 */
__pinned_bss
static sys_slist_t x86_domain_list;
#endif

/*
 * Definitions for building an ontology of paging levels and capabilities
 * at each level
 */

/* Data structure describing the characteristics of a particular paging
 * level
 */
struct paging_level {
	/* What bits are used to store physical address */
	pentry_t mask;

	/* Number of entries in this paging structure */
	size_t entries;

	/* How many bits to right-shift a virtual address to obtain the
	 * appropriate entry within this table.
	 *
	 * The memory scope of each entry in this table is 1 << shift.
	 */
	unsigned int shift;
#ifdef CONFIG_EXCEPTION_DEBUG
	/* Name of this level, for debug purposes */
	const char *name;
#endif
};

/* Flags for all entries in intermediate paging levels.
 * Fortunately, the same bits are set for all intermediate levels for all
 * three paging modes.
 *
 * Obviously P is set.
 *
 * We want RW and US bit always set; actual access control will be
 * done at the leaf level.
 *
 * XD (if supported) always 0. Disabling execution done at leaf level.
 *
 * PCD/PWT always 0. Caching properties again done at leaf level.
 */
#define INT_FLAGS	(MMU_P | MMU_RW | MMU_US)

/* Paging level ontology for the selected paging mode.
 *
 * See Figures 4-4, 4-7, 4-11 in the Intel SDM, vol 3A
 */
__pinned_rodata
static const struct paging_level paging_levels[] = {
#ifdef CONFIG_X86_64
	/* Page Map Level 4 */
	{
		.mask = 0x7FFFFFFFFFFFF000ULL,
		.entries = 512U,
		.shift = 39U,
#ifdef CONFIG_EXCEPTION_DEBUG
		.name = "PML4"
#endif
	},
#endif /* CONFIG_X86_64 */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
	/* Page Directory Pointer Table */
	{
		.mask = 0x7FFFFFFFFFFFF000ULL,
#ifdef CONFIG_X86_64
		.entries = 512U,
#else
		/* PAE version */
		.entries = 4U,
#endif
		.shift = 30U,
#ifdef CONFIG_EXCEPTION_DEBUG
		.name = "PDPT"
#endif
	},
#endif /* CONFIG_X86_64 || CONFIG_X86_PAE */
	/* Page Directory */
	{
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
		.mask = 0x7FFFFFFFFFFFF000ULL,
		.entries = 512U,
		.shift = 21U,
#else
		/* 32-bit */
		.mask = 0xFFFFF000U,
		.entries = 1024U,
		.shift = 22U,
#endif /* CONFIG_X86_64 || CONFIG_X86_PAE */
#ifdef CONFIG_EXCEPTION_DEBUG
		.name = "PD"
#endif
	},
	/* Page Table */
	{
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
		.mask = 0x07FFFFFFFFFFF000ULL,
		.entries = 512U,
		.shift = 12U,
#else
		/* 32-bit */
		.mask = 0xFFFFF000U,
		.entries = 1024U,
		.shift = 12U,
#endif /* CONFIG_X86_64 || CONFIG_X86_PAE */
#ifdef CONFIG_EXCEPTION_DEBUG
		.name = "PT"
#endif
	}
};

#define NUM_LEVELS	ARRAY_SIZE(paging_levels)
#define PTE_LEVEL	(NUM_LEVELS - 1)
#define PDE_LEVEL	(NUM_LEVELS - 2)

/*
 * Macros for reserving space for page tables
 *
 * We need to reserve a block of memory equal in size to the page tables
 * generated by gen_mmu.py so that memory addresses do not shift between
 * build phases. These macros ultimately specify INITIAL_PAGETABLE_SIZE.
 */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#ifdef CONFIG_X86_64
#define NUM_PML4_ENTRIES 512U
#define NUM_PDPT_ENTRIES 512U
#else
#define NUM_PDPT_ENTRIES 4U
#endif /* CONFIG_X86_64 */
#define NUM_PD_ENTRIES   512U
#define NUM_PT_ENTRIES   512U
#else
#define NUM_PD_ENTRIES   1024U
#define NUM_PT_ENTRIES   1024U
#endif /* !CONFIG_X86_64 && !CONFIG_X86_PAE */

/* Memory range covered by an instance of various table types */
#define PT_AREA		((uintptr_t)(CONFIG_MMU_PAGE_SIZE * NUM_PT_ENTRIES))
#define PD_AREA		(PT_AREA * NUM_PD_ENTRIES)
#ifdef CONFIG_X86_64
#define PDPT_AREA	(PD_AREA * NUM_PDPT_ENTRIES)
#endif

#define VM_ADDR		CONFIG_KERNEL_VM_BASE
#define VM_SIZE		CONFIG_KERNEL_VM_SIZE

/* Define a range [PT_START, PT_END) which is the memory range
 * covered by all the page tables needed for the address space
 */
#define PT_START	((uintptr_t)ROUND_DOWN(VM_ADDR, PT_AREA))
#define PT_END		((uintptr_t)ROUND_UP(VM_ADDR + VM_SIZE, PT_AREA))

/* Number of page tables needed to cover address space. Depends on the specific
 * bounds, but roughly 1 page table per 2MB of RAM
 */
#define NUM_PT	((PT_END - PT_START) / PT_AREA)

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
/* Same semantics as above, but for the page directories needed to cover
 * system RAM.
 */
#define PD_START	((uintptr_t)ROUND_DOWN(VM_ADDR, PD_AREA))
#define PD_END		((uintptr_t)ROUND_UP(VM_ADDR + VM_SIZE, PD_AREA))
/* Number of page directories needed to cover the address space. Depends on the
 * specific bounds, but roughly 1 page directory per 1GB of RAM
 */
#define NUM_PD	((PD_END - PD_START) / PD_AREA)
#else
/* 32-bit page tables just have one toplevel page directory */
#define NUM_PD	1
#endif

#ifdef CONFIG_X86_64
/* Same semantics as above, but for the page directory pointer tables needed
 * to cover the address space. On 32-bit there is just one 4-entry PDPT.
 */
#define PDPT_START	((uintptr_t)ROUND_DOWN(VM_ADDR, PDPT_AREA))
#define PDPT_END	((uintptr_t)ROUND_UP(VM_ADDR + VM_SIZE, PDPT_AREA))
/* Number of PDPTs needed to cover the address space. 1 PDPT per 512GB of VM */
#define NUM_PDPT	((PDPT_END - PDPT_START) / PDPT_AREA)

/* All pages needed for page tables, using computed values plus one more for
 * the top-level PML4
 */
#define NUM_TABLE_PAGES	(NUM_PT + NUM_PD + NUM_PDPT + 1)
#else /* !CONFIG_X86_64 */
/* Number of pages we need to reserve in the stack for per-thread page tables */
#define NUM_TABLE_PAGES	(NUM_PT + NUM_PD)
#endif /* CONFIG_X86_64 */

#define INITIAL_PTABLE_PAGES \
	(NUM_TABLE_PAGES + CONFIG_X86_EXTRA_PAGE_TABLE_PAGES)

#ifdef CONFIG_X86_PAE
/* Toplevel PDPT wasn't included as it is not a page in size */
#define INITIAL_PTABLE_SIZE \
	((INITIAL_PTABLE_PAGES * CONFIG_MMU_PAGE_SIZE) + 0x20)
#else
#define INITIAL_PTABLE_SIZE \
	(INITIAL_PTABLE_PAGES * CONFIG_MMU_PAGE_SIZE)
#endif

/* "dummy" pagetables for the first-phase build. The real page tables
 * are produced by gen-mmu.py based on data read in zephyr-prebuilt.elf,
 * and this dummy array is discarded.
 */
Z_GENERIC_SECTION(.dummy_pagetables)
static __used char dummy_pagetables[INITIAL_PTABLE_SIZE];

/*
 * Utility functions
 */

/* For a table at a particular level, get the entry index that corresponds to
 * the provided virtual address
 */
__pinned_func
static inline int get_index(void *virt, int level)
{
	return (((uintptr_t)virt >> paging_levels[level].shift) %
		paging_levels[level].entries);
}

/* Pointer to the table entry covering 'virt' at the given level */
__pinned_func
static inline pentry_t *get_entry_ptr(pentry_t *ptables, void *virt, int level)
{
	return &ptables[get_index(virt, level)];
}

/* Value of the table entry covering 'virt' at the given level */
__pinned_func
static inline pentry_t get_entry(pentry_t *ptables, void *virt, int level)
{
	return ptables[get_index(virt, level)];
}

/* Get the physical memory address associated with this table entry */
__pinned_func
static inline uintptr_t get_entry_phys(pentry_t entry, int level)
{
	return entry & paging_levels[level].mask;
}

/* Return the virtual address of a linked table stored in the provided entry */
__pinned_func
static inline pentry_t *next_table(pentry_t entry, int level)
{
	return k_mem_virt_addr(get_entry_phys(entry, level));
}

/* Number of table entries at this level */
__pinned_func
static inline size_t get_num_entries(int level)
{
	return paging_levels[level].entries;
}

/* 4K for everything except PAE PDPTs */
__pinned_func
static inline size_t table_size(int level)
{
	return get_num_entries(level) * sizeof(pentry_t);
}

/* For a table at a particular level, size of the amount of virtual memory
 * that an entry within the table covers
 */
__pinned_func
static inline size_t get_entry_scope(int level)
{
	return (1UL << paging_levels[level].shift);
}

/* For a table at a particular level, size of the amount of virtual memory
 * that this entire table covers
 */
__pinned_func
static inline size_t get_table_scope(int level)
{
	return get_entry_scope(level) * get_num_entries(level);
}

/* Must have checked Present bit first! Non-present entries may have OS data
 * stored in any other bits
 */
__pinned_func
static inline bool is_leaf(int level, pentry_t entry)
{
	if (level == PTE_LEVEL) {
		/* Always true for PTE */
		return true;
	}

	return ((entry & MMU_PS) != 0U);
}

/* This does NOT (by design) un-flip KPTI PTEs, it's just the raw PTE value.
 * Walks down from the top level, stopping at the first non-present or leaf
 * entry; reports that entry (and optionally the level it was found at).
 */
__pinned_func
static inline void pentry_get(int *paging_level, pentry_t *val,
			      pentry_t *ptables, void *virt)
{
	pentry_t *table = ptables;

	for (int level = 0; level < NUM_LEVELS; level++) {
		pentry_t entry = get_entry(table, virt, level);

		if ((entry & MMU_P) == 0 || is_leaf(level, entry)) {
			*val = entry;
			if (paging_level != NULL) {
				*paging_level = level;
			}
			break;
		} else {
			table = next_table(entry, level);
		}
	}
}

__pinned_func
static inline void tlb_flush_page(void *addr)
{
	/* Invalidate TLB entries corresponding to the page containing the
	 * specified address
	 */
	char *page = (char *)addr;

	__asm__ ("invlpg %0" :: "m" (*page));
}

#ifdef CONFIG_X86_KPTI
/* True if the PTE was inverted by KPTI: not present, but PTE_ZERO set
 * (see PTE_ZERO comment above for the disambiguation scheme)
 */
__pinned_func
static inline bool is_flipped_pte(pentry_t pte)
{
	return (pte & MMU_P) == 0 && (pte & PTE_ZERO) != 0;
}
#endif

#if defined(CONFIG_SMP)
/* IPI handler: reload CR3 on this CPU to flush its TLB after another CPU
 * modified page tables.
 */
__pinned_func
void z_x86_tlb_ipi(const void *arg)
{
	uintptr_t ptables_phys;

	ARG_UNUSED(arg);

#ifdef CONFIG_X86_KPTI
	/* We're always on the kernel's set of page tables in this context
	 * if KPTI is turned on
	 */
	ptables_phys = z_x86_cr3_get();
	__ASSERT(ptables_phys == k_mem_phys_addr(&z_x86_kernel_ptables), "");
#else
	/* We might have been moved to another memory domain, so always invoke
	 * z_x86_thread_page_tables_get() instead of using current CR3 value.
	 */
	ptables_phys = k_mem_phys_addr(z_x86_thread_page_tables_get(_current));
#endif
	/*
	 * In the future, we can consider making this smarter, such as
	 * propagating which page tables were modified (in case they are
	 * not active on this CPU) or an address range to call
	 * tlb_flush_page() on.
	 */
	LOG_DBG("%s on CPU %d\n", __func__, arch_curr_cpu()->id);

	z_x86_cr3_set(ptables_phys);
}

/* NOTE: This is not synchronous and the actual flush takes place some short
 * time after this exits.
 */
__pinned_func
static inline void tlb_shootdown(void)
{
	z_loapic_ipi(0, LOAPIC_ICR_IPI_OTHERS, CONFIG_TLB_IPI_VECTOR);
}
#endif /* CONFIG_SMP */

/* Assert that 'addr' is MMU-page aligned (no-op when asserts are off) */
__pinned_func
static inline void assert_addr_aligned(uintptr_t addr)
{
#if __ASSERT_ON
	__ASSERT((addr & (CONFIG_MMU_PAGE_SIZE - 1)) == 0U,
		 "unaligned address 0x%" PRIxPTR, addr);
#else
	ARG_UNUSED(addr);
#endif
}

__pinned_func
static inline bool is_addr_aligned(uintptr_t addr)
{
	if ((addr & (CONFIG_MMU_PAGE_SIZE - 1)) == 0U) {
		return true;
	} else {
		return false;
	}
}

__pinned_func
static inline void assert_virt_addr_aligned(void *addr)
{
	assert_addr_aligned((uintptr_t)addr);
}

__pinned_func
static inline bool is_virt_addr_aligned(void *addr)
{
	return is_addr_aligned((uintptr_t)addr);
}

/* Assert that 'size' is a whole number of MMU pages */
__pinned_func
static inline void assert_size_aligned(size_t size)
{
#if __ASSERT_ON
	__ASSERT((size & (CONFIG_MMU_PAGE_SIZE - 1)) == 0U,
		 "unaligned size %zu", size);
#else
	ARG_UNUSED(size);
#endif
}

__pinned_func
static inline bool is_size_aligned(size_t size)
{
	if ((size & (CONFIG_MMU_PAGE_SIZE - 1)) == 0U) {
		return true;
	} else {
		return false;
	}
}

__pinned_func
static inline void assert_region_page_aligned(void *addr, size_t size)
{
	assert_virt_addr_aligned(addr);
	assert_size_aligned(size);
}

__pinned_func
static inline bool is_region_page_aligned(void *addr, size_t size)
{
	if (!is_virt_addr_aligned(addr)) {
		return false;
	}

	return is_size_aligned(size);
}

/*
 * Debug functions. All conditionally compiled with CONFIG_EXCEPTION_DEBUG.
 */
#ifdef CONFIG_EXCEPTION_DEBUG

/* Add colors to page table dumps to indicate mapping type */
#define COLOR_PAGE_TABLES	1

#if COLOR_PAGE_TABLES
#define ANSI_DEFAULT "\x1B" "[0m"
#define ANSI_RED     "\x1B" "[1;31m"
#define ANSI_GREEN   "\x1B" "[1;32m"
#define ANSI_YELLOW  "\x1B" "[1;33m"
#define ANSI_BLUE    "\x1B" "[1;34m"
#define ANSI_MAGENTA "\x1B" "[1;35m"
#define ANSI_CYAN    "\x1B" "[1;36m"
#define ANSI_GREY    "\x1B" "[1;90m"

#define COLOR(x)	printk(_CONCAT(ANSI_, x))
#else
#define COLOR(x)	do { } while (false)
#endif

/* One-character code summarizing a PTE's permissions for the table dump:
 * '.' unmapped, r/w/x/a for supervisor pages, uppercase for user pages.
 */
__pinned_func
static char get_entry_code(pentry_t value)
{
	char ret;

	if (value == 0U) {
		/* Unmapped entry */
		ret = '.';
	} else {
		if ((value & MMU_RW) != 0U) {
			/* Writable page */
			if ((value & MMU_XD) != 0U) {
				/* RW */
				ret = 'w';
			} else {
				/* RWX */
				ret = 'a';
			}
		} else {
			if ((value & MMU_XD) != 0U) {
				/* R */
				ret = 'r';
			} else {
				/* RX */
				ret = 'x';
			}
		}

		if ((value & MMU_US) != 0U) {
			/* Uppercase indicates user mode access */
			ret = toupper((unsigned char)ret);
		}
	}

	return ret;
}

/* Print a color-coded permission map of one table's entries, 64 per row */
__pinned_func
static void print_entries(pentry_t entries_array[], uint8_t *base, int level,
			  size_t count)
{
	int column = 0;

	for (int i = 0; i < count; i++) {
		pentry_t entry = entries_array[i];

		uintptr_t phys = get_entry_phys(entry, level);
		uintptr_t virt =
			(uintptr_t)base + (get_entry_scope(level) * i);

		if ((entry & MMU_P) != 0U) {
			if (is_leaf(level, entry)) {
				if (phys == virt) {
					/* Identity mappings */
					COLOR(YELLOW);
				} else if (phys + K_MEM_VIRT_OFFSET == virt) {
					/* Permanent RAM mappings */
					COLOR(GREEN);
				} else {
					/* General mapped pages */
					COLOR(CYAN);
				}
			} else {
				/* Intermediate entry */
				COLOR(MAGENTA);
			}
		} else {
			if (is_leaf(level, entry)) {
				if (entry == 0U) {
					/* Unmapped */
					COLOR(GREY);
#ifdef CONFIG_X86_KPTI
				} else if (is_flipped_pte(entry)) {
					/* KPTI, un-flip it */
					COLOR(BLUE);
					entry = ~entry;
					phys = get_entry_phys(entry, level);
					if (phys == virt) {
						/* Identity mapped */
						COLOR(CYAN);
					} else {
						/* Non-identity mapped */
						COLOR(BLUE);
					}
#endif
				} else {
					/* Paged out */
					COLOR(RED);
				}
			} else {
				/* Un-mapped intermediate entry */
				COLOR(GREY);
			}
		}

		printk("%c", get_entry_code(entry));

		column++;
		if (column == 64) {
			column = 0;
			printk("\n");
		}
	}
	COLOR(DEFAULT);

	if (column != 0) {
		printk("\n");
	}
}

/* Recursively dump a paging structure and all child tables it links to */
__pinned_func
static void dump_ptables(pentry_t *table, uint8_t *base, int level)
{
	const struct paging_level *info = &paging_levels[level];

#ifdef CONFIG_X86_64
	/* Account for the virtual memory "hole" with sign-extension */
	if (((uintptr_t)base & BITL(47)) != 0) {
		base = (uint8_t *)((uintptr_t)base | (0xFFFFULL << 48));
	}
#endif

	printk("%s at %p (0x%" PRIxPTR "): ", info->name, table,
	       k_mem_phys_addr(table));
	if (level == 0) {
		printk("entire address space\n");
	} else {
		printk("for %p - %p\n", base,
		       base + get_table_scope(level) - 1);
	}

	print_entries(table, base, level, info->entries);

	/* Check if we're a page table */
	if (level == PTE_LEVEL) {
		return;
	}

	/* Dump all linked child tables */
	for (int j = 0; j < info->entries; j++) {
		pentry_t entry = table[j];
		pentry_t *next;

		if ((entry & MMU_P) == 0U ||
			(entry & MMU_PS) != 0U) {
			/* Not present or big page, skip */
			continue;
		}

		next = next_table(entry, level);
		dump_ptables(next, base + (j * get_entry_scope(level)),
			     level + 1);
	}
}

__pinned_func
void z_x86_dump_page_tables(pentry_t *ptables)
{
	dump_ptables(ptables, NULL, 0);
}

/* Enable to dump out the kernel's page table right before main() starts,
 * sometimes useful for deep debugging. May overwhelm twister.
 */
#define DUMP_PAGE_TABLES 0

#if DUMP_PAGE_TABLES
__pinned_func
static int dump_kernel_tables(void)
{
	z_x86_dump_page_tables(z_x86_kernel_ptables);

	return 0;
}

SYS_INIT(dump_kernel_tables, APPLICATION, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif

/* Append 'str' to *buf, advancing the cursor and shrinking the remaining
 * size; on truncation the remaining size is forced to zero.
 */
__pinned_func
static void str_append(char **buf, size_t *size, const char *str)
{
	int ret = snprintk(*buf, *size, "%s", str);

	if (ret >= *size) {
		/* Truncated */
		*size = 0U;
	} else {
		*size -= ret;
		*buf += ret;
	}
}

/* Log one paging entry: the virtual region it covers, its physical address
 * bits, and the names of the flag bits that are set.
 */
__pinned_func
static void dump_entry(int level, void *virt, pentry_t entry)
{
	const struct paging_level *info = &paging_levels[level];
	char buf[24] = { 0 };
	char *pos = buf;
	size_t sz = sizeof(buf);
	uint8_t *virtmap = (uint8_t *)ROUND_DOWN(virt, get_entry_scope(level));

	#define DUMP_BIT(bit) do { \
			if ((entry & MMU_##bit) != 0U) { \
				str_append(&pos, &sz, #bit " "); \
			} \
		} while (false)

	DUMP_BIT(RW);
	DUMP_BIT(US);
	DUMP_BIT(PWT);
	DUMP_BIT(PCD);
	DUMP_BIT(A);
	DUMP_BIT(D);
	DUMP_BIT(G);
	DUMP_BIT(XD);

	LOG_ERR("%sE: %p -> " PRI_ENTRY ": %s", info->name,
		virtmap, entry & info->mask, buf);

	#undef DUMP_BIT
}

/* Public wrapper around pentry_get() */
__pinned_func
void z_x86_pentry_get(int *paging_level, pentry_t *val, pentry_t *ptables,
		      void *virt)
{
	pentry_get(paging_level, val, ptables, virt);
}

/*
 * Debug function for dumping out MMU table information to the LOG for a
 * specific virtual address, such as when we get an unexpected page fault.
 */
__pinned_func
void z_x86_dump_mmu_flags(pentry_t *ptables, void *virt)
{
	pentry_t entry = 0;
	int level = 0;

	pentry_get(&level, &entry, ptables, virt);

	if ((entry & MMU_P) == 0) {
		LOG_ERR("%sE: not present", paging_levels[level].name);
	} else {
		dump_entry(level, virt, entry);
	}
}
#endif /* CONFIG_EXCEPTION_DEBUG */

/* Reset permissions on a PTE to original state when the mapping was made */
__pinned_func
static inline pentry_t reset_pte(pentry_t old_val)
{
	pentry_t new_val;

	/* Clear any existing state in permission bits */
	new_val = old_val & (~K_MEM_PARTITION_PERM_MASK);

	/* Now set permissions based on the stashed original values */
	if ((old_val & MMU_RW_ORIG) != 0) {
		new_val |= MMU_RW;
	}

	if ((old_val & MMU_US_ORIG) != 0) {
		new_val |= MMU_US;
	}

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
	if ((old_val & MMU_XD_ORIG) != 0) {
		new_val |= MMU_XD;
	}
#endif
	return new_val;
}

/* Wrapper functions for some gross stuff we have to do for Kernel
 * page table isolation. If these are User mode page tables, the user bit
 * isn't set, and this is not the shared page, all the bits in the PTE
 * are flipped. This serves three purposes:
 *  - The page isn't present, implementing page table isolation
 *  - Flipping the physical address bits cheaply mitigates L1TF
 *  - State is preserved; to get original PTE, just complement again
 */
__pinned_func
static inline pentry_t pte_finalize_value(pentry_t val, bool user_table,
					  int level)
{
#ifdef CONFIG_X86_KPTI
	static const uintptr_t shared_phys_addr =
		K_MEM_PHYS_ADDR(POINTER_TO_UINT(&z_shared_kernel_page_start));

	if (user_table && (val & MMU_US) == 0 && (val & MMU_P) != 0 &&
	    get_entry_phys(val, level) != shared_phys_addr) {
		val = ~val;
	}
#else
	ARG_UNUSED(user_table);
	ARG_UNUSED(level);
#endif
	return val;
}

/* Atomic functions for modifying PTEs. These don't map nicely to Zephyr's
 * atomic API since the only types supported are 'int' and 'void *' and
 * the size of pentry_t depends on other factors like PAE.
 */
#ifndef CONFIG_X86_PAE
/* Non-PAE, pentry_t is same size as void ptr so use atomic_ptr_* APIs */
__pinned_func
static inline pentry_t atomic_pte_get(const pentry_t *target)
{
	return (pentry_t)atomic_ptr_get((const atomic_ptr_t *)target);
}

__pinned_func
static inline bool atomic_pte_cas(pentry_t *target, pentry_t old_value,
				  pentry_t new_value)
{
	return atomic_ptr_cas((atomic_ptr_t *)target, (void *)old_value,
			      (void *)new_value);
}
#else
/* Atomic builtins for 64-bit values on 32-bit x86 require floating point.
 * Don't do this, just lock local interrupts. Needless to say, this
 * isn't workable if someone ever adds SMP to the 32-bit x86 port.
 */
BUILD_ASSERT(!IS_ENABLED(CONFIG_SMP));

__pinned_func
static inline pentry_t atomic_pte_get(const pentry_t *target)
{
	return *target;
}

/* Compare-and-swap emulated by briefly masking local interrupts */
__pinned_func
static inline bool atomic_pte_cas(pentry_t *target, pentry_t old_value,
				  pentry_t new_value)
{
	bool ret = false;
	int key = arch_irq_lock();

	if (*target == old_value) {
		*target = new_value;
		ret = true;
	}
	arch_irq_unlock(key);

	return ret;
}
#endif /* CONFIG_X86_PAE */

/* Indicates that the target page tables will be used by user mode threads.
 * This only has implications for CONFIG_X86_KPTI where user thread facing
 * page tables need nearly all pages that don't have the US bit to also
 * not be Present.
 */
#define OPTION_USER		BIT(0)

/* Indicates that the operation requires TLBs to be flushed as we are altering
 * existing mappings. Not needed for establishing new mappings
 */
#define OPTION_FLUSH		BIT(1)

/* Indicates that each PTE's permission bits should be restored to their
 * original state when the memory was mapped. All other bits in the PTE are
 * preserved.
 */
#define OPTION_RESET		BIT(2)

/* Indicates that the mapping will need to be cleared entirely. This is
 * mainly used for unmapping the memory region.
 */
#define OPTION_CLEAR		BIT(3)

/**
 * Atomically update bits in a page table entry
 *
 * This is atomic with respect to modifications by other CPUs or preempted
 * contexts, which can be very important when making decisions based on
 * the PTE's prior "dirty" state.
 *
 * @param pte Pointer to page table entry to update
 * @param update_val Updated bits to set/clear in PTE. Ignored with
 *        OPTION_RESET or OPTION_CLEAR.
 * @param update_mask Which bits to modify in the PTE. Ignored with
 *        OPTION_RESET or OPTION_CLEAR.
 * @param options Control flags
 * @retval Old PTE value
 */
__pinned_func
static inline pentry_t pte_atomic_update(pentry_t *pte, pentry_t update_val,
					 pentry_t update_mask,
					 uint32_t options)
{
	bool user_table = (options & OPTION_USER) != 0U;
	bool reset = (options & OPTION_RESET) != 0U;
	bool clear = (options & OPTION_CLEAR) != 0U;
	pentry_t old_val, new_val;

	do {
		old_val = atomic_pte_get(pte);

		new_val = old_val;
#ifdef CONFIG_X86_KPTI
		if (is_flipped_pte(new_val)) {
			/* Page was flipped for KPTI. Un-flip it */
			new_val = ~new_val;
		}
#endif /* CONFIG_X86_KPTI */

		if (reset) {
			new_val = reset_pte(new_val);
		} else if (clear) {
			new_val = 0;
		} else {
			new_val = ((new_val & ~update_mask) |
				   (update_val & update_mask));
		}

		new_val = pte_finalize_value(new_val, user_table, PTE_LEVEL);
	} while (atomic_pte_cas(pte, old_val, new_val) == false);

#ifdef CONFIG_X86_KPTI
	if (is_flipped_pte(old_val)) {
		/* Page was flipped for KPTI. Un-flip it */
		old_val = ~old_val;
	}
#endif /* CONFIG_X86_KPTI */

	return old_val;
}

/**
 * Low level page table update function for a virtual page
 *
 * For the provided set of page tables, update the PTE associated with the
 * virtual address to a new value, using the mask to control what bits
 * need to be preserved.
 *
 * It is permitted to set up mappings without the Present bit set, in which
 * case all other bits may be used for OS accounting.
 *
 * This function is atomic with respect to the page table entries being
 * modified by another CPU, using atomic operations to update the requested
 * bits and return the previous PTE value.
 *
 * Common mask values:
 *  MASK_ALL  - Update all PTE bits. Existing state totally discarded.
 *  MASK_PERM - Only update permission bits. All other bits and physical
 *              mapping preserved.
 *
 * @param ptables Page tables to modify
 * @param virt Virtual page table entry to update
 * @param entry_val Value to update in the PTE (ignored if OPTION_RESET or
 *        OPTION_CLEAR)
 * @param [out] old_val_ptr Filled in with previous PTE value. May be NULL.
 * @param mask What bits to update in the PTE (ignored if OPTION_RESET or
 *        OPTION_CLEAR)
 * @param options Control options, described above
 *
 * @retval 0 if successful
 * @retval -EFAULT if large page encountered or missing page table level
 */
__pinned_func
static int page_map_set(pentry_t *ptables, void *virt, pentry_t entry_val,
			pentry_t *old_val_ptr, pentry_t mask, uint32_t options)
{
	pentry_t *table = ptables;
	bool flush = (options & OPTION_FLUSH) != 0U;
	int ret = 0;

	for (int level = 0; level < NUM_LEVELS; level++) {
		int index;
		pentry_t *entryp;

		index = get_index(virt, level);
		entryp = &table[index];

		/* Check if we're a PTE */
		if (level == PTE_LEVEL) {
			pentry_t old_val = pte_atomic_update(entryp, entry_val,
							     mask, options);
			if (old_val_ptr != NULL) {
				*old_val_ptr = old_val;
			}
			break;
		}

		/* We bail out early here due to no support for
		 * splitting existing bigpage mappings.
		 * If the PS bit is not supported at some level (like
		 * in a PML4 entry) it is always reserved and must be 0
		 */
		CHECKIF(!((*entryp & MMU_PS) == 0U)) {
			/* Cannot continue since we cannot split
			 * bigpage mappings.
			 */
			LOG_ERR("large page encountered");
			ret = -EFAULT;
			goto out;
		}

		table = next_table(*entryp, level);

		CHECKIF(!(table != NULL)) {
			/* Cannot continue since table is NULL,
			 * and it cannot be dereferenced in next loop
			 * iteration.
			 */
			LOG_ERR("missing page table level %d when trying to map %p",
				level + 1, virt);
			ret = -EFAULT;
			goto out;
		}
	}

out:
	if (flush) {
		tlb_flush_page(virt);
	}

	return ret;
}

/**
 * Map a physical region in a specific set of page tables.
 *
 * See documentation for page_map_set() for additional notes about masks and
 * supported options.
 *
 * It is vital to remember that all virtual-to-physical mappings must be
 * the same with respect to supervisor mode regardless of what thread is
 * scheduled (and therefore, if multiple sets of page tables exist, which one
 * is active).
 *
 * It is permitted to set up mappings without the Present bit set.
 *
 * @param ptables Page tables to modify
 * @param virt Base page-aligned virtual memory address to map the region.
 * @param phys Base page-aligned physical memory address for the region.
 *        Ignored if OPTION_RESET or OPTION_CLEAR. Also affected by the mask
 *        parameter. This address is not directly examined, it will simply be
 *        programmed into the PTE.
 * @param size Size of the physical region to map
 * @param entry_flags Non-address bits to set in every PTE. Ignored if
 *        OPTION_RESET. Also affected by the mask parameter.
 * @param mask What bits to update in each PTE. Un-set bits will never be
 *        modified. Ignored if OPTION_RESET or OPTION_CLEAR.
 * @param options Control options, described above
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid parameters are supplied
 * @retval -EFAULT if errors encountered when updating page tables
 */
__pinned_func
static int range_map_ptables(pentry_t *ptables, void *virt, uintptr_t phys,
			     size_t size, pentry_t entry_flags, pentry_t mask,
			     uint32_t options)
{
	bool zero_entry = (options & (OPTION_RESET | OPTION_CLEAR)) != 0U;
	int ret = 0, ret2;

	CHECKIF(!is_addr_aligned(phys) || !is_size_aligned(size)) {
		ret = -EINVAL;
		goto out;
	}

	CHECKIF(!((entry_flags & paging_levels[0].mask) == 0U)) {
		LOG_ERR("entry_flags " PRI_ENTRY " overlaps address area",
			entry_flags);
		ret = -EINVAL;
		goto out;
	}

	/* This implementation is stack-efficient but not particularly fast.
	 * We do a full page table walk for every page we are updating.
	 * Recursive approaches are possible, but use much more stack space.
	 */
	for (size_t offset = 0; offset < size;
	     offset += CONFIG_MMU_PAGE_SIZE) {
		uint8_t *dest_virt = (uint8_t *)virt + offset;
		pentry_t entry_val;

		if (zero_entry) {
			entry_val = 0;
		} else {
			entry_val = (pentry_t)(phys + offset) | entry_flags;
		}

		ret2 = page_map_set(ptables, dest_virt, entry_val, NULL, mask,
				    options);
		ARG_UNUSED(ret2);
		CHECKIF(ret2 != 0) {
			ret = ret2;
		}
	}

out:
	return ret;
}

/**
 * Establish or update a memory mapping for all page tables
 *
 * The physical region noted from phys to phys + size will be mapped to
 * an equal sized virtual region starting at virt, with the provided flags.
 * The mask value denotes what bits in PTEs will actually be modified.
 *
 * See range_map_ptables() for additional details.
 *
 * @param virt Page-aligned starting virtual address
 * @param phys Page-aligned starting physical address. Ignored if the mask
 *             parameter does not enable address bits or OPTION_RESET used.
 *             This region is not directly examined, it will simply be
 *             programmed into the page tables.
* @param size Size of the physical region to map * @param entry_flags Desired state of non-address PTE bits covered by mask, * ignored if OPTION_RESET * @param mask What bits in the PTE to actually modify; unset bits will * be preserved. Ignored if OPTION_RESET. * @param options Control options. Do not set OPTION_USER here. OPTION_FLUSH * will trigger a TLB shootdown after all tables are updated. * * @retval 0 if successful * @retval -EINVAL if invalid parameters are supplied * @retval -EFAULT if errors encountered when updating page tables */ __pinned_func static int range_map(void *virt, uintptr_t phys, size_t size, pentry_t entry_flags, pentry_t mask, uint32_t options) { int ret = 0, ret2; LOG_DBG("%s: 0x%" PRIxPTR " -> %p (%zu) flags " PRI_ENTRY " mask " PRI_ENTRY " opt 0x%x", __func__, phys, virt, size, entry_flags, mask, options); #ifdef CONFIG_X86_64 /* There's a gap in the "64-bit" address space, as 4-level paging * requires bits 48 to 63 to be copies of bit 47. Test this * by treating as a signed value and shifting. */ __ASSERT(((((intptr_t)virt) << 16) >> 16) == (intptr_t)virt, "non-canonical virtual address mapping %p (size %zu)", virt, size); #endif /* CONFIG_X86_64 */ CHECKIF(!((options & OPTION_USER) == 0U)) { LOG_ERR("invalid option for mapping"); ret = -EINVAL; goto out; } /* All virtual-to-physical mappings are the same in all page tables. * What can differ is only access permissions, defined by the memory * domain associated with the page tables, and the threads that are * members of that domain. * * Any new mappings need to be applied to all page tables. 
*/ #if defined(CONFIG_USERSPACE) && !defined(CONFIG_X86_COMMON_PAGE_TABLE) sys_snode_t *node; SYS_SLIST_FOR_EACH_NODE(&x86_domain_list, node) { struct arch_mem_domain *domain = CONTAINER_OF(node, struct arch_mem_domain, node); ret2 = range_map_ptables(domain->ptables, virt, phys, size, entry_flags, mask, options | OPTION_USER); ARG_UNUSED(ret2); CHECKIF(ret2 != 0) { ret = ret2; } } #endif /* CONFIG_USERSPACE */ ret2 = range_map_ptables(z_x86_kernel_ptables, virt, phys, size, entry_flags, mask, options); ARG_UNUSED(ret2); CHECKIF(ret2 != 0) { ret = ret2; } out: #ifdef CONFIG_SMP if ((options & OPTION_FLUSH) != 0U) { tlb_shootdown(); } #endif /* CONFIG_SMP */ return ret; } __pinned_func static inline int range_map_unlocked(void *virt, uintptr_t phys, size_t size, pentry_t entry_flags, pentry_t mask, uint32_t options) { k_spinlock_key_t key; int ret; key = k_spin_lock(&x86_mmu_lock); ret = range_map(virt, phys, size, entry_flags, mask, options); k_spin_unlock(&x86_mmu_lock, key); return ret; } __pinned_func static pentry_t flags_to_entry(uint32_t flags) { pentry_t entry_flags = MMU_P; /* Translate flags argument into HW-recognized entry flags. * * Support for PAT is not implemented yet. Many systems may have * BIOS-populated MTRR values such that these cache settings are * redundant. 
*/ switch (flags & K_MEM_CACHE_MASK) { case K_MEM_CACHE_NONE: entry_flags |= MMU_PCD; break; case K_MEM_CACHE_WT: entry_flags |= MMU_PWT; break; case K_MEM_CACHE_WB: break; default: __ASSERT(false, "bad memory mapping flags 0x%x", flags); } if ((flags & K_MEM_PERM_RW) != 0U) { entry_flags |= ENTRY_RW; } if ((flags & K_MEM_PERM_USER) != 0U) { entry_flags |= ENTRY_US; } if ((flags & K_MEM_PERM_EXEC) == 0U) { entry_flags |= ENTRY_XD; } return entry_flags; } /* map new region virt..virt+size to phys with provided arch-neutral flags */ __pinned_func void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags) { int ret; ret = range_map_unlocked(virt, phys, size, flags_to_entry(flags), MASK_ALL, 0); __ASSERT_NO_MSG(ret == 0); ARG_UNUSED(ret); } /* unmap region addr..addr+size, reset entries and flush TLB */ void arch_mem_unmap(void *addr, size_t size) { int ret; ret = range_map_unlocked(addr, 0, size, 0, 0, OPTION_FLUSH | OPTION_CLEAR); __ASSERT_NO_MSG(ret == 0); ARG_UNUSED(ret); } #ifdef K_MEM_IS_VM_KERNEL __boot_func static void identity_map_remove(uint32_t level) { size_t size, scope = get_entry_scope(level); pentry_t *table; uint32_t cur_level; uint8_t *pos; pentry_t entry; pentry_t *entry_ptr; k_mem_region_align((uintptr_t *)&pos, &size, (uintptr_t)CONFIG_SRAM_BASE_ADDRESS, (size_t)CONFIG_SRAM_SIZE * 1024U, scope); while (size != 0U) { /* Need to get to the correct table */ table = z_x86_kernel_ptables; for (cur_level = 0; cur_level < level; cur_level++) { entry = get_entry(table, pos, cur_level); table = next_table(entry, level); } entry_ptr = get_entry_ptr(table, pos, level); /* set_pte */ *entry_ptr = 0; pos += scope; size -= scope; } } #endif /* Invoked to remove the identity mappings in the page tables, * they were only needed to transition the instruction pointer at early boot */ __boot_func void z_x86_mmu_init(void) { #ifdef K_MEM_IS_VM_KERNEL /* We booted with physical address space being identity mapped. 
* As we are now executing in virtual address space, * the identity map is no longer needed. So remove them. * * Without PAE, only need to remove the entries at the PD level. * With PAE, need to also remove the entry at PDP level. */ identity_map_remove(PDE_LEVEL); #ifdef CONFIG_X86_PAE identity_map_remove(0); #endif #endif } #ifdef CONFIG_X86_STACK_PROTECTION __pinned_func void z_x86_set_stack_guard(k_thread_stack_t *stack) { int ret; /* Applied to all page tables as this affects supervisor mode. * XXX: This never gets reset when the thread exits, which can * cause problems if the memory is later used for something else. * See #29499 * * Guard page is always the first page of the stack object for both * kernel and thread stacks. */ ret = range_map_unlocked(stack, 0, CONFIG_MMU_PAGE_SIZE, MMU_P | ENTRY_XD, MASK_PERM, OPTION_FLUSH); __ASSERT_NO_MSG(ret == 0); ARG_UNUSED(ret); } #endif /* CONFIG_X86_STACK_PROTECTION */ #ifdef CONFIG_USERSPACE __pinned_func static bool page_validate(pentry_t *ptables, uint8_t *addr, bool write) { pentry_t *table = ptables; for (int level = 0; level < NUM_LEVELS; level++) { pentry_t entry = get_entry(table, addr, level); if (is_leaf(level, entry)) { #ifdef CONFIG_X86_KPTI if (is_flipped_pte(entry)) { /* We flipped this to prevent user access * since just clearing US isn't sufficient */ return false; } #endif /* US and RW bits still carry meaning if non-present. * If the data page is paged out, access bits are * preserved. If un-mapped, the whole entry is 0. 
*/ if (((entry & MMU_US) == 0U) || (write && ((entry & MMU_RW) == 0U))) { return false; } } else { if ((entry & MMU_P) == 0U) { /* Missing intermediate table, address is * un-mapped */ return false; } table = next_table(entry, level); } } return true; } __pinned_func static inline void bcb_fence(void) { #ifdef CONFIG_X86_BOUNDS_CHECK_BYPASS_MITIGATION __asm__ volatile ("lfence" : : : "memory"); #endif } __pinned_func int arch_buffer_validate(const void *addr, size_t size, int write) { pentry_t *ptables = z_x86_thread_page_tables_get(_current); uint8_t *virt; size_t aligned_size; int ret = 0; /* addr/size arbitrary, fix this up into an aligned region */ (void)k_mem_region_align((uintptr_t *)&virt, &aligned_size, (uintptr_t)addr, size, CONFIG_MMU_PAGE_SIZE); for (size_t offset = 0; offset < aligned_size; offset += CONFIG_MMU_PAGE_SIZE) { if (!page_validate(ptables, virt + offset, write)) { ret = -1; break; } } bcb_fence(); return ret; } #ifdef CONFIG_X86_COMMON_PAGE_TABLE /* Very low memory configuration. A single set of page tables is used for * all threads. This relies on some assumptions: * * - No KPTI. If that were supported, we would need both a kernel and user * set of page tables. * - No SMP. If that were supported, we would need per-core page tables. * - Memory domains don't affect supervisor mode. * - All threads have the same virtual-to-physical mappings. * - Memory domain APIs can't be called by user mode. * * Because there is no SMP, only one set of page tables, and user threads can't * modify their own memory domains, we don't have to do much when * arch_mem_domain_* APIs are called. We do use a caching scheme to avoid * updating page tables if the last user thread scheduled was in the same * domain. * * We don't set CONFIG_ARCH_MEM_DOMAIN_DATA, since we aren't setting * up any arch-specific memory domain data (per domain page tables.) * * This is all nice and simple and saves a lot of memory. 
The cost is that * context switching is not trivial CR3 update. We have to reset all partitions * for the current domain configuration and then apply all the partitions for * the incoming thread's domain if they are not the same. We also need to * update permissions similarly on the thread stack region. */ __pinned_func static inline int reset_region(uintptr_t start, size_t size) { return range_map_unlocked((void *)start, 0, size, 0, 0, OPTION_FLUSH | OPTION_RESET); } __pinned_func static inline int apply_region(uintptr_t start, size_t size, pentry_t attr) { return range_map_unlocked((void *)start, 0, size, attr, MASK_PERM, OPTION_FLUSH); } /* Cache of the current memory domain applied to the common page tables and * the stack buffer region that had User access granted. */ static __pinned_bss struct k_mem_domain *current_domain; static __pinned_bss uintptr_t current_stack_start; static __pinned_bss size_t current_stack_size; __pinned_func void z_x86_swap_update_common_page_table(struct k_thread *incoming) { k_spinlock_key_t key; if ((incoming->base.user_options & K_USER) == 0) { /* Incoming thread is not a user thread. Memory domains don't * affect supervisor threads and we don't need to enable User * bits for its stack buffer; do nothing. */ return; } /* Step 1: Make sure the thread stack is set up correctly for the * for the incoming thread */ if (incoming->stack_info.start != current_stack_start || incoming->stack_info.size != current_stack_size) { if (current_stack_size != 0U) { reset_region(current_stack_start, current_stack_size); } /* The incoming thread's stack region needs User permissions */ apply_region(incoming->stack_info.start, incoming->stack_info.size, K_MEM_PARTITION_P_RW_U_RW); /* Update cache */ current_stack_start = incoming->stack_info.start; current_stack_size = incoming->stack_info.size; } /* Step 2: The page tables always have some memory domain applied to * them. 
If the incoming thread's memory domain is different, * update the page tables */ key = k_spin_lock(&z_mem_domain_lock); if (incoming->mem_domain_info.mem_domain == current_domain) { /* The incoming thread's domain is already applied */ goto out_unlock; } /* Reset the current memory domain regions... */ if (current_domain != NULL) { for (int i = 0; i < CONFIG_MAX_DOMAIN_PARTITIONS; i++) { struct k_mem_partition *ptn = &current_domain->partitions[i]; if (ptn->size == 0) { continue; } reset_region(ptn->start, ptn->size); } } /* ...and apply all the incoming domain's regions */ for (int i = 0; i < CONFIG_MAX_DOMAIN_PARTITIONS; i++) { struct k_mem_partition *ptn = &incoming->mem_domain_info.mem_domain->partitions[i]; if (ptn->size == 0) { continue; } apply_region(ptn->start, ptn->size, ptn->attr); } current_domain = incoming->mem_domain_info.mem_domain; out_unlock: k_spin_unlock(&z_mem_domain_lock, key); } /* If a partition was added or removed in the cached domain, update the * page tables. */ __pinned_func int arch_mem_domain_partition_remove(struct k_mem_domain *domain, uint32_t partition_id) { struct k_mem_partition *ptn; if (domain != current_domain) { return 0; } ptn = &domain->partitions[partition_id]; return reset_region(ptn->start, ptn->size); } __pinned_func int arch_mem_domain_partition_add(struct k_mem_domain *domain, uint32_t partition_id) { struct k_mem_partition *ptn; if (domain != current_domain) { return 0; } ptn = &domain->partitions[partition_id]; return apply_region(ptn->start, ptn->size, ptn->attr); } /* Rest of the APIs don't need to do anything */ __pinned_func int arch_mem_domain_thread_add(struct k_thread *thread) { return 0; } __pinned_func int arch_mem_domain_thread_remove(struct k_thread *thread) { return 0; } #else /* Memory domains each have a set of page tables assigned to them */ /* * Pool of free memory pages for copying page tables, as needed. 
*/ #define PTABLE_COPY_SIZE (INITIAL_PTABLE_PAGES * CONFIG_MMU_PAGE_SIZE) static uint8_t __pinned_noinit page_pool[PTABLE_COPY_SIZE * CONFIG_X86_MAX_ADDITIONAL_MEM_DOMAINS] __aligned(CONFIG_MMU_PAGE_SIZE); __pinned_data static uint8_t *page_pos = page_pool + sizeof(page_pool); /* Return a zeroed and suitably aligned memory page for page table data * from the global page pool */ __pinned_func static void *page_pool_get(void) { void *ret; if (page_pos == page_pool) { ret = NULL; } else { page_pos -= CONFIG_MMU_PAGE_SIZE; ret = page_pos; } if (ret != NULL) { memset(ret, 0, CONFIG_MMU_PAGE_SIZE); } return ret; } /* Debugging function to show how many pages are free in the pool */ __pinned_func static inline unsigned int pages_free(void) { return (page_pos - page_pool) / CONFIG_MMU_PAGE_SIZE; } /** * Duplicate an entire set of page tables * * Uses recursion, but depth at any given moment is limited by the number of * paging levels. * * x86_mmu_lock must be held. * * @param dst a zeroed out chunk of memory of sufficient size for the indicated * paging level. 
* @param src some paging structure from within the source page tables to copy * at the indicated paging level * @param level Current paging level * @retval 0 Success * @retval -ENOMEM Insufficient page pool memory */ __pinned_func static int copy_page_table(pentry_t *dst, pentry_t *src, int level) { if (level == PTE_LEVEL) { /* Base case: leaf page table */ for (int i = 0; i < get_num_entries(level); i++) { dst[i] = pte_finalize_value(reset_pte(src[i]), true, PTE_LEVEL); } } else { /* Recursive case: allocate sub-structures as needed and * make recursive calls on them */ for (int i = 0; i < get_num_entries(level); i++) { pentry_t *child_dst; int ret; if ((src[i] & MMU_P) == 0) { /* Non-present, skip */ continue; } if ((level == PDE_LEVEL) && ((src[i] & MMU_PS) != 0)) { /* large page: no lower level table */ dst[i] = pte_finalize_value(src[i], true, PDE_LEVEL); continue; } __ASSERT((src[i] & MMU_PS) == 0, "large page encountered"); child_dst = page_pool_get(); if (child_dst == NULL) { return -ENOMEM; } /* Page table links are by physical address. RAM * for page tables is identity-mapped, but double- * cast needed for PAE case where sizeof(void *) and * sizeof(pentry_t) are not the same. 
*/ dst[i] = ((pentry_t)k_mem_phys_addr(child_dst) | INT_FLAGS); ret = copy_page_table(child_dst, next_table(src[i], level), level + 1); if (ret != 0) { return ret; } } } return 0; } __pinned_func static int region_map_update(pentry_t *ptables, void *start, size_t size, pentry_t flags, bool reset) { uint32_t options = OPTION_USER; int ret; k_spinlock_key_t key; if (reset) { options |= OPTION_RESET; } if (ptables == z_x86_page_tables_get()) { options |= OPTION_FLUSH; } key = k_spin_lock(&x86_mmu_lock); ret = range_map_ptables(ptables, start, 0, size, flags, MASK_PERM, options); k_spin_unlock(&x86_mmu_lock, key); #ifdef CONFIG_SMP tlb_shootdown(); #endif return ret; } __pinned_func static inline int reset_region(pentry_t *ptables, void *start, size_t size) { LOG_DBG("%s(%p, %p, %zu)", __func__, ptables, start, size); return region_map_update(ptables, start, size, 0, true); } __pinned_func static inline int apply_region(pentry_t *ptables, void *start, size_t size, pentry_t attr) { LOG_DBG("%s(%p, %p, %zu, " PRI_ENTRY ")", __func__, ptables, start, size, attr); return region_map_update(ptables, start, size, attr, false); } __pinned_func static void set_stack_perms(struct k_thread *thread, pentry_t *ptables) { LOG_DBG("update stack for thread %p's ptables at %p: 0x%" PRIxPTR " (size %zu)", thread, ptables, thread->stack_info.start, thread->stack_info.size); apply_region(ptables, (void *)thread->stack_info.start, thread->stack_info.size, MMU_P | MMU_XD | MMU_RW | MMU_US); } /* * Arch interface implementations for memory domains and userspace */ __boot_func int arch_mem_domain_init(struct k_mem_domain *domain) { int ret; k_spinlock_key_t key = k_spin_lock(&x86_mmu_lock); LOG_DBG("%s(%p)", __func__, domain); #if __ASSERT_ON sys_snode_t *node; /* Assert that we have not already initialized this domain */ SYS_SLIST_FOR_EACH_NODE(&x86_domain_list, node) { struct arch_mem_domain *list_domain = CONTAINER_OF(node, struct arch_mem_domain, node); __ASSERT(list_domain != 
&domain->arch, "%s(%p) called multiple times", __func__, domain); } #endif /* __ASSERT_ON */ #ifndef CONFIG_X86_KPTI /* If we're not using KPTI then we can use the build time page tables * (which are mutable) as the set of page tables for the default * memory domain, saving us some memory. * * We skip adding this domain to x86_domain_list since we already * update z_x86_kernel_ptables directly in range_map(). */ if (domain == &k_mem_domain_default) { domain->arch.ptables = z_x86_kernel_ptables; k_spin_unlock(&x86_mmu_lock, key); return 0; } #endif /* CONFIG_X86_KPTI */ #ifdef CONFIG_X86_PAE /* PDPT is stored within the memory domain itself since it is * much smaller than a full page */ (void)memset(domain->arch.pdpt, 0, sizeof(domain->arch.pdpt)); domain->arch.ptables = domain->arch.pdpt; #else /* Allocate a page-sized top-level structure, either a PD or PML4 */ domain->arch.ptables = page_pool_get(); if (domain->arch.ptables == NULL) { k_spin_unlock(&x86_mmu_lock, key); return -ENOMEM; } #endif /* CONFIG_X86_PAE */ LOG_DBG("copy_page_table(%p, %p, 0)", domain->arch.ptables, z_x86_kernel_ptables); /* Make a copy of the boot page tables created by gen_mmu.py */ ret = copy_page_table(domain->arch.ptables, z_x86_kernel_ptables, 0); if (ret == 0) { sys_slist_append(&x86_domain_list, &domain->arch.node); } k_spin_unlock(&x86_mmu_lock, key); return ret; } int arch_mem_domain_partition_remove(struct k_mem_domain *domain, uint32_t partition_id) { struct k_mem_partition *partition = &domain->partitions[partition_id]; /* Reset the partition's region back to defaults */ return reset_region(domain->arch.ptables, (void *)partition->start, partition->size); } /* Called on thread exit or when moving it to a different memory domain */ int arch_mem_domain_thread_remove(struct k_thread *thread) { struct k_mem_domain *domain = thread->mem_domain_info.mem_domain; if ((thread->base.user_options & K_USER) == 0) { return 0; } if ((thread->base.thread_state & _THREAD_DEAD) == 0) { /* 
Thread is migrating to another memory domain and not * exiting for good; we weren't called from * z_thread_abort(). Resetting the stack region will * take place in the forthcoming thread_add() call. */ return 0; } /* Restore permissions on the thread's stack area since it is no * longer a member of the domain. */ return reset_region(domain->arch.ptables, (void *)thread->stack_info.start, thread->stack_info.size); } __pinned_func int arch_mem_domain_partition_add(struct k_mem_domain *domain, uint32_t partition_id) { struct k_mem_partition *partition = &domain->partitions[partition_id]; /* Update the page tables with the partition info */ return apply_region(domain->arch.ptables, (void *)partition->start, partition->size, partition->attr | MMU_P); } /* Invoked from memory domain API calls, as well as during thread creation */ __pinned_func int arch_mem_domain_thread_add(struct k_thread *thread) { int ret = 0; /* New memory domain we are being added to */ struct k_mem_domain *domain = thread->mem_domain_info.mem_domain; /* This is only set for threads that were migrating from some other * memory domain; new threads this is NULL. * * Note that NULL check on old_ptables must be done before any * address translation or else (NULL + offset) != NULL. */ pentry_t *old_ptables = UINT_TO_POINTER(thread->arch.ptables); bool is_user = (thread->base.user_options & K_USER) != 0; bool is_migration = (old_ptables != NULL) && is_user; /* Allow US access to the thread's stack in its new domain if * we are migrating. If we are not migrating this is done in * z_x86_current_stack_perms() */ if (is_migration) { old_ptables = k_mem_virt_addr(thread->arch.ptables); set_stack_perms(thread, domain->arch.ptables); } thread->arch.ptables = k_mem_phys_addr(domain->arch.ptables); LOG_DBG("set thread %p page tables to 0x%" PRIxPTR, thread, thread->arch.ptables); /* Check if we're doing a migration from a different memory domain * and have to remove permissions from its old domain. 
* * XXX: The checks we have to do here and in * arch_mem_domain_thread_remove() are clumsy, it may be worth looking * into adding a specific arch_mem_domain_thread_migrate() API. * See #29601 */ if (is_migration) { ret = reset_region(old_ptables, (void *)thread->stack_info.start, thread->stack_info.size); } #if !defined(CONFIG_X86_KPTI) && !defined(CONFIG_X86_COMMON_PAGE_TABLE) /* Need to switch to using these new page tables, in case we drop * to user mode before we are ever context switched out. * IPI takes care of this if the thread is currently running on some * other CPU. */ if (thread == _current && thread->arch.ptables != z_x86_cr3_get()) { z_x86_cr3_set(thread->arch.ptables); } #endif /* CONFIG_X86_KPTI */ return ret; } #endif /* !CONFIG_X86_COMMON_PAGE_TABLE */ __pinned_func int arch_mem_domain_max_partitions_get(void) { return CONFIG_MAX_DOMAIN_PARTITIONS; } /* Invoked from z_x86_userspace_enter */ __pinned_func void z_x86_current_stack_perms(void) { /* Clear any previous context in the stack buffer to prevent * unintentional data leakage. */ (void)memset((void *)_current->stack_info.start, 0xAA, _current->stack_info.size - _current->stack_info.delta); /* Only now is it safe to grant access to the stack buffer since any * previous context has been erased. */ #ifdef CONFIG_X86_COMMON_PAGE_TABLE /* Re run swap page table update logic since we're entering User mode. * This will grant stack and memory domain access if it wasn't set * already (in which case this returns very quickly). */ z_x86_swap_update_common_page_table(_current); #else /* Memory domain access is already programmed into the page tables. * Need to enable access to this new user thread's stack buffer in * its domain-specific page tables. 
*/ set_stack_perms(_current, z_x86_thread_page_tables_get(_current)); #endif } #endif /* CONFIG_USERSPACE */ #ifdef CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES __boot_func static void mark_addr_page_reserved(uintptr_t addr, size_t len) { uintptr_t pos = ROUND_DOWN(addr, CONFIG_MMU_PAGE_SIZE); uintptr_t end = ROUND_UP(addr + len, CONFIG_MMU_PAGE_SIZE); for (; pos < end; pos += CONFIG_MMU_PAGE_SIZE) { if (!k_mem_is_page_frame(pos)) { continue; } k_mem_page_frame_set(k_mem_phys_to_page_frame(pos), K_MEM_PAGE_FRAME_RESERVED); } } __boot_func void arch_reserved_pages_update(void) { #ifdef CONFIG_X86_PC_COMPATIBLE /* * Best is to do some E820 or similar enumeration to specifically * identify all page frames which are reserved by the hardware or * firmware. Or use x86_memmap[] with Multiboot if available. * * But still, reserve everything in the first megabyte of physical * memory on PC-compatible platforms. */ mark_addr_page_reserved(0, MB(1)); #endif /* CONFIG_X86_PC_COMPATIBLE */ #ifdef CONFIG_X86_MEMMAP for (int i = 0; i < CONFIG_X86_MEMMAP_ENTRIES; i++) { struct x86_memmap_entry *entry = &x86_memmap[i]; switch (entry->type) { case X86_MEMMAP_ENTRY_UNUSED: __fallthrough; case X86_MEMMAP_ENTRY_RAM: continue; case X86_MEMMAP_ENTRY_ACPI: __fallthrough; case X86_MEMMAP_ENTRY_NVS: __fallthrough; case X86_MEMMAP_ENTRY_DEFECTIVE: __fallthrough; default: /* If any of three above cases satisfied, exit switch * and mark page reserved */ break; } mark_addr_page_reserved(entry->base, entry->length); } #endif /* CONFIG_X86_MEMMAP */ } #endif /* CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES */ int arch_page_phys_get(void *virt, uintptr_t *phys) { pentry_t pte = 0; int level, ret; __ASSERT(POINTER_TO_UINT(virt) % CONFIG_MMU_PAGE_SIZE == 0U, "unaligned address %p to %s", virt, __func__); pentry_get(&level, &pte, z_x86_page_tables_get(), virt); if ((pte & MMU_P) != 0) { if (phys != NULL) { *phys = (uintptr_t)get_entry_phys(pte, PTE_LEVEL); } ret = 0; } else { /* Not mapped */ ret = -EFAULT; } return 
ret; } #ifdef CONFIG_DEMAND_PAGING #define PTE_MASK (paging_levels[PTE_LEVEL].mask) __pinned_func void arch_mem_page_out(void *addr, uintptr_t location) { int ret; pentry_t mask = PTE_MASK | MMU_P | MMU_A; /* Accessed bit set to guarantee the entry is not completely 0 in * case of location value 0. A totally 0 PTE is un-mapped. */ ret = range_map(addr, location, CONFIG_MMU_PAGE_SIZE, MMU_A, mask, OPTION_FLUSH); __ASSERT_NO_MSG(ret == 0); ARG_UNUSED(ret); } __pinned_func void arch_mem_page_in(void *addr, uintptr_t phys) { int ret; pentry_t mask = PTE_MASK | MMU_P | MMU_D | MMU_A; ret = range_map(addr, phys, CONFIG_MMU_PAGE_SIZE, MMU_P, mask, OPTION_FLUSH); __ASSERT_NO_MSG(ret == 0); ARG_UNUSED(ret); } __pinned_func void arch_mem_scratch(uintptr_t phys) { page_map_set(z_x86_page_tables_get(), K_MEM_SCRATCH_PAGE, phys | MMU_P | MMU_RW | MMU_XD, NULL, MASK_ALL, OPTION_FLUSH); } __pinned_func uintptr_t arch_page_info_get(void *addr, uintptr_t *phys, bool clear_accessed) { pentry_t all_pte, mask; uint32_t options; /* What to change, if anything, in the page_map_set() calls */ if (clear_accessed) { mask = MMU_A; options = OPTION_FLUSH; } else { /* In this configuration page_map_set() just queries the * page table and makes no changes */ mask = 0; options = 0U; } page_map_set(z_x86_kernel_ptables, addr, 0, &all_pte, mask, options); /* Un-mapped PTEs are completely zeroed. No need to report anything * else in this case. */ if (all_pte == 0) { return ARCH_DATA_PAGE_NOT_MAPPED; } #if defined(CONFIG_USERSPACE) && !defined(CONFIG_X86_COMMON_PAGE_TABLE) /* Don't bother looking at other page tables if non-present as we * are not required to report accurate accessed/dirty in this case * and all mappings are otherwise the same. 
*/ if ((all_pte & MMU_P) != 0) { sys_snode_t *node; /* IRQs are locked, safe to do this */ SYS_SLIST_FOR_EACH_NODE(&x86_domain_list, node) { pentry_t cur_pte; struct arch_mem_domain *domain = CONTAINER_OF(node, struct arch_mem_domain, node); page_map_set(domain->ptables, addr, 0, &cur_pte, mask, options | OPTION_USER); /* Logical OR of relevant PTE in all page tables. * addr/location and present state should be identical * among them. */ all_pte |= cur_pte; } } #endif /* USERSPACE && ~X86_COMMON_PAGE_TABLE */ /* NOTE: We are truncating the PTE on PAE systems, whose pentry_t * are larger than a uintptr_t. * * We currently aren't required to report back XD state (bit 63), and * Zephyr just doesn't support large physical memory on 32-bit * systems, PAE was only implemented for XD support. */ if (phys != NULL) { *phys = (uintptr_t)get_entry_phys(all_pte, PTE_LEVEL); } /* We don't filter out any other bits in the PTE and the kernel * ignores them. For the case of ARCH_DATA_PAGE_NOT_MAPPED, * we use a bit which is never set in a real PTE (the PAT bit) in the * current system. * * The other ARCH_DATA_PAGE_* macros are defined to their corresponding * bits in the PTE. 
*/ return (uintptr_t)all_pte; } __pinned_func enum arch_page_location arch_page_location_get(void *addr, uintptr_t *location) { pentry_t pte; int level; /* TODO: since we only have to query the current set of page tables, * could optimize this with recursive page table mapping */ pentry_get(&level, &pte, z_x86_page_tables_get(), addr); if (pte == 0) { /* Not mapped */ return ARCH_PAGE_LOCATION_BAD; } __ASSERT(level == PTE_LEVEL, "bigpage found at %p", addr); *location = (uintptr_t)get_entry_phys(pte, PTE_LEVEL); if ((pte & MMU_P) != 0) { return ARCH_PAGE_LOCATION_PAGED_IN; } else { return ARCH_PAGE_LOCATION_PAGED_OUT; } } #ifdef CONFIG_X86_KPTI __pinned_func bool z_x86_kpti_is_access_ok(void *addr, pentry_t *ptables) { pentry_t pte; int level; pentry_get(&level, &pte, ptables, addr); /* Might as well also check if it's un-mapped, normally we don't * fetch the PTE from the page tables until we are inside * k_mem_page_fault() and call arch_page_fault_status_get() */ if (level != PTE_LEVEL || pte == 0 || is_flipped_pte(pte)) { return false; } return true; } #endif /* CONFIG_X86_KPTI */ #endif /* CONFIG_DEMAND_PAGING */ ```
/content/code_sandbox/arch/x86/core/x86_mmu.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
16,694
```c
/* */

#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <offsets_short.h>
#include <x86_mmu.h>

extern void x86_sse_init(struct k_thread *thread); /* in locore.S */

/* FIXME: This exists to make space for a "return address" at the top
 * of the stack.  Obviously this is unused at runtime, but is required
 * for alignment: stacks at runtime should be 16-byte aligned, and a
 * CALL will therefore push a return address that leaves the stack
 * misaligned.  Effectively we're wasting 8 bytes here to undo (!) the
 * alignment that the upper level code already tried to do for us.  We
 * should clean this up.
 */
struct x86_initial_frame {
	/* zeroed return address for ABI */
	uint64_t rip;
};

/*
 * Architecture hook for thread creation: populate the thread's
 * callee-saved context (initial RSP/RIP/RFLAGS) so that the first
 * switch-in lands in 'switch_entry', with the entry point and its
 * three parameters staged in the rdi/rsi/rdx/rcx slots per the
 * SysV calling convention, and SSE state initialized.
 */
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3)
{
	void *switch_entry;
	struct x86_initial_frame *iframe;

#if defined(CONFIG_X86_STACK_PROTECTION) && !defined(CONFIG_THREAD_STACK_MEM_MAPPED)
	/* This unconditionally set the first page of stack as guard page,
	 * which is only needed if the stack is not memory mapped.
	 */
	z_x86_set_stack_guard(stack);
#else
	ARG_UNUSED(stack);
#endif
#ifdef CONFIG_USERSPACE
	switch_entry = z_x86_userspace_prepare_thread(thread);
	/* New threads always start in supervisor mode */
	thread->arch.cs = X86_KERNEL_CS;
	thread->arch.ss = X86_KERNEL_DS;
#else
	switch_entry = z_thread_entry;
#endif
	iframe = Z_STACK_PTR_TO_FRAME(struct x86_initial_frame, stack_ptr);
	iframe->rip = 0U;
	thread->callee_saved.rsp = (long) iframe;
	thread->callee_saved.rip = (long) switch_entry;
	thread->callee_saved.rflags = EFLAGS_INITIAL;

	/* Parameters to entry point, which is populated in
	 * thread->callee_saved.rip
	 */
	thread->arch.rdi = (long) entry;
	thread->arch.rsi = (long) p1;
	thread->arch.rdx = (long) p2;
	thread->arch.rcx = (long) p3;

	x86_sse_init(thread);

	thread->arch.flags = X86_THREAD_FLAG_ALL;
	thread->switch_handle = thread;
}

/* x86-64 always has FP/SSE enabled so it cannot be disabled */
int arch_float_disable(struct k_thread *thread)
{
	/* x86-64 always has FP/SSE enabled so cannot be disabled */
	ARG_UNUSED(thread);

	return -ENOTSUP;
}

/* FP/SSE is always on for x86-64 threads, so enabling is a no-op */
int arch_float_enable(struct k_thread *thread, unsigned int options)
{
	/* x86-64 always has FP/SSE enabled so nothing to do here */
	ARG_UNUSED(thread);
	ARG_UNUSED(options);

	return 0;
}
```
/content/code_sandbox/arch/x86/core/intel64/thread.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
617
```c /* * */ /** * @file IRQ offload - x8664 implementation */ #include <zephyr/kernel.h> #include <zephyr/init.h> #include <zephyr/irq_offload.h> #include <kernel_arch_data.h> #define NR_IRQ_VECTORS (IV_NR_VECTORS - IV_IRQS) /* # vectors free for IRQs */ extern void (*x86_irq_funcs[NR_IRQ_VECTORS])(const void *arg); extern const void *x86_irq_args[NR_IRQ_VECTORS]; static void (*irq_offload_funcs[CONFIG_MP_NUM_CPUS])(const void *arg); static const void *irq_offload_args[CONFIG_MP_NUM_CPUS]; static void dispatcher(const void *arg) { uint8_t cpu_id = _current_cpu->id; if (irq_offload_funcs[cpu_id] != NULL) { irq_offload_funcs[cpu_id](irq_offload_args[cpu_id]); } } void arch_irq_offload(irq_offload_routine_t routine, const void *parameter) { int key = arch_irq_lock(); uint8_t cpu_id = _current_cpu->id; irq_offload_funcs[cpu_id] = routine; irq_offload_args[cpu_id] = parameter; __asm__ volatile("int %0" : : "i" (CONFIG_IRQ_OFFLOAD_VECTOR) : "memory"); arch_irq_unlock(key); } int irq_offload_init(void) { x86_irq_funcs[CONFIG_IRQ_OFFLOAD_VECTOR - IV_IRQS] = dispatcher; return 0; } SYS_INIT(irq_offload_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT); ```
/content/code_sandbox/arch/x86/core/intel64/irq_offload.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
349
```c
/*
 *
 */

#include <string.h>
#include <zephyr/debug/coredump.h>

/* Version of the architecture-specific coredump header block */
#define ARCH_HDR_VER 1

/* Register dump layout sent to the coredump backend; the field order is
 * part of the wire format consumed by the GDB stub, and the struct is
 * packed — do not reorder fields.
 */
struct x86_64_arch_block {
	uint64_t vector;	/* exception vector number */
	uint64_t code;		/* exception error code */
	struct {
		uint64_t rax;
		uint64_t rcx;
		uint64_t rdx;
		uint64_t rsi;
		uint64_t rdi;
		uint64_t rsp;
		uint64_t r8;
		uint64_t r9;
		uint64_t r10;
		uint64_t r11;
		uint64_t rip;
		uint64_t eflags;
		uint64_t cs;
		uint64_t ss;
		uint64_t rbp;
#ifdef CONFIG_EXCEPTION_DEBUG
		/* Callee-saved registers, present in the ESF only when
		 * CONFIG_EXCEPTION_DEBUG is enabled
		 */
		uint64_t rbx;
		uint64_t r12;
		uint64_t r13;
		uint64_t r14;
		uint64_t r15;
#endif
	} r;
} __packed;

/*
 * Register block takes up too much stack space
 * if defined within function. So define it here.
 */
static struct x86_64_arch_block arch_blk;

/*
 * Emit the architecture coredump block: a header followed by the
 * registers captured from the exception stack frame.  Does nothing if
 * no ESF is available.
 */
void arch_coredump_info_dump(const struct arch_esf *esf)
{
	struct coredump_arch_hdr_t hdr = {
		.id = COREDUMP_ARCH_HDR_ID,
		.hdr_version = ARCH_HDR_VER,
		.num_bytes = sizeof(arch_blk),
	};

	/* Nothing to process */
	if (esf == NULL) {
		return;
	}

	(void)memset(&arch_blk, 0, sizeof(arch_blk));

	arch_blk.vector = esf->vector;
	arch_blk.code = esf->code;

	/*
	 * 34 registers expected by GDB.
	 * Not all are in ESF but the GDB stub
	 * will need to send all 34 as one packet.
	 * The stub will need to send undefined
	 * for registers not presented in coredump.
	 */
	arch_blk.r.rax = esf->rax;
	arch_blk.r.rcx = esf->rcx;
	arch_blk.r.rdx = esf->rdx;
	arch_blk.r.rsi = esf->rsi;
	arch_blk.r.rdi = esf->rdi;
	arch_blk.r.rsp = esf->rsp;
	arch_blk.r.rip = esf->rip;
	arch_blk.r.r8 = esf->r8;
	arch_blk.r.r9 = esf->r9;
	arch_blk.r.r10 = esf->r10;
	arch_blk.r.r11 = esf->r11;
	arch_blk.r.eflags = esf->rflags;
	/* Only the low 16 bits of the CS selector are meaningful */
	arch_blk.r.cs = esf->cs & 0xFFFFU;
	arch_blk.r.ss = esf->ss;
	arch_blk.r.rbp = esf->rbp;
#ifdef CONFIG_EXCEPTION_DEBUG
	arch_blk.r.rbx = esf->rbx;
	arch_blk.r.r12 = esf->r12;
	arch_blk.r.r13 = esf->r13;
	arch_blk.r.r14 = esf->r14;
	arch_blk.r.r15 = esf->r15;
#endif

	/* Send for output */
	coredump_buffer_output((uint8_t *)&hdr, sizeof(hdr));
	coredump_buffer_output((uint8_t *)&arch_blk, sizeof(arch_blk));
}

/* Identify this coredump as targeting x86-64 */
uint16_t arch_coredump_tgt_code_get(void)
{
	return COREDUMP_TGT_X86_64;
}
```
/content/code_sandbox/arch/x86/core/intel64/coredump.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
744
```c
/* */

#include <cpuid.h>
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <kernel_arch_data.h>
#include <kernel_arch_func.h>
#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <zephyr/arch/x86/multiboot.h>
#include <x86_mmu.h>
#include <zephyr/drivers/interrupt_controller/loapic.h>

#ifdef CONFIG_ACPI
#include <zephyr/arch/x86/cpuid.h>
#include <zephyr/acpi/acpi.h>
#endif

/*
 * Map of CPU logical IDs to CPU local APIC IDs. By default,
 * we assume this simple identity mapping, as found in QEMU.
 * The symbol is weak so that boards/SoC files can override.
 */
#if defined(CONFIG_ACPI)
/* Entries are filled in at runtime from the ACPI MADT in arch_cpu_start() */
__weak uint8_t x86_cpu_loapics[CONFIG_MP_MAX_NUM_CPUS];
#else
#define INIT_CPUID(n, _) n
__weak uint8_t x86_cpu_loapics[] = {
	LISTIFY(CONFIG_MP_MAX_NUM_CPUS, INIT_CPUID, (,)),
};
#endif

extern char x86_ap_start[]; /* AP entry point in locore.S */

LISTIFY(CONFIG_MP_MAX_NUM_CPUS, ACPI_CPU_INIT, (;));

/* Boot argument record filled in by the early assembly entry code */
Z_GENERIC_SECTION(.boot_arg) x86_boot_arg_t x86_cpu_boot_arg;

/* Per-CPU boot descriptors consumed by the locore.S startup path */
struct x86_cpuboot x86_cpuboot[] = {
	LISTIFY(CONFIG_MP_MAX_NUM_CPUS, X86_CPU_BOOT_INIT, (,)),
};

/*
 * Send the INIT/STARTUP IPI sequence required to start up CPU 'cpu_num', which
 * will enter the kernel at fn(arg), running on the specified stack.
 */
void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz,
		    arch_cpustart_t fn, void *arg)
{
#if CONFIG_MP_MAX_NUM_CPUS > 1
	/* The STARTUP IPI vector encodes the 4K page of the AP entry point */
	uint8_t vector = ((unsigned long) x86_ap_start) >> 12;
	uint8_t apic_id;

	IF_ENABLED(CONFIG_ACPI, ({
		ACPI_MADT_LOCAL_APIC *lapic = acpi_local_apic_get(cpu_num);

		if (lapic != NULL) {
			/* We update the apic_id, __start will need it. */
			x86_cpu_loapics[cpu_num] = lapic->Id;
		} else {
			/* TODO: kernel need to handle the error scenario if someone config
			 * CONFIG_MP_MAX_NUM_CPUS more than what platform supported.
			 */
			__ASSERT(false, "CPU reached more than maximum supported!");
			return;
		}
	}));

	apic_id = x86_cpu_loapics[cpu_num];

	x86_cpuboot[cpu_num].sp = (uint64_t) K_KERNEL_STACK_BUFFER(stack) + sz;
	x86_cpuboot[cpu_num].stack_size = sz;
	x86_cpuboot[cpu_num].fn = fn;
	x86_cpuboot[cpu_num].arg = arg;
	x86_cpuboot[cpu_num].cpu_id = cpu_num;

	/* INIT IPI, settling delay, then STARTUP IPI at 'vector' */
	z_loapic_ipi(apic_id, LOAPIC_ICR_IPI_INIT, 0);
	k_busy_wait(10000);
	z_loapic_ipi(apic_id, LOAPIC_ICR_IPI_STARTUP, vector);

	/* Spin until the AP marks itself ready in z_x86_cpu_init() */
	while (x86_cpuboot[cpu_num].ready == 0) {
	}
#else
	ARG_UNUSED(cpu_num);
	ARG_UNUSED(stack);
	ARG_UNUSED(sz);
	ARG_UNUSED(fn);
	ARG_UNUSED(arg);
#endif
}

/* Per-CPU initialization, C domain. On the first CPU, z_prep_c is the
 * next step. For other CPUs it is probably smp_init_top().
 */
FUNC_NORETURN void z_x86_cpu_init(struct x86_cpuboot *cpuboot)
{
#if defined(CONFIG_ACPI)
	__ASSERT(z_x86_cpuid_get_current_physical_apic_id() ==
		 x86_cpu_loapics[cpuboot->cpu_id], "APIC ID miss match!");
#endif
	x86_sse_init(NULL);

	if (cpuboot->cpu_id == 0U) {
		/* Only need to do these once per boot */
		z_bss_zero();
		z_data_copy();
	}

	z_loapic_enable(cpuboot->cpu_id);

#ifdef CONFIG_USERSPACE
	/* Set landing site for 'syscall' instruction */
	z_x86_msr_write(X86_LSTAR_MSR, (uint64_t)z_x86_syscall_entry_stub);

	/* Set segment descriptors for syscall privilege transitions */
	z_x86_msr_write(X86_STAR_MSR, (uint64_t)X86_STAR_UPPER << 32);

	/* Mask applied to RFLAGS when making a syscall */
	z_x86_msr_write(X86_FMASK_MSR, EFLAGS_SYSCALL);
#endif

	/* Enter kernel, never return */
	cpuboot->ready++;
	cpuboot->fn(cpuboot->arg);

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
```
/content/code_sandbox/arch/x86/core/intel64/cpu.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,069
```unknown
/*
 *
 */

/* x86-64 userspace support: KPTI return trampoline, 'syscall' entry
 * stub, user string length probe, and the one-way drop to user mode.
 */

#include <zephyr/toolchain.h>
#include <zephyr/arch/cpu.h>
#include <offsets_short.h>
#include <zephyr/syscall.h>
#include <zephyr/kernel/mm.h>

#ifdef CONFIG_X86_KPTI
/* Copy interrupt return stack context to the trampoline stack, switch back
 * to the user page table, and only then 'iret'. We jump to this instead
 * of calling 'iret' if KPTI is turned on. This must be invoked with interrupts
 * locked.
 *
 * Stack layout is expected to be what 'iretq' expects, which is as follows:
 *
 * 32 SS
 * 24 RSP
 * 16 RFLAGS
 * 8 CS
 * 0 RIP
 */
.global z_x86_trampoline_to_user
z_x86_trampoline_to_user:
	/* Stash EDI, need a free register */
	pushq	%rdi

	/* Store old stack pointer and switch to trampoline stack */
	movq	%rsp, %rdi
	movq	%gs:__x86_tss64_t_ist2_OFFSET, %rsp

	/* Copy context */
	pushq	40(%rdi)	/* SS */
	pushq	32(%rdi)	/* RSP */
	pushq	24(%rdi)	/* RFLAGS */
	pushq	16(%rdi)	/* CS */
	pushq	8(%rdi)		/* RIP */

	xchgq	%rdi, (%rdi)	/* Exchange old rdi to restore it and put
				 * trampoline stack address in its old
				 * storage area
				 */

	/* Switch to thread's page table */
	pushq	%rax
	movq	%gs:__x86_tss64_t_cpu_OFFSET, %rax
	movq	___cpu_t_current_OFFSET(%rax), %rax
	movq	_thread_offset_to_ptables(%rax), %rax
	movq	%rax, %cr3
	popq	%rax
	movq	$0, -8(%rsp)	/* Delete stashed RAX data */

	/* Trampoline stack should have nothing sensitive in it at this point */
	swapgs
	iretq
#endif /* CONFIG_X86_KPTI */

/* Landing site for 'syscall' instruction
 *
 * Call id is in RAX
 * Arguments are in RDI, RSI, RDX, R10, R8, R9
 * Return address stored by CPU in RCX
 * User RFLAGS stored by CPU in R11
 * Current RFLAGS has been masked with ~X86_FMASK_MSR
 */
.global z_x86_syscall_entry_stub
z_x86_syscall_entry_stub:
	swapgs

	/* Save original stack pointer from user mode in memory, at the
	 * moment we have no free registers or stack to save it to. This
	 * eventually gets put on the stack before we re-enable interrupts
	 * as this is a per-cpu and not per-thread area.
	 */
	movq	%rsp, %gs:__x86_tss64_t_usp_OFFSET

#ifdef CONFIG_X86_KPTI
	/* We need to switch to the trampoline stack so that we can
	 * switch to the kernel's page table
	 */
	movq	%gs:__x86_tss64_t_ist2_OFFSET, %rsp

	/* Load kernel's page table */
	pushq	%rax

	/* NOTE: Presumes phys=virt */
	movq	$K_MEM_PHYS_ADDR(z_x86_kernel_ptables), %rax
	movq	%rax, %cr3
	popq	%rax
	movq	$0, -8(%rsp)	/* Delete stashed RAX data */
#endif /* CONFIG_X86_KPTI */

	/* Switch to the privilege mode stack pointer stored in
	 * x86_tss64.psp
	 */
	movq	%gs:__x86_tss64_t_psp_OFFSET, %rsp

	/* We're now on the privilege mode stack; push the old user stack
	 * pointer onto it
	 */
	pushq	%gs:__x86_tss64_t_usp_OFFSET
#ifdef CONFIG_X86_KPTI
	movq	$0, %gs:__x86_tss64_t_usp_OFFSET
#endif
	sti			/* re-enable interrupts */

	/* call_id is in RAX. bounds-check it, must be less than
	 * K_SYSCALL_LIMIT.
	 */
	cmp	$K_SYSCALL_LIMIT, %rax
	jae	_bad_syscall

_id_ok:
#ifdef CONFIG_X86_BOUNDS_CHECK_BYPASS_MITIGATION
	/* Prevent speculation with bogus system call IDs */
	lfence
#endif
	/* Remaining registers not involved in the syscall operation are
	 * RBX, RBP, R12-R15, plus floating point / SIMD registers.
	 *
	 * We save caller-saved registers so we can restore to original values
	 * when we call 'sysretq' at the end.
	 */
	pushq	%rdi
	subq	$X86_FXSAVE_SIZE, %rsp
	fxsave	(%rsp)
	pushq	%rsi
	pushq	%rdx
	pushq	%r8
	pushq	%r9
	pushq	%r10
	pushq	%r11	/* RFLAGS */
	pushq	%rcx	/* Return address stored by 'syscall' */
	pushq	%rsp	/* SSF parameter */

	/* All other args are in the right registers, except arg4 which
	 * we had to put in r10 instead of RCX
	 */
	movq	%r10, %rcx

	/* from the call ID in RAX, load R10 with the actual function pointer
	 * to call by looking it up in the system call dispatch table
	 */
	xorq	%r11, %r11
	movq	_k_syscall_table(%r11, %rax, 8), %r10

	/* Run the marshal function, which is some entry in _k_syscall_table */
	call	*%r10

	/* RAX now contains the return value
	 *
	 * Callee-saved registers are un-touched from original values per C
	 * calling convention, but sensitive data may lurk in caller-saved regs
	 * RDI, RSI, RDX, R8, R9, R10, XMM* after we have serviced the system
	 * call. We saved them earlier, restore their original values when
	 * the syscall was made. This also preserves these registers if they
	 * were not used as arguments.
	 *
	 * We also can't have RCX and R11 clobbered as we need the original
	 * values to successfully 'sysretq'.
	 */
	addq	$8, %rsp	/* Discard ssf */
	popq	%rcx	/* Restore return address for 'sysretq' */
	popq	%r11	/* Restore RFLAGS for 'sysretq' */
	popq	%r10
	popq	%r9
	popq	%r8
	popq	%rdx
	popq	%rsi
	fxrstor	(%rsp)
	addq	$X86_FXSAVE_SIZE, %rsp
	popq	%rdi

#ifdef CONFIG_X86_KPTI
	/* Lock IRQs as we are using per-cpu memory areas and the
	 * trampoline stack
	 */
	cli

	/* Stash user stack pointer and switch to trampoline stack */
	popq	%gs:__x86_tss64_t_usp_OFFSET
	movq	%gs:__x86_tss64_t_ist2_OFFSET, %rsp

	/* Switch to thread's page table */
	pushq	%rax
	movq	%gs:__x86_tss64_t_cpu_OFFSET, %rax
	movq	___cpu_t_current_OFFSET(%rax), %rax
	movq	_thread_offset_to_ptables(%rax), %rax
	movq	%rax, %cr3
	popq	%rax
	movq	$0, -8(%rsp)	/* Delete stashed RAX data */

	/* Restore saved user stack pointer */
	movq	%gs:__x86_tss64_t_usp_OFFSET, %rsp
	movq	$0, %gs:__x86_tss64_t_usp_OFFSET
#else
	/* Restore user stack pointer */
	popq	%rsp

	/* Return to user mode, locking interrupts as the normal interrupt
	 * handling path will get very confused if it occurs between
	 * 'swapgs' and 'sysretq'
	 */
	cli
#endif /* CONFIG_X86_KPTI */
	swapgs
	sysretq

_bad_syscall:
	/* RAX had a bogus syscall value in it, replace with the bad syscall
	 * handler's ID, and put the bad ID as its first argument.
	 *
	 * TODO: On this and all other arches, simply immediately return
	 * with -ENOSYS, once all syscalls have a return value
	 */
	movq	%rax, %rdi
	movq	$K_SYSCALL_BAD, %rax
	jmp	_id_ok

/*
 * size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)
 *                                    ^ RDI          ^ RSI         ^ RDX
 *
 * The probe between the fault_start/fault_end labels may page fault;
 * the fault handler redirects execution to the fixup label below.
 */
.global arch_user_string_nlen
arch_user_string_nlen:
	/* Initial error value, strlen_done adjusts this if we succeed */
	movl	$-1, %r8d

	/* use RAX as our length count (this function's return value) */
	xor	%rax, %rax

	/* This code might page fault */
strlen_loop:
.global z_x86_user_string_nlen_fault_start
z_x86_user_string_nlen_fault_start:
	cmpb	$0x0, (%rdi, %rax, 1)	/* *(RDI + RAX) == 0? Could fault. */

.global z_x86_user_string_nlen_fault_end
z_x86_user_string_nlen_fault_end:
	je	strlen_done
	cmp	%rsi, %rax	/* Max length reached? */
	je	strlen_done
	inc	%rax		/* EAX++ and loop again */
	jmp	strlen_loop

strlen_done:
	/* Set error value to 0 since we succeeded */
	xorl	%r8d, %r8d

.global z_x86_user_string_nlen_fixup
z_x86_user_string_nlen_fixup:
	/* Write error value to 32-bit integer err pointer parameter */
	movl	%r8d, (%rdx)
	retq

/*
 * Trampoline function to put the p3 parameter in the register expected
 * by the calling convention, we couldn't use RCX when we called 'sysret'
 */
z_x86_userspace_landing_site:
	/* Place argument 4 in the correct position */
	movq	%r10, %rcx
	call	z_thread_entry

/* FUNC_NORETURN void z_x86_userspace_enter(
 *		k_thread_entry_t user_entry,	<- RDI
 *		void *p1, void *p2, void *p3,	<- RSI, RDX, RCX
 *		uintptr_t stack_end,		<- R8
 *		uintptr_t stack_start)		<- R9
 *
 * A one-way trip to userspace.
 */
.global z_x86_userspace_enter
z_x86_userspace_enter:
	/* RCX is sysret return address, pass along p3 in r10,
	 * z_x86_userspace_landing_site will fix this up
	 */
	movq	%rcx, %r10

	/* switch to privilege mode stack so we can erase thread stack buffer,
	 * the buffer is the page immediately before the thread stack
	 */
	movq	%r9, %rsp

	/* Push callee-saved regs and go back into C code to erase the stack
	 * buffer and set US bit in page tables for it
	 */
	pushq	%rdx
	pushq	%rsi
	pushq	%rdi
	pushq	%r8
	pushq	%r10
	callq	z_x86_current_stack_perms
	popq	%r10
	popq	%r8
	popq	%rdi
	popq	%rsi
	popq	%rdx

	/* Reset to the beginning of the user stack */
	movq	%r8, %rsp

	/* set sysret entry point */
	movq	$z_x86_userspace_landing_site, %rcx

	/* Copy RFLAGS into r11, required by sysret */
	pushfq
	movq	(%rsp), %r11
	movq	$0, (%rsp)	/* Now a debugger-friendly return address */

	/* cleanse other registers */
	xorq	%rbx, %rbx
	xorq	%rbp, %rbp
	xorq	%r12, %r12
	xorq	%r13, %r13
	xorq	%r14, %r14
	xorq	%r15, %r15

	cli

#ifdef CONFIG_X86_KPTI
	/* Switch to thread's page table. We have free registers so no need
	 * to involve the trampoline stack.
	 */
	movq	%gs:__x86_tss64_t_cpu_OFFSET, %rax
	movq	___cpu_t_current_OFFSET(%rax), %rax
	movq	_thread_offset_to_ptables(%rax), %rax
	movq	%rax, %cr3
#endif
	swapgs
	sysretq
```
/content/code_sandbox/arch/x86/core/intel64/userspace.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,962
```c
/*
 *
 */

#include <kernel_internal.h>

/*
 * Early-boot hook: write the X86_FS_BASE MSR so that FS points at a
 * one-word TLS "self pointer" stored at the top of the given stack.
 * NOTE(review): presumably this makes FS-relative TLS accesses (e.g.
 * stack canaries) work before full TLS setup — confirm with callers
 * in locore.S (CONFIG_STACK_CANARIES_TLS path).
 */
FUNC_NO_STACK_PROTECTOR
void z_x86_early_tls_update_gdt(char *stack_ptr)
{
	uintptr_t *self_ptr;
	uint32_t fs_base = X86_FS_BASE;

	/*
	 * Since we are populating things backwards, store
	 * the pointer to the TLS area at top of stack.
	 */
	stack_ptr -= sizeof(uintptr_t);
	self_ptr = (void *)stack_ptr;
	*self_ptr = POINTER_TO_UINT(stack_ptr);

	/* wrmsr writes EDX:EAX to the MSR selected by ECX; the high half
	 * of the pointer is shifted into RDX first.
	 */
	__asm__ volatile(
		"movl %0, %%ecx;\n\t"
		"movq %1, %%rax;\n\t"
		"movq %1, %%rdx;\n\t"
		"shrq $32, %%rdx;\n\t"
		"wrmsr;\n\t"
		:
		: "r"(fs_base), "r"(POINTER_TO_UINT(self_ptr)));
}
```
/content/code_sandbox/arch/x86/core/intel64/tls.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
204
```c
/*
 *
 */

#include <zephyr/kernel.h>
#include <zephyr/irq_offload.h>
#include <kernel_arch_data.h>
#include <x86_mmu.h>
#include <zephyr/init.h>

#define NR_IRQ_VECTORS (IV_NR_VECTORS - IV_IRQS) /* # vectors free for IRQs */

extern void (*x86_irq_funcs[NR_IRQ_VECTORS])(const void *arg);
extern const void *x86_irq_args[NR_IRQ_VECTORS];

/* Install the SMP IPI handlers (scheduler IPI and TLB shootdown) into
 * the arch IRQ dispatch table.
 */
int arch_smp_init(void)
{
	/*
	 * z_sched_ipi() doesn't have the same signature as a typical ISR, so
	 * we fudge it with a cast. the argument is ignored, no harm done.
	 */
	x86_irq_funcs[CONFIG_SCHED_IPI_VECTOR - IV_IRQS] =
		(void *) z_sched_ipi;

	/* TLB shootdown handling */
	x86_irq_funcs[CONFIG_TLB_IPI_VECTOR - IV_IRQS] = z_x86_tlb_ipi;

	return 0;
}

/*
 * it is not clear exactly how/where/why to abstract this, as it
 * assumes the use of a local APIC (but there's no other mechanism).
 */
void arch_sched_broadcast_ipi(void)
{
	/* Send the scheduler IPI vector to all CPUs except this one */
	z_loapic_ipi(0, LOAPIC_ICR_IPI_OTHERS, CONFIG_SCHED_IPI_VECTOR);
}
```
/content/code_sandbox/arch/x86/core/intel64/smp.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
292
```unknown
/* */

/* x86-64 low-memory boot code (.locore): BSP/AP entry, long-mode
 * transition, FP init, context switch, and exception/IRQ entry stubs.
 */

#define LOAPIC_BASE_ADDRESS DT_REG_ADDR(DT_NODELABEL(intc_loapic))

#include <zephyr/toolchain.h>
#include <zephyr/arch/x86/multiboot.h>
#include <zephyr/arch/x86/efi.h>
#include <zephyr/sys/util.h>
#include <zephyr/arch/x86/msr.h>
#include <kernel_arch_data.h>
#include <offsets_short.h>
#include <zephyr/drivers/interrupt_controller/loapic.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/kernel/mm.h>

/*
 * Definitions/macros for enabling paging
 */

/* Long mode, no-execute, syscall */
#define EFER_BITS (X86_EFER_MSR_LME | X86_EFER_MSR_NXE | X86_EFER_MSR_SCE)

/* Paging, write-protect */
#define CR0_BITS (CR0_PG | CR0_WP)

/* PAE, SSE */
#define CR4_BITS (CR4_PAE | CR4_OSFXSR)

.macro set_efer
	movl $X86_EFER_MSR, %ecx
	rdmsr
	orl $EFER_BITS, %eax
	wrmsr
.endm

.macro install_pagetables_32
	movl %cr4, %eax
	orl $CR4_BITS, %eax
	movl %eax, %cr4
	clts

	/* Page tables created at build time by gen_mmu.py
	 * NOTE: Presumes phys=virt
	 */
	movl $K_MEM_PHYS_ADDR(z_x86_kernel_ptables), %eax
	movl %eax, %cr3

	set_efer

	movl %cr0, %eax
	orl $CR0_BITS, %eax
	movl %eax, %cr0
.endm

.macro install_pagetables_64
	/* Here, we are already in long mode with paging enabled and
	 * just need to switch to our own page tables, but let's be
	 * paranoid and ensure CR4, CR0, and EFER_MSR are set up
	 * exactly how we expect. Logic is the same as install_pagetables_32
	 */
	movq %cr4, %rax
	orq $CR4_BITS, %rax
	movq %rax, %cr4
	clts

	/* NOTE: Presumes phys=virt */
	movq $K_MEM_PHYS_ADDR(z_x86_kernel_ptables), %rax
	movq %rax, %cr3

	set_efer

	movq %cr0, %rax
	/* Use 32-bit instructions due to assembler fussiness with large
	 * immediate values with `orq`, CR0_PG is bit 31. We don't ever set any
	 * high bits in cr0 anyway.
	 */
	orl $CR0_BITS, %eax
	movq %rax, %cr0
.endm

.macro DEFINE_TSS_STACK_ARRAY
	.irp idx, DEFINE_STACK_ARRAY_IDX
		.word __X86_TSS64_SIZEOF-1
		.word tss\idx
		.word 0x8900
		.word 0, 0, 0, 0, 0
	.endr
.endm

/* The .locore section begins the page-aligned initialization region
 * of low memory.  The first address is used as the architectural
 * entry point for auxiliary CPUs being brought up (in real mode!)
 * via a startup IPI.  It is ALSO used by some loaders (well,
 * ACRN...) who hard-coded the address by inspecting _start on a
 * non-SMP build.
 *
 * === OUTRAGEOUS HACK FOLLOWS ===
 *
 * Therefore it needs to start at OS entry with a 32 bit jump to the
 * 32 bit entry point, and gets clobbered later (see the beginning of
 * __start32) with NOP bytes such that the next CPU will fall through
 * to the 16 bit SMP entry.
 *
 * We write out the JMP followed by 8 NOPs for simplicity. No i386
 * JMP encodes with more than 8 bytes, so we can come back later and
 * scribble over it with 8 0x90 bytes (which is the 1-byte NOP) and be
 * sure to get all of it without overwriting anything.
 */
.section .locore,"ax"
.code32
.globl __start
__start:
	jmp __start32
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

.code16
.global x86_ap_start
x86_ap_start:
	/*
	 * First, we move to 32-bit protected mode, and set up the
	 * same flat environment that the BSP gets from the loader.
	 */
	lgdt gdt48
	lidt idt48
	movl %cr0, %eax
	or $1, %eax
	movl %eax, %cr0

	jmpl $X86_KERNEL_CS_32, $1f
.code32
1:	movw $X86_KERNEL_DS_32, %ax
	movw %ax, %ds
	movw %ax, %es
	movw %ax, %ss
	movw %ax, %fs

	/*
	 * Now, reverse-map our local APIC ID to our logical CPU ID
	 * so we can locate our x86_cpuboot[] bundle. Put it in EBP.
	 */
	movl LOAPIC_BASE_ADDRESS+LOAPIC_ID, %eax
	shrl $24, %eax
	andl $0xFF, %eax	/* local APIC ID -> EAX */

	movl $x86_cpuboot, %ebp
	xorl %ebx, %ebx
1:	cmpl $CONFIG_MP_MAX_NUM_CPUS, %ebx
	jz unknown_loapic_id
	cmpb %al, x86_cpu_loapics(%ebx)
	je go64			/* proceed to 64-bit mode */
	incl %ebx
	addl $__X86_CPUBOOT_SIZEOF, %ebp
	jmp 1b

unknown_loapic_id:
	jmp unknown_loapic_id

.code32
.globl __start32
__start32:
	/*
	 * kernel execution begins here in 32-bit mode, with flat-mode
	 * descriptors in all segment registers, interrupts disabled.
	 */

	/* See note above, re: OUTRAGEOUS HACK */
	movl $__start, %ebp
	movb $0x90, 0(%ebp)
	movb $0x90, 1(%ebp)
	movb $0x90, 2(%ebp)
	movb $0x90, 3(%ebp)
	movb $0x90, 4(%ebp)
	movb $0x90, 5(%ebp)
	movb $0x90, 6(%ebp)
	movb $0x90, 7(%ebp)

	wbinvd

	lgdt gdt48
	lidt idt48

#include "../common.S"

	/*
	 * N.B.: if multiboot info struct is present, "common.S"
	 * has left a pointer to it in EBX.
	 */
	movl $x86_cpu_boot_arg, %ebp
	/* Inserting boot type */
	movl $MULTIBOOT_BOOT_TYPE, __x86_boot_arg_t_boot_type_OFFSET(%ebp)
	/* and multiboot info */
	movl %ebx, __x86_boot_arg_t_arg_OFFSET(%ebp)

	movl $x86_cpuboot, %ebp	/* BSP is always logical CPU id 0 */

go64:	/* Install page tables and transition to long mode */
	install_pagetables_32
	jmpl $X86_KERNEL_CS, $enter_code64

/* Long mode entry point. Arrive here from the code
 * immediately above (shared between main CPU startup and AP
 * startup), or from EFI entry in __start64.
 *
 * Here we reload the segment registers,
 * and configure per-CPU stuff: GS, task register, stack.
 */
.code64
enter_code64:
	movl $X86_KERNEL_DS, %eax
	movw %ax, %ds
	movw %ax, %es
	movw %ax, %ss
	movw %ax, %fs

	/* On Intel processors, if GS is not zero and is being set to
	 * zero, GS_BASE is also being set to zero. This would interfere
	 * with the actual use of GS_BASE for userspace. To avoid accidentally
	 * clearing GS_BASE, simply set GS to 0 at boot, so any subsequent
	 * clearing of GS will not clear GS_BASE.
	 */
	mov $0, %eax
	movw %ax, %gs

	movw __x86_cpuboot_t_tr_OFFSET(%rbp), %ax
	ltr %ax

	/* Set up MSRs for GS / KERNEL_GS base */
	movq __x86_cpuboot_t_gs_base_OFFSET(%rbp), %rax
	movq %rax, %rdx
	shrq $32, %rdx
	/* X86_KERNEL_GS_BASE and X86_GS_BASE are swapped by the 'swapgs'
	 * instruction.
	 */
	movl $X86_KERNEL_GS_BASE, %ecx
	wrmsr
	/* X86_GS_BASE shadows base fields of %gs, effectively setting %gs */
	movl $X86_GS_BASE, %ecx
	wrmsr

	movq __x86_cpuboot_t_sp_OFFSET(%rbp), %rsp
	movq %rsp, %gs:__x86_tss64_t_ist1_OFFSET

	/* finally, complete environment for the C runtime and go. */
	cld	/* GCC presumes a clear direction flag */

#ifdef CONFIG_INIT_STACKS
	/* Fill the boot stack with the 0xAA poison pattern */
	movq $0xAAAAAAAAAAAAAAAA, %rax
	movq %rsp, %rdi
	subq __x86_cpuboot_t_stack_size_OFFSET(%rbp), %rdi
	movq __x86_cpuboot_t_stack_size_OFFSET(%rbp), %rcx
	shr $3, %rcx	/* moving 8 bytes a time, so fewer repeats */
	rep stosq
#endif

#ifdef CONFIG_STACK_CANARIES_TLS
	movq %rsp, %rdi
	pushq %rsp
	call z_x86_early_tls_update_gdt
	popq %rsp
#endif
	/* Enter C domain now that we have a stack set up, never to return */
	movq %rbp, %rdi
	call z_x86_cpu_init

/* 64 bit OS entry point, used by EFI support. UEFI
 * guarantees an identity-mapped page table that covers
 * physical memory, and the loader stub already used it to
 * write all of the Zephyr image, so we know it works for what
 * we need. Other things need fixups to match what multiboot
 * 32 bit startup does.
 */
.globl __start64
__start64:
	/* Zero the TSC */
	xorq %rax, %rax
	xorq %rdx, %rdx
	movq $X86_TIME_STAMP_COUNTER_MSR, %rcx
	wrmsr

	lidt idt80
	lgdt gdt80

	install_pagetables_64

	/* Disable 8259 PIT. Almost certainly not needed on modern
	 * UEFI platforms taking this code path, but...
	 */
	movb $0xff, %al
	outb %al, $0x21
	outb %al, $0xA1

	movq $x86_cpu_boot_arg, %rbp
	/* Inserting boot type */
	movq $EFI_BOOT_TYPE, __x86_boot_arg_t_boot_type_OFFSET(%rbp)
	/* and EFI boot arg (if any) */
	movq %rbx, __x86_boot_arg_t_arg_OFFSET(%rbp)
	movq $x86_cpuboot, %rbp	/* BSP is always logical CPU id 0 */

	mov jmpdesc, %rax
	jmp *%rax
jmpdesc:
	.quad enter_code64
	.short X86_KERNEL_CS

/*
 * void x86_sse_init(struct k_thread *thread);
 *
 * Initialize floating-point state to something sane. If 'thread' is
 * not NULL, then the resulting FP state is saved to thread->arch.sse.
 */
.global x86_sse_init
x86_sse_init:
	fninit
	ldmxcsr mxcsr
	testq %rdi, %rdi
	jz 1f
	fxsave _thread_offset_to_sse(%rdi)
1:	retq

mxcsr:	.long X86_MXCSR_SANE

/*
 * void z_x86_switch(void *switch_to, void **switched_from);
 *
 * Note that switch_handle for us is simply a pointer to the containing
 * 'struct k_thread', thus:
 *
 * RDI = (struct k_thread *) switch_to
 * RSI = (struct k_thread **) address of output thread switch_handle field
 */
.globl z_x86_switch
z_x86_switch:
	/* RSI contains the switch_handle field to which we are
	 * notionally supposed to store. Offset it to get back to the
	 * thread handle instead.
	 */
	subq $___thread_t_switch_handle_OFFSET, %rsi

	andb $~X86_THREAD_FLAG_ALL, _thread_offset_to_flags(%rsi)

	/* Save outgoing thread's return address, flags, stack pointer
	 * and callee-saved registers
	 */
	popq %rax
	movq %rax, _thread_offset_to_rip(%rsi)
	pushfq
	popq %rax
	movq %rax, _thread_offset_to_rflags(%rsi)
	movq %rsp, _thread_offset_to_rsp(%rsi)
	movq %rbx, _thread_offset_to_rbx(%rsi)
	movq %rbp, _thread_offset_to_rbp(%rsi)
	movq %r12, _thread_offset_to_r12(%rsi)
	movq %r13, _thread_offset_to_r13(%rsi)
	movq %r14, _thread_offset_to_r14(%rsi)
	movq %r15, _thread_offset_to_r15(%rsi)
#ifdef CONFIG_USERSPACE
	/* We're always in supervisor mode if we get here, the other case
	 * is when __resume is invoked from irq_dispatch
	 */
	movq $X86_KERNEL_CS, _thread_offset_to_cs(%rsi)
	movq $X86_KERNEL_DS, _thread_offset_to_ss(%rsi)
#endif
	/* Store the handle (i.e. our thread struct address) into the
	 * switch handle field, this is a synchronization signal that
	 * must occur after the last data from the old context is
	 * saved.
	 */
	movq %rsi, ___thread_t_switch_handle_OFFSET(%rsi)

	movq %gs:__x86_tss64_t_ist1_OFFSET, %rsp

	/* fall through to __resume */

/*
 * Entry:
 *   RSP = top of CPU interrupt stack
 *   RDI = (struct k_thread *) thread to resume
 */
__resume:
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/*
	 * Write the TLS base pointer to FS_BASE MSR,
	 * where GCC emits code to access TLS data via
	 * offset to FS.
	 * Since wrmsr writes EDX:EAX to the MSR indicated by
	 * ECX, the high 32-bit needs to be loaded into
	 * RDX and right shifted by 32 bits so EDX has
	 * the higher 32-bit value.
	 */
	movl $X86_FS_BASE, %ecx
	movq _thread_offset_to_tls(%rdi), %rax
	movq _thread_offset_to_tls(%rdi), %rdx
	shrq $32, %rdx
	wrmsr
#endif
#if (!defined(CONFIG_X86_KPTI) && defined(CONFIG_USERSPACE)) \
		|| defined(CONFIG_INSTRUMENT_THREAD_SWITCHING)
	pushq %rdi	/* Caller-saved, stash it */
#if !defined(CONFIG_X86_KPTI) && defined(CONFIG_USERSPACE)
	/* If KPTI is enabled we're always on the kernel's page tables in
	 * this context and the appropriate page table switch takes place
	 * when trampolining back to user mode
	 */
	call z_x86_swap_update_page_tables
#endif
#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	call z_thread_mark_switched_in
#endif
	popq %rdi
#endif /* (!CONFIG_X86_KPTI && CONFIG_USERSPACE) || \
	  CONFIG_INSTRUMENT_THREAD_SWITCHING */

#ifdef CONFIG_USERSPACE
	/* Set up exception return stack frame */
	pushq _thread_offset_to_ss(%rdi)	/* SS */
#else
	pushq $X86_KERNEL_DS			/* SS */
#endif /* CONFIG_USERSPACE */
	pushq _thread_offset_to_rsp(%rdi)	/* RSP */
	pushq _thread_offset_to_rflags(%rdi)	/* RFLAGS */
#ifdef CONFIG_USERSPACE
	pushq _thread_offset_to_cs(%rdi)	/* CS */
#else
	pushq $X86_KERNEL_CS			/* CS */
#endif
	pushq _thread_offset_to_rip(%rdi)	/* RIP */

#ifdef CONFIG_ASSERT
	/* Poison the old thread's saved RIP pointer with a
	 * recognizable value near NULL, to easily catch reuse of the
	 * thread object across CPUs in SMP. Strictly speaking this
	 * is not an assertion, but it's very cheap and worth having
	 * on during routine testing.
	 */
	movq $0xB9, _thread_offset_to_rip(%rdi)
#endif

	movq _thread_offset_to_rbx(%rdi), %rbx
	movq _thread_offset_to_rbp(%rdi), %rbp
	movq _thread_offset_to_r12(%rdi), %r12
	movq _thread_offset_to_r13(%rdi), %r13
	movq _thread_offset_to_r14(%rdi), %r14
	movq _thread_offset_to_r15(%rdi), %r15
#ifdef CONFIG_USERSPACE
	/* Set correct privilege elevation stack to manually switch to in
	 * z_x86_syscall_entry_stub()
	 */
	movq _thread_offset_to_psp(%rdi), %rax
	movq %rax, %gs:__x86_tss64_t_psp_OFFSET
#endif

	testb $X86_THREAD_FLAG_ALL, _thread_offset_to_flags(%rdi)
	jz 1f

	fxrstor _thread_offset_to_sse(%rdi)
	movq _thread_offset_to_rax(%rdi), %rax
	movq _thread_offset_to_rcx(%rdi), %rcx
	movq _thread_offset_to_rdx(%rdi), %rdx
	movq _thread_offset_to_rsi(%rdi), %rsi
	movq _thread_offset_to_r8(%rdi), %r8
	movq _thread_offset_to_r9(%rdi), %r9
	movq _thread_offset_to_r10(%rdi), %r10
	movq _thread_offset_to_r11(%rdi), %r11
	movq _thread_offset_to_rdi(%rdi), %rdi	/* do last :-) */

#ifdef CONFIG_USERSPACE
	/* Swap GS register values if we are returning to user mode */
	testb $0x3, 8(%rsp)
	jz 1f
#ifdef CONFIG_X86_KPTI
	jmp z_x86_trampoline_to_user
#else
	swapgs
#endif /* CONFIG_X86_KPTI */
#endif /* CONFIG_USERSPACE */
1:
#ifdef CONFIG_X86_BOUNDS_CHECK_BYPASS_MITIGATION
	/* swapgs variant of Spectre V1. Disable speculation past this point */
	lfence
#endif /* CONFIG_X86_BOUNDS_CHECK_BYPASS_MITIGATION */
	iretq

#ifdef CONFIG_X86_KPTI
#define EXCEPT_CODE(nr, ist) \
	vector_ ## nr: pushq %gs:__x86_tss64_t_ist ## ist ## _OFFSET; \
		       pushq $nr; \
		       jmp except
#define EXCEPT(nr, ist) \
	vector_ ## nr: pushq $0; \
		       pushq %gs:__x86_tss64_t_ist ## ist ## _OFFSET; \
		       pushq $nr; \
		       jmp except
#else
#define EXCEPT_CODE(nr) vector_ ## nr: pushq $nr; jmp except
#define EXCEPT(nr) vector_ ## nr: pushq $0; pushq $nr; jmp except
#endif

/*
 * When we arrive at 'except' from one of the EXCEPT(X) stubs,
 * we're on the exception stack with irqs unlocked (or the trampoline stack
 * with irqs locked if KPTI is enabled) and it contains:
 *
 * SS
 * RSP
 * RFLAGS
 * CS
 * RIP
 * Error Code if pushed by CPU, else 0
 * IST index in TSS
 * Vector number <- RSP points here
 *
 */
except: /*
	 * finish struct NANO_ESF on stack. 'vector' .. 'ss' are
	 * already there from hardware trap and EXCEPT_*() stub.
	 */
	pushq %r11

#ifdef CONFIG_USERSPACE
	/* Swap GS register values and page tables if we came from user mode */
	testb $0x3, 40(%rsp)
	jz 1f
	swapgs
#ifdef CONFIG_X86_KPTI
	/* Load kernel's page table. NOTE: Presumes phys=virt */
	movq $z_x86_kernel_ptables, %r11
	movq %r11, %cr3
#endif /* CONFIG_X86_KPTI */
1:
#ifdef CONFIG_X86_BOUNDS_CHECK_BYPASS_MITIGATION
	/* swapgs variant of Spectre V1. Disable speculation past this point */
	lfence
#endif /* CONFIG_X86_BOUNDS_CHECK_BYPASS_MITIGATION */
#ifdef CONFIG_X86_KPTI
	/* Save old trampoline stack pointer in R11 */
	movq %rsp, %r11

	/* Switch to the correct stack */
	movq 16(%r11), %rsp

	/* Transplant trampoline stack contents */
	pushq 64(%r11)	/* SS */
	pushq 56(%r11)	/* RSP */
	pushq 48(%r11)	/* RFLAGS */
	pushq 40(%r11)	/* CS */
	pushq 32(%r11)	/* RIP */
	pushq 24(%r11)	/* Error code */
	pushq 8(%r11)	/* Vector */
	pushq (%r11)	/* Stashed R11 */
	movq $0, (%r11)	/* Cover our tracks */

	/* We're done, it's safe to re-enable interrupts. */
	sti
#endif /* CONFIG_X86_KPTI */
#endif /* CONFIG_USERSPACE */

	/* In addition to r11, push the rest of the caller-saved regs */
	/* Positioning of this fxsave is important, RSP must be 16-byte
	 * aligned
	 */
	subq $X86_FXSAVE_SIZE, %rsp
	fxsave (%rsp)
	pushq %r10
	pushq %r9
	pushq %r8
	pushq %rdi
	pushq %rsi
	pushq %rdx
	pushq %rcx
	pushq %rax
	pushq %rbp
#ifdef CONFIG_EXCEPTION_DEBUG
	/* Callee saved regs */
	pushq %r15
	pushq %r14
	pushq %r13
	pushq %r12
	pushq %rbx
#endif /* CONFIG_EXCEPTION_DEBUG */
	movq %rsp, %rdi

	call z_x86_exception

	/* If we returned, the exception was handled successfully and the
	 * thread may resume (the pushed RIP may have been modified)
	 */
#ifdef CONFIG_EXCEPTION_DEBUG
	popq %rbx
	popq %r12
	popq %r13
	popq %r14
	popq %r15
#endif /* CONFIG_EXCEPTION_DEBUG */
	popq %rbp
	popq %rax
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
	popq %r8
	popq %r9
	popq %r10
	fxrstor (%rsp)
	addq $X86_FXSAVE_SIZE, %rsp
	popq %r11

	/* Drop the vector/err code pushed by the HW or EXCEPT_*() stub */
	add $16, %rsp

#ifdef CONFIG_USERSPACE
	/* Swap GS register values if we are returning to user mode */
	testb $0x3, 8(%rsp)
	jz 1f
	cli
#ifdef CONFIG_X86_KPTI
	jmp z_x86_trampoline_to_user
#else
	swapgs
#endif /* CONFIG_X86_KPTI */
1:
#endif /* CONFIG_USERSPACE */

	iretq

#ifdef CONFIG_X86_KPTI
EXCEPT      ( 0, 7); EXCEPT      ( 1, 7); EXCEPT      (2, 6); EXCEPT      ( 3, 7)
EXCEPT      ( 4, 7); EXCEPT      ( 5, 7); EXCEPT      (6, 7); EXCEPT      ( 7, 7)
EXCEPT_CODE ( 8, 7); EXCEPT      ( 9, 7); EXCEPT_CODE (10, 7); EXCEPT_CODE (11, 7)
EXCEPT_CODE (12, 7); EXCEPT_CODE (13, 7); EXCEPT_CODE (14, 7); EXCEPT      (15, 7)
EXCEPT      (16, 7); EXCEPT_CODE (17, 7); EXCEPT      (18, 7); EXCEPT      (19, 7)
EXCEPT      (20, 7); EXCEPT      (21, 7); EXCEPT      (22, 7); EXCEPT      (23, 7)
EXCEPT      (24, 7); EXCEPT      (25, 7); EXCEPT      (26, 7); EXCEPT      (27, 7)
EXCEPT      (28, 7); EXCEPT      (29, 7); EXCEPT      (30, 7); EXCEPT      (31, 7)

/* Vector reserved for handling a kernel oops; treat as an exception
 * and not an interrupt
 */
EXCEPT(Z_X86_OOPS_VECTOR, 7);
#else
EXCEPT      ( 0); EXCEPT      ( 1); EXCEPT      ( 2); EXCEPT      ( 3)
EXCEPT      ( 4); EXCEPT      ( 5); EXCEPT      ( 6); EXCEPT      ( 7)
EXCEPT_CODE ( 8); EXCEPT      ( 9); EXCEPT_CODE (10); EXCEPT_CODE (11)
EXCEPT_CODE (12); EXCEPT_CODE (13); EXCEPT_CODE (14); EXCEPT      (15)
EXCEPT      (16); EXCEPT_CODE (17); EXCEPT      (18); EXCEPT      (19)
EXCEPT      (20); EXCEPT      (21); EXCEPT      (22); EXCEPT      (23)
EXCEPT      (24); EXCEPT      (25); EXCEPT      (26); EXCEPT      (27)
EXCEPT      (28); EXCEPT      (29); EXCEPT      (30); EXCEPT      (31)

/* Vector reserved for handling a kernel oops; treat as an exception
 * and not an interrupt
 */
EXCEPT(Z_X86_OOPS_VECTOR);
#endif /* CONFIG_X86_KPTI */

/*
 * When we arrive at 'irq' from one of the IRQ(X) stubs,
 * we're on the "freshest" IRQ stack (or the trampoline stack if we came from
 * user mode and KPTI is enabled) and it contains:
 *
 * SS
 * RSP
 * RFLAGS
 * CS
 * RIP
 * (vector number - IV_IRQS) <-- RSP points here
 */
.globl x86_irq_funcs	/* see irq_manage.c .. */
.globl x86_irq_args	/* .. for these definitions */

irq:
	pushq %rsi

#ifdef CONFIG_USERSPACE
	/* Swap GS register values if we came in from user mode */
	testb $0x3, 24(%rsp)
	jz 1f
	swapgs
#ifdef CONFIG_X86_KPTI
	/* Load kernel's page table. NOTE: presumes phys=virt */
	movq $z_x86_kernel_ptables, %rsi
	movq %rsi, %cr3
#endif /* CONFIG_X86_KPTI */
1:
#ifdef CONFIG_X86_BOUNDS_CHECK_BYPASS_MITIGATION
	/* swapgs variant of Spectre V1.
Disable speculation past this point */ lfence #endif /* CONFIG_X86_BOUNDS_CHECK_BYPASS_MITIGATION */ #ifdef CONFIG_X86_KPTI /* Save old trampoline stack pointer in RSI */ movq %rsp, %rsi /* Switch to the interrupt stack stack */ movq %gs:__x86_tss64_t_ist1_OFFSET, %rsp /* Transplant trampoline stack contents */ pushq 48(%rsi) /* SS */ pushq 40(%rsi) /* RSP */ pushq 32(%rsi) /* RFLAGS */ pushq 24(%rsi) /* CS */ pushq 16(%rsi) /* RIP */ pushq 8(%rsi) /* Vector */ pushq (%rsi) /* Stashed RSI value */ movq $0, (%rsi) /* Cover our tracks, stashed RSI might be sensitive */ #endif /* CONFIG_X86_KPTI */ #endif /* CONFIG_USERSPACE */ movq %gs:__x86_tss64_t_cpu_OFFSET, %rsi /* * Bump the IRQ nesting count and move to the next IRQ stack. * That's sufficient to safely re-enable interrupts, so if we * haven't reached the maximum nesting depth yet, do it. */ incl ___cpu_t_nested_OFFSET(%rsi) subq $CONFIG_ISR_SUBSTACK_SIZE, %gs:__x86_tss64_t_ist1_OFFSET cmpl $CONFIG_ISR_DEPTH, ___cpu_t_nested_OFFSET(%rsi) jz 1f sti 1: cmpl $1, ___cpu_t_nested_OFFSET(%rsi) je irq_enter_unnested /* * if we're a nested interrupt, we have to dump the state to the * stack. we play some games here to re-arrange the stack thusly: * * SS RSP RFLAGS CS RIP RAX RSI * RCX RDX RDI R8 R9 R10 R11 * X86_FXSAVE_SIZE bytes of SSE data <-- RSP points here * * note that the final value of RSP must be 16-byte aligned here, * both to satisfy FXSAVE/FXRSTOR but also to honor the C ABI. */ irq_enter_nested: /* Nested IRQ: dump register state to stack. 
*/ pushq %rcx movq 16(%rsp), %rcx /* RCX = vector */ movq %rax, 16(%rsp) /* looks like we pushed RAX, not the vector */ pushq %rdx pushq %rdi pushq %r8 pushq %r9 pushq %r10 pushq %r11 subq $X86_FXSAVE_SIZE, %rsp fxsave (%rsp) jmp irq_dispatch irq_enter_unnested: /* Not nested: dump state to thread struct for __resume */ movq ___cpu_t_current_OFFSET(%rsi), %rsi orb $X86_THREAD_FLAG_ALL, _thread_offset_to_flags(%rsi) fxsave _thread_offset_to_sse(%rsi) movq %rbx, _thread_offset_to_rbx(%rsi) movq %rbp, _thread_offset_to_rbp(%rsi) movq %r12, _thread_offset_to_r12(%rsi) movq %r13, _thread_offset_to_r13(%rsi) movq %r14, _thread_offset_to_r14(%rsi) movq %r15, _thread_offset_to_r15(%rsi) movq %rax, _thread_offset_to_rax(%rsi) movq %rcx, _thread_offset_to_rcx(%rsi) movq %rdx, _thread_offset_to_rdx(%rsi) movq %rdi, _thread_offset_to_rdi(%rsi) movq %r8, _thread_offset_to_r8(%rsi) movq %r9, _thread_offset_to_r9(%rsi) movq %r10, _thread_offset_to_r10(%rsi) movq %r11, _thread_offset_to_r11(%rsi) popq %rax /* RSI */ movq %rax, _thread_offset_to_rsi(%rsi) popq %rcx /* vector number */ popq %rax /* RIP */ movq %rax, _thread_offset_to_rip(%rsi) popq %rax /* CS */ #ifdef CONFIG_USERSPACE movq %rax, _thread_offset_to_cs(%rsi) #endif popq %rax /* RFLAGS */ movq %rax, _thread_offset_to_rflags(%rsi) popq %rax /* RSP */ movq %rax, _thread_offset_to_rsp(%rsi) popq %rax /* SS */ #ifdef CONFIG_USERSPACE movq %rax, _thread_offset_to_ss(%rsi) #endif irq_dispatch: #ifdef CONFIG_SCHED_THREAD_USAGE pushq %rcx call z_sched_usage_stop popq %rcx #endif movq x86_irq_funcs(,%rcx,8), %rax movq x86_irq_args(,%rcx,8), %rdi call *%rax xorq %rax, %rax #ifdef CONFIG_X2APIC xorl %edx, %edx movl $(X86_X2APIC_BASE_MSR + (LOAPIC_EOI >> 4)), %ecx wrmsr #else /* xAPIC */ movq Z_TOPLEVEL_RAM_NAME(LOAPIC_REGS_STR), %rdx movl %eax, LOAPIC_EOI(%rdx) #endif /* CONFIG_X2APIC */ movq %gs:__x86_tss64_t_cpu_OFFSET, %rsi cli addq $CONFIG_ISR_SUBSTACK_SIZE, %gs:__x86_tss64_t_ist1_OFFSET decl ___cpu_t_nested_OFFSET(%rsi) jnz 
irq_exit_nested /* not nested; ask the scheduler who's up next and resume it */ movq ___cpu_t_current_OFFSET(%rsi), %rdi call z_get_next_switch_handle movq %rax, %rdi jmp __resume irq_exit_nested: fxrstor (%rsp) addq $X86_FXSAVE_SIZE, %rsp popq %r11 popq %r10 popq %r9 popq %r8 popq %rdi popq %rdx popq %rcx popq %rsi popq %rax iretq #define IRQ(nr) vector_ ## nr: pushq $(nr - IV_IRQS); jmp irq IRQ( 33); IRQ( 34); IRQ( 35); IRQ( 36); IRQ( 37); IRQ( 38); IRQ( 39) IRQ( 40); IRQ( 41); IRQ( 42); IRQ( 43); IRQ( 44); IRQ( 45); IRQ( 46); IRQ( 47) IRQ( 48); IRQ( 49); IRQ( 50); IRQ( 51); IRQ( 52); IRQ( 53); IRQ( 54); IRQ( 55) IRQ( 56); IRQ( 57); IRQ( 58); IRQ( 59); IRQ( 60); IRQ( 61); IRQ( 62); IRQ( 63) IRQ( 64); IRQ( 65); IRQ( 66); IRQ( 67); IRQ( 68); IRQ( 69); IRQ( 70); IRQ( 71) IRQ( 72); IRQ( 73); IRQ( 74); IRQ( 75); IRQ( 76); IRQ( 77); IRQ( 78); IRQ( 79) IRQ( 80); IRQ( 81); IRQ( 82); IRQ( 83); IRQ( 84); IRQ( 85); IRQ( 86); IRQ( 87) IRQ( 88); IRQ( 89); IRQ( 90); IRQ( 91); IRQ( 92); IRQ( 93); IRQ( 94); IRQ( 95) IRQ( 96); IRQ( 97); IRQ( 98); IRQ( 99); IRQ(100); IRQ(101); IRQ(102); IRQ(103) IRQ(104); IRQ(105); IRQ(106); IRQ(107); IRQ(108); IRQ(109); IRQ(110); IRQ(111) IRQ(112); IRQ(113); IRQ(114); IRQ(115); IRQ(116); IRQ(117); IRQ(118); IRQ(119) IRQ(120); IRQ(121); IRQ(122); IRQ(123); IRQ(124); IRQ(125); IRQ(126); IRQ(127) IRQ(128); IRQ(129); IRQ(130); IRQ(131); IRQ(132); IRQ(133); IRQ(134); IRQ(135) IRQ(136); IRQ(137); IRQ(138); IRQ(139); IRQ(140); IRQ(141); IRQ(142); IRQ(143) IRQ(144); IRQ(145); IRQ(146); IRQ(147); IRQ(148); IRQ(149); IRQ(150); IRQ(151) IRQ(152); IRQ(153); IRQ(154); IRQ(155); IRQ(156); IRQ(157); IRQ(158); IRQ(159) IRQ(160); IRQ(161); IRQ(162); IRQ(163); IRQ(164); IRQ(165); IRQ(166); IRQ(167) IRQ(168); IRQ(169); IRQ(170); IRQ(171); IRQ(172); IRQ(173); IRQ(174); IRQ(175) IRQ(176); IRQ(177); IRQ(178); IRQ(179); IRQ(180); IRQ(181); IRQ(182); IRQ(183) IRQ(184); IRQ(185); IRQ(186); IRQ(187); IRQ(188); IRQ(189); IRQ(190); IRQ(191) IRQ(192); IRQ(193); IRQ(194); 
IRQ(195); IRQ(196); IRQ(197); IRQ(198); IRQ(199) IRQ(200); IRQ(201); IRQ(202); IRQ(203); IRQ(204); IRQ(205); IRQ(206); IRQ(207) IRQ(208); IRQ(209); IRQ(210); IRQ(211); IRQ(212); IRQ(213); IRQ(214); IRQ(215) IRQ(216); IRQ(217); IRQ(218); IRQ(219); IRQ(220); IRQ(221); IRQ(222); IRQ(223) IRQ(224); IRQ(225); IRQ(226); IRQ(227); IRQ(228); IRQ(229); IRQ(230); IRQ(231) IRQ(232); IRQ(233); IRQ(234); IRQ(235); IRQ(236); IRQ(237); IRQ(238); IRQ(239) IRQ(240); IRQ(241); IRQ(242); IRQ(243); IRQ(244); IRQ(245); IRQ(246); IRQ(247) IRQ(248); IRQ(249); IRQ(250); IRQ(251); IRQ(252); IRQ(253); IRQ(254); IRQ(255) .section .lorodata,"a" /* * IDT. */ /* Descriptor type. Traps don't implicitly disable interrupts. User variants * can be invoked by software running in user mode (ring 3). * * For KPTI everything lands on the trampoline stack and we must get off of * it before re-enabling interrupts; use interrupt gates for everything. */ #define INTR 0x8e #define USER_INTR 0xee #ifdef CONFIG_X86_KPTI #define TRAP INTR #define USER_TRAP UINTR #else #define TRAP 0x8f #define USER_TRAP 0xef #endif #define IDT(nr, type, ist) \ .word vector_ ## nr, X86_KERNEL_CS; \ .byte ist, type; \ .word 0, 0, 0, 0, 0 /* Which IST entry in TSS to use for automatic stack switching, or 0 if * no automatic switch is to take place. Stack page must be present in * the current page tables, if KPTI is on only the trampoline stack and * the current user stack can be accessed. */ #ifdef CONFIG_X86_KPTI /* Everything lands on ist2, which is set to the trampoline stack. 
* Interrupt/exception entry updates page tables and manually switches to * the irq/exception stacks stored in ist1/ist7 */ #define IRQ_STACK 2 #define EXC_STACK 2 #define BAD_STACK 2 #define NMI_STACK 2 #else #define IRQ_STACK 1 #define NMI_STACK 6 /* NMI stack */ #define EXC_STACK 7 #define BAD_STACK 7 /* Horrible things: double faults, MCEs */ #endif .align 16 idt: IDT( 0, TRAP, EXC_STACK); IDT( 1, TRAP, EXC_STACK) IDT( 2, TRAP, NMI_STACK); IDT( 3, TRAP, EXC_STACK) IDT( 4, TRAP, EXC_STACK); IDT( 5, TRAP, EXC_STACK) IDT( 6, TRAP, EXC_STACK); IDT( 7, TRAP, EXC_STACK) IDT( 8, TRAP, BAD_STACK); IDT( 9, TRAP, EXC_STACK) IDT( 10, TRAP, EXC_STACK); IDT( 11, TRAP, EXC_STACK) IDT( 12, TRAP, EXC_STACK); IDT( 13, TRAP, EXC_STACK) IDT( 14, TRAP, EXC_STACK); IDT( 15, TRAP, EXC_STACK) IDT( 16, TRAP, EXC_STACK); IDT( 17, TRAP, EXC_STACK) IDT( 18, TRAP, BAD_STACK); IDT( 19, TRAP, EXC_STACK) IDT( 20, TRAP, EXC_STACK); IDT( 21, TRAP, EXC_STACK) IDT( 22, TRAP, EXC_STACK); IDT( 23, TRAP, EXC_STACK) IDT( 24, TRAP, EXC_STACK); IDT( 25, TRAP, EXC_STACK) IDT( 26, TRAP, EXC_STACK); IDT( 27, TRAP, EXC_STACK) IDT( 28, TRAP, EXC_STACK); IDT( 29, TRAP, EXC_STACK) IDT( 30, TRAP, EXC_STACK); IDT( 31, TRAP, EXC_STACK) /* Oops vector can be invoked from Ring 3 and runs on exception stack */ IDT(Z_X86_OOPS_VECTOR, USER_INTR, EXC_STACK); IDT( 33, INTR, IRQ_STACK) IDT( 34, INTR, IRQ_STACK); IDT( 35, INTR, IRQ_STACK) IDT( 36, INTR, IRQ_STACK); IDT( 37, INTR, IRQ_STACK) IDT( 38, INTR, IRQ_STACK); IDT( 39, INTR, IRQ_STACK) IDT( 40, INTR, IRQ_STACK); IDT( 41, INTR, IRQ_STACK) IDT( 42, INTR, IRQ_STACK); IDT( 43, INTR, IRQ_STACK) IDT( 44, INTR, IRQ_STACK); IDT( 45, INTR, IRQ_STACK) IDT( 46, INTR, IRQ_STACK); IDT( 47, INTR, IRQ_STACK) IDT( 48, INTR, IRQ_STACK); IDT( 49, INTR, IRQ_STACK) IDT( 50, INTR, IRQ_STACK); IDT( 51, INTR, IRQ_STACK) IDT( 52, INTR, IRQ_STACK); IDT( 53, INTR, IRQ_STACK) IDT( 54, INTR, IRQ_STACK); IDT( 55, INTR, IRQ_STACK) IDT( 56, INTR, IRQ_STACK); IDT( 57, INTR, IRQ_STACK) IDT( 58, 
INTR, IRQ_STACK); IDT( 59, INTR, IRQ_STACK) IDT( 60, INTR, IRQ_STACK); IDT( 61, INTR, IRQ_STACK) IDT( 62, INTR, IRQ_STACK); IDT( 63, INTR, IRQ_STACK) IDT( 64, INTR, IRQ_STACK); IDT( 65, INTR, IRQ_STACK) IDT( 66, INTR, IRQ_STACK); IDT( 67, INTR, IRQ_STACK) IDT( 68, INTR, IRQ_STACK); IDT( 69, INTR, IRQ_STACK) IDT( 70, INTR, IRQ_STACK); IDT( 71, INTR, IRQ_STACK) IDT( 72, INTR, IRQ_STACK); IDT( 73, INTR, IRQ_STACK) IDT( 74, INTR, IRQ_STACK); IDT( 75, INTR, IRQ_STACK) IDT( 76, INTR, IRQ_STACK); IDT( 77, INTR, IRQ_STACK) IDT( 78, INTR, IRQ_STACK); IDT( 79, INTR, IRQ_STACK) IDT( 80, INTR, IRQ_STACK); IDT( 81, INTR, IRQ_STACK) IDT( 82, INTR, IRQ_STACK); IDT( 83, INTR, IRQ_STACK) IDT( 84, INTR, IRQ_STACK); IDT( 85, INTR, IRQ_STACK) IDT( 86, INTR, IRQ_STACK); IDT( 87, INTR, IRQ_STACK) IDT( 88, INTR, IRQ_STACK); IDT( 89, INTR, IRQ_STACK) IDT( 90, INTR, IRQ_STACK); IDT( 91, INTR, IRQ_STACK) IDT( 92, INTR, IRQ_STACK); IDT( 93, INTR, IRQ_STACK) IDT( 94, INTR, IRQ_STACK); IDT( 95, INTR, IRQ_STACK) IDT( 96, INTR, IRQ_STACK); IDT( 97, INTR, IRQ_STACK) IDT( 98, INTR, IRQ_STACK); IDT( 99, INTR, IRQ_STACK) IDT(100, INTR, IRQ_STACK); IDT(101, INTR, IRQ_STACK) IDT(102, INTR, IRQ_STACK); IDT(103, INTR, IRQ_STACK) IDT(104, INTR, IRQ_STACK); IDT(105, INTR, IRQ_STACK) IDT(106, INTR, IRQ_STACK); IDT(107, INTR, IRQ_STACK) IDT(108, INTR, IRQ_STACK); IDT(109, INTR, IRQ_STACK) IDT(110, INTR, IRQ_STACK); IDT(111, INTR, IRQ_STACK) IDT(112, INTR, IRQ_STACK); IDT(113, INTR, IRQ_STACK) IDT(114, INTR, IRQ_STACK); IDT(115, INTR, IRQ_STACK) IDT(116, INTR, IRQ_STACK); IDT(117, INTR, IRQ_STACK) IDT(118, INTR, IRQ_STACK); IDT(119, INTR, IRQ_STACK) IDT(120, INTR, IRQ_STACK); IDT(121, INTR, IRQ_STACK) IDT(122, INTR, IRQ_STACK); IDT(123, INTR, IRQ_STACK) IDT(124, INTR, IRQ_STACK); IDT(125, INTR, IRQ_STACK) IDT(126, INTR, IRQ_STACK); IDT(127, INTR, IRQ_STACK) IDT(128, INTR, IRQ_STACK); IDT(129, INTR, IRQ_STACK) IDT(130, INTR, IRQ_STACK); IDT(131, INTR, IRQ_STACK) IDT(132, INTR, IRQ_STACK); IDT(133, INTR, 
IRQ_STACK) IDT(134, INTR, IRQ_STACK); IDT(135, INTR, IRQ_STACK) IDT(136, INTR, IRQ_STACK); IDT(137, INTR, IRQ_STACK) IDT(138, INTR, IRQ_STACK); IDT(139, INTR, IRQ_STACK) IDT(140, INTR, IRQ_STACK); IDT(141, INTR, IRQ_STACK) IDT(142, INTR, IRQ_STACK); IDT(143, INTR, IRQ_STACK) IDT(144, INTR, IRQ_STACK); IDT(145, INTR, IRQ_STACK) IDT(146, INTR, IRQ_STACK); IDT(147, INTR, IRQ_STACK) IDT(148, INTR, IRQ_STACK); IDT(149, INTR, IRQ_STACK) IDT(150, INTR, IRQ_STACK); IDT(151, INTR, IRQ_STACK) IDT(152, INTR, IRQ_STACK); IDT(153, INTR, IRQ_STACK) IDT(154, INTR, IRQ_STACK); IDT(155, INTR, IRQ_STACK) IDT(156, INTR, IRQ_STACK); IDT(157, INTR, IRQ_STACK) IDT(158, INTR, IRQ_STACK); IDT(159, INTR, IRQ_STACK) IDT(160, INTR, IRQ_STACK); IDT(161, INTR, IRQ_STACK) IDT(162, INTR, IRQ_STACK); IDT(163, INTR, IRQ_STACK) IDT(164, INTR, IRQ_STACK); IDT(165, INTR, IRQ_STACK) IDT(166, INTR, IRQ_STACK); IDT(167, INTR, IRQ_STACK) IDT(168, INTR, IRQ_STACK); IDT(169, INTR, IRQ_STACK) IDT(170, INTR, IRQ_STACK); IDT(171, INTR, IRQ_STACK) IDT(172, INTR, IRQ_STACK); IDT(173, INTR, IRQ_STACK) IDT(174, INTR, IRQ_STACK); IDT(175, INTR, IRQ_STACK) IDT(176, INTR, IRQ_STACK); IDT(177, INTR, IRQ_STACK) IDT(178, INTR, IRQ_STACK); IDT(179, INTR, IRQ_STACK) IDT(180, INTR, IRQ_STACK); IDT(181, INTR, IRQ_STACK) IDT(182, INTR, IRQ_STACK); IDT(183, INTR, IRQ_STACK) IDT(184, INTR, IRQ_STACK); IDT(185, INTR, IRQ_STACK) IDT(186, INTR, IRQ_STACK); IDT(187, INTR, IRQ_STACK) IDT(188, INTR, IRQ_STACK); IDT(189, INTR, IRQ_STACK) IDT(190, INTR, IRQ_STACK); IDT(191, INTR, IRQ_STACK) IDT(192, INTR, IRQ_STACK); IDT(193, INTR, IRQ_STACK) IDT(194, INTR, IRQ_STACK); IDT(195, INTR, IRQ_STACK) IDT(196, INTR, IRQ_STACK); IDT(197, INTR, IRQ_STACK) IDT(198, INTR, IRQ_STACK); IDT(199, INTR, IRQ_STACK) IDT(200, INTR, IRQ_STACK); IDT(201, INTR, IRQ_STACK) IDT(202, INTR, IRQ_STACK); IDT(203, INTR, IRQ_STACK) IDT(204, INTR, IRQ_STACK); IDT(205, INTR, IRQ_STACK) IDT(206, INTR, IRQ_STACK); IDT(207, INTR, IRQ_STACK) IDT(208, INTR, IRQ_STACK); 
IDT(209, INTR, IRQ_STACK) IDT(210, INTR, IRQ_STACK); IDT(211, INTR, IRQ_STACK) IDT(212, INTR, IRQ_STACK); IDT(213, INTR, IRQ_STACK) IDT(214, INTR, IRQ_STACK); IDT(215, INTR, IRQ_STACK) IDT(216, INTR, IRQ_STACK); IDT(217, INTR, IRQ_STACK) IDT(218, INTR, IRQ_STACK); IDT(219, INTR, IRQ_STACK) IDT(220, INTR, IRQ_STACK); IDT(221, INTR, IRQ_STACK) IDT(222, INTR, IRQ_STACK); IDT(223, INTR, IRQ_STACK) IDT(224, INTR, IRQ_STACK); IDT(225, INTR, IRQ_STACK) IDT(226, INTR, IRQ_STACK); IDT(227, INTR, IRQ_STACK) IDT(228, INTR, IRQ_STACK); IDT(229, INTR, IRQ_STACK) IDT(230, INTR, IRQ_STACK); IDT(231, INTR, IRQ_STACK) IDT(232, INTR, IRQ_STACK); IDT(233, INTR, IRQ_STACK) IDT(234, INTR, IRQ_STACK); IDT(235, INTR, IRQ_STACK) IDT(236, INTR, IRQ_STACK); IDT(237, INTR, IRQ_STACK) IDT(238, INTR, IRQ_STACK); IDT(239, INTR, IRQ_STACK) IDT(240, INTR, IRQ_STACK); IDT(241, INTR, IRQ_STACK) IDT(242, INTR, IRQ_STACK); IDT(243, INTR, IRQ_STACK) IDT(244, INTR, IRQ_STACK); IDT(245, INTR, IRQ_STACK) IDT(246, INTR, IRQ_STACK); IDT(247, INTR, IRQ_STACK) IDT(248, INTR, IRQ_STACK); IDT(249, INTR, IRQ_STACK) IDT(250, INTR, IRQ_STACK); IDT(251, INTR, IRQ_STACK) IDT(252, INTR, IRQ_STACK); IDT(253, INTR, IRQ_STACK) IDT(254, INTR, IRQ_STACK); IDT(255, INTR, IRQ_STACK) idt_end: idt48: /* LIDT descriptor for 32 bit mode */ .word (idt_end - idt - 1) .long idt idt80: /* LIDT descriptor for 64 bit mode */ .word (idt_end - idt - 1) .quad idt .section .gdt,"ad" /* * GDT - a single GDT is shared by all threads (and, eventually, all CPUs). * This layout must agree with the selectors in * include/arch/x86/intel64/thread.h. 
* * The 64-bit kernel code and data segment descriptors must be in sequence as * required by 'syscall' * * The 32-bit user code, 64-bit user code, and 64-bit user data segment * descriptors must be in sequence as required by 'sysret' */ .align 8 gdt: .word 0, 0, 0, 0 /* 0x00: null descriptor */ .word 0xFFFF, 0, 0x9A00, 0x00CF /* 0x08: 32-bit kernel code */ .word 0xFFFF, 0, 0x9200, 0x00CF /* 0x10: 32-bit kernel data */ .word 0, 0, 0x9800, 0x0020 /* 0x18: 64-bit kernel code */ .word 0, 0, 0x9200, 0x0000 /* 0x20: 64-bit kernel data */ .word 0xFFFF, 0, 0xFA00, 0x00CF /* 0x28: 32-bit user code (unused) */ .word 0, 0, 0xF200, 0x0000 /* 0x30: 64-bit user data */ .word 0, 0, 0xF800, 0x0020 /* 0x38: 64-bit user code */ /* Remaining entries are TSS for each enabled CPU */ DEFINE_TSS_STACK_ARRAY gdt_end: gdt48: /* LGDT descriptor for 32 bit mode */ .word (gdt_end - gdt - 1) .long gdt gdt80: /* LGDT descriptor for long mode */ .word (gdt_end - gdt - 1) .quad gdt ```
/content/code_sandbox/arch/x86/core/intel64/locore.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
12,511
```c /* */ #include <zephyr/kernel.h> #include <ksched.h> #include <zephyr/kernel_structs.h> #include <kernel_internal.h> #include <zephyr/logging/log.h> LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); /* NMI handlers should override weak implementation * return true if NMI is handled, false otherwise */ __weak bool z_x86_do_kernel_nmi(const struct arch_esf *esf) { ARG_UNUSED(esf); return false; } void z_x86_exception(struct arch_esf *esf) { switch (esf->vector) { case Z_X86_OOPS_VECTOR: z_x86_do_kernel_oops(esf); break; case IV_PAGE_FAULT: z_x86_page_fault_handler(esf); break; case IV_NON_MASKABLE_INTERRUPT: if (!z_x86_do_kernel_nmi(esf)) { z_x86_unhandled_cpu_exception(esf->vector, esf); CODE_UNREACHABLE; } break; default: z_x86_unhandled_cpu_exception(esf->vector, esf); CODE_UNREACHABLE; } } #ifdef CONFIG_USERSPACE void arch_syscall_oops(void *ssf_ptr) { struct x86_ssf *ssf = ssf_ptr; LOG_ERR("Bad system call from RIP 0x%lx", ssf->rip); z_x86_fatal_error(K_ERR_KERNEL_OOPS, NULL); } #endif /* CONFIG_USERSPACE */ ```
/content/code_sandbox/arch/x86/core/intel64/fatal.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
314
```c /* */ #include <zephyr/kernel.h> #include <ksched.h> #include <zephyr/arch/cpu.h> #include <kernel_arch_data.h> #include <kernel_arch_func.h> #include <zephyr/drivers/interrupt_controller/sysapic.h> #include <zephyr/drivers/interrupt_controller/loapic.h> #include <zephyr/irq.h> #include <zephyr/logging/log.h> #include <zephyr/sys/iterable_sections.h> LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); unsigned char _irq_to_interrupt_vector[CONFIG_MAX_IRQ_LINES]; #define NR_IRQ_VECTORS (IV_NR_VECTORS - IV_IRQS) /* # vectors free for IRQs */ void (*x86_irq_funcs[NR_IRQ_VECTORS])(const void *arg); const void *x86_irq_args[NR_IRQ_VECTORS]; #if defined(CONFIG_INTEL_VTD_ICTL) #include <zephyr/device.h> #include <zephyr/drivers/interrupt_controller/intel_vtd.h> static const struct device *const vtd = DEVICE_DT_GET_ONE(intel_vt_d); #endif /* CONFIG_INTEL_VTD_ICTL */ static void irq_spurious(const void *arg) { LOG_ERR("Spurious interrupt, vector %d\n", (uint32_t)(uint64_t)arg); z_fatal_error(K_ERR_SPURIOUS_IRQ, NULL); } void x86_64_irq_init(void) { for (int i = 0; i < NR_IRQ_VECTORS; i++) { x86_irq_funcs[i] = irq_spurious; x86_irq_args[i] = (const void *)(long)(i + IV_IRQS); } } int z_x86_allocate_vector(unsigned int priority, int prev_vector) { const int VECTORS_PER_PRIORITY = 16; const int MAX_PRIORITY = 13; int vector = prev_vector; int i; if (priority >= MAX_PRIORITY) { priority = MAX_PRIORITY; } if (vector == -1) { vector = (priority * VECTORS_PER_PRIORITY) + IV_IRQS; } for (i = 0; i < VECTORS_PER_PRIORITY; ++i, ++vector) { if (prev_vector != 1 && vector == prev_vector) { continue; } #ifdef CONFIG_IRQ_OFFLOAD if (vector == CONFIG_IRQ_OFFLOAD_VECTOR) { continue; } #endif if (vector == Z_X86_OOPS_VECTOR) { continue; } if (x86_irq_funcs[vector - IV_IRQS] == irq_spurious) { return vector; } } return -1; } void z_x86_irq_connect_on_vector(unsigned int irq, uint8_t vector, void (*func)(const void *arg), const void *arg) { _irq_to_interrupt_vector[irq] = vector; 
x86_irq_funcs[vector - IV_IRQS] = func; x86_irq_args[vector - IV_IRQS] = arg; } /* * N.B.: the API docs don't say anything about returning error values, but * this function returns -1 if a vector at the specific priority can't be * allocated. Whether it should simply __ASSERT instead is up for debate. */ int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, void (*routine)(const void *parameter), const void *parameter, uint32_t flags) { uint32_t key; int vector; __ASSERT(irq <= CONFIG_MAX_IRQ_LINES, "IRQ %u out of range", irq); key = irq_lock(); vector = z_x86_allocate_vector(priority, -1); if (vector >= 0) { #if defined(CONFIG_INTEL_VTD_ICTL) if (device_is_ready(vtd)) { int irte = vtd_allocate_entries(vtd, 1); __ASSERT(irte >= 0, "IRTE allocation must succeed"); vtd_set_irte_vector(vtd, irte, vector); vtd_set_irte_irq(vtd, irte, irq); } #endif /* CONFIG_INTEL_VTD_ICTL */ z_irq_controller_irq_config(vector, irq, flags); z_x86_irq_connect_on_vector(irq, vector, routine, parameter); } irq_unlock(key); return vector; } /* The first bit is used to indicate whether the list of reserved interrupts * have been initialized based on content stored in the irq_alloc linker * section in ROM. 
*/ #define IRQ_LIST_INITIALIZED 0 static ATOMIC_DEFINE(irq_reserved, CONFIG_MAX_IRQ_LINES); static void irq_init(void) { TYPE_SECTION_FOREACH(const uint8_t, irq_alloc, irq) { __ASSERT_NO_MSG(*irq < CONFIG_MAX_IRQ_LINES); atomic_set_bit(irq_reserved, *irq); } } unsigned int arch_irq_allocate(void) { unsigned int key = irq_lock(); int i; if (!atomic_test_and_set_bit(irq_reserved, IRQ_LIST_INITIALIZED)) { irq_init(); } for (i = 0; i < ARRAY_SIZE(irq_reserved); i++) { unsigned int fz, irq; while ((fz = find_lsb_set(~atomic_get(&irq_reserved[i])))) { irq = (fz - 1) + (i * sizeof(atomic_val_t) * 8); if (irq >= CONFIG_MAX_IRQ_LINES) { break; } if (!atomic_test_and_set_bit(irq_reserved, irq)) { irq_unlock(key); return irq; } } } irq_unlock(key); return UINT_MAX; } void arch_irq_set_used(unsigned int irq) { unsigned int key = irq_lock(); atomic_set_bit(irq_reserved, irq); irq_unlock(key); } bool arch_irq_is_used(unsigned int irq) { return atomic_test_bit(irq_reserved, irq); } ```
/content/code_sandbox/arch/x86/core/intel64/irq.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,242
```c
/*
 *
 */

/**
 * @file
 * @brief Thread support primitives
 *
 * This module provides core thread related primitives for the IA-32
 * processor architecture.
 */

#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/arch/x86/mmustructs.h>
#include <kswap.h>
#include <x86_mmu.h>

/* forward declaration */

/* Initial thread stack frame, such that everything is laid out as expected
 * for when z_swap() switches to it for the first time. The member order
 * mirrors the pop/ret sequence in the context switch code, so it must not
 * be rearranged.
 */
struct _x86_initial_frame {
	uint32_t swap_retval;	/* return value slot read after the swap */
	uint32_t ebp;		/* callee-saved registers restored by z_swap() */
	uint32_t ebx;
	uint32_t esi;
	uint32_t edi;
	void *thread_entry;	/* address 'ret' transfers to on first switch */
	uint32_t eflags;	/* initial EFLAGS for the new thread */
	k_thread_entry_t entry;	/* arguments consumed by z_thread_entry() */
	void *p1;
	void *p2;
	void *p3;
};

#ifdef CONFIG_X86_USERSPACE
/* Implemented in userspace.S */
extern void z_x86_syscall_entry_stub(void);

/* Syscalls invoked by 'int 0x80'. Installed in the IDT at DPL=3 so that
 * userspace can invoke it.
 */
NANO_CPU_INT_REGISTER(z_x86_syscall_entry_stub, -1, -1, 0x80, 3);
#endif /* CONFIG_X86_USERSPACE */

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
extern int z_float_disable(struct k_thread *thread);

/* Disable FP register preservation for 'thread'. Only supported with
 * lazy FPU sharing; returns -ENOTSUP otherwise.
 */
int arch_float_disable(struct k_thread *thread)
{
#if defined(CONFIG_LAZY_FPU_SHARING)
	return z_float_disable(thread);
#else
	return -ENOTSUP;
#endif /* CONFIG_LAZY_FPU_SHARING */
}

extern int z_float_enable(struct k_thread *thread, unsigned int options);

/* Enable FP register preservation for 'thread' with the given options.
 * Only supported with lazy FPU sharing; returns -ENOTSUP otherwise.
 */
int arch_float_enable(struct k_thread *thread, unsigned int options)
{
#if defined(CONFIG_LAZY_FPU_SHARING)
	return z_float_enable(thread, options);
#else
	return -ENOTSUP;
#endif /* CONFIG_LAZY_FPU_SHARING */
}
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

/* Architecture hook for thread creation: build the initial stack frame so
 * the first z_swap() into 'thread' lands in the right entry point with
 * entry/p1/p2/p3 laid out where z_thread_entry() expects them.
 *
 * thread    - thread object being initialized
 * stack     - thread's stack object (used for guard-page setup)
 * stack_ptr - initial top-of-stack pointer for the new thread
 * entry     - thread entry function, invoked with p1/p2/p3
 */
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3)
{
	void *swap_entry;
	struct _x86_initial_frame *initial_frame;

#if defined(CONFIG_X86_STACK_PROTECTION) && !defined(CONFIG_THREAD_STACK_MEM_MAPPED)
	/* This unconditionally set the first page of stack as guard page,
	 * which is only needed if the stack is not memory mapped.
	 */
	z_x86_set_stack_guard(stack);
#endif

#ifdef CONFIG_USERSPACE
	/* May return a kernel-entry wrapper instead of z_thread_entry */
	swap_entry = z_x86_userspace_prepare_thread(thread);
#else
	swap_entry = z_thread_entry;
#endif

	/* Create an initial context on the stack expected by z_swap() */
	initial_frame = Z_STACK_PTR_TO_FRAME(struct _x86_initial_frame,
					     stack_ptr);

	/* z_thread_entry() arguments */
	initial_frame->entry = entry;
	initial_frame->p1 = p1;
	initial_frame->p2 = p2;
	initial_frame->p3 = p3;

	initial_frame->eflags = EFLAGS_INITIAL;
#ifdef _THREAD_WRAPPER_REQUIRED
	/* Wrapper pulls the real entry point out of EDI before jumping */
	initial_frame->edi = (uint32_t)swap_entry;
	initial_frame->thread_entry = z_x86_thread_entry_wrapper;
#else
	initial_frame->thread_entry = swap_entry;
#endif /* _THREAD_WRAPPER_REQUIRED */

	/* Remaining _x86_initial_frame members can be garbage, z_thread_entry()
	 * doesn't care about their state when execution begins
	 */
	thread->callee_saved.esp = (unsigned long)initial_frame;
#if defined(CONFIG_LAZY_FPU_SHARING)
	thread->arch.excNestCount = 0;
#endif /* CONFIG_LAZY_FPU_SHARING */
	thread->arch.flags = 0;

	/*
	 * When "eager FPU sharing" mode is enabled, FPU registers must be
	 * initialised at the time of thread creation because the floating-point
	 * context is always active and no further FPU initialisation is performed
	 * later.
	 */
#if defined(CONFIG_EAGER_FPU_SHARING)
	/* x87 control word / tag word defaults (all exceptions masked) */
	thread->arch.preempFloatReg.floatRegsUnion.fpRegs.fcw = 0x037f;
	thread->arch.preempFloatReg.floatRegsUnion.fpRegs.ftw = 0xffff;
#if defined(CONFIG_X86_SSE)
	/* MXCSR default: all SSE exceptions masked */
	thread->arch.preempFloatReg.floatRegsUnion.fpRegsEx.mxcsr = 0x1f80;
#endif /* CONFIG_X86_SSE */
#endif /* CONFIG_EAGER_FPU_SHARING */
}
```
/content/code_sandbox/arch/x86/core/ia32/thread.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
948
```unknown /* * */ /** * @file * @brief Crt0 module for the IA-32 boards * * This module contains the initial code executed by the Zephyr Kernel ELF image * after having been loaded into RAM. * * Note that most addresses (functions and variables) must be in physical * address space. Depending on page table setup, they may or may not be * available in virtual address space after loading of page table. */ #include <zephyr/arch/x86/ia32/asm.h> #include <zephyr/arch/x86/msr.h> #include <kernel_arch_data.h> #include <zephyr/arch/cpu.h> #include <zephyr/arch/x86/multiboot.h> #include <x86_mmu.h> #include <zephyr/kernel/mm.h> /* exports (private APIs) */ GTEXT(__start) /* externs */ GTEXT(z_prep_c) GTEXT(z_bss_zero) GTEXT(z_data_copy) GDATA(_idt_base_address) GDATA(z_interrupt_stacks) GDATA(z_x86_idt) #ifndef CONFIG_GDT_DYNAMIC GDATA(_gdt) #endif #if defined(CONFIG_X86_SSE) GDATA(_sse_mxcsr_default_value) #endif #if defined(CONFIG_THREAD_LOCAL_STORAGE) GTEXT(z_x86_early_tls_update_gdt) #endif GDATA(x86_cpu_boot_arg) .macro install_page_tables #ifdef CONFIG_X86_MMU /* Enable paging. If virtual memory is enabled, the instruction pointer * is currently at a physical address. There is an identity mapping * for all RAM, plus a virtual mapping of RAM starting at * CONFIG_KERNEL_VM_BASE using the same paging structures. * * Until we enable these page tables, only physical memory addresses * work. */ movl $K_MEM_PHYS_ADDR(z_x86_kernel_ptables), %eax movl %eax, %cr3 #ifdef CONFIG_X86_PAE /* Enable PAE */ movl %cr4, %eax orl $CR4_PAE, %eax movl %eax, %cr4 /* IA32_EFER NXE bit set */ movl $0xC0000080, %ecx rdmsr orl $0x800, %eax wrmsr #else /* Enable Page Size Extensions (allowing 4MB pages). * This is ignored if PAE is enabled so no need to do * this above in PAE code. 
*/ movl %cr4, %eax orl $CR4_PSE, %eax movl %eax, %cr4 #endif /* CONFIG_X86_PAE */ /* Enable paging (CR0.PG, bit 31) / write protect (CR0.WP, bit 16) */ movl %cr0, %eax orl $(CR0_PG | CR0_WP), %eax movl %eax, %cr0 #ifdef K_MEM_IS_VM_KERNEL /* Jump to a virtual address, which works because the identity and * virtual mappings both are to the same physical address. */ ljmp $CODE_SEG, $vm_enter vm_enter: /* We are now executing in virtual memory. We'll un-map the identity * mappings later once we are in the C domain */ #endif /* K_MEM_IS_VM_KERNEL */ #endif /* CONFIG_X86_MMU */ .endm SECTION_FUNC(BOOT_TEXT, __start) #include "../common.S" /* Enable write-back caching by clearing the NW and CD bits */ movl %cr0, %eax andl $0x9fffffff, %eax movl %eax, %cr0 /* * Ensure interrupts are disabled. Interrupts are enabled when * the first context switch occurs. */ cli /* * Although the bootloader sets up an Interrupt Descriptor Table (IDT) * and a Global Descriptor Table (GDT), the specification encourages * booted operating systems to setup their own IDT and GDT. */ #if CONFIG_SET_GDT /* load 32-bit operand size GDT */ lgdt K_MEM_PHYS_ADDR(_gdt_rom) /* If we set our own GDT, update the segment registers as well. */ movw $DATA_SEG, %ax /* data segment selector (entry = 3) */ movw %ax, %ds /* set DS */ movw %ax, %es /* set ES */ movw %ax, %ss /* set SS */ xorw %ax, %ax /* AX = 0 */ movw %ax, %fs /* Zero FS */ movw %ax, %gs /* Zero GS */ ljmp $CODE_SEG, $K_MEM_PHYS_ADDR(__csSet) /* set CS = 0x08 */ __csSet: #endif /* CONFIG_SET_GDT */ #if !defined(CONFIG_FPU) /* * Force an #NM exception for floating point instructions * since FP support hasn't been configured */ movl %cr0, %eax /* move CR0 to EAX */ orl $0x2e, %eax /* CR0[NE+TS+EM+MP]=1 */ movl %eax, %cr0 /* move EAX to CR0 */ #else /* * Permit use of x87 FPU instructions * * Note that all floating point exceptions are masked by default, * and that _no_ handler for x87 FPU exceptions (#MF) is provided. 
*/ movl %cr0, %eax /* move CR0 to EAX */ orl $0x22, %eax /* CR0[NE+MP]=1 */ andl $~0xc, %eax /* CR0[TS+EM]=0 */ movl %eax, %cr0 /* move EAX to CR0 */ fninit /* set x87 FPU to its default state */ #if defined(CONFIG_X86_SSE) /* * Permit use of SSE instructions * * Note that all SSE exceptions are masked by default, * and that _no_ handler for SSE exceptions (#XM) is provided. */ movl %cr4, %eax /* move CR4 to EAX */ orl $0x200, %eax /* CR4[OSFXSR] = 1 */ andl $~0x400, %eax /* CR4[OSXMMEXCPT] = 0 */ movl %eax, %cr4 /* move EAX to CR4 */ /* initialize SSE control/status reg */ ldmxcsr K_MEM_PHYS_ADDR(_sse_mxcsr_default_value) #endif /* CONFIG_X86_SSE */ #endif /* !CONFIG_FPU */ /* * Set the stack pointer to the area used for the interrupt stack. * Note this stack is used during the execution of __start() and * z_cstart() until the multi-tasking kernel is initialized. The * dual-purposing of this area of memory is safe since * interrupts are disabled until the first context switch. * * kernel/init.c enforces that the z_interrupt_stacks pointer and * the ISR stack size are some multiple of ARCH_STACK_PTR_ALIGN, which * is at least 4. */ #ifdef CONFIG_INIT_STACKS movl $0xAAAAAAAA, %eax leal K_MEM_PHYS_ADDR(z_interrupt_stacks), %edi #ifdef CONFIG_X86_STACK_PROTECTION addl $4096, %edi #endif stack_size_dwords = (CONFIG_ISR_STACK_SIZE / 4) movl $stack_size_dwords, %ecx rep stosl #endif movl $K_MEM_PHYS_ADDR(z_interrupt_stacks), %esp #ifdef CONFIG_X86_STACK_PROTECTION /* In this configuration, all stacks, including IRQ stack, are declared * with a 4K non-present guard page preceding the stack buffer */ addl $(CONFIG_ISR_STACK_SIZE + 4096), %esp #else addl $CONFIG_ISR_STACK_SIZE, %esp #endif #ifdef CONFIG_XIP /* Copy data from flash to RAM. * * This is a must is CONFIG_GDT_DYNAMIC is enabled, * as _gdt needs to be in RAM. */ call z_data_copy #endif /* Note that installing page tables must be done after * z_data_copy() as the page tables are being copied into * RAM there. 
*/ install_page_tables #ifdef CONFIG_GDT_DYNAMIC /* activate RAM-based Global Descriptor Table (GDT) */ lgdt %ds:_gdt #endif #if defined(CONFIG_X86_ENABLE_TSS) mov $MAIN_TSS, %ax ltr %ax #endif #ifdef K_MEM_IS_VM_KERNEL /* Need to reset the stack to virtual address after * page table is loaded. */ movl $z_interrupt_stacks, %esp #ifdef CONFIG_X86_STACK_PROTECTION addl $(CONFIG_ISR_STACK_SIZE + 4096), %esp #else addl $CONFIG_ISR_STACK_SIZE, %esp #endif #endif /* K_MEM_IS_VM_KERNEL */ #ifdef CONFIG_THREAD_LOCAL_STORAGE pushl %esp call z_x86_early_tls_update_gdt popl %esp #endif /* Clear BSS */ #ifdef CONFIG_LINKER_USE_BOOT_SECTION call z_bss_zero_boot #endif #ifdef CONFIG_LINKER_USE_PINNED_SECTION call z_bss_zero_pinned #endif #ifdef CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT /* Don't clear BSS if the section is not present * in memory at boot. Or else it would cause page * faults. Zeroing BSS will be done later once the * paging mechanism has been initialized. */ call z_bss_zero #endif /* load 32-bit operand size IDT */ lidt z_x86_idt movl $x86_cpu_boot_arg, %ebp /* Boot type to multiboot, ebx content will help to mitigate */ movl $MULTIBOOT_BOOT_TYPE, \ __x86_boot_arg_t_boot_type_OFFSET(%ebp) /* pointer to multiboot info, or NULL */ movl %ebx, __x86_boot_arg_t_arg_OFFSET(%ebp) pushl $x86_cpu_boot_arg call z_prep_c /* enter kernel; never returns */ #if defined(CONFIG_X86_SSE) /* SSE control & status register initial value */ _sse_mxcsr_default_value: .long 0x1f80 /* all SSE exceptions clear & masked */ #endif /* CONFIG_X86_SSE */ /* Interrupt Descriptor Table (IDT) definition */ z_x86_idt: .word (CONFIG_IDT_NUM_VECTORS * 8) - 1 /* limit: size of IDT-1 */ /* * Physical start address = 0. When executing natively, this * will be placed at the same location as the interrupt vector table * setup by the BIOS (or GRUB?). 
*/ /* IDT table start address */ .long _idt_base_address #ifdef CONFIG_SET_GDT /* * The following 3 GDT entries implement the so-called "basic * flat model", i.e. a single code segment descriptor and a single * data segment descriptor, giving the kernel access to a continuous, * unsegmented address space. Both segment descriptors map the entire * linear address space (i.e. 0 to 4 GB-1), thus the segmentation * mechanism will never generate "out of limit memory reference" * exceptions even if physical memory does not reside at the referenced * address. * * The 'A' (accessed) bit in the type field is set for all the * data/code segment descriptors to accommodate placing these entries * in ROM, to prevent the processor from freaking out when it tries * and fails to set it. */ SECTION_VAR(PINNED_RODATA, _gdt_rom) #ifndef CONFIG_GDT_DYNAMIC _gdt: #endif /* GDT should be aligned on 8-byte boundary for best processor * performance, see Section 3.5.1 of IA architecture SW developer * manual, Vol 3. */ .balign 8 /* Entry 0 (selector=0x0000): The "NULL descriptor". The CPU never * actually looks at this entry, so we stuff 6-byte the pseudo * descriptor here */ /* Limit on GDT */ .word K_MEM_PHYS_ADDR(_gdt_rom_end) - K_MEM_PHYS_ADDR(_gdt_rom) - 1 /* table address: _gdt_rom */ .long K_MEM_PHYS_ADDR(_gdt_rom) .word 0x0000 /* Entry 1 (selector=0x0008): Code descriptor: DPL0 */ .word 0xffff /* limit: xffff */ .word 0x0000 /* base : xxxx0000 */ .byte 0x00 /* base : xx00xxxx */ .byte 0x9b /* Accessed, Code e/r, Present, DPL0 */ .byte 0xcf /* limit: fxxxx, Page Gra, 32bit */ .byte 0x00 /* base : 00xxxxxx */ /* Entry 2 (selector=0x0010): Data descriptor: DPL0 */ .word 0xffff /* limit: xffff */ .word 0x0000 /* base : xxxx0000 */ .byte 0x00 /* base : xx00xxxx */ .byte 0x93 /* Accessed, Data r/w, Present, DPL0 */ .byte 0xcf /* limit: fxxxx, Page Gra, 32bit */ .byte 0x00 /* base : 00xxxxxx */ _gdt_rom_end: #endif ```
/content/code_sandbox/arch/x86/core/ia32/crt0.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,083
```c /* * */ /** * @file IRQ offload - x86 implementation */ #include <zephyr/kernel.h> #include <zephyr/irq_offload.h> extern void (*_irq_sw_handler)(void); NANO_CPU_INT_REGISTER(_irq_sw_handler, NANO_SOFT_IRQ, CONFIG_IRQ_OFFLOAD_VECTOR / 16, CONFIG_IRQ_OFFLOAD_VECTOR, 0); __pinned_bss static irq_offload_routine_t offload_routine; __pinned_bss static const void *offload_param; /* Called by asm stub */ __pinned_func void z_irq_do_offload(void) { offload_routine(offload_param); } __pinned_func void arch_irq_offload(irq_offload_routine_t routine, const void *parameter) { unsigned int key; /* * Lock interrupts here to prevent any concurrency issues with * the two globals */ key = irq_lock(); offload_routine = routine; offload_param = parameter; __asm__ volatile("int %[vector]" : : [vector] "i" (CONFIG_IRQ_OFFLOAD_VECTOR)); irq_unlock(key); } ```
/content/code_sandbox/arch/x86/core/ia32/irq_offload.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
246
```c /* * */ #include <string.h> #include <zephyr/debug/coredump.h> #define ARCH_HDR_VER 1 struct x86_arch_block { uint32_t vector; uint32_t code; struct { uint32_t eax; uint32_t ecx; uint32_t edx; uint32_t ebx; uint32_t esp; uint32_t ebp; uint32_t esi; uint32_t edi; uint32_t eip; uint32_t eflags; uint32_t cs; } r; } __packed; /* * This might be too large for stack space if defined * inside function. So do it here. */ static struct x86_arch_block arch_blk; void arch_coredump_info_dump(const struct arch_esf *esf) { struct coredump_arch_hdr_t hdr = { .id = COREDUMP_ARCH_HDR_ID, .hdr_version = ARCH_HDR_VER, .num_bytes = sizeof(arch_blk), }; /* Nothing to process */ if (esf == NULL) { return; } (void)memset(&arch_blk, 0, sizeof(arch_blk)); arch_blk.vector = z_x86_exception_vector; arch_blk.code = esf->errorCode; /* * 16 registers expected by GDB. * Not all are in ESF but the GDB stub * will need to send all 16 as one packet. * The stub will need to send undefined * for registers not presented in coredump. */ arch_blk.r.eax = esf->eax; arch_blk.r.ebx = esf->ebx; arch_blk.r.ecx = esf->ecx; arch_blk.r.edx = esf->edx; arch_blk.r.esp = esf->esp; arch_blk.r.ebp = esf->ebp; arch_blk.r.esi = esf->esi; arch_blk.r.edi = esf->edi; arch_blk.r.eip = esf->eip; arch_blk.r.eflags = esf->eflags; arch_blk.r.cs = esf->cs & 0xFFFFU; /* Send for output */ coredump_buffer_output((uint8_t *)&hdr, sizeof(hdr)); coredump_buffer_output((uint8_t *)&arch_blk, sizeof(arch_blk)); } uint16_t arch_coredump_tgt_code_get(void) { return COREDUMP_TGT_X86; } ```
/content/code_sandbox/arch/x86/core/ia32/coredump.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
540
```unknown /* * */ /** * @file * @brief Exception management support for IA-32 architecture * * This module implements assembly routines to manage exceptions (synchronous * interrupts) on the Intel IA-32 architecture. More specifically, * exceptions are implemented in this module. The stubs are invoked when entering * and exiting a C exception handler. */ #include <zephyr/arch/x86/ia32/asm.h> #include <zephyr/arch/x86/ia32/arch.h> /* For MK_ISR_NAME */ #include <offsets_short.h> /* exports (internal APIs) */ GTEXT(_exception_enter) GTEXT(_kernel_oops_handler) /* externs (internal APIs) */ GTEXT(z_x86_do_kernel_oops) /** * * @brief Inform the kernel of an exception * * This function is called from the exception stub created by nanoCpuExcConnect() * to inform the kernel of an exception. This routine currently does * _not_ increment a thread/interrupt specific exception count. Also, * execution of the exception handler occurs on the current stack, i.e. * this does not switch to another stack. The volatile integer * registers are saved on the stack, and control is returned back to the * exception stub. * * WARNINGS * * Host-based tools and the target-based GDB agent depend on the stack frame * created by this routine to determine the locations of volatile registers. * These tools must be updated to reflect any changes to the stack frame. * * C function prototype: * * void _exception_enter(uint32_t error_code, void *handler) * */ SECTION_FUNC(PINNED_TEXT, _exception_enter) /* * The gen_idt tool creates an interrupt-gate descriptor for * all connections. The processor will automatically clear the IF * bit in the EFLAGS register upon execution of the handler, thus * this does need not issue an 'cli' as the first instruction. * * Note that the processor has pushed both the EFLAGS register * and the linear return address (cs:eip) onto the stack prior * to invoking the handler specified in the IDT. * * Clear the direction flag. 
It is automatically restored when the * exception exits. */ cld #ifdef CONFIG_X86_KPTI call z_x86_trampoline_to_kernel #endif /* * Swap ecx and handler function on the current stack; */ xchgl %ecx, (%esp) /* By the time we get here, the stack should look like this: * ESP -> ECX (excepting task) * Exception Error code (or junk) * EIP (excepting task) * CS (excepting task) * EFLAGS (excepting task) * ... * * ECX now contains the address of the handler function */ /* * Push the remaining volatile registers on the existing stack. */ pushl %eax pushl %edx /* * Push the cooperative registers on the existing stack as they are * required by debug tools. */ pushl %edi pushl %esi pushl %ebx pushl %ebp #ifdef CONFIG_USERSPACE /* Test if interrupted context was in ring 3 */ testb $3, 36(%esp) jz 1f /* It was. The original stack pointer is on the stack 44 bytes * from the current top */ pushl 44(%esp) jmp 2f 1: #endif leal 44(%esp), %eax /* Calculate ESP before interrupt occurred */ pushl %eax /* Save calculated ESP */ #ifdef CONFIG_USERSPACE 2: #endif #ifdef CONFIG_GDBSTUB pushl %ds pushl %es pushl %fs pushl %gs pushl %ss #endif /* ESP is pointing to the ESF at this point */ #if defined(CONFIG_LAZY_FPU_SHARING) movl _kernel + _kernel_offset_to_current, %edx /* inc exception nest count */ incl _thread_offset_to_excNestCount(%edx) /* * Set X86_THREAD_FLAG_EXC in the current thread. This enables * z_swap() to preserve the thread's FP registers (where needed) * if the exception handler causes a context switch. It also * indicates to debug tools that an exception is being handled * in the event of a context switch. 
*/ orb $X86_THREAD_FLAG_EXC, _thread_offset_to_flags(%edx) #endif /* CONFIG_LAZY_FPU_SHARING */ /* * restore interrupt enable state, then call the handler * * interrupts are enabled only if they were allowed at the time * the exception was triggered -- this protects kernel level code * that mustn't be interrupted * * Test IF bit of saved EFLAGS and re-enable interrupts if IF=1. */ /* ESP is still pointing to the ESF at this point */ testl $0x200, __struct_arch_esf_eflags_OFFSET(%esp) je allDone sti allDone: pushl %esp /* push struct_arch_esf * parameter */ call *%ecx /* call exception handler */ addl $0x4, %esp #if defined(CONFIG_LAZY_FPU_SHARING) movl _kernel + _kernel_offset_to_current, %ecx /* * Must lock interrupts to prevent outside interference. * (Using "lock" prefix would be nicer, but this won't work * on platforms that don't respect the CPU's bus lock signal.) */ cli /* * Determine whether exiting from a nested interrupt. */ decl _thread_offset_to_excNestCount(%ecx) cmpl $0, _thread_offset_to_excNestCount(%ecx) jne nestedException /* * Clear X86_THREAD_FLAG_EXC in the k_thread of the current execution * context if we are not in a nested exception (ie, when we exit the * outermost exception). */ andb $~X86_THREAD_FLAG_EXC, _thread_offset_to_flags(%ecx) nestedException: #endif /* CONFIG_LAZY_FPU_SHARING */ #ifdef CONFIG_GDBSTUB popl %ss popl %gs popl %fs popl %es popl %ds #endif /* * Pop the non-volatile registers from the stack. * Note that debug tools may have altered the saved register values while * the task was stopped, and we want to pick up the altered values. 
*/ popl %ebp /* Discard saved ESP */ popl %ebp popl %ebx popl %esi popl %edi /* restore edx and ecx which are always saved on the stack */ popl %edx popl %eax popl %ecx addl $4, %esp /* "pop" error code */ /* Pop of EFLAGS will re-enable interrupts and restore direction flag */ KPTI_IRET SECTION_FUNC(PINNED_TEXT, _kernel_oops_handler) push $0 /* dummy error code */ push $z_x86_do_kernel_oops jmp _exception_enter ```
/content/code_sandbox/arch/x86/core/ia32/excstub.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,593
```c /* * */ /** * @file * @brief Floating point register sharing routines * * This module allows multiple preemptible threads to safely share the system's * floating point registers, by allowing the system to save FPU state info * in a thread's stack region when a preemptive context switch occurs. * * Note: If the kernel has been built without floating point register sharing * support (CONFIG_FPU_SHARING), the floating point registers can still be used * safely by one or more cooperative threads OR by a single preemptive thread, * but not by both. * * This code is not necessary for systems with CONFIG_EAGER_FPU_SHARING, as * the floating point context is unconditionally saved/restored with every * context switch. * * The floating point register sharing mechanism is designed for minimal * intrusiveness. Floating point state saving is only performed for threads * that explicitly indicate they are using FPU registers, to avoid impacting * the stack size requirements of all other threads. Also, the SSE registers * are only saved for threads that actually used them. For those threads that * do require floating point state saving, a "lazy save/restore" mechanism * is employed so that the FPU's register sets are only switched in and out * when absolutely necessary; this avoids wasting effort preserving them when * there is no risk that they will be altered, or when there is no need to * preserve their contents. * * WARNING * The use of floating point instructions by ISRs is not supported by the * kernel. * * INTERNAL * The kernel sets CR0[TS] to 0 only for threads that require FP register * sharing. All other threads have CR0[TS] set to 1 so that an attempt * to perform an FP operation will cause an exception, allowing the kernel * to enable FP register sharing on its behalf. 
*/ #include <zephyr/kernel.h> #include <kernel_internal.h> /* SSE control/status register default value (used by assembler code) */ extern uint32_t _sse_mxcsr_default_value; /** * @brief Disallow use of floating point capabilities * * This routine sets CR0[TS] to 1, which disallows the use of FP instructions * by the currently executing thread. */ static inline void z_FpAccessDisable(void) { void *tempReg; __asm__ volatile( "movl %%cr0, %0;\n\t" "orl $0x8, %0;\n\t" "movl %0, %%cr0;\n\t" : "=r"(tempReg) : : "memory"); } /** * @brief Save non-integer context information * * This routine saves the system's "live" non-integer context into the * specified area. If the specified thread supports SSE then * x87/MMX/SSEx thread info is saved, otherwise only x87/MMX thread is saved. * Function is invoked by FpCtxSave(struct k_thread *thread) */ static inline void z_do_fp_regs_save(void *preemp_float_reg) { __asm__ volatile("fnsave (%0);\n\t" : : "r"(preemp_float_reg) : "memory"); } /** * @brief Save non-integer context information * * This routine saves the system's "live" non-integer context into the * specified area. If the specified thread supports SSE then * x87/MMX/SSEx thread info is saved, otherwise only x87/MMX thread is saved. * Function is invoked by FpCtxSave(struct k_thread *thread) */ static inline void z_do_fp_and_sse_regs_save(void *preemp_float_reg) { __asm__ volatile("fxsave (%0);\n\t" : : "r"(preemp_float_reg) : "memory"); } /** * @brief Initialize floating point register context information. * * This routine initializes the system's "live" floating point registers. */ static inline void z_do_fp_regs_init(void) { __asm__ volatile("fninit\n\t"); } /** * @brief Initialize SSE register context information. * * This routine initializes the system's "live" SSE registers. */ static inline void z_do_sse_regs_init(void) { __asm__ volatile("ldmxcsr _sse_mxcsr_default_value\n\t"); } /* * Save a thread's floating point context information. 
* * This routine saves the system's "live" floating point context into the * specified thread control block. The SSE registers are saved only if the * thread is actually using them. */ static void FpCtxSave(struct k_thread *thread) { #ifdef CONFIG_X86_SSE if ((thread->base.user_options & K_SSE_REGS) != 0) { z_do_fp_and_sse_regs_save(&thread->arch.preempFloatReg); return; } #endif z_do_fp_regs_save(&thread->arch.preempFloatReg); } /* * Initialize a thread's floating point context information. * * This routine initializes the system's "live" floating point context. * The SSE registers are initialized only if the thread is actually using them. */ static inline void FpCtxInit(struct k_thread *thread) { z_do_fp_regs_init(); #ifdef CONFIG_X86_SSE if ((thread->base.user_options & K_SSE_REGS) != 0) { z_do_sse_regs_init(); } #endif } /* * Enable preservation of floating point context information. * * The transition from "non-FP supporting" to "FP supporting" must be done * atomically to avoid confusing the floating point logic used by z_swap(), so * this routine locks interrupts to ensure that a context switch does not occur. * The locking isn't really needed when the routine is called by a cooperative * thread (since context switching can't occur), but it is harmless. */ void z_float_enable(struct k_thread *thread, unsigned int options) { unsigned int imask; struct k_thread *fp_owner; if (!thread) { return; } /* Ensure a preemptive context switch does not occur */ imask = irq_lock(); /* Indicate thread requires floating point context saving */ thread->base.user_options |= (uint8_t)options; /* * The current thread might not allow FP instructions, so clear CR0[TS] * so we can use them. (CR0[TS] gets restored later on, if necessary.) 
*/ __asm__ volatile("clts\n\t"); /* * Save existing floating point context (since it is about to change), * but only if the FPU is "owned" by an FP-capable task that is * currently handling an interrupt or exception (meaning its FP context * must be preserved). */ fp_owner = _kernel.current_fp; if (fp_owner != NULL) { if ((fp_owner->arch.flags & X86_THREAD_FLAG_ALL) != 0) { FpCtxSave(fp_owner); } } /* Now create a virgin FP context */ FpCtxInit(thread); /* Associate the new FP context with the specified thread */ if (thread == _current) { /* * When enabling FP support for the current thread, just claim * ownership of the FPU and leave CR0[TS] unset. * * (The FP context is "live" in hardware, not saved in TCS.) */ _kernel.current_fp = thread; } else { /* * When enabling FP support for someone else, assign ownership * of the FPU to them (unless we need it ourselves). */ if ((_current->base.user_options & _FP_USER_MASK) == 0) { /* * We are not FP-capable, so mark FPU as owned by the * thread we've just enabled FP support for, then * disable our own FP access by setting CR0[TS] back * to its original state. */ _kernel.current_fp = thread; z_FpAccessDisable(); } else { /* * We are FP-capable (and thus had FPU ownership on * entry), so save the new FP context in their TCS, * leave FPU ownership with self, and leave CR0[TS] * unset. * * The saved FP context is needed in case the thread * we enabled FP support for is currently pre-empted, * since z_swap() uses it to restore FP context when * the thread re-activates. * * Saving the FP context reinits the FPU, and thus * our own FP context, but that's OK since it didn't * need to be preserved. (i.e. We aren't currently * handling an interrupt or exception.) */ FpCtxSave(thread); } } irq_unlock(imask); } /** * Disable preservation of floating point context information. 
* * The transition from "FP supporting" to "non-FP supporting" must be done * atomically to avoid confusing the floating point logic used by z_swap(), so * this routine locks interrupts to ensure that a context switch does not occur. * The locking isn't really needed when the routine is called by a cooperative * thread (since context switching can't occur), but it is harmless. */ int z_float_disable(struct k_thread *thread) { unsigned int imask; /* Ensure a preemptive context switch does not occur */ imask = irq_lock(); /* Disable all floating point capabilities for the thread */ thread->base.user_options &= ~_FP_USER_MASK; if (thread == _current) { z_FpAccessDisable(); _kernel.current_fp = (struct k_thread *)0; } else { if (_kernel.current_fp == thread) { _kernel.current_fp = (struct k_thread *)0; } } irq_unlock(imask); return 0; } /* * Handler for "device not available" exception. * * This routine is registered to handle the "device not available" exception * (vector = 7). * * The processor will generate this exception if any x87 FPU, MMX, or SSEx * instruction is executed while CR0[TS]=1. The handler then enables the * current thread to use all supported floating point registers. */ void _FpNotAvailableExcHandler(struct arch_esf *pEsf) { ARG_UNUSED(pEsf); /* * Assume the exception did not occur in an ISR. * (In other words, CPU cycles will not be consumed to perform * error checking to ensure the exception was not generated in an ISR.) */ /* Enable highest level of FP capability configured into the kernel */ k_float_enable(_current, _FP_USER_MASK); } _EXCEPTION_CONNECT_NOCODE(_FpNotAvailableExcHandler, IV_DEVICE_NOT_AVAILABLE, 0); ```
/content/code_sandbox/arch/x86/core/ia32/float.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,350
```unknown /* * */ #include <zephyr/arch/x86/ia32/asm.h> #include <zephyr/arch/cpu.h> #include <offsets_short.h> #include <zephyr/syscall.h> #include <zephyr/kernel/mm.h> #include <x86_mmu.h> /* Exports */ GTEXT(z_x86_syscall_entry_stub) GTEXT(z_x86_userspace_enter) GTEXT(arch_user_string_nlen) GTEXT(z_x86_user_string_nlen_fault_start) GTEXT(z_x86_user_string_nlen_fault_end) GTEXT(z_x86_user_string_nlen_fixup) /* Imports */ GDATA(_k_syscall_table) #ifdef CONFIG_X86_KPTI /* Switch from the shadow to the kernel page table, switch to the interrupted * thread's kernel stack, and copy all context from the trampoline stack. * * Assumes all registers are callee-saved since this gets called from other * ASM code. Assumes a particular stack layout which is correct for * _exception_enter and _interrupt_enter when invoked with a call instruction: * * 28 SS * 24 ES * 20 EFLAGS * 16 CS * 12 EIP * 8 isr_param or exc code * 4 isr or exc handler * 0 return address */ SECTION_FUNC(PINNED_TEXT, z_x86_trampoline_to_kernel) /* Check interrupted code segment to see if we came from ring 3 * and hence on the trampoline stack */ testb $3, 16(%esp) /* Offset of CS */ jz 1f /* Stash these regs as we need to use them */ pushl %esi pushl %edi /* Switch to kernel page table */ movl $K_MEM_PHYS_ADDR(z_x86_kernel_ptables), %esi movl %esi, %cr3 /* Save old trampoline stack pointer in %edi */ movl %esp, %edi /* Switch to privilege mode stack */ movl $_kernel, %esi movl _kernel_offset_to_current(%esi), %esi movl _thread_offset_to_psp(%esi), %esp /* Transplant stack context and restore ESI/EDI. Taking care to zero * or put uninteresting values where we stashed ESI/EDI since the * trampoline page is insecure and there might a context switch * on the way out instead of returning to the original thread * immediately. 
*/ pushl 36(%edi) /* SS */ pushl 32(%edi) /* ESP */ pushl 28(%edi) /* EFLAGS */ pushl 24(%edi) /* CS */ pushl 20(%edi) /* EIP */ pushl 16(%edi) /* error code or isr parameter */ pushl 12(%edi) /* exception/irq handler */ pushl 8(%edi) /* return address */ movl 4(%edi), %esi /* restore ESI */ movl $0, 4(%edi) /* Zero old esi storage area */ xchgl %edi, (%edi) /* Exchange old edi to restore it and put old sp in the storage area */ /* Trampoline stack should have nothing sensitive in it at this point */ 1: ret /* Copy interrupt return stack context to the trampoline stack, switch back * to the user page table, and only then 'iret'. We jump to this instead * of calling 'iret' if KPTI is turned on. * * Stack layout is expected to be as follows: * * 16 SS * 12 ESP * 8 EFLAGS * 4 CS * 0 EIP * * This function is conditionally macroed to KPTI_IRET/KPTI_IRET_USER */ SECTION_FUNC(PINNED_TEXT, z_x86_trampoline_to_user) /* Check interrupted code segment to see if we came from ring 3 * and hence on the trampoline stack */ testb $3, 4(%esp) /* Offset of CS */ jz 1f /* Otherwise, fall through ... */ SECTION_FUNC(PINNED_TEXT, z_x86_trampoline_to_user_always) /* Stash EDI, need a free register */ pushl %edi /* Store old stack pointer and switch to trampoline stack. * Lock IRQs before changing stack pointer to the trampoline stack, * we don't want any interrupts also using the trampoline stack * during this time. 
*/ movl %esp, %edi cli movl $z_trampoline_stack_end, %esp /* Copy context */ pushl 20(%edi) /* SS */ pushl 16(%edi) /* ESP */ pushl 12(%edi) /* EFLAGS */ pushl 8(%edi) /* CS */ pushl 4(%edi) /* EIP */ xchgl %edi, (%edi) /* Exchange old edi to restore it and put trampoline stack address in its old storage area */ /* Switch to user page table */ pushl %eax movl $_kernel, %eax movl _kernel_offset_to_current(%eax), %eax movl _thread_offset_to_ptables(%eax), %eax movl %eax, %cr3 popl %eax movl $0, -4(%esp) /* Delete stashed EAX data */ /* Trampoline stack should have nothing sensitive in it at this point */ 1: iret #endif /* CONFIG_X86_KPTI */ /* Landing site for syscall SW IRQ. Marshal arguments and call C function for * further processing. We're on the kernel stack for the invoking thread, * unless KPTI is enabled, in which case we're on the trampoline stack and * need to get off it before enabling interrupts. */ SECTION_FUNC(TEXT, z_x86_syscall_entry_stub) #ifdef CONFIG_X86_KPTI /* Stash these regs as we need to use them */ pushl %esi pushl %edi /* Switch to kernel page table */ movl $K_MEM_PHYS_ADDR(z_x86_kernel_ptables), %esi movl %esi, %cr3 /* Save old trampoline stack pointer in %edi */ movl %esp, %edi /* Switch to privilege elevation stack */ movl $_kernel, %esi movl _kernel_offset_to_current(%esi), %esi movl _thread_offset_to_psp(%esi), %esp /* Transplant context according to layout above. Variant of logic * in x86_trampoline_to_kernel */ pushl 24(%edi) /* SS */ pushl 20(%edi) /* ESP */ pushl 16(%edi) /* EFLAGS */ pushl 12(%edi) /* CS */ pushl 8(%edi) /* EIP */ movl 4(%edi), %esi /* restore ESI */ movl $0, 4(%edi) /* Zero old esi storage area */ xchgl %edi, (%edi) /* Exchange old edi to restore it and put old sp in the storage area */ /* Trampoline stack should have nothing sensitive in it at this point */ #endif /* CONFIG_X86_KPTI */ sti /* re-enable interrupts */ cld /* clear direction flag, restored on 'iret' */ /* call_id is in ESI. 
bounds-check it, must be less than * K_SYSCALL_LIMIT */ cmp $K_SYSCALL_LIMIT, %esi jae _bad_syscall _id_ok: #ifdef CONFIG_X86_BOUNDS_CHECK_BYPASS_MITIGATION /* Prevent speculation with bogus system call IDs */ lfence #endif /* Marshal arguments per calling convention to match what is expected * for _k_syscall_handler_t functions */ push %esp /* ssf */ push %ebp /* arg6 */ push %edi /* arg5 */ push %ebx /* arg4 */ push %ecx /* arg3 */ push %edx /* arg2 */ push %eax /* arg1 */ /* from the call ID in ESI, load EBX with the actual function pointer * to call by looking it up in the system call dispatch table */ xor %edi, %edi mov _k_syscall_table(%edi, %esi, 4), %ebx /* Run the handler, which is some entry in _k_syscall_table */ call *%ebx /* EAX now contains return value. Pop or xor everything else to prevent * information leak from kernel mode. */ pop %edx /* old arg1 value, discard it */ pop %edx pop %ecx pop %ebx pop %edi /* Discard ssf and arg6 */ add $8, %esp KPTI_IRET_USER _bad_syscall: /* ESI had a bogus syscall value in it, replace with the bad syscall * handler's ID, and put the bad ID as its first argument. This * clobbers ESI but the bad syscall handler never returns * anyway, it's going to generate a kernel oops */ mov %esi, %eax mov $K_SYSCALL_BAD, %esi jmp _id_ok /* * size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg) */ SECTION_FUNC(TEXT, arch_user_string_nlen) push %ebp mov %esp, %ebp /* error value, set to -1 initially. This location is -4(%ebp) */ push $-1 /* Do the strlen operation, based on disassembly of minimal libc */ xor %eax, %eax /* EAX = 0, length count */ mov 0x8(%ebp), %edx /* EDX base of string */ /* This code might page fault */ strlen_loop: z_x86_user_string_nlen_fault_start: cmpb $0x0, (%edx, %eax, 1) /* *(EDX + EAX) == 0? Could fault. */ z_x86_user_string_nlen_fault_end: je strlen_done cmp 0xc(%ebp), %eax /* Max length reached? 
*/ je strlen_done inc %eax /* EAX++ and loop again */ jmp strlen_loop strlen_done: /* Set error value to 0 since we succeeded */ movl $0, -4(%ebp) z_x86_user_string_nlen_fixup: /* Write error value to err pointer parameter */ movl 0x10(%ebp), %ecx pop %edx movl %edx, (%ecx) pop %ebp ret /* FUNC_NORETURN void z_x86_userspace_enter(k_thread_entry_t user_entry, * void *p1, void *p2, void *p3, * uint32_t stack_end, * uint32_t stack_start) * * A one-way trip to userspace. */ SECTION_FUNC(TEXT, z_x86_userspace_enter) pop %esi /* Discard return address on stack */ /* Fetch parameters on the stack */ pop %eax /* user_entry */ pop %edx /* p1 */ pop %ecx /* p2 */ pop %esi /* p3 */ pop %ebx /* stack_end (high address) */ pop %edi /* stack_start (low address) */ /* Move to the kernel stack for this thread, so we can erase the * user stack. The kernel stack is the page immediately before * the user stack. * * For security reasons, we must erase the entire user stack. * We don't know what previous contexts it was used and do not * want to leak any information. */ mov %edi, %esp /* Erase and enable US bit in page tables for the stack buffer */ push %ecx push %eax push %edx call z_x86_current_stack_perms pop %edx pop %eax pop %ecx /* Set stack pointer to the base of the freshly-erased user stack. * Now that this is set we won't need EBX any more. */ mov %ebx, %esp /* Set segment registers (except CS and SS which are done in * a special way by 'iret' below) */ mov $USER_DATA_SEG, %bx mov %bx, %ds mov %bx, %es /* Push arguments to z_thread_entry() */ push %esi /* p3 */ push %ecx /* p2 */ push %edx /* p1 */ push %eax /* user_entry */ /* NULL return address */ push $0 /* Save stack pointer at this position, this is where it will be * when we land in z_thread_entry() */ mov %esp, %edi /* Inter-privilege 'iret' pops all of these. 
Need to fake an interrupt * return to enter user mode as far calls cannot change privilege * level */ push $USER_DATA_SEG /* SS */ push %edi /* ESP */ pushfl /* EFLAGS */ push $USER_CODE_SEG /* CS */ push $z_thread_entry /* EIP */ /* We will land in z_thread_entry() in user mode after this */ KPTI_IRET_USER ```
/content/code_sandbox/arch/x86/core/ia32/userspace.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,015
```c /* * */ #include <kernel_internal.h> #include <zephyr/arch/x86/ia32/arch.h> #include <zephyr/arch/x86/ia32/segmentation.h> #define ENTRY_NUM (GS_TLS_SEG >> 3) void z_x86_tls_update_gdt(struct k_thread *thread) { /* * GS is used for thread local storage to pointer to * the TLS storage area in stack. Here we update one * of the descriptor so GS has the new address. * * The re-loading of descriptor into GS is taken care * of inside the assembly swap code just before * swapping into the new thread. */ struct segment_descriptor *sd = &_gdt.entries[ENTRY_NUM]; sd->base_low = thread->tls & 0xFFFFU; sd->base_mid = (thread->tls >> 16) & 0xFFU; sd->base_hi = (thread->tls >> 24) & 0xFFU; } FUNC_NO_STACK_PROTECTOR void z_x86_early_tls_update_gdt(char *stack_ptr) { uintptr_t *self_ptr; uintptr_t tls_seg = GS_TLS_SEG; struct segment_descriptor *sd = &_gdt.entries[ENTRY_NUM]; /* * Since we are populating things backwards, store * the pointer to the TLS area at top of stack. */ stack_ptr -= sizeof(uintptr_t); self_ptr = (void *)stack_ptr; *self_ptr = POINTER_TO_UINT(stack_ptr); sd->base_low = POINTER_TO_UINT(self_ptr) & 0xFFFFU; sd->base_mid = (POINTER_TO_UINT(self_ptr) >> 16) & 0xFFU; sd->base_hi = (POINTER_TO_UINT(self_ptr) >> 24) & 0xFFU; __asm__ volatile( "movl %0, %%eax;\n\t" "movl %%eax, %%gs;\n\t" : : "r"(tls_seg)); } ```
/content/code_sandbox/arch/x86/core/ia32/tls.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
431
```c /* * */ #include <zephyr/kernel.h> #include <kernel_internal.h> #include <ia32/exception.h> #include <inttypes.h> #include <zephyr/debug/gdbstub.h> static struct gdb_ctx debug_ctx; /** * Currently we just handle vectors 1 and 3 but lets keep it generic * to be able to notify other exceptions in the future */ static unsigned int get_exception(unsigned int vector) { unsigned int exception; switch (vector) { case IV_DIVIDE_ERROR: exception = GDB_EXCEPTION_DIVIDE_ERROR; break; case IV_DEBUG: exception = GDB_EXCEPTION_BREAKPOINT; break; case IV_BREAKPOINT: exception = GDB_EXCEPTION_BREAKPOINT; break; case IV_OVERFLOW: exception = GDB_EXCEPTION_OVERFLOW; break; case IV_BOUND_RANGE: exception = GDB_EXCEPTION_OVERFLOW; break; case IV_INVALID_OPCODE: exception = GDB_EXCEPTION_INVALID_INSTRUCTION; break; case IV_DEVICE_NOT_AVAILABLE: exception = GDB_EXCEPTION_DIVIDE_ERROR; break; case IV_DOUBLE_FAULT: exception = GDB_EXCEPTION_MEMORY_FAULT; break; case IV_COPROC_SEGMENT_OVERRUN: exception = GDB_EXCEPTION_INVALID_MEMORY; break; case IV_INVALID_TSS: exception = GDB_EXCEPTION_INVALID_MEMORY; break; case IV_SEGMENT_NOT_PRESENT: exception = GDB_EXCEPTION_INVALID_MEMORY; break; case IV_STACK_FAULT: exception = GDB_EXCEPTION_INVALID_MEMORY; break; case IV_GENERAL_PROTECTION: exception = GDB_EXCEPTION_INVALID_MEMORY; break; case IV_PAGE_FAULT: exception = GDB_EXCEPTION_INVALID_MEMORY; break; case IV_X87_FPU_FP_ERROR: exception = GDB_EXCEPTION_MEMORY_FAULT; break; default: exception = GDB_EXCEPTION_MEMORY_FAULT; break; } return exception; } /* * Debug exception handler. 
*/ static void z_gdb_interrupt(unsigned int vector, struct arch_esf *esf) { debug_ctx.exception = get_exception(vector); debug_ctx.registers[GDB_EAX] = esf->eax; debug_ctx.registers[GDB_ECX] = esf->ecx; debug_ctx.registers[GDB_EDX] = esf->edx; debug_ctx.registers[GDB_EBX] = esf->ebx; debug_ctx.registers[GDB_ESP] = esf->esp; debug_ctx.registers[GDB_EBP] = esf->ebp; debug_ctx.registers[GDB_ESI] = esf->esi; debug_ctx.registers[GDB_EDI] = esf->edi; debug_ctx.registers[GDB_PC] = esf->eip; debug_ctx.registers[GDB_CS] = esf->cs; debug_ctx.registers[GDB_EFLAGS] = esf->eflags; debug_ctx.registers[GDB_SS] = esf->ss; debug_ctx.registers[GDB_DS] = esf->ds; debug_ctx.registers[GDB_ES] = esf->es; debug_ctx.registers[GDB_FS] = esf->fs; debug_ctx.registers[GDB_GS] = esf->gs; z_gdb_main_loop(&debug_ctx); esf->eax = debug_ctx.registers[GDB_EAX]; esf->ecx = debug_ctx.registers[GDB_ECX]; esf->edx = debug_ctx.registers[GDB_EDX]; esf->ebx = debug_ctx.registers[GDB_EBX]; esf->esp = debug_ctx.registers[GDB_ESP]; esf->ebp = debug_ctx.registers[GDB_EBP]; esf->esi = debug_ctx.registers[GDB_ESI]; esf->edi = debug_ctx.registers[GDB_EDI]; esf->eip = debug_ctx.registers[GDB_PC]; esf->cs = debug_ctx.registers[GDB_CS]; esf->eflags = debug_ctx.registers[GDB_EFLAGS]; esf->ss = debug_ctx.registers[GDB_SS]; esf->ds = debug_ctx.registers[GDB_DS]; esf->es = debug_ctx.registers[GDB_ES]; esf->fs = debug_ctx.registers[GDB_FS]; esf->gs = debug_ctx.registers[GDB_GS]; } void arch_gdb_continue(void) { /* Clear the TRAP FLAG bit */ debug_ctx.registers[GDB_EFLAGS] &= ~BIT(8); } void arch_gdb_step(void) { /* Set the TRAP FLAG bit */ debug_ctx.registers[GDB_EFLAGS] |= BIT(8); } size_t arch_gdb_reg_readall(struct gdb_ctx *ctx, uint8_t *buf, size_t buflen) { size_t ret; if (buflen < (sizeof(ctx->registers) * 2)) { ret = 0; } else { ret = bin2hex((const uint8_t *)&(ctx->registers), sizeof(ctx->registers), buf, buflen); } return ret; } size_t arch_gdb_reg_writeall(struct gdb_ctx *ctx, uint8_t *hex, size_t hexlen) { 
size_t ret; if (hexlen != (sizeof(ctx->registers) * 2)) { ret = 0; } else { ret = hex2bin(hex, hexlen, (uint8_t *)&(ctx->registers), sizeof(ctx->registers)); } return ret; } size_t arch_gdb_reg_readone(struct gdb_ctx *ctx, uint8_t *buf, size_t buflen, uint32_t regno) { size_t ret; if (buflen < (sizeof(unsigned int) * 2)) { /* Make sure there is enough space to write hex string */ ret = 0; } else if (regno >= GDB_STUB_NUM_REGISTERS) { /* Return hex string "xx" to tell GDB that this register * is not available. So GDB will continue probing other * registers instead of stopping in the middle of * "info registers all". */ memcpy(buf, "xx", 2); ret = 2; } else { ret = bin2hex((const uint8_t *)&(ctx->registers[regno]), sizeof(ctx->registers[regno]), buf, buflen); } return ret; } size_t arch_gdb_reg_writeone(struct gdb_ctx *ctx, uint8_t *hex, size_t hexlen, uint32_t regno) { size_t ret; if (regno == GDB_ORIG_EAX) { /* GDB requires orig_eax that seems to be * Linux specific. Unfortunately if we just * return error, GDB will stop working. * So just fake an OK response by saying * that we have processed the hex string. 
*/ ret = hexlen; } else if (regno >= GDB_STUB_NUM_REGISTERS) { ret = 0; } else if (hexlen != (sizeof(unsigned int) * 2)) { /* Make sure the input hex string matches register size */ ret = 0; } else { ret = hex2bin(hex, hexlen, (uint8_t *)&(ctx->registers[regno]), sizeof(ctx->registers[regno])); } return ret; } static __used void z_gdb_debug_isr(struct arch_esf *esf) { #ifdef CONFIG_GDBSTUB_TRACE printk("gdbstub:enter %s (IV_DEBUG)\n", __func__); #endif z_gdb_interrupt(IV_DEBUG, esf); #ifdef CONFIG_GDBSTUB_TRACE printk("gdbstub:exit %s (IV_DEBUG)\n", __func__); #endif } static __used void z_gdb_break_isr(struct arch_esf *esf) { #ifdef CONFIG_GDBSTUB_TRACE printk("gdbstub:enter %s (IV_BREAKPOINT)\n", __func__); #endif z_gdb_interrupt(IV_BREAKPOINT, esf); #ifdef CONFIG_GDBSTUB_TRACE printk("gdbstub:exit %s (IV_BREAKPOINT)\n", __func__); #endif } void arch_gdb_init(void) { #ifdef CONFIG_GDBSTUB_TRACE printk("gdbstub:%s awaits GDB connection\n", __func__); #endif __asm__ volatile ("int3"); #ifdef CONFIG_GDBSTUB_TRACE printk("gdbstub:%s GDB is connected\n", __func__); #endif } /* Hook current IDT. */ _EXCEPTION_CONNECT_NOCODE(z_gdb_debug_isr, IV_DEBUG, 3); _EXCEPTION_CONNECT_NOCODE(z_gdb_break_isr, IV_BREAKPOINT, 3); ```
/content/code_sandbox/arch/x86/core/ia32/gdbstub.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,854
```unknown /* * */ /** * @file * @brief Kernel swapper code for IA-32 * * This module implements the arch_swap() routine for the IA-32 architecture. */ #include <zephyr/arch/x86/ia32/asm.h> #include <zephyr/kernel.h> #include <zephyr/arch/cpu.h> #include <kernel_arch_data.h> #include <offsets_short.h> /* exports (internal APIs) */ GTEXT(arch_swap) GTEXT(z_x86_thread_entry_wrapper) GTEXT(_x86_user_thread_entry_wrapper) /* externs */ #if !defined(CONFIG_X86_KPTI) && defined(CONFIG_X86_USERSPACE) GTEXT(z_x86_swap_update_page_tables) #endif GDATA(_k_neg_eagain) /* * Given that arch_swap() is called to effect a cooperative context switch, * only the non-volatile integer registers need to be saved in the TCS of the * outgoing thread. The restoration of the integer registers of the incoming * thread depends on whether that thread was preemptively context switched out. * The X86_THREAD_FLAG_INT and _EXC bits in the k_thread->arch.flags field will * signify that the thread was preemptively context switched out, and thus both * the volatile and non-volatile integer registers need to be restored. * * The non-volatile registers need to be scrubbed to ensure they contain no * sensitive information that could compromise system security. This is to * make sure that information will not be leaked from one application to * another via these volatile registers. * * Here, the integer registers (EAX, ECX, EDX) have been scrubbed. Any changes * to this routine that alter the values of these registers MUST be reviewed * for potential security impacts. * * Floating point registers are handled using a lazy save/restore mechanism * since it's expected relatively few threads will be created with the * K_FP_REGS or K_SSE_REGS option bits. The kernel data structure maintains a * 'current_fp' field to keep track of the thread that "owns" the floating * point registers. Floating point registers consist of ST0->ST7 (x87 FPU and * MMX registers) and XMM0 -> XMM7. 
* * All floating point registers are considered 'volatile' thus they will only * be saved/restored when a preemptive context switch occurs. * * Floating point registers are currently NOT scrubbed, and are subject to * potential security leaks. * * C function prototype: * * unsigned int arch_swap (unsigned int eflags); */ SECTION_FUNC(PINNED_TEXT, arch_swap) #if defined(CONFIG_INSTRUMENT_THREAD_SWITCHING) pushl %eax call z_thread_mark_switched_out popl %eax #endif /* * Push all non-volatile registers onto the stack; do not copy * any of these registers into the k_thread. Only the 'esp' register * after all the pushes have been performed) will be stored in the * k_thread. */ pushl %edi movl $_kernel, %edi pushl %esi pushl %ebx pushl %ebp /* * Carve space for the return value. Setting it to a default of * -EAGAIN eliminates the need for the timeout code to set it. * If another value is ever needed, it can be modified with * arch_thread_return_value_set(). */ pushl _k_neg_eagain /* save esp into k_thread structure */ movl _kernel_offset_to_current(%edi), %edx movl %esp, _thread_offset_to_esp(%edx) movl _kernel_offset_to_ready_q_cache(%edi), %eax /* * At this point, the %eax register contains the 'k_thread *' of the * thread to be swapped in, and %edi still contains &_kernel. %edx * has the pointer to the outgoing thread. */ #if defined(CONFIG_X86_USERSPACE) && !defined(CONFIG_X86_KPTI) push %eax call z_x86_swap_update_page_tables pop %eax /* Page tables updated. All memory access after this point needs to be * to memory that has the same mappings and access attributes wrt * supervisor mode! */ #endif #ifdef CONFIG_EAGER_FPU_SHARING /* Eager floating point state restore logic * * Addresses CVE-2018-3665 * Used as an alternate to CONFIG_LAZY_FPU_SHARING if there is any * sensitive data in the floating point/SIMD registers in a system * with untrusted threads. * * Unconditionally save/restore floating point registers on context * switch. 
*/ /* Save outgpoing thread context */ #ifdef CONFIG_X86_SSE fxsave _thread_offset_to_preempFloatReg(%edx) fninit #else fnsave _thread_offset_to_preempFloatReg(%edx) #endif /* Restore incoming thread context */ #ifdef CONFIG_X86_SSE fxrstor _thread_offset_to_preempFloatReg(%eax) #else frstor _thread_offset_to_preempFloatReg(%eax) #endif /* CONFIG_X86_SSE */ #elif defined(CONFIG_LAZY_FPU_SHARING) /* * Clear the CR0[TS] bit (in the event the current thread * doesn't have floating point enabled) to prevent the "device not * available" exception when executing the subsequent fxsave/fnsave * and/or fxrstor/frstor instructions. * * Indeed, it's possible that none of the aforementioned instructions * need to be executed, for example, the incoming thread doesn't * utilize floating point operations. However, the code responsible * for setting the CR0[TS] bit appropriately for the incoming thread * (just after the 'restoreContext_NoFloatSwap' label) will leverage * the fact that the following 'clts' was performed already. */ clts /* * Determine whether the incoming thread utilizes floating point regs * _and_ whether the thread was context switched out preemptively. */ testb $_FP_USER_MASK, _thread_offset_to_user_options(%eax) je restoreContext_NoFloatSwap /* * The incoming thread uses floating point registers: * Was it the last thread to use floating point registers? * If so, there there is no need to restore the floating point context. */ movl _kernel_offset_to_current_fp(%edi), %ebx cmpl %ebx, %eax je restoreContext_NoFloatSwap /* * The incoming thread uses floating point registers and it was _not_ * the last thread to use those registers: * Check whether the current FP context actually needs to be saved * before swapping in the context of the incoming thread. */ testl %ebx, %ebx jz restoreContext_NoFloatSave /* * The incoming thread uses floating point registers and it was _not_ * the last thread to use those registers _and_ the current FP context * needs to be saved. 
* * Given that the ST[0] -> ST[7] and XMM0 -> XMM7 registers are all * 'volatile', only save the registers if the "current FP context" * was preemptively context switched. */ testb $X86_THREAD_FLAG_ALL, _thread_offset_to_flags(%ebx) je restoreContext_NoFloatSave #ifdef CONFIG_X86_SSE testb $K_SSE_REGS, _thread_offset_to_user_options(%ebx) je x87FloatSave /* * 'fxsave' does NOT perform an implicit 'fninit', therefore issue an * 'fninit' to ensure a "clean" FPU state for the incoming thread * (for the case when the fxrstor is not executed). */ fxsave _thread_offset_to_preempFloatReg(%ebx) fninit jmp floatSaveDone x87FloatSave: #endif /* CONFIG_X86_SSE */ /* 'fnsave' performs an implicit 'fninit' after saving state! */ fnsave _thread_offset_to_preempFloatReg(%ebx) /* fall through to 'floatSaveDone' */ floatSaveDone: restoreContext_NoFloatSave: /********************************************************* * Restore floating point context of the incoming thread. *********************************************************/ /* * Again, given that the ST[0] -> ST[7] and XMM0 -> XMM7 registers are * all 'volatile', only restore the registers if the incoming thread * was previously preemptively context switched out. */ testb $X86_THREAD_FLAG_ALL, _thread_offset_to_flags(%eax) je restoreContext_NoFloatRestore #ifdef CONFIG_X86_SSE testb $K_SSE_REGS, _thread_offset_to_user_options(%eax) je x87FloatRestore fxrstor _thread_offset_to_preempFloatReg(%eax) jmp floatRestoreDone x87FloatRestore: #endif /* CONFIG_X86_SSE */ frstor _thread_offset_to_preempFloatReg(%eax) /* fall through to 'floatRestoreDone' */ floatRestoreDone: restoreContext_NoFloatRestore: /* record that the incoming thread "owns" the floating point registers */ movl %eax, _kernel_offset_to_current_fp(%edi) /* * Branch point when none of the floating point registers need to be * swapped because: a) the incoming thread does not use them OR * b) the incoming thread is the last thread that used those registers. 
*/ restoreContext_NoFloatSwap: /* * Leave CR0[TS] clear if incoming thread utilizes the floating point * registers */ testb $_FP_USER_MASK, _thread_offset_to_user_options(%eax) jne CROHandlingDone /* * The incoming thread does NOT currently utilize the floating point * registers, so set CR0[TS] to ensure the "device not available" * exception occurs on the first attempt to access a x87 FPU, MMX, * or XMM register. */ movl %cr0, %edx orl $0x8, %edx movl %edx, %cr0 CROHandlingDone: #endif /* CONFIG_LAZY_FPU_SHARING */ /* update _kernel.current to reflect incoming thread */ movl %eax, _kernel_offset_to_current(%edi) #if defined(CONFIG_X86_USE_THREAD_LOCAL_STORAGE) pushl %eax call z_x86_tls_update_gdt /* Since segment descriptor has changed, need to reload */ movw $GS_TLS_SEG, %ax movw %ax, %gs popl %eax #endif /* recover thread stack pointer from k_thread */ movl _thread_offset_to_esp(%eax), %esp /* load return value from a possible arch_thread_return_value_set() */ popl %eax /* pop the non-volatile registers from the stack */ popl %ebp popl %ebx popl %esi popl %edi /* * %eax may contain one of these values: * * - the return value for arch_swap() that was set up by a call to * arch_thread_return_value_set() * - -EINVAL */ /* Utilize the 'eflags' parameter to arch_swap() */ pushl 4(%esp) popfl #if defined(CONFIG_INSTRUMENT_THREAD_SWITCHING) pushl %eax call z_thread_mark_switched_in popl %eax #endif ret #ifdef _THREAD_WRAPPER_REQUIRED /** * * @brief Adjust stack/parameters before invoking thread entry function * * This function adjusts the initial stack frame created by arch_new_thread() * such that the GDB stack frame unwinders recognize it as the outermost frame * in the thread's stack. * * GDB normally stops unwinding a stack when it detects that it has * reached a function called main(). Kernel threads, however, do not have * a main() function, and there does not appear to be a simple way of stopping * the unwinding of the stack. 
* * Given the initial thread created by arch_new_thread(), GDB expects to find * a return address on the stack immediately above the thread entry routine * z_thread_entry, in the location occupied by the initial EFLAGS. GDB * attempts to examine the memory at this return address, which typically * results in an invalid access to page 0 of memory. * * This function overwrites the initial EFLAGS with zero. When GDB subsequently * attempts to examine memory at address zero, the PeekPoke driver detects * an invalid access to address zero and returns an error, which causes the * GDB stack unwinder to stop somewhat gracefully. * * The initial EFLAGS cannot be overwritten until after z_swap() has swapped in * the new thread for the first time. This routine is called by z_swap() the * first time that the new thread is swapped in, and it jumps to * z_thread_entry after it has done its work. * * __________________ * | param3 | <------ Top of the stack * |__________________| * | param2 | Stack Grows Down * |__________________| | * | param1 | V * |__________________| * | pEntry | * |__________________| * | initial EFLAGS | <---- ESP when invoked by z_swap() * |__________________| (Zeroed by this routine) * * The address of the thread entry function needs to be in %edi when this is * invoked. It will either be z_thread_entry, or if userspace is enabled, * _arch_drop_to_user_mode if this is a user thread. * * @return this routine does NOT return. */ SECTION_FUNC(PINNED_TEXT, z_x86_thread_entry_wrapper) movl $0, (%esp) jmp *%edi #endif /* _THREAD_WRAPPER_REQUIRED */ ```
/content/code_sandbox/arch/x86/core/ia32/swap.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,075
```c /* * */ /** * @file * @brief Kernel fatal error handler */ #include <zephyr/kernel.h> #include <kernel_internal.h> #include <zephyr/drivers/interrupt_controller/sysapic.h> #include <zephyr/arch/x86/ia32/segmentation.h> #include <zephyr/arch/syscall.h> #include <ia32/exception.h> #include <inttypes.h> #include <zephyr/arch/common/exc_handle.h> #include <zephyr/logging/log.h> #include <x86_mmu.h> #include <zephyr/kernel/mm.h> LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); #ifdef CONFIG_DEBUG_COREDUMP unsigned int z_x86_exception_vector; #endif __weak void z_debug_fatal_hook(const struct arch_esf *esf) { ARG_UNUSED(esf); } __pinned_func void z_x86_spurious_irq(const struct arch_esf *esf) { int vector = z_irq_controller_isr_vector_get(); if (vector >= 0) { LOG_ERR("IRQ vector: %d", vector); } z_x86_fatal_error(K_ERR_SPURIOUS_IRQ, esf); } __pinned_func void arch_syscall_oops(void *ssf) { struct _x86_syscall_stack_frame *ssf_ptr = (struct _x86_syscall_stack_frame *)ssf; struct arch_esf oops = { .eip = ssf_ptr->eip, .cs = ssf_ptr->cs, .eflags = ssf_ptr->eflags }; if (oops.cs == USER_CODE_SEG) { oops.esp = ssf_ptr->esp; } z_x86_fatal_error(K_ERR_KERNEL_OOPS, &oops); } extern void (*_kernel_oops_handler)(void); NANO_CPU_INT_REGISTER(_kernel_oops_handler, NANO_SOFT_IRQ, Z_X86_OOPS_VECTOR / 16, Z_X86_OOPS_VECTOR, 3); #if CONFIG_EXCEPTION_DEBUG __pinned_func FUNC_NORETURN static void generic_exc_handle(unsigned int vector, const struct arch_esf *pEsf) { #ifdef CONFIG_DEBUG_COREDUMP z_x86_exception_vector = vector; #endif z_x86_unhandled_cpu_exception(vector, pEsf); } #define _EXC_FUNC(vector) \ __pinned_func \ FUNC_NORETURN __used static void handle_exc_##vector(const struct arch_esf *pEsf) \ { \ generic_exc_handle(vector, pEsf); \ } #define Z_EXC_FUNC_CODE(vector, dpl) \ _EXC_FUNC(vector) \ _EXCEPTION_CONNECT_CODE(handle_exc_##vector, vector, dpl) #define Z_EXC_FUNC_NOCODE(vector, dpl) \ _EXC_FUNC(vector) \ _EXCEPTION_CONNECT_NOCODE(handle_exc_##vector, vector, dpl) /* 
Necessary indirection to ensure 'vector' is expanded before we expand * the handle_exc_##vector */ #define EXC_FUNC_NOCODE(vector, dpl) \ Z_EXC_FUNC_NOCODE(vector, dpl) #define EXC_FUNC_CODE(vector, dpl) \ Z_EXC_FUNC_CODE(vector, dpl) EXC_FUNC_NOCODE(IV_DIVIDE_ERROR, 0); EXC_FUNC_NOCODE(IV_NON_MASKABLE_INTERRUPT, 0); EXC_FUNC_NOCODE(IV_OVERFLOW, 0); EXC_FUNC_NOCODE(IV_BOUND_RANGE, 0); EXC_FUNC_NOCODE(IV_INVALID_OPCODE, 0); EXC_FUNC_NOCODE(IV_DEVICE_NOT_AVAILABLE, 0); #ifndef CONFIG_X86_ENABLE_TSS EXC_FUNC_NOCODE(IV_DOUBLE_FAULT, 0); #endif EXC_FUNC_CODE(IV_INVALID_TSS, 0); EXC_FUNC_CODE(IV_SEGMENT_NOT_PRESENT, 0); EXC_FUNC_CODE(IV_STACK_FAULT, 0); EXC_FUNC_CODE(IV_GENERAL_PROTECTION, 0); EXC_FUNC_NOCODE(IV_X87_FPU_FP_ERROR, 0); EXC_FUNC_CODE(IV_ALIGNMENT_CHECK, 0); EXC_FUNC_NOCODE(IV_MACHINE_CHECK, 0); #endif _EXCEPTION_CONNECT_CODE(z_x86_page_fault_handler, IV_PAGE_FAULT, 0); #ifdef CONFIG_X86_ENABLE_TSS static __pinned_noinit volatile struct arch_esf _df_esf; /* Very tiny stack; just enough for the bogus error code pushed by the CPU * and a frame pointer push by the compiler. All df_handler_top does is * shuffle some data around with 'mov' statements and then 'iret'. */ static __pinned_noinit char _df_stack[8]; static FUNC_NORETURN __used void df_handler_top(void); #ifdef CONFIG_X86_KPTI extern char z_trampoline_stack_end[]; #endif Z_GENERIC_SECTION(.tss) struct task_state_segment _main_tss = { .ss0 = DATA_SEG, #ifdef CONFIG_X86_KPTI /* Stack to land on when we get a soft/hard IRQ in user mode. * In a special kernel page that, unlike all other kernel pages, * is marked present in the user page table. 
*/ .esp0 = (uint32_t)&z_trampoline_stack_end #endif }; /* Special TSS for handling double-faults with a known good stack */ Z_GENERIC_SECTION(.tss) struct task_state_segment _df_tss = { .esp = (uint32_t)(_df_stack + sizeof(_df_stack)), .cs = CODE_SEG, .ds = DATA_SEG, .es = DATA_SEG, .ss = DATA_SEG, .eip = (uint32_t)df_handler_top, .cr3 = (uint32_t) K_MEM_PHYS_ADDR(POINTER_TO_UINT(&z_x86_kernel_ptables[0])) }; __pinned_func static __used void df_handler_bottom(void) { /* We're back in the main hardware task on the interrupt stack */ unsigned int reason = K_ERR_CPU_EXCEPTION; /* Restore the top half so it is runnable again */ _df_tss.esp = (uint32_t)(_df_stack + sizeof(_df_stack)); _df_tss.eip = (uint32_t)df_handler_top; LOG_ERR("Double Fault"); #ifdef CONFIG_THREAD_STACK_INFO /* To comply with MISRA 13.2 rule necessary to exclude code that depends * on the order of evaluation of function arguments. * Create 2 variables to store volatile data from the structure _df_esf */ uint32_t df_esf_esp = _df_esf.esp; uint16_t df_esf_cs = _df_esf.cs; if (z_x86_check_stack_bounds(df_esf_esp, 0, df_esf_cs)) { reason = K_ERR_STACK_CHK_FAIL; } #endif z_x86_fatal_error(reason, (struct arch_esf *)&_df_esf); } __pinned_func static FUNC_NORETURN __used void df_handler_top(void) { /* State of the system when the double-fault forced a task switch * will be in _main_tss. 
Set up a struct arch_esf and copy system state into * it */ _df_esf.esp = _main_tss.esp; _df_esf.ebp = _main_tss.ebp; _df_esf.ebx = _main_tss.ebx; _df_esf.esi = _main_tss.esi; _df_esf.edi = _main_tss.edi; _df_esf.edx = _main_tss.edx; _df_esf.eax = _main_tss.eax; _df_esf.ecx = _main_tss.ecx; _df_esf.errorCode = 0; _df_esf.eip = _main_tss.eip; _df_esf.cs = _main_tss.cs; _df_esf.eflags = _main_tss.eflags; /* Restore the main IA task to a runnable state */ _main_tss.esp = (uint32_t)(K_KERNEL_STACK_BUFFER( z_interrupt_stacks[0]) + CONFIG_ISR_STACK_SIZE); _main_tss.cs = CODE_SEG; _main_tss.ds = DATA_SEG; _main_tss.es = DATA_SEG; _main_tss.ss = DATA_SEG; _main_tss.eip = (uint32_t)df_handler_bottom; _main_tss.cr3 = k_mem_phys_addr(z_x86_kernel_ptables); _main_tss.eflags = 0U; /* NT bit is set in EFLAGS so we will task switch back to _main_tss * and run df_handler_bottom */ __asm__ volatile ("iret"); CODE_UNREACHABLE; } /* Configure a task gate descriptor in the IDT for the double fault * exception */ _X86_IDT_TSS_REGISTER(DF_TSS, -1, -1, IV_DOUBLE_FAULT, 0); #endif /* CONFIG_X86_ENABLE_TSS */ ```
/content/code_sandbox/arch/x86/core/ia32/fatal.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,912
```unknown /* * */ /** * @file * @brief Interrupt management support for IA-32 architecture * * This module implements assembly routines to manage interrupts on * the Intel IA-32 architecture. More specifically, the interrupt (asynchronous * exception) stubs are implemented in this module. The stubs are invoked when * entering and exiting a C interrupt handler. */ #define LOAPIC_BASE_ADDRESS DT_REG_ADDR(DT_NODELABEL(intc_loapic)) #include <zephyr/arch/x86/ia32/asm.h> #include <offsets_short.h> #include <zephyr/arch/cpu.h> #include <zephyr/drivers/interrupt_controller/sysapic.h> /* exports (internal APIs) */ GTEXT(_interrupt_enter) GTEXT(z_SpuriousIntNoErrCodeHandler) GTEXT(z_SpuriousIntHandler) GTEXT(_irq_sw_handler) GTEXT(z_dynamic_stubs_begin) /* externs */ GTEXT(arch_swap) #ifdef CONFIG_PM GTEXT(pm_system_resume) #endif /** * * @brief Inform the kernel of an interrupt * * This function is called from the interrupt stub created by IRQ_CONNECT() * to inform the kernel of an interrupt. This routine increments * _kernel.nested (to support interrupt nesting), switches to the * base of the interrupt stack, if not already on the interrupt stack, and then * saves the volatile integer registers onto the stack. Finally, control is * returned back to the interrupt stub code (which will then invoke the * "application" interrupt service routine). * * Only the volatile integer registers are saved since ISRs are assumed not to * utilize floating point (or SSE) instructions. * * WARNINGS * * Host-based tools and the target-based GDB agent depend on the stack frame * created by this routine to determine the locations of volatile registers. * These tools must be updated to reflect any changes to the stack frame. 
* * C function prototype: * * void _interrupt_enter(void *isr, void *isr_param); */ SECTION_FUNC(PINNED_TEXT, _interrupt_enter) /* * Note that the processor has pushed both the EFLAGS register * and the logical return address (cs:eip) onto the stack prior * to invoking the handler specified in the IDT. The stack looks * like this: * * 24 SS (only on privilege level change) * 20 ESP (only on privilege level change) * 16 EFLAGS * 12 CS * 8 EIP * 4 isr_param * 0 isr <-- stack pointer */ /* * The gen_idt tool creates an interrupt-gate descriptor for * all connections. The processor will automatically clear the IF * bit in the EFLAGS register upon execution of the handler, hence * this need not issue an 'cli' as the first instruction. * * Clear the direction flag. It is automatically restored when the * interrupt exits via the IRET instruction. */ cld #ifdef CONFIG_X86_KPTI call z_x86_trampoline_to_kernel #endif /* * Swap EAX with isr_param and EDX with isr. * Push ECX onto the stack */ xchgl %eax, 4(%esp) xchgl %edx, (%esp) pushl %ecx /* Now the stack looks like: * * EFLAGS * CS * EIP * saved EAX * saved EDX * saved ECX * * EAX = isr_param, EDX = isr */ /* Push EBP as we will use it for scratch space. 
* Also it helps in stack unwinding * Rest of the callee-saved regs get saved by invocation of C * functions (isr handler, arch_swap(), etc) */ pushl %ebp /* load %ecx with &_kernel */ movl $_kernel, %ecx /* switch to the interrupt stack for the non-nested case */ incl _kernel_offset_to_nested(%ecx) /* use interrupt stack if not nested */ cmpl $1, _kernel_offset_to_nested(%ecx) jne alreadyOnIntStack /* * switch to base of the interrupt stack: save esp in ebp, then load * irq_stack pointer */ movl %esp, %ebp movl _kernel_offset_to_irq_stack(%ecx), %esp /* save thread's stack pointer onto base of interrupt stack */ pushl %ebp /* Save stack pointer */ #ifdef CONFIG_PM cmpl $0, _kernel_offset_to_idle(%ecx) jne handle_idle /* fast path is !idle, in the pipeline */ #endif /* CONFIG_PM */ /* fall through to nested case */ alreadyOnIntStack: push %eax /* interrupt handler argument */ #if defined(CONFIG_TRACING_ISR) /* Save these as we are using to keep track of isr and isr_param */ pushl %eax pushl %edx call sys_trace_isr_enter popl %edx popl %eax #endif #ifdef CONFIG_NESTED_INTERRUPTS sti /* re-enable interrupts */ #endif /* Now call the interrupt handler */ call *%edx /* Discard ISR argument */ addl $0x4, %esp #ifdef CONFIG_NESTED_INTERRUPTS cli /* disable interrupts again */ #endif #if defined(CONFIG_TRACING_ISR) pushl %eax call sys_trace_isr_exit popl %eax #endif #if defined(CONFIG_X86_RUNTIME_IRQ_STATS) /* * The runtime_irq_stats() function should be implemented * by platform with this config. 
*/ pushl %eax call runtime_irq_stats popl %eax #endif xorl %eax, %eax #if defined(CONFIG_X2APIC) xorl %edx, %edx movl $(X86_X2APIC_BASE_MSR + (LOAPIC_EOI >> 4)), %ecx wrmsr #else /* xAPIC */ #ifdef DEVICE_MMIO_IS_IN_RAM movl Z_TOPLEVEL_RAM_NAME(LOAPIC_REGS_STR), %edx movl %eax, LOAPIC_EOI(%edx) #else movl %eax, (LOAPIC_BASE_ADDRESS + LOAPIC_EOI) #endif /* DEVICE_MMIO_IS_IN_RAM */ #endif /* CONFIG_X2APIC */ /* determine whether exiting from a nested interrupt */ movl $_kernel, %ecx decl _kernel_offset_to_nested(%ecx) /* dec interrupt nest count */ jne nestedInterrupt /* 'iret' if nested case */ #ifdef CONFIG_PREEMPT_ENABLED movl _kernel_offset_to_current(%ecx), %edx /* reschedule only if the scheduler says that we must do so */ cmpl %edx, _kernel_offset_to_ready_q_cache(%ecx) je noReschedule /* * Set X86_THREAD_FLAG_INT bit in k_thread to allow the upcoming call * to arch_swap() to determine whether non-floating registers need to be * preserved using the lazy save/restore algorithm, or to indicate to * debug tools that a preemptive context switch has occurred. */ #if defined(CONFIG_LAZY_FPU_SHARING) orb $X86_THREAD_FLAG_INT, _thread_offset_to_flags(%edx) #endif /* * A context reschedule is required: keep the volatile registers of * the interrupted thread on the context's stack. Utilize * the existing arch_swap() primitive to save the remaining * thread's registers (including floating point) and perform * a switch to the new thread. */ popl %esp /* switch back to outgoing thread's stack */ #ifdef CONFIG_STACK_SENTINEL call z_check_stack_sentinel #endif pushfl /* push KERNEL_LOCK_KEY argument */ call arch_swap addl $4, %esp /* pop KERNEL_LOCK_KEY argument */ /* * The interrupted thread has now been scheduled, * as the result of a _later_ invocation of arch_swap(). * * Now need to restore the interrupted thread's environment before * returning control to it at the point where it was interrupted ... 
*/ #if defined(CONFIG_LAZY_FPU_SHARING) /* * arch_swap() has restored the floating point registers, if needed. * Clear X86_THREAD_FLAG_INT in the interrupted thread's state * since it has served its purpose. */ movl _kernel + _kernel_offset_to_current, %eax andb $~X86_THREAD_FLAG_INT, _thread_offset_to_flags(%eax) #endif /* CONFIG_LAZY_FPU_SHARING */ /* Restore volatile registers and return to the interrupted thread */ popl %ebp popl %ecx popl %edx popl %eax /* Pop of EFLAGS will re-enable interrupts and restore direction flag */ KPTI_IRET #endif /* CONFIG_PREEMPT_ENABLED */ noReschedule: /* * A thread reschedule is not required; switch back to the * interrupted thread's stack and restore volatile registers */ popl %esp /* pop thread stack pointer */ #ifdef CONFIG_STACK_SENTINEL call z_check_stack_sentinel #endif /* fall through to 'nestedInterrupt' */ /* * For the nested interrupt case, the interrupt stack must still be * utilized, and more importantly, a rescheduling decision must * not be performed. */ nestedInterrupt: popl %ebp popl %ecx /* pop volatile registers in reverse order */ popl %edx popl %eax /* Pop of EFLAGS will re-enable interrupts and restore direction flag */ KPTI_IRET #ifdef CONFIG_PM handle_idle: pushl %eax pushl %edx /* Zero out _kernel.idle */ movl $0, _kernel_offset_to_idle(%ecx) /* * Beware that a timer driver's pm_system_resume() implementation might * expect that interrupts are disabled when invoked. This ensures that * the calculation and programming of the device for the next timer * deadline is not interrupted. */ call pm_system_resume popl %edx popl %eax jmp alreadyOnIntStack #endif /* CONFIG_PM */ /** * * z_SpuriousIntHandler - * @brief Spurious interrupt handler stubs * * Interrupt-gate descriptors are statically created for all slots in the IDT * that point to z_SpuriousIntHandler() or z_SpuriousIntNoErrCodeHandler(). 
The * former stub is connected to exception vectors where the processor pushes an * error code onto the stack (or kernel stack) in addition to the EFLAGS/CS/EIP * records. * * A spurious interrupt is considered a fatal condition; there is no provision * to return to the interrupted execution context and thus the volatile * registers are not saved. * * @return Never returns * * C function prototype: * * void z_SpuriousIntHandler (void); * * INTERNAL * The gen_idt tool creates an interrupt-gate descriptor for all * connections. The processor will automatically clear the IF bit * in the EFLAGS register upon execution of the handler, * thus z_SpuriousIntNoErrCodeHandler()/z_SpuriousIntHandler() shall be * invoked with interrupts disabled. */ SECTION_FUNC(PINNED_TEXT, z_SpuriousIntNoErrCodeHandler) pushl $0 /* push dummy err code onto stk */ /* fall through to z_SpuriousIntHandler */ SECTION_FUNC(PINNED_TEXT, z_SpuriousIntHandler) cld /* Clear direction flag */ /* Create the ESF */ pushl %eax pushl %ecx pushl %edx pushl %edi pushl %esi pushl %ebx pushl %ebp leal 44(%esp), %ecx /* Calculate ESP before exception occurred */ pushl %ecx /* Save calculated ESP */ pushl %esp /* push cur stack pointer: pEsf arg */ /* re-enable interrupts */ sti /* call the fatal error handler */ call z_x86_spurious_irq /* handler doesn't return */ #if CONFIG_IRQ_OFFLOAD SECTION_FUNC(PINNED_TEXT, _irq_sw_handler) push $0 push $z_irq_do_offload jmp _interrupt_enter #endif #if CONFIG_X86_DYNAMIC_IRQ_STUBS > 0 z_dynamic_irq_stub_common: /* stub number already pushed */ push $z_x86_dynamic_irq_handler jmp _interrupt_enter /* Create all the dynamic IRQ stubs * * NOTE: Please update DYN_STUB_SIZE in include/arch/x86/ia32/arch.h if you * change how large the generated stubs are, otherwise _get_dynamic_stub() * will be unable to correctly determine the offset */ /* * Create nice labels for all the stubs so we can see where we * are in a debugger */ .altmacro .macro __INT_STUB_NUM id 
z_dynamic_irq_stub_\id: .endm .macro INT_STUB_NUM id __INT_STUB_NUM %id .endm z_dynamic_stubs_begin: stub_num = 0 .rept ((CONFIG_X86_DYNAMIC_IRQ_STUBS + Z_DYN_STUB_PER_BLOCK - 1) / Z_DYN_STUB_PER_BLOCK) block_counter = 0 .rept Z_DYN_STUB_PER_BLOCK .if stub_num < CONFIG_X86_DYNAMIC_IRQ_STUBS INT_STUB_NUM stub_num /* * 2-byte push imm8. */ push $stub_num /* * Check to make sure this isn't the last stub in * a block, in which case we just fall through */ .if (block_counter <> (Z_DYN_STUB_PER_BLOCK - 1) && \ (stub_num <> CONFIG_X86_DYNAMIC_IRQ_STUBS - 1)) /* This should always be a 2-byte jmp rel8 */ jmp 1f .endif stub_num = stub_num + 1 block_counter = block_counter + 1 .endif .endr /* * This must a 5-bvte jump rel32, which is why z_dynamic_irq_stub_common * is before the actual stubs */ 1: jmp z_dynamic_irq_stub_common .endr #endif /* CONFIG_X86_DYNAMIC_IRQ_STUBS > 0 */ ```
/content/code_sandbox/arch/x86/core/ia32/intstub.S
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,202
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/toolchain.h> /** * @file * @brief Provide soft float function stubs for long double operations. * * GCC soft float does not support long double so these need to be * stubbed out. * * The function names come from the GCC public documentation. */ extern void abort(void); __weak void __addtf3(long double a, long double b) { k_oops(); } __weak void __addxf3(long double a, long double b) { k_oops(); } __weak void __subtf3(long double a, long double b) { k_oops(); } __weak void __subxf3(long double a, long double b) { k_oops(); } __weak void __multf3(long double a, long double b) { k_oops(); } __weak void __mulxf3(long double a, long double b) { k_oops(); } __weak void __divtf3(long double a, long double b) { k_oops(); } __weak void __divxf3(long double a, long double b) { k_oops(); } __weak void __negtf2(long double a) { k_oops(); } __weak void __negxf2(long double a) { k_oops(); } __weak void __extendsftf2(float a) { k_oops(); } __weak void __extendsfxf2(float a) { k_oops(); } __weak void __extenddftf2(double a) { k_oops(); } __weak void __extenddfxf2(double a) { k_oops(); } __weak void __truncxfdf2(long double a) { k_oops(); } __weak void __trunctfdf2(long double a) { k_oops(); } __weak void __truncxfsf2(long double a) { k_oops(); } __weak void __trunctfsf2(long double a) { k_oops(); } __weak void __fixtfsi(long double a) { k_oops(); } __weak void __fixxfsi(long double a) { k_oops(); } __weak void __fixtfdi(long double a) { k_oops(); } __weak void __fixxfdi(long double a) { k_oops(); } __weak void __fixtfti(long double a) { k_oops(); } __weak void __fixxfti(long double a) { k_oops(); } __weak void __fixunstfsi(long double a) { k_oops(); } __weak void __fixunsxfsi(long double a) { k_oops(); } __weak void __fixunstfdi(long double a) { k_oops(); } __weak void __fixunsxfdi(long double a) { k_oops(); } __weak void __fixunstfti(long double a) { k_oops(); } __weak void __fixunsxfti(long double a) { k_oops(); } __weak 
void __floatsitf(int i) { k_oops(); } __weak void __floatsixf(int i) { k_oops(); } __weak void __floatditf(long i) { k_oops(); } __weak void __floatdixf(long i) { k_oops(); } __weak void __floattitf(long long i) { k_oops(); } __weak void __floattixf(long long i) { k_oops(); } __weak void __floatunsitf(unsigned int i) { k_oops(); } __weak void __floatunsixf(unsigned int i) { k_oops(); } __weak void __floatunditf(unsigned long i) { k_oops(); } __weak void __floatundixf(unsigned long i) { k_oops(); } __weak void __floatuntitf(unsigned long long i) { k_oops(); } __weak void __floatuntixf(unsigned long long i) { k_oops(); } __weak void __cmptf2(long double a, long double b) { k_oops(); } __weak void __unordtf2(long double a, long double b) { k_oops(); } __weak void __eqtf2(long double a, long double b) { k_oops(); } __weak void __netf2(long double a, long double b) { k_oops(); } __weak void __getf2(long double a, long double b) { k_oops(); } __weak void __lttf2(long double a, long double b) { k_oops(); } __weak void __letf2(long double a, long double b) { k_oops(); } __weak void __gttf2(long double a, long double b) { k_oops(); } __weak void __powitf2(long double a, int b) { k_oops(); } __weak void __powixf2(long double a, int b) { k_oops(); } ```
/content/code_sandbox/arch/x86/core/ia32/soft_float_stubs.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,068
```objective-c /* */ #ifndef ZEPHYR_ARCH_X86_INCLUDE_KERNEL_ARCH_FUNC_H_ #define ZEPHYR_ARCH_X86_INCLUDE_KERNEL_ARCH_FUNC_H_ #include <kernel_arch_data.h> #include <zephyr/arch/x86/mmustructs.h> #ifdef CONFIG_X86_64 #include <intel64/kernel_arch_func.h> #else #include <ia32/kernel_arch_func.h> #endif #ifndef _ASMLANGUAGE static inline bool arch_is_in_isr(void) { #ifdef CONFIG_SMP /* On SMP, there is a race vs. the current CPU changing if we * are preempted. Need to mask interrupts while inspecting * (note deliberate lack of gcc size suffix on the * instructions, we need to work with both architectures here) */ bool ret; __asm__ volatile ("pushf; cli"); ret = arch_curr_cpu()->nested != 0; __asm__ volatile ("popf"); return ret; #else return _kernel.cpus[0].nested != 0U; #endif } struct multiboot_info; extern FUNC_NORETURN void z_prep_c(void *arg); #ifdef CONFIG_X86_VERY_EARLY_CONSOLE /* Setup ultra-minimal serial driver for printk() */ void z_x86_early_serial_init(void); #endif /* CONFIG_X86_VERY_EARLY_CONSOLE */ /* Called upon CPU exception that is unhandled and hence fatal; dump * interesting info and call z_x86_fatal_error() */ FUNC_NORETURN void z_x86_unhandled_cpu_exception(uintptr_t vector, const struct arch_esf *esf); /* Called upon unrecoverable error; dump registers and transfer control to * kernel via z_fatal_error() */ FUNC_NORETURN void z_x86_fatal_error(unsigned int reason, const struct arch_esf *esf); /* Common handling for page fault exceptions */ void z_x86_page_fault_handler(struct arch_esf *esf); #ifdef CONFIG_THREAD_STACK_INFO /** * @brief Check if a memory address range falls within the stack * * Given a memory address range, ensure that it falls within the bounds * of the faulting context's stack. 
* * @param addr Starting address * @param size Size of the region, or 0 if we just want to see if addr is * in bounds * @param cs Code segment of faulting context * @return true if addr/size region is not within the thread stack */ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs); #endif /* CONFIG_THREAD_STACK_INFO */ #ifdef CONFIG_USERSPACE extern FUNC_NORETURN void z_x86_userspace_enter(k_thread_entry_t user_entry, void *p1, void *p2, void *p3, uintptr_t stack_end, uintptr_t stack_start); /* Preparation steps needed for all threads if user mode is turned on. * * Returns the initial entry point to swap into. */ void *z_x86_userspace_prepare_thread(struct k_thread *thread); #endif /* CONFIG_USERSPACE */ void z_x86_do_kernel_oops(const struct arch_esf *esf); /* * Find a free IRQ vector at the specified priority, or return -1 if none left. * For multiple vector allocated one after another, prev_vector can be used to * speed up the allocation: it only needs to be filled with the previous * allocated vector, or -1 to start over. */ int z_x86_allocate_vector(unsigned int priority, int prev_vector); /* * Connect a vector */ void z_x86_irq_connect_on_vector(unsigned int irq, uint8_t vector, void (*func)(const void *arg), const void *arg); #endif /* !_ASMLANGUAGE */ #endif /* ZEPHYR_ARCH_X86_INCLUDE_KERNEL_ARCH_FUNC_H_ */ ```
/content/code_sandbox/arch/x86/include/kernel_arch_func.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
801
```c /* * */ /** * @file * @brief Interrupt support for IA-32 arch * * INTERNAL * The _idt_base_address symbol is used to determine the base address of the IDT. * (It is generated by the linker script, and doesn't correspond to an actual * global variable.) */ #include <zephyr/kernel.h> #include <zephyr/arch/cpu.h> #include <zephyr/kernel_structs.h> #include <zephyr/sys/__assert.h> #include <zephyr/irq.h> #include <zephyr/tracing/tracing.h> #include <kswap.h> #include <zephyr/arch/x86/ia32/segmentation.h> extern void z_SpuriousIntHandler(void *handler); extern void z_SpuriousIntNoErrCodeHandler(void *handler); /* * Place the addresses of the spurious interrupt handlers into the intList * section. The genIdt tool can then populate any unused vectors with * these routines. */ void *__attribute__((section(".spurIsr"))) MK_ISR_NAME(z_SpuriousIntHandler) = &z_SpuriousIntHandler; void *__attribute__((section(".spurNoErrIsr"))) MK_ISR_NAME(z_SpuriousIntNoErrCodeHandler) = &z_SpuriousIntNoErrCodeHandler; __pinned_func void arch_isr_direct_footer_swap(unsigned int key) { (void)z_swap_irqlock(key); } #if CONFIG_X86_DYNAMIC_IRQ_STUBS > 0 /* * z_interrupt_vectors_allocated[] bitfield is generated by the 'gen_idt' tool. * It is initialized to identify which interrupts have been statically * connected and which interrupts are available to be dynamically connected at * run time, with a 1 bit indicating a free vector. The variable itself is * defined in the linker file. 
*/ extern unsigned int z_interrupt_vectors_allocated[]; struct dyn_irq_info { /** IRQ handler */ void (*handler)(const void *param); /** Parameter to pass to the handler */ const void *param; }; /* * Instead of creating a large sparse table mapping all possible IDT vectors * to dyn_irq_info, the dynamic stubs push a "stub id" onto the stack * which is used by common_dynamic_handler() to fetch the appropriate * information out of this much smaller table */ __pinned_bss static struct dyn_irq_info dyn_irq_list[CONFIG_X86_DYNAMIC_IRQ_STUBS]; __pinned_bss static unsigned int next_irq_stub; /* Memory address pointing to where in ROM the code for the dynamic stubs are. * Linker symbol. */ extern char z_dynamic_stubs_begin[]; /** * @brief Allocate a free interrupt vector given <priority> * * This routine scans the z_interrupt_vectors_allocated[] array for a free vector * that satisfies the specified <priority>. * * This routine assumes that the relationship between interrupt priority and * interrupt vector is : * * priority = (vector / 16) - 2; * * Vectors 0 to 31 are reserved for CPU exceptions and do NOT fall under * the priority scheme. The first vector used for priority level 0 will be 32. * Each interrupt priority level contains 16 vectors. * * It is also assumed that the interrupt controllers are capable of managing * interrupt requests on a per-vector level as opposed to a per-priority level. * For example, the local APIC on Pentium4 and later processors, the in-service * register (ISR) and the interrupt request register (IRR) are 256 bits wide. 
* * @return allocated interrupt vector */ static unsigned int priority_to_free_vector(unsigned int requested_priority) { unsigned int entry; unsigned int fsb; /* first set bit in entry */ unsigned int search_set; unsigned int vector_block; unsigned int vector; static unsigned int mask[2] = {0x0000ffffU, 0xffff0000U}; vector_block = requested_priority + 2; __ASSERT(((vector_block << 4) + 15) <= CONFIG_IDT_NUM_VECTORS, "IDT too small (%d entries) to use priority %d", CONFIG_IDT_NUM_VECTORS, requested_priority); /* * Atomically allocate a vector from the * z_interrupt_vectors_allocated[] array to prevent race conditions * with other threads attempting to allocate an interrupt * vector. * * Note: As z_interrupt_vectors_allocated[] is initialized by the * 'gen_idt.py' tool, it is critical that this routine use the same * algorithm as the 'gen_idt.py' tool for allocating interrupt vectors. */ entry = vector_block >> 1; /* * The z_interrupt_vectors_allocated[] entry indexed by 'entry' * is a 32-bit quantity and thus represents the vectors for a pair of * priority levels. Mask out the unwanted priority level and then use * find_lsb_set() to scan for an available vector of the requested * priority. * * Note that find_lsb_set() returns bit position from 1 to 32, or 0 if * the argument is zero. */ search_set = mask[vector_block & 1] & z_interrupt_vectors_allocated[entry]; fsb = find_lsb_set(search_set); __ASSERT(fsb != 0U, "No remaning vectors for priority level %d", requested_priority); /* * An available vector of the requested priority was found. * Mark it as allocated by clearing the bit. 
*/ --fsb; z_interrupt_vectors_allocated[entry] &= ~BIT(fsb); /* compute vector given allocated bit within the priority level */ vector = (entry << 5) + fsb; return vector; } /** * @brief Get the memory address of an unused dynamic IRQ or exception stub * * We generate at build time a set of dynamic stubs which push * a stub index onto the stack for use as an argument by * common handling code. * * @param stub_idx Stub number to fetch the corresponding stub function * @return Pointer to the stub code to install into the IDT */ __pinned_func static void *get_dynamic_stub(int stub_idx) { uint32_t offset; /* * Because we want the sizes of the stubs to be consistent and minimized, * stubs are grouped into blocks, each containing a push and subsequent * 2-byte jump instruction to the end of the block, which then contains * a larger jump instruction to common dynamic IRQ handling code */ offset = (stub_idx * Z_DYN_STUB_SIZE) + ((stub_idx / Z_DYN_STUB_PER_BLOCK) * Z_DYN_STUB_LONG_JMP_EXTRA_SIZE); return (void *)((uint32_t)&z_dynamic_stubs_begin + offset); } extern const struct pseudo_descriptor z_x86_idt; static void idt_vector_install(int vector, void *irq_handler) { unsigned int key; key = irq_lock(); z_init_irq_gate(&z_x86_idt.entries[vector], CODE_SEG, (uint32_t)irq_handler, 0); irq_unlock(key); } int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, void (*routine)(const void *parameter), const void *parameter, uint32_t flags) { int vector, stub_idx, key; key = irq_lock(); vector = priority_to_free_vector(priority); /* 0 indicates not used, vectors for interrupts start at 32 */ __ASSERT(_irq_to_interrupt_vector[irq] == 0U, "IRQ %d already configured", irq); _irq_to_interrupt_vector[irq] = vector; z_irq_controller_irq_config(vector, irq, flags); stub_idx = next_irq_stub++; __ASSERT(stub_idx < CONFIG_X86_DYNAMIC_IRQ_STUBS, "No available interrupt stubs found"); dyn_irq_list[stub_idx].handler = routine; dyn_irq_list[stub_idx].param = parameter; 
idt_vector_install(vector, get_dynamic_stub(stub_idx)); irq_unlock(key); return vector; } /** * @brief Common dynamic IRQ handler function * * This gets called by the IRQ entry asm code with the stub index supplied as * an argument. Look up the required information in dyn_irq_list and * execute it. * * @param stub_idx Index into the dyn_irq_list array */ __pinned_func void z_x86_dynamic_irq_handler(uint8_t stub_idx) { dyn_irq_list[stub_idx].handler(dyn_irq_list[stub_idx].param); } #endif /* CONFIG_X86_DYNAMIC_IRQ_STUBS > 0 */ ```
/content/code_sandbox/arch/x86/core/ia32/irq_manage.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,841
```objective-c /* */ #ifndef ZEPHYR_ARCH_X86_INCLUDE_OFFSETS_SHORT_ARCH_H_ #define ZEPHYR_ARCH_X86_INCLUDE_OFFSETS_SHORT_ARCH_H_ #ifdef CONFIG_X86_64 #include <intel64/offsets_short_arch.h> #else #include <ia32/offsets_short_arch.h> #endif #define _thread_offset_to_flags \ (___thread_t_arch_OFFSET + ___thread_arch_t_flags_OFFSET) #ifdef CONFIG_USERSPACE #define _thread_offset_to_psp \ (___thread_t_arch_OFFSET + ___thread_arch_t_psp_OFFSET) #define _thread_offset_to_ptables \ (___thread_t_arch_OFFSET + ___thread_arch_t_ptables_OFFSET) #endif /* CONFIG_USERSPACE */ #endif /* ZEPHYR_ARCH_X86_INCLUDE_OFFSETS_SHORT_ARCH_H_ */ ```
/content/code_sandbox/arch/x86/include/offsets_short_arch.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
161
```objective-c /* */ #ifndef ZEPHYR_ARCH_X86_INCLUDE_KERNEL_ARCH_DATA_H_ #define ZEPHYR_ARCH_X86_INCLUDE_KERNEL_ARCH_DATA_H_ /* * Exception/interrupt vector definitions: vectors 20 to 31 are reserved * for Intel; vectors 32 to 255 are user defined interrupt vectors. */ #define IV_DIVIDE_ERROR 0 #define IV_DEBUG 1 #define IV_NON_MASKABLE_INTERRUPT 2 #define IV_BREAKPOINT 3 #define IV_OVERFLOW 4 #define IV_BOUND_RANGE 5 #define IV_INVALID_OPCODE 6 #define IV_DEVICE_NOT_AVAILABLE 7 #define IV_DOUBLE_FAULT 8 #define IV_COPROC_SEGMENT_OVERRUN 9 #define IV_INVALID_TSS 10 #define IV_SEGMENT_NOT_PRESENT 11 #define IV_STACK_FAULT 12 #define IV_GENERAL_PROTECTION 13 #define IV_PAGE_FAULT 14 #define IV_RESERVED 15 #define IV_X87_FPU_FP_ERROR 16 #define IV_ALIGNMENT_CHECK 17 #define IV_MACHINE_CHECK 18 #define IV_SIMD_FP 19 #define IV_VIRT_EXCEPTION 20 #define IV_SECURITY_EXCEPTION 30 #define IV_IRQS 32 /* start of vectors available for IRQs */ #define IV_NR_VECTORS 256 /* total number of vectors */ /* * EFLAGS/RFLAGS definitions. (RFLAGS is just zero-extended EFLAGS.) */ #define EFLAGS_IF BIT(9) /* interrupts enabled */ #define EFLAGS_DF BIT(10) /* Direction flag */ #define EFLAGS_INITIAL (EFLAGS_IF) #define EFLAGS_SYSCALL (EFLAGS_IF | EFLAGS_DF) /* * Control register definitions. */ #define CR0_PG BIT(31) /* enable paging */ #define CR0_WP BIT(16) /* honor W bit even when supervisor */ #define CR4_PSE BIT(4) /* Page size extension (4MB pages) */ #define CR4_PAE BIT(5) /* enable PAE */ #define CR4_OSFXSR BIT(9) /* enable SSE (OS FXSAVE/RSTOR) */ #ifndef _ASMLANGUAGE /* x86 boot argument (see prep_c.c) */ struct x86_boot_arg { int boot_type; void *arg; }; typedef struct x86_boot_arg x86_boot_arg_t; #endif /* _ASMLANGUAGE */ #ifdef CONFIG_X86_64 #include <intel64/kernel_arch_data.h> #else #include <ia32/kernel_arch_data.h> #endif #endif /* ZEPHYR_ARCH_X86_INCLUDE_KERNEL_ARCH_DATA_H_ */ ```
/content/code_sandbox/arch/x86/include/kernel_arch_data.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
537
```objective-c /* */ #ifndef ZEPHYR_ARCH_X86_INCLUDE_INTEL64_KERNEL_ARCH_FUNC_H_ #define ZEPHYR_ARCH_X86_INCLUDE_INTEL64_KERNEL_ARCH_FUNC_H_ #include <zephyr/kernel_structs.h> #ifndef _ASMLANGUAGE extern void z_x86_switch(void *switch_to, void **switched_from); static inline void arch_switch(void *switch_to, void **switched_from) { z_x86_switch(switch_to, switched_from); } /** * @brief Initialize scheduler IPI vector. * * Called in early BSP boot to set up scheduler IPI handling. */ extern void z_x86_ipi_setup(void); static inline void arch_kernel_init(void) { /* nothing */; } FUNC_NORETURN void z_x86_cpu_init(struct x86_cpuboot *cpuboot); void x86_sse_init(struct k_thread *thread); void z_x86_syscall_entry_stub(void); bool z_x86_do_kernel_nmi(const struct arch_esf *esf); #endif /* _ASMLANGUAGE */ #endif /* ZEPHYR_ARCH_X86_INCLUDE_INTEL64_KERNEL_ARCH_FUNC_H_ */ ```
/content/code_sandbox/arch/x86/include/intel64/kernel_arch_func.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
235
```objective-c /* * * * Internal memory management interfaces implemented in x86_mmu.c. * None of these are application-facing, use only if you know what you are * doing! */ #ifndef ZEPHYR_ARCH_X86_INCLUDE_X86_MMU_H #define ZEPHYR_ARCH_X86_INCLUDE_X86_MMU_H #include <zephyr/kernel.h> #include <zephyr/arch/x86/mmustructs.h> #include <zephyr/kernel/mm.h> #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) #define XD_SUPPORTED #define BITL BIT64 #define PRI_ENTRY "0x%016llx" #else #define BITL BIT #define PRI_ENTRY "0x%08x" #endif /* * Common flags in the same bit position regardless of which structure level, * although not every flag is supported at every level, and some may be * ignored depending on the state of other bits (such as P or PS) * * These flags indicate bit position, and can be used for setting flags or * masks as needed. */ #define MMU_P BITL(0) /** Present */ #define MMU_RW BITL(1) /** Read-Write */ #define MMU_US BITL(2) /** User-Supervisor */ #define MMU_PWT BITL(3) /** Page Write Through */ #define MMU_PCD BITL(4) /** Page Cache Disable */ #define MMU_A BITL(5) /** Accessed */ #define MMU_D BITL(6) /** Dirty */ #define MMU_PS BITL(7) /** Page Size (non PTE)*/ #define MMU_PAT BITL(7) /** Page Attribute (PTE) */ #define MMU_G BITL(8) /** Global */ #ifdef XD_SUPPORTED #define MMU_XD BITL(63) /** Execute Disable */ #else #define MMU_XD 0 #endif /* Unused PTE bits ignored by the CPU, which we use for our own OS purposes. * These bits ignored for all paging modes. */ #define MMU_IGNORED0 BITL(9) #define MMU_IGNORED1 BITL(10) #define MMU_IGNORED2 BITL(11) /* Page fault error code flags. See Chapter 4.7 of the Intel SDM vol. 3A. 
*/ #define PF_P BIT(0) /* 0 Non-present page 1 Protection violation */ #define PF_WR BIT(1) /* 0 Read 1 Write */ #define PF_US BIT(2) /* 0 Supervisor mode 1 User mode */ #define PF_RSVD BIT(3) /* 1 reserved bit set */ #define PF_ID BIT(4) /* 1 instruction fetch */ #define PF_PK BIT(5) /* 1 protection-key violation */ #define PF_SGX BIT(15) /* 1 SGX-specific access control requirements */ #ifndef _ASMLANGUAGE #ifdef CONFIG_EXCEPTION_DEBUG /** * Dump out page table entries for a particular virtual memory address * * For the provided memory address, dump out interesting information about * its mapping to the error log * * @param ptables Page tables to walk * @param virt Virtual address to inspect */ void z_x86_dump_mmu_flags(pentry_t *ptables, void *virt); /** * Fetch the page table entry for a virtual memory address * * @param paging_level [out] what paging level the entry was found at. * 0=toplevel * @param val Value stored in page table entry, with address and flags * @param ptables Toplevel pointer to page tables * @param virt Virtual address to lookup */ void z_x86_pentry_get(int *paging_level, pentry_t *val, pentry_t *ptables, void *virt); /** * Debug function for dumping out page tables * * Iterates through the entire linked set of page table structures, * dumping out codes for the configuration of each table entry. * * Entry codes: * * . - not present * w - present, writable, not executable * a - present, writable, executable * r - present, read-only, not executable * x - present, read-only, executable * * Entry codes in uppercase indicate that user mode may access. 
* * Color is used to indicate the physical mapping characteristics: * * yellow - Identity mapping (virt = phys) * green - Fixed virtual memory mapping (virt = phys + constant) * magenta - entry is child page table * cyan - General mapped memory * * @param ptables Top-level pointer to the page tables, as programmed in CR3 */ void z_x86_dump_page_tables(pentry_t *ptables); #endif /* CONFIG_EXCEPTION_DEBUG */ #ifdef CONFIG_X86_STACK_PROTECTION /* Legacy function - set identity-mapped MMU stack guard page to RO in the * kernel's page tables to prevent writes and generate an exception */ void z_x86_set_stack_guard(k_thread_stack_t *stack); #endif #ifdef CONFIG_USERSPACE #ifdef CONFIG_X86_KPTI /* Defined in linker script. Contains all the data that must be mapped * in a KPTI table even though US bit is not set (trampoline stack, GDT, * IDT, etc) */ extern uint8_t z_shared_kernel_page_start; #ifdef CONFIG_DEMAND_PAGING /* Called from page fault handler. ptables here is the ptage tables for the * faulting user thread and not the current set of page tables */ extern bool z_x86_kpti_is_access_ok(void *virt, pentry_t *ptables) #endif /* CONFIG_DEMAND_PAGING */ #endif /* CONFIG_X86_KPTI */ #endif /* CONFIG_USERSPACE */ #ifdef CONFIG_X86_PAE #define PTABLES_ALIGN 0x1fU #else #define PTABLES_ALIGN 0xfffU #endif /* Set CR3 to a physical address. There must be a valid top-level paging * structure here or the CPU will triple fault. The incoming page tables must * have the same kernel mappings wrt supervisor mode. Don't use this function * unless you know exactly what you are doing. 
*/ static inline void z_x86_cr3_set(uintptr_t phys) { __ASSERT((phys & PTABLES_ALIGN) == 0U, "unaligned page tables"); #ifdef CONFIG_X86_64 __asm__ volatile("movq %0, %%cr3\n\t" : : "r" (phys) : "memory"); #else __asm__ volatile("movl %0, %%cr3\n\t" : : "r" (phys) : "memory"); #endif } /* Return cr3 value, which is the physical (not virtual) address of the * current set of page tables */ static inline uintptr_t z_x86_cr3_get(void) { uintptr_t cr3; #ifdef CONFIG_X86_64 __asm__ volatile("movq %%cr3, %0\n\t" : "=r" (cr3)); #else __asm__ volatile("movl %%cr3, %0\n\t" : "=r" (cr3)); #endif return cr3; } /* Return the virtual address of the page tables installed in this CPU in CR3 */ static inline pentry_t *z_x86_page_tables_get(void) { return k_mem_virt_addr(z_x86_cr3_get()); } /* Return cr2 value, which contains the page fault linear address. * See Section 6.15 of the IA32 Software Developer's Manual vol 3. * Used by page fault handling code. */ static inline void *z_x86_cr2_get(void) { void *cr2; #ifdef CONFIG_X86_64 __asm__ volatile("movq %%cr2, %0\n\t" : "=r" (cr2)); #else __asm__ volatile("movl %%cr2, %0\n\t" : "=r" (cr2)); #endif return cr2; } /* Kernel's page table. This is in CR3 for all supervisor threads. * if KPTI is enabled, we switch to this when handling exceptions or syscalls */ extern pentry_t z_x86_kernel_ptables[]; /* Get the page tables used by this thread during normal execution */ static inline pentry_t *z_x86_thread_page_tables_get(struct k_thread *thread) { #if defined(CONFIG_USERSPACE) && !defined(CONFIG_X86_COMMON_PAGE_TABLE) if (!IS_ENABLED(CONFIG_X86_KPTI) || (thread->base.user_options & K_USER) != 0U) { /* If KPTI is enabled, supervisor threads always use * the kernel's page tables and not the page tables associated * with their memory domain. */ return k_mem_virt_addr(thread->arch.ptables); } #else ARG_UNUSED(thread); #endif return z_x86_kernel_ptables; } #ifdef CONFIG_SMP /* Handling function for TLB shootdown inter-processor interrupts. 
*/ void z_x86_tlb_ipi(const void *arg); #endif #ifdef CONFIG_X86_COMMON_PAGE_TABLE void z_x86_swap_update_common_page_table(struct k_thread *incoming); #endif /* Early-boot paging setup tasks, called from prep_c */ void z_x86_mmu_init(void); #endif /* _ASMLANGUAGE */ #endif /* ZEPHYR_ARCH_X86_INCLUDE_X86_MMU_H */ ```
/content/code_sandbox/arch/x86/include/x86_mmu.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,036
```objective-c /* */ #ifndef ZEPHYR_ARCH_X86_INCLUDE_INTEL64_KERNEL_ARCH_DATA_H_ #define ZEPHYR_ARCH_X86_INCLUDE_INTEL64_KERNEL_ARCH_DATA_H_ #include <zephyr/arch/x86/mmustructs.h> #ifndef _ASMLANGUAGE /* linker symbols defining the bounds of the kernel part loaded in locore */ extern char _locore_start[], _locore_end[]; /* * Per-CPU bootstrapping parameters. See locore.S and cpu.c. */ struct x86_cpuboot { volatile int ready; /* CPU has started */ uint16_t tr; /* selector for task register */ struct x86_tss64 *gs_base; /* Base address for GS segment */ uint64_t sp; /* initial stack pointer */ size_t stack_size; /* size of stack */ arch_cpustart_t fn; /* kernel entry function */ void *arg; /* argument for above function */ uint8_t cpu_id; /* CPU ID */ }; typedef struct x86_cpuboot x86_cpuboot_t; extern uint8_t x86_cpu_loapics[]; /* CPU logical ID -> local APIC ID */ #endif /* _ASMLANGUAGE */ #ifdef CONFIG_X86_KPTI #define Z_X86_TRAMPOLINE_STACK_SIZE 128 #endif #ifdef CONFIG_X86_KPTI #define TRAMPOLINE_STACK(n) \ uint8_t z_x86_trampoline_stack##n[Z_X86_TRAMPOLINE_STACK_SIZE] \ __attribute__ ((section(".trampolines"))); #define TRAMPOLINE_INIT(n) \ .ist2 = (uint64_t)z_x86_trampoline_stack##n + Z_X86_TRAMPOLINE_STACK_SIZE, #else #define TRAMPOLINE_STACK(n) #define TRAMPOLINE_INIT(n) #endif /* CONFIG_X86_KPTI */ #define ACPI_CPU_INIT(n, _) \ uint8_t z_x86_exception_stack##n[CONFIG_X86_EXCEPTION_STACK_SIZE] __aligned(16); \ uint8_t z_x86_nmi_stack##n[CONFIG_X86_EXCEPTION_STACK_SIZE] __aligned(16); \ TRAMPOLINE_STACK(n); \ Z_GENERIC_SECTION(.tss) \ struct x86_tss64 tss##n = { \ TRAMPOLINE_INIT(n) \ .ist6 = (uint64_t)z_x86_nmi_stack##n + CONFIG_X86_EXCEPTION_STACK_SIZE, \ .ist7 = (uint64_t)z_x86_exception_stack##n + CONFIG_X86_EXCEPTION_STACK_SIZE, \ .iomapb = 0xFFFF, .cpu = &(_kernel.cpus[n]) \ } #define X86_CPU_BOOT_INIT(n, _) \ { \ .tr = (0x40 + (16 * n)), \ .gs_base = &tss##n, \ .sp = (uint64_t)z_interrupt_stacks[n] + \ K_KERNEL_STACK_LEN(CONFIG_ISR_STACK_SIZE), \ 
.stack_size = K_KERNEL_STACK_LEN(CONFIG_ISR_STACK_SIZE), \ .fn = z_prep_c, \ .arg = &x86_cpu_boot_arg, \ } #define STACK_ARRAY_IDX(n, _) n #define DEFINE_STACK_ARRAY_IDX\ LISTIFY(CONFIG_MP_MAX_NUM_CPUS, STACK_ARRAY_IDX, (,)) #endif /* ZEPHYR_ARCH_X86_INCLUDE_INTEL64_KERNEL_ARCH_DATA_H_ */ ```
/content/code_sandbox/arch/x86/include/intel64/kernel_arch_data.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
765
```objective-c /* * */ /* this file is only meant to be included by kernel_structs.h */ #ifndef ZEPHYR_ARCH_X86_INCLUDE_IA32_KERNEL_ARCH_FUNC_H_ #define ZEPHYR_ARCH_X86_INCLUDE_IA32_KERNEL_ARCH_FUNC_H_ #ifndef _ASMLANGUAGE #include <stddef.h> /* For size_t */ #ifdef __cplusplus extern "C" { #endif static inline void arch_kernel_init(void) { /* No-op on this arch */ } static ALWAYS_INLINE void arch_thread_return_value_set(struct k_thread *thread, unsigned int value) { /* write into 'eax' slot created in z_swap() entry */ *(unsigned int *)(thread->callee_saved.esp) = value; } extern void arch_cpu_atomic_idle(unsigned int key); /* ASM code to fiddle with registers to enable the MMU with PAE paging */ void z_x86_enable_paging(void); #ifdef __cplusplus } #endif #endif /* _ASMLANGUAGE */ #endif /* ZEPHYR_ARCH_X86_INCLUDE_IA32_KERNEL_ARCH_FUNC_H_ */ ```
/content/code_sandbox/arch/x86/include/ia32/kernel_arch_func.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
216
```objective-c
/* */

/*
 * Shorthand byte offsets into struct k_thread for the Intel64 register
 * save areas. Each macro composes a generated struct-member offset from
 * <zephyr/offsets.h> so that assembly code can address a saved register
 * as [thread + _thread_offset_to_<reg>].
 */

#ifndef ZEPHYR_ARCH_X86_INCLUDE_INTEL64_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_X86_INCLUDE_INTEL64_OFFSETS_SHORT_ARCH_H_

#include <zephyr/offsets.h>

/* Callee-saved registers, stored in thread->callee_saved */

#define _thread_offset_to_rsp \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_rsp_OFFSET)

#define _thread_offset_to_rbx \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_rbx_OFFSET)

#define _thread_offset_to_rbp \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_rbp_OFFSET)

#define _thread_offset_to_r12 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_r12_OFFSET)

#define _thread_offset_to_r13 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_r13_OFFSET)

#define _thread_offset_to_r14 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_r14_OFFSET)

#define _thread_offset_to_r15 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_r15_OFFSET)

#define _thread_offset_to_rip \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_rip_OFFSET)

#define _thread_offset_to_rflags \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_rflags_OFFSET)

/* Caller-saved registers and segment state, stored in thread->arch */

#define _thread_offset_to_rax \
	(___thread_t_arch_OFFSET + ___thread_arch_t_rax_OFFSET)

#define _thread_offset_to_rcx \
	(___thread_t_arch_OFFSET + ___thread_arch_t_rcx_OFFSET)

#define _thread_offset_to_rdx \
	(___thread_t_arch_OFFSET + ___thread_arch_t_rdx_OFFSET)

#define _thread_offset_to_rsi \
	(___thread_t_arch_OFFSET + ___thread_arch_t_rsi_OFFSET)

#define _thread_offset_to_rdi \
	(___thread_t_arch_OFFSET + ___thread_arch_t_rdi_OFFSET)

#define _thread_offset_to_r8 \
	(___thread_t_arch_OFFSET + ___thread_arch_t_r8_OFFSET)

#define _thread_offset_to_r9 \
	(___thread_t_arch_OFFSET + ___thread_arch_t_r9_OFFSET)

#define _thread_offset_to_r10 \
	(___thread_t_arch_OFFSET + ___thread_arch_t_r10_OFFSET)

#define _thread_offset_to_r11 \
	(___thread_t_arch_OFFSET + ___thread_arch_t_r11_OFFSET)

/* SSE/floating-point save area */
#define _thread_offset_to_sse \
	(___thread_t_arch_OFFSET + ___thread_arch_t_sse_OFFSET)

#define _thread_offset_to_ss \
	(___thread_t_arch_OFFSET + ___thread_arch_t_ss_OFFSET)

#define _thread_offset_to_cs \
	(___thread_t_arch_OFFSET + ___thread_arch_t_cs_OFFSET)

#endif /* ZEPHYR_ARCH_X86_INCLUDE_INTEL64_OFFSETS_SHORT_ARCH_H_ */
```
/content/code_sandbox/arch/x86/include/intel64/offsets_short_arch.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
564
```objective-c
/*
 *
 */

/**
 * @file
 * @brief Private kernel definitions (IA-32)
 *
 * This file contains private kernel structures definitions and various
 * other definitions for the Intel Architecture 32 bit (IA-32) processor
 * architecture.
 * The header include/kernel.h contains the public kernel interface
 * definitions, with include/arch/x86/ia32/arch.h supplying the
 * IA-32 specific portions of the public kernel interface.
 *
 * This file is also included by assembly language files which must #define
 * _ASMLANGUAGE before including this header file. Note that kernel
 * assembly source files obtains structure offset values via "absolute symbols"
 * in the offsets.o module.
 */

/* this file is only meant to be included by kernel_structs.h */

#ifndef ZEPHYR_ARCH_X86_INCLUDE_IA32_KERNEL_ARCH_DATA_H_
#define ZEPHYR_ARCH_X86_INCLUDE_IA32_KERNEL_ARCH_DATA_H_

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <ia32/exception.h>
#include <zephyr/sys/util.h>

#ifndef _ASMLANGUAGE
#include <zephyr/kernel.h>
#include <zephyr/types.h>
#include <zephyr/sys/dlist.h>
#endif

/* Some configurations require that the stack/registers be adjusted before
 * z_thread_entry. See discussion in swap.S for z_x86_thread_entry_wrapper()
 */
#if defined(CONFIG_DEBUG_INFO)
#define _THREAD_WRAPPER_REQUIRED
#endif

/* Set of FP/SIMD register options a user thread may request; includes SSE
 * registers only when the hardware support is configured in.
 */
#if defined(CONFIG_LAZY_FPU_SHARING) && defined(CONFIG_X86_SSE)
#define _FP_USER_MASK (K_FP_REGS | K_SSE_REGS)
#elif defined(CONFIG_LAZY_FPU_SHARING)
#define _FP_USER_MASK (K_FP_REGS)
#endif

#ifndef _ASMLANGUAGE

#include <zephyr/sys/util.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifdef _THREAD_WRAPPER_REQUIRED
/* Assembly wrapper that fixes up the stack before entering a new thread;
 * see swap.S
 */
extern void z_x86_thread_entry_wrapper(k_thread_entry_t entry,
				       void *p1, void *p2, void *p3);
#endif /* _THREAD_WRAPPER_REQUIRED */

#ifdef CONFIG_THREAD_LOCAL_STORAGE
/* Refresh the GDT entry backing a thread's TLS segment on context switch */
extern void z_x86_tls_update_gdt(struct k_thread *thread);
#endif

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_ARCH_X86_INCLUDE_IA32_KERNEL_ARCH_DATA_H_ */
```
/content/code_sandbox/arch/x86/include/ia32/kernel_arch_data.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
488